| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (lengths 82 to 53.2k) | int64 (0 to 721) | string (lengths 91 to 41.9k) | int64 (0 to 699) | int64 (0 or 1) |
from __future__ import annotations


def peak(lst: list[int]) -> int:
    """
    Return a peak element of `lst` (an element not smaller than its neighbours)
    using a divide-and-conquer search over the middle of the list.
    """
    m = len(lst) // 2
    # choose the middle 3 elements
    three = lst[m - 1 : m + 2]
    # if middle element is peak
    if three[1] > three[0] and three[1] > three[2]:
        return three[1]
    # if increasing, recurse on right
    elif three[0] < three[2]:
        if len(lst[:m]) == 2:
            m -= 1
        return peak(lst[m:])
    # decreasing
    else:
        if len(lst[:m]) == 2:
            m += 1
        return peak(lst[:m])


if __name__ == "__main__":
    import doctest

    doctest.testmod()
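# Illustrative sanity checks (added; not part of the original script). `peak`
# assumes the input actually contains a peak (e.g. a bitonic list) and returns
# *a* local peak, not necessarily the global maximum:
#
#   peak([1, 2, 3, 4, 5, 4, 3, 2, 1])  # -> 5
#   peak([1, 10, 9, 8, 7, 6])          # -> 10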
# --- row 1: code_codestyle = 105 (style_context sample follows) ---
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    def __init__(
        self,
        parent,
        out_indices=None,
        stage_names=None,
        out_features=None,
        backbone="resnet50",
        batch_size=3,
        image_size=32,
        num_channels=3,
        use_pretrained_backbone=True,
        is_training=True,
    ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        config = self.get_config()
        return config, pixel_values

    def get_config(self):
        return TimmBackboneConfig(
            image_size=self.image_size,
            num_channels=self.num_channels,
            out_features=self.out_features,
            out_indices=self.out_indices,
            stage_names=self.stage_names,
            use_pretrained_backbone=self.use_pretrained_backbone,
            backbone=self.backbone,
        )

    def create_and_check_model(self, config, pixel_values):
        model = TimmBackbone(config=config)
        model.to(torch_device)
        model.eval()
        with torch.no_grad():
            result = model(pixel_values)
        self.parent.assertEqual(
            result.feature_map[-1].shape,
            (self.batch_size, model.channels[-1], 14, 14),
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin, BackboneTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False

    def setUp(self):
        self.model_tester = TimmBackboneModelTester(self)
        self.config_tester = ConfigTester(self, config_class=TimmBackboneConfig, has_text_modality=False)
    def test_config(self):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True)
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint)

        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(len(timm_model.stage_names), len(transformers_model.stage_names))
        self.assertEqual(timm_model.channels, transformers_model.channels)
        # Out indices are set to the last layer by default. For timm models, we don't know
        # the number of layers in advance, so we set it to (-1,), whereas for transformers
        # models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
        self.assertEqual(timm_model.out_indices, (-1,))
        self.assertEqual(transformers_model.out_indices, [len(timm_model.stage_names) - 1])

        timm_model = AutoBackbone.from_pretrained(timm_checkpoint, use_timm_backbone=True, out_indices=[1, 2, 3])
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint, out_indices=[1, 2, 3])

        self.assertEqual(timm_model.out_indices, transformers_model.out_indices)
        self.assertEqual(len(timm_model.out_features), len(transformers_model.out_features))
        self.assertEqual(timm_model.channels, transformers_model.channels)
    @unittest.skip("TimmBackbone doesn't support feed forward chunking")
    def test_feed_forward_chunking(self):
        pass

    @unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute")
    def test_hidden_states_output(self):
        pass

    @unittest.skip("TimmBackbone initialization is managed on the timm side")
    def test_initialization(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("TimmBackbone models doesn't have inputs_embeds")
    def test_model_common_attributes(self):
        pass

    @unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint")
    def test_from_pretrained_no_checkpoint(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_save_load(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tie_model_weights(self):
        pass

    @unittest.skip("model weights aren't tied in TimmBackbone.")
    def test_tied_model_weights_key_ignore(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_load_save_without_tied_weights(self):
        pass

    @unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone")
    def test_model_weights_reload_when_same_tied_weights(self):
        pass

    @unittest.skip("TimmBackbone doesn't have hidden size info in its configuration.")
    def test_channels(self):
        pass

    @unittest.skip("TimmBackbone doesn't support output_attentions.")
    def test_attention_outputs(self):
        pass

    @unittest.skip("Safetensors is not supported by timm.")
    def test_can_use_safetensors(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_retain_grad_hidden_states_attentions(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions

        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config)
        model.to(torch_device)

        inputs = self._prepare_for_class(inputs_dict, model_class)
        outputs = model(**inputs)
        output = outputs[0][-1]

        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()

        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()

        output.flatten()[0].backward(retain_graph=True)

        self.assertIsNotNone(hidden_states.grad)

        if self.has_attentions:
            self.assertIsNotNone(attentions.grad)
    def test_create_from_modified_config(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), len(config.out_indices))
            self.assertEqual(len(model.channels), len(config.out_indices))

            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config)
            modified_config.out_indices = None
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

            self.assertEqual(len(result.feature_maps), 1)
            self.assertEqual(len(model.channels), 1)

            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config)
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config)
            model.to(torch_device)
            model.eval()
            result = model(**inputs_dict)

# --- row 1: style_context_codestyle = 67, label = 0 (end of row) ---
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {"configuration_mmbt": ["MMBTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mmbt"] = ["MMBTForClassification", "MMBTModel", "ModalEmbeddings"]
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
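# Illustration (added; not part of the module): with the `_LazyModule` pattern
# above, importing the package is cheap and the torch-backed classes are only
# materialised on first attribute access, e.g.:
#
#   from transformers.models.deprecated.mmbt import MMBTModel  # real import happens here
#
# The exact package path is an assumption; it depends on the transformers version.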
# --- row 2: code_codestyle = 197 (style_context sample follows) ---
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output("git merge-base main HEAD".split()).decode("utf-8")
modified_files = subprocess.check_output(f"git diff --name-only {fork_point_sha}".split()).decode("utf-8").split()
joined_dirs = "|".join(sys.argv[1:])
regex = re.compile(rf"^({joined_dirs}).*?\.py$")
relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(" ".join(relevant_modified_files), end="")
# --- row 2: style_context_codestyle = 197, label = 1 (end of row) ---
from typing import Tuple, Union
from ...modeling_outputs import BackboneOutput
from ...modeling_utils import PreTrainedModel
from ...utils import is_timm_available, is_torch_available, requires_backends
from ...utils.backbone_utils import BackboneMixin
from .configuration_timm_backbone import TimmBackboneConfig
if is_timm_available():
import timm
if is_torch_available():
from torch import Tensor
class TimmBackbone(PreTrainedModel, BackboneMixin):
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False
    config_class = TimmBackboneConfig

    def __init__(self, config, **kwargs):
        requires_backends(self, "timm")
        super().__init__(config)
        self.config = config
        if config.backbone is None:
            raise ValueError("backbone is not set in the config. Please set it to a timm model name.")
        if config.backbone not in timm.list_models():
            raise ValueError(f"backbone {config.backbone} is not supported by timm.")
        if hasattr(config, "out_features") and config.out_features is not None:
            raise ValueError("out_features is not supported by TimmBackbone. Please use out_indices instead.")
        pretrained = getattr(config, "use_pretrained_backbone", None)
        if pretrained is None:
            raise ValueError("use_pretrained_backbone is not set in the config. Please set it to True or False.")
        # We just take the final layer by default. This matches the default for the transformers models.
        out_indices = config.out_indices if getattr(config, "out_indices", None) is not None else (-1,)
        self._backbone = timm.create_model(
            config.backbone,
            pretrained=pretrained,
            features_only=config.features_only,
            in_chans=config.num_channels,
            out_indices=out_indices,
            **kwargs,
        )
        # These are used to control the output of the model when called. If output_hidden_states is True, then
        # return_layers is modified to include all layers.
        self._return_layers = self._backbone.return_layers
        self._all_layers = {layer["module"]: str(i) for i, layer in enumerate(self._backbone.feature_info.info)}
        super()._init_backbone(config)
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        requires_backends(cls, ["vision", "timm"])
        from ...models.timm_backbone import TimmBackboneConfig

        config = kwargs.pop("config", TimmBackboneConfig())
        use_timm = kwargs.pop("use_timm_backbone", True)
        if not use_timm:
            raise ValueError("use_timm_backbone must be True for timm backbones")

        num_channels = kwargs.pop("num_channels", config.num_channels)
        features_only = kwargs.pop("features_only", config.features_only)
        use_pretrained_backbone = kwargs.pop("use_pretrained_backbone", config.use_pretrained_backbone)
        out_indices = kwargs.pop("out_indices", config.out_indices)
        config = TimmBackboneConfig(
            backbone=pretrained_model_name_or_path,
            num_channels=num_channels,
            features_only=features_only,
            use_pretrained_backbone=use_pretrained_backbone,
            out_indices=out_indices,
        )
        return super()._from_config(config, **kwargs)

    def _init_weights(self, module):
        # Initialization is handled by timm when the backbone is created.
        pass
    def forward(self, pixel_values, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions

        if output_attentions:
            raise ValueError("Cannot output attentions for timm backbones at the moment")

        if output_hidden_states:
            # We modify the return layers to include all the stages of the backbone
            self._backbone.return_layers = self._all_layers
            hidden_states = self._backbone(pixel_values, **kwargs)
            self._backbone.return_layers = self._return_layers
            feature_maps = tuple(hidden_states[i] for i in self.out_indices)
        else:
            feature_maps = self._backbone(pixel_values, **kwargs)
            hidden_states = None

        feature_maps = tuple(feature_maps)
        hidden_states = tuple(hidden_states) if hidden_states is not None else None

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output = output + (hidden_states,)
            return output

        return BackboneOutput(feature_maps=feature_maps, hidden_states=hidden_states, attentions=None)
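# Minimal usage sketch (added; assumes torch and timm are installed and that
# "resnet18" is a valid timm model name):
#
#   import torch
#   from transformers import TimmBackbone
#
#   backbone = TimmBackbone.from_pretrained("resnet18", out_indices=(1, 2, 3))
#   outputs = backbone(torch.rand(1, 3, 224, 224))
#   print([fm.shape for fm in outputs.feature_maps])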
# --- row 3: code_codestyle = 583 (style_context sample follows) ---
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.text import TextDatasetReader
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_text_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 1
    assert dataset.column_names == ["text"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype


@pytest.mark.parametrize("keep_in_memory", [False, True])
def test_dataset_from_text_keep_in_memory(keep_in_memory, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
        dataset = TextDatasetReader(text_path, cache_dir=cache_dir, keep_in_memory=keep_in_memory).read()
    _check_text_dataset(dataset, expected_features)
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_dataset_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader(text_path, features=features, cache_dir=cache_dir).read()
    _check_text_dataset(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_dataset_from_text_split(split, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(text_path, cache_dir=cache_dir, split=split).read()
    _check_text_dataset(dataset, expected_features)
    assert dataset.split == split if split else "train"
@pytest.mark.parametrize("path_type" , [str, list] )
def UpperCamelCase_( snake_case : str , snake_case : Union[str, Any] , snake_case : Dict ):
'''simple docstring'''
if issubclass(snake_case , snake_case ):
snake_case_ = text_path
elif issubclass(snake_case , snake_case ):
snake_case_ = [text_path]
snake_case_ = tmp_path / "cache"
snake_case_ = {"text": "string"}
snake_case_ = TextDatasetReader(snake_case , cache_dir=snake_case ).read()
_check_text_dataset(snake_case , snake_case )
def UpperCamelCase_( snake_case : str , snake_case : int , snake_case : Dict=("train",) ):
'''simple docstring'''
assert isinstance(snake_case , snake_case )
for split in splits:
snake_case_ = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 1
assert dataset.column_names == ["text"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("keep_in_memory" , [False, True] )
def UpperCamelCase_( snake_case : List[str] , snake_case : Dict , snake_case : Tuple ):
'''simple docstring'''
snake_case_ = tmp_path / "cache"
snake_case_ = {"text": "string"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
snake_case_ = TextDatasetReader({"train": text_path} , cache_dir=snake_case , keep_in_memory=snake_case ).read()
_check_text_datasetdict(snake_case , snake_case )
@pytest.mark.parametrize(
    "features",
    [
        None,
        {"text": "string"},
        {"text": "int32"},
        {"text": "float32"},
    ],
)
def test_datasetdict_from_text_features(features, text_path, tmp_path):
    cache_dir = tmp_path / "cache"
    default_expected_features = {"text": "string"}
    expected_features = features.copy() if features else default_expected_features
    features = (
        Features({feature: Value(dtype) for feature, dtype in features.items()}) if features is not None else None
    )
    dataset = TextDatasetReader({"train": text_path}, features=features, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features)


@pytest.mark.parametrize("split", [None, NamedSplit("train"), "train", "test"])
def test_datasetdict_from_text_split(split, text_path, tmp_path):
    if split:
        path = {split: text_path}
    else:
        split = "train"
        path = {"train": text_path, "test": text_path}
    cache_dir = tmp_path / "cache"
    expected_features = {"text": "string"}
    dataset = TextDatasetReader(path, cache_dir=cache_dir).read()
    _check_text_datasetdict(dataset, expected_features, splits=list(path.keys()))
    assert all(dataset[split].split == split for split in path.keys())
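# Note (added for clarity): `tmp_path` is pytest's built-in temporary-directory
# fixture, and `text_path` is assumed to come from the suite's conftest, pointing
# at a small text file with exactly 4 lines; that is why the helper functions
# assert `num_rows == 4`.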
# --- row 3: style_context_codestyle = 400, label = 0 (end of row) ---
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_bridgetower""": [
"""BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""BridgeTowerConfig""",
"""BridgeTowerTextConfig""",
"""BridgeTowerVisionConfig""",
],
"""processing_bridgetower""": ["""BridgeTowerProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : str = ["""BridgeTowerImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bridgetower"] = [
"""BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BridgeTowerForContrastiveLearning""",
"""BridgeTowerForImageAndTextRetrieval""",
"""BridgeTowerForMaskedLM""",
"""BridgeTowerModel""",
"""BridgeTowerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bridgetower import (
BRIDGETOWER_PRETRAINED_CONFIG_ARCHIVE_MAP,
BridgeTowerConfig,
BridgeTowerTextConfig,
BridgeTowerVisionConfig,
)
from .processing_bridgetower import BridgeTowerProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_bridgetower import BridgeTowerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bridgetower import (
BRIDGETOWER_PRETRAINED_MODEL_ARCHIVE_LIST,
BridgeTowerForContrastiveLearning,
BridgeTowerForImageAndTextRetrieval,
BridgeTowerForMaskedLM,
BridgeTowerModel,
BridgeTowerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
# --- row 4: code_codestyle = 177 (style_context sample follows) ---
import argparse
from collections import OrderedDict
from pathlib import Path
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision.transforms import functional as F
from transformers import DetrImageProcessor, TableTransformerConfig, TableTransformerForObjectDetection
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
rename_keys = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''', F'''encoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', F'''encoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.weight''', F'''encoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear1.bias''', F'''encoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.weight''', F'''encoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.linear2.bias''', F'''encoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.encoder.layers.{i}.norm1.weight''', F'''encoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.encoder.layers.{i}.norm1.bias''', F'''encoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.weight''', F'''encoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.encoder.layers.{i}.norm2.bias''', F'''encoder.layers.{i}.final_layer_norm.bias'''))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''', F'''decoder.layers.{i}.self_attn.out_proj.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', F'''decoder.layers.{i}.self_attn.out_proj.bias''')
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
F'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
)
)
rename_keys.append(
(
F'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
F'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
)
)
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.weight''', F'''decoder.layers.{i}.fc1.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear1.bias''', F'''decoder.layers.{i}.fc1.bias'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.weight''', F'''decoder.layers.{i}.fc2.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.linear2.bias''', F'''decoder.layers.{i}.fc2.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm1.weight''', F'''decoder.layers.{i}.self_attn_layer_norm.weight''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm1.bias''', F'''decoder.layers.{i}.self_attn_layer_norm.bias'''))
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.weight''', F'''decoder.layers.{i}.encoder_attn_layer_norm.weight''')
)
rename_keys.append(
(F'''transformer.decoder.layers.{i}.norm2.bias''', F'''decoder.layers.{i}.encoder_attn_layer_norm.bias''')
)
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.weight''', F'''decoder.layers.{i}.final_layer_norm.weight'''))
rename_keys.append((F'''transformer.decoder.layers.{i}.norm3.bias''', F'''decoder.layers.{i}.final_layer_norm.bias'''))
# convolutional projection + query embeddings + layernorm of encoder + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("""input_proj.weight""", """input_projection.weight"""),
("""input_proj.bias""", """input_projection.bias"""),
("""query_embed.weight""", """query_position_embeddings.weight"""),
("""transformer.encoder.norm.weight""", """encoder.layernorm.weight"""),
("""transformer.encoder.norm.bias""", """encoder.layernorm.bias"""),
("""transformer.decoder.norm.weight""", """decoder.layernorm.weight"""),
("""transformer.decoder.norm.bias""", """decoder.layernorm.bias"""),
("""class_embed.weight""", """class_labels_classifier.weight"""),
("""class_embed.bias""", """class_labels_classifier.bias"""),
("""bbox_embed.layers.0.weight""", """bbox_predictor.layers.0.weight"""),
("""bbox_embed.layers.0.bias""", """bbox_predictor.layers.0.bias"""),
("""bbox_embed.layers.1.weight""", """bbox_predictor.layers.1.weight"""),
("""bbox_embed.layers.1.bias""", """bbox_predictor.layers.1.bias"""),
("""bbox_embed.layers.2.weight""", """bbox_predictor.layers.2.weight"""),
("""bbox_embed.layers.2.bias""", """bbox_predictor.layers.2.bias"""),
]
)
def rename_key(state_dict, old, new):
    val = state_dict.pop(old)
    state_dict[new] = val


def rename_backbone_keys(state_dict):
    new_state_dict = OrderedDict()
    for key, value in state_dict.items():
        if "backbone.0.body" in key:
            new_key = key.replace("backbone.0.body", "backbone.conv_encoder.model")
            new_state_dict[new_key] = value
        else:
            new_state_dict[key] = value
    return new_state_dict
def read_in_q_k_v(state_dict):
    prefix = ""
    # first: transformer encoder
    for i in range(6):
        # read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"encoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"encoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"encoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
    # next: transformer decoder (which is a bit more complex because it also includes cross-attention)
    for i in range(6):
        # read in weights + bias of input projection layer of self-attention
        in_proj_weight = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight")
        in_proj_bias = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.weight"] = in_proj_weight[:256, :]
        state_dict[f"decoder.layers.{i}.self_attn.q_proj.bias"] = in_proj_bias[:256]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.weight"] = in_proj_weight[256:512, :]
        state_dict[f"decoder.layers.{i}.self_attn.k_proj.bias"] = in_proj_bias[256:512]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.weight"] = in_proj_weight[-256:, :]
        state_dict[f"decoder.layers.{i}.self_attn.v_proj.bias"] = in_proj_bias[-256:]
        # read in weights + bias of input projection layer of cross-attention
        in_proj_weight_cross_attn = state_dict.pop(
            f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight"
        )
        in_proj_bias_cross_attn = state_dict.pop(f"{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias")
        # next, add query, keys and values (in that order) of cross-attention to the state dict
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.weight"] = in_proj_weight_cross_attn[:256, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.q_proj.bias"] = in_proj_bias_cross_attn[:256]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.weight"] = in_proj_weight_cross_attn[256:512, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.k_proj.bias"] = in_proj_bias_cross_attn[256:512]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.weight"] = in_proj_weight_cross_attn[-256:, :]
        state_dict[f"decoder.layers.{i}.encoder_attn.v_proj.bias"] = in_proj_bias_cross_attn[-256:]
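# Note on the slicing above (added for clarity): PyTorch's nn.MultiheadAttention
# packs the query/key/value projections into a single (3*d_model, d_model)
# matrix stacked in q, k, v order. With d_model = 256 here, [:256] recovers the
# query block, [256:512] the key block, and [-256:] the value block.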
def resize(image, checkpoint_url):
    width, height = image.size
    current_max_size = max(width, height)
    target_max_size = 800 if "detection" in checkpoint_url else 1000
    scale = target_max_size / current_max_size
    resized_image = image.resize((int(round(scale * width)), int(round(scale * height))))
    return resized_image


def normalize(image):
    image = F.to_tensor(image)
    image = F.normalize(image, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
    return image
@torch.no_grad()
def convert_table_transformer_checkpoint(checkpoint_url, pytorch_dump_folder_path, push_to_hub):
    """
    Copy/paste/tweak the original checkpoint's weights to our Table Transformer structure.
    """
    logger.info("Converting model...")

    # load original state dict
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")

    # rename keys
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    state_dict = rename_backbone_keys(state_dict)

    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict)

    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = "model."
    for key in state_dict.copy().keys():
        if not key.startswith("class_labels_classifier") and not key.startswith("bbox_predictor"):
            val = state_dict.pop(key)
            state_dict[prefix + key] = val

    # create HuggingFace model and load state dict
    config = TableTransformerConfig(
        backbone="resnet18",
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        ce_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.4,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
    )

    if "detection" in checkpoint_url:
        config.num_queries = 15
        config.num_labels = 2
        id2label = {0: "table", 1: "table rotated"}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        config.num_queries = 125
        config.num_labels = 6
        id2label = {
            0: "table",
            1: "table column",
            2: "table row",
            3: "table column header",
            4: "table projected row header",
            5: "table spanning cell",
        }
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    image_processor = DetrImageProcessor(
        format="coco_detection", max_size=800 if "detection" in checkpoint_url else 1000
    )
    model = TableTransformerForObjectDetection(config)
    model.load_state_dict(state_dict)
    model.eval()

    # verify our conversion
    filename = "example_pdf.png" if "detection" in checkpoint_url else "example_table.png"
    file_path = hf_hub_download(repo_id="nielsr/example-pdf", repo_type="dataset", filename=filename)
    image = Image.open(file_path).convert("RGB")
    pixel_values = normalize(resize(image, checkpoint_url)).unsqueeze(0)

    outputs = model(pixel_values)

    if "detection" in checkpoint_url:
        expected_shape = (1, 15, 3)
        expected_logits = torch.tensor(
            [[-6.7897, -16.9985, 6.7937], [-8.0186, -22.2192, 6.9677], [-7.3117, -21.0708, 7.4055]]
        )
        expected_boxes = torch.tensor([[0.4867, 0.1767, 0.6732], [0.6718, 0.4479, 0.3830], [0.4716, 0.1760, 0.6364]])
    else:
        expected_shape = (1, 125, 7)
        expected_logits = torch.tensor(
            [[-18.1430, -8.3214, 4.8274], [-18.4685, -7.1361, -4.2667], [-26.3693, -9.3429, -4.9962]]
        )
        expected_boxes = torch.tensor([[0.4983, 0.5595, 0.9440], [0.4916, 0.6315, 0.5954], [0.6108, 0.8637, 0.1135]])

    assert outputs.logits.shape == expected_shape
    assert torch.allclose(outputs.logits[0, :3, :3], expected_logits, atol=1e-4)
    assert torch.allclose(outputs.pred_boxes[0, :3, :3], expected_boxes, atol=1e-4)
    print("Looks ok!")

    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"Saving PyTorch model and image processor to {pytorch_dump_folder_path}...")
        Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
        model.save_pretrained(pytorch_dump_folder_path)
        image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        # Push model to HF hub
        logger.info("Pushing model to the hub...")
        model_name = (
            "microsoft/table-transformer-detection"
            if "detection" in checkpoint_url
            else "microsoft/table-transformer-structure-recognition"
        )
        model.push_to_hub(model_name)
        image_processor.push_to_hub(model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
type=str,
choices=[
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth""",
"""https://pubtables1m.blob.core.windows.net/model/pubtables1m_structure_detr_r18.pth""",
],
help="""URL of the Table Transformer checkpoint you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether or not to push the converted model to the 🤗 hub."""
)
    args = parser.parse_args()
convert_table_transformer_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
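    # Example invocation (added; the script filename is an assumption):
    #   python convert_table_transformer_original_pytorch_checkpoint_to_pytorch.py \
    #       --checkpoint_url https://pubtables1m.blob.core.windows.net/model/pubtables1m_detection_detr_r18.pth \
    #       --pytorch_dump_folder_path ./table-transformer-detection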
# --- row 4: style_context_codestyle = 177, label = 1 (end of row) ---
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeq2SeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class TFPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=40,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id

    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        config = self.config_cls(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_ids=[2],
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.pad_token_id,
            **self.config_updates,
        )
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFPegasusModel(config=config).get_decoder()
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        head_mask = inputs_dict["head_mask"]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_past_slice, output_from_no_past_slice, rtol=1e-3)
def prepare_pegasus_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = tf.concat(
            [
                tf.ones(decoder_input_ids[:, :1].shape, dtype=tf.int8),
                tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:], config.pad_token_id), tf.int8),
            ],
            axis=-1,
        )
    if head_mask is None:
        head_mask = tf.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = tf.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
        "head_mask": head_mask,
        "decoder_head_mask": decoder_head_mask,
        "cross_attn_head_mask": cross_attn_head_mask,
    }
@require_tf
class TFPegasusModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
    all_generative_model_classes = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": TFPegasusForConditionalGeneration,
            "feature-extraction": TFPegasusModel,
            "summarization": TFPegasusForConditionalGeneration,
            "text2text-generation": TFPegasusForConditionalGeneration,
            "translation": TFPegasusForConditionalGeneration,
        }
        if is_tf_available()
        else {}
    )
    is_encoder_decoder = True
    test_pruning = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
@require_sentencepiece
@require_tokenizers
@require_tf
class TFPegasusIntegrationTests(unittest.TestCase):
    src_text = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
    expected_text = [
"""California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"""
""" reduce the risk of wildfires.""",
"""N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.""",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
__a = """google/pegasus-xsum"""
    @cached_property
    def tokenizer(self):
        return AutoTokenizer.from_pretrained(self.model_name)

    @cached_property
    def model(self):
        model = TFAutoModelForSeq2SeqLM.from_pretrained(self.model_name)
        return model

    def _assert_generated_batch_equal_expected(self, **tokenizer_kwargs):
        generated_words = self.translate_src_text(**tokenizer_kwargs)
        assert self.expected_text == generated_words

    def translate_src_text(self, **tokenizer_kwargs):
        model_inputs = self.tokenizer(self.src_text, **tokenizer_kwargs, padding=True, return_tensors="tf")
        generated_ids = self.model.generate(
            model_inputs.input_ids,
            attention_mask=model_inputs.attention_mask,
            num_beams=2,
            use_cache=True,
        )
        generated_words = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=True)
        return generated_words

    @slow
    def test_batch_generation(self):
        self._assert_generated_batch_equal_expected()
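# Note (added): the batch-generation test above is decorated with @slow; in the
# transformers test suite such tests only run when the RUN_SLOW=1 environment
# variable is set, e.g.
#   RUN_SLOW=1 pytest tests/models/pegasus/test_modeling_tf_pegasus.py -k Integration
# (the exact test-file path is an assumption).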
# --- row 5: code_codestyle = 224 (style_context sample follows) ---
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _UpperCAmelCase ( __lowerCamelCase : str = "isbn/0140328726" ) -> dict:
_snake_case = olid.strip().strip('''/''' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('''/''' ) != 1:
_snake_case = f'''{olid} is not a valid Open Library olid'''
raise ValueError(__lowerCamelCase )
return requests.get(f'''https://openlibrary.org/{new_olid}.json''' ).json()
def _UpperCAmelCase ( __lowerCamelCase : dict ) -> dict:
_snake_case = {
'''title''': '''Title''',
'''publish_date''': '''Publish date''',
'''authors''': '''Authors''',
'''number_of_pages''': '''Number of pages:''',
'''first_sentence''': '''First sentence''',
'''isbn_10''': '''ISBN (10)''',
'''isbn_13''': '''ISBN (13)''',
}
_snake_case = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
_snake_case = [
get_openlibrary_data(author['''key'''] )['''name'''] for author in data['''Authors''']
]
_snake_case = data['''First sentence''']['''value''']
for key, value in data.items():
if isinstance(__lowerCamelCase , __lowerCamelCase ):
_snake_case = ''', '''.join(__lowerCamelCase )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input("\nEnter the ISBN code to search (or 'quit' to stop): ").strip()
        if isbn.lower() in ("", "q", "quit", "exit", "stop"):
            break
        if len(isbn) not in (10, 13) or not isbn.isdigit():
            print(f"Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.")
            continue
        print(f"\nSearching Open Library for ISBN: {isbn}...\n")
        try:
            book_summary = summarize_book(get_openlibrary_data(f"isbn/{isbn}"))
            print("\n".join(f"{key}: {value}" for key, value in book_summary.items()))
        except JSONDecodeError:  # Workaround for requests.exceptions.RequestException
            print(f"Sorry, there are no results for ISBN: {isbn}.")
# --- row 5: style_context_codestyle = 224, label = 1 (end of row) ---
"""simple docstring"""
# This code is adapted from OpenAI's release
# https://github.com/openai/human-eval/blob/master/human_eval/execution.py
import contextlib
import faulthandler
import io
import multiprocessing
import os
import platform
import signal
import tempfile
def check_correctness(check_program, timeout, task_id, completion_id):
    """
    Evaluates the functional correctness of a completion by running the test
    suite provided in the problem.
    """
    manager = multiprocessing.Manager()
    result = manager.list()

    p = multiprocessing.Process(target=unsafe_execute, args=(check_program, result, timeout))
    p.start()
    p.join(timeout=timeout + 1)
    if p.is_alive():
        p.kill()

    if not result:
        result.append("timed out")

    return {
        "task_id": task_id,
        "passed": result[0] == "passed",
        "result": result[0],
        "completion_id": completion_id,
    }


def unsafe_execute(check_program, result, timeout):
    with create_tempdir():
        # These system calls are needed when cleaning up tempdir.
        import os
        import shutil

        rmtree = shutil.rmtree
        rmdir = os.rmdir
        chdir = os.chdir

        # Disable functionalities that can make destructive changes to the test.
        reliability_guard()

        # Run program.
        try:
            exec_globals = {}
            with swallow_io():
                with time_limit(timeout):
                    exec(check_program, exec_globals)
            result.append("passed")
        except TimeoutException:
            result.append("timed out")
        except BaseException as e:
            result.append(f"failed: {e}")

        # Needed for cleaning up.
        shutil.rmtree = rmtree
        os.rmdir = rmdir
        os.chdir = chdir
@contextlib.contextmanager
def time_limit(seconds):
    def signal_handler(signum, frame):
        raise TimeoutException("Timed out!")

    signal.setitimer(signal.ITIMER_REAL, seconds)
    signal.signal(signal.SIGALRM, signal_handler)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)


@contextlib.contextmanager
def swallow_io():
    stream = WriteOnlyStringIO()
    with contextlib.redirect_stdout(stream):
        with contextlib.redirect_stderr(stream):
            with redirect_stdin(stream):
                yield


@contextlib.contextmanager
def create_tempdir():
    with tempfile.TemporaryDirectory() as dirname:
        with chdir(dirname):
            yield dirname
class TimeoutException(Exception):
    pass


class WriteOnlyStringIO(io.StringIO):
    """StringIO that throws an exception when it's read from."""

    def read(self, *args, **kwargs):
        raise OSError

    def readline(self, *args, **kwargs):
        raise OSError

    def readlines(self, *args, **kwargs):
        raise OSError

    def readable(self, *args, **kwargs):
        """Returns True if the IO object can be read."""
        return False


class redirect_stdin(contextlib._RedirectStream):  # type: ignore
    _stream = "stdin"


@contextlib.contextmanager
def chdir(root):
    if root == ".":
        yield
        return
    cwd = os.getcwd()
    os.chdir(root)
    try:
        yield
    except BaseException as exc:
        raise exc
    finally:
        os.chdir(cwd)
def reliability_guard(maximum_memory_bytes=None):
    """
    Disables various destructive functions so that generated code is less
    likely to interfere with the test run (e.g. fork bombs, killing other
    processes, removing files). WARNING: this is not a security sandbox;
    untrusted model-generated code should not be run outside of one.
    """
    if maximum_memory_bytes is not None:
        import resource

        resource.setrlimit(resource.RLIMIT_AS, (maximum_memory_bytes, maximum_memory_bytes))
        resource.setrlimit(resource.RLIMIT_DATA, (maximum_memory_bytes, maximum_memory_bytes))
        if not platform.uname().system == "Darwin":
            resource.setrlimit(resource.RLIMIT_STACK, (maximum_memory_bytes, maximum_memory_bytes))

    faulthandler.disable()

    import builtins

    builtins.exit = None
    builtins.quit = None

    import os

    os.environ["OMP_NUM_THREADS"] = "1"

    os.kill = None
    os.system = None
    os.putenv = None
    os.remove = None
    os.removedirs = None
    os.rmdir = None
    os.fchdir = None
    os.setuid = None
    os.fork = None
    os.forkpty = None
    os.killpg = None
    os.rename = None
    os.renames = None
    os.truncate = None
    os.replace = None
    os.unlink = None
    os.fchmod = None
    os.fchown = None
    os.chmod = None
    os.chown = None
    os.chroot = None
    os.lchflags = None
    os.lchmod = None
    os.lchown = None
    os.getcwd = None
    os.chdir = None

    import shutil

    shutil.rmtree = None
    shutil.move = None
    shutil.chown = None

    import subprocess

    subprocess.Popen = None  # type: ignore
    __builtins__["help"] = None

    import sys

    sys.modules["ipdb"] = None
    sys.modules["joblib"] = None
    sys.modules["resource"] = None
    sys.modules["psutil"] = None
    sys.modules["tkinter"] = None
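# Minimal usage sketch (added; illustrative only). `check_program` is typically
# the problem prompt, a model completion, and the test code concatenated by the
# harness:
#
#   if __name__ == "__main__":
#       program = "def add(a, b):\n    return a + b\n\nassert add(1, 2) == 3\n"
#       print(check_correctness(program, timeout=3.0, task_id="demo/0", completion_id=0))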
# --- row 6: code_codestyle = 632 (style_context sample follows) ---
import os
import sys
from contextlib import contextmanager
# Windows only
if os.name == "nt":
import ctypes
import msvcrt # noqa
class CursorInfo(ctypes.Structure):
    # _fields is a specification for creating a struct on the Windows side
    _fields_ = [("size", ctypes.c_int), ("visible", ctypes.c_byte)]


def hide_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = False
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25l")
        sys.stdout.flush()


def show_cursor():
    if os.name == "nt":
        ci = CursorInfo()
        handle = ctypes.windll.kernel32.GetStdHandle(-11)
        ctypes.windll.kernel32.GetConsoleCursorInfo(handle, ctypes.byref(ci))
        ci.visible = True
        ctypes.windll.kernel32.SetConsoleCursorInfo(handle, ctypes.byref(ci))
    elif os.name == "posix":
        sys.stdout.write("\033[?25h")
        sys.stdout.flush()


@contextmanager
def hide():
    """Context manager to hide the terminal cursor and restore it afterwards."""
    try:
        hide_cursor()
        yield
    finally:
        show_cursor()
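# Usage sketch (added): wrap terminal drawing so the cursor is restored even if
# the block raises.
#
#   with hide():
#       render_menu()  # hypothetical drawing function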
# --- row 6: style_context_codestyle = 632, label = 1 (end of row) ---
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
logger = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings


def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
if __name__ == "__main__":
    def list_str(values):
        return values.split(",")

    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()
    from_gh = args.from_gh
if from_gh:
# The artifacts have to be downloaded using `actions/download-artifact@v3`
pass
else:
os.makedirs(args.output_dir, exist_ok=True)
# get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
# download artifacts
for idx, (name, url) in enumerate(artifacts.items()):
print(name)
print(url)
            print("=" * 80)
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
# extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)
with open(os.path.join(args.output_dir, '''selected_warnings.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
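    # Example invocation (added; values are illustrative):
    #   python utils/extract_warnings.py --workflow_run_id 1234567890 \
    #       --output_dir ./warnings_artifacts --token $GH_TOKEN \
    #       --targets DeprecationWarning,UserWarning,FutureWarning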
# --- row 7: code_codestyle = 531 (style_context sample follows) ---
'''simple docstring'''
def solution():
    '''simple docstring'''
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6
    month = 1
    year = 1901
    sundays = 0
    while year < 2001:
        day += 7
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
if year < 2001 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
print(solution())
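    # Expected output: 171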
| 531 | 1 |
import unittest
from transformers import AutoConfig, AutoTokenizer, BertConfig, RobertaConfig, TensorType, is_flax_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, require_flax, slow
if is_flax_available():
import jax
from transformers.models.auto.modeling_flax_auto import FlaxAutoModel
from transformers.models.bert.modeling_flax_bert import FlaxBertModel
from transformers.models.roberta.modeling_flax_roberta import FlaxRobertaModel
@require_flax
class UpperCamelCase( unittest.TestCase ):
@slow
    def test_bert_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            with self.subTest(model_name ):
                __snake_case = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(__snake_case )
                self.assertIsInstance(__snake_case , BertConfig )
                __snake_case = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(__snake_case )
                self.assertIsInstance(__snake_case , FlaxBertModel )
@slow
    def test_roberta_from_pretrained( self ):
        '''simple docstring'''
        for model_name in ["roberta-base", "roberta-large"]:
            with self.subTest(model_name ):
                __snake_case = AutoConfig.from_pretrained(model_name )
                self.assertIsNotNone(__snake_case )
                self.assertIsInstance(__snake_case , RobertaConfig )
                __snake_case = FlaxAutoModel.from_pretrained(model_name )
                self.assertIsNotNone(__snake_case )
                self.assertIsInstance(__snake_case , FlaxRobertaModel )
@slow
    def test_bert_jax_jit( self ):
        '''simple docstring'''
        for model_name in ["bert-base-cased", "bert-large-uncased"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxBertModel.from_pretrained(model_name )
            tokens = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )

            eval(**tokens ).block_until_ready()
@slow
    def test_roberta_jax_jit( self ):
        '''simple docstring'''
        for model_name in ["roberta-base", "roberta-large"]:
            tokenizer = AutoTokenizer.from_pretrained(model_name )
            model = FlaxRobertaModel.from_pretrained(model_name )
            tokens = tokenizer("Do you support jax jitted function?" , return_tensors=TensorType.JAX )

            @jax.jit
            def eval(**kwargs ):
                return model(**kwargs )

            eval(**tokens ).block_until_ready()
    def test_repo_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , "bert-base is not a local folder and is not a valid model identifier" ):
            __snake_case = FlaxAutoModel.from_pretrained("bert-base" )
    def test_revision_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
            __snake_case = FlaxAutoModel.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , revision="aaaaaa" )
    def test_model_file_not_found( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(
            EnvironmentError , "hf-internal-testing/config-no-model does not appear to have a file named flax_model.msgpack" , ):
            __snake_case = FlaxAutoModel.from_pretrained("hf-internal-testing/config-no-model" )
    def test_model_from_pt_suggestion( self ):
        '''simple docstring'''
        with self.assertRaisesRegex(EnvironmentError , "Use `from_pt=True` to load this model" ):
            __snake_case = FlaxAutoModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only" )
| 473 |
import inspect
import unittest
from transformers import ViTHybridConfig
from transformers.testing_utils import require_accelerate, require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTHybridForImageClassification, ViTHybridImageProcessor, ViTHybridModel
from transformers.models.vit_hybrid.modeling_vit_hybrid import VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
class ViTHybridModelTester:
    def __init__( self , parent , batch_size=1_3 , image_size=6_4 , patch_size=2 , num_channels=3 , is_training=True , use_labels=True , hidden_size=3_2 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=3_7 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , type_sequence_label_size=1_0 , initializer_range=0.02 , backbone_featmap_shape=[1, 1_6, 4, 4] , scope=None , ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.backbone_featmap_shape = backbone_featmap_shape
        # in ViT hybrid, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        # the number of patches is based on the feature map of the backbone, which by default uses an output stride
        # of 32, which means that the feature map has a spatial resolution of 1/32 of the input image size
        num_patches = (self.image_size // 3_2) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
        return config, pixel_values, labels
    def get_config( self ):
        '''simple docstring'''
        backbone_config = {
"global_padding": "same",
"layer_type": "bottleneck",
"depths": [3, 4, 9],
"out_features": ["stage1", "stage2", "stage3"],
"embedding_dynamic_padding": True,
"hidden_sizes": [4, 8, 1_6, 3_2],
"num_groups": 2,
}
return ViTHybridConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=False , initializer_range=self.initializer_range , backbone_featmap_shape=self.backbone_featmap_shape , backbone_config=backbone_config , )
    def create_and_check_model( self , config , pixel_values , labels ):
        '''simple docstring'''
        model = ViTHybridModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_image_classification( self , config , pixel_values , labels ):
        '''simple docstring'''
        config.num_labels = self.type_sequence_label_size
        model = ViTHybridForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
    def prepare_config_and_inputs_for_common( self ):
        '''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class ViTHybridModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (ViTHybridModel, ViTHybridForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"""feature-extraction""": ViTHybridModel, """image-classification""": ViTHybridForImageClassification}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
        '''simple docstring'''
        self.model_tester = ViTHybridModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ViTHybridConfig , has_text_modality=False , hidden_size=3_7 )
    def test_config( self ):
        '''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
    def test_inputs_embeds( self ):
'''simple docstring'''
pass
    def test_model_common_attributes( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x , nn.Linear ) )
    def test_forward_signature( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_image_classification( self ):
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_initialization( self ):
        '''simple docstring'''
        config , _ = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config )
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init )
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "ViTHybridPatchEmbeddings":
                    backbone_params = [f'''{name}.{key}''' for key in module.state_dict().keys()]
                    break
            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
    def test_model_from_pretrained( self ):
        '''simple docstring'''
        for model_name in VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTHybridModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img( ):
    '''simple docstring'''
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class ViTHybridModelIntegrationTest( unittest.TestCase ):
@cached_property
    def default_image_processor( self ):
'''simple docstring'''
return (
ViTHybridImageProcessor.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
    def test_inference_image_classification_head( self ):
        '''simple docstring'''
        model = ViTHybridForImageClassification.from_pretrained(VIT_HYBRID_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
            torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_0) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([-1.9090, -0.4993, -0.2389] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1e-4 ) )
@slow
@require_accelerate
    def test_accelerate_inference( self ):
        '''simple docstring'''
        image_processor = ViTHybridImageProcessor.from_pretrained("google/vit-hybrid-base-bit-384" )
        model = ViTHybridForImageClassification.from_pretrained("google/vit-hybrid-base-bit-384" , device_map="auto" )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors="pt" )
        outputs = model(**inputs )
        logits = outputs.logits
        # model predicts one of the 1000 ImageNet classes
        predicted_class_idx = logits.argmax(-1 ).item()
        self.assertEqual(model.config.id2label[predicted_class_idx] , "tabby, tabby cat" )
| 473 | 1 |
'''simple docstring'''
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
    GPT2Config,
    GPT2LMHeadModel,
    GPT2Tokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
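# Maps each supported model type to its (config class, masked/causal LM class, tokenizer class) triple.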
MODEL_CLASSES = {
    'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
    'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
    'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
    'gpt2': (GPT2Config, GPT2LMHeadModel, GPT2Tokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts)
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config)
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights)
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
def freeze_pos_embeddings(student , args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False
def freeze_token_type_embeddings(student , args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description='Training')
parser.add_argument('--force' , action='store_true' , help='Overwrite dump_path if it already exists.')
parser.add_argument(
'--dump_path' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The output directory (log, checkpoints, parameters, etc.)')
parser.add_argument(
'--data_file' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The binarized file (tokenized + tokens_to_ids) and grouped by sequence.' , )
parser.add_argument(
'--student_type' , type=lowerCamelCase_ , choices=['distilbert', 'roberta', 'gpt2'] , required=lowerCamelCase_ , help='The student type (DistilBERT, RoBERTa).' , )
parser.add_argument('--student_config' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='Path to the student configuration.')
parser.add_argument(
'--student_pretrained_weights' , default=lowerCamelCase_ , type=lowerCamelCase_ , help='Load student initialization checkpoint.')
parser.add_argument(
'--teacher_type' , choices=['bert', 'roberta', 'gpt2'] , required=lowerCamelCase_ , help='Teacher type (BERT, RoBERTa).')
parser.add_argument('--teacher_name' , type=lowerCamelCase_ , required=lowerCamelCase_ , help='The teacher model.')
parser.add_argument('--temperature' , default=2.0 , type=lowerCamelCase_ , help='Temperature for the softmax temperature.')
parser.add_argument(
'--alpha_ce' , default=0.5 , type=lowerCamelCase_ , help='Linear weight for the distillation loss. Must be >=0.')
parser.add_argument(
'--alpha_mlm' , default=0.0 , type=lowerCamelCase_ , help='Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.' , )
parser.add_argument('--alpha_clm' , default=0.5 , type=lowerCamelCase_ , help='Linear weight for the CLM loss. Must be >=0.')
parser.add_argument('--alpha_mse' , default=0.0 , type=lowerCamelCase_ , help='Linear weight of the MSE loss. Must be >=0.')
parser.add_argument(
'--alpha_cos' , default=0.0 , type=lowerCamelCase_ , help='Linear weight of the cosine embedding loss. Must be >=0.')
parser.add_argument(
'--mlm' , action='store_true' , help='The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM.')
parser.add_argument(
'--mlm_mask_prop' , default=0.15 , type=lowerCamelCase_ , help='Proportion of tokens for which we need to make a prediction.' , )
parser.add_argument('--word_mask' , default=0.8 , type=lowerCamelCase_ , help='Proportion of tokens to mask out.')
parser.add_argument('--word_keep' , default=0.1 , type=lowerCamelCase_ , help='Proportion of tokens to keep.')
parser.add_argument('--word_rand' , default=0.1 , type=lowerCamelCase_ , help='Proportion of tokens to randomly replace.')
parser.add_argument(
'--mlm_smoothing' , default=0.7 , type=lowerCamelCase_ , help='Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).' , )
parser.add_argument('--token_counts' , type=lowerCamelCase_ , help='The token counts in the data_file for MLM.')
parser.add_argument(
'--restrict_ce_to_mask' , action='store_true' , help='If true, compute the distillation loss only the [MLM] prediction distribution.' , )
parser.add_argument(
'--freeze_pos_embs' , action='store_true' , help='Freeze positional embeddings during distillation. For student_type in [\'roberta\', \'gpt2\'] only.' , )
parser.add_argument(
'--freeze_token_type_embds' , action='store_true' , help='Freeze token type embeddings during distillation if existent. For student_type in [\'roberta\'] only.' , )
parser.add_argument('--n_epoch' , type=lowerCamelCase_ , default=3 , help='Number of pass on the whole dataset.')
parser.add_argument('--batch_size' , type=lowerCamelCase_ , default=5 , help='Batch size (for each process).')
parser.add_argument(
'--group_by_size' , action='store_false' , help='If true, group sequences that have similar length into the same batch. Default is true.' , )
parser.add_argument(
'--gradient_accumulation_steps' , type=lowerCamelCase_ , default=50 , help='Gradient accumulation for larger training batches.' , )
parser.add_argument('--warmup_prop' , default=0.05 , type=lowerCamelCase_ , help='Linear warmup proportion.')
parser.add_argument('--weight_decay' , default=0.0 , type=lowerCamelCase_ , help='Weight decay if we apply some.')
parser.add_argument('--learning_rate' , default=5e-4 , type=lowerCamelCase_ , help='The initial learning rate for Adam.')
parser.add_argument('--adam_epsilon' , default=1e-6 , type=lowerCamelCase_ , help='Epsilon for Adam optimizer.')
parser.add_argument('--max_grad_norm' , default=5.0 , type=lowerCamelCase_ , help='Max gradient norm.')
parser.add_argument('--initializer_range' , default=0.02 , type=lowerCamelCase_ , help='Random initialization range.')
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCamelCase_ , default='O1' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_gpu' , type=lowerCamelCase_ , default=1 , help='Number of GPUs in the node.')
parser.add_argument('--local_rank' , type=lowerCamelCase_ , default=-1 , help='Distributed training - Local rank')
parser.add_argument('--seed' , type=lowerCamelCase_ , default=56 , help='Random seed')
parser.add_argument('--log_interval' , type=lowerCamelCase_ , default=500 , help='Tensorboard logging interval.')
parser.add_argument('--checkpoint_interval' , type=lowerCamelCase_ , default=4_000 , help='Checkpoint interval.')
    args = parser.parse_args()
    sanity_checks(args)
    # ARGS #
    init_gpu_params(args)
    set_seed(args)
if args.is_master:
if os.path.exists(args.dump_path):
if not args.force:
raise ValueError(
f'Serialization dir {args.dump_path} already exists, but you have not precised wheter to overwrite'
' itUse `--force` if you want to overwrite it')
else:
shutil.rmtree(args.dump_path)
if not os.path.exists(args.dump_path):
os.makedirs(args.dump_path)
logger.info(f'Experiment will be dumped and logged in {args.dump_path}')
# SAVE PARAMS #
logger.info(f'Param: {args}')
with open(os.path.join(args.dump_path , 'parameters.json') , 'w') as f:
            json.dump(vars(args) , f , indent=4)
git_log(args.dump_path)
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]
    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f'Special tokens {special_tok_ids}')
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'Loading data from {args.data_file}')
    with open(args.data_file , 'rb') as fp:
        data = pickle.load(fp)
if args.mlm:
logger.info(f'Loading token counts from {args.token_counts} (already pre-computed)')
        with open(args.token_counts , 'rb') as fp:
            counts = pickle.load(fp)
        token_probs = np.maximum(counts , 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None
    train_lm_seq_dataset = LmSeqsDataset(params=args , data=data)
logger.info('Data loader created.')
# STUDENT #
logger.info(f'Loading student config from {args.student_config}')
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True
    if args.student_pretrained_weights is not None:
        logger.info(f'Loading pretrained weights from {args.student_pretrained_weights}')
        student = student_model_class.from_pretrained(args.student_pretrained_weights , config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(f'cuda:{args.local_rank}')
logger.info('Student loaded.')
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name , output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(f'cuda:{args.local_rank}')
logger.info(f'Teacher loaded from {args.teacher_name}.')
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student , args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student , args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args , dataset=train_lm_seq_dataset , token_probs=token_probs , student=student , teacher=teacher)
distiller.train()
logger.info('Let\'s go get some drinks.')
if __name__ == "__main__":
main()
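    # Example launch (paths are placeholders, shown for illustration only):
    #   python train.py --student_type distilbert --student_config training_configs/distilbert-base-uncased.json \
    #       --teacher_type bert --teacher_name bert-base-uncased --mlm --alpha_mlm 0.5 --alpha_clm 0.0 \
    #       --token_counts data/token_counts.pkl --data_file data/binarized_text.pickle \
    #       --dump_path serialization_dir/my_run --force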
| 596 |
'''simple docstring'''
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class __lowercase (SchedulerCommonTest ):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (('''eta''', 0.0), ('''num_inference_steps''', 50))
    def get_scheduler_config( self , **UpperCAmelCase_):
        config = {
'num_train_timesteps': 1_000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'clip_sample': True,
}
config.update(**UpperCAmelCase_)
return config
    def full_loop( self , **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)
        for t in scheduler.timesteps:
            residual = model(sample , t)
            sample = scheduler.step(residual , t , sample , eta).prev_sample
return sample
    def test_timesteps( self ):
        for timesteps in [100, 500, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)
    def test_steps_offset( self ):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(5)
        assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1]))
    def test_betas( self ):
        for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start , beta_end=beta_end)
    def test_schedules( self ):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)
    def test_prediction_type( self ):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)
    def test_clip_sample( self ):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)
    def test_timestep_spacing( self ):
        for timestep_spacing in ["trailing", "leading"]:
            self.check_over_configs(timestep_spacing=timestep_spacing)
    def test_rescale_betas_zero_snr( self ):
        for rescale_betas_zero_snr in [True, False]:
            self.check_over_configs(rescale_betas_zero_snr=rescale_betas_zero_snr)
    def test_thresholding( self ):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True , prediction_type=prediction_type , sample_max_value=threshold , )
    def test_time_indices( self ):
        for t in [1, 10, 49]:
            self.check_over_forward(time_step=t)
    def test_inference_steps( self ):
        for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500]):
            self.check_over_forward(time_step=t , num_inference_steps=num_inference_steps)
    def test_eta( self ):
        for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0]):
            self.check_over_forward(time_step=t , eta=eta)
    def test_variance( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
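        # Spot-check the scheduler's _get_variance at a few (t, prev_t) pairs under the default linear beta schedule.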
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400) - 0.1_47_71)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960) - 0.3_24_60)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0) - 0.0)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486) - 0.0_09_79)) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998) - 0.02)) < 1e-5
    def test_batch_step_no_noise( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_inference_steps, eta = 10, 0.0
        scheduler.set_timesteps(num_inference_steps)
        model = self.dummy_model()
        samplea = self.dummy_sample_deter
        sampleb = self.dummy_sample_deter + 0.1
        samplec = self.dummy_sample_deter - 0.1
        per_sample_batch = samplea.shape[0]
        samples = torch.stack([samplea, sampleb, samplec] , dim=0)
        timesteps = torch.arange(num_inference_steps)[0:3, None].repeat(1 , per_sample_batch)
        residual = model(samples.flatten(0 , 1) , timesteps.flatten(0 , 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual , timesteps.flatten(0 , 1) , samples.flatten(0 , 1) , eta)
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
assert abs(result_sum.item() - 11_47.79_04) < 1e-2
assert abs(result_mean.item() - 0.49_82) < 1e-3
    def test_full_loop_no_noise( self ):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1_72.00_67) < 1e-2
assert abs(result_mean.item() - 0.22_39_67) < 1e-3
    def test_full_loop_with_v_prediction( self ):
        sample = self.full_loop(prediction_type='v_prediction')
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 52.53_02) < 1e-2
assert abs(result_mean.item() - 0.06_84) < 1e-3
    def test_full_loop_with_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True , beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1_49.82_95) < 1e-2
assert abs(result_mean.item() - 0.19_51) < 1e-3
    def test_full_loop_with_no_set_alpha_to_one( self ):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False , beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
assert abs(result_sum.item() - 1_49.07_84) < 1e-2
assert abs(result_mean.item() - 0.19_41) < 1e-3
| 596 | 1 |
'''simple docstring'''
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
    OnnxStableDiffusionImg2ImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImg2ImgPipelineFastTests( OnnxPipelineTesterMixin , unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs( self , seed=0 ):
        image = floats_tensor((1, 3, 1_28, 1_28) , rng=random.Random(seed ) )
        generator = np.random.RandomState(seed )
        inputs = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''image''': image,
            '''generator''': generator,
            '''num_inference_steps''': 3,
            '''strength''': 0.75,
            '''guidance_scale''': 7.5,
            '''output_type''': '''numpy''',
        }
        return inputs
    def test_pipeline_default_ddim( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.69_643, 0.58_484, 0.50_314, 0.58_760, 0.55_368, 0.59_643, 0.51_529, 0.41_217, 0.49_087] )
        assert np.abs(image_slice - expected_slice ).max() < 1e-1
    def test_pipeline_pndm( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config , skip_prk_steps=True )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.61_737, 0.54_642, 0.53_183, 0.54_465, 0.52_742, 0.60_525, 0.49_969, 0.40_655, 0.48_154] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_lms( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs() )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52_761, 0.59_977, 0.49_033, 0.49_619, 0.54_282, 0.50_311, 0.47_600, 0.40_918, 0.45_203] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_euler_ancestral( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.52_911, 0.60_004, 0.49_229, 0.49_805, 0.54_502, 0.50_680, 0.47_777, 0.41_028, 0.45_304] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
    def test_pipeline_dpm_multistep( self ):
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(self.hub_checkpoint , provider='''CPUExecutionProvider''' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 1_28, 1_28, 3)
        expected_slice = np.array([0.65_331, 0.58_277, 0.48_204, 0.56_059, 0.53_665, 0.56_235, 0.50_969, 0.40_009, 0.46_552] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
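# The tests below download real checkpoints and run on GPU; they are exercised by the nightly CI job only.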
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImg2ImgPipelineIntegrationTests( unittest.TestCase):
@property
    def gpu_provider( self ):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )
    @property
    def gpu_options( self ):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((7_68, 5_12) )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            '''CompVis/stable-diffusion-v1-4''' , revision='''onnx''' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=10 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        expected_slice = np.array([0.4_909, 0.5_059, 0.5_372, 0.4_623, 0.4_876, 0.5_049, 0.4_820, 0.4_956, 0.5_019] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
    def test_inference_k_lms( self ):
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/img2img/sketch-mountains-input.jpg''' )
        init_image = init_image.resize((7_68, 5_12) )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , subfolder='''scheduler''' , revision='''onnx''' )
        pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
            '''runwayml/stable-diffusion-v1-5''' , revision='''onnx''' , scheduler=lms_scheduler , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = '''A fantasy landscape, trending on artstation'''
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=20 , generator=generator , output_type='''np''' , )
        images = output.images
        image_slice = images[0, 2_55:2_58, 3_83:3_86, -1]
        assert images.shape == (1, 5_12, 7_68, 3)
        expected_slice = np.array([0.8_043, 0.926, 0.9_581, 0.8_119, 0.8_954, 0.913, 0.7_209, 0.7_463, 0.7_431] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
| 719 |
'''simple docstring'''
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from t5x import checkpoints
from transformers import MT5Config, UMT5EncoderModel, UMT5ForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
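# The lookup helpers below slice one layer's tensors out of the flattened T5X parameter
# dict; keys look like "<prefix>/<prefix>/<layer_name>/key/kernel" and the layer index
# sits on axis 1 of each stacked array.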
def t5x_relpos_bias_lookup( params , i , prefix ):
    return params[F"{prefix}/{prefix}/relpos_bias/rel_embedding"][:, i, :]
def t5x_attention_lookup( params , i , prefix , layer_name="attention" ):
    k_tmp = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/key/kernel"][:, i, :, :] )
    k = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
    o_tmp = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/out/kernel"][:, i, :, :] )
    o = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
    q_tmp = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/query/kernel"][:, i, :, :] )
    q = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
    v_tmp = np.ascontiguousarray(params[F"{prefix}/{prefix}/{layer_name}/value/kernel"][:, i, :, :] )
    v = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def t5x_mlp_lookup( params , i , prefix , split_mlp_wi=False ):
    if split_mlp_wi:
        wi_0 = params[F"{prefix}/{prefix}/mlp/wi_0/kernel"][:, i, :]
        wi_1 = params[F"{prefix}/{prefix}/mlp/wi_1/kernel"][:, i, :]
        wi = (wi_0, wi_1)
    else:
        wi = params[F"{prefix}/{prefix}/mlp/wi/kernel"][:, i, :]
    wo = params[F"{prefix}/{prefix}/mlp/wo/kernel"][:, i, :]
return wi, wo
def t5x_layer_norm_lookup( params , i , prefix , layer_name ):
return params[F"{prefix}/{prefix}/{layer_name}/scale"][:, i]
def convert_t5x_to_pytorch( variables , * , num_layers , is_encoder_only , scalable_attention = False ):
    old = traverse_util.flatten_dict(variables['''target'''] )
    old = {'''/'''.join(k ): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = '''encoder/encoder/mlp/wi_0/kernel''' in old
    print('''Split MLP:''' , split_mlp_wi )
    new = collections.OrderedDict()
    # Shared embeddings.
    new['''shared.weight'''] = old['''token_embedder/embedding''']
    # Encoder.
    for i in range(num_layers ):
        # Block i, layer 0 (Self Attention).
        layer_norm = t5x_layer_norm_lookup(old , i , '''encoder''' , '''pre_attention_layer_norm''' )
        k , o , q , v = t5x_attention_lookup(old , i , '''encoder''' , '''attention''' )
        new[F"encoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
        new[F"encoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
        new[F"encoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
        new[F"encoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
        new[F"encoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = t5x_layer_norm_lookup(old , i , '''encoder''' , '''pre_mlp_layer_norm''' )
        wi , wo = t5x_mlp_lookup(old , i , '''encoder''' , split_mlp_wi )
        new[F"encoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
        if split_mlp_wi:
            new[F"encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight"] = wi[0].T
            new[F"encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight"] = wi[1].T
        else:
            new[F"encoder.block.{i}.layer.1.DenseReluDense.wi.weight"] = wi.T
        new[F"encoder.block.{i}.layer.1.DenseReluDense.wo.weight"] = wo.T
        if scalable_attention:
            # convert the rel_embedding of each layer
            new[F"encoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(
                old , i , '''encoder''' ).T
    new['''encoder.final_layer_norm.weight'''] = old['''encoder/encoder_norm/scale''']
    if not scalable_attention:
        new['''encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = t5x_relpos_bias_lookup(
            old , 0 , '''encoder''' ).T
        new['''decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'''] = t5x_relpos_bias_lookup(
            old , 0 , '''decoder''' ).T
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers ):
            # Block i, layer 0 (Self Attention).
            layer_norm = t5x_layer_norm_lookup(old , i , '''decoder''' , '''pre_self_attention_layer_norm''' )
            k , o , q , v = t5x_attention_lookup(old , i , '''decoder''' , '''self_attention''' )
            new[F"decoder.block.{i}.layer.0.layer_norm.weight"] = layer_norm
            new[F"decoder.block.{i}.layer.0.SelfAttention.k.weight"] = k.T
            new[F"decoder.block.{i}.layer.0.SelfAttention.o.weight"] = o.T
            new[F"decoder.block.{i}.layer.0.SelfAttention.q.weight"] = q.T
            new[F"decoder.block.{i}.layer.0.SelfAttention.v.weight"] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = t5x_layer_norm_lookup(old , i , '''decoder''' , '''pre_cross_attention_layer_norm''' )
            k , o , q , v = t5x_attention_lookup(old , i , '''decoder''' , '''encoder_decoder_attention''' )
            new[F"decoder.block.{i}.layer.1.layer_norm.weight"] = layer_norm
            new[F"decoder.block.{i}.layer.1.EncDecAttention.k.weight"] = k.T
            new[F"decoder.block.{i}.layer.1.EncDecAttention.o.weight"] = o.T
            new[F"decoder.block.{i}.layer.1.EncDecAttention.q.weight"] = q.T
            new[F"decoder.block.{i}.layer.1.EncDecAttention.v.weight"] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = t5x_layer_norm_lookup(old , i , '''decoder''' , '''pre_mlp_layer_norm''' )
            wi , wo = t5x_mlp_lookup(old , i , '''decoder''' , split_mlp_wi )
            new[F"decoder.block.{i}.layer.2.layer_norm.weight"] = layer_norm
            if split_mlp_wi:
                new[F"decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight"] = wi[0].T
                new[F"decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight"] = wi[1].T
            else:
                new[F"decoder.block.{i}.layer.2.DenseReluDense.wi.weight"] = wi.T
            new[F"decoder.block.{i}.layer.2.DenseReluDense.wo.weight"] = wo.T
            if scalable_attention:
                # convert the rel_embedding of each layer
                new[F"decoder.block.{i}.layer.0.SelfAttention.relative_attention_bias.weight"] = t5x_relpos_bias_lookup(old , i , '''decoder''' ).T
        new['''decoder.final_layer_norm.weight'''] = old['''decoder/decoder_norm/scale''']
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['''lm_head.weight'''] = old['''decoder/logits_dense/kernel'''].T
    return new
def make_state_dict( converted_params , is_encoder_only ):
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict["encoder.embed_tokens.weight"] = state_dict["shared.weight"]
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict["decoder.embed_tokens.weight"] = state_dict["shared.weight"]
        if "lm_head.weight" not in state_dict: # For old 1.0 models.
            print('''Using shared word embeddings as lm_head.''' )
            state_dict["lm_head.weight"] = state_dict["shared.weight"]
return state_dict
def load_t5x_weights_in_t5( model , config , t5x_checkpoint_path , is_encoder_only , scalable_attention ):
    variables = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path )
    converted = convert_t5x_to_pytorch(
        variables , num_layers=config.num_layers , is_encoder_only=is_encoder_only , scalable_attention=scalable_attention )
    state_dict = make_state_dict(converted , is_encoder_only )
    model.load_state_dict(state_dict , strict=True )
def convert_t5x_checkpoint_to_pytorch( t5x_checkpoint_path , config_file , pytorch_dump_path , is_encoder_only = False , scalable_attention = False , ):
    config = MT5Config.from_json_file(config_file )
    print(F"Building PyTorch model from configuration: {config}" )
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = UMT5EncoderModel(config )
    else:
        model = UMT5ForConditionalGeneration(config )
    # Load weights from tf checkpoint
    load_t5x_weights_in_t5(model , config , t5x_checkpoint_path , is_encoder_only , scalable_attention )
    # Save pytorch-model
    print(F"Save PyTorch model to {pytorch_dump_path}" )
    model.save_pretrained(pytorch_dump_path )
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path )
    print('''Done''' )
print('''Done''' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Converts a native T5X checkpoint into a PyTorch checkpoint.')
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path to the T5X checkpoint.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.',
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--is_encoder_only', action='store_true', help='Check if the model is encoder-decoder model', default=False
)
parser.add_argument(
'--scalable_attention',
action='store_true',
help='Whether the model uses scaled attention (umt5 model)',
default=False,
)
    args = parser.parse_args()
    convert_t5x_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
| 216 | 0 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ConvNextConfig, SegformerImageProcessor, UperNetConfig, UperNetForSemanticSegmentation
def get_upernet_config(model_name ):
    '''simple docstring'''
    auxiliary_in_channels = 384
    if "tiny" in model_name:
        depths = [3, 3, 9, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "small" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [96, 192, 384, 768]
    if "base" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [128, 256, 512, 1024]
        auxiliary_in_channels = 512
    if "large" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [192, 384, 768, 1536]
        auxiliary_in_channels = 768
    if "xlarge" in model_name:
        depths = [3, 3, 27, 3]
        hidden_sizes = [256, 512, 1024, 2048]
        auxiliary_in_channels = 1024
    # set label information
    num_labels = 150
    repo_id = '''huggingface/label-files'''
    filename = '''ade20k-id2label.json'''
    id2label = json.load(open(hf_hub_download(repo_id ,filename ,repo_type='''dataset''' ) ,'''r''' ) )
    id2label = {int(k ): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    backbone_config = ConvNextConfig(
        depths=depths ,hidden_sizes=hidden_sizes ,out_features=['''stage1''', '''stage2''', '''stage3''', '''stage4'''] )
    config = UperNetConfig(
        backbone_config=backbone_config ,auxiliary_in_channels=auxiliary_in_channels ,num_labels=num_labels ,id2label=id2label ,label2id=label2id ,)
    return config
def create_rename_keys(config ):
    '''simple docstring'''
    rename_keys = []
# fmt: off
# stem
rename_keys.append(('''backbone.downsample_layers.0.0.weight''', '''backbone.embeddings.patch_embeddings.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.0.bias''', '''backbone.embeddings.patch_embeddings.bias''') )
rename_keys.append(('''backbone.downsample_layers.0.1.weight''', '''backbone.embeddings.layernorm.weight''') )
rename_keys.append(('''backbone.downsample_layers.0.1.bias''', '''backbone.embeddings.layernorm.bias''') )
# stages
for i in range(len(config.backbone_config.depths ) ):
for j in range(config.backbone_config.depths[i] ):
rename_keys.append((F'backbone.stages.{i}.{j}.gamma', F'backbone.encoder.stages.{i}.layers.{j}.layer_scale_parameter') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.weight', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.depthwise_conv.bias', F'backbone.encoder.stages.{i}.layers.{j}.dwconv.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.weight', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.norm.bias', F'backbone.encoder.stages.{i}.layers.{j}.layernorm.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv1.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv1.bias') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.weight', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.weight') )
rename_keys.append((F'backbone.stages.{i}.{j}.pointwise_conv2.bias', F'backbone.encoder.stages.{i}.layers.{j}.pwconv2.bias') )
if i > 0:
rename_keys.append((F'backbone.downsample_layers.{i}.0.weight', F'backbone.encoder.stages.{i}.downsampling_layer.0.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.0.bias', F'backbone.encoder.stages.{i}.downsampling_layer.0.bias') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.weight', F'backbone.encoder.stages.{i}.downsampling_layer.1.weight') )
rename_keys.append((F'backbone.downsample_layers.{i}.1.bias', F'backbone.encoder.stages.{i}.downsampling_layer.1.bias') )
rename_keys.append((F'backbone.norm{i}.weight', F'backbone.hidden_states_norms.stage{i+1}.weight') )
rename_keys.append((F'backbone.norm{i}.bias', F'backbone.hidden_states_norms.stage{i+1}.bias') )
# decode head
rename_keys.extend(
[
('''decode_head.conv_seg.weight''', '''decode_head.classifier.weight'''),
('''decode_head.conv_seg.bias''', '''decode_head.classifier.bias'''),
('''auxiliary_head.conv_seg.weight''', '''auxiliary_head.classifier.weight'''),
('''auxiliary_head.conv_seg.bias''', '''auxiliary_head.classifier.bias'''),
] )
# fmt: on
return rename_keys
def rename_key(dct ,old ,new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val
def convert_upernet_checkpoint(model_name ,pytorch_dump_folder_path ,push_to_hub ):
    '''simple docstring'''
    model_name_to_url = {
'''upernet-convnext-tiny''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_tiny_fp16_512x512_160k_ade20k/upernet_convnext_tiny_fp16_512x512_160k_ade20k_20220227_124553-cad485de.pth''',
'''upernet-convnext-small''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_small_fp16_512x512_160k_ade20k/upernet_convnext_small_fp16_512x512_160k_ade20k_20220227_131208-1b1e394f.pth''',
'''upernet-convnext-base''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_base_fp16_512x512_160k_ade20k/upernet_convnext_base_fp16_512x512_160k_ade20k_20220227_181227-02a24fc6.pth''',
'''upernet-convnext-large''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_large_fp16_640x640_160k_ade20k/upernet_convnext_large_fp16_640x640_160k_ade20k_20220226_040532-e57aa54d.pth''',
'''upernet-convnext-xlarge''': '''https://download.openmmlab.com/mmsegmentation/v0.5/convnext/upernet_convnext_xlarge_fp16_640x640_160k_ade20k/upernet_convnext_xlarge_fp16_640x640_160k_ade20k_20220226_080344-95fc38c2.pth''',
}
    checkpoint_url = model_name_to_url[model_name]
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url ,map_location='''cpu''' )['''state_dict''']
    config = get_upernet_config(model_name )
    model = UperNetForSemanticSegmentation(config )
    model.eval()
# replace "bn" => "batch_norm"
    for key in state_dict.copy().keys():
        val = state_dict.pop(key )
        if "bn" in key:
            key = key.replace('''bn''' ,'''batch_norm''' )
        state_dict[key] = val
    # rename keys
    rename_keys = create_rename_keys(config )
    for src, dest in rename_keys:
        rename_key(state_dict ,src ,dest )
    model.load_state_dict(state_dict )
    # verify on image
    url = '''https://huggingface.co/datasets/hf-internal-testing/fixtures_ade20k/resolve/main/ADE_val_00000001.jpg'''
    image = Image.open(requests.get(url ,stream=True ).raw ).convert('''RGB''' )
    processor = SegformerImageProcessor()
    pixel_values = processor(image ,return_tensors='''pt''' ).pixel_values
    with torch.no_grad():
        outputs = model(pixel_values )
if model_name == "upernet-convnext-tiny":
lowerCamelCase__ = torch.tensor(
[[-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.8_1_1_0, -8.8_1_1_0, -8.6_5_2_1], [-8.7_7_4_6, -8.7_7_4_6, -8.6_1_3_0]] )
elif model_name == "upernet-convnext-small":
lowerCamelCase__ = torch.tensor(
[[-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.8_2_3_6, -8.8_2_3_6, -8.6_7_7_1], [-8.7_6_3_8, -8.7_6_3_8, -8.6_2_4_0]] )
elif model_name == "upernet-convnext-base":
lowerCamelCase__ = torch.tensor(
[[-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.8_5_5_8, -8.8_5_5_8, -8.6_9_0_5], [-8.7_6_6_9, -8.7_6_6_9, -8.6_0_2_1]] )
elif model_name == "upernet-convnext-large":
lowerCamelCase__ = torch.tensor(
[[-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_6_6_0, -8.6_6_6_0, -8.6_2_1_0], [-8.6_3_1_0, -8.6_3_1_0, -8.5_9_6_4]] )
elif model_name == "upernet-convnext-xlarge":
lowerCamelCase__ = torch.tensor(
[[-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_9_8_0, -8.4_9_8_0, -8.3_9_7_7], [-8.4_3_7_9, -8.4_3_7_9, -8.3_4_1_2]] )
print('''Logits:''' ,outputs.logits[0, 0, :3, :3] )
assert torch.allclose(outputs.logits[0, 0, :3, :3] ,__snake_case ,atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__snake_case )
print(F'Saving processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(__snake_case )
if push_to_hub:
print(F'Pushing model and processor for {model_name} to hub' )
model.push_to_hub(F'openmmlab/{model_name}' )
processor.push_to_hub(F'openmmlab/{model_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="upernet-convnext-tiny",
type=str,
choices=[f"""upernet-convnext-{size}""" for size in ["tiny", "small", "base", "large", "xlarge"]],
help="Name of the ConvNext UperNet model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_upernet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
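# Example invocation (illustrative sketch; the script filename below is an
# assumption — substitute the actual name of this conversion script):
#
#   python convert_convnext_upernet_to_pytorch.py \
#       --model_name upernet-convnext-tiny \
#       --pytorch_dump_folder_path ./upernet-convnext-tiny \
#       --push_to_hub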
import tempfile
import unittest

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
    is_torch_available,
    require_optimum,
    require_torch,
    slow,
)


if is_torch_available():
    import torch


@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """Checks that a model can be converted to BetterTransformer, reversed and reloaded."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

        inp = tokenizer("This is me", return_tensors="pt")

        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        output = model.generate(**inp)

        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))

        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)

            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )

            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """save_pretrained must raise while the model is in BetterTransformer mode, and work once reversed."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()

        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)

            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
import os
import string
import sys


ARROW_KEY_FLAG = 1 << 8  # flag marking key codes that come from arrow keys

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    "Gets raw characters from inputs"
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    "Gets a character from the keyboard and returns the key code"
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
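# Minimal manual test (illustrative sketch, not part of the original module):
# run the file directly and press a key. Arrow keys come back offset by
# ARROW_KEY_FLAG, so chr(KEYMAP["up"] - ARROW_KEY_FLAG) recovers the raw "A".
if __name__ == "__main__":
    pressed = get_character()
    print(repr(pressed))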
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
from __future__ import annotations

def kmp(pattern: str, text: str) -> bool:
    """
    The Knuth-Morris-Pratt algorithm: returns True if `pattern` occurs in `text`.
    """
    # 1) Construct the failure array
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    """
    Calculates the new index we should go to if we fail a comparison.
    """
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure


if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
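    # Worked example for Test 5 (added for illustration): each failure value is
    # the length of the longest proper prefix of the pattern that is also a
    # suffix ending at that position:
    #   a  a  b  a  a  b  a  a  a
    #   0  1  0  1  2  3  4  5  2
    # e.g. at the final "a" the longest prefix that is also a suffix is "aa",
    # so the failure value there is 2.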
from __future__ import annotations
import math

def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """
    Returns the optimal value a maximizing (or minimizing) player can obtain
    from a full binary game tree whose leaf values are given in `scores`.
    """
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if len(scores) == 0:
        raise ValueError("Scores cannot be empty")

    if depth == height:
        return scores[node_index]

    if is_max:
        return max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
    return min(
        minimax(depth + 1, node_index * 2, True, scores, height),
        minimax(depth + 1, node_index * 2 + 1, True, scores, height),
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print("Optimal value : ", end="")
    print(minimax(0, 0, True, scores, height))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
"""simple docstring"""
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class lowerCamelCase (_UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
a = CTRLTokenizer
a = False
a = False
def lowerCAmelCase_ ( self : List[str] ) -> Any:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
SCREAMING_SNAKE_CASE__ = dict(zip(__a , range(len(__a ) ) ) )
SCREAMING_SNAKE_CASE__ = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
SCREAMING_SNAKE_CASE__ = {"unk_token": "<unk>"}
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
SCREAMING_SNAKE_CASE__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(__a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(__a ) )
def lowerCAmelCase_ ( self : Optional[int] , **_snake_case : List[Any] ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **__a )
def lowerCAmelCase_ ( self : str , _snake_case : int ) -> List[Any]:
SCREAMING_SNAKE_CASE__ = "adapt react readapt apt"
SCREAMING_SNAKE_CASE__ = "adapt react readapt apt"
return input_text, output_text
def lowerCAmelCase_ ( self : int ) -> str:
SCREAMING_SNAKE_CASE__ = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ = "adapt react readapt apt"
SCREAMING_SNAKE_CASE__ = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(__a )
self.assertListEqual(__a , __a )
SCREAMING_SNAKE_CASE__ = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__a ) , __a )
from typing import TYPE_CHECKING

from ...utils import _LazyModule


_import_structure = {"processing_wav2vec2_with_lm": ["Wav2Vec2ProcessorWithLM"]}

if TYPE_CHECKING:
    from .processing_wav2vec2_with_lm import Wav2Vec2ProcessorWithLM
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
("align", "EfficientNetImageProcessor"),
("beit", "BeitImageProcessor"),
("bit", "BitImageProcessor"),
("blip", "BlipImageProcessor"),
("blip-2", "BlipImageProcessor"),
("bridgetower", "BridgeTowerImageProcessor"),
("chinese_clip", "ChineseCLIPImageProcessor"),
("clip", "CLIPImageProcessor"),
("clipseg", "ViTImageProcessor"),
("conditional_detr", "ConditionalDetrImageProcessor"),
("convnext", "ConvNextImageProcessor"),
("convnextv2", "ConvNextImageProcessor"),
("cvt", "ConvNextImageProcessor"),
("data2vec-vision", "BeitImageProcessor"),
("deformable_detr", "DeformableDetrImageProcessor"),
("deit", "DeiTImageProcessor"),
("deta", "DetaImageProcessor"),
("detr", "DetrImageProcessor"),
("dinat", "ViTImageProcessor"),
("donut-swin", "DonutImageProcessor"),
("dpt", "DPTImageProcessor"),
("efficientformer", "EfficientFormerImageProcessor"),
("efficientnet", "EfficientNetImageProcessor"),
("flava", "FlavaImageProcessor"),
("focalnet", "BitImageProcessor"),
("git", "CLIPImageProcessor"),
("glpn", "GLPNImageProcessor"),
("groupvit", "CLIPImageProcessor"),
("imagegpt", "ImageGPTImageProcessor"),
("instructblip", "BlipImageProcessor"),
("layoutlmv2", "LayoutLMv2ImageProcessor"),
("layoutlmv3", "LayoutLMv3ImageProcessor"),
("levit", "LevitImageProcessor"),
("mask2former", "Mask2FormerImageProcessor"),
("maskformer", "MaskFormerImageProcessor"),
("mgp-str", "ViTImageProcessor"),
("mobilenet_v1", "MobileNetV1ImageProcessor"),
("mobilenet_v2", "MobileNetV2ImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevit", "MobileViTImageProcessor"),
("mobilevitv2", "MobileViTImageProcessor"),
("nat", "ViTImageProcessor"),
("oneformer", "OneFormerImageProcessor"),
("owlvit", "OwlViTImageProcessor"),
("perceiver", "PerceiverImageProcessor"),
("pix2struct", "Pix2StructImageProcessor"),
("poolformer", "PoolFormerImageProcessor"),
("regnet", "ConvNextImageProcessor"),
("resnet", "ConvNextImageProcessor"),
("sam", "SamImageProcessor"),
("segformer", "SegformerImageProcessor"),
("swiftformer", "ViTImageProcessor"),
("swin", "ViTImageProcessor"),
("swin2sr", "Swin2SRImageProcessor"),
("swinv2", "ViTImageProcessor"),
("table-transformer", "DetrImageProcessor"),
("timesformer", "VideoMAEImageProcessor"),
("tvlt", "TvltImageProcessor"),
("upernet", "SegformerImageProcessor"),
("van", "ConvNextImageProcessor"),
("videomae", "VideoMAEImageProcessor"),
("vilt", "ViltImageProcessor"),
("vit", "ViTImageProcessor"),
("vit_hybrid", "ViTHybridImageProcessor"),
("vit_mae", "ViTImageProcessor"),
("vit_msn", "ViTImageProcessor"),
("xclip", "CLIPImageProcessor"),
("yolos", "YolosImageProcessor"),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path: Union[str, os.PathLike],
    cache_dir: Optional[Union[str, os.PathLike]] = None,
    force_download: bool = False,
    resume_download: bool = False,
    proxies: Optional[Dict[str, str]] = None,
    use_auth_token: Optional[Union[bool, str]] = None,
    revision: Optional[str] = None,
    local_files_only: bool = False,
    **kwargs,
):
    """
    Loads the image processor configuration from a pretrained model image processor configuration.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the image processor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)


class AutoImageProcessor:
    r"""
    Generic image processor class that is instantiated as one of the image processor classes of the library when
    created with the `AutoImageProcessor.from_pretrained` class method. It cannot be instantiated directly.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoImageProcessor is designed to be instantiated "
            "using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get("image_processor_type", None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get("auto_map", {}):
            image_processor_auto_map = config_dict["auto_map"]["AutoImageProcessor"]

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop("feature_extractor_type", None)
            if feature_extractor_class is not None:
                logger.warning(
                    "Could not find image processor class in the image processor config or the model config. Loading"
                    " based on pattern matching with the model's feature extractor configuration."
                )
                image_processor_class = feature_extractor_class.replace("FeatureExtractor", "ImageProcessor")
            if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
                feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]
                image_processor_auto_map = feature_extractor_auto_map.replace("FeatureExtractor", "ImageProcessor")
                logger.warning(
                    "Could not find image processor auto map in the image processor config or the model config."
                    " Loading based on pattern matching with the model's feature extractor configuration."
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, "image_processor_type", None)
            if hasattr(config, "auto_map") and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map["AutoImageProcessor"]

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, image_processor_class):
        """
        Register a new image processor class for a given config class.
        """
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
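# Minimal usage sketch (illustrative; the checkpoint name is only an example):
#
#   from transformers import AutoImageProcessor
#
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   inputs = image_processor(images=pil_image, return_tensors="pt")  # pil_image: a PIL.Image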
import inspect
from typing import Callable, List, Optional, Union

import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNet2DConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class SeedResizeStableDiffusionPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-to-image generation using Stable Diffusion, keeping results for
    a fixed seed comparable across different output resolutions.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = `None` to disable attention slicing
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        text_embeddings: Optional[torch.FloatTensor] = None,
        **kwargs,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]

        if text_embeddings is None:
            text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""]
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(batch_size, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it

        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_shape_reference = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device="cpu", dtype=latents_dtype
                ).to(self.device)
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents_reference = torch.randn(
                    latents_shape_reference, generator=generator, device=self.device, dtype=latents_dtype
                )
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents_reference.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents_reference = latents_reference.to(self.device)
            latents = latents.to(self.device)

        # This is the key part of the pipeline where we
        # try to ensure that the generated images w/ the same seed
        # but different sizes actually result in similar images
        dx = (latents_shape[3] - latents_shape_reference[3]) // 2
        dy = (latents_shape[2] - latents_shape_reference[2]) // 2
        w = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
        h = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
        tx = 0 if dx < 0 else dx
        ty = 0 if dy < 0 else dy
        dx = max(-dx, 0)
        dy = max(-dy, 0)
        latents[:, :, ty : ty + h, tx : tx + w] = latents_reference[:, :, dy : dy + h, dx : dx + w]

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if self.safety_checker is not None:
            safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(
                self.device
            )
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype)
            )
        else:
            has_nsfw_concept = None

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
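# Usage sketch (illustrative; in diffusers this class ships as the community
# pipeline "seed_resize_stable_diffusion", and the checkpoint name below is an
# assumption):
#
#   pipe = DiffusionPipeline.from_pretrained(
#       "runwayml/stable-diffusion-v1-5", custom_pipeline="seed_resize_stable_diffusion"
#   )
#   image = pipe("a photo of an astronaut", height=512, width=768).images[0]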
import warnings
from typing import List, Optional, Tuple, Union

import numpy as np
import PIL
import torch

from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        "The preprocess method is deprecated and will be removed in a future version. Please"
        " use VaeImageProcessor.preprocess instead",
        FutureWarning,
    )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]

    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8

        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image


def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]

    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert("L").resize((w, h), resample=PIL_INTERPOLATION["nearest"]))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask


class RePaintPipeline(DiffusionPipeline):
    r"""
    Pipeline for image inpainting using RePaint.
    """

    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image],
        mask_image: Union[torch.Tensor, PIL.Image.Image],
        num_inference_steps: int = 250,
        eta: float = 0.0,
        jump_length: int = 10,
        jump_n_sample: int = 10,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image

        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)

        batch_size = original_image.shape[0]

        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta

        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
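# Usage sketch (illustrative; the checkpoint is the one used in the diffusers
# RePaint docs and is an assumption here; original_image and mask_image are
# 256x256 PIL images):
#
#   from diffusers import RePaintPipeline, RePaintScheduler
#
#   scheduler = RePaintScheduler.from_pretrained("google/ddpm-ema-celebahq-256")
#   pipe = RePaintPipeline.from_pretrained("google/ddpm-ema-celebahq-256", scheduler=scheduler)
#   output = pipe(image=original_image, mask_image=mask_image)
#   inpainted = output.images[0]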
from PIL import Image

def change_brightness(img: Image, level: float) -> Image:
    """
    Change the brightness of a PIL Image to a given level.
    """

    def brightness(c: int) -> float:
        """
        Fundamental transformation/operation applied to every channel value.
        """
        return 128 + level + (c - 128)

    if not -255.0 <= level <= 255.0:
        raise ValueError("level must be between -255.0 (black) and 255.0 (white)")
    return img.point(brightness)


if __name__ == "__main__":
    # Load image
    with Image.open("image_data/lena.jpg") as img:
        # Change brightness to 100
        bright_img = change_brightness(img, 100)
        bright_img.save("image_data/lena_brightness.png", format="png")
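# Note (added for illustration): algebraically brightness(c) = 128 + level + (c - 128)
# simplifies to c + level, i.e. a constant offset per channel value; Image.point
# then clips the result to the valid 0-255 range for 8-bit images.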
import tempfile

import torch

from diffusers import PNDMScheduler

from .test_schedulers import SchedulerCommonTest


class PNDMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (PNDMScheduler,)
    forward_default_kwargs = (("num_inference_steps", 50),)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
        }

        config.update(**kwargs)
        return config

    def check_over_configs(self, time_step=0, **config):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config(**config)
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)
            # copy over dummy past residuals
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                new_scheduler.set_timesteps(num_inference_steps)
                # copy over dummy past residuals
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def test_from_save_pretrained(self):
        pass

    def check_over_forward(self, time_step=0, **forward_kwargs):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)
        sample = self.dummy_sample
        residual = 0.1 * sample
        dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)
            scheduler.set_timesteps(num_inference_steps)

            # copy over dummy past residuals (must be after setting timesteps)
            scheduler.ets = dummy_past_residuals[:]

            with tempfile.TemporaryDirectory() as tmpdirname:
                scheduler.save_config(tmpdirname)
                new_scheduler = scheduler_class.from_pretrained(tmpdirname)
                # copy over dummy past residuals
                new_scheduler.set_timesteps(num_inference_steps)

                # copy over dummy past residual (must be after setting timesteps)
                new_scheduler.ets = dummy_past_residuals[:]

            output = scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_prk(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

            output = scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample
            new_output = new_scheduler.step_plms(residual, time_step, sample, **kwargs).prev_sample

            assert torch.sum(torch.abs(output - new_output)) < 1e-5, "Scheduler outputs are not identical"

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 10
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        scheduler.set_timesteps(num_inference_steps)

        for i, t in enumerate(scheduler.prk_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_prk(residual, t, sample).prev_sample

        for i, t in enumerate(scheduler.plms_timesteps):
            residual = model(sample, t)
            sample = scheduler.step_plms(residual, t, sample).prev_sample

        return sample

    def test_step_shape(self):
        kwargs = dict(self.forward_default_kwargs)
        num_inference_steps = kwargs.pop("num_inference_steps", None)

        for scheduler_class in self.scheduler_classes:
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            sample = self.dummy_sample
            residual = 0.1 * sample

            if num_inference_steps is not None and hasattr(scheduler, "set_timesteps"):
                scheduler.set_timesteps(num_inference_steps)
            elif num_inference_steps is not None and not hasattr(scheduler, "set_timesteps"):
                scheduler.num_inference_steps = num_inference_steps

            # copy over dummy past residuals (must be done after set_timesteps)
            dummy_past_residuals = [residual + 0.2, residual + 0.15, residual + 0.1, residual + 0.05]
            scheduler.ets = dummy_past_residuals[:]

            output_0 = scheduler.step_prk(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_prk(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

            output_0 = scheduler.step_plms(residual, 0, sample, **kwargs).prev_sample
            output_1 = scheduler.step_plms(residual, 1, sample, **kwargs).prev_sample

            self.assertEqual(output_0.shape, sample.shape)
            self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_steps_offset(self):
        for steps_offset in [0, 1]:
            self.check_over_configs(steps_offset=steps_offset)

        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(steps_offset=1)
        scheduler = scheduler_class(**scheduler_config)
        scheduler.set_timesteps(10)
        assert torch.equal(
            scheduler.timesteps,
            torch.LongTensor(
                [901, 851, 851, 801, 801, 751, 751, 701, 701, 651, 651, 601, 601, 501, 401, 301, 201, 101, 1]
            ),
        )

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001], [0.002, 0.02]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [1, 5, 10]:
            self.check_over_forward(time_step=t)

    def test_inference_steps(self):
        for t, num_inference_steps in zip([1, 5, 10], [10, 50, 100]):
            self.check_over_forward(num_inference_steps=num_inference_steps)

    def test_pow_of_3_inference_steps(self):
        # earlier versions of set_timesteps() caused an error indexing alphas with inference steps as a power of 3
        num_inference_steps = 27

        for scheduler_class in self.scheduler_classes:
            sample = self.dummy_sample
            residual = 0.1 * sample

            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.set_timesteps(num_inference_steps)

            # before power of 3 fix, would error on first step, so we only need to do two
            for i, t in enumerate(scheduler.prk_timesteps[:2]):
                sample = scheduler.step_prk(residual, t, sample).prev_sample

    def test_inference_plms_no_past_residuals(self):
        with self.assertRaises(ValueError):
            scheduler_class = self.scheduler_classes[0]
            scheduler_config = self.get_scheduler_config()
            scheduler = scheduler_class(**scheduler_config)

            scheduler.step_plms(self.dummy_sample, 1, self.dummy_sample).prev_sample

    def test_full_loop_no_noise(self):
        sample = self.full_loop()
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 198.1318) < 1e-2
        assert abs(result_mean.item() - 0.2580) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 67.3986) < 1e-2
        assert abs(result_mean.item() - 0.0878) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 230.0399) < 1e-2
        assert abs(result_mean.item() - 0.2995) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 186.9482) < 1e-2
        assert abs(result_mean.item() - 0.2434) < 1e-3
import os
from typing import Dict, List, Union

import tensorflow as tf
from keras_nlp.tokenizers import BytePairTokenizer
from tensorflow_text import pad_model_inputs

from .tokenization_gpt2 import GPT2Tokenizer


class TFGPT2Tokenizer(tf.keras.layers.Layer):
    def __init__(self, vocab: Dict[str, int], merges: List[str], max_length: int = None, pad_token_id: int = None):
        super().__init__()
        self.pad_token_id = pad_token_id
        self.max_length = max_length
        self.vocab = vocab
        self.merges = merges
        self.tf_tokenizer = BytePairTokenizer(vocab, merges, sequence_length=max_length)

    @classmethod
    def from_tokenizer(cls, tokenizer: GPT2Tokenizer, *args, **kwargs):
        merges = [" ".join(m) for m in tokenizer.bpe_ranks.keys()]
        vocab = tokenizer.get_vocab()
        return cls(vocab, merges, *args, **kwargs)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], *init_inputs, **kwargs):
        tokenizer = GPT2Tokenizer.from_pretrained(pretrained_model_name_or_path, *init_inputs, **kwargs)
        return cls.from_tokenizer(tokenizer, *init_inputs, **kwargs)

    @classmethod
    def from_config(cls, config):
        return cls(**config)

    def get_config(self):
        return {
            "vocab": self.vocab,
            "merges": self.merges,
            "max_length": self.max_length,
            "pad_token_id": self.pad_token_id,
        }

    def call(self, x, max_length: int = None):
        input_ids = self.tf_tokenizer(x)
        attention_mask = tf.ones_like(input_ids)

        if self.pad_token_id is not None:
            # pad the tokens up to max length
            max_length = max_length if max_length is not None else self.max_length

            if max_length is not None:
                input_ids, attention_mask = pad_model_inputs(
                    input_ids, max_seq_length=max_length, pad_value=self.pad_token_id
                )

        return {"attention_mask": attention_mask, "input_ids": input_ids}
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/focalnet-tiny": "https://huggingface.co/microsoft/focalnet-tiny/resolve/main/config.json",
}


class FocalNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "focalnet"

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        use_conv_embed=False,
        hidden_sizes=[192, 384, 768, 768],
        depths=[2, 2, 6, 2],
        focal_levels=[2, 2, 2, 2],
        focal_windows=[3, 3, 3, 3],
        hidden_act="gelu",
        mlp_ratio=4.0,
        hidden_dropout_prob=0.0,
        drop_path_rate=0.1,
        use_layerscale=False,
        layerscale_value=1e-4,
        use_post_layernorm=False,
        use_post_layernorm_in_modulation=False,
        normalize_modulator=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.use_conv_embed = use_conv_embed
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.focal_levels = focal_levels
        self.focal_windows = focal_windows
        self.hidden_act = hidden_act
        self.mlp_ratio = mlp_ratio
        self.hidden_dropout_prob = hidden_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.use_layerscale = use_layerscale
        self.layerscale_value = layerscale_value
        self.use_post_layernorm = use_post_layernorm
        self.use_post_layernorm_in_modulation = use_post_layernorm_in_modulation
        self.normalize_modulator = normalize_modulator
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.encoder_stride = encoder_stride
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(self.depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )
Vector3d = tuple[float, float, float]
Point3d = tuple[float, float, float]


def create_vector(end_point1: Point3d, end_point2: Point3d) -> Vector3d:
    x = end_point2[0] - end_point1[0]
    y = end_point2[1] - end_point1[1]
    z = end_point2[2] - end_point1[2]
    return (x, y, z)


def get_3d_vectors_cross(ab: Vector3d, ac: Vector3d) -> Vector3d:
    x = ab[1] * ac[2] - ab[2] * ac[1]  # *i
    y = (ab[0] * ac[2] - ab[2] * ac[0]) * -1  # *j
    z = ab[0] * ac[1] - ab[1] * ac[0]  # *k
    return (x, y, z)


def is_zero_vector(vector: Vector3d, accuracy: int) -> bool:
    return tuple(round(x, accuracy) for x in vector) == (0, 0, 0)


def are_collinear(a: Point3d, b: Point3d, c: Point3d, accuracy: int = 10) -> bool:
    """Three points are collinear iff AB x AC is the zero vector."""
    ab = create_vector(a, b)
    ac = create_vector(a, c)
    return is_zero_vector(get_3d_vectors_cross(ab, ac), accuracy)
 | 402 | 0 |
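# Quick self-check of the collinearity test above (safe to run standalone):
if __name__ == "__main__":
    print(are_collinear((0, 0, 0), (1, 1, 1), (2, 2, 2)))  # True: all on the line x = y = z
    print(are_collinear((0, 0, 0), (1, 1, 1), (1, 2, 3)))  # False: cross product is non-zero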
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }
    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected output (height, width) after resizing and padding."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
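    # Standalone usage sketch outside the tests (the checkpoint id below is assumed
    # to be the public Deformable DETR release; running this downloads its config):
    #   processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
    #   inputs = processor(images=pil_image, return_tensors="pt")
    #   inputs["pixel_values"].shape  # (1, 3, H, W) after resize + pad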
| 268 |
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of a code example's whitespace-stripped content."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}


def line_stats(example):
    """Calculate mean and max line length of a file."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Calculate the fraction of alphanumeric characters."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}


def check_uniques(example, uniques):
    """Check whether an example's hash is still in the unique set; remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False


def is_autogenerated(example, scan_width=5):
    """Check if a file is auto-generated by scanning its first lines for keywords."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if a file looks like a configuration or test file."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test: keyword appears in the first lines
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test: "config"/"test" occurrences relative to file length
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}


def has_no_keywords(example):
    """Check if a file contains none of the keywords defining a Python function or loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}


def has_few_assignments(example, minimum=4):
    """Check if a file uses the assignment operator fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}


def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one dict of results."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results


def filter(example, uniques, args):
    """Filter the dataset with heuristics; config/test files are probabilistically kept."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True


def compress_file(file_path):
    """Compress a file with gzip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
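# Toy sanity check for the heuristics above (self-contained; bypasses the CLI args):
#   example = {"content": "def add(a, b):\n    return a + b\n"}
#   line_stats(example)       # {'line_mean': 15.0, 'line_max': 16}
#   alpha_stats(example)      # fraction of alphanumeric characters
#   has_no_keywords(example)  # {'has_no_keywords': False} because of "def "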
| 268 | 1 |
class Graph:
    def __init__(self):
        # dictionary of adjacency lists
        self.vertex = {}

    def print_graph(self):
        """Print each vertex followed by its adjacency list."""
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex, to_vertex):
        """Add a directed edge from from_vertex to to_vertex."""
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self):
        """Visit every vertex with a recursive depth-first traversal."""
        visited = [False] * len(self.vertex)
        # call the recursive helper function
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex, visited):
        # mark the current vertex as visited
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # Recur for all the vertices that are adjacent to this node
        for i in self.vertex:
            if not visited[i]:
                self.dfs_recursive(i, visited)


if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3 | 702 |
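# For very deep graphs the recursive traversal can hit Python's recursion limit;
# a minimal iterative sketch with an explicit stack over the adjacency lists:
def dfs_iterative(graph, start):
    visited, stack = set(), [start]
    while stack:
        vertex = stack.pop()
        if vertex not in visited:
            visited.add(vertex)
            print(vertex, end=" ")
            # push neighbours in reverse so pop order matches left-to-right recursion
            stack.extend(reversed(graph.vertex.get(vertex, [])))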
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_1d_blocks import get_down_block, get_mid_block, get_out_block, get_up_block


@dataclass
class UNet1DOutput(BaseOutput):
    """Output of UNet1DModel: `sample` of shape (batch_size, num_channels, sample_size)."""

    sample: torch.FloatTensor


class UNet1DModel(ModelMixin, ConfigMixin):
    """A 1D UNet that takes a noisy sample and a timestep and returns a sample-shaped output."""
    @register_to_config
    def __init__(
        self,
        sample_size=65536,
        sample_rate=None,
        in_channels=2,
        out_channels=2,
        extra_in_channels=0,
        time_embedding_type="fourier",
        flip_sin_to_cos=True,
        use_timestep_embedding=False,
        freq_shift=0.0,
        down_block_types=("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D"),
        up_block_types=("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip"),
        mid_block_type="UNetMidBlock1D",
        out_block_type=None,
        block_out_channels=(32, 32, 64),
        act_fn=None,
        norm_num_groups=8,
        layers_per_block=1,
        downsample_each_block=False,
    ):
        super().__init__()
        self.sample_size = sample_size

        # time
        if time_embedding_type == "fourier":
            self.time_proj = GaussianFourierProjection(
                embedding_size=8, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = 2 * block_out_channels[0]
        elif time_embedding_type == "positional":
            self.time_proj = Timesteps(
                block_out_channels[0], flip_sin_to_cos=flip_sin_to_cos, downscale_freq_shift=freq_shift
            )
            timestep_input_dim = block_out_channels[0]

        if use_timestep_embedding:
            time_embed_dim = block_out_channels[0] * 4
            self.time_mlp = TimestepEmbedding(
                in_channels=timestep_input_dim, time_embed_dim=time_embed_dim,
                act_fn=act_fn, out_dim=block_out_channels[0],
            )

        self.down_blocks = nn.ModuleList([])
        self.mid_block = None
        self.up_blocks = nn.ModuleList([])
        self.out_block = None

        # down
        output_channel = in_channels
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            if i == 0:
                input_channel += extra_in_channels
            is_final_block = i == len(block_out_channels) - 1
            down_block = get_down_block(
                down_block_type, num_layers=layers_per_block,
                in_channels=input_channel, out_channels=output_channel,
                temb_channels=block_out_channels[0],
                add_downsample=not is_final_block or downsample_each_block,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type, in_channels=block_out_channels[-1], mid_channels=block_out_channels[-1],
            out_channels=block_out_channels[-1], embed_dim=block_out_channels[0],
            num_layers=layers_per_block, add_downsample=downsample_each_block,
        )

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        if out_block_type is None:
            final_upsample_channels = out_channels
        else:
            final_upsample_channels = block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = (
                reversed_block_out_channels[i + 1] if i < len(up_block_types) - 1 else final_upsample_channels
            )
            is_final_block = i == len(block_out_channels) - 1
            up_block = get_up_block(
                up_block_type, num_layers=layers_per_block,
                in_channels=prev_output_channel, out_channels=output_channel,
                temb_channels=block_out_channels[0], add_upsample=not is_final_block,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        num_groups_out = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4, 32)
        self.out_block = get_out_block(
            out_block_type=out_block_type, num_groups_out=num_groups_out,
            embed_dim=block_out_channels[0], out_channels=out_channels,
            act_fn=act_fn, fc_dim=block_out_channels[-1] // 4,
        )
    def forward(self, sample, timestep, return_dict=True):
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=sample.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # 1. time embedding
        timestep_embed = self.time_proj(timesteps)
        if self.config.use_timestep_embedding:
            timestep_embed = self.time_mlp(timestep_embed)
        else:
            timestep_embed = timestep_embed[..., None]
            timestep_embed = timestep_embed.repeat([1, 1, sample.shape[2]]).to(sample.dtype)
            timestep_embed = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]))

        # 2. down
        down_block_res_samples = ()
        for downsample_block in self.down_blocks:
            sample, res_samples = downsample_block(hidden_states=sample, temb=timestep_embed)
            down_block_res_samples += res_samples

        # 3. mid
        if self.mid_block:
            sample = self.mid_block(sample, timestep_embed)

        # 4. up
        for i, upsample_block in enumerate(self.up_blocks):
            res_samples = down_block_res_samples[-1:]
            down_block_res_samples = down_block_res_samples[:-1]
            sample = upsample_block(sample, res_hidden_states_tuple=res_samples, temb=timestep_embed)

        # 5. post-process
        if self.out_block:
            sample = self.out_block(sample, timestep_embed)

        if not return_dict:
            return (sample,)

        return UNet1DOutput(sample=sample)
 | 436 | 0 |
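# Usage sketch (sizes are illustrative; the default config expects 2-channel audio):
#   model = UNet1DModel(sample_size=2048, block_out_channels=(32, 32, 64))
#   noise = torch.randn(1, 2, 2048)
#   out = model(noise, timestep=10).sample  # same (batch, channels, length) shape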
"""simple docstring"""
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = [
'''good first issue''',
'''good second issue''',
'''good difficult issue''',
'''feature request''',
'''new model''',
'''wip''',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
issue.edit(state='closed' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# print(f"Would add stale comment to {issue.number}")
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main() | 91 |
"""simple docstring"""
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Find all the valid positions a knight can move to from the current position."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """Check if the board has been completely filled with non-zero values."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(board: list[list[int]], pos: tuple[int, int], curr: int) -> bool:
    """Backtracking helper: try each valid knight move and undo it on failure."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, or raise ValueError."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod() | 91 | 1 |
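# Example run: a 5x5 board admits an open tour; board[i][j] gives the move number.
if __name__ == "__main__":
    for row in open_knight_tour(5):
        print(row)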
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : Dict ):
'''simple docstring'''
lowerCAmelCase : List[str] = len(SCREAMING_SNAKE_CASE )
lowerCAmelCase : str = sum(SCREAMING_SNAKE_CASE )
lowerCAmelCase : List[str] = [[False for x in range(s + 1 )] for y in range(n + 1 )]
for i in range(1 , n + 1 ):
lowerCAmelCase : List[Any] = True
for i in range(1 , s + 1 ):
lowerCAmelCase : int = False
for i in range(1 , n + 1 ):
for j in range(1 , s + 1 ):
lowerCAmelCase : Dict = dp[i][j - 1]
if arr[i - 1] <= j:
lowerCAmelCase : Tuple = dp[i][j] or dp[i - 1][j - arr[i - 1]]
for j in range(int(s / 2 ) , -1 , -1 ):
if dp[n][j] is True:
lowerCAmelCase : int = s - 2 * j
break
return diff
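# Example: [1, 6, 11, 5] splits into {1, 5, 6} and {11}; the best difference is 1.
if __name__ == "__main__":
    print(find_min([1, 6, 11, 5]))  # 1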
| 681 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Flax port of the DDPM (denoising diffusion probabilistic models) scheduler."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02,
        beta_schedule="linear", trained_betas=None,
        variance_type="fixed_small", clip_sample=True,
        prediction_type="epsilon", dtype=jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common=None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps
        )

    def scale_model_input(self, state, sample, timestep=None):
        """DDPM does not rescale model inputs, so this is a no-op returning `sample`."""
        return sample
    def set_timesteps(self, state, num_inference_steps, shape=()):
        """Set the discrete timesteps used for the diffusion chain (run before inference)."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from
        # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(self, state, model_output, timestep, sample, key=None, return_dict=True):
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state, original_samples, noise, timesteps):
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state, sample, noise, timesteps):
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
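    # Usage sketch (shapes illustrative; assumes jax/flax are installed):
    #   scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
    #   state = scheduler.create_state()
    #   state = scheduler.set_timesteps(state, num_inference_steps=50, shape=(1, 3, 32, 32))
    #   out = scheduler.step(state, model_output, t, sample)  # FlaxDDPMSchedulerOutput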
| 681 | 1 |
"""simple docstring"""
from __future__ import annotations
def is_palindrome(n: int | str) -> bool:
    """Return True if `n` reads the same forwards and backwards."""
    s = str(n)
    return s == s[::-1]


def solution(limit: int = 1_000_000) -> int:
    """Sum all numbers below `limit` that are palindromic in base 10 and base 2."""
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
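# Worked example: 585 = 0b1001001001 is palindromic in both bases, so it is counted.
assert is_palindrome(585) and is_palindrome(bin(585).split("b")[1])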
| 355 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
UpperCamelCase__ :Dict = """true"""
def A_ ( snake_case__ , snake_case__=82 , snake_case__=16 ) -> List[Any]:
set_seed(42 )
_UpperCamelCase :Dict = RegressionModel()
_UpperCamelCase :Dict = deepcopy(snake_case__ )
_UpperCamelCase :List[Any] = RegressionDataset(length=snake_case__ )
_UpperCamelCase :Optional[int] = DataLoader(snake_case__ , batch_size=snake_case__ )
model.to(accelerator.device )
_UpperCamelCase , _UpperCamelCase :List[str] = accelerator.prepare(snake_case__ , snake_case__ )
return model, ddp_model, dataloader
def get_dataloader(accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")

    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"]
        )

    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")

    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True
    )
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        inputs, target = batch.values()
        with torch.no_grad():
            logit = model(inputs)
        logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    _, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"
def test_mrpc(dispatch_batches=False, split_batches=False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()


def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()


if __name__ == "__main__":
    main()
| 355 | 1 |
import base64


def base85_encode(string: str) -> bytes:
    """Encode a UTF-8 string with Base85 (via base64.b85encode)."""
    return base64.b85encode(string.encode("utf-8"))


def base85_decode(b85encoded: bytes) -> str:
    """Decode Base85 bytes back into a UTF-8 string."""
    return base64.b85decode(b85encoded).decode("utf-8")


if __name__ == "__main__":
    test = "Hello World!"
    encoded = base85_encode(test)
    print(encoded)

    decoded = base85_decode(encoded)
    print(decoded)
| 82 | __author__ = "Alexander Joslin"
import operator as op

from .stack import Stack


def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully parenthesized arithmetic expression with Dijkstra's
    two-stack algorithm, e.g. "(5 + ((4 * 2) * (2 + 3)))" -> 45."""
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1: push operands
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2: push operators
            operator_stack.push(i)
        elif i == ")":
            # RULE 4: a closing parenthesis triggers one evaluation
            opr = operator_stack.peek()
            operator_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num1, num2)
            operand_stack.push(total)

    # RULE 5: the remaining operand is the value of the expression
    return operand_stack.peek()


if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
    # answer = 45
    print(f"{equation} = {dijkstras_two_stack_algorithm(equation)}")
 | 82 | 1 |
from itertools import product

from cv2 import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uint8, zeros


def gen_gaussian_kernel(k_size, sigma):
    """Build a (k_size x k_size) Gaussian kernel centred on the middle pixel."""
    center = k_size // 2
    x, y = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
    g = 1 / (2 * pi * sigma) * exp(-(square(x) + square(y)) / (2 * square(sigma)))
    return g


def gaussian_filter(image, k_size, sigma):
    """Apply a Gaussian filter via im2col plus a single matrix product."""
    height, width = image.shape[0], image.shape[1]
    # dst image height and width
    dst_height = height - k_size + 1
    dst_width = width - k_size + 1

    # im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
    image_array = zeros((dst_height * dst_width, k_size * k_size))
    row = 0
    for i, j in product(range(dst_height), range(dst_width)):
        window = ravel(image[i : i + k_size, j : j + k_size])
        image_array[row, :] = window
        row += 1

    # turn the kernel into shape(k*k, 1)
    gaussian_kernel = gen_gaussian_kernel(k_size, sigma)
    filter_array = ravel(gaussian_kernel)

    # reshape and get the dst image
    dst = dot(image_array, filter_array).reshape(dst_height, dst_width).astype(uint8)

    return dst


if __name__ == "__main__":
    # read original image
    img = imread(r"../image_data/lena.jpg")
    # turn image in gray scale value
    gray = cvtColor(img, COLOR_BGR2GRAY)

    # get values with two different mask size
    gaussian3x3 = gaussian_filter(gray, 3, sigma=1)
    gaussian5x5 = gaussian_filter(gray, 5, sigma=0.8)

    # show result images
    imshow("gaussian filter with 3x3 mask", gaussian3x3)
    imshow("gaussian filter with 5x5 mask", gaussian5x5)
    waitKey()
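# Synthetic sanity check (no image file needed): a constant image stays flat, but its
# values are scaled by the kernel's sum -- the kernel above is not normalised to 1.
#   from numpy import full
#   flat = full((10, 10), 100, dtype="uint8")
#   gaussian_filter(flat, 3, sigma=1).shape  # (8, 8): valid convolution trims k_size - 1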
| 70 |
"""simple docstring"""
import argparse
import math
import os
from copy import deepcopy
import torch
from audio_diffusion.models import DiffusionAttnUnet1D
from diffusion import sampling
from torch import nn

from diffusers import DanceDiffusionPipeline, IPNDMScheduler, UNet1DModel
MODELS_MAP = {
"""gwf-440k""": {
"""url""": """https://model-server.zqevans2.workers.dev/gwf-440k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 65_536,
},
"""jmann-small-190k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-small-190k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 65_536,
},
"""jmann-large-580k""": {
"""url""": """https://model-server.zqevans2.workers.dev/jmann-large-580k.ckpt""",
"""sample_rate""": 48_000,
"""sample_size""": 131_072,
},
"""maestro-uncond-150k""": {
"""url""": """https://model-server.zqevans2.workers.dev/maestro-uncond-150k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
"""unlocked-uncond-250k""": {
"""url""": """https://model-server.zqevans2.workers.dev/unlocked-uncond-250k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
"""honk-140k""": {
"""url""": """https://model-server.zqevans2.workers.dev/honk-140k.ckpt""",
"""sample_rate""": 16_000,
"""sample_size""": 65_536,
},
}
def alpha_sigma_to_t(alpha, sigma):
    """Return a timestep, given the scaling factors for the clean image and for the noise."""
    return torch.atan2(sigma, alpha) / math.pi * 2


def get_crash_schedule(t):
    sigma = torch.sin(t * math.pi / 2) ** 2
    alpha = (1 - sigma**2) ** 0.5
    return alpha_sigma_to_t(alpha, sigma)


class Object(object):
    pass


class DiffusionUncond(nn.Module):
    def __init__(self, global_args):
        super().__init__()

        self.diffusion = DiffusionAttnUnet1D(global_args, n_attn_layers=4)
        self.diffusion_ema = deepcopy(self.diffusion)
        self.rng = torch.quasirandom.SobolEngine(1, scramble=True)


def download(model_name):
    url = MODELS_MAP[model_name]["url"]
    os.system(f"wget {url} ./")
    return f"./{model_name}.ckpt"
DOWN_NUM_TO_LAYER = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
}
UP_NUM_TO_LAYER = {
"""8""": """resnets.0""",
"""9""": """attentions.0""",
"""10""": """resnets.1""",
"""11""": """attentions.1""",
"""12""": """resnets.2""",
"""13""": """attentions.2""",
}
MID_NUM_TO_LAYER = {
"""1""": """resnets.0""",
"""2""": """attentions.0""",
"""3""": """resnets.1""",
"""4""": """attentions.1""",
"""5""": """resnets.2""",
"""6""": """attentions.2""",
"""8""": """resnets.3""",
"""9""": """attentions.3""",
"""10""": """resnets.4""",
"""11""": """attentions.4""",
"""12""": """resnets.5""",
"""13""": """attentions.5""",
}
DEPTH_0_TO_LAYER = {
"""0""": """resnets.0""",
"""1""": """resnets.1""",
"""2""": """resnets.2""",
"""4""": """resnets.0""",
"""5""": """resnets.1""",
"""6""": """resnets.2""",
}
RES_CONV_MAP = {
"""skip""": """conv_skip""",
"""main.0""": """conv_1""",
"""main.1""": """group_norm_1""",
"""main.3""": """conv_2""",
"""main.4""": """group_norm_2""",
}
ATTN_MAP = {
"""norm""": """group_norm""",
"""qkv_proj""": ["""query""", """key""", """value"""],
"""out_proj""": ["""proj_attn"""],
}
def convert_resconv_naming(name):
    if name.startswith("skip"):
        return name.replace("skip", RES_CONV_MAP["skip"])

    # name has to be of format main.{digit}
    if not name.startswith("main."):
        raise ValueError(f"ResConvBlock error with {name}")

    return name.replace(name[:6], RES_CONV_MAP[name[:6]])


def convert_attn_naming(name):
    for key, value in ATTN_MAP.items():
        if name.startswith(key) and not isinstance(value, list):
            return name.replace(key, value)
        elif name.startswith(key):
            return [name.replace(key, v) for v in value]
    raise ValueError(f"Attn error with {name}")
def rename(input_string, max_depth=13):
    string = input_string

    if string.split(".")[0] == "timestep_embed":
        return string.replace("timestep_embed", "time_proj")

    depth = 0
    if string.startswith("net.3."):
        depth += 1
        string = string[6:]
    elif string.startswith("net."):
        string = string[4:]

    while string.startswith("main.7."):
        depth += 1
        string = string[7:]

    if string.startswith("main."):
        string = string[5:]

    # mid block
    if string[:2].isdigit():
        layer_num = string[:2]
        string_left = string[2:]
    else:
        layer_num = string[0]
        string_left = string[1:]

    if depth == max_depth:
        new_layer = MID_NUM_TO_LAYER[layer_num]
        prefix = "mid_block"
    elif depth > 0 and int(layer_num) < 7:
        new_layer = DOWN_NUM_TO_LAYER[layer_num]
        prefix = f"down_blocks.{depth}"
    elif depth > 0 and int(layer_num) > 7:
        new_layer = UP_NUM_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - depth - 1}"
    elif depth == 0:
        new_layer = DEPTH_0_TO_LAYER[layer_num]
        prefix = f"up_blocks.{max_depth - 1}" if int(layer_num) > 3 else "down_blocks.0"

    if not string_left.startswith("."):
        raise ValueError(f"Naming error with {input_string} and string_left: {string_left}.")

    string_left = string_left[1:]

    if "resnets" in new_layer:
        string_left = convert_resconv_naming(string_left)
    elif "attentions" in new_layer:
        string_left = convert_attn_naming(string_left)

    if not isinstance(string_left, list):
        new_string = prefix + "." + new_layer + "." + string_left
    else:
        new_string = [prefix + "." + new_layer + "." + s for s in string_left]
    return new_string
def rename_orig_weights(state_dict):
    new_state_dict = {}
    for k, v in state_dict.items():
        if k.endswith("kernel"):
            # up- and downsample layers, don't have trainable weights
            continue
        new_k = rename(k)
        # check if we need to transform from Conv => Linear for attention
        if isinstance(new_k, list):
            new_state_dict = transform_conv_attns(new_state_dict, new_k, v)
        else:
            new_state_dict[new_k] = v
    return new_state_dict


def transform_conv_attns(new_state_dict, new_k, v):
    if len(new_k) == 1:
        if len(v.shape) == 3:
            # weight
            new_state_dict[new_k[0]] = v[:, :, 0]
        else:
            # bias
            new_state_dict[new_k[0]] = v
    else:
        # qkv matrices
        trippled_shape = v.shape[0]
        single_shape = trippled_shape // 3
        for i in range(3):
            if len(v.shape) == 3:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape, :, 0]
            else:
                new_state_dict[new_k[i]] = v[i * single_shape : (i + 1) * single_shape]
    return new_state_dict
def main(args):
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    model_name = args.model_path.split("/")[-1].split(".")[0]
    if not os.path.isfile(args.model_path):
        assert (
            model_name == args.model_path
        ), f"Make sure to provide one of the official model names {MODELS_MAP.keys()}"
        args.model_path = download(model_name)

    sample_rate = MODELS_MAP[model_name]["sample_rate"]
    sample_size = MODELS_MAP[model_name]["sample_size"]

    config = Object()
    config.sample_size = sample_size
    config.sample_rate = sample_rate
    config.latent_dim = 0

    diffusers_model = UNet1DModel(sample_size=sample_size, sample_rate=sample_rate)
    diffusers_state_dict = diffusers_model.state_dict()

    orig_model = DiffusionUncond(config)
    orig_model.load_state_dict(torch.load(args.model_path, map_location=device)["state_dict"])
    orig_model = orig_model.diffusion_ema.eval()
    orig_model_state_dict = orig_model.state_dict()
    renamed_state_dict = rename_orig_weights(orig_model_state_dict)

    renamed_minus_diffusers = set(renamed_state_dict.keys()) - set(diffusers_state_dict.keys())
    diffusers_minus_renamed = set(diffusers_state_dict.keys()) - set(renamed_state_dict.keys())

    assert len(renamed_minus_diffusers) == 0, f"Problem with {renamed_minus_diffusers}"
    assert all(k.endswith("kernel") for k in list(diffusers_minus_renamed)), f"Problem with {diffusers_minus_renamed}"

    for key, value in renamed_state_dict.items():
        assert (
            diffusers_state_dict[key].squeeze().shape == value.squeeze().shape
        ), f"Shape for {key} doesn't match. Diffusers: {diffusers_state_dict[key].shape} vs. {value.shape}"
        if key == "time_proj.weight":
            value = value.squeeze()
        diffusers_state_dict[key] = value

    diffusers_model.load_state_dict(diffusers_state_dict)

    steps = 100
    seed = 33

    diffusers_scheduler = IPNDMScheduler(num_train_timesteps=steps)

    generator = torch.manual_seed(seed)
    noise = torch.randn([1, 2, config.sample_size], generator=generator).to(device)

    t = torch.linspace(1, 0, steps + 1, device=device)[:-1]
    step_list = get_crash_schedule(t)

    pipe = DanceDiffusionPipeline(unet=diffusers_model, scheduler=diffusers_scheduler)

    generator = torch.manual_seed(33)
    audio = pipe(num_inference_steps=steps, generator=generator).audios

    generated = sampling.iplms_sample(orig_model, noise, step_list, {})
    generated = generated.clamp(-1, 1)

    diff_sum = (generated - audio).abs().sum()
    diff_max = (generated - audio).abs().max()

    if args.save:
        pipe.save_pretrained(args.checkpoint_path)

    print("Diff sum", diff_sum)
    print("Diff max", diff_max)
    assert diff_max < 1e-3, f"Diff max: {diff_max} is too much :-/"

    print(f"Conversion for {model_name} successful!")
if __name__ == "__main__":
_lowerCAmelCase : Dict = argparse.ArgumentParser()
parser.add_argument("""--model_path""", default=None, type=str, required=True, help="""Path to the model to convert.""")
parser.add_argument(
"""--save""", default=True, type=bool, required=False, help="""Whether to save the converted model or not."""
)
parser.add_argument("""--checkpoint_path""", default=None, type=str, required=True, help="""Path to the output model.""")
_lowerCAmelCase : Tuple = parser.parse_args()
main(args)
| 438 | 0 |
from collections.abc import Sequence
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Sequence[float] , _SCREAMING_SNAKE_CASE : float ):
return sum(c * (x**i) for i, c in enumerate(_UpperCamelCase ) )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Sequence[float] , _SCREAMING_SNAKE_CASE : float ):
SCREAMING_SNAKE_CASE_ = 0.0
for coeff in reversed(_UpperCamelCase ):
SCREAMING_SNAKE_CASE_ = result * x + coeff
return result
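# A quick illustration (not part of the original module): Horner's rule folds the
# polynomial from the highest coefficient down, so p(x) = 1 + 2x + 3x^2 becomes
# ((3)*x + 2)*x + 1 and needs only n multiplications instead of repeated powers.
def _horner_demo() -> None:
    x = 2.0
    assert ((3.0 * x + 2.0) * x + 1.0) == 1.0 + 2.0 * x + 3.0 * x**2 == 17.0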
if __name__ == "__main__":
UpperCamelCase__ : Optional[Any] = (0.0, 0.0, 5.0, 9.3, 7.0)
UpperCamelCase__ : List[Any] = 10.0
print(evaluate_poly(poly, x))
print(horner(poly, x))
| 721 |
import pytest
import datasets
# Import fixture modules as plugins
UpperCamelCase__ : Union[str, Any] = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = tmp_path_factory.getbasetemp() / 'cache'
SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'datasets'
SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'metrics'
SCREAMING_SNAKE_CASE_ = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(_SCREAMING_SNAKE_CASE ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(_SCREAMING_SNAKE_CASE ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) )
SCREAMING_SNAKE_CASE_ = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(_SCREAMING_SNAKE_CASE ) )
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE , scope='session' )
def _UpperCAmelCase ( ):
"""simple docstring"""
datasets.disable_progress_bar()
@pytest.fixture(autouse=_SCREAMING_SNAKE_CASE )
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , _SCREAMING_SNAKE_CASE )
@pytest.fixture
def _UpperCAmelCase ( _SCREAMING_SNAKE_CASE : Tuple ):
"""simple docstring"""
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , _SCREAMING_SNAKE_CASE )
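# A hypothetical extra test (not part of this conftest) showing the mechanism the
# fixtures above rely on: monkeypatch.setattr swaps an attribute for one test and
# pytest restores the original value automatically on teardown.
def _monkeypatch_demo(monkeypatch) -> None:
    import datasets.config
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
    assert datasets.config.HF_UPDATE_DOWNLOAD_COUNTS is False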
| 620 | 0 |
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
lowerCamelCase : Tuple = "\\n\n"
lowerCamelCase : int = "\nPerplexity (PPL) is one of the most common metrics for evaluating language models.\nIt is defined as the exponentiated average negative log-likelihood of a sequence.\n\nFor more information, see https://huggingface.co/docs/transformers/perplexity\n"
lowerCamelCase : Any = "\nArgs:\n model_id (str): model used for calculating Perplexity\n NOTE: Perplexity can only be calculated for causal language models.\n This includes models such as gpt2, causal variations of bert,\n causal versions of t5, and more (the full list can be found\n in the AutoModelForCausalLM documentation here:\n https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )\n\n input_texts (list of str): input text, each separate text snippet\n is one list entry.\n batch_size (int): the batch size to run texts through the model. Defaults to 16.\n add_start_token (bool): whether to add the start token to the texts,\n so the perplexity can include the probability of the first word. Defaults to True.\n device (str): device to run on, defaults to \'cuda\' when available\nReturns:\n perplexity: dictionary containing the perplexity scores for the texts\n in the input list, as well as the mean perplexity. If one of the input texts is\n longer than the max input length of the model, then it is truncated to the\n max length for the perplexity computation.\nExamples:\n Example 1:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = [\"lorem ipsum\", \"Happy Birthday!\", \"Bienvenue\"]\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... add_start_token=False,\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 78.22\n >>> print(round(results[\"perplexities\"][0], 2))\n 11.11\n\n Example 2:\n >>> perplexity = datasets.load_metric(\"perplexity\")\n >>> input_texts = datasets.load_dataset(\"wikitext\",\n ... \"wikitext-2-raw-v1\",\n ... split=\"test\")[\"text\"][:50] # doctest:+ELLIPSIS\n [...]\n >>> input_texts = [s for s in input_texts if s!=\'\']\n >>> results = perplexity.compute(model_id=\'gpt2\',\n ... input_texts=input_texts) # doctest:+ELLIPSIS\n >>> print(list(results.keys()))\n [\'perplexities\', \'mean_perplexity\']\n >>> print(round(results[\"mean_perplexity\"], 2))\n 60.35\n >>> print(round(results[\"perplexities\"][0], 2))\n 81.12\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A( datasets.Metric ):
'''simple docstring'''
def a__ ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'input_texts': datasets.Value('string' ),
} ) , reference_urls=['https://huggingface.co/docs/transformers/perplexity'] , )
def a__ ( self : str , A_ : Union[str, Any] , A_ : Tuple , A_ : int = 16 , A_ : bool = True , A_ : Optional[int]=None ) -> Tuple:
"""simple docstring"""
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
lowerCamelCase_ = "cuda"
else:
lowerCamelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
lowerCamelCase_ = AutoModelForCausalLM.from_pretrained(a_ )
lowerCamelCase_ = model.to(a_ )
lowerCamelCase_ = AutoTokenizer.from_pretrained(a_ )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
lowerCamelCase_ = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(a_ ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({'pad_token': existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
lowerCamelCase_ = model.config.max_length - 1
else:
lowerCamelCase_ = model.config.max_length
lowerCamelCase_ = tokenizer(
a_ , add_special_tokens=a_ , padding=a_ , truncation=a_ , max_length=a_ , return_tensors='pt' , return_attention_mask=a_ , ).to(a_ )
lowerCamelCase_ = encodings["input_ids"]
lowerCamelCase_ = encodings["attention_mask"]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
lowerCamelCase_ = []
lowerCamelCase_ = CrossEntropyLoss(reduction='none' )
for start_index in logging.tqdm(range(0 , len(a_ ) , a_ ) ):
lowerCamelCase_ = min(start_index + batch_size , len(a_ ) )
lowerCamelCase_ = encoded_texts[start_index:end_index]
lowerCamelCase_ = attn_masks[start_index:end_index]
if add_start_token:
lowerCamelCase_ = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(a_ )
lowerCamelCase_ = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
lowerCamelCase_ = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(a_ ), attn_mask] , dim=1 )
lowerCamelCase_ = encoded_batch
with torch.no_grad():
lowerCamelCase_ = model(a_ , attention_mask=a_ ).logits
lowerCamelCase_ = out_logits[..., :-1, :].contiguous()
lowerCamelCase_ = labels[..., 1:].contiguous()
lowerCamelCase_ = attn_mask[..., 1:].contiguous()
lowerCamelCase_ = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , a_ ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(a_ )}
| 70 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
__UpperCAmelCase = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def lowercase__ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : str ) -> str:
'''simple docstring'''
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ["integration", "unit"] ):
continue
item.add_marker(pytest.mark.unit )
def lowercase__ ( lowerCAmelCase__ : List[Any] ) -> Any:
'''simple docstring'''
config.addinivalue_line("markers" , "torchaudio_latest: mark test to run with torchaudio>=0.12" )
@pytest.fixture(autouse=lowerCAmelCase__ )
def lowercase__ ( lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Dict ) -> List[str]:
'''simple docstring'''
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why does a cache dir per test function not work?
a__ : Dict = tmp_path_factory.getbasetemp() / "cache"
a__ : int = test_hf_cache_home / "datasets"
a__ : Tuple = test_hf_cache_home / "metrics"
a__ : Any = test_hf_cache_home / "modules"
monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE" , str(lowerCAmelCase__ ) )
monkeypatch.setattr("datasets.config.HF_METRICS_CACHE" , str(lowerCAmelCase__ ) )
monkeypatch.setattr("datasets.config.HF_MODULES_CACHE" , str(lowerCAmelCase__ ) )
a__ : Optional[int] = test_hf_datasets_cache / "downloads"
monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH" , str(lowerCAmelCase__ ) )
a__ : Tuple = test_hf_datasets_cache / "downloads" / "extracted"
monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH" , str(lowerCAmelCase__ ) )
@pytest.fixture(autouse=lowerCAmelCase__ , scope="session" )
def lowercase__ ( ) -> Union[str, Any]:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=lowerCAmelCase__ )
def lowercase__ ( lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
# don't take tests into account when counting downloads
monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS" , lowerCAmelCase__ )
@pytest.fixture
def lowercase__ ( lowerCAmelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 is supported
monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING" , lowerCAmelCase__ ) | 642 | 0 |
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
snake_case__ : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
snake_case__ : int = '\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'
def lowerCamelCase__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase=8 ) ->Optional[Any]:
_UpperCAmelCase =h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
_UpperCAmelCase =w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
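# Illustration only (not part of the pipeline): the helper above maps a pixel
# dimension to its latent dimension as ceil(h / scale_factor**2) * scale_factor,
# e.g. 768 -> 96 and 769 -> 104 for scale_factor == 8. The helper name is made up.
def _latent_dim(h: int, scale_factor: int = 8) -> int:
    return -(-h // scale_factor**2) * scale_factor  # ceiling division, then rescale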
class _a ( A__ ):
"""simple docstring"""
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , ):
super().__init__()
self.register_modules(
text_encoder=_snake_case , tokenizer=_snake_case , unet=_snake_case , scheduler=_snake_case , movq=_snake_case , )
_UpperCAmelCase =2 ** (len(self.movq.config.block_out_channels ) - 1)
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case ):
if latents is None:
_UpperCAmelCase =randn_tensor(_snake_case , generator=_snake_case , device=_snake_case , dtype=_snake_case )
else:
if latents.shape != shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
_UpperCAmelCase =latents.to(_snake_case )
_UpperCAmelCase =latents * scheduler.init_noise_sigma
return latents
def SCREAMING_SNAKE_CASE ( self , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case=None , ):
_UpperCAmelCase =len(_snake_case ) if isinstance(_snake_case , _snake_case ) else 1
# get prompt text embeddings
_UpperCAmelCase =self.tokenizer(
_snake_case , padding="max_length" , truncation=_snake_case , max_length=77 , return_attention_mask=_snake_case , add_special_tokens=_snake_case , return_tensors="pt" , )
_UpperCAmelCase =text_inputs.input_ids
_UpperCAmelCase =self.tokenizer(_snake_case , padding="longest" , return_tensors="pt" ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_snake_case , _snake_case ):
_UpperCAmelCase =self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
_UpperCAmelCase =text_input_ids.to(_snake_case )
_UpperCAmelCase =text_inputs.attention_mask.to(_snake_case )
_UpperCAmelCase , _UpperCAmelCase =self.text_encoder(
input_ids=_snake_case , attention_mask=_snake_case )
_UpperCAmelCase =prompt_embeds.repeat_interleave(_snake_case , dim=0 )
_UpperCAmelCase =text_encoder_hidden_states.repeat_interleave(_snake_case , dim=0 )
_UpperCAmelCase =text_mask.repeat_interleave(_snake_case , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase =42
if negative_prompt is None:
_UpperCAmelCase =[""] * batch_size
elif type(_snake_case ) is not type(_snake_case ):
raise TypeError(
F"`negative_prompt` should be the same type to `prompt`, but got {type(_snake_case )} !="
F" {type(_snake_case )}." )
elif isinstance(_snake_case , _snake_case ):
_UpperCAmelCase =[negative_prompt]
elif batch_size != len(_snake_case ):
raise ValueError(
F"`negative_prompt`: {negative_prompt} has batch size {len(_snake_case )}, but `prompt`:"
F" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
" the batch size of `prompt`." )
else:
_UpperCAmelCase =negative_prompt
_UpperCAmelCase =self.tokenizer(
_snake_case , padding="max_length" , max_length=77 , truncation=_snake_case , return_attention_mask=_snake_case , add_special_tokens=_snake_case , return_tensors="pt" , )
_UpperCAmelCase =uncond_input.input_ids.to(_snake_case )
_UpperCAmelCase =uncond_input.attention_mask.to(_snake_case )
_UpperCAmelCase , _UpperCAmelCase =self.text_encoder(
input_ids=_snake_case , attention_mask=_snake_case )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_UpperCAmelCase =negative_prompt_embeds.shape[1]
_UpperCAmelCase =negative_prompt_embeds.repeat(1 , _snake_case )
_UpperCAmelCase =negative_prompt_embeds.view(batch_size * num_images_per_prompt , _snake_case )
_UpperCAmelCase =uncond_text_encoder_hidden_states.shape[1]
_UpperCAmelCase =uncond_text_encoder_hidden_states.repeat(1 , _snake_case , 1 )
_UpperCAmelCase =uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _snake_case , -1 )
_UpperCAmelCase =uncond_text_mask.repeat_interleave(_snake_case , dim=0 )
# done duplicates
            # Classifier-free guidance conceptually needs two forward passes; here the
            # unconditional and text embeddings are concatenated into a single batch so
            # one forward pass serves both.
_UpperCAmelCase =torch.cat([negative_prompt_embeds, prompt_embeds] )
_UpperCAmelCase =torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
_UpperCAmelCase =torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def SCREAMING_SNAKE_CASE ( self , _snake_case=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_UpperCAmelCase =torch.device(F"cuda:{gpu_id}" )
_UpperCAmelCase =[
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_snake_case , _snake_case )
def SCREAMING_SNAKE_CASE ( self , _snake_case=0 ):
if is_accelerate_available() and is_accelerate_version(">=" , "0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_UpperCAmelCase =torch.device(F"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to("cpu" , silence_dtype_warnings=_snake_case )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_UpperCAmelCase =None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
_UpperCAmelCase , _UpperCAmelCase =cpu_offload_with_hook(_snake_case , _snake_case , prev_module_hook=_snake_case )
if self.safety_checker is not None:
_UpperCAmelCase , _UpperCAmelCase =cpu_offload_with_hook(self.safety_checker , _snake_case , prev_module_hook=_snake_case )
# We'll offload the last model manually.
_UpperCAmelCase =hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def SCREAMING_SNAKE_CASE ( self ):
if not hasattr(self.unet , "_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(_snake_case , "_hf_hook" )
and hasattr(module._hf_hook , "execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_snake_case )
def __call__( self , _snake_case , _snake_case , _snake_case , _snake_case = None , _snake_case = 512 , _snake_case = 512 , _snake_case = 100 , _snake_case = 4.0 , _snake_case = 1 , _snake_case = None , _snake_case = None , _snake_case = "pil" , _snake_case = True , ):
if isinstance(_snake_case , _snake_case ):
_UpperCAmelCase =1
elif isinstance(_snake_case , _snake_case ):
_UpperCAmelCase =len(_snake_case )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_snake_case )}" )
_UpperCAmelCase =self._execution_device
_UpperCAmelCase =batch_size * num_images_per_prompt
_UpperCAmelCase =guidance_scale > 1.0
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase =self._encode_prompt(
_snake_case , _snake_case , _snake_case , _snake_case , _snake_case )
if isinstance(_snake_case , _snake_case ):
_UpperCAmelCase =torch.cat(_snake_case , dim=0 )
if isinstance(_snake_case , _snake_case ):
_UpperCAmelCase =torch.cat(_snake_case , dim=0 )
if do_classifier_free_guidance:
_UpperCAmelCase =image_embeds.repeat_interleave(_snake_case , dim=0 )
_UpperCAmelCase =negative_image_embeds.repeat_interleave(_snake_case , dim=0 )
_UpperCAmelCase =torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_snake_case )
self.scheduler.set_timesteps(_snake_case , device=_snake_case )
_UpperCAmelCase =self.scheduler.timesteps
_UpperCAmelCase =self.unet.config.in_channels
_UpperCAmelCase , _UpperCAmelCase =get_new_h_w(_snake_case , _snake_case , self.movq_scale_factor )
# create initial latent
_UpperCAmelCase =self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _snake_case , _snake_case , _snake_case , self.scheduler , )
for i, t in enumerate(self.progress_bar(_snake_case ) ):
# expand the latents if we are doing classifier free guidance
_UpperCAmelCase =torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCAmelCase ={"text_embeds": prompt_embeds, "image_embeds": image_embeds}
_UpperCAmelCase =self.unet(
sample=_snake_case , timestep=_snake_case , encoder_hidden_states=_snake_case , added_cond_kwargs=_snake_case , return_dict=_snake_case , )[0]
if do_classifier_free_guidance:
_UpperCAmelCase , _UpperCAmelCase =noise_pred.split(latents.shape[1] , dim=1 )
_UpperCAmelCase , _UpperCAmelCase =noise_pred.chunk(2 )
_UpperCAmelCase , _UpperCAmelCase =variance_pred.chunk(2 )
_UpperCAmelCase =noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_UpperCAmelCase =torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , "variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_UpperCAmelCase , _UpperCAmelCase =noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_UpperCAmelCase =self.scheduler.step(
_snake_case , _snake_case , _snake_case , generator=_snake_case , ).prev_sample
# post-processing
_UpperCAmelCase =self.movq.decode(_snake_case , force_not_quantize=_snake_case )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
_UpperCAmelCase =image * 0.5 + 0.5
_UpperCAmelCase =image.clamp(0 , 1 )
_UpperCAmelCase =image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_UpperCAmelCase =self.numpy_to_pil(_snake_case )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_snake_case )
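# A minimal sketch (not part of the pipeline) of the classifier-free-guidance mix
# used in the denoising loop above; guidance_scale == 1.0 reduces to the
# text-conditional prediction.
def _cfg_mix(uncond: torch.Tensor, text: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    return uncond + guidance_scale * (text - uncond)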
| 592 |
from importlib import import_module
from .logging import get_logger
snake_case__ : Dict = get_logger(__name__)
class _a :
"""simple docstring"""
def __init__( self , _snake_case , _snake_case=None ):
_UpperCAmelCase =attrs or []
if module is not None:
for key in module.__dict__:
if key in attrs or not key.startswith("__" ):
setattr(self , _snake_case , getattr(_snake_case , _snake_case ) )
_UpperCAmelCase =module._original_module if isinstance(_snake_case , _PatchedModuleObj ) else module
class _a :
"""simple docstring"""
snake_case =[]
def __init__( self , _snake_case , _snake_case , _snake_case , _snake_case=None ):
_UpperCAmelCase =obj
_UpperCAmelCase =target
_UpperCAmelCase =new
_UpperCAmelCase =target.split("." )[0]
_UpperCAmelCase ={}
_UpperCAmelCase =attrs or []
def __enter__( self ):
*_UpperCAmelCase , _UpperCAmelCase =self.target.split("." )
# Patch modules:
# it's used to patch attributes of submodules like "os.path.join";
# in this case we need to patch "os" and "os.path"
for i in range(len(_snake_case ) ):
try:
_UpperCAmelCase =import_module(".".join(submodules[: i + 1] ) )
except ModuleNotFoundError:
continue
# We iterate over all the globals in self.obj in case we find "os" or "os.path"
for attr in self.obj.__dir__():
_UpperCAmelCase =getattr(self.obj , _snake_case )
# We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
# This allows to patch renamed modules like "from os import path as ospath".
if obj_attr is submodule or (
(isinstance(_snake_case , _PatchedModuleObj ) and obj_attr._original_module is submodule)
):
_UpperCAmelCase =obj_attr
# patch at top level
setattr(self.obj , _snake_case , _PatchedModuleObj(_snake_case , attrs=self.attrs ) )
_UpperCAmelCase =getattr(self.obj , _snake_case )
# construct lower levels patches
for key in submodules[i + 1 :]:
setattr(_snake_case , _snake_case , _PatchedModuleObj(getattr(_snake_case , _snake_case , _snake_case ) , attrs=self.attrs ) )
_UpperCAmelCase =getattr(_snake_case , _snake_case )
# finally set the target attribute
setattr(_snake_case , _snake_case , self.new )
# Patch attribute itself:
# it's used for builtins like "open",
# and also to patch "os.path.join" we may also need to patch "join"
# itself if it was imported as "from os.path import join".
if submodules: # if it's an attribute of a submodule like "os.path.join"
try:
_UpperCAmelCase =getattr(import_module(".".join(_snake_case ) ) , _snake_case )
except (AttributeError, ModuleNotFoundError):
return
# We iterate over all the globals in self.obj in case we find "os.path.join"
for attr in self.obj.__dir__():
# We don't check for the name of the global, but rather if its value *is* "os.path.join".
# This allows to patch renamed attributes like "from os.path import join as pjoin".
if getattr(self.obj , _snake_case ) is attr_value:
_UpperCAmelCase =getattr(self.obj , _snake_case )
setattr(self.obj , _snake_case , self.new )
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
_UpperCAmelCase =globals()["__builtins__"][target_attr]
setattr(self.obj , _snake_case , self.new )
else:
raise RuntimeError(F"Tried to patch attribute {target_attr} instead of a submodule." )
def __exit__( self , *_snake_case ):
for attr in list(self.original ):
setattr(self.obj , _snake_case , self.original.pop(_snake_case ) )
def SCREAMING_SNAKE_CASE ( self ):
self.__enter__()
self._active_patches.append(self )
def SCREAMING_SNAKE_CASE ( self ):
try:
self._active_patches.remove(self )
except ValueError:
# If the patch hasn't been started this will fail
return None
return self.__exit__()
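# A self-contained sketch (not part of the module) of the identity check used in
# __enter__ above: attributes are matched by *value*, not by name, so aliases like
# `from os import path as ospath` are found and patched too. The helper is illustrative.
def _aliases_demo() -> list:
    import os
    import os.path as ospath  # alias; bound to the very same module object
    return [name for name, value in locals().items() if value is os.path]
# _aliases_demo() == ["ospath"]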
| 592 | 1 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
lowercase__ : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( __magic_name__ ):
"""simple docstring"""
def __init__( self : Optional[Any] , lowerCAmelCase__ : AutoencoderKL , lowerCAmelCase__ : CLIPTextModel , lowerCAmelCase__ : CLIPTokenizer , lowerCAmelCase__ : UNetaDConditionModel , lowerCAmelCase__ : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , lowerCAmelCase__ : StableDiffusionSafetyChecker , lowerCAmelCase__ : CLIPImageProcessor , ) -> Optional[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(
vae=lowerCAmelCase__ , text_encoder=lowerCAmelCase__ , tokenizer=lowerCAmelCase__ , unet=lowerCAmelCase__ , scheduler=lowerCAmelCase__ , safety_checker=lowerCAmelCase__ , feature_extractor=lowerCAmelCase__ , )
def snake_case__ ( self : Union[str, Any] , lowerCAmelCase__ : Optional[Union[str, int]] = "auto" ) -> Union[str, Any]:
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
_UpperCamelCase = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase__ )
def snake_case__ ( self : int ) -> Optional[int]:
'''simple docstring'''
self.enable_attention_slicing(lowerCAmelCase__ )
@torch.no_grad()
def __call__( self : Optional[Any] , lowerCAmelCase__ : Union[str, List[str]] , lowerCAmelCase__ : int = 512 , lowerCAmelCase__ : int = 512 , lowerCAmelCase__ : int = 50 , lowerCAmelCase__ : float = 7.5 , lowerCAmelCase__ : Optional[Union[str, List[str]]] = None , lowerCAmelCase__ : Optional[int] = 1 , lowerCAmelCase__ : float = 0.0 , lowerCAmelCase__ : Optional[torch.Generator] = None , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , lowerCAmelCase__ : Optional[str] = "pil" , lowerCAmelCase__ : bool = True , lowerCAmelCase__ : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , lowerCAmelCase__ : int = 1 , lowerCAmelCase__ : Optional[torch.FloatTensor] = None , **lowerCAmelCase__ : Optional[int] , ) -> Any:
'''simple docstring'''
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = 1
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = len(lowerCAmelCase__ )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowerCAmelCase__ )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(lowerCAmelCase__ )}.""" )
# get prompt text embeddings
_UpperCamelCase = self.tokenizer(
lowerCAmelCase__ , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
_UpperCamelCase = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_UpperCamelCase = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
_UpperCamelCase = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
_UpperCamelCase = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = text_embeddings.shape
_UpperCamelCase = text_embeddings.repeat(1 , lowerCAmelCase__ , 1 )
_UpperCamelCase = text_embeddings.view(bs_embed * num_images_per_prompt , lowerCAmelCase__ , -1 )
        # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier-free guidance.
_UpperCamelCase = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_UpperCamelCase = 42
if negative_prompt is None:
_UpperCamelCase = ['''''']
elif type(lowerCAmelCase__ ) is not type(lowerCAmelCase__ ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCAmelCase__ )} !="""
f""" {type(lowerCAmelCase__ )}.""" )
elif isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
_UpperCamelCase = [negative_prompt]
elif batch_size != len(lowerCAmelCase__ ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCAmelCase__ )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
''' the batch size of `prompt`.''' )
else:
_UpperCamelCase = negative_prompt
_UpperCamelCase = text_input_ids.shape[-1]
_UpperCamelCase = self.tokenizer(
lowerCAmelCase__ , padding='''max_length''' , max_length=lowerCAmelCase__ , truncation=lowerCAmelCase__ , return_tensors='''pt''' , )
_UpperCamelCase = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_UpperCamelCase = uncond_embeddings.shape[1]
_UpperCamelCase = uncond_embeddings.repeat(lowerCAmelCase__ , lowerCAmelCase__ , 1 )
_UpperCamelCase = uncond_embeddings.view(batch_size * num_images_per_prompt , lowerCAmelCase__ , -1 )
            # Classifier-free guidance conceptually needs two forward passes; here the
            # unconditional and text embeddings are concatenated into a single batch so
            # one forward pass serves both.
_UpperCamelCase = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_UpperCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_UpperCamelCase = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
_UpperCamelCase = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_UpperCamelCase = torch.randn(
lowerCAmelCase__ , generator=lowerCAmelCase__ , device='''cpu''' , dtype=lowerCAmelCase__ ).to(self.device )
_UpperCamelCase = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device='''cpu''' , dtype=lowerCAmelCase__ ).to(
self.device )
else:
_UpperCamelCase = torch.randn(
lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ )
_UpperCamelCase = torch.randn(lowerCAmelCase__ , generator=lowerCAmelCase__ , device=self.device , dtype=lowerCAmelCase__ )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
_UpperCamelCase = latents_reference.to(self.device )
_UpperCamelCase = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
_UpperCamelCase = (latents_shape[3] - latents_shape_reference[3]) // 2
_UpperCamelCase = (latents_shape[2] - latents_shape_reference[2]) // 2
_UpperCamelCase = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
_UpperCamelCase = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
_UpperCamelCase = 0 if dx < 0 else dx
_UpperCamelCase = 0 if dy < 0 else dy
_UpperCamelCase = max(-dx , 0 )
_UpperCamelCase = max(-dy , 0 )
_UpperCamelCase = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCAmelCase__ )
# Some schedulers like PNDM have timesteps as arrays
        # It's more efficient to move all timesteps to the correct device beforehand
_UpperCamelCase = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_UpperCamelCase = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_UpperCamelCase = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_UpperCamelCase = {}
if accepts_eta:
_UpperCamelCase = eta
for i, t in enumerate(self.progress_bar(lowerCAmelCase__ ) ):
# expand the latents if we are doing classifier free guidance
_UpperCamelCase = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_UpperCamelCase = self.scheduler.scale_model_input(lowerCAmelCase__ , lowerCAmelCase__ )
# predict the noise residual
_UpperCamelCase = self.unet(lowerCAmelCase__ , lowerCAmelCase__ , encoder_hidden_states=lowerCAmelCase__ ).sample
# perform guidance
if do_classifier_free_guidance:
_UpperCamelCase , _UpperCamelCase = noise_pred.chunk(2 )
_UpperCamelCase = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_UpperCamelCase = self.scheduler.step(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , **lowerCAmelCase__ ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
_UpperCamelCase = 1 / 0.18215 * latents
_UpperCamelCase = self.vae.decode(lowerCAmelCase__ ).sample
_UpperCamelCase = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_UpperCamelCase = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if self.safety_checker is not None:
_UpperCamelCase = self.feature_extractor(self.numpy_to_pil(lowerCAmelCase__ ) , return_tensors='''pt''' ).to(
self.device )
_UpperCamelCase , _UpperCamelCase = self.safety_checker(
images=lowerCAmelCase__ , clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
_UpperCamelCase = None
if output_type == "pil":
_UpperCamelCase = self.numpy_to_pil(lowerCAmelCase__ )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCAmelCase__ , nsfw_content_detected=lowerCAmelCase__ )
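# Illustration only (not part of the pipeline): the "mps" branch above draws seeded
# noise on CPU and moves it over, since generator-based randn was not supported on
# mps; seeding on CPU also keeps latents identical across devices. The fixed seed
# and helper name are made-up examples.
def _seeded_noise(shape, device, dtype=torch.float32):
    generator = torch.Generator(device="cpu").manual_seed(0)
    return torch.randn(shape, generator=generator, dtype=dtype).to(device)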
| 98 |
import math
def A ( lowercase__ : Tuple , lowercase__ : Union[str, Any] ) -> Optional[Any]:
if 0 not in (x, y):
        # We use the relation log10(x^y) = y*log10(x) to compare the powers via base-10 logs.
return y * math.logaa(lowercase__ )
else:
        if x == 0:  # 0 raised to any positive power is 0
            return 0
        elif y == 0:
            return 1  # any nonzero number raised to 0 is 1
raise AssertionError("""This should never happen""" )
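# A small sketch (not part of the program): comparing x1**y1 with x2**y2 through
# y*log10(x) keeps the intermediate numbers small; log10 is increasing for positive
# bases, so the ordering is preserved. The helper name is illustrative.
def _larger_power_index(xa: int, ya: int, xb: int, yb: int) -> int:
    return 0 if ya * math.log10(xa) > yb * math.log10(xb) else 1
# _larger_power_index(2, 100, 10, 30) == 0 because 100 * log10(2) ~ 30.1 > 30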
if __name__ == "__main__": # Main function
# Read two numbers from input and typecast them to int using map function.
# Here x is the base and y is the power.
UpperCamelCase = "Enter the base and the power separated by a comma: "
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
UpperCamelCase , UpperCamelCase = map(int, input(prompt).split(","))
# We find the log of each number, using the function res(), which takes two
# arguments.
UpperCamelCase = res(xa, ya)
UpperCamelCase = res(xa, ya)
# We check for the largest number
if resa > resa:
print("Largest number is", xa, "^", ya)
elif resa > resa:
print("Largest number is", xa, "^", ya)
else:
print("Both are equal") | 45 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : List[str] = 'conditional_detr'
_A : List[Any] = ['past_key_values']
_A : Dict = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=3 , lowerCamelCase=3_00 , lowerCamelCase=6 , lowerCamelCase=20_48 , lowerCamelCase=8 , lowerCamelCase=6 , lowerCamelCase=20_48 , lowerCamelCase=8 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=True , lowerCamelCase="relu" , lowerCamelCase=2_56 , lowerCamelCase=0.1 , lowerCamelCase=0.0 , lowerCamelCase=0.0 , lowerCamelCase=0.0_2 , lowerCamelCase=1.0 , lowerCamelCase=False , lowerCamelCase="sine" , lowerCamelCase="resnet50" , lowerCamelCase=True , lowerCamelCase=False , lowerCamelCase=2 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=1 , lowerCamelCase=1 , lowerCamelCase=2 , lowerCamelCase=5 , lowerCamelCase=2 , lowerCamelCase=0.2_5 , **lowerCamelCase , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`." )
if not use_timm_backbone:
if backbone_config is None:
logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
snake_case__ = CONFIG_MAPPING["resnet"](out_features=["stage4"] )
elif isinstance(lowerCamelCase , lowerCamelCase ):
snake_case__ = backbone_config.get("model_type" )
snake_case__ = CONFIG_MAPPING[backbone_model_type]
snake_case__ = config_class.from_dict(lowerCamelCase )
snake_case__ = use_timm_backbone
snake_case__ = backbone_config
snake_case__ = num_channels
snake_case__ = num_queries
snake_case__ = d_model
snake_case__ = encoder_ffn_dim
snake_case__ = encoder_layers
snake_case__ = encoder_attention_heads
snake_case__ = decoder_ffn_dim
snake_case__ = decoder_layers
snake_case__ = decoder_attention_heads
snake_case__ = dropout
snake_case__ = attention_dropout
snake_case__ = activation_dropout
snake_case__ = activation_function
snake_case__ = init_std
snake_case__ = init_xavier_std
snake_case__ = encoder_layerdrop
snake_case__ = decoder_layerdrop
snake_case__ = encoder_layers
snake_case__ = auxiliary_loss
snake_case__ = position_embedding_type
snake_case__ = backbone
snake_case__ = use_pretrained_backbone
snake_case__ = dilation
# Hungarian matcher
snake_case__ = class_cost
snake_case__ = bbox_cost
snake_case__ = giou_cost
# Loss coefficients
snake_case__ = mask_loss_coefficient
snake_case__ = dice_loss_coefficient
snake_case__ = cls_loss_coefficient
snake_case__ = bbox_loss_coefficient
snake_case__ = giou_loss_coefficient
snake_case__ = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase , **lowerCamelCase )
@property
def A_ ( self ):
return self.encoder_attention_heads
@property
def A_ ( self ):
return self.d_model
def A_ ( self ):
snake_case__ = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
snake_case__ = self.backbone_config.to_dict()
snake_case__ = self.__class__.model_type
return output
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase ):
_A : Tuple = version.parse('1.11' )
@property
def A_ ( self ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def A_ ( self ):
return 1e-5
@property
def A_ ( self ):
return 12
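# A small usage sketch (not part of the file; assumes the first class above mirrors
# transformers.ConditionalDetrConfig, as its "conditional_detr" model_type suggests):
# the attribute_map lets generic names alias the DETR-style fields.
def _attribute_map_demo() -> None:
    from transformers import ConditionalDetrConfig
    cfg = ConditionalDetrConfig(d_model=256, encoder_attention_heads=8)
    assert cfg.hidden_size == 256 and cfg.num_attention_heads == 8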
| 530 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--original_config_file''',
type=str,
required=True,
help='''The YAML config file corresponding to the original architecture.''',
)
parser.add_argument(
'''--num_in_channels''',
default=None,
type=int,
help='''The number of input channels. If `None` number of input channels will be automatically inferred.''',
)
parser.add_argument(
'''--image_size''',
default=512,
type=int,
help=(
            '''The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Diffusion v2'''
''' Base. Use 768 for Stable Diffusion v2.'''
),
)
parser.add_argument(
'''--extract_ema''',
action='''store_true''',
help=(
'''Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights'''
''' or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield'''
''' higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning.'''
),
)
parser.add_argument(
'''--upcast_attention''',
action='''store_true''',
help=(
'''Whether the attention computation should always be upcasted. This is necessary when running stable'''
''' diffusion 2.1.'''
),
)
parser.add_argument(
'''--from_safetensors''',
action='''store_true''',
help='''If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.''',
)
parser.add_argument(
'''--to_safetensors''',
action='''store_true''',
help='''Whether to store pipeline in safetensors format or not.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument('''--device''', type=str, help='''Device to use (e.g. cpu, cuda:0, cuda:1, etc.)''')
def SCREAMING_SNAKE_CASE__ ( __lowerCAmelCase ):
if string == "True":
return True
elif string == "False":
return False
else:
raise ValueError(F"""could not parse string as bool {string}""" )
parser.add_argument(
'''--use_linear_projection''', help='''Override for use linear projection''', required=False, type=parse_bool
)
parser.add_argument('''--cross_attention_dim''', help='''Override for cross attention_dim''', required=False, type=int)
__magic_name__ = parser.parse_args()
__magic_name__ = download_controlnet_from_original_ckpt(
checkpoint_path=args.checkpoint_path,
original_config_file=args.original_config_file,
image_size=args.image_size,
extract_ema=args.extract_ema,
num_in_channels=args.num_in_channels,
upcast_attention=args.upcast_attention,
from_safetensors=args.from_safetensors,
device=args.device,
use_linear_projection=args.use_linear_projection,
cross_attention_dim=args.cross_attention_dim,
)
controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 530 | 1 |
import math
def UpperCamelCase__( UpperCamelCase__ : int )->list:
A__ = [True] * n
A__ = False
A__ = False
A__ = True
for i in range(3 , int(n**0.5 + 1 ) , 2 ):
A__ = i * 2
while index < n:
A__ = False
A__ = index + i
A__ = [2]
for i in range(3 , UpperCamelCase__ , 2 ):
if is_prime[i]:
primes.append(UpperCamelCase__ )
return primes
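# A brute-force reference (not part of the solution) for cross-checking the
# odd-only sieve above on small inputs; plain trial division up to sqrt(p).
def _primes_below(n: int) -> list:
    return [p for p in range(2, n) if all(p % d for d in range(2, int(p**0.5) + 1))]
# _primes_below(20) == [2, 3, 5, 7, 11, 13, 17, 19]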
def UpperCamelCase__( UpperCamelCase__ : int = 99_99_66_66_33_33 )->int:
A__ = math.floor(math.sqrt(UpperCamelCase__ ) ) + 1_00
A__ = prime_sieve(UpperCamelCase__ )
A__ = 0
A__ = 0
A__ = primes[prime_index]
while (last_prime**2) <= limit:
A__ = primes[prime_index + 1]
A__ = last_prime**2
A__ = next_prime**2
# Get numbers divisible by lps(current)
A__ = lower_bound + last_prime
while upper_bound > current <= limit:
matches_sum += current
current += last_prime
# Reset the upper_bound
while (upper_bound - next_prime) > limit:
upper_bound -= next_prime
# Add the numbers divisible by ups(current)
A__ = upper_bound - next_prime
while current > lower_bound:
matches_sum += current
current -= next_prime
# Remove the numbers divisible by both ups and lps
A__ = 0
while upper_bound > current <= limit:
if current <= lower_bound:
# Increment the current number
current += last_prime * next_prime
continue
if current > limit:
break
# Remove twice since it was added by both ups and lps
matches_sum -= current * 2
# Increment the current number
current += last_prime * next_prime
# Setup for next pair
A__ = next_prime
prime_index += 1
return matches_sum
if __name__ == "__main__":
print(solution())
| 190 |
import pytest
import datasets
# Import fixture modules as plugins
a__: Dict = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def UpperCamelCase__( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Tuple )->List[str]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def UpperCamelCase__( UpperCamelCase__ : Dict )->List[Any]:
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=UpperCamelCase__ )
def UpperCamelCase__( UpperCamelCase__ : Optional[int] , UpperCamelCase__ : int )->Union[str, Any]:
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why does a cache dir per test function not work?
A__ = tmp_path_factory.getbasetemp() / '''cache'''
A__ = test_hf_cache_home / '''datasets'''
A__ = test_hf_cache_home / '''metrics'''
A__ = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(UpperCamelCase__ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(UpperCamelCase__ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(UpperCamelCase__ ) )
A__ = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(UpperCamelCase__ ) )
A__ = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(UpperCamelCase__ ) )
@pytest.fixture(autouse=UpperCamelCase__ , scope='''session''' )
def UpperCamelCase__( )->int:
datasets.disable_progress_bar()
@pytest.fixture(autouse=UpperCamelCase__ )
def UpperCamelCase__( UpperCamelCase__ : str )->Tuple:
# don't take tests into account when counting downloads
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , UpperCamelCase__ )
@pytest.fixture
def UpperCamelCase__( UpperCamelCase__ : List[Any] )->Optional[Any]:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 is supported
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , UpperCamelCase__ )
| 190 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class SCREAMING_SNAKE_CASE ( lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
snake_case__ : Union[str, Any] = KandinskyVaaControlnetImgaImgPipeline
snake_case__ : int = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
snake_case__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
snake_case__ : int = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
snake_case__ : List[Any] = False
@property
def _UpperCamelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
return 32
@property
def _UpperCamelCase ( self :Tuple ) -> Optional[Any]:
'''simple docstring'''
return 32
@property
def _UpperCamelCase ( self :Dict ) -> str:
'''simple docstring'''
return self.time_input_dim
@property
def _UpperCamelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
return self.time_input_dim * 4
@property
def _UpperCamelCase ( self :Optional[Any] ) -> Optional[int]:
'''simple docstring'''
return 100
@property
def _UpperCamelCase ( self :Optional[int] ) -> Any:
'''simple docstring'''
torch.manual_seed(0 )
a__ = {
'''in_channels''': 8,
            # Out channels is double the in channels because the model predicts mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''image_hint''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
a__ = UNetaDConditionModel(**__magic_name__ )
return model
@property
def _UpperCamelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _UpperCamelCase ( self :Tuple ) -> Tuple:
'''simple docstring'''
torch.manual_seed(0 )
a__ = VQModel(**self.dummy_movq_kwargs )
return model
def _UpperCamelCase ( self :str ) -> int:
'''simple docstring'''
a__ = self.dummy_unet
a__ = self.dummy_movq
a__ = {
'''num_train_timesteps''': 1000,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00_085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
a__ = DDIMScheduler(**__magic_name__ )
a__ = {
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        '''simple docstring'''
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create init_image
        image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        image = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        init_image = Image.fromarray(np.uint8(image ) ).convert('''RGB''' ).resize((256, 256) )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('''mps''' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            '''image''': init_image,
            '''image_embeds''': image_embeds,
            '''negative_image_embeds''': negative_image_embeds,
            '''hint''': hint,
            '''generator''': generator,
            '''height''': 64,
            '''width''': 64,
            '''num_inference_steps''': 10,
            '''guidance_scale''': 7.0,
            '''strength''': 0.2,
            '''output_type''': '''np''',
        }
        return inputs
    def test_kandinsky_controlnet_img2img(self):
        '''simple docstring'''
        device = '''cpu'''
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
        ), F" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE ( unittest.TestCase ):
'''simple docstring'''
    def tearDown(self):
        '''simple docstring'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_controlnet_img2img(self):
        '''simple docstring'''
        expected_image = load_numpy(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy''' )
        init_image = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
        init_image = init_image.resize((512, 512) )
        hint = load_image(
            '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
            '''/kandinskyv22/hint_image_cat.png''' )
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        prompt = '''A robot, 4k photo'''
        pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-prior''' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
            '''kandinsky-community/kandinsky-2-2-controlnet-depth''' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        generator = torch.Generator(device='''cpu''' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , image=init_image , strength=0.85 , generator=generator , negative_prompt='''''' , ).to_tuple()
        output = pipeline(
            image=init_image , image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , height=512 , width=512 , strength=0.5 , output_type='''np''' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
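
# A minimal inference sketch (not part of the test suite) for the two-stage flow
# exercised above. Checkpoint names and the depth `hint` tensor are carried over
# from the slow test; running this needs a GPU plus the model downloads.
#
#   pipe_prior = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
#       'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.float16)
#   image_emb, zero_image_emb = pipe_prior(
#       'A robot, 4k photo', image=init_image, strength=0.85, negative_prompt='').to_tuple()
#   pipe = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
#       'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.float16)
#   out = pipe(image=init_image, image_embeds=image_emb,
#              negative_image_embeds=zero_image_emb, hint=hint, output_type='np')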
| 158 |
"""simple docstring"""
import os
from collections import namedtuple
import pytest
from datasets import ClassLabel, Features, Sequence, Value
from datasets.commands.test import TestCommand
from datasets.info import DatasetInfo, DatasetInfosDict
_TestCommandArgs = namedtuple(
'''_TestCommandArgs''',
[
'''dataset''',
'''name''',
'''cache_dir''',
'''data_dir''',
'''all_configs''',
'''save_infos''',
'''ignore_verifications''',
'''force_redownload''',
'''clear_cache''',
],
defaults=[None, None, None, False, False, False, False, False],
)
def is_apercent_close(source, target) -> bool:
    """simple docstring"""
    return (abs(source - target ) / target) < 0.01
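
# Example: the helper treats 2_360_000 and 2_351_563 as close (~0.36 % apart) but
# not 2_470_000 and 2_351_563 (~5 % apart); the values here are hypothetical.
#   assert is_apercent_close(2_360_000, 2_351_563)
#   assert not is_apercent_close(2_470_000, 2_351_563)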
@pytest.mark.integration
def test_test_command(dataset_loading_script_dir) -> None:
    """simple docstring"""
    args = _TestCommandArgs(dataset=dataset_loading_script_dir , all_configs=True , save_infos=True )
    test_command = TestCommand(*args )
    test_command.run()
    dataset_readme_path = os.path.join(dataset_loading_script_dir , '''README.md''' )
    assert os.path.exists(dataset_readme_path )
    dataset_infos = DatasetInfosDict.from_directory(dataset_loading_script_dir )
    expected_dataset_infos = DatasetInfosDict(
{
'''default''': DatasetInfo(
features=Features(
{
'''tokens''': Sequence(Value('''string''' ) ),
'''ner_tags''': Sequence(
ClassLabel(names=['''O''', '''B-PER''', '''I-PER''', '''B-ORG''', '''I-ORG''', '''B-LOC''', '''I-LOC'''] ) ),
'''langs''': Sequence(Value('''string''' ) ),
'''spans''': Sequence(Value('''string''' ) ),
} ) , splits=[
{
'''name''': '''train''',
'''num_bytes''': 2_351_563,
'''num_examples''': 10_000,
},
{
'''name''': '''validation''',
'''num_bytes''': 238_418,
'''num_examples''': 1_000,
},
] , download_size=3_940_680 , dataset_size=2_589_981 , )
} )
    assert dataset_infos.keys() == expected_dataset_infos.keys()
    for key in DatasetInfo._INCLUDED_INFO_IN_YAML:
        result , expected = getattr(dataset_infos['''default'''] , key ), getattr(expected_dataset_infos['''default'''] , key )
        if key == "num_bytes":
            assert is_apercent_close(result , expected )
        elif key == "splits":
            assert list(result ) == list(expected )
            for split in result:
                assert result[split].name == expected[split].name
                assert result[split].num_examples == expected[split].num_examples
                assert is_apercent_close(result[split].num_bytes , expected[split].num_bytes )
        else:
            assert result == expected
| 158 | 1 |
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
logger = logging.get_logger(__name__)
class DeformableDetrFeatureExtractor( DeformableDetrImageProcessor ):
    """simple docstring"""
    def __init__( self ,*args ,**kwargs ) -> None:
        warnings.warn(
            '''The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'''
            ''' Please use DeformableDetrImageProcessor instead.''' ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
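
# Sketch of the shim's behavior (the constructor signature is inherited from
# DeformableDetrImageProcessor, so any of its kwargs apply):
#
#   import warnings
#   with warnings.catch_warnings(record=True) as caught:
#       warnings.simplefilter('always')
#       DeformableDetrFeatureExtractor()  # emits a FutureWarning
#   assert any(issubclass(w.category, FutureWarning) for w in caught)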
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/config.json""",
}
class GPTNeoXJapaneseConfig ( PretrainedConfig ):
    model_type = '''gpt_neox_japanese'''
    def __init__( self , vocab_size=3_2_0_0_0 , hidden_size=2_5_6_0 , num_hidden_layers=3_2 , num_attention_heads=3_2 , intermediate_multiple_size=4 , hidden_act="gelu" , rotary_pct=1.0_0 , rotary_emb_base=1_0_0_0_0 , max_position_embeddings=2_0_4_8 , initializer_range=0.0_2 , layer_norm_eps=1e-5 , use_cache=True , bos_token_id=3_1_9_9_6 , eos_token_id=3_1_9_9_9 , attention_dropout=0.1 , hidden_dropout=0.0 , **kwargs , ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
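
# Usage sketch: the defaults above mirror the 2.7b checkpoint; any field can be
# overridden at construction time (the small values below are illustrative only).
#
#   config = GPTNeoXJapaneseConfig()
#   small = GPTNeoXJapaneseConfig(hidden_size=512, num_hidden_layers=4)
#   assert small.hidden_size == 512 and small.use_cache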
| 462 | 0 |
'''simple docstring'''
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data):
    return (data['''data'''], data['''target'''])
def xgboost(features, target):
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier
def main():
    iris = load_iris()
    features , targets = data_handling(iris)
    x_train , x_test , y_train , y_test = train_test_split(
        features, targets, test_size=0.25)
    names = iris['''target_names''']
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap='''Blues''', normalize='''true''', )
    plt.title('''Normalized Confusion Matrix - IRIS Dataset''')
    plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
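
# After `main()` trains the classifier, held-out predictions could be inspected
# like this (sketch; `xgboost_classifier`, `x_test` and `names` are locals of
# `main`, so the lines below would live inside that function):
#
#   predictions = xgboost_classifier.predict(x_test)
#   print(names[predictions[:5]])  # first five predicted class names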
| 713 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
_import_structure = {
"""configuration_gpt_neox_japanese""": ["""GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoXJapaneseConfig"""],
"""tokenization_gpt_neox_japanese""": ["""GPTNeoXJapaneseTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_neox_japanese"] = [
"""GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoXJapaneseForCausalLM""",
"""GPTNeoXJapaneseLayer""",
"""GPTNeoXJapaneseModel""",
"""GPTNeoXJapanesePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
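
# Behavior sketch: the lazy module defers the heavy imports, so attribute access
# is what triggers them (module path below assumes the usual transformers layout).
#
#   import transformers.models.gpt_neox_japanese as gnj
#   cfg_cls = gnj.GPTNeoXJapaneseConfig    # resolved on first attribute access
#   model_cls = gnj.GPTNeoXJapaneseModel   # only importable when torch is available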
| 474 | 0 |
"""simple docstring"""
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 178 |
'''simple docstring'''
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features
logger = logging.get_logger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None , metadata={"""help""": """Model type selected in the list: """ + """, """.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={"""help""": """The input data dir. Should contain the .json files for the SQuAD task."""} )
    max_seq_length: int = field(
        default=1_28 , metadata={
            """help""": (
                """The maximum total input sequence length after tokenization. Sequences longer """
                """than this will be truncated, sequences shorter will be padded."""
            )
        } , )
    doc_stride: int = field(
        default=1_28 , metadata={"""help""": """When splitting up a long document into chunks, how much stride to take between chunks."""} , )
    max_query_length: int = field(
        default=64 , metadata={
            """help""": (
                """The maximum number of tokens for the question. Questions longer than this will """
                """be truncated to this length."""
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            """help""": (
                """The maximum length of an answer that can be generated. This is needed because the start """
                """and end predictions are not conditioned on one another."""
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
    version_2_with_negative: bool = field(
        default=False , metadata={"""help""": """If true, the SQuAD examples contain some that do not have an answer."""} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    n_best_size: int = field(
        default=20 , metadata={"""help""": """If null_score - best_non_null is greater than the threshold predict null."""} )
    lang_id: int = field(
        default=0 , metadata={
            """help""": (
                """language id of input for language-specific xlm models (see"""
                """ tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"""
            )
        } , )
    threads: int = field(default=1 , metadata={"""help""": """multiple threads for converting example to features"""} )
class Split( Enum ):
    train = """train"""
    dev = """dev"""
class SquadDataset( Dataset ):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__( self, args: SquadDataTrainingArguments, tokenizer: PreTrainedTokenizer, limit_length: Optional[int] = None, mode: Union[str, Split] = Split.train, is_language_sensitive: Optional[bool] = False, cache_dir: Optional[str] = None, dataset_format: Optional[str] = "pt", ):
        '''simple docstring'''
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("""mode is not a valid split name""" )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = """v2""" if args.version_2_with_negative else """v1"""
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir, f'cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}', )
        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + """.lock"""
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["""features"""]
                self.dataset = self.old_features.get("""dataset""", None )
                self.examples = self.old_features.get("""examples""", None )
                logger.info(
                    f'Loading features from cached file {cached_features_file} [took %.3f s]', time.time() - start )
                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'
                        """ future run""" )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )
                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format, )
                start = time.time()
                torch.save(
                    {"""features""": self.features, """dataset""": self.dataset, """examples""": self.examples}, cached_features_file, )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]' )
def __len__( self ):
'''simple docstring'''
return len(self.features )
    def __getitem__( self, i ):
        '''simple docstring'''
        feature = self.features[i]
        input_ids = torch.tensor(feature.input_ids, dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": attention_mask,
            """token_type_ids""": token_type_ids,
        }
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]
        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"""cls_index""": cls_index, """p_mask""": p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({"""is_impossible""": is_impossible} )
            if self.is_language_sensitive:
                inputs.update({"""langs""": (torch.ones(input_ids.shape, dtype=torch.int64 ) * self.args.lang_id)} )
        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long )
            end_positions = torch.tensor(feature.end_position, dtype=torch.long )
            inputs.update({"""start_positions""": start_positions, """end_positions""": end_positions} )
        return inputs
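
# Minimal construction sketch for the dataset above (paths and the tokenizer
# checkpoint are placeholders; `data_dir` must contain the SQuAD json files):
#
#   from transformers import AutoTokenizer
#   tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
#   args = SquadDataTrainingArguments(model_type='bert', data_dir='./squad')
#   train_dataset = SquadDataset(args, tokenizer, mode=Split.train)
#   item = train_dataset[0]  # dict with input_ids / attention_mask / positions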
| 689 | 0 |
import math
import sys
def read_file_binary(file_path: str) -> str:
    """simple docstring"""
    result = ''
    try:
        with open(file_path , 'rb' ) as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f'{dat:08b}'
            result += curr_byte
        return result
    except OSError:
        print('File not accessible' )
        sys.exit()
def decompress_data(data_bits: str) -> str:
    """simple docstring"""
    lexicon = {'0': '0', '1': '1'}
    result , curr_string = '', ''
    index = len(lexicon )
    for i in range(len(data_bits ) ):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue
        last_match_id = lexicon[curr_string]
        result += last_match_id
        lexicon[curr_string] = last_match_id + '0'
        if math.log2(index ).is_integer():
            new_lex = {}
            for curr_key in list(lexicon ):
                new_lex['0' + curr_key] = lexicon.pop(curr_key )
            lexicon = new_lex
        lexicon[bin(index )[2:]] = last_match_id + '1'
        index += 1
        curr_string = ''
    return result
def write_file_binary(file_path: str , to_write: str) -> None:
    """simple docstring"""
    byte_length = 8
    try:
        with open(file_path , 'wb' ) as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0 , len(to_write ) , byte_length )
            ]
            if len(result_byte_array[-1] ) % byte_length == 0:
                result_byte_array.append('10000000' )
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1] ) - 1
                )
            for elem in result_byte_array[:-1]:
                opened_file.write(int(elem , 2 ).to_bytes(1 , byteorder='big' ) )
    except OSError:
        print('File not accessible' )
        sys.exit()
def remove_prefix(data_bits: str) -> str:
    """simple docstring"""
    counter = 0
    for letter in data_bits:
        if letter == "1":
            break
        counter += 1
    data_bits = data_bits[counter:]
    data_bits = data_bits[counter + 1 :]
    return data_bits
def compress(source_path: str , destination_path: str) -> None:
    """simple docstring"""
    data_bits = read_file_binary(source_path )
    data_bits = remove_prefix(data_bits )
    decompressed = decompress_data(data_bits )
    write_file_binary(destination_path , decompressed )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
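
# How `decompress_data` works in isolation (no files involved): starting from the
# seed lexicon {"0": "0", "1": "1"}, bits accumulate until they match a lexicon
# key, the key's value is emitted, and the lexicon grows by one entry per match
# (doubling via the log2 check). The CLI entry point expects a size-prefixed
# compressed file; the filenames below are illustrative:
#
#   python lempel_ziv_decompress.py compressed.bin restored.bin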
| 701 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import tensorflow as tf
from ...activations_tf import ACT2FN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_tf_outputs import (
TFBaseModelOutputWithNoAttention,
TFBaseModelOutputWithPoolingAndNoAttention,
TFSequenceClassifierOutput,
)
from ...modeling_tf_utils import TFPreTrainedModel, TFSequenceClassificationLoss, keras_serializable, unpack_inputs
from ...tf_utils import shape_list
from ...utils import logging
from .configuration_regnet import RegNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = """RegNetConfig"""
# Base docstring
_CHECKPOINT_FOR_DOC = """facebook/regnet-y-040"""
_EXPECTED_OUTPUT_SHAPE = [1, 10_88, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """facebook/regnet-y-040"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tabby, tabby cat"""
TF_REGNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """facebook/regnet-y-040""",
    # See all regnet models at https://huggingface.co/models?filter=regnet
]
class TFRegNetConvLayer( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , out_channels , kernel_size = 3 , stride = 1 , groups = 1 , activation = "relu" , **kwargs , ):
        '''simple docstring'''
        super().__init__(**kwargs )
        # The padding and conv has been verified in
        # https://colab.research.google.com/gist/sayakpaul/854bc10eeaf21c9ee2119e0b9f3841a7/scratchpad.ipynb
        self.padding = tf.keras.layers.ZeroPadding2D(padding=kernel_size // 2 )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=kernel_size , strides=stride , padding='VALID' , groups=groups , use_bias=False , name='convolution' , )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
        self.activation = ACT2FN[activation] if activation is not None else tf.identity
    def call( self , hidden_state ):
        '''simple docstring'''
        hidden_state = self.convolution(self.padding(hidden_state ) )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetEmbeddings( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , config , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.num_channels = config.num_channels
        self.embedder = TFRegNetConvLayer(
            out_channels=config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act , name='embedder' , )
    def call( self , pixel_values ):
        '''simple docstring'''
        num_channels = shape_list(pixel_values )[1]
        if tf.executing_eagerly() and num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        # When running on CPU, `tf.keras.layers.Conv2D` doesn't support `NCHW` format.
        # So change the input format from `NCHW` to `NHWC`.
        # shape = (batch_size, in_height, in_width, in_channels=num_channels)
        pixel_values = tf.transpose(pixel_values , perm=(0, 2, 3, 1) )
        hidden_state = self.embedder(pixel_values )
        return hidden_state
class TFRegNetShortCut( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , out_channels , stride = 2 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.convolution = tf.keras.layers.Conv2D(
            filters=out_channels , kernel_size=1 , strides=stride , use_bias=False , name='convolution' )
        self.normalization = tf.keras.layers.BatchNormalization(epsilon=1E-5 , momentum=0.9 , name='normalization' )
    def call( self , inputs , training = False ):
        '''simple docstring'''
        return self.normalization(self.convolution(inputs ) , training=training )
class TFRegNetSELayer( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , in_channels , reduced_channels , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name='pooler' )
        self.attention = [
            tf.keras.layers.Conv2D(filters=reduced_channels , kernel_size=1 , activation='relu' , name='attention.0' ),
            tf.keras.layers.Conv2D(filters=in_channels , kernel_size=1 , activation='sigmoid' , name='attention.2' ),
        ]
    def call( self , hidden_state ):
        '''simple docstring'''
        pooled = self.pooler(hidden_state )
        for layer_module in self.attention:
            pooled = layer_module(pooled )
        hidden_state = hidden_state * pooled
        return hidden_state
class TFRegNetXLayer( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , config , in_channels , out_channels , stride = 1 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name='shortcut' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear' , name='shortcut' )
        )
        # `self.layers` instead of `self.layer` because that is a reserved argument.
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name='layer.1' ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name='layer.2' ),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call( self , hidden_state ):
        '''simple docstring'''
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetYLayer( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , config , in_channels , out_channels , stride = 1 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        should_apply_shortcut = in_channels != out_channels or stride != 1
        groups = max(1 , out_channels // config.groups_width )
        self.shortcut = (
            TFRegNetShortCut(out_channels , stride=stride , name='shortcut' )
            if should_apply_shortcut
            else tf.keras.layers.Activation('linear' , name='shortcut' )
        )
        self.layers = [
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=config.hidden_act , name='layer.0' ),
            TFRegNetConvLayer(
                out_channels , stride=stride , groups=groups , activation=config.hidden_act , name='layer.1' ),
            TFRegNetSELayer(out_channels , reduced_channels=int(round(in_channels / 4 ) ) , name='layer.2' ),
            TFRegNetConvLayer(out_channels , kernel_size=1 , activation=None , name='layer.3' ),
        ]
        self.activation = ACT2FN[config.hidden_act]
    def call( self , hidden_state ):
        '''simple docstring'''
        residual = hidden_state
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class TFRegNetStage( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , config , in_channels , out_channels , stride = 2 , depth = 2 , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        layer = TFRegNetXLayer if config.layer_type == 'x' else TFRegNetYLayer
        self.layers = [
            # downsampling is done in the first layer with stride of 2
            layer(config , in_channels , out_channels , stride=stride , name='layers.0' ),
            *[layer(config , out_channels , out_channels , name=F'layers.{i+1}' ) for i in range(depth - 1 )],
        ]
    def call( self , hidden_state ):
        '''simple docstring'''
        for layer_module in self.layers:
            hidden_state = layer_module(hidden_state )
        return hidden_state
class TFRegNetEncoder( tf.keras.layers.Layer ):
    """simple docstring"""
    def __init__( self , config , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.stages = []
        # based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
        self.stages.append(
            TFRegNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , name='stages.0' , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for i, ((in_channels, out_channels), depth) in enumerate(zip(in_out_channels , config.depths[1:] ) ):
            self.stages.append(TFRegNetStage(config , in_channels , out_channels , depth=depth , name=F'stages.{i+1}' ) )
    def call( self , hidden_state , output_hidden_states = False , return_dict = True ):
        '''simple docstring'''
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return TFBaseModelOutputWithNoAttention(last_hidden_state=hidden_state , hidden_states=hidden_states )
@keras_serializable
class TFRegNetMainLayer( tf.keras.layers.Layer ):
    """simple docstring"""
    config_class = RegNetConfig
    def __init__( self , config , **kwargs ):
        '''simple docstring'''
        super().__init__(**kwargs )
        self.config = config
        self.embedder = TFRegNetEmbeddings(config , name='embedder' )
        self.encoder = TFRegNetEncoder(config , name='encoder' )
        self.pooler = tf.keras.layers.GlobalAveragePooling2D(keepdims=True , name='pooler' )
    @unpack_inputs
    def call( self , pixel_values , output_hidden_states = None , return_dict = None , training = False , ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values , training=training )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        # Change to NCHW output format have uniformity in the modules
        pooled_output = tf.transpose(pooled_output , perm=(0, 3, 1, 2) )
        last_hidden_state = tf.transpose(last_hidden_state , perm=(0, 3, 1, 2) )
        # Change the other hidden state outputs to NCHW as well
        if output_hidden_states:
            hidden_states = tuple([tf.transpose(h , perm=(0, 3, 1, 2) ) for h in encoder_outputs[1]] )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=hidden_states if output_hidden_states else encoder_outputs.hidden_states , )
class TFRegNetPreTrainedModel( TFPreTrainedModel ):
    """simple docstring"""
    config_class = RegNetConfig
    base_model_prefix = "regnet"
    main_input_name = "pixel_values"
    @property
    def input_signature( self ):
        '''simple docstring'''
        return {"pixel_values": tf.TensorSpec(shape=(None, self.config.num_channels, 224, 224) , dtype=tf.float32 )}
REGNET_START_DOCSTRING = r"""
    Parameters:
    This model is a Tensorflow
    [tf.keras.layers.Layer](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Layer) sub-class. Use it as a
    regular Tensorflow Module and refer to the Tensorflow documentation for all matter related to general usage and
    behavior.
    config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.
        Initializing with a config file does not load the weights associated with the model, only the
        configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
"""
REGNET_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`tf.Tensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`ConveNextImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare RegNet model outputting raw features without any specific head on top." , REGNET_START_DOCSTRING , )
class TFRegNetModel( TFRegNetPreTrainedModel ):
    """simple docstring"""
    def __init__( self , config , *inputs , **kwargs ):
        '''simple docstring'''
        super().__init__(config , *inputs , **kwargs )
        self.regnet = TFRegNetMainLayer(config , name='regnet' )
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=TFBaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def call( self , pixel_values , output_hidden_states = None , return_dict = None , training=False , ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values=pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training , )
        if not return_dict:
            return (outputs[0],) + outputs[1:]
        return TFBaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=outputs.last_hidden_state , pooler_output=outputs.pooler_output , hidden_states=outputs.hidden_states , )
@add_start_docstrings(
    "\n    RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    " , REGNET_START_DOCSTRING , )
class TFRegNetForImageClassification( TFRegNetPreTrainedModel , TFSequenceClassificationLoss ):
    """simple docstring"""
    def __init__( self , config , *inputs , **kwargs ):
        '''simple docstring'''
        super().__init__(config , *inputs , **kwargs )
        self.num_labels = config.num_labels
        self.regnet = TFRegNetMainLayer(config , name='regnet' )
        # classification head
        self.classifier = [
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(config.num_labels , name='classifier.1' ) if config.num_labels > 0 else tf.identity,
        ]
    @unpack_inputs
    @add_start_docstrings_to_model_forward(REGNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=TFSequenceClassifierOutput , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def call( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , training=False , ):
        '''simple docstring'''
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.regnet(
            pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict , training=training )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        flattened_output = self.classifier[0](pooled_output )
        logits = self.classifier[1](flattened_output )
        loss = None if labels is None else self.hf_compute_loss(labels=labels , logits=logits )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output
        return TFSequenceClassifierOutput(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
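
# Usage sketch for the classification head above (the checkpoint comes from the
# docstring constants; `image` is any PIL image and is illustrative):
#
#   from transformers import AutoImageProcessor
#   processor = AutoImageProcessor.from_pretrained('facebook/regnet-y-040')
#   model = TFRegNetForImageClassification.from_pretrained('facebook/regnet-y-040')
#   inputs = processor(images=image, return_tensors='tf')
#   logits = model(**inputs).logits  # shape (batch_size, num_labels)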
| 630 | 0 |
"""simple docstring"""
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip("Temporarily disable the doc tests." )
@require_torch
@require_tf
@slow
class _snake_case ( unittest.TestCase ):
"""simple docstring"""
    def analyze_directory( self , directory : Path , identifier : Union[str, None] = None , n_identifier : Union[List[str], None] = None , ignore_files : Union[str, List[str], None] = None , only_modules : bool = True , ):
        """simple docstring"""
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory , file))]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier , list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append("""__init__.py""")
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print("""Testing""" , file)
            if only_modules:
                module_identifier = file.split(""".""")[0]
                try:
                    module = getattr(transformers , module_identifier)
                    suite = doctest.DocTestSuite(module)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures) , 0)
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str("""..""" / directory / file) , optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed , 0)
    def test_modeling_examples( self ):
        """simple docstring"""
        transformers_directory = Path("""src/transformers""")
        files = """modeling"""
        ignore_files = [
            """modeling_ctrl.py""",
            """modeling_tf_ctrl.py""",
        ]
        self.analyze_directory(transformers_directory , identifier=files , ignore_files=ignore_files)
    def test_tokenization_examples( self ):
        """simple docstring"""
        transformers_directory = Path("""src/transformers""")
        files = """tokenization"""
        self.analyze_directory(transformers_directory , identifier=files)
    def test_configuration_examples( self ):
        """simple docstring"""
        transformers_directory = Path("""src/transformers""")
        files = """configuration"""
        self.analyze_directory(transformers_directory , identifier=files)
    def test_remaining_examples( self ):
        """simple docstring"""
        transformers_directory = Path("""src/transformers""")
        n_identifiers = ["""configuration""", """modeling""", """tokenization"""]
        self.analyze_directory(transformers_directory , n_identifier=n_identifiers)
    def test_documentation_examples( self ):
        """simple docstring"""
        doc_source_directory = Path("""docs/source""")
        ignore_files = ["""favicon.ico"""]
        self.analyze_directory(doc_source_directory , ignore_files=ignore_files , only_modules=False)
| 338 |
'''simple docstring'''
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
logger = getLogger(__name__)
DEFAULT_DEVICE = '''cuda''' if torch.cuda.is_available() else '''cpu'''
def generate_summaries_or_translations( examples , out_file , model_name , batch_size = 8 , device = DEFAULT_DEVICE , fp16=False , task="summarization" , prefix=None , **generate_kwargs , ):
    """simple docstring"""
    fout = Path(out_file ).open('''w''' , encoding='''utf-8''' )
    model_name = str(model_name )
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name ).to(device )
    if fp16:
        model = model.half()
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    logger.info(F"Inferred tokenizer type: {tokenizer.__class__}" )  # if this is wrong, check config.model_type.
    start_time = time.time()
    # update config with task specific params
    use_task_specific_params(model , task )
    if prefix is None:
        prefix = prefix or getattr(model.config , '''prefix''' , '''''' ) or ''''''
    for examples_chunk in tqdm(list(chunks(examples , batch_size ) ) ):
        examples_chunk = [prefix + text for text in examples_chunk]
        batch = tokenizer(examples_chunk , return_tensors='''pt''' , truncation=True , padding='''longest''' ).to(device )
        summaries = model.generate(
            input_ids=batch.input_ids , attention_mask=batch.attention_mask , **generate_kwargs , )
        dec = tokenizer.batch_decode(summaries , skip_special_tokens=True , clean_up_tokenization_spaces=False )
        for hypothesis in dec:
            fout.write(hypothesis + '''\n''' )
            fout.flush()
    fout.close()
    runtime = int(time.time() - start_time )  # seconds
    n_obs = len(examples )
    return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def datetime_now():
    """simple docstring"""
    return datetime.datetime.now().strftime('''%Y-%m-%d %H:%M:%S''' )
def run_generate(verbose=True ):
    """simple docstring"""
    parser = argparse.ArgumentParser()
    parser.add_argument('''model_name''' , type=str , help='''like facebook/bart-large-cnn,t5-base, etc.''' )
    parser.add_argument('''input_path''' , type=str , help='''like cnn_dm/test.source''' )
    parser.add_argument('''save_path''' , type=str , help='''where to save summaries''' )
    parser.add_argument('''--reference_path''' , type=str , required=False , help='''like cnn_dm/test.target''' )
    parser.add_argument('''--score_path''' , type=str , required=False , default='''metrics.json''' , help='''where to save metrics''' )
    parser.add_argument('''--device''' , type=str , required=False , default=DEFAULT_DEVICE , help='''cuda, cuda:1, cpu etc.''' )
    parser.add_argument(
        '''--prefix''' , type=str , required=False , default=None , help='''will be added to the begininng of src examples''' )
    parser.add_argument('''--task''' , type=str , default='''summarization''' , help='''used for task_specific_params + metrics''' )
    parser.add_argument('''--bs''' , type=int , default=8 , required=False , help='''batch size''' )
    parser.add_argument(
        '''--n_obs''' , type=int , default=-1 , required=False , help='''How many observations. Defaults to all.''' )
    parser.add_argument('''--fp16''' , action='''store_true''' )
    parser.add_argument('''--dump-args''' , action='''store_true''' , help='''print the custom hparams with the results''' )
    parser.add_argument(
        '''--info''' , nargs='''?''' , type=str , const=datetime_now() , help=(
            '''use in conjunction w/ --dump-args to print with the results whatever other info you\'d like, e.g.'''
            ''' lang=en-ru. If no value is passed, the current datetime string will be used.'''
        ) , )
    # Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
    args , rest = parser.parse_known_args()
    parsed_args = parse_numeric_n_bool_cl_kwargs(rest )
    if parsed_args and verbose:
        print(F"parsed the following generate kwargs: {parsed_args}" )
    examples = [''' ''' + x.rstrip() if '''t5''' in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
    if args.n_obs > 0:
        examples = examples[: args.n_obs]
    Path(args.save_path ).parent.mkdir(exist_ok=True )
    if args.reference_path is None and Path(args.score_path ).exists():
        warnings.warn(F"score_path {args.score_path} will be overwritten unless you type ctrl-c." )
    if args.device == "cpu" and args.fp16:
        # this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
        raise ValueError('''Can\'t mix --fp16 and --device cpu''' )
    runtime_metrics = generate_summaries_or_translations(
        examples , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fp16=args.fp16 , task=args.task , prefix=args.prefix , **parsed_args , )
    if args.reference_path is None:
        return {}
    # Compute scores
    score_fn = calculate_bleu if '''translation''' in args.task else calculate_rouge
    output_lns = [x.rstrip() for x in open(args.save_path ).readlines()]
    reference_lns = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(output_lns )]
    scores = score_fn(output_lns , reference_lns )
    scores.update(runtime_metrics )
    if args.dump_args:
        scores.update(parsed_args )
    if args.info:
        scores['''info'''] = args.info
    if verbose:
        print(scores )
    if args.score_path is not None:
        json.dump(scores , open(args.score_path , '''w''' ) )
    return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
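
# Usage for summarization (mirrors the MT example above; file paths are examples):
# python run_eval.py facebook/bart-large-cnn cnn_dm/test.source preds.txt \
#     --reference_path cnn_dm/test.target --score_path rouge.json --task summarization --bs 16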
| 41 | 0 |
'''simple docstring'''
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
TOKENIZER_CLASSES = {name: getattr(transformers, name + """Fast""") for name in SLOW_TO_FAST_CONVERTERS}
def convert_slow_checkpoint_to_fast( tokenizer_name , checkpoint_name , dump_path , force_download ):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(F'Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys() )}.' )
    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers , tokenizer_name + 'Fast' )}
    logger.info(F'Loading tokenizer classes: {tokenizer_names}' )
    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]
        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys() )
        else:
            checkpoint_names = [checkpoint_name]
        logger.info(F'For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}' )
        for checkpoint in checkpoint_names:
            logger.info(F'Loading {tokenizer_class.__class__.__name__} {checkpoint}' )
            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint , force_download=force_download )
            # Save fast tokenizer
            logger.info(F'Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}' )
            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory , checkpoint_prefix_name = checkpoint.split('/' )
                dump_path_full = os.path.join(dump_path , checkpoint_directory )
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path
            logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values() )[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values() )[0][checkpoint]
                next_char = file_path.split(checkpoint )[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full , checkpoint_prefix_name )
                    checkpoint_prefix_name = None
                logger.info(F'=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}' )
            file_names = tokenizer.save_pretrained(
                dump_path_full , legacy_format=False , filename_prefix=checkpoint_prefix_name )
            logger.info(F'=> File names {file_names}' )
            for file_name in file_names:
                if not file_name.endswith('tokenizer.json' ):
                    os.remove(file_name )
                    logger.info(F'=> removing {file_name}' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--dump_path""", default=None, type=str, required=True, help="""Path to output generated fast tokenizer files."""
)
parser.add_argument(
"""--tokenizer_name""",
default=None,
type=str,
help=(
f"""Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will """
"""download and convert all the checkpoints from AWS."""
),
)
parser.add_argument(
"""--checkpoint_name""",
default=None,
type=str,
help="""Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.""",
)
parser.add_argument(
"""--force_download""",
action="""store_true""",
help="""Re-download checkpoints.""",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
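
# Example invocations (paths are placeholders):
#   python convert_slow_tokenizers_checkpoints_to_fast.py --dump_path ./fast_tokenizers
#   python convert_slow_tokenizers_checkpoints_to_fast.py --tokenizer_name BertTokenizer \
#       --checkpoint_name bert-base-uncased --dump_path ./fast_tokenizers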
| 233 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
logger = logging.get_logger(__name__)
class SequenceFeatureExtractor( FeatureExtractionMixin ):
    def __init__( self , feature_size : int , sampling_rate : int , padding_value : float , **kwargs ):
        """simple docstring"""
        self.feature_size = feature_size
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.padding_side = kwargs.pop('padding_side' , 'right' )
        self.return_attention_mask = kwargs.pop('return_attention_mask' , True )
        super().__init__(**kwargs )
    def pad( self , processed_features : Union[
            BatchFeature,
            List[BatchFeature],
            Dict[str, BatchFeature],
            Dict[str, List[BatchFeature]],
            List[Dict[str, BatchFeature]],
        ] , padding : Union[bool, str, PaddingStrategy] = True , max_length : Optional[int] = None , truncation : bool = False , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , return_tensors : Optional[Union[str, TensorType]] = None , ):
        """simple docstring"""
        if isinstance(processed_features , (list, tuple) ) and isinstance(processed_features[0] , (dict, BatchFeature) ):
            processed_features = {
                key: [example[key] for example in processed_features] for key in processed_features[0].keys()
            }
        # The model's main input name, usually `input_values`, has be passed for padding
        if self.model_input_names[0] not in processed_features:
            raise ValueError(
                'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
                f' to this method that includes {self.model_input_names[0]}, but you provided'
                f' {list(processed_features.keys() )}' )
        required_input = processed_features[self.model_input_names[0]]
        return_attention_mask = (
            return_attention_mask if return_attention_mask is not None else self.return_attention_mask
        )
        if len(required_input ) == 0:
            if return_attention_mask:
                processed_features['attention_mask'] = []
            return processed_features
        # If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
        # and rebuild them afterwards if no return_tensors is specified
        # Note that we lose the specific device the tensor may be on for PyTorch
        first_element = required_input[0]
        if isinstance(first_element , (list, tuple) ):
            # first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
            index = 0
            while len(required_input[index] ) == 0:
                index += 1
            if index < len(required_input ):
                first_element = required_input[index][0]
        if return_tensors is None:
            if is_tf_tensor(first_element ):
                return_tensors = 'tf'
            elif is_torch_tensor(first_element ):
                return_tensors = 'pt'
            elif isinstance(first_element , (int, float, list, tuple, np.ndarray) ):
                return_tensors = 'np'
            else:
                raise ValueError(
                    f'type of {first_element} unknown: {type(first_element )}. '
                    'Should be one of a python, numpy, pytorch or tensorflow object.' )
        for key, value in processed_features.items():
            if isinstance(value[0] , (int, float) ):
                processed_features[key] = to_numpy(value )
            else:
                processed_features[key] = [to_numpy(v ) for v in value]
        # Convert padding_strategy in PaddingStrategy
        padding_strategy = self._get_padding_strategies(padding=padding , max_length=max_length )
        required_input = processed_features[self.model_input_names[0]]
        batch_size = len(required_input )
        if not all(len(v ) == batch_size for v in processed_features.values() ):
            raise ValueError('Some items in the output dictionary have a different batch size than others.' )
        truncated_inputs = []
        for i in range(batch_size ):
            inputs = {k: v[i] for k, v in processed_features.items()}
            # truncation
            inputs_slice = self._truncate(
                inputs , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , truncation=truncation , )
            truncated_inputs.append(inputs_slice )
        if padding_strategy == PaddingStrategy.LONGEST:
            # make sure that `max_length` cannot be longer than the longest truncated length
            max_length = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
            padding_strategy = PaddingStrategy.MAX_LENGTH
        batch_outputs = {}
        for i in range(batch_size ):
            # padding
            outputs = self._pad(
                truncated_inputs[i] , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
            for key, value in outputs.items():
                if key not in batch_outputs:
                    batch_outputs[key] = []
                if value.dtype is np.dtype(np.float64 ):
                    value = value.astype(np.float32 )
                batch_outputs[key].append(value )
        return BatchFeature(batch_outputs , tensor_type=return_tensors )
    def _pad(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        """Pad a single example (or BatchFeature of length one) up to ``max_length``."""
        required_input = processed_features[self.model_input_names[0]]

        if padding_strategy == PaddingStrategy.LONGEST:
            max_length = len(required_input)

        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_padded = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(required_input) < max_length

        if return_attention_mask and "attention_mask" not in processed_features:
            processed_features["attention_mask"] = np.ones(len(required_input), dtype=np.int32)

        if needs_to_be_padded:
            difference = max_length - len(required_input)

            if self.padding_side == "right":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (0, difference)
                    )
                padding_shape = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            elif self.padding_side == "left":
                if return_attention_mask:
                    processed_features["attention_mask"] = np.pad(
                        processed_features["attention_mask"], (difference, 0)
                    )
                padding_shape = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
                processed_features[self.model_input_names[0]] = np.pad(
                    required_input, padding_shape, "constant", constant_values=self.padding_value
                )
            else:
                raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return processed_features
    def _truncate(
        self,
        processed_features: Union[Dict[str, np.ndarray], BatchFeature],
        max_length: Optional[int] = None,
        pad_to_multiple_of: Optional[int] = None,
        truncation: Optional[bool] = None,
    ):
        """Truncate inputs to a predefined length or to the max length in the batch."""
        if not truncation:
            return processed_features
        elif truncation and max_length is None:
            raise ValueError("When setting ``truncation=True``, make sure that ``max_length`` is defined.")

        required_input = processed_features[self.model_input_names[0]]

        # find `max_length` that fits `pad_to_multiple_of`
        if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
            max_length = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of

        needs_to_be_truncated = len(required_input) > max_length

        if needs_to_be_truncated:
            processed_features[self.model_input_names[0]] = processed_features[self.model_input_names[0]][:max_length]
            if "attention_mask" in processed_features:
                processed_features["attention_mask"] = processed_features["attention_mask"][:max_length]

        return processed_features
    def _get_padding_strategies(self, padding=False, max_length=None):
        """Find the correct padding strategy."""
        if padding is not False:
            if padding is True:
                padding_strategy = PaddingStrategy.LONGEST  # Default to pad to the longest sequence in the batch
            elif not isinstance(padding, PaddingStrategy):
                padding_strategy = PaddingStrategy(padding)
            elif isinstance(padding, PaddingStrategy):
                padding_strategy = padding
        else:
            padding_strategy = PaddingStrategy.DO_NOT_PAD

        # Set max length if needed
        if max_length is None:
            if padding_strategy == PaddingStrategy.MAX_LENGTH:
                raise ValueError(
                    f"When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined"
                )

        # Test if we have a padding value
        if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
            raise ValueError(
                "Asking to pad but the feature_extractor does not have a padding value. Please select a value to use"
                " as `padding_value`. For example: `feature_extractor.padding_value = 0.0`."
            )

        return padding_strategy
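# --- Usage sketch (added; not part of the original file). The three helpers above are
# driven by `SequenceFeatureExtractor.pad`. Assuming a concrete subclass such as
# Wav2Vec2FeatureExtractor, a typical call looks like:
#
#   from transformers import Wav2Vec2FeatureExtractor
#   extractor = Wav2Vec2FeatureExtractor(feature_size=1, sampling_rate=16000, padding_value=0.0)
#   batch = extractor.pad(
#       {"input_values": [[0.1, 0.2, 0.3], [0.4]]},
#       padding="longest",          # resolved to PaddingStrategy.LONGEST by _get_padding_strategies
#       return_attention_mask=True,
#       return_tensors="np",
#   )
#   # batch["input_values"] has shape (2, 3): the short sequence is right-padded with 0.0,
#   # and batch["attention_mask"] marks real frames with 1 and padding with 0.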
| 233 | 1 |
"""simple docstring"""
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""A basic Transformer block: self-attention, optional cross-attention, feed-forward."""

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    r"""A feed-forward layer with a configurable activation projection."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation with optional tanh approximation via the `approximate` flag."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A gated linear unit variant using GELU as the gate activation."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""The approximate form of GELU: x * sigmoid(1.702 * x)."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Adaptive layer norm zero (adaLN-Zero), conditioned on timestep and class labels."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
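# --- Added usage sketch (not part of the original module): a minimal smoke test for the
# feed-forward block defined above. FeedForward maps (batch, seq, dim) -> (batch, seq, dim).
if __name__ == "__main__":
    _ff = FeedForward(dim=64, mult=4, dropout=0.0, activation_fn="geglu")
    _hidden_states = torch.randn(2, 16, 64)  # (batch, sequence, channels)
    assert _ff(_hidden_states).shape == _hidden_states.shape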
| 690 |
"""simple docstring"""
from __future__ import annotations
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """Run A* from ``init`` to ``goal`` and return the path plus the action grid."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]

    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
print("ACTION MAP")
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 690 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
    "configuration_bloom": ["BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP", "BloomConfig", "BloomOnnxConfig"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase = ["""BloomTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bloom"] = [
        "BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BloomForCausalLM",
        "BloomModel",
        "BloomPreTrainedModel",
        "BloomForSequenceClassification",
        "BloomForTokenClassification",
        "BloomForQuestionAnswering",
    ]
if TYPE_CHECKING:
from .configuration_bloom import BLOOM_PRETRAINED_CONFIG_ARCHIVE_MAP, BloomConfig, BloomOnnxConfig
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bloom_fast import BloomTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bloom import (
BLOOM_PRETRAINED_MODEL_ARCHIVE_LIST,
BloomForCausalLM,
BloomForQuestionAnswering,
BloomForSequenceClassification,
BloomForTokenClassification,
BloomModel,
BloomPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
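# --- Note (added for clarity): with the _LazyModule pattern above, heavy submodules are
# imported only on first attribute access, e.g.:
#
#   from transformers.models.bloom import BloomConfig   # imports configuration_bloom only
#   from transformers.models.bloom import BloomModel    # first access triggers the torch-backed modeling_bloom import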
| 721 |
"""simple docstring"""
from math import factorial
def A( snake_case_ = 20 ):
"""simple docstring"""
lowercase__: Tuple = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
lowercase__: int = n // 2
return int(factorial(snake_case_ ) / (factorial(snake_case_ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print("""Invalid entry - please enter a number.""")
| 120 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFAutoModel, is_tensorflow_text_available, is_tf_available
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.testing_utils import require_tensorflow_text, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_tensorflow_text_available():
from transformers.models.bert import TFBertTokenizer
TOKENIZER_CHECKPOINTS = ["bert-base-uncased", "bert-base-cased"]
TINY_MODEL_CHECKPOINT = "hf-internal-testing/tiny-bert-tf-only"
if is_tf_available():
    class ModelToSave(tf.keras.Model):
        def __init__(self, tokenizer):
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT)
            self.bert = TFAutoModel.from_config(config)

        def call(self, inputs):
            tokenized = self.tokenizer(inputs)
            out = self.bert(**tokenized)
            return out["pooler_output"]
@require_tf
@require_tensorflow_text
class BertTokenizationTest(unittest.TestCase):
    def setUp(self):
        super().setUp()

        self.tokenizers = [
            BertTokenizer.from_pretrained(checkpoint) for checkpoint in (TOKENIZER_CHECKPOINTS * 2)
        ]  # repeat for when fast_bert_tokenizer=false
        self.tf_tokenizers = [TFBertTokenizer.from_pretrained(checkpoint) for checkpoint in TOKENIZER_CHECKPOINTS] + [
            TFBertTokenizer.from_pretrained(checkpoint, use_fast_bert_tokenizer=False)
            for checkpoint in TOKENIZER_CHECKPOINTS
        ]
        assert len(self.tokenizers) == len(self.tf_tokenizers)

        self.test_sentences = [
            "This is a straightforward English test sentence.",
            "This one has some weird characters\rto\nsee\r\nif those\u00E9break things.",
            "Now we're going to add some Chinese: 一 二 三 一二三",
            "And some much more rare Chinese: 齉 堃 齉堃",
            "Je vais aussi écrire en français pour tester les accents",
            "Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ",
        ]
        self.paired_sentences = list(zip(self.test_sentences, self.test_sentences[::-1]))
    def test_output_equivalence(self):
        for tokenizer, tf_tokenizer in zip(self.tokenizers, self.tf_tokenizers):
            for test_inputs in (self.test_sentences, self.paired_sentences):
                python_outputs = tokenizer(test_inputs, return_tensors="tf", padding="longest")
                tf_outputs = tf_tokenizer(test_inputs)

                for key in python_outputs.keys():
                    self.assertTrue(tf.reduce_all(python_outputs[key].shape == tf_outputs[key].shape))
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs[key], tf.int64) == tf_outputs[key]))
@slow
    def test_different_pairing_styles(self):
        for tf_tokenizer in self.tf_tokenizers:
            merged_outputs = tf_tokenizer(self.paired_sentences)
            separated_outputs = tf_tokenizer(
                text=[sentence[0] for sentence in self.paired_sentences],
                text_pair=[sentence[1] for sentence in self.paired_sentences],
            )
            for key in merged_outputs.keys():
                self.assertTrue(tf.reduce_all(tf.cast(merged_outputs[key], tf.int64) == separated_outputs[key]))
@slow
    def test_graph_mode(self):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer)
            for test_inputs in (self.test_sentences, self.paired_sentences):
                test_inputs = tf.constant(test_inputs)
                compiled_outputs = compiled_tokenizer(test_inputs)
                eager_outputs = tf_tokenizer(test_inputs)

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key]))
@slow
    def test_export_for_inference(self):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer)
            test_inputs = tf.convert_to_tensor(self.test_sentences)
            out = model(test_inputs)  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir) / "saved.model"
                model.save(save_path)
                loaded_model = tf.keras.models.load_model(save_path)
                loaded_output = loaded_model(test_inputs)
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertLessEqual(tf.reduce_max(tf.abs(out - loaded_output)), 1e-5)
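# --- Run note (added; the test path is illustrative): these tests are executed with pytest,
# and the @slow-decorated cases only run when the RUN_SLOW environment variable is set, e.g.
#
#   RUN_SLOW=1 python -m pytest tests/models/bert/test_tokenization_bert_tf.py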
| 222 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/vocab.txt',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/vocab.txt',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'
),
'bert-base-multilingual-cased': 'https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt',
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'
),
'bert-base-german-dbmdz-cased': 'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt',
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'bert-base-uncased': 'https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json',
'bert-large-uncased': 'https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json',
'bert-base-cased': 'https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json',
'bert-large-cased': 'https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json',
'bert-base-multilingual-uncased': (
'https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'
),
'bert-base-multilingual-cased': (
'https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'
),
'bert-base-chinese': 'https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json',
'bert-base-german-cased': 'https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json',
'bert-large-uncased-whole-word-masking': (
'https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking': (
'https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'
),
'bert-large-uncased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-large-cased-whole-word-masking-finetuned-squad': (
'https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'
),
'bert-base-cased-finetuned-mrpc': (
'https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-cased': (
'https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'
),
'bert-base-german-dbmdz-uncased': (
'https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-cased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'
),
'TurkuNLP/bert-base-finnish-uncased-v1': (
'https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'
),
'wietsedv/bert-base-dutch-cased': (
'https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'bert-base-uncased': 512,
'bert-large-uncased': 512,
'bert-base-cased': 512,
'bert-large-cased': 512,
'bert-base-multilingual-uncased': 512,
'bert-base-multilingual-cased': 512,
'bert-base-chinese': 512,
'bert-base-german-cased': 512,
'bert-large-uncased-whole-word-masking': 512,
'bert-large-cased-whole-word-masking': 512,
'bert-large-uncased-whole-word-masking-finetuned-squad': 512,
'bert-large-cased-whole-word-masking-finetuned-squad': 512,
'bert-base-cased-finetuned-mrpc': 512,
'bert-base-german-dbmdz-cased': 512,
'bert-base-german-dbmdz-uncased': 512,
'TurkuNLP/bert-base-finnish-cased-v1': 512,
'TurkuNLP/bert-base-finnish-uncased-v1': 512,
'wietsedv/bert-base-dutch-cased': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'bert-base-uncased': {'do_lower_case': True},
'bert-large-uncased': {'do_lower_case': True},
'bert-base-cased': {'do_lower_case': False},
'bert-large-cased': {'do_lower_case': False},
'bert-base-multilingual-uncased': {'do_lower_case': True},
'bert-base-multilingual-cased': {'do_lower_case': False},
'bert-base-chinese': {'do_lower_case': False},
'bert-base-german-cased': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking': {'do_lower_case': True},
'bert-large-cased-whole-word-masking': {'do_lower_case': False},
'bert-large-uncased-whole-word-masking-finetuned-squad': {'do_lower_case': True},
'bert-large-cased-whole-word-masking-finetuned-squad': {'do_lower_case': False},
'bert-base-cased-finetuned-mrpc': {'do_lower_case': False},
'bert-base-german-dbmdz-cased': {'do_lower_case': False},
'bert-base-german-dbmdz-uncased': {'do_lower_case': True},
'TurkuNLP/bert-base-finnish-cased-v1': {'do_lower_case': False},
'TurkuNLP/bert-base-finnish-uncased-v1': {'do_lower_case': True},
'wietsedv/bert-base-dutch-cased': {'do_lower_case': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    r"""Construct a "fast" BERT tokenizer (backed by HuggingFace's *tokenizers* library)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
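# --- Usage sketch (added; requires network access to download the vocabulary):
#
#   tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#   enc = tokenizer("hello world")
#   # enc["input_ids"][0] == tokenizer.cls_token_id and enc["input_ids"][-1] == tokenizer.sep_token_id,
#   # matching build_inputs_with_special_tokens above.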
| 222 | 1 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__UpperCAmelCase : Optional[int] = logging.get_logger()
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = True) -> Tuple:
'''simple docstring'''
print(F'''Converting {name}...''')
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
__snake_case: str = timm.create_model("""levit_128s""" , pretrained=SCREAMING_SNAKE_CASE__)
else:
__snake_case: Dict = timm.create_model("""levit_128""" , pretrained=SCREAMING_SNAKE_CASE__)
if hidden_sizes == 192:
__snake_case: Union[str, Any] = timm.create_model("""levit_192""" , pretrained=SCREAMING_SNAKE_CASE__)
if hidden_sizes == 256:
__snake_case: List[Any] = timm.create_model("""levit_256""" , pretrained=SCREAMING_SNAKE_CASE__)
if hidden_sizes == 384:
__snake_case: int = timm.create_model("""levit_384""" , pretrained=SCREAMING_SNAKE_CASE__)
from_model.eval()
__snake_case: List[Any] = LevitForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE__).eval()
__snake_case: Any = OrderedDict()
__snake_case: List[str] = from_model.state_dict()
__snake_case: str = list(from_model.state_dict().keys())
__snake_case: List[Any] = list(our_model.state_dict().keys())
print(len(SCREAMING_SNAKE_CASE__) , len(SCREAMING_SNAKE_CASE__))
for i in range(len(SCREAMING_SNAKE_CASE__)):
__snake_case: Dict = weights[og_keys[i]]
our_model.load_state_dict(SCREAMING_SNAKE_CASE__)
__snake_case: List[Any] = torch.randn((2, 3, 224, 224))
__snake_case: str = from_model(SCREAMING_SNAKE_CASE__)
__snake_case: List[Any] = our_model(SCREAMING_SNAKE_CASE__).logits
assert torch.allclose(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__), "The model logits don't match the original one."
__snake_case: Any = name
print(SCREAMING_SNAKE_CASE__)
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name)
__snake_case: Optional[int] = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name)
print(F'''Pushed {checkpoint_name}''')
def A__ ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None , SCREAMING_SNAKE_CASE__ = True) -> int:
'''simple docstring'''
__snake_case: Any = """imagenet-1k-id2label.json"""
__snake_case: Optional[int] = 1000
__snake_case: Optional[int] = (1, num_labels)
__snake_case: Tuple = """huggingface/label-files"""
__snake_case: Tuple = num_labels
__snake_case: Tuple = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type="""dataset""") , """r"""))
__snake_case: int = {int(SCREAMING_SNAKE_CASE__): v for k, v in idalabel.items()}
__snake_case: Tuple = idalabel
__snake_case: Union[str, Any] = {v: k for k, v in idalabel.items()}
__snake_case: List[str] = partial(SCREAMING_SNAKE_CASE__ , num_labels=SCREAMING_SNAKE_CASE__ , idalabel=SCREAMING_SNAKE_CASE__ , labelaid=SCREAMING_SNAKE_CASE__)
__snake_case: Optional[int] = {
"""levit-128S""": 128,
"""levit-128""": 128,
"""levit-192""": 192,
"""levit-256""": 256,
"""levit-384""": 384,
}
__snake_case: List[str] = {
"""levit-128S""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-128""": ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
"""levit-192""": ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-256""": ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
"""levit-384""": ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , names_to_config[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__)
return config, expected_shape
if __name__ == "__main__":
__UpperCAmelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help="The name of the model you wish to convert, it must be one of the supported Levit* architecture,",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="levit-dump-folder/",
type=Path,
required=False,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
parser.add_argument(
"--no-push_to_hub",
dest="push_to_hub",
action="store_false",
help="Do not push model and image processor to the hub",
)
__UpperCAmelCase : int = parser.parse_args()
__UpperCAmelCase : Path = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
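# --- Example invocation (added; the script filename is hypothetical):
#
#   python convert_levit_checkpoint.py --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ --push_to_hub
#
# Omitting --model_name converts and pushes every checkpoint listed in names_to_config.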
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__UpperCAmelCase : Tuple = {
"configuration_swiftformer": [
"SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SwiftFormerConfig",
"SwiftFormerOnnxConfig",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__UpperCAmelCase : Optional[Any] = [
"SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"SwiftFormerForImageClassification",
"SwiftFormerModel",
"SwiftFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
__UpperCAmelCase : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 155 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class VisionTextDualEncoderProcessor(ProcessorMixin):
    # Class and attribute names reconstructed from the method bodies; this matches the
    # structure of transformers' VisionTextDualEncoderProcessor.
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]
| 66 |
from sklearn.metrics import mean_squared_error
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = """\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
"""
SCREAMING_SNAKE_CASE__ : Tuple = """\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = """
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {\"raw_values\", \"uniform_average\"} or array-like of shape (n_outputs,), default=\"uniform_average\"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
\"raw_values\" : Returns a full set of errors in case of multioutput input.
\"uniform_average\" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric(\"mse\")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{'mse': 0.6123724356957945}
If you're using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric(\"mse\", \"multilist\")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'mse': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput='raw_values')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{'mse': array([0.41666667, 1. ])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Mse(datasets.Metric):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"
] , )
    def _get_feature_types(self):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("float" ) ),
"references": datasets.Sequence(datasets.Value("float" ) ),
}
else:
return {
"predictions": datasets.Value("float" ),
"references": datasets.Value("float" ),
}
    def _compute(self, predictions, references, sample_weight=None, multioutput="uniform_average", squared=True):
        """Return the mean squared error of `predictions` relative to `references`."""
        mse = mean_squared_error(
            references, predictions, sample_weight=sample_weight, multioutput=multioutput, squared=squared
        )
        return {"mse": mse}
| 112 | 0 |
import dataclasses
import json
import warnings
from dataclasses import dataclass, field
from time import time
from typing import List
from ..utils import logging
logger = logging.get_logger(__name__)


def list_field(default=None, metadata=None):
    return field(default_factory=lambda: default, metadata=metadata)
@dataclass
class BenchmarkArguments:
UpperCamelCase_ = list_field(
default=[] , metadata={
"""help""": (
"""Model checkpoints to be provided to the AutoModel classes. Leave blank to benchmark the base version"""
""" of all available models"""
)
} , )
UpperCamelCase_ = list_field(
default=[8] , metadata={"""help""": """List of batch sizes for which memory and time performance will be evaluated"""})
UpperCamelCase_ = list_field(
default=[8, 32, 128, 512] , metadata={"""help""": """List of sequence lengths for which memory and time performance will be evaluated"""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to benchmark inference of model. Inference can be disabled via --no-inference."""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to run on available cuda devices. Cuda can be disabled via --no-cuda."""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to run on available tpu devices. TPU can be disabled via --no-tpu."""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Use FP16 to accelerate inference."""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Benchmark training of model"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Verbose memory tracing"""})
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={"""help""": """Whether to perform speed measurements. Speed measurements can be disabled via --no-speed."""} , )
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={
"""help""": """Whether to perform memory measurements. Memory measurements can be disabled via --no-memory"""
} , )
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Trace memory line by line"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Save result to a CSV file"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Save all print statements in a log file"""})
UpperCamelCase_ = field(default=UpperCamelCase_ , metadata={"""help""": """Whether to print environment information"""})
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Whether to use multiprocessing for memory and speed measurement. It is highly recommended to use"""
""" multiprocessing for accurate CPU and GPU memory measurements. This option should only be disabled"""
""" for debugging / testing and on TPU."""
)
} , )
UpperCamelCase_ = field(
default=f"inference_time_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv."""} , )
UpperCamelCase_ = field(
default=f"inference_memory_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv."""} , )
UpperCamelCase_ = field(
default=f"train_time_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving time results to csv for training."""} , )
UpperCamelCase_ = field(
default=f"train_memory_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving memory results to csv for training."""} , )
UpperCamelCase_ = field(
default=f"env_info_{round(time())}.csv" , metadata={"""help""": """CSV filename used if saving environment information."""} , )
UpperCamelCase_ = field(
default=f"log_{round(time())}.csv" , metadata={"""help""": """Log filename used if print statements are saved in log."""} , )
UpperCamelCase_ = field(default=3 , metadata={"""help""": """Times an experiment will be run."""})
UpperCamelCase_ = field(
default=UpperCamelCase_ , metadata={
"""help""": (
"""Instead of loading the model as defined in `config.architectures` if exists, just load the pretrain"""
""" model weights."""
)
} , )
def __A ( self : Dict ):
'''simple docstring'''
warnings.warn(
f"""The class {self.__class__} is deprecated. Hugging Face Benchmarking utils"""
''' are deprecated in general and it is advised to use external Benchmarking libraries '''
''' to benchmark Transformer models.''' , UpperCamelCase__ , )
def __A ( self : Any ):
'''simple docstring'''
return json.dumps(dataclasses.asdict(self ) , indent=2 )
@property
def __A ( self : List[str] ):
'''simple docstring'''
if len(self.models ) <= 0:
raise ValueError(
'''Please make sure you provide at least one model name / model identifier, *e.g.* `--models'''
''' bert-base-cased` or `args.models = [\'bert-base-cased\'].''' )
return self.models
@property
def __A ( self : List[Any] ):
'''simple docstring'''
if not self.multi_process:
return False
elif self.is_tpu:
logger.info('''Multiprocessing is currently not possible on TPU.''' )
return False
else:
return True
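# --- Usage sketch (added for clarity): this dataclass is normally populated from the command
# line via HfArgumentParser, typically through a framework subclass such as
# PyTorchBenchmarkArguments.
#
#   from transformers import HfArgumentParser, PyTorchBenchmarkArguments
#   parser = HfArgumentParser(PyTorchBenchmarkArguments)
#   benchmark_args = parser.parse_args_into_dataclasses()[0]
#   # e.g. `python run_benchmark.py --models bert-base-cased --batch_sizes 8 --sequence_lengths 128`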
| 34 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
def get_resize_output_image_size(
    input_image: np.ndarray,
    output_size: Union[int, Iterable[int]],
    keep_aspect_ratio: bool,
    multiple: int,
) -> Tuple[int, int]:
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple

        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple

        if x < min_val:
            x = math.ceil(val / multiple) * multiple

        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size

    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size

    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width

    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height

    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)

    return (new_height, new_width)
class DPTImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
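# A minimal standalone sketch of the post-processing above (assuming PyTorch is
# installed): upsample one image's logits to the requested target size, then take
# the per-pixel argmax over the class dimension to obtain a segmentation map.
if __name__ == "__main__":
    import torch

    _dummy_logits = torch.randn(2, 150, 24, 24)  # (batch, num_classes, h, w)
    _target_size = (480, 480)  # hypothetical original image size
    _resized = torch.nn.functional.interpolate(
        _dummy_logits[0].unsqueeze(dim=0), size=_target_size, mode="bilinear", align_corners=False
    )
    _seg_map = _resized[0].argmax(dim=0)
    print(_seg_map.shape)  # torch.Size([480, 480]) of class indices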
| 34 | 1 |
import argparse
import json
from collections import OrderedDict
from functools import partial
from pathlib import Path
import timm
import torch
from huggingface_hub import hf_hub_download
from transformers import LevitConfig, LevitForImageClassificationWithTeacher, LevitImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
UpperCAmelCase = logging.get_logger()
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True ):
print(F'''Converting {name}...''' )
with torch.no_grad():
if hidden_sizes == 128:
if name[-1] == "S":
lowercase = timm.create_model('levit_128s' , pretrained=__SCREAMING_SNAKE_CASE )
else:
lowercase = timm.create_model('levit_128' , pretrained=__SCREAMING_SNAKE_CASE )
if hidden_sizes == 192:
lowercase = timm.create_model('levit_192' , pretrained=__SCREAMING_SNAKE_CASE )
if hidden_sizes == 256:
lowercase = timm.create_model('levit_256' , pretrained=__SCREAMING_SNAKE_CASE )
if hidden_sizes == 384:
lowercase = timm.create_model('levit_384' , pretrained=__SCREAMING_SNAKE_CASE )
from_model.eval()
lowercase = LevitForImageClassificationWithTeacher(__SCREAMING_SNAKE_CASE ).eval()
lowercase = OrderedDict()
lowercase = from_model.state_dict()
lowercase = list(from_model.state_dict().keys() )
lowercase = list(our_model.state_dict().keys() )
print(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
lowercase = weights[og_keys[i]]
our_model.load_state_dict(__SCREAMING_SNAKE_CASE )
lowercase = torch.randn((2, 3, 224, 224) )
lowercase = from_model(__SCREAMING_SNAKE_CASE )
lowercase = our_model(__SCREAMING_SNAKE_CASE ).logits
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
lowercase = name
print(__SCREAMING_SNAKE_CASE )
if push_to_hub:
our_model.save_pretrained(save_directory / checkpoint_name )
lowercase = LevitImageProcessor()
image_processor.save_pretrained(save_directory / checkpoint_name )
print(F'''Pushed {checkpoint_name}''' )
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True ):
lowercase = 'imagenet-1k-id2label.json'
lowercase = 1000
lowercase = (1, num_labels)
lowercase = 'huggingface/label-files'
lowercase = num_labels
lowercase = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type='dataset' ) , 'r' ) )
lowercase = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
lowercase = idalabel
lowercase = {v: k for k, v in idalabel.items()}
lowercase = partial(__SCREAMING_SNAKE_CASE , num_labels=__SCREAMING_SNAKE_CASE , idalabel=__SCREAMING_SNAKE_CASE , labelaid=__SCREAMING_SNAKE_CASE )
lowercase = {
'levit-128S': 128,
'levit-128': 128,
'levit-192': 192,
'levit-256': 256,
'levit-384': 384,
}
lowercase = {
'levit-128S': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 6, 8] , depths=[2, 3, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-128': ImageNetPreTrainedConfig(
hidden_sizes=[128, 256, 384] , num_attention_heads=[4, 8, 12] , depths=[4, 4, 4] , key_dim=[16, 16, 16] , drop_path_rate=0 , ),
'levit-192': ImageNetPreTrainedConfig(
hidden_sizes=[192, 288, 384] , num_attention_heads=[3, 5, 6] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-256': ImageNetPreTrainedConfig(
hidden_sizes=[256, 384, 512] , num_attention_heads=[4, 6, 8] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0 , ),
'levit-384': ImageNetPreTrainedConfig(
hidden_sizes=[384, 512, 768] , num_attention_heads=[6, 9, 12] , depths=[4, 4, 4] , key_dim=[32, 32, 32] , drop_path_rate=0.1 , ),
}
if model_name:
convert_weight_and_push(
names_to_hidden_sizes[model_name] , __SCREAMING_SNAKE_CASE , names_to_config[model_name] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(names_to_hidden_sizes[model_name] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return config, expected_shape
if __name__ == "__main__":
UpperCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
    help='''The name of the model you wish to convert; it must be one of the supported Levit* architectures.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''levit-dump-folder/''',
type=Path,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
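# Example invocation (a sketch -- the script filename is hypothetical, and the
# conversion downloads timm weights from the Hub, so it needs network access):
#
#   python convert_levit_timm_to_pytorch.py \
#       --model_name levit-128S \
#       --pytorch_dump_folder_path levit-dump-folder/ \
#       --push_to_hub
#
# Omitting --model_name converts every checkpoint listed in names_to_config.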
| 84 |
def UpperCAmelCase_ ( __SCREAMING_SNAKE_CASE ):
lowercase = [0] * len(__SCREAMING_SNAKE_CASE )
lowercase = []
lowercase = []
lowercase = 0
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if indegree[i] == 0:
queue.append(__SCREAMING_SNAKE_CASE )
while queue:
lowercase = queue.pop(0 )
cnt += 1
topo.append(__SCREAMING_SNAKE_CASE )
for x in graph[vertex]:
indegree[x] -= 1
if indegree[x] == 0:
queue.append(__SCREAMING_SNAKE_CASE )
if cnt != len(__SCREAMING_SNAKE_CASE ):
print('Cycle exists' )
else:
print(__SCREAMING_SNAKE_CASE )
# Adjacency List of Graph
UpperCAmelCase = {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}
topological_sort(graph)
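# A quick property check (a sketch; _is_topological is a hypothetical helper,
# not part of the original module): every edge u -> v must place u before v
# in any valid topological order, such as [0, 1, 2, 3, 4, 5] for this graph.
def _is_topological(order, edges):
    position = {v: i for i, v in enumerate(order)}
    return all(position[u] < position[v] for u, vs in edges.items() for v in vs)

print(_is_topological([0, 1, 2, 3, 4, 5], {0: [1, 2], 1: [3], 2: [3], 3: [4, 5], 4: [], 5: []}))  # True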
| 84 | 1 |
def lowerCamelCase ( UpperCamelCase : int ) -> int:
if not isinstance(UpperCamelCase , UpperCamelCase ):
_lowerCamelCase = F"""Input value of [number={number}] must be an integer"""
raise TypeError(UpperCamelCase )
if number < 1:
_lowerCamelCase = F"""Input value of [number={number}] must be > 0"""
raise ValueError(UpperCamelCase )
_lowerCamelCase = 1
for i in range(1 , UpperCamelCase ):
current_number *= 4 * i - 2
current_number //= i + 1
return current_number
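# Cross-check (a sketch): the loop above encodes the recurrence
# C(i) = C(i-1) * (4*i - 2) / (i + 1) for Catalan numbers; the closed form
# C(n) = (2n)! / ((n+1)! * n!) should agree, giving 1, 1, 2, 5, 14, 42, ...
# (_catalan_closed_form is a hypothetical helper, not part of the original file).
from math import factorial

def _catalan_closed_form(n: int) -> int:
    return factorial(2 * n) // (factorial(n + 1) * factorial(n))

_c, _by_recurrence = 1, [1]
for _i in range(1, 6):
    _c = _c * (4 * _i - 2) // (_i + 1)
    _by_recurrence.append(_c)
assert _by_recurrence == [_catalan_closed_form(_n) for _n in range(6)]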
if __name__ == "__main__":
import doctest
    doctest.testmod()
| 234 |
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class lowerCAmelCase__ :
'''simple docstring'''
def __init__( self : Tuple , snake_case__ : str , snake_case__ : int = 1_3 , snake_case__ : int = 6_4 , snake_case__ : int = 2 , snake_case__ : int = 3 , snake_case__ : int = 3 , snake_case__ : bool = True , snake_case__ : bool = True , snake_case__ : int = 1_2_8 , snake_case__ : Optional[int]=[1_6, 3_2, 6_4, 1_2_8] , snake_case__ : int = 7 , snake_case__ : int = 4 , snake_case__ : int = 3_7 , snake_case__ : str = "gelu" , snake_case__ : float = 0.1 , snake_case__ : float = 0.1 , snake_case__ : int = 1_0 , snake_case__ : float = 0.02 , snake_case__ : int = 2 , snake_case__ : int = 1 , snake_case__ : int = 1_2_8 , snake_case__ : List[int] = [2, 2, 2, 2] , snake_case__ : int = 2 , snake_case__ : int = 2 , ) -> Optional[Any]:
_lowerCamelCase = parent
_lowerCamelCase = batch_size
_lowerCamelCase = image_size
_lowerCamelCase = patch_size
_lowerCamelCase = num_channels
_lowerCamelCase = is_training
_lowerCamelCase = use_labels
_lowerCamelCase = hidden_size
_lowerCamelCase = num_hidden_layers
_lowerCamelCase = num_attention_heads
_lowerCamelCase = intermediate_size
_lowerCamelCase = hidden_act
_lowerCamelCase = hidden_dropout_prob
_lowerCamelCase = attention_probs_dropout_prob
_lowerCamelCase = type_sequence_label_size
_lowerCamelCase = initializer_range
_lowerCamelCase = encoder_stride
_lowerCamelCase = num_attention_outputs
_lowerCamelCase = embed_dim
_lowerCamelCase = embed_dim + 1
_lowerCamelCase = resolution
_lowerCamelCase = depths
_lowerCamelCase = hidden_sizes
_lowerCamelCase = dim
_lowerCamelCase = mlp_expansion_ratio
def _snake_case ( self : Union[str, Any] ) -> List[str]:
_lowerCamelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_lowerCamelCase = None
if self.use_labels:
_lowerCamelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_lowerCamelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Union[str, Any] ) -> int:
return EfficientFormerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=snake_case__ , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , resolution=self.resolution , depths=self.depths , hidden_sizes=self.hidden_sizes , dim=self.dim , mlp_expansion_ratio=self.mlp_expansion_ratio , )
def _snake_case ( self : str , snake_case__ : Any , snake_case__ : Any , snake_case__ : List[str] ) -> Optional[int]:
_lowerCamelCase = TFEfficientFormerModel(config=snake_case__ )
_lowerCamelCase = model(snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : Any , snake_case__ : List[str] , snake_case__ : List[str] , snake_case__ : Tuple ) -> Optional[Any]:
_lowerCamelCase = self.type_sequence_label_size
_lowerCamelCase = TFEfficientFormerForImageClassification(snake_case__ )
_lowerCamelCase = model(snake_case__ , labels=snake_case__ , training=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_lowerCamelCase = 1
_lowerCamelCase = TFEfficientFormerForImageClassification(snake_case__ )
_lowerCamelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_lowerCamelCase = model(snake_case__ , labels=snake_case__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self : Dict ) -> List[str]:
_lowerCamelCase = self.prepare_config_and_inputs()
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase = config_and_inputs
_lowerCamelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ ,unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase_ = (
{
'feature-extraction': TFEfficientFormerModel,
'image-classification': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
def _snake_case ( self : Optional[Any] ) -> Any:
_lowerCamelCase = TFEfficientFormerModelTester(self )
_lowerCamelCase = ConfigTester(
self , config_class=snake_case__ , has_text_modality=snake_case__ , hidden_size=3_7 )
def _snake_case ( self : Dict ) -> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='EfficientFormer does not use inputs_embeds' )
def _snake_case ( self : str ) -> Union[str, Any]:
pass
@unittest.skip(reason='EfficientFormer does not support input and output embeddings' )
def _snake_case ( self : Optional[int] ) -> List[str]:
pass
def _snake_case ( self : Any ) -> List[Any]:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_lowerCamelCase = [*signature.parameters.keys()]
_lowerCamelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case__ )
def _snake_case ( self : int ) -> int:
def check_hidden_states_output(snake_case__ : List[str] , snake_case__ : Optional[Any] , snake_case__ : str ):
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
_lowerCamelCase = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_lowerCamelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case__ ) , snake_case__ )
if hasattr(self.model_tester , 'encoder_seq_length' ):
_lowerCamelCase = self.model_tester.encoder_seq_length
if hasattr(self.model_tester , 'chunk_length' ) and self.model_tester.chunk_length > 1:
_lowerCamelCase = seq_length * self.model_tester.chunk_length
else:
_lowerCamelCase = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
if config.is_encoder_decoder:
_lowerCamelCase = outputs.decoder_hidden_states
                self.assertIsInstance(snake_case__ , (list, tuple) )
self.assertEqual(len(snake_case__ ) , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'seq_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'decoder_seq_length' , snake_case__ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) , [decoder_seq_length, self.model_tester.hidden_size] , )
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_lowerCamelCase = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_lowerCamelCase = True
check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
def _snake_case ( self : Dict , snake_case__ : int , snake_case__ : str , snake_case__ : List[str]=False ) -> List[Any]:
_lowerCamelCase = super()._prepare_for_class(snake_case__ , snake_case__ , return_labels=snake_case__ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _snake_case ( self : Optional[Any] ) -> Dict:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case__ )
@unittest.skip(reason='EfficientFormer does not implement masked image modeling yet' )
def _snake_case ( self : str ) -> Tuple:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case__ )
def _snake_case ( self : Optional[Any] ) -> Optional[Any]:
_lowerCamelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case__ )
@slow
def _snake_case ( self : Union[str, Any] ) -> Any:
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_lowerCamelCase = TFEfficientFormerModel.from_pretrained(snake_case__ )
self.assertIsNotNone(snake_case__ )
def _snake_case ( self : List[Any] ) -> int:
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
_lowerCamelCase = True
_lowerCamelCase = getattr(self.model_tester , 'seq_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'encoder_seq_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'key_length' , snake_case__ )
_lowerCamelCase = getattr(self.model_tester , 'chunk_length' , snake_case__ )
if chunk_length is not None and hasattr(self.model_tester , 'num_hashes' ):
_lowerCamelCase = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
_lowerCamelCase = True
_lowerCamelCase = False
_lowerCamelCase = True
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
_lowerCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_lowerCamelCase = True
_lowerCamelCase = model_class(snake_case__ )
_lowerCamelCase = model(**self._prepare_for_class(snake_case__ , snake_case__ ) , training=snake_case__ )
_lowerCamelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(snake_case__ ) , self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] , )
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] , )
def _snake_case ( self : Any ) -> Union[str, Any]:
# We use a simplified version of this test for EfficientFormer because it requires training=False
# and Keras refuses to let us force that during functional construction
_lowerCamelCase , _lowerCamelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
_lowerCamelCase = model_class(snake_case__ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
_lowerCamelCase = {
key: tf.keras.Input(shape=val.shape[1:] , dtype=val.dtype , name=snake_case__ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
_lowerCamelCase = model(snake_case__ )
self.assertTrue(outputs_dict is not None )
def lowerCamelCase ( ) -> Optional[int]:
_lowerCamelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
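# A standalone sketch of the integration flow the tests below exercise
# (assumes network access to the Hugging Face Hub and the COCO fixture path):
if __name__ == "__main__" and is_tf_available() and is_vision_available():
    _processor = EfficientFormerImageProcessor.from_pretrained("snap-research/efficientformer-l1-300")
    _model = TFEfficientFormerForImageClassification.from_pretrained("snap-research/efficientformer-l1-300")
    _image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    _inputs = _processor(images=_image, return_tensors="tf")
    print(_model(**_inputs, training=False).logits.shape)  # expected (1, 1000)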
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : List[str] ) -> Tuple:
return (
EfficientFormerImageProcessor.from_pretrained('snap-research/efficientformer-l1-300' )
if is_vision_available()
else None
)
@slow
def _snake_case ( self : List[str] ) -> List[Any]:
_lowerCamelCase = TFEfficientFormerForImageClassification.from_pretrained('snap-research/efficientformer-l1-300' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='tf' )
# forward pass
_lowerCamelCase = model(**snake_case__ , training=snake_case__ )
# verify the logits
_lowerCamelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
_lowerCamelCase = tf.constant([-0.0555, 0.4825, -0.0852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
@slow
def _snake_case ( self : List[Any] ) -> Optional[Any]:
_lowerCamelCase = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
'snap-research/efficientformer-l1-300' )
_lowerCamelCase = self.default_image_processor
_lowerCamelCase = prepare_img()
_lowerCamelCase = image_processor(images=snake_case__ , return_tensors='tf' )
# forward pass
_lowerCamelCase = model(**snake_case__ , training=snake_case__ )
# verify the logits
_lowerCamelCase = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case__ )
_lowerCamelCase = tf.constant([-0.1312, 0.4353, -1.0499] )
        self.assertTrue(np.allclose(outputs.logits[0, :3] , snake_case__ , atol=1e-4 ) )
| 234 | 1 |
"""simple docstring"""
from collections import deque
from .hash_table import HashTable
class A_(snake_case_ ):
"""simple docstring"""
def __init__( self , *A , **A ):
super().__init__(*A , **A )
def _lowerCAmelCase ( self , A , A ):
_lowerCamelCase : Dict = deque([] ) if self.values[key] is None else self.values[key]
self.values[key].appendleft(A )
_lowerCamelCase : str = self.values[key]
def _lowerCAmelCase ( self ):
return (
sum(self.charge_factor - len(A ) for slot in self.values )
/ self.size_table
* self.charge_factor
)
def _lowerCAmelCase ( self , A , A=None ):
if not (
len(self.values[key] ) == self.charge_factor and self.values.count(A ) == 0
):
return key
return super()._collision_resolution(A , A )
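# A small illustration (a sketch) of the bucket structure used above: each slot
# holds a deque capped by the charge factor, with new items pushed to the front.
if __name__ == "__main__":
    _bucket = deque([])
    for _item in (1, 2, 3):
        _bucket.appendleft(_item)
    print(_bucket)  # deque([3, 2, 1]) -- most recent insertion first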
| 437 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _a ( unittest.TestCase ):
"""simple docstring"""
def __A ( self : List[str] ):
A_ = torch.nn.Linear(10 , 10 )
A_ = torch.optim.SGD(model.parameters() , 0.1 )
A_ = Accelerator()
A_ = accelerator.prepare(UpperCAmelCase )
try:
pickle.loads(pickle.dumps(UpperCAmelCase ) )
except Exception as e:
self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
        AcceleratorState._reset_state()
| 86 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''Helsinki-NLP/opus-mt-en-de''': '''https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json''',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''marian'''
_UpperCAmelCase = ['''past_key_values''']
_UpperCAmelCase = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self , snake_case=58101 , snake_case=None , snake_case=1024 , snake_case=12 , snake_case=4096 , snake_case=16 , snake_case=12 , snake_case=4096 , snake_case=16 , snake_case=0.0 , snake_case=0.0 , snake_case=True , snake_case=True , snake_case="gelu" , snake_case=1024 , snake_case=0.1 , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=58100 , snake_case=False , snake_case=58100 , snake_case=0 , snake_case=0 , snake_case=True , **snake_case , ) -> Dict:
_UpperCAmelCase = vocab_size
_UpperCAmelCase = decoder_vocab_size or vocab_size
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = d_model
_UpperCAmelCase = encoder_ffn_dim
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = encoder_attention_heads
_UpperCAmelCase = decoder_ffn_dim
_UpperCAmelCase = decoder_layers
_UpperCAmelCase = decoder_attention_heads
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = activation_function
_UpperCAmelCase = init_std
_UpperCAmelCase = encoder_layerdrop
_UpperCAmelCase = decoder_layerdrop
_UpperCAmelCase = use_cache
_UpperCAmelCase = encoder_layers
_UpperCAmelCase = scale_embedding # scale factor will be sqrt(d_model) if True
_UpperCAmelCase = share_encoder_decoder_embeddings
super().__init__(
pad_token_id=snake_case , eos_token_id=snake_case , is_encoder_decoder=snake_case , decoder_start_token_id=snake_case , forced_eos_token_id=snake_case , **snake_case , )
class lowercase__ ( A ):
'''simple docstring'''
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_UpperCAmelCase = {0: 'batch'}
_UpperCAmelCase = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
_UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
_UpperCAmelCase = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(snake_case , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
_UpperCAmelCase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
_UpperCAmelCase , _UpperCAmelCase = self.num_layers
for i in range(snake_case ):
_UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
_UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
else:
_UpperCAmelCase = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = super().outputs
else:
_UpperCAmelCase = super(snake_case , self ).outputs
if self.use_past:
_UpperCAmelCase , _UpperCAmelCase = self.num_layers
for i in range(snake_case ):
_UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
_UpperCAmelCase = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]:
_UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case , snake_case , snake_case , snake_case , snake_case )
# Generate decoder inputs
_UpperCAmelCase = seq_length if not self.use_past else 1
_UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case , snake_case , snake_case , snake_case , snake_case )
_UpperCAmelCase = {f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
_UpperCAmelCase = dict(**snake_case , **snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_UpperCAmelCase , _UpperCAmelCase = common_inputs['input_ids'].shape
_UpperCAmelCase = common_inputs['decoder_input_ids'].shape[1]
_UpperCAmelCase , _UpperCAmelCase = self.num_attention_heads
_UpperCAmelCase = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_UpperCAmelCase = decoder_seq_length + 3
_UpperCAmelCase = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
_UpperCAmelCase = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(snake_case , snake_case )] , dim=1 )
_UpperCAmelCase = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
_UpperCAmelCase , _UpperCAmelCase = self.num_layers
_UpperCAmelCase = min(snake_case , snake_case )
_UpperCAmelCase = max(snake_case , snake_case ) - min_num_layers
_UpperCAmelCase = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(snake_case ):
common_inputs["past_key_values"].append(
(
torch.zeros(snake_case ),
torch.zeros(snake_case ),
torch.zeros(snake_case ),
torch.zeros(snake_case ),
) )
# TODO: test this.
_UpperCAmelCase = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(snake_case , snake_case ):
common_inputs["past_key_values"].append((torch.zeros(snake_case ), torch.zeros(snake_case )) )
return common_inputs
def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]:
_UpperCAmelCase = self._generate_dummy_inputs_for_encoder_and_decoder(
snake_case , snake_case , snake_case , snake_case , snake_case )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
_UpperCAmelCase , _UpperCAmelCase = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
_UpperCAmelCase = seqlen + 2
_UpperCAmelCase , _UpperCAmelCase = self.num_layers
_UpperCAmelCase , _UpperCAmelCase = self.num_attention_heads
_UpperCAmelCase = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
_UpperCAmelCase = common_inputs['attention_mask'].dtype
_UpperCAmelCase = torch.cat(
[common_inputs['attention_mask'], torch.ones(snake_case , snake_case , dtype=snake_case )] , dim=1 )
_UpperCAmelCase = [
(torch.zeros(snake_case ), torch.zeros(snake_case )) for _ in range(snake_case )
]
return common_inputs
def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
_UpperCAmelCase = compute_effective_axis_dimension(
snake_case , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
_UpperCAmelCase = tokenizer.num_special_tokens_to_add(snake_case )
_UpperCAmelCase = compute_effective_axis_dimension(
snake_case , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=snake_case )
# Generate dummy inputs according to compute batch and sequence
_UpperCAmelCase = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
_UpperCAmelCase = dict(tokenizer(snake_case , return_tensors=snake_case ) )
return common_inputs
def lowerCamelCase_ ( self , snake_case , snake_case = -1 , snake_case = -1 , snake_case = False , snake_case = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case )
else:
_UpperCAmelCase = self._generate_dummy_inputs_for_causal_lm(
snake_case , batch_size=snake_case , seq_length=snake_case , is_pair=snake_case , framework=snake_case )
return common_inputs
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case ) -> str:
if self.task in ["default", "seq2seq-lm"]:
_UpperCAmelCase = super()._flatten_past_key_values_(snake_case , snake_case , snake_case , snake_case )
else:
_UpperCAmelCase = super(snake_case , self )._flatten_past_key_values_(
snake_case , snake_case , snake_case , snake_case )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
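# A quick illustration (a sketch; the behaviour described is an assumption about
# the imported utility) of the dynamic-axis handling used above: a dynamic axis
# (-1) falls back to the fixed default before subtracting the special tokens the
# tokenizer will add, so ONNX export never traces a degenerate shape.
if __name__ == "__main__":
    print(compute_effective_axis_dimension(-1, fixed_dimension=8, num_token_to_add=2))  # 6
    print(compute_effective_axis_dimension(16, fixed_dimension=8, num_token_to_add=2))  # 14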
| 708 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def UpperCAmelCase ( A : int , A : int , A : int ):
'''simple docstring'''
if a == 0:
raise ValueError('Coefficient \'a\' must not be zero.' )
_UpperCAmelCase = b * b - 4 * a * c
_UpperCAmelCase = (-b + sqrt(A )) / (2 * a)
_UpperCAmelCase = (-b - sqrt(A )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase , _UpperCAmelCase = quadratic_roots(a=5 , b=6 , c=1 )
print(f'The solutions are: {solutiona} and {solutiona}' )
if __name__ == "__main__":
main()
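# Worked example: for 5x^2 + 6x + 1 the discriminant is b^2 - 4ac = 36 - 20 = 16,
# giving roots (-6 +/- 4) / 10 = -0.2 and -1.0; both are real, so the function
# strips the zero imaginary part and returns plain floats. A complex case for
# contrast (a sketch, reusing the cmath sqrt imported above):
print((0 + sqrt(0 - 4 * 1 * 4)) / 2)  # x^2 + 4 = 0 -> 2j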
| 24 | 0 |
def lowercase_ (A : Union[str, Any] , A : List[str] ):
snake_case__ : Dict = [1]
for i in range(2 , A ):
factorials.append(factorials[-1] * i )
assert 0 <= k < factorials[-1] * n, "k out of bounds"
snake_case__ : str = []
snake_case__ : Optional[int] = list(range(A ) )
# Find permutation
while factorials:
snake_case__ : List[str] = factorials.pop()
snake_case__ , snake_case__ : Tuple = divmod(A , A )
permutation.append(elements[number] )
elements.remove(elements[number] )
permutation.append(elements[0] )
return permutation
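# Sanity check (a sketch): itertools.permutations enumerates permutations of
# range(n) in lexicographic order, which is exactly the ranking the factorial
# number system above decodes -- so the k-th entry should line up.
import itertools

_reference = [list(p) for p in itertools.permutations(range(3))]
print(_reference[4])  # n = 3, k = 4 -> [2, 0, 1]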
if __name__ == "__main__":
import doctest
doctest.testmod()
| 478 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a_ :int = logging.get_logger(__name__)
class snake_case__ ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = """maskformer-swin"""
_SCREAMING_SNAKE_CASE = {
"""num_attention_heads""": """num_heads""",
"""num_hidden_layers""": """num_layers""",
}
def __init__( self : Optional[Any], _snake_case : Dict=2_2_4, _snake_case : Optional[Any]=4, _snake_case : Dict=3, _snake_case : int=9_6, _snake_case : int=[2, 2, 6, 2], _snake_case : int=[3, 6, 1_2, 2_4], _snake_case : Tuple=7, _snake_case : Tuple=4.0, _snake_case : int=True, _snake_case : Union[str, Any]=0.0, _snake_case : Tuple=0.0, _snake_case : Dict=0.1, _snake_case : Optional[int]="gelu", _snake_case : List[str]=False, _snake_case : Union[str, Any]=0.0_2, _snake_case : int=1e-5, _snake_case : Any=None, _snake_case : Tuple=None, **_snake_case : int, ) ->Optional[int]:
super().__init__(**_snake_case )
snake_case__ : List[Any] = image_size
snake_case__ : Tuple = patch_size
snake_case__ : Tuple = num_channels
snake_case__ : Union[str, Any] = embed_dim
snake_case__ : Dict = depths
snake_case__ : Optional[Any] = len(_snake_case )
snake_case__ : Optional[int] = num_heads
snake_case__ : Union[str, Any] = window_size
snake_case__ : Tuple = mlp_ratio
snake_case__ : Tuple = qkv_bias
snake_case__ : int = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Optional[Any] = drop_path_rate
snake_case__ : List[str] = hidden_act
snake_case__ : Tuple = use_absolute_embeddings
snake_case__ : Tuple = layer_norm_eps
snake_case__ : Optional[int] = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
snake_case__ : int = int(embed_dim * 2 ** (len(_snake_case ) - 1) )
snake_case__ : int = ['stem'] + [F'''stage{idx}''' for idx in range(1, len(_snake_case ) + 1 )]
snake_case__ , snake_case__ : int = get_aligned_output_features_output_indices(
out_features=_snake_case, out_indices=_snake_case, stage_names=self.stage_names )
| 478 | 1 |
import unittest
from diffusers import FlaxAutoencoderKL
from diffusers.utils import is_flax_available
from diffusers.utils.testing_utils import require_flax
from .test_modeling_common_flax import FlaxModelTesterMixin
if is_flax_available():
import jax
@require_flax
class __SCREAMING_SNAKE_CASE( SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
_UpperCAmelCase = FlaxAutoencoderKL
@property
def lowerCAmelCase_ ( self: Dict ) -> List[Any]:
snake_case__ = 4
snake_case__ = 3
snake_case__ = (32, 32)
snake_case__ = jax.random.PRNGKey(0 )
snake_case__ = jax.random.uniform(snake_case__ , ((batch_size, num_channels) + sizes) )
return {"sample": image, "prng_key": prng_key}
def lowerCAmelCase_ ( self: str ) -> List[Any]:
snake_case__ = {
"block_out_channels": [32, 64],
"in_channels": 3,
"out_channels": 3,
"down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
"up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
"latent_channels": 4,
}
snake_case__ = self.dummy_input
return init_dict, inputs_dict
| 719 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : str = OrderedDict(
[
("""align""", """EfficientNetImageProcessor"""),
("""beit""", """BeitImageProcessor"""),
("""bit""", """BitImageProcessor"""),
("""blip""", """BlipImageProcessor"""),
("""blip-2""", """BlipImageProcessor"""),
("""bridgetower""", """BridgeTowerImageProcessor"""),
("""chinese_clip""", """ChineseCLIPImageProcessor"""),
("""clip""", """CLIPImageProcessor"""),
("""clipseg""", """ViTImageProcessor"""),
("""conditional_detr""", """ConditionalDetrImageProcessor"""),
("""convnext""", """ConvNextImageProcessor"""),
("""convnextv2""", """ConvNextImageProcessor"""),
("""cvt""", """ConvNextImageProcessor"""),
("""data2vec-vision""", """BeitImageProcessor"""),
("""deformable_detr""", """DeformableDetrImageProcessor"""),
("""deit""", """DeiTImageProcessor"""),
("""deta""", """DetaImageProcessor"""),
("""detr""", """DetrImageProcessor"""),
("""dinat""", """ViTImageProcessor"""),
("""donut-swin""", """DonutImageProcessor"""),
("""dpt""", """DPTImageProcessor"""),
("""efficientformer""", """EfficientFormerImageProcessor"""),
("""efficientnet""", """EfficientNetImageProcessor"""),
("""flava""", """FlavaImageProcessor"""),
("""focalnet""", """BitImageProcessor"""),
("""git""", """CLIPImageProcessor"""),
("""glpn""", """GLPNImageProcessor"""),
("""groupvit""", """CLIPImageProcessor"""),
("""imagegpt""", """ImageGPTImageProcessor"""),
("""instructblip""", """BlipImageProcessor"""),
("""layoutlmv2""", """LayoutLMv2ImageProcessor"""),
("""layoutlmv3""", """LayoutLMv3ImageProcessor"""),
("""levit""", """LevitImageProcessor"""),
("""mask2former""", """Mask2FormerImageProcessor"""),
("""maskformer""", """MaskFormerImageProcessor"""),
("""mgp-str""", """ViTImageProcessor"""),
("""mobilenet_v1""", """MobileNetV1ImageProcessor"""),
("""mobilenet_v2""", """MobileNetV2ImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevit""", """MobileViTImageProcessor"""),
("""mobilevitv2""", """MobileViTImageProcessor"""),
("""nat""", """ViTImageProcessor"""),
("""oneformer""", """OneFormerImageProcessor"""),
("""owlvit""", """OwlViTImageProcessor"""),
("""perceiver""", """PerceiverImageProcessor"""),
("""pix2struct""", """Pix2StructImageProcessor"""),
("""poolformer""", """PoolFormerImageProcessor"""),
("""regnet""", """ConvNextImageProcessor"""),
("""resnet""", """ConvNextImageProcessor"""),
("""sam""", """SamImageProcessor"""),
("""segformer""", """SegformerImageProcessor"""),
("""swiftformer""", """ViTImageProcessor"""),
("""swin""", """ViTImageProcessor"""),
("""swin2sr""", """Swin2SRImageProcessor"""),
("""swinv2""", """ViTImageProcessor"""),
("""table-transformer""", """DetrImageProcessor"""),
("""timesformer""", """VideoMAEImageProcessor"""),
("""tvlt""", """TvltImageProcessor"""),
("""upernet""", """SegformerImageProcessor"""),
("""van""", """ConvNextImageProcessor"""),
("""videomae""", """VideoMAEImageProcessor"""),
("""vilt""", """ViltImageProcessor"""),
("""vit""", """ViTImageProcessor"""),
("""vit_hybrid""", """ViTHybridImageProcessor"""),
("""vit_mae""", """ViTImageProcessor"""),
("""vit_msn""", """ViTImageProcessor"""),
("""xclip""", """CLIPImageProcessor"""),
("""yolos""", """YolosImageProcessor"""),
]
)
__UpperCamelCase : Union[str, Any] = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def a_ ( _A ) -> Optional[int]:
"""simple docstring"""
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
snake_case__ = model_type_to_module_name(_A )
snake_case__ = importlib.import_module(f'''.{module_name}''' , 'transformers.models' )
try:
return getattr(_A , _A )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(_A , '__name__' , _A ) == class_name:
return extractor
# We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
snake_case__ = importlib.import_module('transformers' )
if hasattr(_A , _A ):
return getattr(_A , _A )
return None
def a_ ( _A , _A = None , _A = False , _A = False , _A = None , _A = None , _A = None , _A = False , **_A , ) -> Optional[Any]:
"""simple docstring"""
snake_case__ = get_file_from_repo(
_A , _A , cache_dir=_A , force_download=_A , resume_download=_A , proxies=_A , use_auth_token=_A , revision=_A , local_files_only=_A , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(_A , encoding='utf-8' ) as reader:
return json.load(_A )
class __SCREAMING_SNAKE_CASE:
def __init__( self: Optional[int] ) -> Union[str, Any]:
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(UpperCamelCase )
def lowerCAmelCase_ ( cls: int , UpperCamelCase: int , **UpperCamelCase: str ) -> Optional[Any]:
snake_case__ = kwargs.pop('config' , UpperCamelCase )
snake_case__ = kwargs.pop('trust_remote_code' , UpperCamelCase )
snake_case__ = True
snake_case__ , snake_case__ = ImageProcessingMixin.get_image_processor_dict(UpperCamelCase , **UpperCamelCase )
snake_case__ = config_dict.get('image_processor_type' , UpperCamelCase )
snake_case__ = None
if "AutoImageProcessor" in config_dict.get('auto_map' , {} ):
snake_case__ = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
snake_case__ = config_dict.pop('feature_extractor_type' , UpperCamelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
snake_case__ = feature_extractor_class.replace('FeatureExtractor' , 'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' , {} ):
snake_case__ = config_dict['auto_map']['AutoFeatureExtractor']
snake_case__ = feature_extractor_auto_map.replace('FeatureExtractor' , 'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(UpperCamelCase , UpperCamelCase ):
snake_case__ = AutoConfig.from_pretrained(UpperCamelCase , **UpperCamelCase )
# It could be in `config.image_processor_type``
snake_case__ = getattr(UpperCamelCase , 'image_processor_type' , UpperCamelCase )
if hasattr(UpperCamelCase , 'auto_map' ) and "AutoImageProcessor" in config.auto_map:
snake_case__ = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
snake_case__ = image_processor_class_from_name(UpperCamelCase )
snake_case__ = image_processor_auto_map is not None
snake_case__ = image_processor_class is not None or type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING
snake_case__ = resolve_trust_remote_code(
UpperCamelCase , UpperCamelCase , UpperCamelCase , UpperCamelCase )
if has_remote_code and trust_remote_code:
snake_case__ = get_class_from_dynamic_module(
UpperCamelCase , UpperCamelCase , **UpperCamelCase )
snake_case__ = kwargs.pop('code_revision' , UpperCamelCase )
if os.path.isdir(UpperCamelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(UpperCamelCase ) in IMAGE_PROCESSOR_MAPPING:
snake_case__ = IMAGE_PROCESSOR_MAPPING[type(UpperCamelCase )]
return image_processor_class.from_dict(UpperCamelCase , **UpperCamelCase )
raise ValueError(
F'''Unrecognized image processor in {pretrained_model_name_or_path}. Should have a '''
F'''`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following '''
F'''`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}''' )
@staticmethod
def lowerCAmelCase_ ( UpperCamelCase: Optional[Any] , UpperCamelCase: int ) -> Optional[Any]:
IMAGE_PROCESSOR_MAPPING.register(UpperCamelCase , UpperCamelCase )
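# Typical usage (a sketch; resolves the mapping above via the checkpoint's
# config, so it needs network access to the Hub):
if __name__ == "__main__":
    from transformers import AutoImageProcessor as _AutoImageProcessor

    _proc = _AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
    print(type(_proc).__name__)  # ViTImageProcessor, per the "vit" entry above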
| 372 | 0 |
'''simple docstring'''
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __UpperCAmelCase ( _UpperCAmelCase : str ) -> Optional[int]:
return 1 / (1 + np.exp(-z ))
def __UpperCAmelCase ( _UpperCAmelCase : Tuple , _UpperCAmelCase : Dict ) -> List[str]:
return (-y * np.log(_UpperCAmelCase ) - (1 - y) * np.log(1 - h )).mean()
def __UpperCAmelCase ( _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : List[Any] ) -> Optional[Any]:
__snake_case = np.dot(_UpperCAmelCase , _UpperCAmelCase )
return np.sum(y * scores - np.log(1 + np.exp(_UpperCAmelCase ) ) )
def __UpperCAmelCase ( _UpperCAmelCase : List[Any] , _UpperCAmelCase : str , _UpperCAmelCase : Dict , _UpperCAmelCase : List[str]=7_00_00 ) -> Union[str, Any]:
__snake_case = np.zeros(x.shape[1] )
for iterations in range(_UpperCAmelCase ):
__snake_case = np.dot(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = sigmoid_function(_UpperCAmelCase )
__snake_case = np.dot(x.T , h - y ) / y.size
__snake_case = theta - alpha * gradient # updating the weights
__snake_case = np.dot(_UpperCAmelCase , _UpperCAmelCase )
__snake_case = sigmoid_function(_UpperCAmelCase )
__snake_case = cost_function(_UpperCAmelCase , _UpperCAmelCase )
if iterations % 1_00 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
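# One gradient step in isolation (a self-contained sketch; _one_gradient_step
# is a hypothetical helper): the gradient of the mean logistic log-loss is
# X^T (sigmoid(X theta) - y) / m, matching the np.dot(x.T, h - y) / y.size
# expression used in the loop above.
def _one_gradient_step(x, y, theta, alpha):
    h = 1 / (1 + np.exp(-np.dot(x, theta)))
    return theta - alpha * np.dot(x.T, h - y) / y.size

print(_one_gradient_step(np.array([[0.0, 1.0], [1.0, 0.0]]), np.array([0, 1]), np.zeros(2), 0.1))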
# In[68]:
if __name__ == "__main__":
a : int = datasets.load_iris()
a : int = iris.data[:, :2]
a : Optional[Any] = (iris.target != 0) * 1
a : Tuple = 0.1
a : List[str] = logistic_reg(alpha, x, y, max_iterations=70_000)
print('''theta: ''', theta) # printing the theta i.e our weights vector
def __UpperCAmelCase ( _UpperCAmelCase : Optional[int] ) -> Union[str, Any]:
return sigmoid_function(
np.dot(_UpperCAmelCase , _UpperCAmelCase ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='''b''', label='''0''')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='''r''', label='''1''')
((a) , (a)) : Any = (x[:, 0].min(), x[:, 0].max())
((a) , (a)) : Any = (x[:, 1].min(), x[:, 1].max())
((a) , (a)) : Any = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a : Optional[Any] = np.c_[xxa.ravel(), xxa.ravel()]
a : List[Any] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='''black''')
plt.legend()
plt.show()
| 69 |
from collections import defaultdict
from math import ceil, sqrt
def lowerCAmelCase_ ( _snake_case : int = 1000000 , _snake_case : int = 10 ) -> int:
'''simple docstring'''
__magic_name__ : defaultdict = defaultdict(_snake_case )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
__magic_name__ : Optional[int] = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
__magic_name__ : Optional[int] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(_snake_case , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F"{solution() = }")
| 124 | 0 |
'''simple docstring'''
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ : Any = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ : Any = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
"""gpt2""": 10_24,
"""gpt2-medium""": 10_24,
"""gpt2-large""": 10_24,
"""gpt2-xl""": 10_24,
"""distilgpt2""": 10_24,
}
class __lowerCAmelCase( lowerCAmelCase__ ):
__snake_case : Optional[Any] = VOCAB_FILES_NAMES
__snake_case : str = PRETRAINED_VOCAB_FILES_MAP
__snake_case : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__snake_case : List[str] = ['input_ids', 'attention_mask']
__snake_case : str = GPTaTokenizer
def __init__( self : Tuple , SCREAMING_SNAKE_CASE : List[Any]=None , SCREAMING_SNAKE_CASE : List[str]=None , SCREAMING_SNAKE_CASE : Any=None , SCREAMING_SNAKE_CASE : Tuple="<|endoftext|>" , SCREAMING_SNAKE_CASE : List[Any]="<|endoftext|>" , SCREAMING_SNAKE_CASE : List[str]="<|endoftext|>" , SCREAMING_SNAKE_CASE : Optional[int]=False , **SCREAMING_SNAKE_CASE : Optional[int] , ):
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , tokenizer_file=SCREAMING_SNAKE_CASE , unk_token=SCREAMING_SNAKE_CASE , bos_token=SCREAMING_SNAKE_CASE , eos_token=SCREAMING_SNAKE_CASE , add_prefix_space=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
SCREAMING_SNAKE_CASE_ :Optional[Any] = kwargs.pop('add_bos_token' , SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , SCREAMING_SNAKE_CASE ) != add_prefix_space:
SCREAMING_SNAKE_CASE_ :str = getattr(SCREAMING_SNAKE_CASE , pre_tok_state.pop('type' ) )
SCREAMING_SNAKE_CASE_ :Union[str, Any] = add_prefix_space
SCREAMING_SNAKE_CASE_ :int = pre_tok_class(**SCREAMING_SNAKE_CASE )
SCREAMING_SNAKE_CASE_ :Any = add_prefix_space
def _lowercase ( self : Optional[int] , *SCREAMING_SNAKE_CASE : Tuple , **SCREAMING_SNAKE_CASE : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :int = kwargs.get('is_split_into_words' , SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def _lowercase ( self : List[str] , *SCREAMING_SNAKE_CASE : Union[str, Any] , **SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :str = kwargs.get('is_split_into_words' , SCREAMING_SNAKE_CASE )
assert self.add_prefix_space or not is_split_into_words, (
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
def _lowercase ( self : Dict , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[str] = None ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Union[str, Any] = self._tokenizer.model.save(SCREAMING_SNAKE_CASE , name=SCREAMING_SNAKE_CASE )
return tuple(SCREAMING_SNAKE_CASE )
def _lowercase ( self : List[Any] , SCREAMING_SNAKE_CASE : "Conversation" ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ :Tuple = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(SCREAMING_SNAKE_CASE , add_special_tokens=SCREAMING_SNAKE_CASE ) + [self.eos_token_id] )
if len(SCREAMING_SNAKE_CASE ) > self.model_max_length:
SCREAMING_SNAKE_CASE_ :Optional[Any] = input_ids[-self.model_max_length :]
return input_ids
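# Typical usage (a sketch, assuming the `transformers` package is installed and
# the Hub is reachable; AutoTokenizer resolves "gpt2" to this fast tokenizer):
if __name__ == "__main__":
    from transformers import AutoTokenizer

    _tok = AutoTokenizer.from_pretrained("gpt2")
    print(_tok("Hello world")["input_ids"])  # e.g. [15496, 995]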
| 233 |
'''simple docstring'''
from math import sqrt
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE = 100_0000 ):
SCREAMING_SNAKE_CASE_ :int = 0
SCREAMING_SNAKE_CASE_ :int = 0
SCREAMING_SNAKE_CASE_ :int
while num_cuboids <= limit:
max_cuboid_size += 1
for sum_shortest_sides in range(2 , 2 * max_cuboid_size + 1 ):
if sqrt(sum_shortest_sides**2 + max_cuboid_size**2 ).is_integer():
num_cuboids += (
min(SCREAMING_SNAKE_CASE , sum_shortest_sides // 2 )
- max(1 , sum_shortest_sides - max_cuboid_size )
+ 1
)
return max_cuboid_size
if __name__ == "__main__":
print(f"""{solution() = }""")
| 233 | 1 |
from __future__ import annotations
import math
def minimax(depth: int, node_index: int, is_max: bool, scores: list[int], height: float) -> int:
    """Return the optimal value for the player to move in a complete game tree of leaf scores."""
    if depth < 0:
        raise ValueError("Depth cannot be less than 0")
    if not scores:
        raise ValueError("Scores cannot be empty")
    if depth == height:
        return scores[node_index]
    return (
        max(
            minimax(depth + 1, node_index * 2, False, scores, height),
            minimax(depth + 1, node_index * 2 + 1, False, scores, height),
        )
        if is_max
        else min(
            minimax(depth + 1, node_index * 2, True, scores, height),
            minimax(depth + 1, node_index * 2 + 1, True, scores, height),
        )
    )


def main() -> None:
    scores = [90, 23, 6, 33, 21, 65, 123, 34423]
    height = math.log(len(scores), 2)
    print(f"Optimal value : {minimax(0, 0, True, scores, height)}")
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 540 |
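# A hedged usage sketch for the minimax above (assumes the function is in
# scope, e.g. imported from the file): the classic 8-leaf example in which
# the maximizer moves first and the optimal value works out to 12.
import math

leaf_scores = [3, 5, 2, 9, 12, 5, 23, 23]
tree_height = math.log2(len(leaf_scores))
print(minimax(0, 0, True, leaf_scores, tree_height))  # 12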
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama"] = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_llama_fast"] = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_llama"] = [
        "LlamaForCausalLM",
        "LlamaModel",
        "LlamaPreTrainedModel",
        "LlamaForSequenceClassification",
    ]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 540 | 1 |
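# What the _LazyModule wiring above buys: importing the package stays cheap,
# and heavy submodules load only on first attribute access. A minimal PEP 562
# sketch of the same idea (this is NOT the actual _LazyModule implementation;
# the names below are illustrative):
import importlib
from typing import Any

_structure = {"tokenization_llama": ["LlamaTokenizer"]}


def __getattr__(name: str) -> Any:
    for submodule, symbols in _structure.items():
        if name in symbols:
            module = importlib.import_module(f".{submodule}", __name__)
            return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")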
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCamelCase : Union[str, Any] =logging.get_logger(__name__)
_UpperCamelCase : List[str] ={"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class OpenAIGPTConfig(PretrainedConfig):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = 'openai-gpt'
SCREAMING_SNAKE_CASE_ = {
'max_position_embeddings': 'n_positions',
'hidden_size': 'n_embd',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
    def __init__(self, vocab_size=40478, n_positions=512, n_embd=768, n_layer=12, n_head=12, afn="gelu", resid_pdrop=0.1, embd_pdrop=0.1, attn_pdrop=0.1, layer_norm_epsilon=1e-5, initializer_range=0.02, summary_type="cls_index", summary_use_proj=True, summary_activation=None, summary_proj_to_labels=True, summary_first_dropout=0.1, **kwargs):
        self.vocab_size = vocab_size
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.afn = afn
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.summary_type = summary_type
        self.summary_use_proj = summary_use_proj
        self.summary_activation = summary_activation
        self.summary_first_dropout = summary_first_dropout
        self.summary_proj_to_labels = summary_proj_to_labels
        super().__init__(**kwargs)
| 706 |
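# A hedged usage sketch for the config class above: defaults can be
# overridden at construction time, and (assuming transformers' usual
# PretrainedConfig attribute_map behavior) the canonical names resolve to
# the GPT-style ones declared in the class.
config = OpenAIGPTConfig(n_layer=6, n_head=8)
print(config.n_embd)             # 768, the default
print(config.num_hidden_layers)  # 6, mapped onto n_layer via attribute_map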
'''simple docstring'''
def infix_2_postfix(infix):
    stack = []
    post_fix = []
    priority = {
        "^": 3,
        "*": 2,
        "/": 2,
        "%": 2,
        "+": 1,
        "-": 1,
    }  # Priority of each operator
    print_width = len(infix) if (len(infix) > 7) else 7

    # Print table header for output
    print(
        "Symbol".center(8),
        "Stack".center(print_width),
        "Postfix".center(print_width),
        sep=" | ",
    )
    print("-" * (print_width * 3 + 7))

    for x in infix:
        if x.isalpha() or x.isdigit():
            post_fix.append(x)  # if x is Alphabet / Digit, add it to Postfix
        elif x == "(":
            stack.append(x)  # if x is "(" push to Stack
        elif x == ")":  # if x is ")" pop stack until "(" is encountered
            while stack[-1] != "(":
                post_fix.append(stack.pop())  # Pop stack & add the content to Postfix
            stack.pop()
        else:
            if len(stack) == 0:
                stack.append(x)  # If stack is empty, push x to stack
            else:  # while priority of x is not > priority of element in the stack
                while len(stack) > 0 and priority[x] <= priority[stack[-1]]:
                    post_fix.append(stack.pop())  # pop stack & add to Postfix
                stack.append(x)  # push x to stack
        print(
            x.center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    while len(stack) > 0:  # while stack is not empty
        post_fix.append(stack.pop())  # pop stack & add to Postfix
        print(
            " ".center(8),
            ("".join(stack)).ljust(print_width),
            ("".join(post_fix)).ljust(print_width),
            sep=" | ",
        )  # Output in tabular format

    return "".join(post_fix)  # return Postfix as str


def infix_2_prefix(infix):
    infix = list(infix[::-1])  # reverse the infix equation

    for i in range(len(infix)):
        if infix[i] == "(":
            infix[i] = ")"  # change "(" to ")"
        elif infix[i] == ")":
            infix[i] = "("  # change ")" to "("

    return (infix_2_postfix("".join(infix)))[::-1]  # reverse of Postfix is Prefix


if __name__ == "__main__":
    Infix = input("\nEnter an Infix Equation = ")  # Input an Infix equation
    Infix = "".join(Infix.split())  # Remove spaces from the input
    print("\n\t", Infix, "(Infix) -> ", infix_2_prefix(Infix), "(Prefix)")
| 575 | 0 |
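# A quick check of the converters above (the tabular trace they print is
# omitted here); operator priorities follow the table defined in the file.
print(infix_2_postfix("a+b*c"))  # abc*+
print(infix_2_prefix("a+b*c"))   # +a*bc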
"""simple docstring"""
import math
from numpy import inf
from scipy.integrate import quad
def gamma(num: float) -> float:
    """Compute gamma(num) = integral_0^inf x**(num - 1) * exp(-x) dx via scipy quadrature."""
    if num <= 0:
        raise ValueError("math domain error")
    return quad(integrand, 0, inf, args=(num,))[0]


def integrand(x: float, z: float) -> float:
    return math.pow(x, z - 1) * math.exp(-x)
if __name__ == "__main__":
from doctest import testmod
testmod()
| 482 |
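# A hedged numerical check for the quadrature-based gamma above: for positive
# integers gamma(n) == (n - 1)!, and the result should agree with math.gamma
# up to quadrature tolerance.
import math

print(gamma(5))  # ~24.0, since gamma(5) = 4!
assert abs(gamma(4.5) - math.gamma(4.5)) < 1e-5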
"""simple docstring"""
# Usage:
# ./gen-card-allenai-wmt16.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang, model_name) -> None:
    texts = {
'''en''': '''Machine learning is great, isn\'t it?''',
'''ru''': '''Машинное обучение - это здорово, не так ли?''',
'''de''': '''Maschinelles Lernen ist großartig, nicht wahr?''',
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
    scores = {
'''wmt16-en-de-dist-12-1''': [28.3, 27.52],
'''wmt16-en-de-dist-6-1''': [27.4, 27.11],
'''wmt16-en-de-12-1''': [26.9, 25.75],
}
    pair = f"{src_lang}-{tgt_lang}"
    readme = f"""
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt16
- allenai
license: apache-2.0
datasets:
- wmt16
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of fairseq-based [wmt16 transformer](https://github.com/jungokasai/deep-shallow/) for {src_lang}-{tgt_lang}.
For more details, please, see [Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation](https://arxiv.org/abs/2006.10369).
All 3 models are available:
* [wmt16-en-de-dist-12-1](https://huggingface.co/allenai/wmt16-en-de-dist-12-1)
* [wmt16-en-de-dist-6-1](https://huggingface.co/allenai/wmt16-en-de-dist-6-1)
* [wmt16-en-de-12-1](https://huggingface.co/allenai/wmt16-en-de-12-1)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = \"allenai/{model_name}\"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = \"{texts[src_lang]}\"
input_ids = tokenizer.encode(input, return_tensors=\"pt\")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
## Training data
Pretrained weights were left identical to the original model released by allenai. For more details, please, see the [paper](https://arxiv.org/abs/2006.10369).
## Eval results
Here are the BLEU scores:
model | fairseq | transformers
-------|---------|----------
{model_name} | {scores[model_name][0]} | {scores[model_name][1]}
The score is slightly below the score reported in the paper, as the researchers don't use `sacrebleu` and measure the score on tokenized outputs. `transformers` score was measured using `sacrebleu` on detokenized outputs.
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=5
mkdir -p $DATA_DIR
sacrebleu -t wmt16 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt16 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py allenai/{model_name} $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
## Data Sources
- [training, etc.](http://www.statmt.org/wmt16/)
- [test set](http://matrix.statmt.org/test_sets/newstest2016.tgz?1504722372)
### BibTeX entry and citation info
```
@misc{{kasai2020deep,
title={{Deep Encoder, Shallow Decoder: Reevaluating the Speed-Quality Tradeoff in Machine Translation}},
author={{Jungo Kasai and Nikolaos Pappas and Hao Peng and James Cross and Noah A. Smith}},
year={{2020}},
eprint={{2006.10369}},
archivePrefix={{arXiv}},
primaryClass={{cs.CL}}
}}
```
"""
    model_card_dir.mkdir(parents=True, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"
for model_name in ["wmt16-en-de-dist-12-1", "wmt16-en-de-dist-6-1", "wmt16-en-de-12-1"]:
    model_card_dir = model_cards_dir / "allenai" / model_name
write_model_card(model_card_dir, src_lang="en", tgt_lang="de", model_name=model_name)
| 482 | 1 |
'''simple docstring'''
from collections.abc import Sequence
from queue import Queue
class SegmentTreeNode:
    def __init__(self, start, end, val, left=None, right=None):
        self.start = start
        self.end = end
        self.val = val
        self.mid = (start + end) // 2
        self.left = left
        self.right = right

    def __repr__(self):
        return f"SegmentTreeNode(start={self.start}, end={self.end}, val={self.val})"


class SegmentTree:
    def __init__(self, collection: Sequence, function):
        self.collection = collection
        self.fn = function
        if self.collection:
            self.root = self._build_tree(0, len(collection) - 1)

    def update(self, i, val):
        """Update the element at index i in O(log N)."""
        self._update_tree(self.root, i, val)

    def query_range(self, i, j):
        """Combine the values on the inclusive range [i, j] in O(log N)."""
        return self._query_range(self.root, i, j)

    def _build_tree(self, start, end):
        if start == end:
            return SegmentTreeNode(start, end, self.collection[start])
        mid = (start + end) // 2
        left = self._build_tree(start, mid)
        right = self._build_tree(mid + 1, end)
        return SegmentTreeNode(start, end, self.fn(left.val, right.val), left, right)

    def _update_tree(self, node, i, val):
        if node.start == i and node.end == i:
            node.val = val
            return
        if i <= node.mid:
            self._update_tree(node.left, i, val)
        else:
            self._update_tree(node.right, i, val)
        node.val = self.fn(node.left.val, node.right.val)

    def _query_range(self, node, i, j):
        if node.start == i and node.end == j:
            return node.val
        if i <= node.mid:
            if j <= node.mid:
                # range in left child tree
                return self._query_range(node.left, i, j)
            else:
                # range in left child tree and right child tree
                return self.fn(
                    self._query_range(node.left, i, node.mid),
                    self._query_range(node.right, node.mid + 1, j),
                )
        else:
            # range in right child tree
            return self._query_range(node.right, i, j)

    def traverse(self):
        """Breadth-first iteration over all nodes."""
        if self.root is not None:
            queue = Queue()
            queue.put(self.root)
            while not queue.empty():
                node = queue.get()
                yield node
                if node.left is not None:
                    queue.put(node.left)
                if node.right is not None:
                    queue.put(node.right)
if __name__ == "__main__":
import operator
for fn in [operator.add, max, min]:
print("*" * 50)
        arr = SegmentTree([2, 1, 5, 3, 4], fn)
for node in arr.traverse():
print(node)
print()
arr.update(1, 5)
for node in arr.traverse():
print(node)
print()
print(arr.query_range(3, 4)) # 7
print(arr.query_range(2, 2)) # 5
print(arr.query_range(1, 3)) # 13
print()
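    # A hedged extra check (not in the original file): a sum segment tree
    # built with a lambda combiner behaves like the operator.add run above.
    st = SegmentTree([2, 1, 5, 3, 4], lambda a, b: a + b)
    print(st.query_range(0, 4))  # 15
    st.update(2, 10)  # index 2: 5 -> 10
    print(st.query_range(1, 3))  # 1 + 10 + 3 = 14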
| 461 |
'''simple docstring'''
import unittest
from transformers import SPIECE_UNDERLINE, ReformerTokenizer, ReformerTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase_ : List[Any] = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class ReformerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ReformerTokenizer
    rust_tokenizer_class = ReformerTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    test_sentencepiece = True
    def setUp(self) -> None:
super().setUp()
lowercase_ = ReformerTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
tokenizer.save_pretrained(self.tmpdirname)
def __UpperCAmelCase ( self : int) -> int:
lowercase_ = "<s>"
lowercase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(__lowerCAmelCase) , __lowerCAmelCase)
self.assertEqual(self.get_tokenizer()._convert_id_to_token(__lowerCAmelCase) , __lowerCAmelCase)
def __UpperCAmelCase ( self : Tuple) -> Optional[int]:
lowercase_ = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , "<unk>")
self.assertEqual(vocab_keys[1] , "<s>")
self.assertEqual(vocab_keys[-1] , "j")
self.assertEqual(len(__lowerCAmelCase) , 1000)
def __UpperCAmelCase ( self : Union[str, Any]) -> Optional[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 1000)
def __UpperCAmelCase ( self : List[Any]) -> List[Any]:
if not self.test_rust_tokenizer:
return
lowercase_ = self.get_tokenizer()
lowercase_ = self.get_rust_tokenizer()
lowercase_ = "I was born in 92000, and this is falsé."
lowercase_ = tokenizer.tokenize(__lowerCAmelCase)
lowercase_ = rust_tokenizer.tokenize(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowercase_ = tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
lowercase_ = rust_tokenizer.encode(__lowerCAmelCase , add_special_tokens=__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
lowercase_ = self.get_rust_tokenizer()
lowercase_ = tokenizer.encode(__lowerCAmelCase)
lowercase_ = rust_tokenizer.encode(__lowerCAmelCase)
self.assertListEqual(__lowerCAmelCase , __lowerCAmelCase)
def __UpperCAmelCase ( self : Tuple , __lowerCAmelCase : List[str]=15) -> Tuple:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
lowercase_ = self.rust_tokenizer_class.from_pretrained(__lowerCAmelCase , **__lowerCAmelCase)
# Simple input
lowercase_ = "This is a simple input"
lowercase_ = ["This is a simple input 1", "This is a simple input 2"]
lowercase_ = ("This is a simple input", "This is a pair")
lowercase_ = [
("This is a simple input 1", "This is a simple input 2"),
("This is a simple pair 1", "This is a simple pair 2"),
]
# Simple input tests
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length")
# Simple input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length")
# Simple input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" , )
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length")
# Pair input
self.assertRaises(__lowerCAmelCase , tokenizer_r.encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length")
# Pair input
self.assertRaises(
__lowerCAmelCase , tokenizer_r.batch_encode_plus , __lowerCAmelCase , max_length=__lowerCAmelCase , padding="max_length" , )
def __UpperCAmelCase ( self : Tuple) -> Tuple:
pass
def __UpperCAmelCase ( self : Any) -> Optional[Any]:
lowercase_ = ReformerTokenizer(__lowerCAmelCase , keep_accents=__lowerCAmelCase)
lowercase_ = tokenizer.tokenize("This is a test")
self.assertListEqual(__lowerCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__lowerCAmelCase) , [285, 46, 10, 170, 382] , )
lowercase_ = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
lowercase_ = tokenizer.convert_tokens_to_ids(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
lowercase_ = tokenizer.convert_ids_to_tokens(__lowerCAmelCase)
self.assertListEqual(
__lowerCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def __UpperCAmelCase ( self : List[str]) -> Any:
return ReformerTokenizer.from_pretrained("google/reformer-crime-and-punishment")
@slow
def __UpperCAmelCase ( self : Tuple) -> str:
lowercase_ = "Hello World!"
lowercase_ = [126, 32, 262, 152, 38, 72, 287]
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@slow
def __UpperCAmelCase ( self : Any) -> Dict:
lowercase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
lowercase_ = [
108,
265,
24,
111,
4,
258,
156,
35,
28,
275,
3,
259,
297,
260,
84,
4,
35,
110,
44,
8,
259,
91,
268,
21,
11,
209,
274,
109,
266,
277,
117,
86,
93,
315,
258,
278,
258,
277,
258,
0,
258,
288,
258,
319,
258,
0,
258,
0,
258,
0,
258,
0,
258,
287,
258,
315,
258,
289,
258,
278,
99,
269,
266,
262,
8,
259,
241,
4,
217,
230,
268,
266,
55,
168,
106,
75,
193,
266,
223,
27,
49,
26,
282,
25,
264,
299,
19,
26,
0,
258,
277,
117,
86,
93,
176,
183,
270,
11,
262,
42,
61,
265,
]
self.assertListEqual(__lowerCAmelCase , self.big_tokenizer.encode(__lowerCAmelCase))
@require_torch
@slow
def __UpperCAmelCase ( self : Any) -> Tuple:
import torch
from transformers import ReformerConfig, ReformerModel
# Build sequence
lowercase_ = list(self.big_tokenizer.get_vocab().keys())[:10]
lowercase_ = " ".join(__lowerCAmelCase)
lowercase_ = self.big_tokenizer.encode_plus(__lowerCAmelCase , return_tensors="pt")
lowercase_ = self.big_tokenizer.batch_encode_plus([sequence, sequence] , return_tensors="pt")
lowercase_ = ReformerConfig()
# The input gets padded during training so adjust the axial position encodings from the pretrained model value of (512, 1024)
lowercase_ = encoded_sequence["input_ids"].shape
lowercase_ = ReformerModel(__lowerCAmelCase)
# Reformer has config.vocab_size == tokenizer.vocab_size == len(tokenizer) - 1 = 320; len(tokenizer) is 321 (including a pad token with id 320)
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**__lowerCAmelCase)
model(**__lowerCAmelCase)
@slow
def __UpperCAmelCase ( self : List[Any]) -> Tuple:
# fmt: off
lowercase_ = {"input_ids": [[108, 265, 24, 111, 4, 258, 156, 7, 51, 279, 58, 7, 76, 25, 69, 278], [140, 243, 264, 134, 17, 267, 77, 263, 22, 262, 297, 258, 304, 177, 279, 266, 14, 89, 13, 35, 261, 299, 272, 137, 275, 278]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# This tokenizer does not know some characters like ")".
# That is the reason why we use very simple texts here.
# Also see https://github.com/huggingface/transformers/pull/11737#issuecomment-850769064
lowercase_ = [
"This is a very simple sentence.",
"The quick brown fox jumps over the lazy dog.",
]
self.tokenizer_integration_test_util(
expected_encoding=__lowerCAmelCase , model_name="google/reformer-crime-and-punishment" , revision="0e6c3decb8211d49bf881013425dc8b0448b3f5a" , padding=__lowerCAmelCase , sequences=__lowerCAmelCase , )
| 461 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
        "PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
        "PegasusXForConditionalGeneration",
        "PegasusXModel",
        "PegasusXPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 342 |
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
__a :Optional[Any] = 'true'
def __snake_case ( __UpperCamelCase : Any ,__UpperCamelCase : List[Any]=82 ,__UpperCamelCase : Dict=16 ):
"""simple docstring"""
set_seed(42 )
A_ = RegressionModel()
A_ = deepcopy(__UpperCamelCase )
A_ = RegressionDataset(length=__UpperCamelCase )
A_ = DataLoader(__UpperCamelCase ,batch_size=__UpperCamelCase )
model.to(accelerator.device )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return model, ddp_model, dataloader
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=False ):
"""simple docstring"""
A_ = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased" )
A_ = load_dataset("glue" ,"mrpc" ,split="validation" )
def tokenize_function(__UpperCamelCase : Optional[Any] ):
A_ = tokenizer(examples["sentence1"] ,examples["sentence2"] ,truncation=__UpperCamelCase ,max_length=__UpperCamelCase )
return outputs
with accelerator.main_process_first():
A_ = dataset.map(
__UpperCamelCase ,batched=__UpperCamelCase ,remove_columns=["idx", "sentence1", "sentence2"] ,)
A_ = tokenized_datasets.rename_column("label" ,"labels" )
def collate_fn(__UpperCamelCase : Union[str, Any] ):
if use_longest:
return tokenizer.pad(__UpperCamelCase ,padding="longest" ,return_tensors="pt" )
return tokenizer.pad(__UpperCamelCase ,padding="max_length" ,max_length=128 ,return_tensors="pt" )
return DataLoader(__UpperCamelCase ,shuffle=__UpperCamelCase ,collate_fn=__UpperCamelCase ,batch_size=16 )
def __snake_case ( __UpperCamelCase : Optional[Any] ,__UpperCamelCase : str ):
"""simple docstring"""
A_ = Accelerator(dispatch_batches=__UpperCamelCase ,split_batches=__UpperCamelCase )
A_ = get_dataloader(__UpperCamelCase ,not dispatch_batches )
A_ = AutoModelForSequenceClassification.from_pretrained(
"hf-internal-testing/mrpc-bert-base-cased" ,return_dict=__UpperCamelCase )
A_ , A_ = accelerator.prepare(__UpperCamelCase ,__UpperCamelCase )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def __snake_case ( __UpperCamelCase : Dict ,__UpperCamelCase : int ,__UpperCamelCase : Optional[Any] ):
"""simple docstring"""
A_ = []
for batch in dataloader:
A_ , A_ = batch.values()
with torch.no_grad():
A_ = model(__UpperCamelCase )
A_ , A_ = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
A_ , A_ = [], []
for logit, targ in logits_and_targets:
logits.append(__UpperCamelCase )
targs.append(__UpperCamelCase )
A_ , A_ = torch.cat(__UpperCamelCase ), torch.cat(__UpperCamelCase )
return logits, targs
def __snake_case ( __UpperCamelCase : Accelerator ,__UpperCamelCase : Dict=82 ,__UpperCamelCase : List[Any]=False ,__UpperCamelCase : Dict=False ,__UpperCamelCase : Optional[int]=16 ):
"""simple docstring"""
A_ , A_ , A_ = get_basic_setup(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
A_ , A_ = generate_predictions(__UpperCamelCase ,__UpperCamelCase ,__UpperCamelCase )
assert (
len(__UpperCamelCase ) == num_samples
), f'''Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(__UpperCamelCase )}'''
def __snake_case ( __UpperCamelCase : bool = False ,__UpperCamelCase : bool = False ):
"""simple docstring"""
A_ = evaluate.load("glue" ,"mrpc" )
A_ , A_ = get_mrpc_setup(__UpperCamelCase ,__UpperCamelCase )
# First do baseline
A_ , A_ , A_ = setup["no"]
model.to(__UpperCamelCase )
model.eval()
for batch in dataloader:
batch.to(__UpperCamelCase )
with torch.inference_mode():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=__UpperCamelCase ,references=batch["labels"] )
A_ = metric.compute()
# Then do distributed
A_ , A_ , A_ = setup["ddp"]
model.eval()
for batch in dataloader:
with torch.inference_mode():
A_ = model(**__UpperCamelCase )
A_ = outputs.logits.argmax(dim=-1 )
A_ = batch["labels"]
A_ , A_ = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=__UpperCamelCase ,references=__UpperCamelCase )
A_ = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] ,distributed[key] ), f'''Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n'''
def __snake_case ( ):
"""simple docstring"""
A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("**Testing gather_for_metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`''' )
test_mrpc(__UpperCamelCase ,__UpperCamelCase )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test torch metrics**" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
A_ = Accelerator(split_batches=__UpperCamelCase ,dispatch_batches=__UpperCamelCase )
if accelerator.is_local_main_process:
print(f'''With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99''' )
test_torch_metrics(__UpperCamelCase ,99 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("**Test last batch is not dropped when perfectly divisible**" )
A_ = Accelerator()
test_torch_metrics(__UpperCamelCase ,512 )
accelerator.state._reset_state()
def __snake_case ( __UpperCamelCase : List[Any] ):
"""simple docstring"""
main()
if __name__ == "__main__":
    main()
| 86 | 0 |
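# A minimal, hedged sketch of the gather-before-metric pattern the script
# above relies on: gather_for_metrics concatenates per-process tensors and,
# when the sampler duplicated samples to pad an uneven final batch, drops the
# duplicates so metrics see each example exactly once. Runs on one process too.
import torch
from accelerate import Accelerator

accelerator = Accelerator()
preds = torch.tensor([0, 1, 1])
refs = torch.tensor([0, 1, 0])
preds, refs = accelerator.gather_for_metrics((preds, refs))
print((preds == refs).float().mean())  # accuracy over the gathered tensors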
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class GPTSanJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {"do_clean_text": False, "add_prefix_space": False}
    def setUp(self):
super().setUp()
# fmt: off
_lowerCAmelCase = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
# fmt: on
_lowerCAmelCase = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}} # 😀
_lowerCAmelCase = {'unk_token': '<unk>'}
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
_lowerCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['emoji_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.emoji_file , 'w' ) as emoji_writer:
emoji_writer.write(json.dumps(lowercase__ ) )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , **lowercase__ : Tuple ):
kwargs.update(self.special_tokens_map )
return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname , **lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , lowercase__ : List[str] ):
_lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
_lowerCAmelCase = 'こんにちは、世界。 \nこんばんは、世界。😀'
return input_text, output_text
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , lowercase__ : List[Any] ):
_lowerCAmelCase , _lowerCAmelCase = self.get_input_output_texts(lowercase__ )
_lowerCAmelCase = tokenizer.encode(lowercase__ , add_special_tokens=lowercase__ )
_lowerCAmelCase = tokenizer.decode(lowercase__ , clean_up_tokenization_spaces=lowercase__ )
return text, ids
def SCREAMING_SNAKE_CASE__ ( self : str ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
pass # TODO add if relevant
def SCREAMING_SNAKE_CASE__ ( self : int ):
_lowerCAmelCase = self.get_tokenizer()
# Testing tokenization
_lowerCAmelCase = 'こんにちは、世界。 こんばんは、㔺界。'
_lowerCAmelCase = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
_lowerCAmelCase = tokenizer.tokenize(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Testing conversion to ids without special tokens
_lowerCAmelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
# Testing conversion to ids with special tokens
_lowerCAmelCase = tokens + [tokenizer.unk_token]
_lowerCAmelCase = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
_lowerCAmelCase = tokenizer.convert_tokens_to_ids(lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase = self.get_tokenizer()
# Testing tokenization
_lowerCAmelCase = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
_lowerCAmelCase = 'こんにちは、、、、世界。こんばんは、、、、世界。'
_lowerCAmelCase = tokenizer.encode(lowercase__ )
_lowerCAmelCase = tokenizer.decode(lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : int ):
_lowerCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
_lowerCAmelCase = 'こんにちは、世界。'
_lowerCAmelCase = 'こんばんは、㔺界。😀'
_lowerCAmelCase = 'こんにちは、世界。こんばんは、世界。😀'
_lowerCAmelCase = tokenizer.encode(prefix_text + input_text )
_lowerCAmelCase = tokenizer.encode('' , prefix_text=prefix_text + input_text )
_lowerCAmelCase = tokenizer.encode(lowercase__ , prefix_text=lowercase__ )
_lowerCAmelCase = tokenizer.decode(lowercase__ )
_lowerCAmelCase = tokenizer.decode(lowercase__ )
_lowerCAmelCase = tokenizer.decode(lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
self.assertEqual(lowercase__ , lowercase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
# Testing tokenization
_lowerCAmelCase = 'こんにちは、世界。'
_lowerCAmelCase = 'こんばんは、㔺界。😀'
_lowerCAmelCase = len(tokenizer.encode(lowercase__ ) ) - 2
_lowerCAmelCase = len(tokenizer.encode(lowercase__ ) ) - 2
_lowerCAmelCase = [1] + [0] * (len_prefix + len_text + 1)
_lowerCAmelCase = [1] * (len_prefix + len_text + 1) + [0]
_lowerCAmelCase = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
_lowerCAmelCase = tokenizer(prefix_text + input_text ).token_type_ids
_lowerCAmelCase = tokenizer('' , prefix_text=prefix_text + input_text ).token_type_ids
_lowerCAmelCase = tokenizer(lowercase__ , prefix_text=lowercase__ ).token_type_ids
self.assertListEqual(lowercase__ , lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
self.assertListEqual(lowercase__ , lowercase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
_lowerCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
_lowerCAmelCase = tokenizer.encode('あンいワ' )
_lowerCAmelCase = tokenizer.encode('' , prefix_text='あンいワ' )
_lowerCAmelCase = tokenizer.encode('いワ' , prefix_text='あン' )
self.assertEqual(tokenizer.decode(lowercase__ ) , tokenizer.decode(lowercase__ ) )
self.assertEqual(tokenizer.decode(lowercase__ ) , tokenizer.decode(lowercase__ ) )
self.assertNotEqual(lowercase__ , lowercase__ )
self.assertNotEqual(lowercase__ , lowercase__ )
self.assertEqual(x_token_a[1] , x_token_a[-1] ) # SEG token
self.assertEqual(x_token_a[1] , x_token_a[3] ) # SEG token
@slow
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese' )
_lowerCAmelCase = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
_lowerCAmelCase = tokenizer(lowercase__ , padding=lowercase__ )
_lowerCAmelCase = tokenizer.batch_encode_plus(lowercase__ , padding=lowercase__ )
# fmt: off
_lowerCAmelCase = [[3_59_93, 86_40, 2_59_48, 3_59_98, 3_06_47, 3_56_75, 3_59_99, 3_59_99], [3_59_93, 1_03_82, 98_68, 3_59_98, 3_06_46, 94_59, 3_06_46, 3_56_75]]
_lowerCAmelCase = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
_lowerCAmelCase = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
# fmt: on
self.assertListEqual(x_token.input_ids , lowercase__ )
self.assertListEqual(x_token.token_type_ids , lowercase__ )
self.assertListEqual(x_token.attention_mask , lowercase__ )
self.assertListEqual(x_token_a.input_ids , lowercase__ )
self.assertListEqual(x_token_a.token_type_ids , lowercase__ )
self.assertListEqual(x_token_a.attention_mask , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : int ):
# Intentionally convert some words to accommodate character fluctuations unique to Japanese
pass
def SCREAMING_SNAKE_CASE__ ( self : str ):
# tokenizer has no padding token
pass
| 716 |
def solution(limit: int = 50_000_000) -> int:
    ret = set()
    prime_square_limit = int((limit - 24) ** (1 / 2))
    primes = set(range(3, prime_square_limit + 1, 2))
    primes.add(2)
    for p in range(3, prime_square_limit + 1, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, prime_square_limit + 1, p)))
    for prime_1 in primes:
        square = prime_1 * prime_1
        for prime_2 in primes:
            cube = prime_2 * prime_2 * prime_2
            if square + cube >= limit - 16:
                break
            for prime_3 in primes:
                tetr = prime_3 * prime_3 * prime_3 * prime_3
                total = square + cube + tetr
                if total >= limit:
                    break
                ret.add(total)
    return len(ret)
if __name__ == "__main__":
print(f"""{solution() = }""")
| 225 | 0 |
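# A hedged spot check for the solution above: the Project Euler 87 statement
# lists exactly four prime-power triples below fifty, e.g. 28 = 2^2 + 2^3 + 2^4,
# 33 = 3^2 + 2^3 + 2^4, 47 = 2^2 + 3^3 + 2^4 and 49 = 5^2 + 2^3 + 2^4.
print(solution(50))  # expected: 4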
'''simple docstring'''
import argparse
import os
# New Code #
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils import find_executable_batch_size
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to ensure out-of-memory errors never
# interrupt training, and builds off the `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE
    )

    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    metric = evaluate.load("glue", "mrpc")

    # New Code #
    # We now can define an inner training loop function. It should take a batch size as the only parameter,
    # and build the dataloaders in there.
    # It also gets our decorator
    @find_executable_batch_size(starting_batch_size=batch_size)
    def inner_training_loop(batch_size):
        # And now just move everything below under this function
        # We need to bring in the Accelerator object from earlier
        nonlocal accelerator
        # And reset all of its attributes that could hold onto any memory:
        accelerator.free_memory()

        # Then we can declare the model, optimizer, and everything else:
        set_seed(seed)

        # Instantiate the model (we build the model here so that the seed also control new weights initialization)
        model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

        # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
        # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
        # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
        model = model.to(accelerator.device)

        # Instantiate optimizer
        optimizer = AdamW(params=model.parameters(), lr=lr)
        train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

        # Instantiate scheduler
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=100,
            num_training_steps=(len(train_dataloader) * num_epochs),
        )

        # Prepare everything
        # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
        # prepare method.
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
            model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
        )

        # Now we train the model
        for epoch in range(num_epochs):
            model.train()
            for step, batch in enumerate(train_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                outputs = model(**batch)
                loss = outputs.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            model.eval()
            for step, batch in enumerate(eval_dataloader):
                # We could avoid this line since we set the accelerator with `device_placement=True`.
                batch.to(accelerator.device)
                with torch.no_grad():
                    outputs = model(**batch)
                predictions = outputs.logits.argmax(dim=-1)
                predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
                metric.add_batch(predictions=predictions, references=references)

            eval_metric = metric.compute()
            # Use accelerator.print to print only on the main process.
            accelerator.print(f"epoch {epoch}:", eval_metric)

    # New Code #
    # And call it at the end with no arguments
    # Note: You could also refactor this outside of your training loop function
    inner_training_loop()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
main()
| 26 |
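# A standalone, hedged sketch of the decorator used above: on an OOM-looking
# RuntimeError it halves the batch size and retries the decorated function.
from accelerate.utils import find_executable_batch_size


@find_executable_batch_size(starting_batch_size=128)
def train(batch_size):
    print(f"trying batch_size={batch_size}")
    if batch_size > 32:  # stand-in for a real CUDA out-of-memory failure
        raise RuntimeError("CUDA out of memory.")
    return batch_size


print(train())  # retries 128 -> 64 -> 32 and returns 32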
"""simple docstring"""
import argparse
import math
import os
import torch
from neural_compressor.utils.pytorch import load
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, StableDiffusionPipeline, UNet2DConditionModel
def parse_args():
    parser = argparse.ArgumentParser()
parser.add_argument(
'-m' , '--pretrained_model_name_or_path' , type=_UpperCAmelCase , default=_UpperCAmelCase , required=_UpperCAmelCase , help='Path to pretrained model or model identifier from huggingface.co/models.' , )
parser.add_argument(
'-c' , '--caption' , type=_UpperCAmelCase , default='robotic cat with wings' , help='Text used to generate images.' , )
parser.add_argument(
'-n' , '--images_num' , type=_UpperCAmelCase , default=4 , help='How much images to generate.' , )
parser.add_argument(
'-s' , '--seed' , type=_UpperCAmelCase , default=42 , help='Seed for random process.' , )
parser.add_argument(
'-ci' , '--cuda_id' , type=_UpperCAmelCase , default=0 , help='cuda_id.' , )
    args = parser.parse_args()
    return args
def image_grid(imgs, rows, cols):
    if not len(imgs) == rows * cols:
        raise ValueError("The specified number of rows and columns are not correct.")
    w, h = imgs[0].size
    grid = Image.new("RGB", size=(cols * w, rows * h))
    grid_w, grid_h = grid.size
    for i, img in enumerate(imgs):
        grid.paste(img, box=(i % cols * w, i // cols * h))
    return grid
def generate_images(pipeline, prompt="robotic cat with wings", guidance_scale=7.5, num_inference_steps=50, num_images_per_prompt=1, seed=42):
    generator = torch.Generator(pipeline.device).manual_seed(seed)
    images = pipeline(
        prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
        num_images_per_prompt=num_images_per_prompt,
    ).images
    _rows = int(math.sqrt(num_images_per_prompt))
    grid = image_grid(images, rows=_rows, cols=num_images_per_prompt // _rows)
    return grid, images
args = parse_args()
# Load models and create wrapper for stable diffusion
tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="text_encoder")
vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae")
unet = UNet2DConditionModel.from_pretrained(args.pretrained_model_name_or_path, subfolder="unet")
pipeline = StableDiffusionPipeline.from_pretrained(
    args.pretrained_model_name_or_path, text_encoder=text_encoder, vae=vae, unet=unet, tokenizer=tokenizer
)
pipeline.safety_checker = lambda images, clip_input: (images, False)
if os.path.exists(os.path.join(args.pretrained_model_name_or_path, "best_model.pt")):
    unet = load(args.pretrained_model_name_or_path, model=unet)
    unet.eval()
    setattr(pipeline, "unet", unet)
else:
    unet = unet.to(torch.device("cuda", args.cuda_id))
pipeline = pipeline.to(unet.device)
grid, images = generate_images(pipeline, prompt=args.caption, num_images_per_prompt=args.images_num, seed=args.seed)
grid.save(os.path.join(args.pretrained_model_name_or_path, "{}.png".format("_".join(args.caption.split()))))
dirname = os.path.join(args.pretrained_model_name_or_path, "_".join(args.caption.split()))
os.makedirs(dirname, exist_ok=True)
for idx, image in enumerate(images):
image.save(os.path.join(dirname, '''{}.png'''.format(idx + 1)))
| 4 | 0 |
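# A quick, hedged check of the image_grid helper defined in the script above
# (assumes the function is importable): four 64x64 tiles become one 128x128 grid.
from PIL import Image

tiles = [Image.new("RGB", (64, 64), color) for color in ("red", "green", "blue", "white")]
grid = image_grid(tiles, rows=2, cols=2)
print(grid.size)  # (128, 128)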
import string
import numpy
def greatest_common_divisor(a: int, b: int) -> int:
    """Euclid's algorithm, recursive form."""
    return b if a == 0 else greatest_common_divisor(b % a, a)
class HillCipher:
    key_string = string.ascii_uppercase + string.digits
    # This cipher takes alphanumerics into account
    # i.e. a total of 36 characters

    # take x and return x % len(key_string)
    modulus = numpy.vectorize(lambda x: x % 36)
    to_int = numpy.vectorize(round)

    def __init__(self, encrypt_key: numpy.ndarray) -> None:
        self.encrypt_key = self.modulus(encrypt_key)  # mod36 calc's on the encrypt key
        self.check_determinant()  # validate the determinant of the encryption key
        self.break_key = encrypt_key.shape[0]

    def replace_letters(self, letter: str) -> int:
        return self.key_string.index(letter)

    def replace_digits(self, num: int) -> str:
        return self.key_string[round(num)]

    def check_determinant(self) -> None:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        req_l = len(self.key_string)
        if greatest_common_divisor(det, len(self.key_string)) != 1:
            msg = (
                f"determinant modular {req_l} of encryption key({det}) "
                f"is not co prime w.r.t {req_l}.\nTry another key."
            )
            raise ValueError(msg)

    def process_text(self, text: str) -> str:
        chars = [char for char in text.upper() if char in self.key_string]
        last = chars[-1]
        while len(chars) % self.break_key != 0:
            chars.append(last)
        return "".join(chars)

    def encrypt(self, text: str) -> str:
        text = self.process_text(text.upper())
        encrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_encrypted = self.modulus(self.encrypt_key.dot(batch_vec)).T.tolist()[0]
            encrypted += "".join(self.replace_digits(num) for num in batch_encrypted)
        return encrypted

    def make_decrypt_key(self) -> numpy.ndarray:
        det = round(numpy.linalg.det(self.encrypt_key))
        if det < 0:
            det = det % len(self.key_string)
        det_inv = None
        for i in range(len(self.key_string)):
            if (det * i) % len(self.key_string) == 1:
                det_inv = i
                break
        inv_key = det_inv * numpy.linalg.det(self.encrypt_key) * numpy.linalg.inv(self.encrypt_key)
        return self.to_int(self.modulus(inv_key))

    def decrypt(self, text: str) -> str:
        decrypt_key = self.make_decrypt_key()
        text = self.process_text(text.upper())
        decrypted = ""
        for i in range(0, len(text) - self.break_key + 1, self.break_key):
            batch = text[i : i + self.break_key]
            vec = [self.replace_letters(char) for char in batch]
            batch_vec = numpy.array([vec]).T
            batch_decrypted = self.modulus(decrypt_key.dot(batch_vec)).T.tolist()[0]
            decrypted += "".join(self.replace_digits(num) for num in batch_decrypted)
        return decrypted
def main() -> None:
    n = int(input("Enter the order of the encryption key: "))
    hill_matrix = []

    print("Enter each row of the encryption key with space separated integers")
    for _ in range(n):
        row = [int(x) for x in input().split()]
        hill_matrix.append(row)

    hc = HillCipher(numpy.array(hill_matrix))

    print("Would you like to encrypt or decrypt some text? (1 or 2)")
    option = input("\n1. Encrypt\n2. Decrypt\n")
    if option == "1":
        text_e = input("What text would you like to encrypt?: ")
        print("Your encrypted text is:")
        print(hc.encrypt(text_e))
    elif option == "2":
        text_d = input("What text would you like to decrypt?: ")
        print("Your decrypted text is:")
        print(hc.decrypt(text_d))
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 717 |
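# A hedged usage sketch for the HillCipher above; the expected strings match
# the doctests in the upstream algorithms repository this file derives from.
import numpy

hc = HillCipher(numpy.array([[2, 5], [1, 6]]))
print(hc.encrypt("testing hill cipher"))  # WHXYJOLM9C6XT085LL
print(hc.decrypt("WHXYJOLM9C6XT085LL"))   # TESTINGHILLCIPHERR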
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import center_crop, normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class A__(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(self, do_resize: bool = True, size: Dict[str, int] = None, resample: PILImageResampling = PIL.Image.BICUBIC, do_center_crop: bool = True, crop_size: Dict[str, int] = None, rescale_factor: Union[int, float] = 1 / 255, do_rescale: bool = True, do_normalize: bool = True, image_mean: Optional[Union[float, List[float]]] = None, image_std: Optional[Union[float, List[float]]] = None, **kwargs):
        super().__init__(**kwargs)
        size = size if size is not None else {"height": 256, "width": 256}
        size = get_size_dict(size)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(self, image: np.ndarray, size: Dict[str, int], resample: PILImageResampling = PIL.Image.BICUBIC, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return resize(
            image, size=(size["height"], size["width"]), resample=resample, data_format=data_format, **kwargs
        )

    def center_crop(self, image: np.ndarray, size: Dict[str, int], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The size dictionary must have keys 'height' and 'width'. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale: Union[int, float], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean: Union[float, List[float]], std: Union[float, List[float]], data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs):
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
def __magic_name__ ( self : Union[str, Any] , A_ : ImageInput , A_ : bool = None , A_ : Dict[str, int] = None , A_ : Union[str, Any]=None , A_ : bool = None , A_ : Dict[str, int] = None , A_ : bool = None , A_ : float = None , A_ : bool = None , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[float, List[float]]] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : ChannelDimension = ChannelDimension.FIRST , **A_ : Dict , ):
'''simple docstring'''
_lowerCAmelCase : List[str] = do_resize if do_resize is not None else self.do_resize
_lowerCAmelCase : str = resample if resample is not None else self.resample
_lowerCAmelCase : int = do_center_crop if do_center_crop is not None else self.do_center_crop
_lowerCAmelCase : str = do_rescale if do_rescale is not None else self.do_rescale
_lowerCAmelCase : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
_lowerCAmelCase : int = do_normalize if do_normalize is not None else self.do_normalize
_lowerCAmelCase : str = image_mean if image_mean is not None else self.image_mean
_lowerCAmelCase : List[Any] = image_std if image_std is not None else self.image_std
_lowerCAmelCase : Optional[int] = size if size is not None else self.size
_lowerCAmelCase : List[str] = get_size_dict(A_ )
_lowerCAmelCase : int = crop_size if crop_size is not None else self.crop_size
_lowerCAmelCase : List[str] = get_size_dict(A_ , param_name="crop_size" )
_lowerCAmelCase : Tuple = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
_lowerCAmelCase : Union[str, Any] = [to_numpy_array(A_ ) for image in images]
if do_resize:
_lowerCAmelCase : List[str] = [self.resize(image=A_ , size=A_ , resample=A_ ) for image in images]
if do_center_crop:
_lowerCAmelCase : Any = [self.center_crop(image=A_ , size=A_ ) for image in images]
if do_rescale:
_lowerCAmelCase : str = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_normalize:
_lowerCAmelCase : Dict = [self.normalize(image=A_ , mean=A_ , std=A_ ) for image in images]
_lowerCAmelCase : Tuple = [to_channel_dimension_format(A_ , A_ ) for image in images]
_lowerCAmelCase : int = {"pixel_values": images}
return BatchFeature(data=A_ , tensor_type=A_ )
| 503 | 0 |
'''simple docstring'''
from __future__ import annotations
def check_polygon(nums: list[float]) -> bool:
    """Return True if the given side lengths can form a valid polygon."""
    if len(nums) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space")
    if any(i <= 0 for i in nums):
        raise ValueError("All values must be greater than 0")
    copy_nums = nums.copy()
    copy_nums.sort()
    return copy_nums[-1] < sum(copy_nums[:-1])
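
# Example: check_polygon([6, 10, 5]) is True because the longest side (10)
# is shorter than the sum of the remaining sides (11).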
if __name__ == "__main__":
import doctest
doctest.testmod()
| 495 |
import os
import sys
import unittest
git_repo_path = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, "utils"))
import get_test_info # noqa: E402
from get_test_info import ( # noqa: E402
get_model_to_test_mapping,
get_model_to_tester_mapping,
get_test_to_tester_mapping,
)
BERT_TEST_FILE = os.path.join("tests", "models", "bert", "test_modeling_bert.py")
BLIP_TEST_FILE = os.path.join("tests", "models", "blip", "test_modeling_blip.py")
class GetTestInfoTester(unittest.TestCase):
    def test_get_test_to_tester_mapping(self):
        bert_test_tester_mapping = get_test_to_tester_mapping(BERT_TEST_FILE)
        blip_test_tester_mapping = get_test_to_tester_mapping(BLIP_TEST_FILE)

        expected_bert_mapping = {"BertModelTest": "BertModelTester"}
        expected_blip_mapping = {
            "BlipModelTest": "BlipModelTester",
            "BlipTextImageModelTest": "BlipTextImageModelsModelTester",
            "BlipTextModelTest": "BlipTextModelTester",
            "BlipTextRetrievalModelTest": "BlipTextRetrievalModelTester",
            "BlipVQAModelTest": "BlipVQAModelTester",
            "BlipVisionModelTest": "BlipVisionModelTester",
        }

        self.assertEqual(get_test_info.to_json(bert_test_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_test_tester_mapping), expected_blip_mapping)

    def test_get_model_to_test_mapping(self):
        bert_model_test_mapping = get_model_to_test_mapping(BERT_TEST_FILE)
        blip_model_test_mapping = get_model_to_test_mapping(BLIP_TEST_FILE)

        expected_bert_mapping = {
            "BertForMaskedLM": ["BertModelTest"],
            "BertForMultipleChoice": ["BertModelTest"],
            "BertForNextSentencePrediction": ["BertModelTest"],
            "BertForPreTraining": ["BertModelTest"],
            "BertForQuestionAnswering": ["BertModelTest"],
            "BertForSequenceClassification": ["BertModelTest"],
            "BertForTokenClassification": ["BertModelTest"],
            "BertLMHeadModel": ["BertModelTest"],
            "BertModel": ["BertModelTest"],
        }
        expected_blip_mapping = {
            "BlipForConditionalGeneration": ["BlipTextImageModelTest"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTest"],
            "BlipForQuestionAnswering": ["BlipVQAModelTest"],
            "BlipModel": ["BlipModelTest"],
            "BlipTextModel": ["BlipTextModelTest"],
            "BlipVisionModel": ["BlipVisionModelTest"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_test_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_test_mapping), expected_blip_mapping)

    def test_get_model_to_tester_mapping(self):
        bert_model_tester_mapping = get_model_to_tester_mapping(BERT_TEST_FILE)
        blip_model_tester_mapping = get_model_to_tester_mapping(BLIP_TEST_FILE)

        expected_bert_mapping = {
            "BertForMaskedLM": ["BertModelTester"],
            "BertForMultipleChoice": ["BertModelTester"],
            "BertForNextSentencePrediction": ["BertModelTester"],
            "BertForPreTraining": ["BertModelTester"],
            "BertForQuestionAnswering": ["BertModelTester"],
            "BertForSequenceClassification": ["BertModelTester"],
            "BertForTokenClassification": ["BertModelTester"],
            "BertLMHeadModel": ["BertModelTester"],
            "BertModel": ["BertModelTester"],
        }
        expected_blip_mapping = {
            "BlipForConditionalGeneration": ["BlipTextImageModelsModelTester"],
            "BlipForImageTextRetrieval": ["BlipTextRetrievalModelTester"],
            "BlipForQuestionAnswering": ["BlipVQAModelTester"],
            "BlipModel": ["BlipModelTester"],
            "BlipTextModel": ["BlipTextModelTester"],
            "BlipVisionModel": ["BlipVisionModelTester"],
        }

        self.assertEqual(get_test_info.to_json(bert_model_tester_mapping), expected_bert_mapping)
        self.assertEqual(get_test_info.to_json(blip_model_tester_mapping), expected_blip_mapping)
| 478 | 0 |
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
logger = logging.get_logger(__name__)
# TODO: upload to AWS
RETRIBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "yjernite/retribert-base-uncased": (
        "https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json"
    ),
}
class RetriBertConfig(PretrainedConfig):
    model_type = "retribert"

    def __init__(
        self,
        vocab_size=30_522,
        hidden_size=768,
        num_hidden_layers=8,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        share_encoders=True,
        projection_dim=128,
        pad_token_id=0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.share_encoders = share_encoders
        self.projection_dim = projection_dim
| 708 |
"""simple docstring"""
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    """Return the prime factorization of `n` in ascending order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
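
# Quick sanity check: prime_factors(100) == [2, 2, 5, 5] and prime_factors(97) == [97].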
if __name__ == "__main__":
import doctest
doctest.testmod()
| 299 | 0 |
'''simple docstring'''
import argparse
from transformers import T5Config, T5ForConditionalGeneration, load_tf_weights_in_t5
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model from the JSON config
    config = T5Config.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = T5ForConditionalGeneration(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_t5(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
| 173 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/table-transformer-detection": (
        "https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
    ),
}
class TableTransformerConfig(PretrainedConfig):
    model_type = "table-transformer"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=100,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
            # set timm attributes to None
            backbone, use_pretrained_backbone, dilation = None, None, None

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model


class TableTransformerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 173 | 1 |
import unittest
from knapsack import greedy_knapsack as kp
class TestClass(unittest.TestCase):
    """Test cases for the greedy knapsack implementation."""

    def test_sorted(self):
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_negative_weight_value(self):
        self.assertRaisesRegex(ValueError, "Weight can not be negative.")

    def test_negative_profit_value(self):
        self.assertRaisesRegex(ValueError, "Profit can not be negative.")

    def test_null_max_weight(self):
        self.assertRaisesRegex(ValueError, "max_weight must greater than zero.")

    def test_unequal_list_length(self):
        self.assertRaisesRegex(IndexError, "The length of profit and weight must be same.")
if __name__ == "__main__":
unittest.main()
| 171 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Legacy read-only filesystem over the files of a Hugging Face Hub repository."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(self, repo_info: Optional[DatasetInfo] = None, token: Optional[str] = None, **kwargs):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None

    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )

    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()

    def info(self, path: str, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)

    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
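
# Minimal usage sketch (hypothetical names; assumes a DatasetInfo with populated `siblings`):
#   fs = HfFileSystem(repo_info=dataset_info, token=hf_token)
#   fs.ls("")  # lists the top-level files and directories of the repo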
| 171 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_x_clip""": [
"""XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""XCLIPConfig""",
"""XCLIPTextConfig""",
"""XCLIPVisionConfig""",
],
"""processing_x_clip""": ["""XCLIPProcessor"""],
}
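
# The torch-only classes below are registered lazily: if torch is missing, the
# OptionalDependencyNotAvailable branch simply leaves them out of the module.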
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_x_clip"] = [
"""XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XCLIPModel""",
"""XCLIPPreTrainedModel""",
"""XCLIPTextModel""",
"""XCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_x_clip import (
XCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
XCLIPConfig,
XCLIPTextConfig,
XCLIPVisionConfig,
)
from .processing_x_clip import XCLIPProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_x_clip import (
XCLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
XCLIPModel,
XCLIPPreTrainedModel,
XCLIPTextModel,
XCLIPVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 0 |
"""simple docstring"""

DIGITS_FIFTH_POWER = {str(digit): digit**5 for digit in range(10)}
def digits_fifth_powers_sum(number: int) -> int:
    """Sum of the fifth powers of the digits of `number`."""
    return sum(DIGITS_FIFTH_POWER[digit] for digit in str(number))


def solution() -> int:
    """Sum of all numbers that equal the sum of the fifth powers of their digits."""
    return sum(
        number
        for number in range(1000, 100_0000)
        if number == digits_fifth_powers_sum(number)
    )
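
# Why one million suffices as an upper bound: a 7-digit number's digit fifth
# powers sum to at most 7 * 9**5 = 413_343, which is below any 7-digit number.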
if __name__ == "__main__":
print(solution())
| 599 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "transfo-xl-wt103": "https://huggingface.co/transfo-xl-wt103/resolve/main/config.json",
}
class TransfoXLConfig(PretrainedConfig):
    model_type = "transfo-xl"
    keys_to_ignore_at_inference = ["mems"]
    attribute_map = {
        "n_token": "vocab_size",
        "hidden_size": "d_model",
        "num_attention_heads": "n_head",
        "num_hidden_layers": "n_layer",
    }

    def __init__(
        self,
        vocab_size=267_735,
        cutoffs=[20_000, 40_000, 200_000],
        d_model=1024,
        d_embed=1024,
        n_head=16,
        d_head=64,
        d_inner=4096,
        div_val=4,
        pre_lnorm=False,
        n_layer=18,
        mem_len=1600,
        clamp_len=1000,
        same_length=True,
        proj_share_all_but_first=True,
        attn_type=0,
        sample_softmax=-1,
        adaptive=True,
        dropout=0.1,
        dropatt=0.0,
        untie_r=True,
        init="normal",
        init_range=0.01,
        proj_init_std=0.01,
        init_std=0.02,
        layer_norm_epsilon=1e-5,
        eos_token_id=0,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.cutoffs = []
        self.cutoffs.extend(cutoffs)
        if proj_share_all_but_first:
            self.tie_projs = [False] + [True] * len(self.cutoffs)
        else:
            self.tie_projs = [False] + [False] * len(self.cutoffs)
        self.d_model = d_model
        self.d_embed = d_embed
        self.d_head = d_head
        self.d_inner = d_inner
        self.div_val = div_val
        self.pre_lnorm = pre_lnorm
        self.n_layer = n_layer
        self.n_head = n_head
        self.mem_len = mem_len
        self.same_length = same_length
        self.attn_type = attn_type
        self.clamp_len = clamp_len
        self.sample_softmax = sample_softmax
        self.adaptive = adaptive
        self.dropout = dropout
        self.dropatt = dropatt
        self.untie_r = untie_r
        self.init = init
        self.init_range = init_range
        self.proj_init_std = proj_init_std
        self.init_std = init_std
        self.layer_norm_epsilon = layer_norm_epsilon
        super().__init__(eos_token_id=eos_token_id, **kwargs)

    @property
    def max_position_embeddings(self):
        logger.info(f"The model {self.model_type} is one of the few models that has no sequence length limit.")
        return -1

    @max_position_embeddings.setter
    def max_position_embeddings(self, value):
        raise NotImplementedError(
            f"The model {self.model_type} is one of the few models that has no sequence length limit."
        )
| 715 |
g = 9.80_665


def archimedes_principle(fluid_density: float, volume: float, gravity: float = g) -> float:
    """Buoyant force = fluid density * gravity * displaced volume."""
    if fluid_density <= 0:
        raise ValueError("Impossible fluid density")
    if volume < 0:
        raise ValueError("Impossible Object volume")
    if gravity <= 0:
        raise ValueError("Impossible Gravity")
    return fluid_density * gravity * volume
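
# Example: archimedes_principle(fluid_density=997, volume=0.5) ~= 4888.6 N
# (half a cubic metre of water displaced under standard gravity).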
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
| 259 | 0 |
def and_gate(input_1: int, input_2: int) -> int:
    """Logical AND: returns 1 only when both inputs are 1."""
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
    assert and_gate(0, 0) == 0
    assert and_gate(0, 1) == 0
    assert and_gate(1, 0) == 0
    assert and_gate(1, 1) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 398 |
class SubArray:
    def __init__(self, arr):
        # Convert the comma separated input string into a list of values.
        self.array = arr.split(",")

    def solve_sub_array(self):
        # Kadane-style DP: sum_value[i] is the best sum of a subarray ending at i,
        # rear[i] is the best sum seen anywhere in array[0..i].
        sum_value = [int(self.array[0])] * len(self.array)
        rear = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(
                int(self.array[i]) + sum_value[i - 1], int(self.array[i])
            )
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re))
| 398 | 1 |
"""simple docstring"""
def solution() -> int:
    """Product of the 1st, 10th, 100th, ..., 1,000,000th digits of the
    Champernowne constant 0.123456789101112..."""
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9_999])
        * int(constant[99_999])
        * int(constant[999_999])
    )
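
# Indexing is 0-based, so constant[9] is the 10th digit of the concatenation
# "123456789101112..." used to define the Champernowne constant.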
if __name__ == "__main__":
print(solution())
| 716 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput(BaseOutput):
    """Output of VQModel.encode, holding the (pre-quantization) latents."""

    latents: torch.FloatTensor


class VQModel(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 3,
        sample_size: int = 32,
        num_vq_embeddings: int = 256,
        norm_num_groups: int = 32,
        vq_embed_dim: Optional[int] = None,
        scaling_factor: float = 0.18_215,
        norm_type: str = "group",
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels,
            out_channels=latent_channels,
            down_block_types=down_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            double_z=False,
        )

        vq_embed_dim = vq_embed_dim if vq_embed_dim is not None else latent_channels

        self.quant_conv = nn.Conv2d(latent_channels, vq_embed_dim, 1)
        self.quantize = VectorQuantizer(num_vq_embeddings, vq_embed_dim, beta=0.25, remap=None, sane_index_shape=False)
        self.post_quant_conv = nn.Conv2d(vq_embed_dim, latent_channels, 1)

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels,
            out_channels=out_channels,
            up_block_types=up_block_types,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            act_fn=act_fn,
            norm_num_groups=norm_num_groups,
            norm_type=norm_type,
        )

    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True):
        h = self.encoder(x)
        h = self.quant_conv(h)

        if not return_dict:
            return (h,)

        return VQEncoderOutput(latents=h)

    @apply_forward_hook
    def decode(self, h: torch.FloatTensor, force_not_quantize: bool = False, return_dict: bool = True):
        # also go through quantization layer
        if not force_not_quantize:
            quant, emb_loss, info = self.quantize(h)
        else:
            quant = h
        quant2 = self.post_quant_conv(quant)
        dec = self.decoder(quant2, quant if self.config.norm_type == "spatial" else None)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(self, sample: torch.FloatTensor, return_dict: bool = True):
        x = sample
        h = self.encode(x).latents
        dec = self.decode(h).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
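
# Shape sketch (assuming the defaults above): a (1, 3, 32, 32) input yields
# (1, 3, 32, 32) latents, since the single down block performs no downsampling.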
| 244 | 0 |
'''simple docstring'''
import math
import unittest
from transformers import BioGptConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptTokenizer,
)
from transformers.models.biogpt.modeling_biogpt import BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST
class BioGptModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return BioGptConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=True,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(
        self,
        config,
        input_ids,
        token_type_ids,
        input_mask,
        sequence_labels,
        token_labels,
        choice_labels,
        encoder_hidden_states,
        encoder_attention_mask,
    ):
        model = BioGptForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_biogpt_model_attention_mask_past(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config)
        model.to(torch_device)
        model.eval()

        # create attention mask
        attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
        half_seq_length = self.seq_length // 2
        attn_mask[:, half_seq_length:] = 0

        # first forward pass
        output, past_key_values = model(input_ids, attention_mask=attn_mask).to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # change a random masked slice from input_ids
        random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
        random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
        input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens

        # append to next input_ids and attn_mask
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        attn_mask = torch.cat(
            [attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
            dim=1,
        )

        # get two different outputs
        output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values, attention_mask=attn_mask)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_biogpt_model_past_large_inputs(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        model = BioGptModel(config=config).to(torch_device).eval()

        attention_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))

    def create_and_check_forward_and_backwards(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args, gradient_checkpointing=False
    ):
        model = BioGptForCausalLM(config)
        model.to(torch_device)
        if gradient_checkpointing:
            model.gradient_checkpointing_enable()

        result = model(input_ids, labels=input_ids)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
        result.loss.backward()

    def create_and_check_biogpt_weight_initialization(self, config, *args):
        model = BioGptModel(config)
        model_std = model.config.initializer_range / math.sqrt(2 * model.config.num_hidden_layers)
        for key in model.state_dict().keys():
            if "c_proj" in key and "weight" in key:
                self.parent.assertLessEqual(abs(torch.std(model.state_dict()[key]) - model_std), 0.001)
                self.parent.assertLessEqual(abs(torch.mean(model.state_dict()[key]) - 0.0), 0.01)

    def create_and_check_biogpt_for_token_classification(
        self, config, input_ids, input_mask, head_mask, token_type_ids, *args
    ):
        config.num_labels = self.num_labels
        model = BioGptForTokenClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict


@require_torch
class BioGptModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BioGptModel, BioGptForCausalLM, BioGptForSequenceClassification, BioGptForTokenClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (BioGptForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "feature-extraction": BioGptModel,
            "text-classification": BioGptForSequenceClassification,
            "text-generation": BioGptForCausalLM,
            "token-classification": BioGptForTokenClassification,
            "zero-shot": BioGptForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_pruning = False

    def setUp(self):
        self.model_tester = BioGptModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BioGptConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_biogpt_model_att_mask_past(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_attention_mask_past(*config_and_inputs)

    def test_biogpt_gradient_checkpointing(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_forward_and_backwards(*config_and_inputs, gradient_checkpointing=True)

    def test_biogpt_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_model_past_large_inputs(*config_and_inputs)

    def test_biogpt_weight_initialization(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_weight_initialization(*config_and_inputs)

    def test_biogpt_token_classification_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_biogpt_for_token_classification(*config_and_inputs)

    @slow
    def test_batch_generation(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        tokenizer.padding_side = "left"

        # Define PAD Token = EOS Token = 50256
        tokenizer.pad_token = tokenizer.eos_token
        model.config.pad_token_id = model.config.eos_token_id

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="pt", padding=True)
        input_ids = inputs["input_ids"].to(torch_device)

        outputs = model.generate(
            input_ids=input_ids,
            attention_mask=inputs["attention_mask"].to(torch_device),
        )

        inputs_non_padded = tokenizer(sentences[0], return_tensors="pt").input_ids.to(torch_device)
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - inputs["attention_mask"][-1].long().sum().cpu().item()
        inputs_padded = tokenizer(sentences[1], return_tensors="pt").input_ids.to(torch_device)
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit bigger than a little bit.",
            "Today, I have a good idea of how to use the information",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])

    @slow
    def test_model_from_pretrained(self):
        for model_name in BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BioGptModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_biogpt_sequence_classification_model(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor([self.model_tester.batch_size], self.model_tester.type_sequence_label_size)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))

    def test_biogpt_sequence_classification_model_for_multi_label(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.num_labels = 3
        config.problem_type = "multi_label_classification"
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        sequence_labels = ids_tensor(
            [self.model_tester.batch_size, config.num_labels], self.model_tester.type_sequence_label_size
        ).to(torch.float)
        model = BioGptForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=attention_mask, labels=sequence_labels)
        self.assertEqual(result.logits.shape, (self.model_tester.batch_size, self.model_tester.num_labels))


@require_torch
class BioGptModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_lm_head_model(self):
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        input_ids = torch.tensor([[2, 4805, 9, 656, 21]])
        output = model(input_ids)[0]

        vocab_size = 42_384

        expected_shape = torch.Size((1, 5, vocab_size))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[-9.5236, -9.8918, 10.4557], [-11.0469, -9.6423, 8.1022], [-8.8664, -7.8826, 5.5325]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_biogpt_generation(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")
        model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
        model.to(torch_device)
        torch.manual_seed(0)
        tokenized = tokenizer("COVID-19 is", return_tensors="pt").to(torch_device)
        output_ids = model.generate(
            **tokenized,
            min_length=100,
            max_length=1024,
            num_beams=5,
            early_stopping=True,
        )
        output_str = tokenizer.decode(output_ids[0], skip_special_tokens=True)

        EXPECTED_OUTPUT_STR = (
            "COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the"
            " causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and"
            " territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK),"
            " and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and"
            " more than 800,000 deaths."
        )
        self.assertEqual(output_str, EXPECTED_OUTPUT_STR)
| 8 |
import time
from dataclasses import dataclass
from multiprocessing import Pool
from unittest import TestCase
from unittest.mock import patch
import multiprocess
import numpy as np
import pytest
from datasets.utils.py_utils import (
NestedDataStructure,
asdict,
iflatmap_unordered,
map_nested,
temp_seed,
temporary_assignment,
zip_dict,
)
from .utils import require_tf, require_torch
def np_sum(x):  # picklable for multiprocessing
    return x.sum()


def add_one(i):  # picklable for multiprocessing
    return i + 1
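
# These helpers live at module scope because the standard multiprocessing
# module pickles functions by reference; local functions and lambdas cannot
# be pickled (the lambda case is asserted in test_map_nested below).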
@dataclass
class A:
    x: int
    y: str
class PyUtilsTest(TestCase):
    def test_map_nested(self):
        s1 = {}
        s2 = []
        s3 = 1
        s4 = [1, 2]
        s5 = {"a": 1, "b": 2}
        s6 = {"a": [1, 2], "b": [3, 4]}
        s7 = {"a": {"1": 1}, "b": 2}
        s8 = {"a": 1, "b": 2, "c": 3, "d": 4}
        expected_map_nested_s1 = {}
        expected_map_nested_s2 = []
        expected_map_nested_s3 = 2
        expected_map_nested_s4 = [2, 3]
        expected_map_nested_s5 = {"a": 2, "b": 3}
        expected_map_nested_s6 = {"a": [2, 3], "b": [4, 5]}
        expected_map_nested_s7 = {"a": {"1": 2}, "b": 3}
        expected_map_nested_s8 = {"a": 2, "b": 3, "c": 4, "d": 5}
        self.assertEqual(map_nested(add_one, s1), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8), expected_map_nested_s8)

        num_proc = 2
        self.assertEqual(map_nested(add_one, s1, num_proc=num_proc), expected_map_nested_s1)
        self.assertEqual(map_nested(add_one, s2, num_proc=num_proc), expected_map_nested_s2)
        self.assertEqual(map_nested(add_one, s3, num_proc=num_proc), expected_map_nested_s3)
        self.assertEqual(map_nested(add_one, s4, num_proc=num_proc), expected_map_nested_s4)
        self.assertEqual(map_nested(add_one, s5, num_proc=num_proc), expected_map_nested_s5)
        self.assertEqual(map_nested(add_one, s6, num_proc=num_proc), expected_map_nested_s6)
        self.assertEqual(map_nested(add_one, s7, num_proc=num_proc), expected_map_nested_s7)
        self.assertEqual(map_nested(add_one, s8, num_proc=num_proc), expected_map_nested_s8)

        sn = {"a": np.eye(2), "b": np.zeros(3), "c": np.ones(2)}
        expected_map_nested_sn_sum = {"a": 2, "b": 0, "c": 2}
        expected_map_nested_sn_int = {
            "a": np.eye(2).astype(int),
            "b": np.zeros(3).astype(int),
            "c": np.ones(2).astype(int),
        }
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        self.assertEqual(map_nested(np_sum, sn, map_numpy=False, num_proc=num_proc), expected_map_nested_sn_sum)
        self.assertEqual(
            {k: v.tolist() for k, v in map_nested(int, sn, map_numpy=True, num_proc=num_proc).items()},
            {k: v.tolist() for k, v in expected_map_nested_sn_int.items()},
        )
        with self.assertRaises(AttributeError):  # can't pickle a local lambda
            map_nested(lambda x: x + 1, sn, num_proc=num_proc)

    def test_zip_dict(self):
        d1 = {"a": 1, "b": 2}
        d2 = {"a": 3, "b": 4}
        d3 = {"a": 5, "b": 6}
        expected_zip_dict_result = sorted([("a", (1, 3, 5)), ("b", (2, 4, 6))])
        self.assertEqual(sorted(zip_dict(d1, d2, d3)), expected_zip_dict_result)

    def test_temporary_assignment(self):
        class Foo:
            my_attr = "bar"

        foo = Foo()
        self.assertEqual(foo.my_attr, "bar")
        with temporary_assignment(foo, "my_attr", "BAR"):
            self.assertEqual(foo.my_attr, "BAR")
        self.assertEqual(foo.my_attr, "bar")
@pytest.mark.parametrize(
'''iterable_length, num_proc, expected_num_proc''' , [
(1, None, 1),
(1, 1, 1),
(2, None, 1),
(2, 1, 1),
(2, 2, 1),
(2, 3, 1),
(3, 2, 1),
(16, 16, 16),
(16, 17, 16),
(17, 16, 16),
] , )
def test_map_nested_num_proc(iterable_length, num_proc, expected_num_proc):
    with patch("datasets.utils.py_utils._single_map_nested") as mock_single_map_nested, patch(
        "datasets.parallel.parallel.Pool"
    ) as mock_multiprocessing_pool:
        data_struct = {f"{i}": i for i in range(iterable_length)}
        _ = map_nested(lambda x: x + 10, data_struct, num_proc=num_proc, parallel_min_length=16)
        if expected_num_proc == 1:
            assert mock_single_map_nested.called
            assert not mock_multiprocessing_pool.called
        else:
            assert not mock_single_map_nested.called
            assert mock_multiprocessing_pool.called
            assert mock_multiprocessing_pool.call_args[0][0] == expected_num_proc
class TempSeedTest(TestCase):
    @require_tf
    def test_tensorflow(self):
        import tensorflow as tf
        from tensorflow.keras import layers

        model = layers.Dense(2)

        def gen_random_output():
            x = tf.random.uniform((1, 3))
            return model(x).numpy()

        with temp_seed(42, set_tensorflow=True):
            out1 = gen_random_output()
        with temp_seed(42, set_tensorflow=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    @require_torch
    def test_torch(self):
        import torch

        def gen_random_output():
            model = torch.nn.Linear(3, 2)
            x = torch.rand(1, 3)
            return model(x).detach().numpy()

        with temp_seed(42, set_pytorch=True):
            out1 = gen_random_output()
        with temp_seed(42, set_pytorch=True):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)

    def test_numpy(self):
        def gen_random_output():
            return np.random.rand(1, 3)

        with temp_seed(42):
            out1 = gen_random_output()
        with temp_seed(42):
            out2 = gen_random_output()
        out3 = gen_random_output()

        np.testing.assert_equal(out1, out2)
        self.assertGreater(np.abs(out1 - out3).sum(), 0)
@pytest.mark.parametrize("input_data", [{}])
def test_nested_data_structure_data(input_data):
    output_data = NestedDataStructure(input_data).data
    assert output_data == input_data
@pytest.mark.parametrize(
'''data, expected_output''' , [
({}, []),
([], []),
('''foo''', ['''foo''']),
(['''foo''', '''bar'''], ['''foo''', '''bar''']),
([['''foo''', '''bar''']], ['''foo''', '''bar''']),
([[['''foo'''], ['''bar''']]], ['''foo''', '''bar''']),
([[['''foo'''], '''bar''']], ['''foo''', '''bar''']),
({'''a''': 1, '''b''': 2}, [1, 2]),
({'''a''': [1, 2], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[1, 2]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[[3], [4]]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [[3, 4]]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, 4]}, [1, 2, 3, 4]),
({'''a''': [[[1], [2]]], '''b''': [3, [4]]}, [1, 2, 3, 4]),
({'''a''': {'''1''': 1}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': 2}, [1, 2]),
({'''a''': {'''1''': [1]}, '''b''': [2]}, [1, 2]),
] , )
def test_flatten(data, expected_output):
    output = NestedDataStructure(data).flatten()
    assert output == expected_output
def test_asdict():
    input = A(x=1, y="foobar")
    expected_output = {"x": 1, "y": "foobar"}
    assert asdict(input) == expected_output

    input = {"a": {"b": A(x=10, y="foo")}, "c": [A(x=20, y="bar")]}
    expected_output = {"a": {"b": {"x": 10, "y": "foo"}}, "c": [{"x": 20, "y": "bar"}]}
    assert asdict(input) == expected_output

    with pytest.raises(TypeError):
        asdict([1, A(x=10, y="foo")])
def _split_text(text: str):
    return text.split()


def _2seconds_generator_of_2items_with_timing(content):
    yield (time.time(), content)
    time.sleep(2)
    yield (time.time(), content)


def test_iflatmap_unordered():
    # check pool from the standard library (uses pickle)
    with Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check multiprocess from pathos (uses dill for pickling)
    with multiprocess.Pool(2) as pool:
        out = list(iflatmap_unordered(pool, _split_text, kwargs_iterable=[{"text": "hello there"}] * 10))
        assert out.count("hello") == 10
        assert out.count("there") == 10
        assert len(out) == 20

    # check that we get items as fast as possible
    with Pool(2) as pool:
        out = []
        for yield_time, content in iflatmap_unordered(
            pool, _2seconds_generator_of_2items_with_timing, kwargs_iterable=[{"content": "a"}, {"content": "b"}]
        ):
            assert yield_time < time.time() + 0.1, "we should get each item directly after it was yielded"
            out.append(content)
        assert out.count("a") == 2
        assert out.count("b") == 2
        assert len(out) == 4
| 252 | 0 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest ( TestCase ):
    """simple docstring"""
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5
        # Realm tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"test",
"question",
"this",
"is",
"the",
"first",
"second",
"third",
"fourth",
"fifth",
"record",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        realm_tokenizer_path = os.path.join(self.tmpdirname , "realm_tokenizer" )
        os.makedirs(realm_tokenizer_path , exist_ok=True )
        self.vocab_file = os.path.join(realm_tokenizer_path , VOCAB_FILES_NAMES["vocab_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
        realm_block_records_path = os.path.join(self.tmpdirname , "realm_block_records" )
        os.makedirs(realm_block_records_path , exist_ok=True )
    def get_tokenizer( self ):
return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname , "realm_tokenizer" ) )
    def tearDown( self ):
shutil.rmtree(self.tmpdirname )
    def get_config( self ):
        config = RealmConfig(num_block_records=self.num_block_records )
return config
    def get_dummy_dataset( self ):
        dataset = Dataset.from_dict(
{
"id": ["0", "1"],
"question": ["foo", "bar"],
"answers": [["Foo", "Bar"], ["Bar"]],
} )
return dataset
    def get_dummy_block_records( self ):
        block_records = np.array(
[
b"This is the first record",
b"This is the second record",
b"This is the third record",
b"This is the fourth record",
b"This is the fifth record",
b"This is a longer longer longer record",
            ] , dtype=object , )
return block_records
    def get_dummy_retriever( self ):
        retriever = RealmRetriever(
block_records=self.get_dummy_block_records() , tokenizer=self.get_tokenizer() , )
return retriever
    def test_retrieve( self ):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual(len(has_answers ) , 2 )
        self.assertEqual(len(start_pos ) , 2 )
        self.assertEqual(len(end_pos ) , 2 )
self.assertEqual(concat_inputs.input_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.attention_mask.shape , (2, 10) )
self.assertEqual(concat_inputs.token_type_ids.shape , (2, 10) )
self.assertEqual(concat_inputs.special_tokens_mask.shape , (2, 10) )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"] , )
self.assertEqual(
tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1] ) , ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"] , )
    def test_block_has_answer( self ):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer
        retrieved_block_ids = np.array([0, 3, 5] , dtype="long" )
        question_input_ids = tokenizer(["Test question"] ).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"] , add_special_tokens=False , return_token_type_ids=False , return_attention_mask=False , ).input_ids
        max_length = config.reader_seq_len
        has_answers , start_pos , end_pos , concat_inputs = retriever(
            retrieved_block_ids , question_input_ids , answer_ids=answer_ids , max_length=max_length , return_tensors="np" )
        self.assertEqual([False, True, True] , has_answers )
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]] , start_pos )
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]] , end_pos )
    def test_save_load_pretrained( self ):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname , "realm_block_records" ) )
        self.assertEqual(retriever.block_records[0] , b"This is the first record" )
        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download" ) as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname , "realm_block_records" ) , _REALM_BLOCK_RECORDS_FILENAME )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa" )
        self.assertEqual(retriever.block_records[0] , b"This is the first record" )
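# Editor's note (illustrative, not from the original test file): the dummy
# block records above use a NumPy array with dtype=object because the records
# are variable-length byte strings; a fixed-width "S" dtype would silently
# truncate longer entries.
def _object_dtype_demo():
    records = np.array([b"short", b"a much longer record"], dtype=object)
    assert records[1] == b"a much longer record"  # survives untruncated
    return records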
| 709 |
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector :
    """simple docstring"""
    def __init__( self , components = None ):
        if components is None:
            components = []
        self.__components = list(components )
def __len__( self ):
return len(self.__components )
def __str__( self ):
return "(" + ",".join(map(_lowerCAmelCase , self.__components ) ) + ")"
    def __add__( self , other ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] + other.component(i ) for i in range(size )]
            return Vector(result )
        else:
            raise Exception("must have the same size" )
    def __sub__( self , other ):
        size = len(self )
        if size == len(other ):
            result = [self.__components[i] - other.component(i ) for i in range(size )]
            return Vector(result )
        else: # error case
            raise Exception("must have the same size" )
    @overload
    def __mul__( self , other: float ) -> "Vector":
        ...
    @overload
    def __mul__( self , other: "Vector" ) -> float:
        ...
    def __mul__( self , other ):
        if isinstance(other , (float, int) ):
            scaled = [c * other for c in self.__components]
            return Vector(scaled )
        elif isinstance(other , Vector ) and len(self ) == len(other ):
            size = len(self )
            prods = [self.__components[i] * other.component(i ) for i in range(size )]
            return sum(prods )
        else: # error case
            raise Exception("invalid operand!" )
    def copy( self ):
        return Vector(self.__components )
    def component( self , i ):
        if isinstance(i , int ) and -len(self.__components ) <= i < len(self.__components ):
            return self.__components[i]
        else:
            raise Exception("index out of range" )
    def change_component( self , pos , value ):
        assert -len(self.__components ) <= pos < len(self.__components )
        self.__components[pos] = value
    def euclidean_length( self ):
        if len(self.__components ) == 0:
            raise Exception("Vector is empty" )
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares ) )
    def angle( self , other , deg = False ):
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den ) )
        else:
            return math.acos(num / den )
def zero_vector ( dimension) ->Vector:
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector ( dimension, pos) ->Vector:
    assert isinstance(dimension, int) and (isinstance(pos, int))
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy ( scalar, x, y) ->Vector:
    assert (
        isinstance(x, Vector)
        and isinstance(y, Vector)
        and (isinstance(scalar, (int, float)))
    )
    return x * scalar + y
def random_vector ( n, a, b) ->Vector:
    random.seed(None)
    ans = [random.randint(a, b) for _ in range(n)]
    return Vector(ans)
class Matrix :
    """simple docstring"""
    def __init__( self , matrix , w , h ):
        self.__matrix = matrix
        self.__width = w
        self.__height = h
def __str__( self ):
lowerCamelCase__ = ""
for i in range(self.__height ):
ans += "|"
for j in range(self.__width ):
if j < self.__width - 1:
ans += str(self.__matrix[i][j] ) + ","
else:
ans += str(self.__matrix[i][j] ) + "|\n"
return ans
    def __add__( self , other ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] + other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
else:
raise Exception("matrix must have the same dimension!" )
    def __sub__( self , other ):
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height ):
                row = [
                    self.__matrix[i][j] - other.component(i , j )
                    for j in range(self.__width )
                ]
                matrix.append(row )
            return Matrix(matrix , self.__width , self.__height )
else:
raise Exception("matrices must have the same dimension!" )
    @overload
    def __mul__( self , other: float ) -> "Matrix":
        ...
    @overload
    def __mul__( self , other: Vector ) -> Vector:
        ...
    def __mul__( self , other ):
        if isinstance(other , Vector ): # matrix-vector
            if len(other ) == self.__width:
                ans = zero_vector(self.__height )
                for i in range(self.__height ):
                    prods = [
                        self.__matrix[i][j] * other.component(j )
                        for j in range(self.__width )
                    ]
                    ans.change_component(i , sum(prods ) )
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!" )
        elif isinstance(other , (int, float) ): # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width )]
                for i in range(self.__height )
            ]
            return Matrix(matrix , self.__width , self.__height )
        return None
    def height( self ):
        return self.__height
    def width( self ):
        return self.__width
    def component( self , x , y ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("component: indices out of bounds" )
    def change_component( self , x , y , value ):
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds" )
    def minor( self , x , y ):
        if self.__height != self.__width:
            raise Exception("Matrix is not square" )
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor ) ):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor , self.__width - 1 , self.__height - 1 ).determinant()
    def cofactor( self , x , y ):
        if self.__height != self.__width:
            raise Exception("Matrix is not square" )
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x , y )
        else:
            raise Exception("Indices out of bounds" )
    def determinant( self ):
if self.__height != self.__width:
raise Exception("Matrix is not square" )
if self.__height < 1:
raise Exception("Matrix has no element" )
elif self.__height == 1:
return self.__matrix[0][0]
elif self.__height == 2:
return (
self.__matrix[0][0] * self.__matrix[1][1]
- self.__matrix[0][1] * self.__matrix[1][0]
)
else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0 , y ) for y in range(self.__width )
            ]
            return sum(cofactor_prods )
def square_zero_matrix ( n) ->Matrix:
    ans = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix ( width, height, a, b) ->Matrix:
    random.seed(None)
    matrix = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
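# Editor's usage sketch (not part of the original module): a few quick checks
# of the Vector and Matrix classes defined above.
def _linear_algebra_demo():
    x = Vector([1, 2, 3])
    y = Vector([3, 2, 1])
    assert str(x + y) == "(4,4,4)"
    assert x * y == 10  # dot product: 1*3 + 2*2 + 3*1
    m = Matrix([[1, 2], [3, 4]], 2, 2)
    assert m.determinant() == -2  # 1*4 - 2*3
    assert str(m * Vector([1, 1])) == "(3,7)"  # row sums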
| 360 | 0 |
from typing import List, Optional, Union
import torch
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
EXAMPLE_DOC_STRING = """
Examples:
```py
>>> import torch
>>> import numpy as np
>>> from diffusers import KandinskyV22PriorPipeline, KandinskyV22ControlnetPipeline
>>> from transformers import pipeline
>>> from diffusers.utils import load_image
>>> def make_hint(image, depth_estimator):
... image = depth_estimator(image)[\"depth\"]
... image = np.array(image)
... image = image[:, :, None]
... image = np.concatenate([image, image, image], axis=2)
... detected_map = torch.from_numpy(image).float() / 255.0
... hint = detected_map.permute(2, 0, 1)
... return hint
>>> depth_estimator = pipeline(\"depth-estimation\")
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-prior\", torch_dtype=torch.float16
... )
>>> pipe_prior = pipe_prior.to(\"cuda\")
>>> pipe = KandinskyV22ControlnetPipeline.from_pretrained(
... \"kandinsky-community/kandinsky-2-2-controlnet-depth\", torch_dtype=torch.float16
... )
>>> pipe = pipe.to(\"cuda\")
>>> img = load_image(
... \"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main\"
... \"/kandinsky/cat.png\"
... ).resize((768, 768))
>>> hint = make_hint(img, depth_estimator).unsqueeze(0).half().to(\"cuda\")
>>> prompt = \"A robot, 4k photo\"
>>> negative_prior_prompt = \"lowres, text, error, cropped, worst quality, low quality, jpeg artifacts, ugly, duplicate, morbid, mutilated, out of frame, extra fingers, mutated hands, poorly drawn hands, poorly drawn face, mutation, deformed, blurry, dehydrated, bad anatomy, bad proportions, extra limbs, cloned face, disfigured, gross proportions, malformed limbs, missing arms, missing legs, extra arms, extra legs, fused fingers, too many fingers, long neck, username, watermark, signature\"
>>> generator = torch.Generator(device=\"cuda\").manual_seed(43)
>>> image_emb, zero_image_emb = pipe_prior(
... prompt=prompt, negative_prompt=negative_prior_prompt, generator=generator
... ).to_tuple()
>>> images = pipe(
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... hint=hint,
... num_inference_steps=50,
... generator=generator,
... height=768,
... width=768,
... ).images
>>> images[0].save(\"robot_cat.png\")
```
"""
def downscale_height_and_width ( height , width , scale_factor=8 ):
    """simple docstring"""
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor
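# Editor's sketch (not from the original file): with the default scale_factor=8
# the helper rounds spatial sizes up to the next multiple of 8**2 = 64.
def _downscale_demo():
    assert downscale_height_and_width(768, 768) == (768, 768)  # already a multiple of 64
    assert downscale_height_and_width(500, 500) == (512, 512)  # 500 // 64 = 7 with remainder -> 8 * 64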
class KandinskyV22ControlnetPipeline ( DiffusionPipeline ):
    """simple docstring"""
    def __init__( self , unet , scheduler , movq , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(
            unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        '''simple docstring'''
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        '''simple docstring'''
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('''Please install accelerate via `pip install accelerate`''' )
        device = torch.device(F"cuda:{gpu_id}" )
        models = [
            self.unet,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        '''simple docstring'''
        if is_accelerate_available() and is_accelerate_version('''>=''' , '''0.17.0.dev0''' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('''`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.''' )
        device = torch.device(F"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('''cpu''' , silence_dtype_warnings=True )
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device( self ):
'''simple docstring'''
if not hasattr(self.unet , '''_hf_hook''' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '''_hf_hook''' )
and hasattr(module._hf_hook , '''execution_device''' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , image_embeds , negative_image_embeds , hint , height = 5_12 , width = 5_12 , num_inference_steps = 1_00 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        '''simple docstring'''
        device = self._execution_device
        do_classifier_free_guidance = guidance_scale > 1.0
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if isinstance(hint , list ):
            hint = torch.cat(hint , dim=0 )
        batch_size = image_embeds.shape[0] * num_images_per_prompt
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            hint = hint.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(dtype=self.unet.dtype , device=device )
            hint = torch.cat([hint, hint] , dim=0 ).to(dtype=self.unet.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.movq.config.latent_channels
        height , width = downscale_height_and_width(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , image_embeds.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'''image_embeds''': image_embeds, '''hint''': hint}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=None , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , '''variance_type''' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , )[0]
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['''sample''']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(F"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
| 534 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_luke"""] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
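# Editor's note (sketch, not part of this module): _LazyModule defers the heavy
# submodule imports until an attribute is first accessed, roughly like:
#
#     import importlib
#     class _LazySketch:
#         def __init__(self, name, import_structure):
#             self._name = name
#             self._attr_to_module = {a: m for m, attrs in import_structure.items() for a in attrs}
#         def __getattr__(self, attr):
#             module = importlib.import_module("." + self._attr_to_module[attr], self._name)
#             return getattr(module, attr)
#
# so `import transformers.models.luke` stays cheap until e.g. `LukeModel` is used.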
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 534 | 1 |
"""simple docstring"""
from math import acos, sin
from typing import List, Tuple, Union
import numpy as np
import torch
from PIL import Image
from ...models import AutoencoderKL, UNet2DConditionModel
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import AudioPipelineOutput, BaseOutput, DiffusionPipeline, ImagePipelineOutput
from .mel import Mel
class AudioDiffusionPipeline( DiffusionPipeline ):
    _optional_components = ['vqvae']
    def __init__( self , vqvae , unet , mel , scheduler , ):
        '''simple docstring'''
        super().__init__()
        self.register_modules(unet=unet , scheduler=scheduler , mel=mel , vqvae=vqvae)
    def get_default_steps( self ) -> int:
        '''simple docstring'''
        return 50 if isinstance(self.scheduler , DDIMScheduler) else 10_00
@torch.no_grad()
    def __call__( self , batch_size = 1 , audio_file = None , raw_audio = None , slice = 0 , start_step = 0 , steps = None , generator = None , mask_start_secs = 0 , mask_end_secs = 0 , step_generator = None , eta = 0 , noise = None , encoding = None , return_dict=True , ) -> Union[
        Union[AudioPipelineOutput, ImagePipelineOutput],
        Tuple[List[Image.Image], Tuple[int, List[np.ndarray]]],
    ]:
        '''simple docstring'''
        steps = steps or self.get_default_steps()
        self.scheduler.set_timesteps(steps)
        step_generator = step_generator or generator
        # For backwards compatibility
        if type(self.unet.config.sample_size) == int:
            self.unet.config.sample_size = (self.unet.config.sample_size, self.unet.config.sample_size)
        if noise is None:
            noise = randn_tensor(
                (
                    batch_size,
                    self.unet.config.in_channels,
                    self.unet.config.sample_size[0],
                    self.unet.config.sample_size[1],
                ) , generator=generator , device=self.device , )
        images = noise
        mask = None
        if audio_file is not None or raw_audio is not None:
            self.mel.load_audio(audio_file , raw_audio)
            input_image = self.mel.audio_slice_to_image(slice)
            input_image = np.frombuffer(input_image.tobytes() , dtype='''uint8''').reshape(
                (input_image.height, input_image.width))
            input_image = (input_image / 2_55) * 2 - 1
            input_images = torch.tensor(input_image[np.newaxis, :, :] , dtype=torch.float).to(self.device)
            if self.vqvae is not None:
                input_images = self.vqvae.encode(torch.unsqueeze(input_images , 0)).latent_dist.sample(
                    generator=generator)[0]
                input_images = self.vqvae.config.scaling_factor * input_images
            if start_step > 0:
                images[0, 0] = self.scheduler.add_noise(input_images , noise , self.scheduler.timesteps[start_step - 1])
            pixels_per_second = (
                self.unet.config.sample_size[1] * self.mel.get_sample_rate() / self.mel.x_res / self.mel.hop_length
            )
            mask_start = int(mask_start_secs * pixels_per_second)
            mask_end = int(mask_end_secs * pixels_per_second)
            mask = self.scheduler.add_noise(input_images , noise , torch.tensor(self.scheduler.timesteps[start_step:]))
        for step, t in enumerate(self.progress_bar(self.scheduler.timesteps[start_step:])):
            if isinstance(self.unet , UNet2DConditionModel):
                model_output = self.unet(images , t , encoding)['''sample''']
            else:
                model_output = self.unet(images , t)['''sample''']
            if isinstance(self.scheduler , DDIMScheduler):
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , eta=eta , generator=step_generator , )['''prev_sample''']
            else:
                images = self.scheduler.step(
                    model_output=model_output , timestep=t , sample=images , generator=step_generator , )['''prev_sample''']
            if mask is not None:
                if mask_start > 0:
                    images[:, :, :, :mask_start] = mask[:, step, :, :mask_start]
                if mask_end > 0:
                    images[:, :, :, -mask_end:] = mask[:, step, :, -mask_end:]
        if self.vqvae is not None:
            # 0.18215 was scaling factor used in training to ensure unit variance
            images = 1 / self.vqvae.config.scaling_factor * images
            images = self.vqvae.decode(images)['''sample''']
        images = (images / 2 + 0.5).clamp(0 , 1)
        images = images.cpu().permute(0 , 2 , 3 , 1).numpy()
        images = (images * 2_55).round().astype('''uint8''')
        images = list(
            (Image.fromarray(_[:, :, 0]) for _ in images)
            if images.shape[3] == 1
            else (Image.fromarray(_ , mode='''RGB''').convert('''L''') for _ in images))
        audios = [self.mel.image_to_audio(_) for _ in images]
        if not return_dict:
            return images, (self.mel.get_sample_rate(), audios)
        return BaseOutput(**AudioPipelineOutput(np.array(audios)[:, np.newaxis, :]) , **ImagePipelineOutput(images))
@torch.no_grad()
    def encode( self , images , steps = 50) -> np.ndarray:
        '''simple docstring'''
        assert isinstance(self.scheduler , DDIMScheduler)
        self.scheduler.set_timesteps(steps)
        sample = np.array(
            [np.frombuffer(image.tobytes() , dtype='''uint8''').reshape((1, image.height, image.width)) for image in images])
        sample = (sample / 2_55) * 2 - 1
        sample = torch.Tensor(sample).to(self.device)
        for t in self.progress_bar(torch.flip(self.scheduler.timesteps , (0,))):
            prev_timestep = t - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps
            alpha_prod_t = self.scheduler.alphas_cumprod[t]
            alpha_prod_t_prev = (
                self.scheduler.alphas_cumprod[prev_timestep]
                if prev_timestep >= 0
                else self.scheduler.final_alpha_cumprod
            )
            beta_prod_t = 1 - alpha_prod_t
            model_output = self.unet(sample , t)['''sample''']
            pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output
            sample = (sample - pred_sample_direction) * alpha_prod_t_prev ** (-0.5)
            sample = sample * alpha_prod_t ** 0.5 + beta_prod_t ** 0.5 * model_output
        return sample
    @staticmethod
    def slerp( x0 , x1 , alpha) -> torch.Tensor:
        '''simple docstring'''
        theta = acos(torch.dot(torch.flatten(x0) , torch.flatten(x1)) / torch.norm(x0) / torch.norm(x1))
        return sin((1 - alpha) * theta) * x0 / sin(theta) + sin(alpha * theta) * x1 / sin(theta)
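# Editor's sanity sketch (not part of the original pipeline): slerp reduces to
# its endpoints, since sin(0) = 0 and sin(theta)/sin(theta) = 1.
def _slerp_endpoint_check():
    x0, x1 = torch.tensor([1.0, 0.0]), torch.tensor([0.0, 1.0])
    assert torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 0.0), x0)
    assert torch.allclose(AudioDiffusionPipeline.slerp(x0, x1, 1.0), x1)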
| 78 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DeiTImageProcessor, ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
_a = logging.get_logger(__name__)
def create_rename_keys(config, base_model=False ):
    """simple docstring"""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
('''pre_logits.fc.weight''', '''pooler.dense.weight'''),
('''pre_logits.fc.bias''', '''pooler.dense.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False ):
    """simple docstring"""
    for i in range(config.num_hidden_layers ):
        if base_model:
            prefix = ''''''
        else:
            prefix = '''vit.'''
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
        in_proj_bias = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
        # next, add query, keys and values (in that order) to the state dict
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.query.bias'''] = in_proj_bias[: config.hidden_size]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.key.bias'''] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[F'''{prefix}encoder.layer.{i}.attention.attention.value.bias'''] = in_proj_bias[-config.hidden_size :]
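# Small illustration of the qkv split above (editor's sketch, not from the
# original script): timm stores query/key/value as one fused (3*hidden, hidden)
# matrix, and slicing it in thirds recovers the three projections.
def _qkv_split_demo(hidden_size=4):
    qkv = torch.arange(3 * hidden_size * hidden_size, dtype=torch.float).reshape(3 * hidden_size, hidden_size)
    q = qkv[:hidden_size, :]
    k = qkv[hidden_size : hidden_size * 2, :]
    v = qkv[-hidden_size:, :]
    assert torch.equal(torch.cat([q, k, v]), qkv)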
def remove_classification_head_(state_dict ):
    """simple docstring"""
    ignore_keys = ['''head.weight''', '''head.bias''']
    for k in ignore_keys:
        state_dict.pop(k , None )
def rename_key(dct, old, new ):
    """simple docstring"""
    val = dct.pop(old )
    dct[new] = val
def prepare_img():
    """simple docstring"""
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url, stream=True ).raw )
    return im
@torch.no_grad()
def convert_vit_checkpoint(vit_name, pytorch_dump_folder_path ):
    """simple docstring"""
    config = ViTConfig()
    base_model = False
    # dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
    if vit_name[-5:] == "in21k":
        base_model = True
        config.patch_size = int(vit_name[-12:-10] )
        config.image_size = int(vit_name[-9:-6] )
    else:
        config.num_labels = 10_00
        repo_id = '''huggingface/label-files'''
        filename = '''imagenet-1k-id2label.json'''
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type='''dataset''' ), '''r''' ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
        config.patch_size = int(vit_name[-6:-4] )
        config.image_size = int(vit_name[-3:] )
    # size of the architecture
    if "deit" in vit_name:
        if vit_name[9:].startswith('''tiny''' ):
            config.hidden_size = 1_92
            config.intermediate_size = 7_68
            config.num_hidden_layers = 12
            config.num_attention_heads = 3
        elif vit_name[9:].startswith('''small''' ):
            config.hidden_size = 3_84
            config.intermediate_size = 15_36
            config.num_hidden_layers = 12
            config.num_attention_heads = 6
        else:
            pass
    else:
        if vit_name[4:].startswith('''small''' ):
            config.hidden_size = 7_68
            config.intermediate_size = 23_04
            config.num_hidden_layers = 8
            config.num_attention_heads = 8
        elif vit_name[4:].startswith('''base''' ):
            pass
        elif vit_name[4:].startswith('''large''' ):
            config.hidden_size = 10_24
            config.intermediate_size = 40_96
            config.num_hidden_layers = 24
            config.num_attention_heads = 16
        elif vit_name[4:].startswith('''huge''' ):
            config.hidden_size = 12_80
            config.intermediate_size = 51_20
            config.num_hidden_layers = 32
            config.num_attention_heads = 16
    # load original model from timm
    timm_model = timm.create_model(vit_name, pretrained=True )
    timm_model.eval()
    # load state_dict of original model, remove and rename some keys
    state_dict = timm_model.state_dict()
    if base_model:
        remove_classification_head_(state_dict )
    rename_keys = create_rename_keys(config, base_model )
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest )
    read_in_q_k_v(state_dict, config, base_model )
    # load HuggingFace model
    if vit_name[-5:] == "in21k":
        model = ViTModel(config ).eval()
    else:
        model = ViTForImageClassification(config ).eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image, prepared by ViTImageProcessor/DeiTImageProcessor
    if "deit" in vit_name:
        image_processor = DeiTImageProcessor(size=config.image_size )
    else:
        image_processor = ViTImageProcessor(size=config.image_size )
    encoding = image_processor(images=prepare_img(), return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    outputs = model(pixel_values )
    if base_model:
        timm_pooled_output = timm_model.forward_features(pixel_values )
        assert timm_pooled_output.shape == outputs.pooler_output.shape
        assert torch.allclose(timm_pooled_output, outputs.pooler_output, atol=1e-3 )
    else:
        timm_logits = timm_model(pixel_values )
        assert timm_logits.shape == outputs.logits.shape
        assert torch.allclose(timm_logits, outputs.logits, atol=1e-3 )
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_patch16_224""",
type=str,
help="""Name of the ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
    args = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path)
| 78 | 1 |
"""simple docstring"""
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
SAMPLE_TEXT = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def convert_bort_checkpoint_to_pytorch(bort_checkpoint_path: str , pytorch_dump_folder_path: str ) -> None:
    """simple docstring"""
    bort_4_8_768_1024_hparams = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1_024,
'hidden_size': 768,
'max_length': 512,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1_024,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1E-5,
'token_type_vocab_size': 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=False , output_all_encodings=False , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , 'models' )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
        'vocab_size': len(bort_vocab ),
}
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
        return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"
return gluon_param
    hf_bort_model.bert.embeddings.word_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
    hf_bort_model.bert.embeddings.position_embeddings.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
    hf_bort_model.bert.embeddings.LayerNorm.bias = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
    hf_bort_model.bert.embeddings.LayerNorm.weight = check_and_map_params(
        hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
    # Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
    hf_bort_model.bert.embeddings.token_type_embeddings.weight.data = torch.zeros_like(
        hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
        layer: BertLayer = hf_bort_model.bert.encoder.layer[i]
        # self attention
        self_attn: BertSelfAttention = layer.attention.self
        self_attn.key.bias.data = check_and_map_params(
            self_attn.key.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.bias" )
        self_attn.key.weight.data = check_and_map_params(
            self_attn.key.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_key.weight" )
        self_attn.query.bias.data = check_and_map_params(
            self_attn.query.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.bias" )
        self_attn.query.weight.data = check_and_map_params(
            self_attn.query.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_query.weight" )
        self_attn.value.bias.data = check_and_map_params(
            self_attn.value.bias.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.bias" )
        self_attn.value.weight.data = check_and_map_params(
            self_attn.value.weight.data , F"encoder.transformer_cells.{i}.attention_cell.proj_value.weight" )
# self attention output
        self_output: BertSelfOutput = layer.attention.output
        self_output.dense.bias = check_and_map_params(
            self_output.dense.bias , F"encoder.transformer_cells.{i}.proj.bias" )
        self_output.dense.weight = check_and_map_params(
            self_output.dense.weight , F"encoder.transformer_cells.{i}.proj.weight" )
        self_output.LayerNorm.bias = check_and_map_params(
            self_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.layer_norm.beta" )
        self_output.LayerNorm.weight = check_and_map_params(
            self_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.layer_norm.gamma" )
# intermediate
        intermediate: BertIntermediate = layer.intermediate
        intermediate.dense.bias = check_and_map_params(
            intermediate.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_1.bias" )
        intermediate.dense.weight = check_and_map_params(
            intermediate.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_1.weight" )
# output
        bert_output: BertOutput = layer.output
        bert_output.dense.bias = check_and_map_params(
            bert_output.dense.bias , F"encoder.transformer_cells.{i}.ffn.ffn_2.bias" )
        bert_output.dense.weight = check_and_map_params(
            bert_output.dense.weight , F"encoder.transformer_cells.{i}.ffn.ffn_2.weight" )
        bert_output.LayerNorm.bias = check_and_map_params(
            bert_output.LayerNorm.bias , F"encoder.transformer_cells.{i}.ffn.layer_norm.beta" )
        bert_output.LayerNorm.weight = check_and_map_params(
            bert_output.LayerNorm.weight , F"encoder.transformer_cells.{i}.ffn.layer_norm.gamma" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained('roberta-base' )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )['input_ids']
    # Get gluon output
    gluon_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=gluon_input_ids , token_types=[] )
    # Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
    hf_bort_model.eval()
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors='pt' )
    output_hf = hf_bort_model(**input_ids )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1E-3 )
    if success:
        print('✔️ Both model do output the same tensors' )
    else:
        print('❌ Both model do **NOT** output the same tensors' )
        print('Absolute difference is:' , max_absolute_diff )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 373 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''configuration_beit''': ['''BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BeitConfig''', '''BeitOnnxConfig''']}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''feature_extraction_beit'''] = ['''BeitFeatureExtractor''']
    _import_structure['''image_processing_beit'''] = ['''BeitImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_beit'''] = [
'''BEIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BeitForImageClassification''',
'''BeitForMaskedImageModeling''',
'''BeitForSemanticSegmentation''',
'''BeitModel''',
'''BeitPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_beit'''] = [
'''FlaxBeitForImageClassification''',
'''FlaxBeitForMaskedImageModeling''',
'''FlaxBeitModel''',
'''FlaxBeitPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 373 | 1 |
import re
from filelock import FileLock
try:
import nltk
    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def add_newline_to_end_of_each_sentence(x: str ) -> str:
    """simple docstring"""
    x = re.sub("""<n>""" , """""" , x )  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x ) )
| 590 |
import argparse
import torch
from transformers import GPT2Config, GPT2Model, load_tf_weights_in_gpt2
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_gpt2_checkpoint_to_pytorch(gpt2_checkpoint_path , gpt2_config_file , pytorch_dump_folder_path ):
    """simple docstring"""
    if gpt2_config_file == "":
        config = GPT2Config()
    else:
        config = GPT2Config.from_json_file(gpt2_config_file )
    model = GPT2Model(config )
    # Load weights from numpy
    load_tf_weights_in_gpt2(model , config , gpt2_checkpoint_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(f"""Save PyTorch model to {pytorch_weights_dump_path}""" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(f"""Save configuration file to {pytorch_config_dump_path}""" )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--gpt2_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--gpt2_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained OpenAI model. \n'''
'''This specifies the model architecture.'''
),
)
    args = parser.parse_args()
    convert_gpt2_checkpoint_to_pytorch(args.gpt2_checkpoint_path, args.gpt2_config_file, args.pytorch_dump_folder_path)
| 590 | 1 |
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester :
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = """gelu"""
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs( self ):
        '''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = ConvBertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=True , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertModel(config=config )
        inputs = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        '''simple docstring'''
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            """input_ids""": input_ids,
            """attention_mask""": input_mask,
            """token_type_ids""": token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        # Each flat input is duplicated along a new axis so the model sees
        # (batch_size, num_choices, seq_length)-shaped tensors.
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)
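

# Illustrative note (assumption, not part of the original test file): ConvBERT
# splits its heads between self-attention and span-based dynamic convolution
# with head_ratio=2, which is why the tests above expect
# num_attention_heads / 2 heads in the returned attention maps.
def _expected_attention_shape(num_attention_heads=4, seq_length=7, head_ratio=2):
    return (num_attention_heads // head_ratio, seq_length, seq_length)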
| 54 |
import argparse
import json
import os
import pickle
import shutil
import numpy as np
import torch
from distiller import Distiller
from lm_seqs_dataset import LmSeqsDataset
from transformers import (
BertConfig,
BertForMaskedLM,
BertTokenizer,
DistilBertConfig,
DistilBertForMaskedLM,
DistilBertTokenizer,
GPTaConfig,
GPTaLMHeadModel,
GPTaTokenizer,
RobertaConfig,
RobertaForMaskedLM,
RobertaTokenizer,
)
from utils import git_log, init_gpu_params, logger, set_seed
MODEL_CLASSES = {
'distilbert': (DistilBertConfig, DistilBertForMaskedLM, DistilBertTokenizer),
'roberta': (RobertaConfig, RobertaForMaskedLM, RobertaTokenizer),
'bert': (BertConfig, BertForMaskedLM, BertTokenizer),
'gpt2': (GPTaConfig, GPTaLMHeadModel, GPTaTokenizer),
}
def sanity_checks(args):
assert (args.mlm and args.alpha_mlm > 0.0) or (not args.mlm and args.alpha_mlm == 0.0)
assert (args.alpha_mlm > 0.0 and args.alpha_clm == 0.0) or (args.alpha_mlm == 0.0 and args.alpha_clm > 0.0)
if args.mlm:
assert os.path.isfile(args.token_counts )
assert (args.student_type in ["roberta", "distilbert"]) and (args.teacher_type in ["roberta", "bert"])
else:
assert (args.student_type in ["gpt2"]) and (args.teacher_type in ["gpt2"])
assert args.teacher_type == args.student_type or (
args.student_type == "distilbert" and args.teacher_type == "bert"
)
assert os.path.isfile(args.student_config )
if args.student_pretrained_weights is not None:
assert os.path.isfile(args.student_pretrained_weights )
if args.freeze_token_type_embds:
assert args.student_type in ["roberta"]
assert args.alpha_ce >= 0.0
assert args.alpha_mlm >= 0.0
assert args.alpha_clm >= 0.0
assert args.alpha_mse >= 0.0
assert args.alpha_cos >= 0.0
assert args.alpha_ce + args.alpha_mlm + args.alpha_clm + args.alpha_mse + args.alpha_cos > 0.0
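

# Minimal usage sketch (assumption, not part of the original script): the
# checks above expect an argparse-style namespace; an MLM distillation run
# needs alpha_mlm > 0 and alpha_clm == 0, a CLM run the reverse, and the loss
# weights must sum to something positive.
#
#   from argparse import Namespace
#   sanity_checks(Namespace(mlm=True, alpha_mlm=0.5, alpha_clm=0.0, ...))  # "..." = the remaining required fields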
def freeze_pos_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.position_embeddings.weight.requires_grad = False
    elif args.student_type == "gpt2":
        student.transformer.wpe.weight.requires_grad = False


def freeze_token_type_embeddings(student, args):
    if args.student_type == "roberta":
        student.roberta.embeddings.token_type_embeddings.weight.requires_grad = False
def main():
    parser = argparse.ArgumentParser(description="Training")
    parser.add_argument("--force", action="store_true", help="Overwrite dump_path if it already exists.")
    parser.add_argument(
        "--dump_path", type=str, required=True, help="The output directory (log, checkpoints, parameters, etc.)"
    )
    parser.add_argument(
        "--data_file",
        type=str,
        required=True,
        help="The binarized file (tokenized + tokens_to_ids) and grouped by sequence.",
    )
    parser.add_argument(
        "--student_type",
        type=str,
        choices=["distilbert", "roberta", "gpt2"],
        required=True,
        help="The student type (DistilBERT, RoBERTa).",
    )
    parser.add_argument("--student_config", type=str, required=True, help="Path to the student configuration.")
    parser.add_argument(
        "--student_pretrained_weights", default=None, type=str, help="Load student initialization checkpoint."
    )
    parser.add_argument(
        "--teacher_type", choices=["bert", "roberta", "gpt2"], required=True, help="Teacher type (BERT, RoBERTa)."
    )
    parser.add_argument("--teacher_name", type=str, required=True, help="The teacher model.")
    parser.add_argument("--temperature", default=2.0, type=float, help="Temperature for the softmax temperature.")
    parser.add_argument(
        "--alpha_ce", default=0.5, type=float, help="Linear weight for the distillation loss. Must be >=0."
    )
    parser.add_argument(
        "--alpha_mlm",
        default=0.0,
        type=float,
        help="Linear weight for the MLM loss. Must be >=0. Should be used in conjunction with `mlm` flag.",
    )
    parser.add_argument("--alpha_clm", default=0.5, type=float, help="Linear weight for the CLM loss. Must be >=0.")
    parser.add_argument("--alpha_mse", default=0.0, type=float, help="Linear weight of the MSE loss. Must be >=0.")
    parser.add_argument(
        "--alpha_cos", default=0.0, type=float, help="Linear weight of the cosine embedding loss. Must be >=0."
    )
    parser.add_argument(
        "--mlm", action="store_true", help="The LM step: MLM or CLM. If `mlm` is True, the MLM is used over CLM."
    )
    parser.add_argument(
        "--mlm_mask_prop",
        default=0.15,
        type=float,
        help="Proportion of tokens for which we need to make a prediction.",
    )
    parser.add_argument("--word_mask", default=0.8, type=float, help="Proportion of tokens to mask out.")
    parser.add_argument("--word_keep", default=0.1, type=float, help="Proportion of tokens to keep.")
    parser.add_argument("--word_rand", default=0.1, type=float, help="Proportion of tokens to randomly replace.")
    parser.add_argument(
        "--mlm_smoothing",
        default=0.7,
        type=float,
        help="Smoothing parameter to emphasize more rare tokens (see XLM, similar to word2vec).",
    )
    parser.add_argument("--token_counts", type=str, help="The token counts in the data_file for MLM.")
    parser.add_argument(
        "--restrict_ce_to_mask",
        action="store_true",
        help="If true, compute the distillation loss only the [MLM] prediction distribution.",
    )
    parser.add_argument(
        "--freeze_pos_embs",
        action="store_true",
        help="Freeze positional embeddings during distillation. For student_type in ['roberta', 'gpt2'] only.",
    )
    parser.add_argument(
        "--freeze_token_type_embds",
        action="store_true",
        help="Freeze token type embeddings during distillation if existent. For student_type in ['roberta'] only.",
    )
    parser.add_argument("--n_epoch", type=int, default=3, help="Number of pass on the whole dataset.")
    parser.add_argument("--batch_size", type=int, default=5, help="Batch size (for each process).")
    parser.add_argument(
        "--group_by_size",
        action="store_false",
        help="If true, group sequences that have similar length into the same batch. Default is true.",
    )
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=50,
        help="Gradient accumulation for larger training batches.",
    )
    parser.add_argument("--warmup_prop", default=0.05, type=float, help="Linear warmup proportion.")
    parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
    parser.add_argument("--learning_rate", default=5e-4, type=float, help="The initial learning rate for Adam.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=5.0, type=float, help="Max gradient norm.")
    parser.add_argument("--initializer_range", default=0.02, type=float, help="Random initialization range.")
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O1",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_gpu", type=int, default=1, help="Number of GPUs in the node.")
    parser.add_argument("--local_rank", type=int, default=-1, help="Distributed training - Local rank")
    parser.add_argument("--seed", type=int, default=56, help="Random seed")
    parser.add_argument("--log_interval", type=int, default=500, help="Tensorboard logging interval.")
    parser.add_argument("--checkpoint_interval", type=int, default=4000, help="Checkpoint interval.")
    args = parser.parse_args()
sanity_checks(UpperCamelCase__ )
# ARGS #
init_gpu_params(UpperCamelCase__ )
set_seed(UpperCamelCase__ )
if args.is_master:
if os.path.exists(args.dump_path ):
if not args.force:
raise ValueError(
                    f"Serialization dir {args.dump_path} already exists, but you have not specified whether to"
                    " overwrite it. Use `--force` if you want to overwrite it."
                )
else:
shutil.rmtree(args.dump_path )
if not os.path.exists(args.dump_path ):
os.makedirs(args.dump_path )
logger.info(f'''Experiment will be dumped and logged in {args.dump_path}''' )
# SAVE PARAMS #
logger.info(f'''Param: {args}''' )
with open(os.path.join(args.dump_path , """parameters.json""" ) , """w""" ) as f:
            json.dump(vars(args), f, indent=4)
git_log(args.dump_path )
    student_config_class, student_model_class, _ = MODEL_CLASSES[args.student_type]
    teacher_config_class, teacher_model_class, teacher_tokenizer_class = MODEL_CLASSES[args.teacher_type]

    # TOKENIZER #
    tokenizer = teacher_tokenizer_class.from_pretrained(args.teacher_name)
    special_tok_ids = {}
    for tok_name, tok_symbol in tokenizer.special_tokens_map.items():
        idx = tokenizer.all_special_tokens.index(tok_symbol)
        special_tok_ids[tok_name] = tokenizer.all_special_ids[idx]
    logger.info(f"Special tokens {special_tok_ids}")
    args.special_tok_ids = special_tok_ids
    args.max_model_input_size = tokenizer.max_model_input_sizes[args.teacher_name]
# DATA LOADER #
logger.info(f'''Loading data from {args.data_file}''' )
    with open(args.data_file, "rb") as fp:
        data = pickle.load(fp)

    if args.mlm:
        logger.info(f"Loading token counts from {args.token_counts} (already pre-computed)")
        with open(args.token_counts, "rb") as fp:
            counts = pickle.load(fp)

        token_probs = np.maximum(counts, 1) ** -args.mlm_smoothing
        for idx in special_tok_ids.values():
            token_probs[idx] = 0.0  # do not predict special tokens
        token_probs = torch.from_numpy(token_probs)
    else:
        token_probs = None

    train_lm_seq_dataset = LmSeqsDataset(params=args, data=data)
logger.info("""Data loader created.""" )
# STUDENT #
logger.info(f'''Loading student config from {args.student_config}''' )
    stu_architecture_config = student_config_class.from_pretrained(args.student_config)
    stu_architecture_config.output_hidden_states = True

    if args.student_pretrained_weights is not None:
        logger.info(f"Loading pretrained weights from {args.student_pretrained_weights}")
        student = student_model_class.from_pretrained(args.student_pretrained_weights, config=stu_architecture_config)
    else:
        student = student_model_class(stu_architecture_config)
if args.n_gpu > 0:
student.to(f'''cuda:{args.local_rank}''' )
logger.info("""Student loaded.""" )
# TEACHER #
    teacher = teacher_model_class.from_pretrained(args.teacher_name, output_hidden_states=True)
if args.n_gpu > 0:
teacher.to(f'''cuda:{args.local_rank}''' )
logger.info(f'''Teacher loaded from {args.teacher_name}.''' )
# FREEZING #
if args.freeze_pos_embs:
        freeze_pos_embeddings(student, args)
    if args.freeze_token_type_embds:
        freeze_token_type_embeddings(student, args)
# SANITY CHECKS #
assert student.config.vocab_size == teacher.config.vocab_size
assert student.config.hidden_size == teacher.config.hidden_size
assert student.config.max_position_embeddings == teacher.config.max_position_embeddings
if args.mlm:
assert token_probs.size(0 ) == stu_architecture_config.vocab_size
# DISTILLER #
torch.cuda.empty_cache()
    distiller = Distiller(
        params=args, dataset=train_lm_seq_dataset, token_probs=token_probs, student=student, teacher=teacher
    )
distiller.train()
logger.info("""Let's go get some drinks.""" )
if __name__ == "__main__":
main() | 407 | 0 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = 'examples/'
REPLACE_PATTERNS = {
'examples': (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
'init': (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
'setup': (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
'doc': (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
'init': 'src/diffusers/__init__.py',
'setup': 'setup.py',
}
README_FILE = 'README.md'
def update_version_in_file(fname, version, pattern):
    """Update the version of one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
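

# Quick self-check (assumption, not part of the original script) of the
# pattern table above: substituting a release version into the __init__ line.
def _example_version_substitution():
    re_pattern, replace = REPLACE_PATTERNS["init"]
    code = '__version__ = "0.14.0.dev0"\n'
    return re_pattern.sub(replace.replace("VERSION", "0.14.0"), code)
    # -> '__version__ = "0.14.0"\n'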
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Reads the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")

    global_version_update(version, patch=patch)
def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")

    global_version_update(version)
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
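

# Sketch (assumption, not part of the original script): how the dev-version
# bump above behaves for a concrete input, using packaging.version directly.
def _example_next_dev_version(current="0.14.0"):
    v = packaging.version.parse(current)
    return f"{v.major}.{v.minor + 1}.0.dev0"  # -> "0.15.0.dev0"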
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work() | 628 |
"""simple docstring"""
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
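

# Quick illustrative check (assumption, not part of the original script):
# three of the four predictions below match the labels, so accuracy is 0.75.
def _example_simple_accuracy():
    assert simple_accuracy(np.array([0, 1, 1, 2]), np.array([0, 1, 0, 2])) == 0.75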
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    task_name: str = field(metadata={"help": "The name of the task to train on: " + ", ".join(processors.keys())})
    data_dir: str = field(metadata={"help": "Should contain the data files for the task."})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. Use'
''' --overwrite_output_dir to overcome.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, )
logger.warning(
        "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
        training_args.local_rank,
        training_args.device,
        training_args.n_gpu,
        bool(training_args.local_rank != -1),
        training_args.fp16,
    )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info('''Training/evaluation parameters %s''', __a )
# Set seed
set_seed(training_args.seed )
try:
        processor = processors[data_args.task_name]()
        label_list = processor.get_labels()
        num_labels = len(label_list)
except KeyError:
raise ValueError('''Task not found: %s''' % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        # p.predictions has shape (num_examples, num_choices); the argmax
        # over axis 1 selects the predicted choice for each example.
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")

        result = trainer.evaluate()

        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))

                results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 628 | 1 |
import argparse
import torch
from transformers import (
EncodecConfig,
EncodecFeatureExtractor,
EncodecModel,
logging,
)
# checkpoints downloaded from:
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_24khz-d7cc33bc.th
# https://huggingface.co/facebook/musicgen-small/resolve/main/compression_state_dict.bin
# https://dl.fbaipublicfiles.com/encodec/v0/encodec_48khz-7e698e3e.th
logging.set_verbosity_info()
logger = logging.get_logger('transformers.models.encodec')
MAPPING_QUANTIZER = {
'quantizer.vq.layers.*._codebook.inited': 'quantizer.layers.*.codebook.inited',
'quantizer.vq.layers.*._codebook.cluster_size': 'quantizer.layers.*.codebook.cluster_size',
'quantizer.vq.layers.*._codebook.embed': 'quantizer.layers.*.codebook.embed',
'quantizer.vq.layers.*._codebook.embed_avg': 'quantizer.layers.*.codebook.embed_avg',
}
MAPPING_ENCODER = {
'encoder.model.0.conv.conv': 'encoder.layers.0.conv',
'encoder.model.1.block.1.conv.conv': 'encoder.layers.1.block.1.conv',
'encoder.model.1.block.3.conv.conv': 'encoder.layers.1.block.3.conv',
'encoder.model.1.shortcut.conv.conv': 'encoder.layers.1.shortcut.conv',
'encoder.model.3.conv.conv': 'encoder.layers.3.conv',
'encoder.model.4.block.1.conv.conv': 'encoder.layers.4.block.1.conv',
'encoder.model.4.block.3.conv.conv': 'encoder.layers.4.block.3.conv',
'encoder.model.4.shortcut.conv.conv': 'encoder.layers.4.shortcut.conv',
'encoder.model.6.conv.conv': 'encoder.layers.6.conv',
'encoder.model.7.block.1.conv.conv': 'encoder.layers.7.block.1.conv',
'encoder.model.7.block.3.conv.conv': 'encoder.layers.7.block.3.conv',
'encoder.model.7.shortcut.conv.conv': 'encoder.layers.7.shortcut.conv',
'encoder.model.9.conv.conv': 'encoder.layers.9.conv',
'encoder.model.10.block.1.conv.conv': 'encoder.layers.10.block.1.conv',
'encoder.model.10.block.3.conv.conv': 'encoder.layers.10.block.3.conv',
'encoder.model.10.shortcut.conv.conv': 'encoder.layers.10.shortcut.conv',
'encoder.model.12.conv.conv': 'encoder.layers.12.conv',
'encoder.model.13.lstm': 'encoder.layers.13.lstm',
'encoder.model.15.conv.conv': 'encoder.layers.15.conv',
}
MAPPING_ENCODER_48K = {
'encoder.model.0.conv.norm': 'encoder.layers.0.norm',
'encoder.model.1.block.1.conv.norm': 'encoder.layers.1.block.1.norm',
'encoder.model.1.block.3.conv.norm': 'encoder.layers.1.block.3.norm',
'encoder.model.1.shortcut.conv.norm': 'encoder.layers.1.shortcut.norm',
'encoder.model.3.conv.norm': 'encoder.layers.3.norm',
'encoder.model.4.block.1.conv.norm': 'encoder.layers.4.block.1.norm',
'encoder.model.4.block.3.conv.norm': 'encoder.layers.4.block.3.norm',
'encoder.model.4.shortcut.conv.norm': 'encoder.layers.4.shortcut.norm',
'encoder.model.6.conv.norm': 'encoder.layers.6.norm',
'encoder.model.7.block.1.conv.norm': 'encoder.layers.7.block.1.norm',
'encoder.model.7.block.3.conv.norm': 'encoder.layers.7.block.3.norm',
'encoder.model.7.shortcut.conv.norm': 'encoder.layers.7.shortcut.norm',
'encoder.model.9.conv.norm': 'encoder.layers.9.norm',
'encoder.model.10.block.1.conv.norm': 'encoder.layers.10.block.1.norm',
'encoder.model.10.block.3.conv.norm': 'encoder.layers.10.block.3.norm',
'encoder.model.10.shortcut.conv.norm': 'encoder.layers.10.shortcut.norm',
'encoder.model.12.conv.norm': 'encoder.layers.12.norm',
'encoder.model.15.conv.norm': 'encoder.layers.15.norm',
}
MAPPING_DECODER = {
'decoder.model.0.conv.conv': 'decoder.layers.0.conv',
'decoder.model.1.lstm': 'decoder.layers.1.lstm',
'decoder.model.3.convtr.convtr': 'decoder.layers.3.conv',
'decoder.model.4.block.1.conv.conv': 'decoder.layers.4.block.1.conv',
'decoder.model.4.block.3.conv.conv': 'decoder.layers.4.block.3.conv',
'decoder.model.4.shortcut.conv.conv': 'decoder.layers.4.shortcut.conv',
'decoder.model.6.convtr.convtr': 'decoder.layers.6.conv',
'decoder.model.7.block.1.conv.conv': 'decoder.layers.7.block.1.conv',
'decoder.model.7.block.3.conv.conv': 'decoder.layers.7.block.3.conv',
'decoder.model.7.shortcut.conv.conv': 'decoder.layers.7.shortcut.conv',
'decoder.model.9.convtr.convtr': 'decoder.layers.9.conv',
'decoder.model.10.block.1.conv.conv': 'decoder.layers.10.block.1.conv',
'decoder.model.10.block.3.conv.conv': 'decoder.layers.10.block.3.conv',
'decoder.model.10.shortcut.conv.conv': 'decoder.layers.10.shortcut.conv',
'decoder.model.12.convtr.convtr': 'decoder.layers.12.conv',
'decoder.model.13.block.1.conv.conv': 'decoder.layers.13.block.1.conv',
'decoder.model.13.block.3.conv.conv': 'decoder.layers.13.block.3.conv',
'decoder.model.13.shortcut.conv.conv': 'decoder.layers.13.shortcut.conv',
'decoder.model.15.conv.conv': 'decoder.layers.15.conv',
}
MAPPING_DECODER_48K = {
'decoder.model.0.conv.norm': 'decoder.layers.0.norm',
'decoder.model.3.convtr.norm': 'decoder.layers.3.norm',
'decoder.model.4.block.1.conv.norm': 'decoder.layers.4.block.1.norm',
'decoder.model.4.block.3.conv.norm': 'decoder.layers.4.block.3.norm',
'decoder.model.4.shortcut.conv.norm': 'decoder.layers.4.shortcut.norm',
'decoder.model.6.convtr.norm': 'decoder.layers.6.norm',
'decoder.model.7.block.1.conv.norm': 'decoder.layers.7.block.1.norm',
'decoder.model.7.block.3.conv.norm': 'decoder.layers.7.block.3.norm',
'decoder.model.7.shortcut.conv.norm': 'decoder.layers.7.shortcut.norm',
'decoder.model.9.convtr.norm': 'decoder.layers.9.norm',
'decoder.model.10.block.1.conv.norm': 'decoder.layers.10.block.1.norm',
'decoder.model.10.block.3.conv.norm': 'decoder.layers.10.block.3.norm',
'decoder.model.10.shortcut.conv.norm': 'decoder.layers.10.shortcut.norm',
'decoder.model.12.convtr.norm': 'decoder.layers.12.norm',
'decoder.model.13.block.1.conv.norm': 'decoder.layers.13.block.1.norm',
'decoder.model.13.block.3.conv.norm': 'decoder.layers.13.block.3.norm',
'decoder.model.13.shortcut.conv.norm': 'decoder.layers.13.shortcut.norm',
'decoder.model.15.conv.norm': 'decoder.layers.15.norm',
}
MAPPING_24K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_DECODER,
}
MAPPING_48K = {
**MAPPING_QUANTIZER,
**MAPPING_ENCODER,
**MAPPING_ENCODER_48K,
**MAPPING_DECODER,
**MAPPING_DECODER_48K,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = []
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "weight_ih_l0":
        hf_pointer.weight_ih_l0.data = value
    elif weight_type == "weight_hh_l0":
        hf_pointer.weight_hh_l0.data = value
    elif weight_type == "bias_ih_l0":
        hf_pointer.bias_ih_l0.data = value
    elif weight_type == "bias_hh_l0":
        hf_pointer.bias_hh_l0.data = value
    elif weight_type == "weight_ih_l1":
        hf_pointer.weight_ih_l1.data = value
    elif weight_type == "weight_hh_l1":
        hf_pointer.weight_hh_l1.data = value
    elif weight_type == "bias_ih_l1":
        hf_pointer.bias_ih_l1.data = value
    elif weight_type == "bias_hh_l1":
        hf_pointer.bias_hh_l1.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")
def should_ignore(name, ignore_keys):
for key in ignore_keys:
if key.endswith('.*' ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
            prefix, suffix = key.split(".*.")
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
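

# Illustrative check (assumption, not part of the original script) of the
# wildcard rules above: a trailing ".*" is a prefix match, while ".*." requires
# both the prefix and the suffix to appear in the name.
def _example_should_ignore():
    assert should_ignore("encoder.model.0.conv.conv", ["encoder.model.*"])
    assert should_ignore("quantizer.vq.layers.0._codebook.embed", ["quantizer.*.embed"])
    assert not should_ignore("decoder.layers.0.conv", ["encoder.*"])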
def recursively_load_weights(orig_dict, hf_model, model_name):
    unused_weights = []

    if model_name in ("encodec_24khz", "encodec_32khz"):
        MAPPING = MAPPING_24K
    elif model_name == "encodec_48khz":
        MAPPING = MAPPING_48K
    else:
        raise ValueError(f"Unsupported model: {model_name}")

    for name, value in orig_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        for key, mapped_key in MAPPING.items():
            if "*" in key:
                prefix, suffix = key.split(".*.")
                if prefix in name and suffix in name:
                    key = suffix

            if key in name:
                # HACK otherwise .embed gets initialized with .embed_avg too
                if key.endswith("embed") and name.endswith("embed_avg"):
                    continue

                is_used = True
                if "*" in mapped_key:
                    layer_index = name.split(key)[0].split(".")[-2]
                    mapped_key = mapped_key.replace("*", layer_index)
                if "weight_g" in name:
                    weight_type = "weight_g"
                elif "weight_v" in name:
                    weight_type = "weight_v"
                elif "weight_ih_l0" in name:
                    weight_type = "weight_ih_l0"
                elif "weight_hh_l0" in name:
                    weight_type = "weight_hh_l0"
                elif "bias_ih_l0" in name:
                    weight_type = "bias_ih_l0"
                elif "bias_hh_l0" in name:
                    weight_type = "bias_hh_l0"
                elif "weight_ih_l1" in name:
                    weight_type = "weight_ih_l1"
                elif "weight_hh_l1" in name:
                    weight_type = "weight_hh_l1"
                elif "bias_ih_l1" in name:
                    weight_type = "bias_ih_l1"
                elif "bias_hh_l1" in name:
                    weight_type = "bias_hh_l1"
                elif "bias" in name:
                    weight_type = "bias"
                elif "weight" in name:
                    weight_type = "weight"
                elif "running_mean" in name:
                    weight_type = "running_mean"
                elif "running_var" in name:
                    weight_type = "running_var"
                elif "num_batches_tracked" in name:
                    weight_type = "num_batches_tracked"
                else:
                    weight_type = None
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
@torch.no_grad()
def convert_checkpoint(
    model_name,
    checkpoint_path,
    pytorch_dump_folder_path,
    config_path=None,
    repo_id=None,
):
    if config_path is not None:
        config = EncodecConfig.from_pretrained(config_path)
    else:
        config = EncodecConfig()

    if model_name == "encodec_24khz":
        pass  # config is already correct
    elif model_name == "encodec_32khz":
        config.upsampling_ratios = [8, 5, 4, 4]
        config.target_bandwidths = [2.2]
        config.num_filters = 64
        config.sampling_rate = 32_000
        config.codebook_size = 2048
        config.use_causal_conv = False
        config.normalize = False
        config.use_conv_shortcut = False
    elif model_name == "encodec_48khz":
        config.upsampling_ratios = [8, 5, 4, 2]
        config.target_bandwidths = [3.0, 6.0, 12.0, 24.0]
        config.sampling_rate = 48_000
        config.audio_channels = 2
        config.use_causal_conv = False
        config.norm_type = "time_group_norm"
        config.normalize = True
        config.chunk_length_s = 1.0
        config.overlap = 0.01
    else:
        raise ValueError(f"Unknown model name: {model_name}")

    model = EncodecModel(config)

    feature_extractor = EncodecFeatureExtractor(
        feature_size=config.audio_channels,
        sampling_rate=config.sampling_rate,
        chunk_length_s=config.chunk_length_s,
        overlap=config.overlap,
    )
    feature_extractor.save_pretrained(pytorch_dump_folder_path)

    original_checkpoint = torch.load(checkpoint_path)
    if "best_state" in original_checkpoint:
        # we might have a training state saved, in which case discard the yaml results and just retain the weights
        original_checkpoint = original_checkpoint["best_state"]
    recursively_load_weights(original_checkpoint, model, model_name)
    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        feature_extractor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model',
default='encodec_24khz',
type=str,
help='The model to convert. Should be one of \'encodec_24khz\', \'encodec_32khz\', \'encodec_48khz\'.',
)
parser.add_argument('--checkpoint_path', required=True, default=None, type=str, help='Path to original checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--pytorch_dump_folder_path', required=True, default=None, type=str, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--push_to_hub', default=None, type=str, help='Where to upload the converted model on the 🤗 hub.'
)
    args = parser.parse_args()
convert_checkpoint(
args.model,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.push_to_hub,
)
| 20 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import torch
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
@dataclass
class TextToVideoSDPipelineOutput(BaseOutput):
    frames: Union[List[np.ndarray], torch.FloatTensor]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_text_to_video_synth import TextToVideoSDPipeline
from .pipeline_text_to_video_synth_imgaimg import VideoToVideoSDPipeline # noqa: F401
from .pipeline_text_to_video_zero import TextToVideoZeroPipeline
| 506 | 0 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
logger = datasets.logging.get_logger(__name__)
_CITATION = "\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
_DESCRIPTION = "\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
_KWARGS_DESCRIPTION = "\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
CHECKPOINT_URLS = {
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class BLEURT(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="""https://github.com/google-research/bleurt""" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Value("""string""" , id="""sequence""" ),
"""references""": datasets.Value("""string""" , id="""sequence""" ),
} ) , codebase_urls=["""https://github.com/google-research/bleurt"""] , reference_urls=["""https://github.com/google-research/bleurt""", """https://arxiv.org/abs/2004.04696"""] , )
    def _download_and_prepare(self, dl_manager):
if self.config_name == "default":
logger.warning(
"""Using default BLEURT-Base checkpoint for sequence maximum length 128. """
"""You can use a bigger model for better results with e.g.: datasets.load_metric('bleurt', 'bleurt-large-512').""" )
SCREAMING_SNAKE_CASE : Any = "bleurt-base-128"
if self.config_name.lower() in CHECKPOINT_URLS:
SCREAMING_SNAKE_CASE : Dict = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
SCREAMING_SNAKE_CASE : int = self.config_name.upper()
else:
raise KeyError(
f'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
        model_path = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name])
        self.scorer = score.BleurtScorer(os.path.join(model_path, checkpoint_name))
    def _compute(self, predictions, references):
        scores = self.scorer.score(references=references, candidates=predictions)
return {"scores": scores}
| 710 |
from __future__ import annotations
import json
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
__UpperCAmelCase = {"""UserAgent""": UserAgent().random}
def extract_user_profile(script) -> dict:
    # May raise json.decoder.JSONDecodeError when the payload is malformed.
    data = script.contents[0]
    info = json.loads(data[data.find('{"config"') : -1])
    return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class InstagramUser:
    def __init__(self, username: str):
        self.url = f"https://www.instagram.com/{username}/"
        self.user_data = self.get_json()

    def get_json(self) -> dict:
        html = requests.get(self.url, headers=headers).text
        scripts = BeautifulSoup(html, "html.parser").find_all("script")
        try:
            return extract_user_profile(scripts[4])
        except (json.decoder.JSONDecodeError, KeyError):
            return extract_user_profile(scripts[3])
    def __repr__(self) -> str:
        return f"{self.__class__.__name__}('{self.username}')"

    def __str__(self) -> str:
        return f"{self.fullname} ({self.username}) is {self.biography}"
    @property
    def username(self) -> str:
        return self.user_data["username"]

    @property
    def fullname(self) -> str:
        return self.user_data["full_name"]

    @property
    def biography(self) -> str:
        return self.user_data["biography"]

    @property
    def email(self) -> str:
        return self.user_data["business_email"]

    @property
    def website(self) -> str:
        return self.user_data["external_url"]

    @property
    def number_of_followers(self) -> int:
        return self.user_data["edge_followed_by"]["count"]

    @property
    def number_of_followings(self) -> int:
        return self.user_data["edge_follow"]["count"]

    @property
    def number_of_posts(self) -> int:
        return self.user_data["edge_owner_to_timeline_media"]["count"]

    @property
    def profile_picture_url(self) -> str:
        return self.user_data["profile_pic_url_hd"]

    @property
    def is_verified(self) -> bool:
        return self.user_data["is_verified"]

    @property
    def is_private(self) -> bool:
        return self.user_data["is_private"]
def __A ( lowerCamelCase_ = "github" ):
"""simple docstring"""
import os
if os.environ.get("""CI""" ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE : Any = InstagramUser(lowerCamelCase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data , lowerCamelCase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 1_50
assert instagram_user.number_of_followers > 12_00_00
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "[email protected]"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith("""https://instagram.""" )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
__UpperCAmelCase = InstagramUser("""github""")
print(instagram_user)
print(f'''{instagram_user.number_of_posts = }''')
print(f'''{instagram_user.number_of_followers = }''')
print(f'''{instagram_user.number_of_followings = }''')
print(f'''{instagram_user.email = }''')
print(f'''{instagram_user.website = }''')
print(f'''{instagram_user.profile_picture_url = }''')
print(f'''{instagram_user.is_verified = }''')
print(f'''{instagram_user.is_private = }''')
| 79 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deberta"] = [
        "DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DebertaForMaskedLM",
        "DebertaForQuestionAnswering",
        "DebertaForSequenceClassification",
        "DebertaForTokenClassification",
        "DebertaModel",
        "DebertaPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deberta"] = [
        "TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDebertaForMaskedLM",
        "TFDebertaForQuestionAnswering",
        "TFDebertaForSequenceClassification",
        "TFDebertaForTokenClassification",
        "TFDebertaModel",
        "TFDebertaPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
lowercase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 630 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowercase__ = logging.get_logger(__name__)
lowercase__ = {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json"""
),
}
class DPRConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = """dpr"""
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , position_embedding_type="absolute" , projection_dim = 0 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.projection_dim = projection_dim
_lowerCamelCase : int = position_embedding_type | 630 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xlnet"""] = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_xlnet_fast"""] = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlnet"""] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xlnet"""] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 513 |
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__snake_case =logging.get_logger(__name__)
__snake_case ={"""vocab_file""": """spiece.model"""}
__snake_case ={
"""vocab_file""": {
"""google/bigbird-roberta-base""": """https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model""",
"""google/bigbird-roberta-large""": (
"""https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model"""
),
"""google/bigbird-base-trivia-itc""": (
"""https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model"""
),
}
}
__snake_case ={
"""google/bigbird-roberta-base""": 4_096,
"""google/bigbird-roberta-large""": 4_096,
"""google/bigbird-base-trivia-itc""": 4_096,
}
class BigBirdTokenizer( PreTrainedTokenizer ):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']
    prefix_tokens: List[int] = []
def __init__( self : Optional[int] , UpperCAmelCase__ : int , UpperCAmelCase__ : Any="<unk>" , UpperCAmelCase__ : Union[str, Any]="<s>" , UpperCAmelCase__ : str="</s>" , UpperCAmelCase__ : List[Any]="<pad>" , UpperCAmelCase__ : Any="[SEP]" , UpperCAmelCase__ : List[Any]="[MASK]" , UpperCAmelCase__ : Optional[int]="[CLS]" , UpperCAmelCase__ : Optional[Dict[str, Any]] = None , **UpperCAmelCase__ : Tuple , ) -> None:
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else bos_token
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else eos_token
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else unk_token
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else pad_token
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else cls_token
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
lowerCAmelCase = AddedToken(UpperCAmelCase__ , lstrip=UpperCAmelCase__ , rstrip=UpperCAmelCase__ ) if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ) else mask_token
lowerCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCAmelCase__ , eos_token=UpperCAmelCase__ , unk_token=UpperCAmelCase__ , pad_token=UpperCAmelCase__ , sep_token=UpperCAmelCase__ , mask_token=UpperCAmelCase__ , cls_token=UpperCAmelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCAmelCase__ , )
lowerCAmelCase = vocab_file
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(UpperCAmelCase__ )
@property
def __UpperCAmelCase ( self : List[str] ) -> Tuple:
return self.sp_model.get_piece_size()
def __UpperCAmelCase ( self : Dict ) -> Tuple:
lowerCAmelCase = {self.convert_ids_to_tokens(UpperCAmelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> Optional[int]:
lowerCAmelCase = self.__dict__.copy()
lowerCAmelCase = None
return state
def __setstate__( self : int , UpperCAmelCase__ : List[Any] ) -> Dict:
lowerCAmelCase = d
# for backward compatibility
if not hasattr(self , 'sp_model_kwargs' ):
lowerCAmelCase = {}
lowerCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : str ) -> List[str]:
return self.sp_model.encode(UpperCAmelCase__ , out_type=UpperCAmelCase__ )
def __UpperCAmelCase ( self : Tuple , UpperCAmelCase__ : Optional[int] ) -> Dict:
return self.sp_model.piece_to_id(UpperCAmelCase__ )
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : int ) -> Optional[int]:
lowerCAmelCase = self.sp_model.IdToPiece(UpperCAmelCase__ )
return token
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : List[str] ) -> Any:
lowerCAmelCase = []
lowerCAmelCase = ''
lowerCAmelCase = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(UpperCAmelCase__ ) + token
lowerCAmelCase = True
lowerCAmelCase = []
else:
current_sub_tokens.append(UpperCAmelCase__ )
lowerCAmelCase = False
out_string += self.sp_model.decode(UpperCAmelCase__ )
return out_string.strip()
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : bool = False , UpperCAmelCase__ : bool = None , UpperCAmelCase__ : bool = True , **UpperCAmelCase__ : int , ) -> str:
lowerCAmelCase = kwargs.pop('use_source_tokenizer' , UpperCAmelCase__ )
lowerCAmelCase = self.convert_ids_to_tokens(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )
# To avoid mixing byte-level and unicode for byte-level BPT
# we need to build string separately for added tokens and byte-level tokens
# cf. https://github.com/huggingface/transformers/issues/1133
lowerCAmelCase = []
lowerCAmelCase = []
for token in filtered_tokens:
if skip_special_tokens and token in self.all_special_ids:
continue
if token in self.added_tokens_encoder:
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
lowerCAmelCase = []
sub_texts.append(UpperCAmelCase__ )
else:
current_sub_text.append(UpperCAmelCase__ )
if current_sub_text:
sub_texts.append(self.convert_tokens_to_string(UpperCAmelCase__ ) )
# Mimic the behavior of the Rust tokenizer:
# No space before [MASK] and [SEP]
if spaces_between_special_tokens:
lowerCAmelCase = re.sub(R' (\[(MASK|SEP)\])' , R'\1' , ' '.join(UpperCAmelCase__ ) )
else:
lowerCAmelCase = ''.join(UpperCAmelCase__ )
lowerCAmelCase = (
clean_up_tokenization_spaces
if clean_up_tokenization_spaces is not None
else self.clean_up_tokenization_spaces
)
if clean_up_tokenization_spaces:
lowerCAmelCase = self.clean_up_tokenization(UpperCAmelCase__ )
return clean_text
else:
return text
def __UpperCAmelCase ( self : Dict , UpperCAmelCase__ : str , UpperCAmelCase__ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase__ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowerCAmelCase = os.path.join(
UpperCAmelCase__ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCAmelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCAmelCase__ , 'wb' ) as fi:
lowerCAmelCase = self.sp_model.serialized_model_proto()
fi.write(UpperCAmelCase__ )
return (out_vocab_file,)
def __UpperCAmelCase ( self : int , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
lowerCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + token_ids_a + sep
def __UpperCAmelCase ( self : List[str] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None , UpperCAmelCase__ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase__ , token_ids_a=UpperCAmelCase__ , already_has_special_tokens=UpperCAmelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase__ )) + [1]
return [1] + ([0] * len(UpperCAmelCase__ )) + [1] + ([0] * len(UpperCAmelCase__ )) + [1]
def __UpperCAmelCase ( self : Union[str, Any] , UpperCAmelCase__ : List[int] , UpperCAmelCase__ : Optional[List[int]] = None ) -> List[int]:
lowerCAmelCase = [self.sep_token_id]
lowerCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
| 513 | 1 |
'''simple docstring'''
import numpy as np
def power_iteration (input_matrix : np.ndarray , vector : np.ndarray , error_tol : float = 1e-12 , max_iterations : int = 100 , ):
    '''simple docstring'''
    assert np.shape(input_matrix )[0] == np.shape(input_matrix )[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix )[0] == np.shape(vector )[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix ) == np.iscomplexobj(vector )
    is_complex = np.iscomplexobj(input_matrix )
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix , input_matrix.conj().T )
    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12
    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix , vector )
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w )
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h , np.dot(input_matrix , vector ) )
        # Check convergence.
        error = np.abs(lambda_ - lambda_previous ) / lambda_
        iterations += 1
        if error <= error_tol or iterations >= max_iterations:
            convergence = True
        lambda_previous = lambda_
    if is_complex:
        lambda_ = np.real(lambda_ )
return lambda_, vector
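# Quick sanity check (hypothetical example, not part of the original tests):
# the dominant eigenvalue of [[2, 1], [1, 2]] is 3, and power iteration finds it
# from any start vector not orthogonal to the dominant eigenvector:
#   value, _ = power_iteration(np.array([[2.0, 1.0], [1.0, 2.0]]), np.array([1.0, 0.0]))
#   assert abs(value - 3.0) <= 1e-6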
def test_power_iteration ():
    '''simple docstring'''
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
    real_vector = np.array([41, 4, 20] )
    complex_input_matrix = real_input_matrix.astype(np.complex128 )
    imag_matrix = np.triu(1J * complex_input_matrix , 1 )
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20] ).astype(np.complex128 )
    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector
        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix , vector )
        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix )
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]
        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector ) - np.abs(eigen_vector_max ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
| 22 |
"""simple docstring"""
from __future__ import annotations
__author__ = 'Muhammad Umer Farooq'
__license__ = 'MIT'
__version__ = '1.0.0'
__maintainer__ = 'Muhammad Umer Farooq'
__email__ = '[email protected]'
__status__ = 'Alpha'
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser( HTMLParser ):
'''simple docstring'''
def __init__( self : str , __lowercase : str ):
'''simple docstring'''
super().__init__()
        self.urls: list[str] = []
        self.domain = __lowercase
    def handle_starttag( self , tag : str , attrs : list[tuple[str, str | None]] ):
'''simple docstring'''
if tag == "a":
# Check the list of defined attributes.
for name, value in attrs:
# If href is defined, and not empty nor # print it.
if name == "href" and value != "#" and value != "":
# If not already in urls.
if value not in self.urls:
                        url = parse.urljoin(self.domain , value )
                        self.urls.append(url )
def get_domain_name( UpperCAmelCase_ ) ->str:
    """simple docstring"""
    return ".".join(get_sub_domain_name(UpperCAmelCase_ ).split('''.''' )[-2:] )
def get_sub_domain_name( UpperCAmelCase_ ) ->str:
    """simple docstring"""
    return parse.urlparse(UpperCAmelCase_ ).netloc
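# Examples (hypothetical URL, standard urllib.parse behaviour):
#   get_sub_domain_name("https://mail.github.com/readme") -> "mail.github.com"
#   get_domain_name("https://mail.github.com/readme") -> "github.com"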
def emails_from_url( UpperCAmelCase_ = "https://github.com" ) ->list[str]:
    """simple docstring"""
    domain = get_domain_name(UpperCAmelCase_ )
    # Initialize the parser
    parser = Parser(UpperCAmelCase_ )
    try:
        # Open URL
        r = requests.get(UpperCAmelCase_ )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            # read = requests.get(link)
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall('''[a-zA-Z0-9]+@''' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
lowercase__ :List[str] = emails_from_url('https://github.com')
print(f"""{len(emails)} emails found:""")
print('\n'.join(sorted(emails))) | 522 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_pegasus_x""": ["""PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PegasusXConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
"""PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PegasusXForConditionalGeneration""",
"""PegasusXModel""",
"""PegasusXPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 709 |
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest (SchedulerCommonTest ):
    '''simple docstring'''
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
def _lowerCamelCase ( self , **snake_case__ ):
'''simple docstring'''
        config = {
"num_train_timesteps": 201,
"sigma_min": 0.002,
"sigma_max": 80.0,
}
config.update(**snake_case__ )
return config
def _lowerCamelCase ( self ):
'''simple docstring'''
        num_inference_steps = 10
        config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**config )
        scheduler.set_timesteps(num_inference_steps )
        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]
        sample = self.dummy_sample
        residual = 0.1 * sample
        output_0 = scheduler.step(residual , timestep_0 , sample ).prev_sample
        output_1 = scheduler.step(residual , timestep_1 , sample ).prev_sample
        self.assertEqual(output_0.shape , sample.shape )
        self.assertEqual(output_0.shape , output_1.shape )
def _lowerCamelCase ( self ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps )
def _lowerCamelCase ( self ):
'''simple docstring'''
for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = 1
scheduler.set_timesteps(snake_case__ )
UpperCamelCase_ = scheduler.timesteps
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(snake_case__ ):
# 1. scale model input
UpperCamelCase_ = scheduler.scale_model_input(snake_case__ , snake_case__ )
# 2. predict noise residual
UpperCamelCase_ = model(snake_case__ , snake_case__ )
# 3. predict previous sample x_t-1
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
UpperCamelCase_ = pred_prev_sample
UpperCamelCase_ = torch.sum(torch.abs(snake_case__ ) )
UpperCamelCase_ = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 192.7_614 ) < 1e-2
assert abs(result_mean.item() - 0.2_510 ) < 1e-3
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = [106, 0]
scheduler.set_timesteps(timesteps=snake_case__ )
UpperCamelCase_ = scheduler.timesteps
UpperCamelCase_ = torch.manual_seed(0 )
UpperCamelCase_ = self.dummy_model()
UpperCamelCase_ = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
UpperCamelCase_ = scheduler.scale_model_input(snake_case__ , snake_case__ )
# 2. predict noise residual
UpperCamelCase_ = model(snake_case__ , snake_case__ )
# 3. predict previous sample x_t-1
UpperCamelCase_ = scheduler.step(snake_case__ , snake_case__ , snake_case__ , generator=snake_case__ ).prev_sample
UpperCamelCase_ = pred_prev_sample
UpperCamelCase_ = torch.sum(torch.abs(snake_case__ ) )
UpperCamelCase_ = torch.mean(torch.abs(snake_case__ ) )
assert abs(result_sum.item() - 347.6_357 ) < 1e-2
assert abs(result_mean.item() - 0.4_527 ) < 1e-3
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = [39, 30, 12, 15, 0]
with self.assertRaises(snake_case__ , msg="`timesteps` must be in descending order." ):
scheduler.set_timesteps(timesteps=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = [39, 30, 12, 1, 0]
UpperCamelCase_ = len(snake_case__ )
with self.assertRaises(snake_case__ , msg="Can only pass one of `num_inference_steps` or `timesteps`." ):
scheduler.set_timesteps(num_inference_steps=snake_case__ , timesteps=snake_case__ )
def _lowerCamelCase ( self ):
'''simple docstring'''
UpperCamelCase_ = self.scheduler_classes[0]
UpperCamelCase_ = self.get_scheduler_config()
UpperCamelCase_ = scheduler_class(**snake_case__ )
UpperCamelCase_ = [scheduler.config.num_train_timesteps]
with self.assertRaises(
snake_case__ , msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}" , ):
scheduler.set_timesteps(timesteps=snake_case__ )
| 504 | 0 |
'''simple docstring'''
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('TEST_SAGEMAKER' , 'False' ) ) is not True , reason='Skipping test because should only be run when releasing minor transformers version' , )
@pytest.mark.usefixtures('sm_env' )
@parameterized_class(
[
{
'framework': 'pytorch',
'script': 'run_glue_model_parallelism.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
{
'framework': 'pytorch',
'script': 'run_glue.py',
'model_name_or_path': 'roberta-large',
'instance_type': 'ml.p3dn.24xlarge',
'results': {'train_runtime': 1_6_0_0, 'eval_accuracy': 0.3, 'eval_loss': 1.2},
},
] )
class lowerCamelCase_ ( unittest.TestCase ):
def __lowercase ( self : Union[str, Any] ):
"""simple docstring"""
if self.framework == "pytorch":
subprocess.run(
F"""cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py""".split() , encoding='''utf-8''' , check=lowerCAmelCase__ , )
assert hasattr(self , '''env''' )
    def create_estimator( self , instance_count ):
"""simple docstring"""
# configuration for running training on smdistributed Model Parallel
        mpi_options = {
'''enabled''': True,
'''processes_per_host''': 8,
}
        smp_options = {
'''enabled''': True,
'''parameters''': {
'''microbatches''': 4,
'''placement_strategy''': '''spread''',
'''pipeline''': '''interleaved''',
'''optimize''': '''speed''',
'''partitions''': 4,
'''ddp''': True,
},
}
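        # smp_options requests 4 pipeline partitions with 4 microbatches each, while
        # mpi_options launches 8 processes per host on the ml.p3dn.24xlarge instance
        # named in the parameterized configs above.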
        distribution = {'''smdistributed''': {'''modelparallel''': smp_options}, '''mpi''': mpi_options}
        name_extension = '''trainer''' if self.script == '''run_glue.py''' else '''smtrainer'''
# creates estimator
return HuggingFace(
            entry_point=self.script , source_dir=self.env.test_path , role=self.env.role , image_uri=self.env.image_uri , base_job_name=F"""{self.env.base_job_name}-{instance_count}-smp-{name_extension}""" , instance_count=instance_count , instance_type=self.instance_type , debugger_hook_config=False , hyperparameters={
**self.env.hyperparameters,
'''model_name_or_path''': self.model_name_or_path,
'''max_steps''': 5_00,
            } , metric_definitions=self.env.metric_definitions , distribution=distribution , py_version='''py36''' , )
    def save_results_as_csv( self , job_name ):
        """simple docstring"""
        TrainingJobAnalytics(job_name ).export_csv(F"""{self.env.test_path}/{job_name}_metrics.csv""" )
@parameterized.expand([(1,)] )
def __lowercase ( self : Dict , lowerCAmelCase__ : List[Any] ):
"""simple docstring"""
# create estimator
        estimator = self.create_estimator(lowerCAmelCase__ )
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name ).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == '''eval_accuracy''']['''value'''] )
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == '''eval_loss''']['''value'''] )
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name ).get('''TrainingTimeInSeconds''' , 99_99_99 )
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['''eval_accuracy'''] for t in eval_accuracy )
assert all(t <= self.results['''eval_loss'''] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" , '''w''' ) as outfile:
json.dump({'''train_time''': train_runtime, '''eval_accuracy''': eval_accuracy, '''eval_loss''': eval_loss} , lowerCAmelCase__ )
| 527 |
'''simple docstring'''
from queue import PriorityQueue
from typing import Any
import numpy as np
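# Bidirectional Dijkstra: one search expands from the source over the forward
# graph while a second expands from the destination over the reversed graph;
# the loop stops once the two frontiers prove no shorter meeting point exists.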
def pass_and_relaxation( graph : dict , v : str , visited_forward : set , visited_backward : set , cst_fwd : dict , cst_bwd : dict , queue : PriorityQueue , parent : dict , shortest_distance : float | int , ):
    for nxt, d in graph[v]:
        if nxt in visited_forward:
            continue
        old_cost_f = cst_fwd.get(nxt , np.inf )
        new_cost_f = cst_fwd[v] + d
        if new_cost_f < old_cost_f:
            queue.put((new_cost_f, nxt) )
            cst_fwd[nxt] = new_cost_f
            parent[nxt] = v
        if nxt in visited_backward:
            if cst_fwd[v] + d + cst_bwd[nxt] < shortest_distance:
                shortest_distance = cst_fwd[v] + d + cst_bwd[nxt]
    return shortest_distance
def bidirectional_dij( source : str , destination : str , graph_forward : dict , graph_backward : dict ):
    shortest_path_distance = -1
    visited_forward = set()
    visited_backward = set()
    cst_fwd = {source: 0}
    cst_bwd = {destination: 0}
    parent_forward = {source: None}
    parent_backward = {destination: None}
    queue_forward : PriorityQueue[Any] = PriorityQueue()
    queue_backward : PriorityQueue[Any] = PriorityQueue()
    shortest_distance = np.inf
    queue_forward.put((0, source) )
    queue_backward.put((0, destination) )
    if source == destination:
        return 0
    while not queue_forward.empty() and not queue_backward.empty():
        _, v_fwd = queue_forward.get()
        visited_forward.add(v_fwd )
        _, v_bwd = queue_backward.get()
        visited_backward.add(v_bwd )
        shortest_distance = pass_and_relaxation(
            graph_forward , v_fwd , visited_forward , visited_backward , cst_fwd , cst_bwd , queue_forward , parent_forward , shortest_distance , )
        shortest_distance = pass_and_relaxation(
            graph_backward , v_bwd , visited_backward , visited_forward , cst_bwd , cst_fwd , queue_backward , parent_backward , shortest_distance , )
        if cst_fwd[v_fwd] + cst_bwd[v_bwd] >= shortest_distance:
            break
    if shortest_distance != np.inf:
        shortest_path_distance = shortest_distance
    return shortest_path_distance
graph_fwd = {
'B': [['C', 1]],
'C': [['D', 1]],
'D': [['F', 1]],
'E': [['B', 1], ['G', 2]],
'F': [],
'G': [['F', 1]],
}
graph_bwd = {
'B': [['E', 1]],
'C': [['B', 1]],
'D': [['C', 1]],
'F': [['D', 1], ['G', 1]],
'E': [[None, np.inf]],
'G': [['E', 2]],
}
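# Worked example for the graphs above: bidirectional_dij("E", "F", graph_fwd, graph_bwd)
# returns 3, via E -> G (cost 2) -> F (cost 1), beating E -> B -> C -> D -> F (cost 4).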
if __name__ == "__main__":
import doctest
doctest.testmod()
| 527 | 1 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
lowerCAmelCase__ = {
'''google/canine-s''': '''https://huggingface.co/google/canine-s/resolve/main/config.json''',
# See all CANINE models at https://huggingface.co/models?filter=canine
}
class CanineConfig( PretrainedConfig ):
    model_type = "canine"
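    # CANINE is tokenizer-free: inputs are raw Unicode code points, and the
    # 0xE000/0xE001 defaults below are Private Use Area code points reserved
    # for the [CLS]/[SEP] markers.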
    def __init__( self , hidden_size=7_6_8 , num_hidden_layers=1_2 , num_attention_heads=1_2 , intermediate_size=3_0_7_2 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=1_6_3_8_4 , type_vocab_size=1_6 , initializer_range=0.02 , layer_norm_eps=1E-12 , pad_token_id=0 , bos_token_id=0xe0_00 , eos_token_id=0xe0_01 , downsampling_rate=4 , upsampling_kernel_size=4 , num_hash_functions=8 , num_hash_buckets=1_6_3_8_4 , local_transformer_stride=1_2_8 , **kwargs , ):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Character config:
        self.downsampling_rate = downsampling_rate
        self.upsampling_kernel_size = upsampling_kernel_size
        self.num_hash_functions = num_hash_functions
        self.num_hash_buckets = num_hash_buckets
        self.local_transformer_stride = local_transformer_stride
| 598 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class VQEncoderOutput( BaseOutput ):
    latents: torch.FloatTensor
class VQModel( ModelMixin , ConfigMixin ):
@register_to_config
def __init__( self : Optional[Any] , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 3 , __lowerCAmelCase : Tuple[str] = ("DownEncoderBlock2D",) , __lowerCAmelCase : Tuple[str] = ("UpDecoderBlock2D",) , __lowerCAmelCase : Tuple[int] = (6_4,) , __lowerCAmelCase : int = 1 , __lowerCAmelCase : str = "silu" , __lowerCAmelCase : int = 3 , __lowerCAmelCase : int = 3_2 , __lowerCAmelCase : int = 2_5_6 , __lowerCAmelCase : int = 3_2 , __lowerCAmelCase : Optional[int] = None , __lowerCAmelCase : float = 0.1_82_15 , __lowerCAmelCase : str = "group" , ):
"""simple docstring"""
super().__init__()
# pass init params to Encoder
_lowerCamelCase : Optional[Any] = Encoder(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , down_block_types=__lowerCAmelCase , block_out_channels=__lowerCAmelCase , layers_per_block=__lowerCAmelCase , act_fn=__lowerCAmelCase , norm_num_groups=__lowerCAmelCase , double_z=__lowerCAmelCase , )
_lowerCamelCase : Dict = vq_embed_dim if vq_embed_dim is not None else latent_channels
        _lowerCamelCase : List[str] = nn.Conv2d(__lowerCAmelCase , __lowerCAmelCase , 1 )
_lowerCamelCase : int = VectorQuantizer(__lowerCAmelCase , __lowerCAmelCase , beta=0.25 , remap=__lowerCAmelCase , sane_index_shape=__lowerCAmelCase )
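        # beta=0.25 is the usual VQ-VAE commitment-loss weight, pulling encoder
        # outputs toward their nearest codebook entries.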
        _lowerCamelCase : Dict = nn.Conv2d(__lowerCAmelCase , __lowerCAmelCase , 1 )
# pass init params to Decoder
_lowerCamelCase : List[str] = Decoder(
in_channels=__lowerCAmelCase , out_channels=__lowerCAmelCase , up_block_types=__lowerCAmelCase , block_out_channels=__lowerCAmelCase , layers_per_block=__lowerCAmelCase , act_fn=__lowerCAmelCase , norm_num_groups=__lowerCAmelCase , norm_type=__lowerCAmelCase , )
@apply_forward_hook
def SCREAMING_SNAKE_CASE ( self : Any , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : bool = True ):
"""simple docstring"""
_lowerCamelCase : List[Any] = self.encoder(__lowerCAmelCase )
_lowerCamelCase : int = self.quant_conv(__lowerCAmelCase )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=__lowerCAmelCase )
@apply_forward_hook
def SCREAMING_SNAKE_CASE ( self : str , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : bool = False , __lowerCAmelCase : bool = True ):
"""simple docstring"""
if not force_not_quantize:
_lowerCamelCase , _lowerCamelCase , _lowerCamelCase : Optional[Any] = self.quantize(__lowerCAmelCase )
else:
_lowerCamelCase : Dict = h
_lowerCamelCase : List[Any] = self.post_quant_conv(__lowerCAmelCase )
_lowerCamelCase : Union[str, Any] = self.decoder(__lowerCAmelCase , quant if self.config.norm_type == '''spatial''' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self : int , __lowerCAmelCase : torch.FloatTensor , __lowerCAmelCase : bool = True ):
"""simple docstring"""
_lowerCamelCase : Union[str, Any] = sample
_lowerCamelCase : Tuple = self.encode(__lowerCAmelCase ).latents
_lowerCamelCase : Optional[int] = self.decode(__lowerCAmelCase ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=__lowerCAmelCase )
| 598 | 1 |
def reverse_long_words( _UpperCamelCase ) -> str:
    """simple docstring"""
    return " ".join(
        ''''''.join(word[::-1] ) if len(word ) > 4 else word for word in _UpperCamelCase.split() )
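# e.g. reverse_long_words("Hey wollef sroirraw") -> "Hey fellow warriors"
# (only words longer than four characters are reversed).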
if __name__ == "__main__":
import doctest
doctest.testmod()
print(reverse_long_words('''Hey wollef sroirraw'''))
| 60 |
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('0.8.3'):
raise Exception('requires gluonnlp == 0.8.3')
if version.parse(mx.__version__) != version.parse('1.5.0'):
raise Exception('requires mxnet == 1.5.0')
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = 'The Nymphenburg Palace is a beautiful palace in Munich!'
def convert_bort_checkpoint_to_pytorch( bort_checkpoint_path , pytorch_dump_folder_path ):
'''simple docstring'''
    bort_4_8_768_1024_hparams = {
"attention_cell": "multi_head",
"num_layers": 4,
"units": 1024,
"hidden_size": 768,
"max_length": 512,
"num_heads": 8,
"scaled": True,
"dropout": 0.1,
"use_residual": True,
"embed_size": 1024,
"embed_dropout": 0.1,
"word_embed": None,
"layer_norm_eps": 1e-5,
"token_type_vocab_size": 2,
}
    predefined_args = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
    encoder = BERTEncoder(
        attention_cell=predefined_args["attention_cell"] , num_layers=predefined_args["num_layers"] , units=predefined_args["units"] , hidden_size=predefined_args["hidden_size"] , max_length=predefined_args["max_length"] , num_heads=predefined_args["num_heads"] , scaled=predefined_args["scaled"] , dropout=predefined_args["dropout"] , output_attention=False , output_all_encodings=False , use_residual=predefined_args["use_residual"] , activation=predefined_args.get("activation" , "gelu" ) , layer_norm_eps=predefined_args.get("layer_norm_eps" , None ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
    vocab_name = "openwebtext_ccnews_stories_books_cased"
# Specify download folder to Gluonnlp's vocab
    gluon_cache_dir = os.path.join(get_home_dir() , "models" )
    bort_vocab = _load_vocab(vocab_name , None , gluon_cache_dir , cls=Vocab )
    original_bort = nlp.model.BERTModel(
        encoder , len(bort_vocab ) , units=predefined_args["units"] , embed_size=predefined_args["embed_size"] , embed_dropout=predefined_args["embed_dropout"] , word_embed=predefined_args["word_embed"] , use_pooler=False , use_token_type_embed=False , token_type_vocab_size=predefined_args["token_type_vocab_size"] , use_classifier=False , use_decoder=False , )
    original_bort.load_parameters(bort_checkpoint_path , cast_dtype=True , ignore_extra=True )
    params = original_bort._collect_params_with_prefix()
# Build our config 🤗
    hf_bort_config_json = {
        "architectures": ["BertForMaskedLM"],
        "attention_probs_dropout_prob": predefined_args["dropout"],
        "hidden_act": "gelu",
        "hidden_dropout_prob": predefined_args["dropout"],
        "hidden_size": predefined_args["embed_size"],
        "initializer_range": 0.02,
        "intermediate_size": predefined_args["hidden_size"],
        "layer_norm_eps": predefined_args["layer_norm_eps"],
        "max_position_embeddings": predefined_args["max_length"],
        "model_type": "bort",
        "num_attention_heads": predefined_args["num_heads"],
        "num_hidden_layers": predefined_args["num_layers"],
        "pad_token_id": 1, # 2 = BERT, 1 = RoBERTa
        "type_vocab_size": 1, # 2 = BERT, 1 = RoBERTa
        "vocab_size": len(bort_vocab ),
    }
    hf_bort_config = BertConfig.from_dict(hf_bort_config_json )
    hf_bort_model = BertForMaskedLM(hf_bort_config )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
    def to_torch(mx_array ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
# Check param shapes and map new HF param back
    def check_and_map_params(hf_param , gluon_param ):
        shape_hf = hf_param.shape
        gluon_param = to_torch(params[gluon_param] )
        shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), F"""The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers"""
return gluon_param
UpperCAmelCase__ =check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , "word_embed.0.weight" )
UpperCAmelCase__ =check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , "encoder.position_weight" )
UpperCAmelCase__ =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , "encoder.layer_norm.beta" )
UpperCAmelCase__ =check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , "encoder.layer_norm.gamma" )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
UpperCAmelCase__ =torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
        layer = hf_bort_model.bert.encoder.layer[i]
# self attention
        self_attn = layer.attention.self
UpperCAmelCase__ =check_and_map_params(
self_attn.key.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.bias""" )
UpperCAmelCase__ =check_and_map_params(
self_attn.key.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_key.weight""" )
UpperCAmelCase__ =check_and_map_params(
self_attn.query.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.bias""" )
UpperCAmelCase__ =check_and_map_params(
self_attn.query.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_query.weight""" )
UpperCAmelCase__ =check_and_map_params(
self_attn.value.bias.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.bias""" )
UpperCAmelCase__ =check_and_map_params(
self_attn.value.weight.data , F"""encoder.transformer_cells.{i}.attention_cell.proj_value.weight""" )
# self attention output
        self_output = layer.attention.output
UpperCAmelCase__ =check_and_map_params(
self_output.dense.bias , F"""encoder.transformer_cells.{i}.proj.bias""" )
UpperCAmelCase__ =check_and_map_params(
self_output.dense.weight , F"""encoder.transformer_cells.{i}.proj.weight""" )
UpperCAmelCase__ =check_and_map_params(
self_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.layer_norm.beta""" )
UpperCAmelCase__ =check_and_map_params(
self_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.layer_norm.gamma""" )
# intermediate
        intermediate = layer.intermediate
UpperCAmelCase__ =check_and_map_params(
intermediate.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_1.bias""" )
UpperCAmelCase__ =check_and_map_params(
intermediate.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_1.weight""" )
# output
        bert_output = layer.output
UpperCAmelCase__ =check_and_map_params(
bert_output.dense.bias , F"""encoder.transformer_cells.{i}.ffn.ffn_2.bias""" )
UpperCAmelCase__ =check_and_map_params(
bert_output.dense.weight , F"""encoder.transformer_cells.{i}.ffn.ffn_2.weight""" )
UpperCAmelCase__ =check_and_map_params(
bert_output.LayerNorm.bias , F"""encoder.transformer_cells.{i}.ffn.layer_norm.beta""" )
UpperCAmelCase__ =check_and_map_params(
bert_output.LayerNorm.weight , F"""encoder.transformer_cells.{i}.ffn.layer_norm.gamma""" )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
    tokenizer = RobertaTokenizer.from_pretrained("roberta-base" )
    input_ids = tokenizer.encode_plus(SAMPLE_TEXT )["input_ids"]
# Get gluon output
    mx_input_ids = mx.nd.array([input_ids] )
    output_gluon = original_bort(inputs=mx_input_ids , token_types=[] )
# Get Transformer output (save and reload model again)
    hf_bort_model.save_pretrained(pytorch_dump_folder_path )
    hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
hf_bort_model.eval()
    input_ids_pt = tokenizer.encode_plus(SAMPLE_TEXT , return_tensors="pt" )
    output_hf = hf_bort_model(**input_ids_pt )[0]
    gluon_layer = output_gluon[0].asnumpy()
    hf_layer = output_hf[0].detach().numpy()
    max_absolute_diff = np.max(np.abs(hf_layer - gluon_layer ) ).item()
    success = np.allclose(gluon_layer , hf_layer , atol=1e-3 )
if success:
print("✔️ Both model do output the same tensors" )
else:
print("❌ Both model do **NOT** output the same tensors" )
        print("Absolute difference is:" , max_absolute_diff )
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--bort_checkpoint_path', default=None, type=str, required=True, help='Path the official Bort params file.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
UpperCamelCase_ = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path)
| 625 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__a = random.Random()
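# Module-level RNG so every floats_list call draws from one seedable stream.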
def floats_list( shape , scale=1.0 , rng=None , name=None ):
    '''simple docstring'''
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0] ):
        values.append([] )
        for _ in range(shape[1] ):
            values[-1].append(rng.random() * scale )
    return values
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTester( unittest.TestCase ):
    """simple docstring"""
    def __init__( self ,parent ,batch_size=7 ,min_seq_length=400 ,max_seq_length=2_000 ,feature_size=10 ,hop_length=160 ,chunk_length=8 ,padding_value=0.0 ,sampling_rate=4_000 ,return_attention_mask=False ,do_normalize=True ,):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict( self ):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common( self ,equal_length=False ,numpify=False ):
def _flatten(_SCREAMING_SNAKE_CASE ):
return list(itertools.chain(*_SCREAMING_SNAKE_CASE ) )
if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
            speech_inputs = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
            speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest( SequenceFeatureExtractionTestMixin , unittest.TestCase ):
    """simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp( self ):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self )
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : List[str] = feat_extract_first.save_pretrained(_SCREAMING_SNAKE_CASE )[0]
check_json_file_has_correct_format(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = self.feature_extraction_class.from_pretrained(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = feat_extract_first.to_dict()
UpperCAmelCase_ : List[str] = feat_extract_second.to_dict()
UpperCAmelCase_ : Optional[int] = feat_extract_first.mel_filters
UpperCAmelCase_ : Union[str, Any] = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
UpperCAmelCase_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
UpperCAmelCase_ : Optional[Any] = os.path.join(_SCREAMING_SNAKE_CASE ,'''feat_extract.json''' )
feat_extract_first.to_json_file(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : str = self.feature_extraction_class.from_json_file(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Dict = feat_extract_first.to_dict()
UpperCAmelCase_ : Tuple = feat_extract_second.to_dict()
UpperCAmelCase_ : List[str] = feat_extract_first.mel_filters
UpperCAmelCase_ : Tuple = feat_extract_second.mel_filters
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ) )
self.assertEqual(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE )
def a__ ( self ) -> Union[str, Any]:
# Tests that all call wrap to encode_plus and batch_encode_plus
UpperCAmelCase_ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
UpperCAmelCase_ : List[Any] = [floats_list((1, x) )[0] for x in range(800 ,1_400 ,200 )]
UpperCAmelCase_ : Any = [np.asarray(_SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
# Test feature size
UpperCAmelCase_ : Optional[Any] = feature_extractor(_SCREAMING_SNAKE_CASE ,padding='''max_length''' ,return_tensors='''np''' ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames )
self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size )
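        # Whisper pads/trims all audio to a fixed 30 s window, so the frame count
        # (nb_max_frames) is constant regardless of the raw input length.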
# Test not batched input
UpperCAmelCase_ : Dict = feature_extractor(speech_inputs[0] ,return_tensors='''np''' ).input_features
UpperCAmelCase_ : Optional[int] = feature_extractor(np_speech_inputs[0] ,return_tensors='''np''' ).input_features
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
# Test batched
UpperCAmelCase_ : Optional[Any] = feature_extractor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).input_features
UpperCAmelCase_ : Optional[Any] = feature_extractor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
# Test 2-D numpy arrays are batched.
UpperCAmelCase_ : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
UpperCAmelCase_ : str = np.asarray(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = feature_extractor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).input_features
UpperCAmelCase_ : List[str] = feature_extractor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
# Test truncation required
UpperCAmelCase_ : int = [floats_list((1, x) )[0] for x in range(200 ,(feature_extractor.n_samples + 500) ,200 )]
UpperCAmelCase_ : Any = [np.asarray(_SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs]
UpperCAmelCase_ : Optional[int] = [x[: feature_extractor.n_samples] for x in speech_inputs]
UpperCAmelCase_ : List[str] = [np.asarray(_SCREAMING_SNAKE_CASE ) for speech_input in speech_inputs_truncated]
UpperCAmelCase_ : List[Any] = feature_extractor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).input_features
UpperCAmelCase_ : List[str] = feature_extractor(_SCREAMING_SNAKE_CASE ,return_tensors='''np''' ).input_features
for enc_seq_a, enc_seq_a in zip(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ):
self.assertTrue(np.allclose(_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,atol=1e-3 ) )
def a__ ( self ) -> List[Any]:
import torch
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
        np_speech_inputs = np.random.rand(100 ,32 ).astype(np.float32 )
        py_speech_inputs = np_speech_inputs.tolist()
        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{'''input_features''': inputs}] ,return_tensors='''np''' )
            self.assertTrue(np_processed.input_features.dtype == np.float32 )
            pt_processed = feature_extractor.pad([{'''input_features''': inputs}] ,return_tensors='''pt''' )
            self.assertTrue(pt_processed.input_features.dtype == torch.float32 )
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]
        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3)) | 300 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaubertConfig,
TFFlaubertForMultipleChoice,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForSequenceClassification,
TFFlaubertForTokenClassification,
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
)
class TFFlaubertModelTester:
def __init__( self ,_SCREAMING_SNAKE_CASE ,) -> Tuple:
UpperCAmelCase_ : Dict = parent
UpperCAmelCase_ : Optional[Any] = 13
UpperCAmelCase_ : Optional[Any] = 7
UpperCAmelCase_ : Union[str, Any] = True
UpperCAmelCase_ : Optional[Any] = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : Tuple = True
UpperCAmelCase_ : str = True
UpperCAmelCase_ : List[Any] = False
UpperCAmelCase_ : Dict = False
UpperCAmelCase_ : Tuple = False
UpperCAmelCase_ : Dict = 2
UpperCAmelCase_ : Tuple = 99
UpperCAmelCase_ : Dict = 0
UpperCAmelCase_ : Optional[int] = 32
UpperCAmelCase_ : Optional[int] = 2
UpperCAmelCase_ : Tuple = 4
UpperCAmelCase_ : List[Any] = 0.1
UpperCAmelCase_ : int = 0.1
UpperCAmelCase_ : List[str] = 512
UpperCAmelCase_ : Any = 16
UpperCAmelCase_ : Union[str, Any] = 2
UpperCAmelCase_ : Any = 0.02
UpperCAmelCase_ : Tuple = 3
UpperCAmelCase_ : List[Any] = 4
UpperCAmelCase_ : Dict = '''last'''
UpperCAmelCase_ : Dict = True
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Union[str, Any] = 0
def a__ ( self ) -> List[str]:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
UpperCAmelCase_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] ,dtype=tf.floataa )
UpperCAmelCase_ : Optional[Any] = None
if self.use_input_lengths:
UpperCAmelCase_ : Optional[int] = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
UpperCAmelCase_ : List[str] = None
if self.use_token_type_ids:
UpperCAmelCase_ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
UpperCAmelCase_ : str = None
UpperCAmelCase_ : Tuple = None
UpperCAmelCase_ : Any = None
if self.use_labels:
UpperCAmelCase_ : int = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
UpperCAmelCase_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
UpperCAmelCase_ : Any = ids_tensor([self.batch_size] ,2 ,dtype=tf.floataa )
UpperCAmelCase_ : List[Any] = ids_tensor([self.batch_size] ,self.num_choices )
UpperCAmelCase_ : int = FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,bos_token_id=self.bos_token_id ,)
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Any:
UpperCAmelCase_ : Tuple = TFFlaubertModel(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : List[Any] = model(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = [input_ids, input_mask]
UpperCAmelCase_ : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str:
UpperCAmelCase_ : int = TFFlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Union[str, Any] = {'''input_ids''': input_ids, '''lengths''': input_lengths, '''langs''': token_type_ids}
UpperCAmelCase_ : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Tuple:
UpperCAmelCase_ : List[Any] = TFFlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Tuple = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> int:
UpperCAmelCase_ : List[Any] = TFFlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Any = {'''input_ids''': input_ids, '''lengths''': input_lengths}
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> Optional[Any]:
UpperCAmelCase_ : str = self.num_labels
UpperCAmelCase_ : List[str] = TFFlaubertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : Optional[int] = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
UpperCAmelCase_ : List[str] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def a__ ( self ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,_SCREAMING_SNAKE_CASE ,) -> str:
UpperCAmelCase_ : List[Any] = self.num_choices
UpperCAmelCase_ : Any = TFFlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
UpperCAmelCase_ : List[str] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : str = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE ,1 ) ,(1, self.num_choices, 1) )
UpperCAmelCase_ : Dict = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
UpperCAmelCase_ : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
            "langs": token_type_ids,
            "lengths": input_lengths,
        }
        return config, inputs_dict
@require_tf
class TFFlaubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
(
TFFlaubertModel,
TFFlaubertWithLMHeadModel,
TFFlaubertForSequenceClassification,
TFFlaubertForQuestionAnsweringSimple,
TFFlaubertForTokenClassification,
TFFlaubertForMultipleChoice,
)
if is_tf_available()
else ()
)
    all_generative_model_classes = (
        (TFFlaubertWithLMHeadModel,) if is_tf_available() else ()
    )  # TODO (PVP): Check whether language generation is also applicable to other models
    pipeline_model_mapping = (
{
'''feature-extraction''': TFFlaubertModel,
'''fill-mask''': TFFlaubertWithLMHeadModel,
'''question-answering''': TFFlaubertForQuestionAnsweringSimple,
'''text-classification''': TFFlaubertForSequenceClassification,
'''token-classification''': TFFlaubertForTokenClassification,
'''zero-shot''': TFFlaubertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('''Fast''' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def setUp(self):
        self.model_tester = TFFlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_token_classification(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_for_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFFlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_tf
@require_sentencepiece
@require_tokenizers
class TFFlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFFlaubertModel.from_pretrained("jplu/tf-flaubert-small-cased")
        input_ids = tf.convert_to_tensor(
            [[0, 158, 735, 2592, 1424, 6727, 82, 1]],
            dtype=tf.int32,
        )  # "J'aime flaubert !"
        output = model(input_ids)[0]
        expected_shape = tf.TensorShape((1, 8, 512))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [-1.8768773, -1.566555, 0.27072418],
                    [-1.6920038, -0.5873505, 1.9329599],
                    [-2.9563985, -1.6993835, 1.7972052],
                ]
            ],
            dtype=tf.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4)) | 300 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor"""
    if rng is None:
        rng = global_rng
    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)
    return values
class TvltFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        spectrogram_length=2048,
        feature_size=128,
        num_audio_channels=1,
        hop_length=512,
        chunk_length=30,
        sampling_rate=44100,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.spectrogram_length = spectrogram_length
        self.feature_size = feature_size
        self.num_audio_channels = num_audio_channels
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.sampling_rate = sampling_rate
    def prepare_feat_extract_dict(self):
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = TvltFeatureExtractor
    def setUp(self):
        self.feat_extract_tester = TvltFeatureExtractionTester(self)

    def test_feat_extract_properties(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        self.assertTrue(hasattr(feature_extractor, "spectrogram_length"))
        self.assertTrue(hasattr(feature_extractor, "feature_size"))
        self.assertTrue(hasattr(feature_extractor, "num_audio_channels"))
        self.assertTrue(hasattr(feature_extractor, "hop_length"))
        self.assertTrue(hasattr(feature_extractor, "chunk_length"))
        self.assertTrue(hasattr(feature_extractor, "sampling_rate"))
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)
        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = dict_first.pop("mel_filters")
        mel_2 = dict_second.pop("mel_filters")
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Initialize feature_extractor
        feature_extractor = self.feature_extraction_class(**self.feat_extract_dict)
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]
        # Test not batched input
        encoded_audios = feature_extractor(np_speech_inputs[0], return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)
        # Test batched
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)
        # Test audio masking
        encoded_audios = feature_extractor(
            np_speech_inputs, return_tensors="np", sampling_rate=44100, mask_audio=True
        ).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)
        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_audios = feature_extractor(np_speech_inputs, return_tensors="np", sampling_rate=44100).audio_values
        self.assertTrue(encoded_audios.ndim == 4)
        self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size)
        self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length)
        self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_audio_channels)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        input_speech = self._load_datasamples(1)
        feature_extractor = TvltFeatureExtractor()
        audio_values = feature_extractor(input_speech, return_tensors="pt").audio_values
        self.assertEqual(audio_values.shape, (1, 1, 192, 128))
        expected_values = torch.tensor([[-0.3032, -0.2708], [-0.4434, -0.4007]])
        self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2], expected_values, atol=1e-4))
| 104 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
UpperCamelCase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt"""
),
"""google/electra-base-generator""": """https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt""",
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""google/electra-small-generator""": (
"""https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json"""
),
"""google/electra-base-generator""": (
"""https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json"""
),
"""google/electra-large-generator""": (
"""https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json"""
),
"""google/electra-small-discriminator""": (
"""https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-base-discriminator""": (
"""https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json"""
),
"""google/electra-large-discriminator""": (
"""https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json"""
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""google/electra-small-generator""": 512,
"""google/electra-base-generator""": 512,
"""google/electra-large-generator""": 512,
"""google/electra-small-discriminator""": 512,
"""google/electra-base-discriminator""": 512,
"""google/electra-large-discriminator""": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"""google/electra-small-generator""": {"""do_lower_case""": True},
"""google/electra-base-generator""": {"""do_lower_case""": True},
"""google/electra-large-generator""": {"""do_lower_case""": True},
"""google/electra-small-discriminator""": {"""do_lower_case""": True},
"""google/electra-base-discriminator""": {"""do_lower_case""": True},
"""google/electra-large-discriminator""": {"""do_lower_case""": True},
}
class ElectraTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ElectraTokenizer
    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
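# Illustrative usage sketch (not part of the original file; assumes the class above
# corresponds to transformers' ElectraTokenizerFast and that the model id is reachable):
#
#   tokenizer = ElectraTokenizerFast.from_pretrained("google/electra-small-discriminator")
#   encoding = tokenizer("ELECTRA replaces tokens and trains a discriminator.")
#   print(encoding.input_ids)  # [CLS] ... [SEP] ids, built by build_inputs_with_special_tokens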
| 104 | 1 |
'''simple docstring'''
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# TODO Update this
ESM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/esm-1b": "https://huggingface.co/facebook/esm-1b/resolve/main/config.json",
    # See all ESM models at https://huggingface.co/models?filter=esm
}
class EsmConfig(PretrainedConfig):
    model_type = "esm"
    def __init__(
        self,
        vocab_size=None,
        mask_token_id=None,
        pad_token_id=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1026,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        position_embedding_type="absolute",
        use_cache=True,
        emb_layer_norm_before=None,
        token_dropout=False,
        is_folding_model=False,
        esmfold_config=None,
        vocab_list=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, mask_token_id=mask_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.emb_layer_norm_before = emb_layer_norm_before
        self.token_dropout = token_dropout
        self.is_folding_model = is_folding_model
        if is_folding_model:
            if esmfold_config is None:
                logger.info("No esmfold_config supplied for folding model, using default values.")
                esmfold_config = EsmFoldConfig()
            elif isinstance(esmfold_config, dict):
                esmfold_config = EsmFoldConfig(**esmfold_config)
            self.esmfold_config = esmfold_config
            if vocab_list is None:
                logger.warning("No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!")
                self.vocab_list = get_default_vocab_list()
            else:
                self.vocab_list = vocab_list
        else:
            self.esmfold_config = None
            self.vocab_list = None
        if self.esmfold_config is not None and getattr(self.esmfold_config, "use_esm_attn_map", False):
            raise ValueError("The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!")

    def to_dict(self):
        output = super().to_dict()
        if isinstance(self.esmfold_config, EsmFoldConfig):
            output["esmfold_config"] = self.esmfold_config.to_dict()
        return output
@dataclass
class EsmFoldConfig:
    esm_type: str = None
    fp16_esm: bool = True
    use_esm_attn_map: bool = False
    esm_ablate_pairwise: bool = False
    esm_ablate_sequence: bool = False
    esm_input_dropout: float = 0
    embed_aa: bool = True
    bypass_lm: bool = False
    lddt_head_hid_dim: int = 128
    trunk: "TrunkConfig" = None

    def __post_init__(self):
        if self.trunk is None:
            self.trunk = TrunkConfig()
        elif isinstance(self.trunk, dict):
            self.trunk = TrunkConfig(**self.trunk)

    def to_dict(self):
        output = asdict(self)
        output["trunk"] = self.trunk.to_dict()
        return output
@dataclass
class TrunkConfig:
    num_blocks: int = 48
    sequence_state_dim: int = 1024
    pairwise_state_dim: int = 128
    sequence_head_width: int = 32
    pairwise_head_width: int = 32
    position_bins: int = 32
    dropout: float = 0
    layer_drop: float = 0
    cpu_grad_checkpoint: bool = False
    max_recycles: int = 4
    chunk_size: Optional[int] = 128
    structure_module: "StructureModuleConfig" = None

    def __post_init__(self):
        if self.structure_module is None:
            self.structure_module = StructureModuleConfig()
        elif isinstance(self.structure_module, dict):
            self.structure_module = StructureModuleConfig(**self.structure_module)
        if self.max_recycles <= 0:
            raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}.")
        if self.sequence_state_dim % self.sequence_head_width != 0:
            raise ValueError(
                "`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"
                f" {self.sequence_state_dim} and {self.sequence_head_width}."
            )
        if self.pairwise_state_dim % self.pairwise_head_width != 0:
            raise ValueError(
                "`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"
                f" {self.pairwise_state_dim} and {self.pairwise_head_width}."
            )
        sequence_num_heads = self.sequence_state_dim // self.sequence_head_width
        pairwise_num_heads = self.pairwise_state_dim // self.pairwise_head_width
        if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
            raise ValueError(
                "`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width`, got"
                f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}."
            )
        if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
            raise ValueError(
                "`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width`, got"
                f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}."
            )
        if self.pairwise_state_dim % 2 != 0:
            raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}.")
        if self.dropout >= 0.4:
            raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}.")

    def to_dict(self):
        output = asdict(self)
        output["structure_module"] = self.structure_module.to_dict()
        return output
@dataclass
class StructureModuleConfig:
    sequence_dim: int = 384
    pairwise_dim: int = 128
    ipa_dim: int = 16
    resnet_dim: int = 128
    num_heads_ipa: int = 12
    num_qk_points: int = 4
    num_v_points: int = 8
    dropout_rate: float = 0.1
    num_blocks: int = 8
    num_transition_layers: int = 1
    num_resnet_blocks: int = 2
    num_angles: int = 7
    trans_scale_factor: int = 10
    epsilon: float = 1e-8
    inf: float = 1e5

    def to_dict(self):
        return asdict(self)
def get_default_vocab_list():
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
)
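# Hypothetical usage sketch (not part of the original file; assumes the classes above
# are transformers' EsmConfig / EsmFoldConfig / TrunkConfig):
#
#   config = EsmConfig(
#       vocab_size=33,
#       is_folding_model=True,
#       esmfold_config={"trunk": {"num_blocks": 4}},  # nested dicts are promoted to config objects
#   )
#   print(config.to_dict()["esmfold_config"]["trunk"]["num_blocks"])  # 4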
| 358 |
'''simple docstring'''
def valid_connection(graph: list[list[int]], next_ver: int, curr_ind: int, path: list[int]) -> bool:
    # 1. Validate that path exists between current and next vertices
    if graph[path[curr_ind - 1]][next_ver] == 0:
        return False
    # 2. Validate that next vertex is not already in path
    return not any(vertex == next_ver for vertex in path)


def util_hamilton_cycle(graph: list[list[int]], path: list[int], curr_ind: int) -> bool:
    # Base Case
    if curr_ind == len(graph):
        # return whether path exists between current and starting vertices
        return graph[path[curr_ind - 1]][path[0]] == 1
    # Recursive Step
    for next_ver in range(0, len(graph)):
        if valid_connection(graph, next_ver, curr_ind, path):
            # Insert current vertex into path as next transition
            path[curr_ind] = next_ver
            # Validate created path
            if util_hamilton_cycle(graph, path, curr_ind + 1):
                return True
            # Backtrack
            path[curr_ind] = -1
    return False


def hamilton_cycle(graph: list[list[int]], start_index: int = 0) -> list[int]:
    path = [-1] * (len(graph) + 1)
    # initialize start and end of path with starting index
    path[0] = path[-1] = start_index
    # evaluate; if we find an answer return the path, otherwise return an empty list
    return path if util_hamilton_cycle(graph, path, 1) else []
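# Illustrative usage (not part of the original module): a 5-vertex graph that
# contains the Hamiltonian cycle 0 -> 1 -> 2 -> 4 -> 3 -> 0.
#
#   example_graph = [
#       [0, 1, 0, 1, 0],
#       [1, 0, 1, 1, 1],
#       [0, 1, 0, 0, 1],
#       [1, 1, 0, 0, 1],
#       [0, 1, 1, 1, 0],
#   ]
#   hamilton_cycle(example_graph)  # [0, 1, 2, 4, 3, 0]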
| 358 | 1 |
"""simple docstring"""
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two tensor protos while ignoring their names
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:  # float32
                    mem_size *= 4
                elif dtype == 6:  # int32
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:  # int64 / double
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
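# Hypothetical usage sketch (not part of the original script; the file path is
# illustrative): deduplicate shared initializers in an exported ONNX file and
# load the optimized copy back.
#
#   optimized_path = remove_dup_initializers("model/encoder.onnx")
#   optimized_model = onnx.load(optimized_path)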
| 645 |
"""simple docstring"""
from __future__ import annotations
def median_of_two_arrays(nums1: list[float], nums2: list[float]) -> float:
    all_numbers = sorted(nums1 + nums2)
    div, mod = divmod(len(all_numbers), 2)
    if mod == 1:
        return all_numbers[div]
    else:
        return (all_numbers[div] + all_numbers[div - 1]) / 2


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    array_1 = [float(x) for x in input("Enter the elements of first array: ").split()]
    array_2 = [float(x) for x in input("Enter the elements of second array: ").split()]
    print(f"The median of two arrays is: {median_of_two_arrays(array_1, array_2)}")
| 645 | 1 |
'''simple docstring'''
from pathlib import Path
import numpy as np
from PIL import Image
def rgb_to_gray(rgb: np.ndarray) -> np.ndarray:
    r, g, b = rgb[:, :, 0], rgb[:, :, 1], rgb[:, :, 2]
    return 0.2989 * r + 0.5870 * g + 0.1140 * b


def gray_to_binary(gray: np.ndarray) -> np.ndarray:
    return (gray > 127) & (gray <= 255)


def dilation(image: np.ndarray, kernel: np.ndarray) -> np.ndarray:
    output = np.zeros_like(image)
    image_padded = np.zeros(
        (image.shape[0] + kernel.shape[0] - 1, image.shape[1] + kernel.shape[1] - 1)
    )
    # Copy image to padded image
    image_padded[kernel.shape[0] - 2 : -1, kernel.shape[1] - 2 : -1] = image
    # Iterate over image & apply kernel
    for x in range(image.shape[1]):
        for y in range(image.shape[0]):
            summation = (
                kernel * image_padded[y : y + kernel.shape[0], x : x + kernel.shape[1]]
            ).sum()
            output[y, x] = int(summation > 0)
    return output


if __name__ == "__main__":
    # read original image
    lena_path = Path(__file__).resolve().parent / "image_data" / "lena.jpg"
    lena = np.array(Image.open(lena_path))
    # kernel to be applied
    structuring_element = np.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]])
    output = dilation(gray_to_binary(rgb_to_gray(lena)), structuring_element)
    # Save the output image
    pil_img = Image.fromarray(output).convert("RGB")
    pil_img.save("result_dilation.png") | 718 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.ndarray:
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio
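# Illustrative sketch (not part of the original helpers; the file name is hypothetical):
# decode the raw bytes of an audio file into a mono float32 waveform at 16 kHz.
#
#   with open("sample.flac", "rb") as f:
#       waveform = ffmpeg_read(f.read(), sampling_rate=16000)
#   print(waveform.shape, waveform.dtype)  # (n_samples,) float32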
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"
    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
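# Hypothetical consumption sketch (not in the original module; requires ffmpeg and a
# default capture device): read a few 1-second partial chunks from the microphone.
#
#   for i, chunk in enumerate(
#       ffmpeg_microphone_live(sampling_rate=16000, chunk_length_s=5.0, stream_chunk_s=1.0)
#   ):
#       print(chunk["sampling_rate"], chunk["raw"].shape, chunk.get("partial"))
#       if i >= 10:
#           break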
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
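# Minimal illustration of the chunking contract (hypothetical input, not in the
# original): with chunk_len=4 and stride=(1, 1), consecutive chunks overlap by the
# stride bytes.
#
#   list(chunk_bytes_iter(iter([b"abcdefgh"]), 4, stride=(1, 1)))
#   # -> [{'raw': b'abcd', 'stride': (0, 1)}, {'raw': b'cdef', 'stride': (1, 1)},
#   #     {'raw': b'efgh', 'stride': (1, 1)}, {'raw': b'gh', 'stride': (1, 0)}]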
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error | 39 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout():
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
def test_offline_with_connection_error():
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")


def test_offline_with_datasets_offline_mode_enabled():
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 411 |
import argparse
import os
import numpy as np
import tensorflow as tf
import torch
from transformers import BertModel
def convert_pytorch_checkpoint_to_tf(model: BertModel, ckpt_dir: str, model_name: str):
    tensors_to_transpose = ("dense.weight", "attention.self.query", "attention.self.key", "attention.self.value")
    var_map = (
        ("layer.", "layer_"),
        ("word_embeddings.weight", "word_embeddings"),
        ("position_embeddings.weight", "position_embeddings"),
        ("token_type_embeddings.weight", "token_type_embeddings"),
        (".", "/"),
        ("LayerNorm/weight", "LayerNorm/gamma"),
        ("LayerNorm/bias", "LayerNorm/beta"),
        ("weight", "kernel"),
    )
    if not os.path.isdir(ckpt_dir):
        os.makedirs(ckpt_dir)
    state_dict = model.state_dict()

    def to_tf_var_name(name: str):
        for patt, repl in iter(var_map):
            name = name.replace(patt, repl)
        return f"bert/{name}"

    def create_tf_var(tensor: np.ndarray, name: str, session: tf.Session):
        tf_dtype = tf.dtypes.as_dtype(tensor.dtype)
        tf_var = tf.get_variable(dtype=tf_dtype, shape=tensor.shape, name=name, initializer=tf.zeros_initializer())
        session.run(tf.variables_initializer([tf_var]))
        session.run(tf_var)
        return tf_var

    tf.reset_default_graph()
    with tf.Session() as session:
        for var_name in state_dict:
            tf_name = to_tf_var_name(var_name)
            torch_tensor = state_dict[var_name].numpy()
            if any(x in var_name for x in tensors_to_transpose):
                torch_tensor = torch_tensor.T
            tf_var = create_tf_var(tensor=torch_tensor, name=tf_name, session=session)
            tf.keras.backend.set_value(tf_var, torch_tensor)
            tf_weight = session.run(tf_var)
            print(f"Successfully created {tf_name}: {np.allclose(tf_weight, torch_tensor)}")
        saver = tf.train.Saver(tf.trainable_variables())
        saver.save(session, os.path.join(ckpt_dir, model_name.replace("-", "_") + ".ckpt"))


def main(raw_args=None):
    parser = argparse.ArgumentParser()
    parser.add_argument("--model_name", type=str, required=True, help="model name e.g. bert-base-uncased")
    parser.add_argument(
        "--cache_dir", type=str, default=None, required=False, help="Directory containing pytorch model"
    )
    parser.add_argument("--pytorch_model_path", type=str, required=True, help="/path/to/<pytorch-model-name>.bin")
    parser.add_argument("--tf_cache_dir", type=str, required=True, help="Directory in which to save tensorflow model")
    args = parser.parse_args(raw_args)
    model = BertModel.from_pretrained(
        pretrained_model_name_or_path=args.model_name,
        state_dict=torch.load(args.pytorch_model_path),
        cache_dir=args.cache_dir,
    )
    convert_pytorch_checkpoint_to_tf(model=model, ckpt_dir=args.tf_cache_dir, model_name=args.model_name)
if __name__ == "__main__":
main()
| 411 | 1 |
'''simple docstring'''
def alternative_string_arrange(first_str: str, second_str: str) -> str:
    first_str_length: int = len(first_str)
    second_str_length: int = len(second_str)
    abs_length: int = (
        first_str_length if first_str_length > second_str_length else second_str_length
    )
    output_list: list = []
    for char_count in range(abs_length):
        if char_count < first_str_length:
            output_list.append(first_str[char_count])
        if char_count < second_str_length:
            output_list.append(second_str[char_count])
    return "".join(output_list)


if __name__ == "__main__":
    print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 438 | '''simple docstring'''
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All prime numbers are of the form 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10_001) -> int:
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
            num += 1
        else:
            num += 1
    return primes[len(primes) - 1]


if __name__ == "__main__":
    print(f"{solution() = }")
| 438 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
def create_rename_keys(encoder_config, decoder_config):
    rename_keys = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm1.weight', F'encoder.encoder.layer.{i}.layernorm_before.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm1.bias', F'encoder.encoder.layer.{i}.layernorm_before.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.weight', F'encoder.encoder.layer.{i}.attention.output.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.attn.proj.bias', F'encoder.encoder.layer.{i}.attention.output.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.norm2.weight', F'encoder.encoder.layer.{i}.layernorm_after.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.norm2.bias', F'encoder.encoder.layer.{i}.layernorm_after.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.weight', F'encoder.encoder.layer.{i}.intermediate.dense.weight') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc1.bias', F'encoder.encoder.layer.{i}.intermediate.dense.bias') )
rename_keys.append(
(F'encoder.deit.blocks.{i}.mlp.fc2.weight', F'encoder.encoder.layer.{i}.output.dense.weight') )
rename_keys.append((F'encoder.deit.blocks.{i}.mlp.fc2.bias', F'encoder.encoder.layer.{i}.output.dense.bias') )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def read_in_q_k_v(state_dict, encoder_config):
    for i in range(encoder_config.num_hidden_layers):
        # queries, keys and values (only weights, no biases)
        in_proj_weight = state_dict.pop(F'encoder.deit.blocks.{i}.attn.qkv.weight')
        state_dict[F'encoder.encoder.layer.{i}.attention.attention.query.weight'] = in_proj_weight[
            : encoder_config.hidden_size, :
        ]
        state_dict[F'encoder.encoder.layer.{i}.attention.attention.key.weight'] = in_proj_weight[
            encoder_config.hidden_size : encoder_config.hidden_size * 2, :
        ]
        state_dict[F'encoder.encoder.layer.{i}.attention.attention.value.weight'] = in_proj_weight[
            -encoder_config.hidden_size :, :
        ]
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def prepare_img(checkpoint_url):
    if "handwritten" in checkpoint_url:
        url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg"  # industry
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
        # url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
    elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
        url = "https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"
    im = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return im
@torch.no_grad()
def convert_tr_ocr_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    encoder_config = ViTConfig(image_size=384, qkv_bias=False)
    decoder_config = TrOCRConfig()
    # size of the architecture
    if "base" in checkpoint_url:
        decoder_config.encoder_hidden_size = 768
    elif "large" in checkpoint_url:
        # use ViT-large encoder
        encoder_config.hidden_size = 1024
        encoder_config.intermediate_size = 4096
        encoder_config.num_hidden_layers = 24
        encoder_config.num_attention_heads = 16
        decoder_config.encoder_hidden_size = 1024
    else:
        raise ValueError("Should either find 'base' or 'large' in checkpoint URL")
    # the large-printed + stage1 checkpoints use sinusoidal position embeddings, no layernorm afterwards
    if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
        decoder_config.tie_word_embeddings = False
        decoder_config.activation_function = "relu"
        decoder_config.max_position_embeddings = 1024
        decoder_config.scale_embedding = True
        decoder_config.use_learned_position_embeddings = False
        decoder_config.layernorm_embedding = False
    # load HuggingFace model
    encoder = ViTModel(encoder_config, add_pooling_layer=False)
    decoder = TrOCRForCausalLM(decoder_config)
    model = VisionEncoderDecoderModel(encoder=encoder, decoder=decoder)
    model.eval()
    # load state_dict of original model, rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu", check_hash=True)["model"]
    rename_keys = create_rename_keys(encoder_config, decoder_config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, encoder_config)
    # remove parameters we don't need
    del state_dict["encoder.deit.head.weight"]
    del state_dict["encoder.deit.head.bias"]
    del state_dict["decoder.version"]
    # add prefix to decoder keys
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("decoder") and "output_projection" not in key:
            state_dict["decoder.model." + key] = val
        else:
            state_dict[key] = val
    # load state dict
    model.load_state_dict(state_dict)
    # Check outputs on an image
    image_processor = ViTImageProcessor(size=encoder_config.image_size)
    tokenizer = RobertaTokenizer.from_pretrained("roberta-large")
    processor = TrOCRProcessor(image_processor, tokenizer)
    pixel_values = processor(images=prepare_img(checkpoint_url), return_tensors="pt").pixel_values
    # verify logits
    decoder_input_ids = torch.tensor([[model.config.decoder.decoder_start_token_id]])
    outputs = model(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids)
    logits = outputs.logits
    expected_shape = torch.Size([1, 1, 50265])
    if "trocr-base-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311]
        )
    elif "trocr-large-handwritten" in checkpoint_url:
        expected_slice = torch.tensor(
            [-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170]
        )
    elif "trocr-base-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210]
        )
    elif "trocr-large-printed" in checkpoint_url:
        expected_slice = torch.tensor(
            [-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535]
        )
    if "stage1" not in checkpoint_url:
        assert logits.shape == expected_shape, "Shape of logits not as expected"
        assert torch.allclose(logits[0, 0, :10], expected_slice, atol=1e-3), "First elements of logits not as expected"
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(F'Saving model to {pytorch_dump_folder_path}')
    model.save_pretrained(pytorch_dump_folder_path)
    print(F'Saving processor to {pytorch_dump_folder_path}')
    processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_url",
        default="https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt",
        type=str,
        help="URL to the original PyTorch checkpoint (.pt file).",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
    )
    args = parser.parse_args()

    convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
from typing import Optional
from urllib.parse import quote

import huggingface_hub as hfh
from packaging import version


def hf_hub_url(repo_id: str, path: str, revision: Optional[str] = None) -> str:
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
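

# Illustrative call (the file path is a made-up example; the exact URL string
# depends on the installed huggingface_hub version):
#     hf_hub_url("squad", "plain_text/train-00000-of-00001.parquet")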
from __future__ import annotations
import os
from collections.abc import Mapping
EdgeT = tuple[int, int]


class Graph:
    """Weighted undirected graph on integer-labelled vertices."""

    def __init__(self, vertices: set[int], edges: Mapping[EdgeT, int]):
        self.vertices: set[int] = vertices
        # store every edge with its endpoints in sorted order
        self.edges: dict[EdgeT, int] = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge: EdgeT, weight: int):
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self) -> "Graph":
        """Build a minimum spanning tree by repeatedly adding the cheapest edge
        that connects the current tree to a new vertex."""
        subgraph = Graph({min(self.vertices)}, {})
        min_edge: EdgeT
        min_weight: int

        while len(subgraph.vertices) < len(self.vertices):
            min_weight = max(self.edges.values()) + 1
            for edge, weight in self.edges.items():
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)

        return subgraph
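

# Tiny sanity check (made-up graph, not the Project Euler input):
#     g = Graph({1, 2, 3}, {(1, 2): 1, (2, 3): 2, (1, 3): 3})
#     sum(g.prims_algorithm().edges.values())  # -> 3, via edges (1, 2) and (2, 3)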
def solution(filename: str = "p107_network.txt") -> int:
    script_dir: str = os.path.abspath(os.path.dirname(__file__))
    network_file: str = os.path.join(script_dir, filename)
    edges: dict[EdgeT, int] = {}

    with open(network_file) as f:
        data = f.read().strip().split("\n")

    adjacency_matrix = [line.split(",") for line in data]
    for edge_a in range(1, len(adjacency_matrix)):
        for edge_b in range(edge_a):
            if adjacency_matrix[edge_a][edge_b] != "-":
                edges[(edge_b, edge_a)] = int(adjacency_matrix[edge_a][edge_b])

    graph = Graph(set(range(len(adjacency_matrix))), edges)
    subgraph = graph.prims_algorithm()

    initial_total = sum(graph.edges.values())
    optimal_total = sum(subgraph.edges.values())

    return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
ENERGY_CONVERSION = {
    "joule": 1.0,
    "kilojoule": 1000,
    "megajoule": 1000000,
    "gigajoule": 1000000000,
    "wattsecond": 1.0,
    "watthour": 3600,
    "kilowatthour": 3600000,
    "newtonmeter": 1.0,
    "calorie_nutr": 4186.8,
    "kilocalorie_nutr": 4186800.00,
    "electronvolt": 1.602176634e-19,
    "britishthermalunit_it": 1055.05585,
    "footpound": 1.355_818,
}


def energy_conversion(from_type: str, to_type: str, value: float) -> float:
    if to_type not in ENERGY_CONVERSION or from_type not in ENERGY_CONVERSION:
        msg = (
            f"Incorrect 'from_type' or 'to_type' value: {from_type!r}, {to_type!r}\n"
            f"Valid values are: {', '.join(ENERGY_CONVERSION)}"
        )
        raise ValueError(msg)
    return value * ENERGY_CONVERSION[from_type] / ENERGY_CONVERSION[to_type]
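

# Examples (both follow from the table above):
#     energy_conversion("joule", "kilojoule", 1000)  # -> 1.0
#     energy_conversion("kilowatthour", "joule", 1)  # -> 3600000.0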
if __name__ == "__main__":
import doctest
doctest.testmod()
import unittest

from transformers import load_tool
from transformers.utils import is_torch_available


if is_torch_available():
    import torch

from transformers.testing_utils import require_torch

from .test_tools_common import ToolTesterMixin


@require_torch
class TextToSpeechToolTester(unittest.TestCase, ToolTesterMixin):
    def setUp(self):
        self.tool = load_tool("text-to-speech")
        self.tool.setup()

    def test_exact_match_arg(self):
        # the model isn't fully deterministic, so fix the seed before generating
        torch.manual_seed(0)
        result = self.tool("hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )

    def test_exact_match_kwarg(self):
        torch.manual_seed(0)
        result = self.tool(text="hey")
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3],
                torch.tensor([-0.0005966668832115829, -0.0003657640190795064, -0.00013439502799883485]),
            )
        )
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
MOE_LAYER_NAME_MAPPING = {
'''/attention/''': '''/0/SelfAttention/''',
'''/self_attention/''': '''/0/SelfAttention/''',
'''/encoder_decoder_attention/''': '''/1/EncDecAttention/''',
'''value''': '''v''',
'''query''': '''q''',
'''key''': '''k''',
'''out''': '''o''',
'''pre_self_attention_layer_norm''': '''0/layer_norm''',
'''pre_cross_attention_layer_norm''': '''1/layer_norm''',
'''pre_attention_layer_norm''': '''0/layer_norm''', # previously 1, but seems wrong
'''token_embedder''': '''shared''',
'''encoder_norm''': '''final_layer_norm''',
'''decoder_norm''': '''final_layer_norm''',
'''relpos_bias/rel_embedding''': '''block/0/layer/0/SelfAttention/relative_attention_bias/weight''',
'''router/router_weights/w/''': '''router/classifier/''',
'''roer/roer_weights/w/''': '''router/classifier/''',
'''logits_dense''': '''lm_head''',
}
def rename_keys(s_dict):
    # 1. in HF T5, we have block.{x}.layer.{y}. which corresponds to layer.{x} in the original model
    keys = list(s_dict.keys())
    for key in keys:
        layer_to_block_of_layer = r".*/layers_(\d+)"
        new_key = key
        if re.match(layer_to_block_of_layer, key):
            new_key = re.sub(r"layers_(\d+)", r"block/\1/layer", new_key)

        layer_to_block_of_layer = r"(encoder|decoder)\/"
        if re.match(layer_to_block_of_layer, key):
            groups = re.match(layer_to_block_of_layer, new_key).groups()
            if groups[0] == "encoder":
                new_key = re.sub(r"/mlp/", r"/1/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/1/layer_norm/", new_key)
            elif groups[0] == "decoder":
                new_key = re.sub(r"/mlp/", r"/2/mlp/", new_key)
                new_key = re.sub(r"/pre_mlp_layer_norm/", r"/2/layer_norm/", new_key)

        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                new_key = new_key.replace(old_key, temp_key)

        print(f"{key} -> {new_key}")
        s_dict[new_key] = s_dict.pop(key)

    if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T
    if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
        s_dict["decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"] = s_dict[
            "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight"
        ].T

    # 3. Take extra care of the EXPERTS layer: split the stacked expert weights
    for key in list(s_dict.keys()):
        if "expert" in key:
            num_experts = s_dict[key].shape[0]
            expert_weights = s_dict[key]
            for idx in range(num_experts):
                s_dict[key.replace("expert/", f"experts/expert_{idx}/")] = expert_weights[idx]
                print(f"{key} -> {key.replace('expert/', f'experts/expert_{idx}/')}")
            s_dict.pop(key)

    return s_dict
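

# Illustrative mapping (the input key is an assumed example):
#     "encoder/layers_0/attention/query/kernel"
#         -> "encoder/block/0/layer/0/SelfAttention/q/kernel"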
GIN_TO_CONFIG_MAPPING = {
'''NUM_ENCODER_LAYERS''': '''num_layers''',
'''NUM_DECODER_LAYERS''': '''num_decoder_layers''',
'''NUM_HEADS''': '''num_heads''',
'''HEAD_DIM''': '''d_kv''',
'''EMBED_DIM''': '''d_model''',
'''MLP_DIM''': '''d_ff''',
'''NUM_SELECTED_EXPERTS''': '''num_selected_experts''',
'''NUM_ENCODER_SPARSE_LAYERS''': '''num_sparse_encoder_layers''',
'''NUM_DECODER_SPARSE_LAYERS''': '''num_sparse_decoder_layers''',
'''dense.MlpBlock.activations''': '''feed_forward_proj''',
}
def convert_gin_to_config(gin_file, num_experts):
    # Convert a google-style gin config to the Hugging Face format
    import regex as re

    with open(gin_file, "r") as f:
        raw_gin = f.read()

    regex_match = re.findall(r"(.*) = ([0-9.]*)", raw_gin)
    args = {}
    for param, value in regex_match:
        if param in GIN_TO_CONFIG_MAPPING and value != "":
            args[GIN_TO_CONFIG_MAPPING[param]] = float(value) if "." in value else int(value)

    activation = re.findall(r"(.*activations) = \(\'(.*)\',\)", raw_gin)[0]
    args[GIN_TO_CONFIG_MAPPING[activation[0]]] = str(activation[1])

    args["num_experts"] = num_experts
    config = SwitchTransformersConfig(**args)
    return config
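

# Example of a gin line the regex above picks up (assumed file contents):
#     NUM_HEADS = 12   ->   config kwarg num_heads=12 via GIN_TO_CONFIG_MAPPING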
def convert_flax_checkpoint_to_pytorch(
    flax_checkpoint_path, config_file, gin_file=None, pytorch_dump_path="./", num_experts=8
):
    print(f"Loading flax weights from : {flax_checkpoint_path}")
    flax_params = checkpoints.load_t5x_checkpoint(flax_checkpoint_path)

    if gin_file is not None:
        config = convert_gin_to_config(gin_file, num_experts)
    else:
        config = SwitchTransformersConfig.from_pretrained(config_file)

    pt_model = SwitchTransformersForConditionalGeneration(config)

    flax_params = flax_params["target"]
    flax_params = flatten_dict(flax_params, sep="/")
    flax_params = rename_keys(flax_params)
    flax_params = unflatten_dict(flax_params, sep="/")

    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model, flax_params)

    print(f"Save PyTorch model to {pytorch_dump_path}")
    pt_model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--switch_t5x_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the'''
''' model architecture. If not provided, a `gin_file` has to be provided.'''
),
)
parser.add_argument(
'''--gin_file''',
default=None,
type=str,
required=False,
help='''Path to the gin config file. If not provided, a `config_file` has to be passed ''',
)
parser.add_argument(
'''--config_name''', default=None, type=str, required=False, help='''Config name of SwitchTransformers model.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output pytorch model.'''
)
parser.add_argument('''--num_experts''', default=8, type=int, required=False, help='''Number of experts''')
    args = parser.parse_args()
    convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
"""Compute term frequency, document frequency, inverse document frequency and tf-idf."""
import string
from math import log10


def term_frequency(term: str, document: str) -> int:
    # strip all punctuation and newlines, then tokenize on spaces
    document_without_punctuation = document.translate(
        str.maketrans("", "", string.punctuation)
    ).replace("\n", "")
    tokenize_document = document_without_punctuation.split(" ")  # word tokenization
    return len([word for word in tokenize_document if word.lower() == term.lower()])


def document_frequency(term: str, corpus: str):
    corpus_without_punctuation = corpus.lower().translate(
        str.maketrans("", "", string.punctuation)
    )  # strip all punctuation and replace it with ''
    docs = corpus_without_punctuation.split("\n")
    term = term.lower()
    return (len([doc for doc in docs if term in doc]), len(docs))


def inverse_document_frequency(df: int, n: int, smoothing: bool = False) -> float:
    if smoothing:
        if n == 0:
            raise ValueError("log10(0) is undefined.")
        return round(1 + log10(n / (1 + df)), 3)

    if df == 0:
        raise ZeroDivisionError("df must be > 0")
    elif n == 0:
        raise ValueError("log10(0) is undefined.")
    return round(log10(n / df), 3)


def tf_idf(tf: int, idf: int) -> float:
    return round(tf * idf, 3)
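

# Worked examples (verifiable by hand):
#     term_frequency("to", "To be, or not to be")  # -> 2 ("To" and "to" both match)
#     tf_idf(2, 0.477)                             # -> 0.954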
"""Count hollow square laminae that can be formed in at most ``n_limit`` ways."""
from collections import defaultdict
from math import ceil, sqrt


def solution(t_limit: int = 1_000_000, n_limit: int = 10) -> int:
    count = defaultdict(int)

    for outer_width in range(3, (t_limit // 4) + 2):
        if outer_width * outer_width > t_limit:
            hole_width_lower_bound = max(
                ceil(sqrt(outer_width * outer_width - t_limit)), 1
            )
        else:
            hole_width_lower_bound = 1
        # the hole must have the same parity as the outer square
        hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2

        for hole_width in range(hole_width_lower_bound, outer_width - 1, 2):
            count[outer_width * outer_width - hole_width * hole_width] += 1

    return sum(1 for n in count.values() if 1 <= n <= n_limit)
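

# Worked example: an outer square of width 3 with a hole of width 1 uses
# 3 * 3 - 1 * 1 = 8 tiles, so it increments count[8] by one.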
if __name__ == "__main__":
print(f"""{solution() = }""")
from typing import Optional, Union

import torch
from torch import nn

from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin


class ImageNormalizer(ModelMixin, ConfigMixin):
    """Holds the mean and standard deviation used to (un)normalize image embeddings."""

    @register_to_config
    def __init__(self, embedding_dim: int = 768):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
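

# Minimal usage sketch (assumes a batch of 768-dim embeddings):
#     normalizer = ImageNormalizer(embedding_dim=768)
#     embeds = torch.randn(4, 768)
#     torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds)  # -> True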
import torch

from diffusers import DiffusionPipeline


class CustomLocalPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    def __call__(self):
        image = torch.randn(
            (1, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
        )
        timestep = 1

        model_output = self.unet(image, timestep).sample
        scheduler_output = self.scheduler.step(model_output, timestep, image).prev_sample

        # a deterministic output: the difference cancels, leaving a tensor of ones
        result = scheduler_output - scheduler_output + torch.ones_like(scheduler_output)

        return result
def decimal_to_fraction(decimal: int | float | str) -> tuple[int, int]:
    try:
        decimal = float(decimal)
    except ValueError:
        raise ValueError("Please enter a valid number")
    fractional_part = decimal - int(decimal)
    if fractional_part == 0:
        return int(decimal), 1
    else:
        number_of_frac_digits = len(str(decimal).split(".")[1])
        numerator = int(decimal * (10**number_of_frac_digits))
        denominator = 10**number_of_frac_digits
        # reduce the fraction via the Euclidean algorithm; divisor ends up as the gcd
        dividend, divisor = denominator, numerator
        while True:
            remainder = dividend % divisor
            if remainder == 0:
                break
            dividend, divisor = divisor, remainder
        numerator, denominator = numerator / divisor, denominator / divisor
        return int(numerator), int(denominator)
if __name__ == "__main__":
print(F"{decimal_to_fraction(2) = }")
print(F"{decimal_to_fraction(89.0) = }")
print(F"{decimal_to_fraction('67') = }")
print(F"{decimal_to_fraction('45.0') = }")
print(F"{decimal_to_fraction(1.5) = }")
print(F"{decimal_to_fraction('6.25') = }")
print(F"{decimal_to_fraction('78td') = }")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
"""configuration_squeezebert""": [
"""SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SqueezeBertConfig""",
"""SqueezeBertOnnxConfig""",
],
"""tokenization_squeezebert""": ["""SqueezeBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_squeezebert_fast"] = ["SqueezeBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_squeezebert"] = [
"""SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SqueezeBertForMaskedLM""",
"""SqueezeBertForMultipleChoice""",
"""SqueezeBertForQuestionAnswering""",
"""SqueezeBertForSequenceClassification""",
"""SqueezeBertForTokenClassification""",
"""SqueezeBertModel""",
"""SqueezeBertModule""",
"""SqueezeBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_squeezebert import (
SQUEEZEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
SqueezeBertConfig,
SqueezeBertOnnxConfig,
)
from .tokenization_squeezebert import SqueezeBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_squeezebert_fast import SqueezeBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_squeezebert import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
SqueezeBertModule,
SqueezeBertPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
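
# _LazyModule defers the heavy submodule imports until one of the names in
# _import_structure is first accessed, which keeps importing the package cheap.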
"""simple docstring"""
def counting_sort(collection):
    if collection == []:
        return []

    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)

    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length

    # count how often each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1

    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i the collection has
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]

    # create the output collection
    ordered = [0] * coll_len

    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1

    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
"""simple docstring"""
import warnings

from ..trainer import Trainer
from ..utils import logging


logger = logging.get_logger(__name__)


class SageMakerTrainer(Trainer):
    def __init__(self, args=None, **kwargs):
        warnings.warn(
            "`SageMakerTrainer` is deprecated and will be removed in v5 of Transformers. You can use `Trainer` "
            "instead.",
            FutureWarning,
        )
        super().__init__(args=args, **kwargs)
def binary_exponentiation(a, n, mod):
    if n == 0:
        return 1
    elif n % 2 == 1:
        return (binary_exponentiation(a, n - 1, mod) * a) % mod
    else:
        # n is even here, so integer halving is exact
        b = binary_exponentiation(a, n // 2, mod)
        return (b * b) % mod
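

# Why the check below holds: by Fermat's little theorem, for a prime p and b
# not divisible by p, b ** (p - 2) is the modular inverse of b mod p, so
# (a / b) % p equals (a * b ** (p - 2)) % p without doing any real division.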
# a prime number
p = 701

a = 1000000000
b = 10

# using the binary exponentiation function, O(log(p)):
print((a / b) % p == (a * binary_exponentiation(b, p - 2, p)) % p)
print((a / b) % p == (a * b ** (p - 2)) % p)
import os
from collections import deque

import torch
from torch.utils.data import Dataset


class CNNDMDataset(Dataset):
    """Dataset of story files in the CNN/DailyMail format, where "@highlight"
    lines mark the summary sentences."""

    def __init__(self, path="", prefix="train"):
        assert os.path.isdir(path)

        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split("/")[-1]
        with open(document_path, encoding="utf-8") as source:
            raw_story = source.read()
            story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split("\n")]))

    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]

    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith("@highlight"):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []

    # gather summary lines: everything left after the first "@highlight"
    summary_lines = list(filter(lambda t: not t.startswith("@highlight"), lines))

    return story_lines, summary_lines
def _add_missing_period(line):
    END_TOKENS = [".", "!", "?", "...", "'", "`", '"', "\u2019", "\u2019", ")"]
    if line.startswith("@highlight"):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def fit_to_block_size(sequence, block_size, pad_token_id):
    """Truncate sequences longer than the block size and pad shorter ones."""
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
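

# Examples: fit_to_block_size([1, 2, 3], 5, 0) -> [1, 2, 3, 0, 0]
#           fit_to_block_size([1, 2, 3], 2, 0) -> [1, 2]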
def build_mask(sequence, pad_token_id):
    """Builds the attention mask: 1 for real tokens, 0 for padding."""
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def encode_for_summarization(story_lines, summary_lines, tokenizer):
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def compute_token_type_ids(batch, separator_token_id):
    """Segment ids alternate between 0 and 1, flipping at every separator token."""
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
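

# Small illustrative check (101 is an assumed separator id):
#     compute_token_type_ids([[101, 5, 6, 101, 7]], 101)  # -> tensor([[0, 0, 0, 1, 1]])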