from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings: int = 77,
        additional_embeddings: int = 4,
        dropout: float = 0.0,
        time_embedding_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embedding_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_proj_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim,
                    num_attention_heads,
                    attention_head_dim,
                    dropout=dropout,
                    activation_fn="gelu",
                    attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)
        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]

        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)
        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
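# Illustrative usage sketch (an addition, not part of the original module).
# It assumes the class above is exported as `diffusers.PriorTransformer`; the
# tiny dimensions are arbitrary, and the only constraint is that the sequence
# assembled in `forward` (num_embeddings encoder tokens + proj + time +
# hidden + prd) matches `positional_embedding`, which the config guarantees.
#
#     import torch
#     from diffusers import PriorTransformer
#
#     prior = PriorTransformer(
#         num_attention_heads=2, attention_head_dim=4, num_layers=2,
#         embedding_dim=8, num_embeddings=7, additional_embeddings=4,
#     )
#     out = prior(
#         hidden_states=torch.randn(1, 8),
#         timestep=1,
#         proj_embedding=torch.randn(1, 8),
#         encoder_hidden_states=torch.randn(1, 7, 8),
#     )
#     print(out.predicted_image_embedding.shape)  # torch.Size([1, 8])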
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def info_command_factory(args):
    return EnvironmentCommand()


class EnvironmentCommand(BaseDiffusersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)

    def run(self):
        hub_version = huggingface_hub.__version__

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        transformers_version = "not installed"
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__

        accelerate_version = "not installed"
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__

        xformers_version = "not installed"
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__

        info = {
            "`diffusers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Huggingface_hub version": hub_version,
            "Transformers version": transformers_version,
            "Accelerate version": accelerate_version,
            "xFormers version": xformers_version,
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from requests.exceptions import HTTPError
from transformers import AutoImageProcessor, ViTImageProcessor
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
SAMPLE_IMAGE_PROCESSING_CONFIG_DIR = get_tests_dir("fixtures")


class ImageProcessorUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")

        # Under the mock environment we get a 500 error when trying to reach the model.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = ViTImageProcessor.from_pretrained("hf-internal-testing/tiny-random-vit")
            # This check we did call the fake head request
            mock_head.assert_called()

    def test_legacy_load_from_url(self):
        # This test is for deprecated behavior and can be removed in v5
        _ = ViTImageProcessor.from_pretrained(
            "https://huggingface.co/hf-internal-testing/tiny-random-vit/resolve/main/preprocessor_config.json"
        )

    def test_image_processor_from_pretrained_subfolder(self):
        with self.assertRaises(OSError):
            # config is in subfolder, the following should not work without specifying the subfolder
            _ = AutoImageProcessor.from_pretrained("hf-internal-testing/stable-diffusion-all-variants")

        config = AutoImageProcessor.from_pretrained(
            "hf-internal-testing/stable-diffusion-all-variants", subfolder="feature_extractor"
        )

        self.assertIsNotNone(config)


@is_staging_test
class ImageProcessorPushToHubTester(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)

    @classmethod
    def tearDownClass(cls):
        try:
            delete_repo(token=cls._token, repo_id="test-image-processor")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="valid_org/test-image-processor-org")
        except HTTPError:
            pass

        try:
            delete_repo(token=cls._token, repo_id="test-dynamic-image-processor")
        except HTTPError:
            pass

    def test_push_to_hub(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="test-image-processor", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained(f"{USER}/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_in_organization(self):
        image_processor = ViTImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)
        image_processor.push_to_hub("valid_org/test-image-processor", use_auth_token=self._token)

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-image-processor")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            image_processor.save_pretrained(
                tmp_dir, repo_id="valid_org/test-image-processor-org", push_to_hub=True, use_auth_token=self._token
            )

        new_image_processor = ViTImageProcessor.from_pretrained("valid_org/test-image-processor-org")
        for k, v in image_processor.__dict__.items():
            self.assertEqual(v, getattr(new_image_processor, k))

    def test_push_to_hub_dynamic_image_processor(self):
        CustomImageProcessor.register_for_auto_class()
        image_processor = CustomImageProcessor.from_pretrained(SAMPLE_IMAGE_PROCESSING_CONFIG_DIR)

        image_processor.push_to_hub("test-dynamic-image-processor", use_auth_token=self._token)

        # This has added the proper auto_map field to the config
        self.assertDictEqual(
            image_processor.auto_map,
            {"AutoImageProcessor": "custom_image_processing.CustomImageProcessor"},
        )

        new_image_processor = AutoImageProcessor.from_pretrained(
            f"{USER}/test-dynamic-image-processor", trust_remote_code=True
        )
        # Can't make an isinstance check because the new_image_processor is from the CustomImageProcessor class of a dynamic module
        self.assertEqual(new_image_processor.__class__.__name__, "CustomImageProcessor")
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_rembert_tf_checkpoint_to_pytorch(tf_checkpoint_path, rembert_config_file, pytorch_dump_path):
    # Initialise PyTorch model from the json config
    config = RemBertConfig.from_json_file(rembert_config_file)
    print("Building PyTorch model from configuration: {}".format(str(config)))
    model = RemBertModel(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_rembert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print("Save PyTorch model to {}".format(pytorch_dump_path))
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--rembert_config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained RemBERT model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def load_checkpoint(checkpoint_path):
    """Load a fairseq/metaseq OPT checkpoint and rewrite its keys to the HF layout."""
    sd = torch.load(checkpoint_path, map_location="cpu")
    if "model" in sd.keys():
        sd = torch.load(checkpoint_path, map_location="cpu")["model"]

    # pop unnecessary weights
    keys_to_delete = [
        "decoder.version",
        "decoder.output_projection.weight",
    ]
    for key in keys_to_delete:
        if key in sd:
            sd.pop(key)

    keys_to_rename = {
        "decoder.project_in_dim.weight": "decoder.project_in.weight",
        "decoder.project_out_dim.weight": "decoder.project_out.weight",
        "decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
    }
    for old_key, new_key in keys_to_rename.items():
        if old_key in sd:
            sd[new_key] = sd.pop(old_key)

    keys = list(sd.keys())
    for key in keys:
        if ".qkv_proj." in key:
            value = sd[key]
            # We split QKV in separate Q,K,V
            q_name = key.replace(".qkv_proj.", ".q_proj.")
            k_name = key.replace(".qkv_proj.", ".k_proj.")
            v_name = key.replace(".qkv_proj.", ".v_proj.")

            depth = value.shape[0]
            assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has QKV weight is separated in K,V,Q despite the naming:
            # https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
            k, v, q = torch.split(value, depth // 3, dim=0)

            sd[q_name] = q
            sd[k_name] = k
            sd[v_name] = v
            del sd[key]

    return sd


@torch.no_grad()
def convert_opt_checkpoint(checkpoint_path, pytorch_dump_folder_path, config=None):
    state_dict = load_checkpoint(checkpoint_path)

    if config is not None:
        config = OPTConfig.from_pretrained(config)
    else:
        config = OPTConfig()

    model = OPTModel(config).half().eval()
    model.load_state_dict(state_dict)

    # Check results
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    model.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--fairseq_path",
        type=str,
        help=(
            "path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"
            " https://huggingface.co/models?other=opt_metasq"
        ),
    )
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--hf_config", default=None, type=str, help="Define HF config.")
    args = parser.parse_args()
    convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'

_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n    - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n    - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n    - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n    - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n    - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division**: Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n    - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n    - `0`: If there is a zero division, the return value is `0`.\n    - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'

_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Recall(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Sequence(datasets.Value("int32")),
                    "references": datasets.Sequence(datasets.Value("int32")),
                }
                if self.config_name == "multilabel"
                else {
                    "predictions": datasets.Value("int32"),
                    "references": datasets.Value("int32"),
                }
            ),
            reference_urls=["https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html"],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        return {"recall": float(score) if score.size == 1 else score}
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    parser.add_argument(
        "--txt2img_unclip",
        default="kakaobrain/karlo-v1-alpha",
        type=str,
        required=False,
        help="The pretrained txt2img unclip.",
    )

    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)


@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError

    @staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}

        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))

            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)

                # bert-base-multilingual-cased sometimes output "nothing ([]) when calling tokenize with just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))

            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]

            # The convention in BERT is:
            # (a) For sequence pairs:
            #  tokens:   [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
            #  type_ids:   0   0  0    0    0     0       0   0   1  1  1  1   1   1
            # (b) For single sequences:
            #  tokens:   [CLS] the dog is hairy . [SEP]
            #  type_ids:   0   0   0   0  0     0   0
            #
            # Where "type_ids" are used to indicate whether this is the first
            # sequence or the second sequence. The embedding vectors for `type=0` and
            # `type=1` were learned during pre-training and are added to the wordpiece
            # embedding vector (and position vector). This is not *strictly* necessary
            # since the [SEP] token unambiguously separates the sequences, but it makes
            # it easier for the model to learn the concept of sequences.
            #
            # For classification tasks, the first vector (corresponding to [CLS]) is
            # used as the "sentence vector". Note that this only makes sense because
            # the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features


if is_torch_available():
    import torch
    from torch import nn
    from torch.utils.data import Dataset

    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir,
                "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length)),
            )

            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples,
                        labels,
                        max_seq_length,
                        tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]),
                        cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                        sep_token=tokenizer.sep_token,
                        sep_token_extra=False,
                        pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id,
                        pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]


if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable
    unCLIP, so image embeddings can be normalized before noising and
    un-normalized afterwards.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()

        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
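# Illustrative round-trip check (an addition, not part of the original
# module): `scale` and `unscale` are exact inverses whenever `std` is
# nonzero, and `std` is initialised to ones, so this holds out of the box:
#
#     normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
#     embeds = torch.randn(2, 4)
#     assert torch.allclose(normalizer.unscale(normalizer.scale(embeds)), embeds, atol=1e-6)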
import os
def a ( A__ = "matrix.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(A__ ) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
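# Illustrative trace of the recurrence on a hand-made 2x2 grid (an addition,
# not part of the original solution). For the grid
#     1,2
#     3,4
# the two monotone paths cost 1+2+4 = 7 and 1+3+4 = 8, and the DP agrees:
#     dp = [[1, 3],
#           [4, 7]]   # dp[1][1] = 4 + min(dp[0][1], dp[1][0]) = 4 + 3 = 7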
import itertools
import random
import unittest
import numpy as np
from transformers import ASTFeatureExtractor
from transformers.testing_utils import require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
global_rng = random.Random()

if is_torch_available():
    import torch


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Creates a random float32 tensor as a nested Python list"""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values


class ASTFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2000,
        feature_size=1,
        padding_value=0.0,
        sampling_rate=16000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = floats_list((self.batch_size, self.max_seq_length))
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                _flatten(floats_list((x, self.feature_size)))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]

        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]

        return speech_inputs


@require_torch
@require_torchaudio
class ASTFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = ASTFeatureExtractor

    def setUp(self):
        self.feat_extract_tester = ASTFeatureExtractionTester(self)

    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test not batched input
        encoded_sequences_1 = feat_extract(speech_inputs[0], return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs[0], return_tensors="np").input_values
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feat_extract(speech_inputs, padding=True, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, padding=True, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feat_extract(speech_inputs, return_tensors="np").input_values
        encoded_sequences_2 = feat_extract(np_speech_inputs, return_tensors="np").input_values
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

    @require_torch
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_values.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_values": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_values.dtype == torch.float32)

    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]

    @require_torch
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_VALUES = torch.tensor(
            [-0.9894, -1.2776, -0.9066, -1.2776, -0.9349, -1.2609, -1.0386, -1.2776,
             -1.1561, -1.2776, -1.2052, -1.2723, -1.2190, -1.2132, -1.2776, -1.1133,
             -1.1953, -1.1343, -1.1584, -1.2203, -1.1770, -1.2474, -1.2381, -1.1936,
             -0.9270, -0.8317, -0.8049, -0.7706, -0.7565, -0.7869]
        )
        # fmt: on

        input_speech = self._load_datasamples(1)
        feature_extractor = ASTFeatureExtractor()
        input_values = feature_extractor(input_speech, return_tensors="pt").input_values
        self.assertEqual(input_values.shape, (1, 1024, 128))
        self.assertTrue(torch.allclose(input_values[0, 0, :30], EXPECTED_INPUT_VALUES, atol=1e-4))
from math import factorial
def solution(n: int = 20) -> int:
    """
    Returns the number of right/down lattice paths through an n x n grid,
    i.e. the central binomial coefficient C(2n, n).
    """
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3,...
    k = n // 2

    return int(factorial(n) / (factorial(k) * factorial(n - k)))


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        print(solution(20))
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print("Invalid entry - please enter a number.")
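# Illustrative values (an addition, not part of the original solution):
# solution(n) is the central binomial coefficient C(2n, n), so
#     solution(2)  == C(4, 2)   == 6             # six right/down paths in a 2x2 grid
#     solution(20) == C(40, 20) == 137846528820  # the Project Euler #15 answer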
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPSegProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViTImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)

    def __call__(self, text=None, visual_prompt=None, images=None, return_tensors=None, **kwargs):
        if text is None and visual_prompt is None and images is None:
            raise ValueError("You have to specify either text, visual prompt or images.")

        if text is not None and visual_prompt is not None:
            raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None:
            prompt_features = self.image_processor(visual_prompt, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if visual_prompt is not None and images is not None:
            encoding = {
                "pixel_values": image_features.pixel_values,
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        elif text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        elif visual_prompt is not None:
            encoding = {
                "conditional_pixel_values": prompt_features.pixel_values,
            }
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
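# Illustrative usage sketch (an addition, not part of the original module).
# The checkpoint id below is an assumption; any CLIPSeg checkpoint that ships
# a preprocessor config should behave the same way:
#
#     from PIL import Image
#     from transformers import CLIPSegProcessor
#
#     processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
#     image = Image.open("example.png")
#     inputs = processor(text=["a cat"], images=[image], return_tensors="pt")
#     # -> encoding with input_ids, attention_mask and pixel_values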
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass


def hashimage(image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()


@is_pipeline_test
@require_vision
@require_timm
@require_torch
class DepthEstimationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_DEPTH_ESTIMATION_MAPPING

    def get_test_pipeline(self, model, tokenizer, processor):
        depth_estimator = DepthEstimationPipeline(model=model, image_processor=processor)
        return depth_estimator, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]

    def run_pipeline_test(self, depth_estimator, examples):
        outputs = depth_estimator("./tests/fixtures/tests_samples/COCO/000000039769.png")
        self.assertEqual({"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)}, outputs)
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        outputs = depth_estimator(
            [
                Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "http://images.cocodataset.org/val2017/000000039769.jpg",
                # RGBA
                dataset[0]["file"],
                # LA
                dataset[1]["file"],
                # L
                dataset[2]["file"],
            ]
        )
        self.assertEqual(
            [
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
                {"predicted_depth": ANY(torch.Tensor), "depth": ANY(Image.Image)},
            ],
            outputs,
        )

    @require_tf
    @unittest.skip("Depth estimation is not implemented in TF")
    def test_small_model_tf(self):
        pass

    @slow
    @require_torch
    def test_large_model_pt(self):
        model_id = "Intel/dpt-large"
        depth_estimator = pipeline("depth-estimation", model=model_id)
        outputs = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
        outputs["depth"] = hashimage(outputs["depth"])

        # This seems flaky.
        # self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
        self.assertEqual(nested_simplify(outputs["predicted_depth"].max().item()), 29.304)
        self.assertEqual(nested_simplify(outputs["predicted_depth"].min().item()), 2.662)

    @require_torch
    def test_small_model_pt(self):
        # This is highly irregular to have no small tests.
        self.skipTest("There is not hf-internal-testing tiny model for either GLPN nor DPT")
import enum
import os
from hashlib import shaaaa
from typing import Optional
from .. import config
from .logging import get_logger
a_ :Optional[int] = get_logger(__name__)
class lowercase ( enum.Enum ):
lowerCamelCase : str = '''all_checks'''
lowerCamelCase : str = '''basic_checks'''
lowerCamelCase : Any = '''no_checks'''
class lowercase ( _UpperCAmelCase ):
pass
class lowercase ( _UpperCAmelCase ):
pass
class lowercase ( _UpperCAmelCase ):
pass
class lowercase ( _UpperCAmelCase ):
pass
def a ( A__ , A__ , A__=None ) -> Optional[int]:
'''simple docstring'''
if expected_checksums is None:
logger.info('''Unable to verify checksums.''' )
return
if len(set(A__ ) - set(A__ ) ) > 0:
raise ExpectedMoreDownloadedFiles(str(set(A__ ) - set(A__ ) ) )
if len(set(A__ ) - set(A__ ) ) > 0:
raise UnexpectedDownloadedFile(str(set(A__ ) - set(A__ ) ) )
SCREAMING_SNAKE_CASE__ : int = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
SCREAMING_SNAKE_CASE__ : Optional[Any] = ''' for ''' + verification_name if verification_name is not None else ''''''
if len(A__ ) > 0:
raise NonMatchingChecksumError(
f"""Checksums didn't match{for_verification_name}:\n"""
f"""{bad_urls}\n"""
'''Set `verification_mode=\'no_checks\'` to skip checksums verification and ignore this error''' )
logger.info('''All the checksums matched successfully''' + for_verification_name )
class lowercase ( _UpperCAmelCase ):
pass
class lowercase ( _UpperCAmelCase ):
pass
class lowercase ( _UpperCAmelCase ):
pass
class lowercase ( _UpperCAmelCase ):
pass
def a ( A__ , A__ ) -> Dict:
'''simple docstring'''
if expected_splits is None:
logger.info('''Unable to verify splits sizes.''' )
return
if len(set(A__ ) - set(A__ ) ) > 0:
raise ExpectedMoreSplits(str(set(A__ ) - set(A__ ) ) )
if len(set(A__ ) - set(A__ ) ) > 0:
raise UnexpectedSplits(str(set(A__ ) - set(A__ ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
{'''expected''': expected_splits[name], '''recorded''': recorded_splits[name]}
for name in expected_splits
if expected_splits[name].num_examples != recorded_splits[name].num_examples
]
if len(A__ ) > 0:
raise NonMatchingSplitsSizesError(str(A__ ) )
logger.info('''All the splits matched successfully.''' )
def a ( A__ , A__ = True ) -> dict:
'''simple docstring'''
if record_checksum:
SCREAMING_SNAKE_CASE__ : Optional[Any] = sha256()
with open(A__ , '''rb''' ) as f:
for chunk in iter(lambda: f.read(1 << 2_0 ) , b'''''' ):
SCREAMING_SNAKE_CASE__.update(chunk )
SCREAMING_SNAKE_CASE__ : Optional[Any] = SCREAMING_SNAKE_CASE__.hexdigest()
else:
SCREAMING_SNAKE_CASE__ : Tuple = None
return {"num_bytes": os.path.getsize(A__ ), "checksum": checksum}
def a ( A__ ) -> List[Any]:
'''simple docstring'''
if dataset_size and config.IN_MEMORY_MAX_SIZE:
return dataset_size < config.IN_MEMORY_MAX_SIZE
else:
return False
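# Editor's sketch of the comparison logic inside the checksum verifier above,
# on hypothetical toy dicts (no I/O involved):
expected_checksums = {'''url/a''': '''abc''', '''url/b''': '''def'''}
recorded_checksums = {'''url/a''': '''abc''', '''url/b''': '''xyz'''}
bad_urls = [url for url in expected_checksums if expected_checksums[url] != recorded_checksums[url]]
assert bad_urls == ['''url/b''']  # the helper would raise NonMatchingChecksumError here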
| 35 |
def a ( A__ ) -> int:
'''simple docstring'''
if A__ < 0:
raise ValueError('''Input value must be a positive integer''' )
elif not isinstance(A__ , int ):
raise TypeError('''Input value must be an \'int\' type''' )
return bin(A__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
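# Editor's sketch of the bin()-based population count used above:
# bin(25) == '0b11001', which contains three '1' characters.
assert bin(2_5 ).count('''1''' ) == 3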
| 35 | 1 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def a ( A__ , A__ , A__ , A__=None , A__=None , A__=None , A__=None , A__=None , ) -> Dict:
'''simple docstring'''
if attention_mask is None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
SCREAMING_SNAKE_CASE__ : Dict = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=A__ )
if decoder_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=A__ )
if cross_attn_head_mask is None:
SCREAMING_SNAKE_CASE__ : List[str] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=A__ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class lowercase :
def __init__( self : Tuple , _lowercase : List[str] , _lowercase : Union[str, Any]=13 , _lowercase : Optional[Any]=7 , _lowercase : int=True , _lowercase : str=False , _lowercase : Tuple=99 , _lowercase : Dict=16 , _lowercase : List[str]=2 , _lowercase : Optional[int]=4 , _lowercase : Any=4 , _lowercase : Any="relu" , _lowercase : List[str]=0.1 , _lowercase : Any=0.1 , _lowercase : List[str]=0.0 , _lowercase : Tuple=0.0 , _lowercase : Optional[int]=20 , _lowercase : Any=2 , _lowercase : Union[str, Any]=1 , _lowercase : int=0 , ):
SCREAMING_SNAKE_CASE__ : List[Any] = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = seq_length
SCREAMING_SNAKE_CASE__ : Optional[Any] = is_training
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Tuple = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : List[str] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : int = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = pad_token_id
SCREAMING_SNAKE_CASE__ : Any = bos_token_id
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any = self.eos_token_id # Eos Token
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
SCREAMING_SNAKE_CASE__ : List[str] = input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ : List[str] = decoder_input_ids.clamp(self.pad_token_id + 1 )
SCREAMING_SNAKE_CASE__ : Any = self.get_config()
SCREAMING_SNAKE_CASE__ : Tuple = prepare_mam_aaa_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def lowercase__ ( self : Optional[int] ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
return config, inputs_dict
def lowercase__ ( self : Tuple , _lowercase : str , _lowercase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = MaMaaaModel(config=_lowercase ).get_decoder().to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[int] = inputs_dict['''attention_mask''']
SCREAMING_SNAKE_CASE__ : Tuple = inputs_dict['''head_mask''']
# first forward pass
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , attention_mask=_lowercase , head_mask=_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
SCREAMING_SNAKE_CASE__ : Any = ids_tensor((self.batch_size, 3) , config.vocab_size )
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Any = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
SCREAMING_SNAKE_CASE__ : Any = model(_lowercase , attention_mask=_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_lowercase , attention_mask=_lowercase , past_key_values=_lowercase )[
'''last_hidden_state'''
]
# select random slice
SCREAMING_SNAKE_CASE__ : str = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : str = output_from_no_past[:, -3:, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : int = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(_lowercase , _lowercase , atol=1E-2 ) )
def lowercase__ ( self : Any , _lowercase : Dict , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaMaaaModel(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase )
SCREAMING_SNAKE_CASE__ : str = outputs.encoder_last_hidden_state
SCREAMING_SNAKE_CASE__ : List[str] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Any = model.get_encoder()
encoder.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = MaMaaaEncoder.from_pretrained(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder(inputs_dict['''input_ids'''] , attention_mask=inputs_dict['''attention_mask'''] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int = model.get_decoder()
decoder.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : int = MaMaaaDecoder.from_pretrained(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : int = decoder(
input_ids=inputs_dict['''decoder_input_ids'''] , attention_mask=inputs_dict['''decoder_attention_mask'''] , encoder_hidden_states=_lowercase , encoder_attention_mask=inputs_dict['''attention_mask'''] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
lowerCamelCase : str = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
lowerCamelCase : Dict = (
{
'''conversational''': MaMaaaForConditionalGeneration,
'''feature-extraction''': MaMaaaModel,
'''summarization''': MaMaaaForConditionalGeneration,
'''text2text-generation''': MaMaaaForConditionalGeneration,
'''translation''': MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
lowerCamelCase : List[str] = True
lowerCamelCase : Tuple = True
lowerCamelCase : Any = False
lowerCamelCase : int = False
def lowercase__ ( self : List[str] , _lowercase : Tuple , _lowercase : Any , _lowercase : Tuple , _lowercase : str , _lowercase : List[Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaMaaaModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(_lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = model_class.from_pretrained(_lowercase , output_loading_info=_lowercase )
self.assertEqual(info['''missing_keys'''] , [] )
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
SCREAMING_SNAKE_CASE__ : Dict = model_class(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = copy.deepcopy(self._prepare_for_class(_lowercase , _lowercase ) )
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ : int = inputs['''input_ids''']
del inputs["input_ids"]
else:
SCREAMING_SNAKE_CASE__ : int = inputs['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs.get('''decoder_input_ids''' , _lowercase )
del inputs["input_ids"]
inputs.pop('''decoder_input_ids''' , _lowercase )
SCREAMING_SNAKE_CASE__ : str = model.get_input_embeddings()
if not self.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ : List[str] = wte(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = wte(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = wte(_lowercase )
with torch.no_grad():
model(**_lowercase )[0]
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = input_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ : List[Any] = input_ids.ne(1 ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MaMaaaForConditionalGeneration(_lowercase ).eval().to(_lowercase )
if torch_device == "cuda":
model.half()
model.generate(_lowercase , attention_mask=_lowercase )
model.generate(num_beams=4 , do_sample=_lowercase , early_stopping=_lowercase , num_return_sequences=3 )
def a ( A__ ) -> List[str]:
'''simple docstring'''
return torch.tensor(A__ , dtype=torch.long , device=A__ )
a_ :int = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class lowercase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : List[Any] ):
return MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : List[str] = MaMaaaModel.from_pretrained('''facebook/m2m100_418M''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : int = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
SCREAMING_SNAKE_CASE__ : Tuple = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(model.config , _lowercase , _lowercase )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**_lowercase )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size((1, 11, 10_24) )
self.assertEqual(output.shape , _lowercase )
# change to expected output here
SCREAMING_SNAKE_CASE__ : List[str] = torch.tensor(
[[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]] , device=_lowercase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : int = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(_lowercase )
# change to intended input
SCREAMING_SNAKE_CASE__ : Any = _long_tensor([[12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38, 2]] )
SCREAMING_SNAKE_CASE__ : List[str] = _long_tensor([[2, 12_80_28, 98, 12, 3_05_27, 27_32, 1_59, 77_55, 6_19_04, 3_91_44, 38]] )
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_mam_aaa_inputs_dict(model.config , _lowercase , _lowercase )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase )[0]
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.Size((1, 11, model.config.vocab_size) )
self.assertEqual(output.shape , _lowercase )
# change to expected output here
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.tensor(
[[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]] , device=_lowercase )
self.assertTrue(torch.allclose(output[:, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : int = MaMaaaForConditionalGeneration.from_pretrained('''facebook/m2m100_418M''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MaMaaaTokenizer.from_pretrained('''facebook/m2m100_418M''' , src_lang='''fr''' , tgt_lang='''en''' )
SCREAMING_SNAKE_CASE__ : int = [
'''L\'affaire NSA souligne l\'absence totale de débat sur le renseignement''',
'''Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.''',
'''Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent'''
''' Fabius convoque l\'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de'''
''' l\'ampleur de la surveillance américaine sur l\'ensemble des communications en France.''',
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer(_lowercase , padding=_lowercase , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE__ : Tuple = model.generate(
input_ids=dct['''input_ids'''].to(_lowercase ) , attention_mask=dct['''attention_mask'''].to(_lowercase ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id('''en''' ) , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
'''The NSA case highlights the total absence of intelligence debate''',
'''I think there are two levels of response from the French government.''',
'''When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S.'''
''' Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all'''
''' communications in France.''',
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=_lowercase , skip_special_tokens=_lowercase )
assert generated == expected_en
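# Editor's sketch (hypothetical tensor): the attention-mask convention that
# prepare_mam_aaa_inputs_dict above relies on -- every non-pad position is True.
import torch
sketch_ids = torch.tensor([[5, 6, 0, 0]] )  # pretend pad_token_id == 0
assert sketch_ids.ne(0 ).tolist() == [[True, True, False, False]]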
| 35 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def a ( A__ , A__ , A__ , A__ ) -> Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
x = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
x = math.floor(val / multiple ) * multiple
if x < min_val:
x = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (output_size, output_size) if isinstance(output_size , int ) else output_size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = get_image_size(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE__ : List[str] = output_height / input_height
SCREAMING_SNAKE_CASE__ : Dict = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE__ : List[str] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE__ : Optional[Any] = scale_height
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_height * input_height , multiple=A__ )
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_width * input_width , multiple=A__ )
return (new_height, new_width)
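# Editor's sketch of the snapping rule in constraint_to_multiple_of above
# (illustrative values): a proposed side length of 500px snaps to the nearest
# multiple of 32, i.e. 512.
assert round(5_0_0 / 32 ) * 32 == 5_1_2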
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
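# Editor's sketch: the semantic map above is just an argmax over the class
# axis of the logits (shapes are illustrative).
import torch
sketch_logits = torch.randn(2 , 19 , 4 , 4 )  # (batch, num_labels, height, width)
assert sketch_logits.argmax(dim=1 ).shape == torch.Size((2, 4, 4) )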
| 35 | 1 |
from __future__ import annotations
def a ( voltage , current , resistance ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
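# Editor's sketch of the arithmetic encoded above (using the helper with its
# named parameters restored): with current = 2 A and resistance = 5 ohm, the
# missing voltage is V = I * R = 10 V.
assert a(voltage=0 , current=2 , resistance=5 ) == {'''voltage''': 10.0}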
| 35 |
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[str] = num_of_nodes
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Optional[int] , _lowercase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[Any] , _lowercase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ : Any = self.find_component(_lowercase )
def lowercase__ ( self : int , _lowercase : list[int] , _lowercase : int , _lowercase : int ):
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.find_component(_lowercase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE__ : Tuple = self.m_component[u]
SCREAMING_SNAKE_CASE__ : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : int = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = edge
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase , _lowercase , _lowercase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
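# Editor's sketch of the component lookup used by the class above, written as
# a standalone chase over a plain parent dict (hypothetical data):
parent = {0: 0, 1: 0, 2: 1}
u = 2
while parent[u] != u: u = parent[u]
assert u == 0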
| 35 | 1 |
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
a_ :Tuple = logging.get_logger(__name__)
class lowercase :
def __init__( self : str , _lowercase : str = None , _lowercase : uuid.UUID = None , _lowercase : Any=None , _lowercase : Optional[int]=None ):
if not conversation_id:
SCREAMING_SNAKE_CASE__ : Dict = uuid.uuid4()
if past_user_inputs is None:
SCREAMING_SNAKE_CASE__ : Dict = []
if generated_responses is None:
SCREAMING_SNAKE_CASE__ : Dict = []
SCREAMING_SNAKE_CASE__ : uuid.UUID = conversation_id
SCREAMING_SNAKE_CASE__ : List[str] = past_user_inputs
SCREAMING_SNAKE_CASE__ : List[str] = generated_responses
SCREAMING_SNAKE_CASE__ : Optional[str] = text
def __eq__( self : Optional[int] , _lowercase : Dict ):
if not isinstance(_lowercase , _lowercase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def lowercase__ ( self : Dict , _lowercase : str , _lowercase : bool = False ):
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
SCREAMING_SNAKE_CASE__ : int = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = text
def lowercase__ ( self : int ):
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
SCREAMING_SNAKE_CASE__ : int = None
def lowercase__ ( self : List[Any] , _lowercase : str ):
self.generated_responses.append(_lowercase )
def lowercase__ ( self : Union[str, Any] ):
for user_input, generated_response in zip(self.past_user_inputs , self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : str ):
SCREAMING_SNAKE_CASE__ : Dict = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
SCREAMING_SNAKE_CASE__ : List[str] = '''user''' if is_user else '''bot'''
output += f"""{name} >> {text} \n"""
return output
@add_end_docstrings(
_UpperCAmelCase , r'''
min_length_for_response (`int`, *optional*, defaults to 32):
The minimum length (in number of tokens) for a response.
minimum_tokens (`int`, *optional*, defaults to 10):
The minimum length of tokens to leave for a response.
''' , )
class lowercase ( _UpperCAmelCase ):
def __init__( self : Dict , *_lowercase : List[str] , **_lowercase : List[Any] ):
super().__init__(*_lowercase , **_lowercase )
if self.tokenizer.pad_token_id is None:
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer.eos_token
def lowercase__ ( self : str , _lowercase : List[Any]=None , _lowercase : Tuple=None , _lowercase : Tuple=None , **_lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : int = {}
if min_length_for_response is not None:
SCREAMING_SNAKE_CASE__ : str = min_length_for_response
if minimum_tokens is not None:
SCREAMING_SNAKE_CASE__ : Dict = minimum_tokens
if "max_length" in generate_kwargs:
SCREAMING_SNAKE_CASE__ : Optional[Any] = generate_kwargs['''max_length''']
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(_lowercase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , _lowercase : Union[Conversation, List[Conversation]] , _lowercase : Optional[Any]=0 , **_lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Any = super().__call__(_lowercase , num_workers=_lowercase , **_lowercase )
if isinstance(_lowercase , _lowercase ) and len(_lowercase ) == 1:
return outputs[0]
return outputs
def lowercase__ ( self : int , _lowercase : Conversation , _lowercase : str=32 ):
if not isinstance(_lowercase , _lowercase ):
raise ValueError('''ConversationalPipeline expects a Conversation as input''' )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
'''Add user inputs with the conversation\'s `add_user_input` method''' )
if hasattr(self.tokenizer , '''_build_conversation_input_ids''' ):
SCREAMING_SNAKE_CASE__ : Dict = self.tokenizer._build_conversation_input_ids(_lowercase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
SCREAMING_SNAKE_CASE__ : Any = self._legacy_parse_and_tokenize(_lowercase )
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.LongTensor([input_ids] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def lowercase__ ( self : str , _lowercase : Optional[Any] , _lowercase : Tuple=10 , **_lowercase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = generate_kwargs.get('''max_length''' , self.model.config.max_length )
SCREAMING_SNAKE_CASE__ : Tuple = model_inputs['''input_ids'''].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
SCREAMING_SNAKE_CASE__ : List[Any] = max_length - minimum_tokens
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_inputs['''input_ids'''][:, -trim:]
if "attention_mask" in model_inputs:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs['''attention_mask'''][:, -trim:]
SCREAMING_SNAKE_CASE__ : Optional[int] = model_inputs.pop('''conversation''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_length
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model.generate(**_lowercase , **_lowercase )
if self.model.config.is_encoder_decoder:
SCREAMING_SNAKE_CASE__ : int = 1
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def lowercase__ ( self : Optional[Any] , _lowercase : str , _lowercase : int=True ):
SCREAMING_SNAKE_CASE__ : Tuple = model_outputs['''output_ids''']
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.decode(
output_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
SCREAMING_SNAKE_CASE__ : str = model_outputs['''conversation''']
conversation.mark_processed()
conversation.append_response(_lowercase )
return conversation
def lowercase__ ( self : Union[str, Any] , _lowercase : Conversation ):
SCREAMING_SNAKE_CASE__ : str = self.tokenizer.eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase ) )
if len(_lowercase ) > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : Tuple = input_ids[-self.tokenizer.model_max_length :]
return input_ids
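# Editor's sketch of the turn-interleaving that iter_texts above produces:
# past user inputs and generated responses alternate, user turn first.
past_user_inputs = ['''Hi''', '''How are you?''']
generated_responses = ['''Hello!''', '''Fine, thanks.''']
turns = [t for pair in zip(past_user_inputs , generated_responses ) for t in pair]
assert turns == ['''Hi''', '''Hello!''', '''How are you?''', '''Fine, thanks.''']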
| 35 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def a ( A__ ) -> int:
'''simple docstring'''
# converting each pixel's color to its negative
for i in range(A__.shape[0] ):
for j in range(A__.shape[1] ):
A__[i][j] = [2_5_5, 2_5_5, 2_5_5] - A__[i][j]
return A__
if __name__ == "__main__":
# read original image
a_ :List[str] = imread('image_data/lena.jpg', 1)
# convert to its negative
a_ :Optional[Any] = a(img)
# show result image
imshow('negative of original image', a_)
waitKey(0)
destroyAllWindows()
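# Editor's sketch: the same negation vectorised with numpy, equivalent to the
# per-pixel loop above (illustrative values).
import numpy as np
sketch_img = np.array([[[10, 20, 30]]] , dtype=np.uint8 )
assert (2_5_5 - sketch_img ).tolist() == [[[245, 235, 225]]]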
| 35 |
def a ( A__ ) -> str:
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def a ( A__ ) -> bytes:
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(A__[i] + A__[i + 1] , 1_6 ) for i in range(0 , len(A__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
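# Editor's sketch: round-tripping two bytes through the encode/decode logic above.
# 'H' is 0x48 and 'i' is 0x69, so b'Hi' encodes to '4869'.
assert ''''''.join([hex(byte )[2:].zfill(2 ).upper() for byte in b'''Hi'''] ) == '''4869'''
assert bytes(int('''4869'''[i] + '''4869'''[i + 1] , 1_6 ) for i in range(0 , 4 , 2 ) ) == b'''Hi'''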
| 35 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Any = RobertaTokenizer
lowerCamelCase : List[Any] = RobertaTokenizerFast
lowerCamelCase : Optional[Any] = True
lowerCamelCase : Optional[Any] = {'''cls_token''': '''<s>'''}
def lowercase__ ( self : Optional[int] ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : List[str] = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE__ : List[str] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE__ : Tuple = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowercase ) )
def lowercase__ ( self : str , **_lowercase : str ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Any , **_lowercase : str ):
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''lower newer'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''lower newer'''
return input_text, output_text
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''lower newer'''
SCREAMING_SNAKE_CASE__ : List[Any] = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.tokenize(_lowercase ) # , add_prefix_space=True)
self.assertListEqual(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Any = self.get_tokenizer()
self.assertListEqual(tokenizer.encode('''Hello world!''' , add_special_tokens=_lowercase ) , [0, 3_14_14, 2_32, 3_28, 2] )
self.assertListEqual(
tokenizer.encode('''Hello world! cécé herlolip 418''' , add_special_tokens=_lowercase ) , [0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2] , )
@slow
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : int = self.tokenizer_class.from_pretrained('''roberta-base''' )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode('''sequence builders''' , add_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode('''multi-sequence build''' , add_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(
'''sequence builders''' , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.build_inputs_with_special_tokens(_lowercase , _lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : str = '''Encode this sequence.'''
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.byte_encoder[''' '''.encode('''utf-8''' )[0]]
# Testing encoder arguments
SCREAMING_SNAKE_CASE__ : int = tokenizer.encode(_lowercase , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
SCREAMING_SNAKE_CASE__ : int = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_lowercase , _lowercase )
tokenizer.add_special_tokens({'''bos_token''': '''<s>'''} )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_lowercase , _lowercase )
# Testing spaces after special tokens
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''<mask>'''
tokenizer.add_special_tokens(
{'''mask_token''': AddedToken(_lowercase , lstrip=_lowercase , rstrip=_lowercase )} ) # mask token has a left space
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.convert_tokens_to_ids(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = '''Encode <mask> sequence'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''Encode <mask>sequence'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = encoded.index(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer.encode(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = encoded.index(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_lowercase , _lowercase )
def lowercase__ ( self : Tuple ):
pass
def lowercase__ ( self : int ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE__ : List[str] = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = self.tokenizer_class.from_pretrained(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = '''A, <mask> AllenNLP sentence.'''
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer_r.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
SCREAMING_SNAKE_CASE__ : int = tokenizer_p.encode_plus(_lowercase , add_special_tokens=_lowercase , return_token_type_ids=_lowercase )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
# Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 2_50, 6, 5_02_64, 38_23, 4_87, 2_19_92, 36_45, 4, 2] )
self.assertSequenceEqual(
_lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
_lowercase , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
def lowercase__ ( self : Any ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
SCREAMING_SNAKE_CASE__ : List[str] = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state['''add_prefix_space'''] , _lowercase )
self.assertEqual(post_processor_state['''add_prefix_space'''] , _lowercase )
self.assertEqual(post_processor_state['''trim_offsets'''] , _lowercase )
def lowercase__ ( self : Optional[int] ):
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE__ : Tuple = '''hello''' # `hello` is a token in the vocabulary of `pretrained_name`
SCREAMING_SNAKE_CASE__ : int = f"""{text_of_1_token} {text_of_1_token}"""
SCREAMING_SNAKE_CASE__ : int = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowercase ) + 1, len(_lowercase ) + 1 + len(_lowercase )) , )
SCREAMING_SNAKE_CASE__ : List[str] = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowercase ) + 1, len(_lowercase ) + 1 + len(_lowercase )) , )
SCREAMING_SNAKE_CASE__ : Dict = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowercase ), len(_lowercase ) + 1 + len(_lowercase )) , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_lowercase ), len(_lowercase ) + 1 + len(_lowercase )) , )
SCREAMING_SNAKE_CASE__ : str = f""" {text}"""
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
SCREAMING_SNAKE_CASE__ : List[Any] = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
SCREAMING_SNAKE_CASE__ : int = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowercase ) + 1, 1 + len(_lowercase ) + 1 + len(_lowercase )) , )
SCREAMING_SNAKE_CASE__ : str = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowercase ), 1 + len(_lowercase ) + 1 + len(_lowercase )) , )
SCREAMING_SNAKE_CASE__ : Tuple = self.rust_tokenizer_class.from_pretrained(
_lowercase , use_fast=_lowercase , add_prefix_space=_lowercase , trim_offsets=_lowercase )
SCREAMING_SNAKE_CASE__ : str = tokenizer_r(_lowercase , return_offsets_mapping=_lowercase , add_special_tokens=_lowercase )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_lowercase )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_lowercase ), 1 + len(_lowercase ) + 1 + len(_lowercase )) , )
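# Editor's sketch of the offset arithmetic the assertions above encode: with
# two copies of a token separated by a single space, the second occurrence
# starts at len(token) + 1 and the full text has length 2 * len(token) + 1.
sketch_token = '''hello'''
sketch_text = f"""{sketch_token} {sketch_token}"""
assert sketch_text.index(sketch_token , 1 ) == len(sketch_token ) + 1
assert len(sketch_text ) == 2 * len(sketch_token ) + 1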
| 35 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 1 |
def a ( A__ ) -> list:
'''Bead sort (gravity sort) for a sequence of non-negative integers.'''
if any(not isinstance(A__ , A__ ) or x < 0 for x in sequence ):
raise TypeError('''Sequence must be list of non-negative integers''' )
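# each pass lets the "beads" fall one level: a taller rod sheds its surplus onto the shorter rod after it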
for _ in range(len(A__ ) ):
for i, (rod_upper, rod_lower) in enumerate(zip(A__ , sequence[1:] ) ):
if rod_upper > rod_lower:
sequence[i] -= rod_upper - rod_lower
sequence[i + 1] += rod_upper - rod_lower
return sequence
if __name__ == "__main__":
assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 35 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
import fire
from utils import calculate_rouge, save_json
def a ( A__ , A__ , A__=None , **A__ ) -> List[str]:
'''Score hypothesis lines against reference lines with ROUGE and optionally save the metrics as JSON.'''
SCREAMING_SNAKE_CASE__ : Dict = [x.strip() for x in open(A__ ).readlines()]
SCREAMING_SNAKE_CASE__ : int = [x.strip() for x in open(A__ ).readlines()][: len(A__ )]
SCREAMING_SNAKE_CASE__ : Optional[Any] = calculate_rouge(A__ , A__ , **A__ )
if save_path is not None:
save_json(A__ , A__ , indent=A__ )
return metrics # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
| 35 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 35 | 1 |
def a ( A__ ) -> int:
'''Return the n-th ugly number: a positive integer whose only prime factors are 2, 3 and 5.'''
SCREAMING_SNAKE_CASE__ : List[Any] = [1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = 0, 0, 0
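# three indices track which existing ugly number the factors 2, 3 and 5 multiply next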
SCREAMING_SNAKE_CASE__ : Tuple = ugly_nums[ia] * 2
SCREAMING_SNAKE_CASE__ : int = ugly_nums[ia] * 3
SCREAMING_SNAKE_CASE__ : List[str] = ugly_nums[ia] * 5
for _ in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : int = min(A__ , A__ , A__ )
ugly_nums.append(A__ )
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Any = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
SCREAMING_SNAKE_CASE__ : str = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F'''{ugly_numbers(2_00) = }''')
| 35 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE__ : str = conva_get[2]
SCREAMING_SNAKE_CASE__ : Any = size_pa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE__ : Tuple = rate_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ ( self : Union[str, Any] , _lowercase : Any ):
# save model dict with pickle
SCREAMING_SNAKE_CASE__ : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_lowercase , '''wb''' ) as f:
pickle.dump(_lowercase , _lowercase )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowercase__ ( cls : Dict , _lowercase : int ):
# read saved model
with open(_lowercase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pickle.load(_lowercase ) # noqa: S301
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE__ : Dict = CNN(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# modify model parameter
SCREAMING_SNAKE_CASE__ : List[str] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE__ : Any = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def lowercase__ ( self : str , _lowercase : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ):
return round(_lowercase , 3 )
def lowercase__ ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ):
# convolution process
SCREAMING_SNAKE_CASE__ : Tuple = convs[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = convs[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.shape(_lowercase )[0]
# collect the sliding-window slices (data_focus) of the original image data
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowercase )
# calculate the feature map of every kernel and save it as a list of matrices
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
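# side length of each square output feature map for this kernel size and stride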
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(_lowercase ).reshape(
_lowercase , _lowercase )
data_featuremap.append(_lowercase )
# expanding the data slices to one dimension
SCREAMING_SNAKE_CASE__ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(_lowercase )
return focus_list, data_featuremap
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]="average_pool" ):
# pooling process
SCREAMING_SNAKE_CASE__ : List[str] = len(featuremaps[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_map in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Any = featuremaps[i_map]
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(0 , _lowercase , _lowercase ):
for j_focus in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase )
featuremap_pooled.append(_lowercase )
return featuremap_pooled
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Any] ):
# expanding three-dimensional data into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.shape(data[i] )
SCREAMING_SNAKE_CASE__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE__ : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_lowercase )
return data_expanded
def lowercase__ ( self : Tuple , _lowercase : Optional[int] ):
# expanding a matrix into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase__ ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Dict = 0
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : Any = np.ones((size_map, size_map) )
for i in range(0 , _lowercase , _lowercase ):
for j in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE__ : Dict = i_pool + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.multiply(
_lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowercase )
return pd_all
def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ):
# model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase )
# --------------Model Leaning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji )
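# distribute each pooled-layer gradient evenly over its average-pooling window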
SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# fully connected layers
SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
# accumulate the error over every single image
SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1
SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '''+-''' )
plt.plot(_lowercase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self : Union[str, Any] , _lowercase : int ):
# model prediction
SCREAMING_SNAKE_CASE__ : Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Tuple ):
# return the image data after the convolution and pooling steps so we can inspect it
SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 35 | 1 |
def a ( A__ ) -> float:
'''Return the surface area of a regular dodecahedron from its edge length.'''
if edge <= 0 or not isinstance(A__ , A__ ):
raise ValueError('''Length must be a positive.''' )
return 3 * ((2_5 + 1_0 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)
def a ( A__ ) -> float:
'''Return the volume of a regular dodecahedron from its edge length.'''
if edge <= 0 or not isinstance(A__ , A__ ):
raise ValueError('''Length must be a positive.''' )
return ((1_5 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
# create a hypothetical next token and extend next_input_ids
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append the new token to input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
| 35 | 1 |
def a ( A__ ) -> int:
'''Count the set bits (1s) in the binary representation of a non-negative integer.'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(A__ , A__ ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(A__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
| 35 | 1 |
def a ( A__ ) -> list:
'''Sort a list in place using odd-even (brick) sort: alternate compare-and-swap passes over even- and odd-indexed pairs.'''
SCREAMING_SNAKE_CASE__ : List[str] = False
while is_sorted is False: # Until all the indices are traversed keep looping
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
for i in range(0 , len(A__ ) - 1 , 2 ): # iterating over all even indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE__ : int = False
for i in range(1 , len(A__ ) - 1 , 2 ): # iterating over all odd indices
if input_list[i] > input_list[i + 1]:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = input_list[i + 1], input_list[i]
# swapping if elements not in order
SCREAMING_SNAKE_CASE__ : int = False
return input_list
if __name__ == "__main__":
print('Enter list to be sorted')
a_ :Optional[int] = [int(x) for x in input().split()]
# inputing elements of the list in one line
a_ :str = odd_even_sort(input_list)
print('The sorted list is')
print(sorted_list)
| 35 |
from __future__ import annotations
def a ( A__ , A__ , A__ ) -> dict[str, float]:
'''Apply Ohm's law: exactly one of voltage, current or resistance must be 0, and its value is solved from the other two.'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
from math import sqrt
def a ( A__ ) -> int:
'''Return the sum of the proper divisors of n (all divisors excluding n itself).'''
SCREAMING_SNAKE_CASE__ : List[str] = 0
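# proper divisors come in pairs (i, n // i); the square root, if integral, is counted once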
for i in range(1 , int(sqrt(A__ ) + 1 ) ):
if n % i == 0 and i != sqrt(A__ ):
total += i + n // i
elif i == sqrt(A__ ):
total += i
return total - n
def a ( A__ = 1_0_0_0_0 ) -> int:
'''Project Euler 21: return the sum of all amicable numbers below n.'''
SCREAMING_SNAKE_CASE__ : Dict = sum(
i
for i in range(1 , A__ )
if sum_of_divisors(sum_of_divisors(A__ ) ) == i and sum_of_divisors(A__ ) != i )
return total
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 35 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles as texts, but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
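# concatenate each question+title encoding with its passage text encoding, truncating to max_length when requested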
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
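# rank all candidate spans by their combined start + end logit score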
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
| 35 | 1 |
from math import factorial
def a ( A__ = 2_0 ) -> int:
'''Project Euler 15: count the lattice paths through an n x n grid, which equals the central binomial coefficient C(2n, n).'''
SCREAMING_SNAKE_CASE__ : Tuple = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
SCREAMING_SNAKE_CASE__ : Dict = n // 2
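# the answer is the central binomial coefficient C(2n, n) = (2n)! / (n! * n!)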
return int(factorial(A__ ) / (factorial(A__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ :str = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 |
import random
def a ( A__ ) -> bool:
'''Miller-Rabin probabilistic primality test with 5 random rounds; returns True if num is probably prime.'''
SCREAMING_SNAKE_CASE__ : str = num - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
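# write num - 1 as 2**t * s with s odd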
while s % 2 == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = s // 2
t += 1
for _ in range(5 ):
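# pick a random base and check whether it witnesses num being composite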
SCREAMING_SNAKE_CASE__ : int = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pow(A__ , A__ , A__ )
if v != 1:
SCREAMING_SNAKE_CASE__ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE__ : Any = i + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (v**2) % num
return True
def a ( A__ ) -> bool:
'''Return True if num is probably prime: check a table of small primes, then divisibility by them, then fall back to Miller-Rabin.'''
if num < 2:
return False
SCREAMING_SNAKE_CASE__ : Optional[int] = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A__ )
def a ( A__ = 1_0_2_4 ) -> int:
'''Return a random probable prime of about keysize bits (default 1024), suitable for key generation.'''
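# sample random keysize-bit candidates until one passes the primality test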
while True:
SCREAMING_SNAKE_CASE__ : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A__ ):
return num
if __name__ == "__main__":
a_ :Dict = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 35 | 1 |
def a ( A__ , A__ , A__ ) -> float:
'''Return the fixed monthly payment (EMI) for a loan given the principal, the annual interest rate and the repayment period in years.'''
if principal <= 0:
raise Exception('''Principal borrowed must be > 0''' )
if rate_per_annum < 0:
raise Exception('''Rate of interest must be >= 0''' )
if years_to_repay <= 0 or not isinstance(A__ , A__ ):
raise Exception('''Years to repay must be an integer > 0''' )
# Yearly rate is divided by 12 to get monthly rate
SCREAMING_SNAKE_CASE__ : List[Any] = rate_per_annum / 1_2
# Years to repay is multiplied by 12 to get number of payments as payment is monthly
SCREAMING_SNAKE_CASE__ : Union[str, Any] = years_to_repay * 1_2
return (
principal
* rate_per_month
* (1 + rate_per_month) ** number_of_payments
/ ((1 + rate_per_month) ** number_of_payments - 1)
)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a ( A__ ) -> List[Any]:
'''Logistic sigmoid 1 / (1 + e^(-z)).'''
return 1 / (1 + np.exp(-z ))
def a ( A__ , A__ ) -> Any:
'''Binary cross-entropy cost between predicted probabilities h and labels y.'''
return (-y * np.log(A__ ) - (1 - y) * np.log(1 - h )).mean()
def a ( A__ , A__ , A__ ) -> Tuple:
'''Log-likelihood of the labels y under the logistic model with weights theta.'''
SCREAMING_SNAKE_CASE__ : str = np.dot(A__ , A__ )
return np.sum(y * scores - np.log(1 + np.exp(A__ ) ) )
def a ( A__ , A__ , A__ , A__=7_0_0_0_0 ) -> Tuple:
'''Fit logistic-regression weights theta with batch gradient descent.'''
SCREAMING_SNAKE_CASE__ : int = np.zeros(x.shape[1] )
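# batch gradient descent on the logistic (cross-entropy) loss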
for iterations in range(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Dict = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : int = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : int = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cost_function(A__ , A__ )
if iterations % 1_0_0 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing theta, i.e. our weight vector
def a ( A__ ) -> int:
'''Predict class-1 probabilities with the learned weights.'''
return sigmoid_function(
np.dot(A__ , A__ ) ) # predict class probabilities with the fitted logistic regression model
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) :str = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) :Tuple = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) :Dict = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ :Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
a_ :Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 35 | 1 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ :str = logging.get_logger(__name__)
a_ :Optional[int] = 'https://openaipublic.azureedge.net/jukebox/models/'
a_ :Optional[Any] = {
'jukebox-1b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'1b_lyrics/prior_level_2.pth.tar',
],
'jukebox-5b-lyrics': [
'5b/vqvae.pth.tar',
'5b/prior_level_0.pth.tar',
'5b/prior_level_1.pth.tar',
'5b_lyrics/prior_level_2.pth.tar',
],
}
def a ( A__ ) -> Optional[Any]:
'''Rewrite one original Jukebox checkpoint key into the transformers naming scheme.'''
if key.endswith('''.model.1.bias''' ) and len(key.split('''.''' ) ) > 1_0:
SCREAMING_SNAKE_CASE__ : Tuple = key.replace('''.model.1.bias''' , '''.conv1d_1.bias''' )
elif key.endswith('''.model.1.weight''' ) and len(key.split('''.''' ) ) > 1_0:
SCREAMING_SNAKE_CASE__ : str = key.replace('''.model.1.weight''' , '''.conv1d_1.weight''' )
elif key.endswith('''.model.3.bias''' ) and len(key.split('''.''' ) ) > 1_0:
SCREAMING_SNAKE_CASE__ : int = key.replace('''.model.3.bias''' , '''.conv1d_2.bias''' )
elif key.endswith('''.model.3.weight''' ) and len(key.split('''.''' ) ) > 1_0:
SCREAMING_SNAKE_CASE__ : Optional[int] = key.replace('''.model.3.weight''' , '''.conv1d_2.weight''' )
if "conditioner_blocks.0." in key:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = key.replace('''conditioner_blocks.0''' , '''conditioner_blocks''' )
if "prime_prior" in key:
SCREAMING_SNAKE_CASE__ : Optional[Any] = key.replace('''prime_prior''' , '''encoder''' )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
SCREAMING_SNAKE_CASE__ : Tuple = key.replace('''.emb.''' , '''.''' )
if key.endswith('''k''' ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace('''.k''' , '''.codebook''' )
if "y_emb." in key:
return key.replace('''y_emb.''' , '''metadata_embedding.''' )
if "x_emb.emb." in key:
SCREAMING_SNAKE_CASE__ : List[Any] = key.replace('''0.x_emb.emb''' , '''embed_tokens''' )
if "prime_state_ln" in key:
return key.replace('''prime_state_ln''' , '''encoder.final_layer_norm''' )
if ".ln" in key:
return key.replace('''.ln''' , '''.layer_norm''' )
if "_ln" in key:
return key.replace('''_ln''' , '''_layer_norm''' )
if "prime_state_proj" in key:
return key.replace('''prime_state_proj''' , '''encoder.proj_in''' )
if "prime_x_out" in key:
return key.replace('''prime_x_out''' , '''encoder.lm_head''' )
if "prior.x_out" in key:
return key.replace('''x_out''' , '''fc_proj_out''' )
if "x_emb" in key:
return key.replace('''x_emb''' , '''embed_tokens''' )
return key
def a ( A__ , A__ , A__ , A__ ) -> int:
'''Rename every VQ-VAE and prior key in a Jukebox state dict to match the transformers implementation.'''
SCREAMING_SNAKE_CASE__ : Tuple = {}
import re
SCREAMING_SNAKE_CASE__ : List[str] = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
SCREAMING_SNAKE_CASE__ : List[Any] = re.compile(
r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'''encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)''' )
SCREAMING_SNAKE_CASE__ : List[Any] = re.compile(
r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re.compile(r'''decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = re.compile(
r'''conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = re.compile(r'''conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)''' )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = re_encoder_block_conv_in.match(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = regex_match.groups()
SCREAMING_SNAKE_CASE__ : int = int(groups[2] ) * 2 + int(groups[3] )
SCREAMING_SNAKE_CASE__ : Any = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"""
SCREAMING_SNAKE_CASE__ : Tuple = re_encoder_block_conv_in.sub(A__ , A__ )
elif re_encoder_block_resnet.fullmatch(A__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re_encoder_block_resnet.match(A__ )
SCREAMING_SNAKE_CASE__ : List[Any] = regex_match.groups()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(groups[2] ) * 2 + int(groups[3] )
SCREAMING_SNAKE_CASE__ : Dict = {'''1''': 1, '''3''': 2}[groups[-2]]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."""
SCREAMING_SNAKE_CASE__ : List[Any] = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
SCREAMING_SNAKE_CASE__ : List[Any] = prefix + resnet_block
SCREAMING_SNAKE_CASE__ : List[Any] = re_encoder_block_resnet.sub(A__ , A__ )
elif re_encoder_block_proj_out.fullmatch(A__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = re_encoder_block_proj_out.match(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = regex_match.groups()
SCREAMING_SNAKE_CASE__ : str = f"""encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"""
SCREAMING_SNAKE_CASE__ : Any = re_encoder_block_proj_out.sub(A__ , A__ )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(A__ ):
SCREAMING_SNAKE_CASE__ : str = re_decoder_block_conv_out.match(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = regex_match.groups()
SCREAMING_SNAKE_CASE__ : Optional[int] = int(groups[2] ) * 2 + int(groups[3] ) - 2
SCREAMING_SNAKE_CASE__ : Tuple = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"""
SCREAMING_SNAKE_CASE__ : Any = re_decoder_block_conv_out.sub(A__ , A__ )
elif re_decoder_block_resnet.fullmatch(A__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = re_decoder_block_resnet.match(A__ )
SCREAMING_SNAKE_CASE__ : Dict = regex_match.groups()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(groups[2] ) * 2 + int(groups[3] ) - 2
SCREAMING_SNAKE_CASE__ : int = {'''1''': 1, '''3''': 2}[groups[-2]]
SCREAMING_SNAKE_CASE__ : Dict = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."""
SCREAMING_SNAKE_CASE__ : Dict = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
SCREAMING_SNAKE_CASE__ : Optional[int] = prefix + resnet_block
SCREAMING_SNAKE_CASE__ : List[str] = re_decoder_block_resnet.sub(A__ , A__ )
elif re_decoder_block_proj_in.fullmatch(A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = re_decoder_block_proj_in.match(A__ )
SCREAMING_SNAKE_CASE__ : Dict = regex_match.groups()
SCREAMING_SNAKE_CASE__ : str = f"""decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"""
SCREAMING_SNAKE_CASE__ : str = re_decoder_block_proj_in.sub(A__ , A__ )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(A__ ):
SCREAMING_SNAKE_CASE__ : int = re_prior_cond_conv_out.match(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = regex_match.groups()
SCREAMING_SNAKE_CASE__ : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
SCREAMING_SNAKE_CASE__ : Optional[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"""
SCREAMING_SNAKE_CASE__ : List[Any] = re_prior_cond_conv_out.sub(A__ , A__ )
elif re_prior_cond_resnet.fullmatch(A__ ):
SCREAMING_SNAKE_CASE__ : Dict = re_prior_cond_resnet.match(A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = regex_match.groups()
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(groups[1] ) * 2 + int(groups[2] ) - 2
SCREAMING_SNAKE_CASE__ : Optional[int] = {'''1''': 1, '''3''': 2}[groups[-2]]
SCREAMING_SNAKE_CASE__ : List[Any] = f"""conditioner_blocks.upsampler.upsample_block.{block_index}."""
SCREAMING_SNAKE_CASE__ : str = f"""resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"""
SCREAMING_SNAKE_CASE__ : List[Any] = prefix + resnet_block
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re_prior_cond_resnet.sub(A__ , A__ )
elif re_prior_cond_proj_in.fullmatch(A__ ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = re_prior_cond_proj_in.match(A__ )
SCREAMING_SNAKE_CASE__ : Dict = regex_match.groups()
SCREAMING_SNAKE_CASE__ : Optional[int] = f"""conditioner_blocks.upsampler.proj_in.{groups[-1]}"""
SCREAMING_SNAKE_CASE__ : Union[str, Any] = re_prior_cond_proj_in.sub(A__ , A__ )
# keep original key
else:
SCREAMING_SNAKE_CASE__ : List[Any] = original_key
SCREAMING_SNAKE_CASE__ : int = replace_key(A__ )
if f"""{key_prefix}.{key}""" not in model_state_dict or key is None:
print(f"""failed converting {original_key} to {key}, does not match""" )
# handle mismatched shape
elif value.shape != model_state_dict[f"""{key_prefix}.{key}"""].shape:
SCREAMING_SNAKE_CASE__ : List[str] = model_state_dict[f"""{key_prefix}.{key}"""]
print(f"""{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match""" )
SCREAMING_SNAKE_CASE__ : List[str] = original_key
SCREAMING_SNAKE_CASE__ : str = original_key
SCREAMING_SNAKE_CASE__ : Tuple = value
return new_dict
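# Illustration of one regex rename above (hypothetical key): the encoder conv_in
# pattern maps 'encoders.0.level_blocks.0.model.1.0.weight' to
# 'encoders.0.level_blocks.0.downsample_block.2.weight' (block_index = 1 * 2 + 0),
# before the vqvae/priors key prefix is checked against the model state dict.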
@torch.no_grad()
def a ( A__=None , A__=None ) -> str:
'''simple docstring'''
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" ):
SCREAMING_SNAKE_CASE__ : Tuple = requests.get(f"""{PREFIX}{file}""" , allow_redirects=A__ )
os.makedirs(f"""{pytorch_dump_folder_path}/""" , exist_ok=A__ )
open(f"""{pytorch_dump_folder_path}/{file.split('/' )[-1]}""" , '''wb''' ).write(r.content )
SCREAMING_SNAKE_CASE__ : Tuple = MODEL_MAPPING[model_name.split('''/''' )[-1]]
SCREAMING_SNAKE_CASE__ : Optional[Any] = JukeboxConfig.from_pretrained(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = JukeboxModel(A__ )
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Any = {}
for i, dict_name in enumerate(A__ ):
SCREAMING_SNAKE_CASE__ : Dict = torch.load(f"""{pytorch_dump_folder_path}/{dict_name.split('/' )[-1]}""" )['''model''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
for k in old_dic.keys():
if k.endswith('''.b''' ):
SCREAMING_SNAKE_CASE__ : Optional[int] = old_dic[k]
elif k.endswith('''.w''' ):
SCREAMING_SNAKE_CASE__ : Any = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
SCREAMING_SNAKE_CASE__ : Tuple = old_dic[k]
else:
SCREAMING_SNAKE_CASE__ : List[Any] = old_dic[k]
SCREAMING_SNAKE_CASE__ : Any = '''vqvae''' if i == 0 else f"""priors.{3 - i}"""
SCREAMING_SNAKE_CASE__ : str = fix_jukebox_keys(A__ , model.state_dict() , A__ , A__ )
weight_dict.append(A__ )
SCREAMING_SNAKE_CASE__ : List[str] = weight_dict.pop(0 )
model.vqvae.load_state_dict(A__ )
for i in range(len(A__ ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(A__ ).mkdir(exist_ok=A__ )
with open(f"""{pytorch_dump_folder_path}/mapping.json""" , '''w''' ) as txtfile:
json.dump(A__ , A__ )
print(f"""Saving model {model_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(A__ )
return weight_dict
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='jukebox-5b-lyrics',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path',
default='jukebox-5b-lyrics-converted',
type=str,
help='Path to the output PyTorch model directory.',
)
a_ :List[Any] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
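# Typical invocation (a sketch; the script file name is illustrative):
#   python convert_jukebox.py --model_name jukebox-1b-lyrics \
#     --pytorch_dump_folder_path jukebox-1b-lyrics-converted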
| 35 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a_ :List[str] = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :str = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[Any] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
a_ :Dict = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( A__ , A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = RemBertConfig.from_json_file(A__ )
print('''Building PyTorch model from configuration: {}'''.format(str(A__ ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = RemBertModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A__ , A__ , A__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(A__ ) )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
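# Typical invocation (a sketch; the paths are placeholders):
#   python convert_rembert_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path rembert/model.ckpt \
#     --rembert_config_file rembert/config.json \
#     --pytorch_dump_path rembert/pytorch_model.bin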
| 35 | 1 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :int = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[Any] = {
'vocab_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt'
),
'distilbert-base-german-cased': 'https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt',
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'distilbert-base-uncased': 'https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json',
'distilbert-base-uncased-distilled-squad': (
'https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-cased': 'https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json',
'distilbert-base-cased-distilled-squad': (
'https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json'
),
'distilbert-base-german-cased': (
'https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json'
),
'distilbert-base-multilingual-cased': (
'https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json'
),
},
}
a_ :Union[str, Any] = {
'distilbert-base-uncased': 5_12,
'distilbert-base-uncased-distilled-squad': 5_12,
'distilbert-base-cased': 5_12,
'distilbert-base-cased-distilled-squad': 5_12,
'distilbert-base-german-cased': 5_12,
'distilbert-base-multilingual-cased': 5_12,
}
a_ :Any = {
'distilbert-base-uncased': {'do_lower_case': True},
'distilbert-base-uncased-distilled-squad': {'do_lower_case': True},
'distilbert-base-cased': {'do_lower_case': False},
'distilbert-base-cased-distilled-squad': {'do_lower_case': False},
'distilbert-base-german-cased': {'do_lower_case': False},
'distilbert-base-multilingual-cased': {'do_lower_case': False},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : int = ['''input_ids''', '''attention_mask''']
lowerCamelCase : Optional[int] = DistilBertTokenizer
def __init__( self : Union[str, Any] , _lowercase : List[Any]=None , _lowercase : int=None , _lowercase : Optional[int]=True , _lowercase : Optional[int]="[UNK]" , _lowercase : Dict="[SEP]" , _lowercase : Optional[int]="[PAD]" , _lowercase : List[Any]="[CLS]" , _lowercase : Union[str, Any]="[MASK]" , _lowercase : List[Any]=True , _lowercase : int=None , **_lowercase : List[Any] , ):
super().__init__(
_lowercase , tokenizer_file=_lowercase , do_lower_case=_lowercase , unk_token=_lowercase , sep_token=_lowercase , pad_token=_lowercase , cls_token=_lowercase , mask_token=_lowercase , tokenize_chinese_chars=_lowercase , strip_accents=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('''lowercase''' , _lowercase ) != do_lower_case
or normalizer_state.get('''strip_accents''' , _lowercase ) != strip_accents
or normalizer_state.get('''handle_chinese_chars''' , _lowercase ) != tokenize_chinese_chars
):
SCREAMING_SNAKE_CASE__ : Optional[Any] = getattr(_lowercase , normalizer_state.pop('''type''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = do_lower_case
SCREAMING_SNAKE_CASE__ : Union[str, Any] = strip_accents
SCREAMING_SNAKE_CASE__ : Dict = tokenize_chinese_chars
SCREAMING_SNAKE_CASE__ : List[Any] = normalizer_class(**_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_lower_case
def lowercase__ ( self : List[str] , _lowercase : List[str] , _lowercase : Union[str, Any]=None ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def lowercase__ ( self : Union[str, Any] , _lowercase : List[int] , _lowercase : Optional[List[int]] = None ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def lowercase__ ( self : Union[str, Any] , _lowercase : str , _lowercase : Optional[str] = None ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._tokenizer.model.save(_lowercase , name=_lowercase )
return tuple(_lowercase )
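# Illustrative shapes (a sketch): for a single sequence of n token ids the helpers
# above build [CLS] + ids + [SEP] and n + 2 zeros for the token type ids; for a
# pair, ids_b + [SEP] are appended and that second segment is marked with ones.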
| 35 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=None , _lowercase : Tuple=1 , _lowercase : List[Any]="binary" , _lowercase : Any=None , _lowercase : Optional[int]="warn" , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
_lowercase , _lowercase , labels=_lowercase , pos_label=_lowercase , average=_lowercase , sample_weight=_lowercase , zero_division=_lowercase , )
return {"recall": float(_lowercase ) if score.size == 1 else score}
| 35 | 1 |
def a ( A__ ) -> str:
'''simple docstring'''
if not all(char in '''01''' for char in bin_string ):
raise ValueError('''Non-binary value was passed to the function''' )
if not bin_string:
raise ValueError('''Empty string was passed to the function''' )
SCREAMING_SNAKE_CASE__ : List[str] = ''''''
while len(A__ ) % 3 != 0:
SCREAMING_SNAKE_CASE__ : Optional[int] = '''0''' + bin_string
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
bin_string[index : index + 3]
for index in range(len(A__ ) )
if index % 3 == 0
]
for bin_group in bin_string_in_3_list:
SCREAMING_SNAKE_CASE__ : Dict = 0
for index, val in enumerate(A__ ):
oct_val += int(2 ** (2 - index) * int(A__ ) )
oct_string += str(A__ )
return oct_string
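# Worked example (by hand): '1111' is left-padded to '001111', split into
# ['001', '111'], and each 3-bit group becomes one octal digit, so the result is
# '17' (0b1111 == 15 == 0o17).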
if __name__ == "__main__":
from doctest import testmod
testmod()
| 35 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
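# Worked example (hypothetical wordpieces, max_seq_length=8): "Jack lives" labelled
# ["B-PER", "O"], with "Jack" split into two pieces, becomes
#   tokens:     [CLS] ja ##ck lives [SEP] <pad> <pad> <pad>
#   label_ids:  -100 B-PER -100 O -100 -100 -100 -100
#   input_mask: 1 1 1 1 1 0 0 0
# (label ids shown symbolically; -100 is the default pad_token_label_id).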
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
return self.features[i]
| 35 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def a ( ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = ArgumentParser(
description=(
'''PyTorch TPU distributed training launch '''
'''helper utility that will spawn up '''
'''multiple distributed processes'''
) )
# Optional arguments for the launch helper
parser.add_argument('''--num_cores''' , type=A__ , default=1 , help='''Number of TPU cores to use (1 or 8).''' )
# positional
parser.add_argument(
'''training_script''' , type=A__ , help=(
'''The full path to the single TPU training '''
'''program/script to be launched in parallel, '''
'''followed by all the arguments for the '''
'''training script'''
) , )
# rest from the training program
parser.add_argument('''training_script_args''' , nargs=A__ )
return parser.parse_args()
def a ( ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = parse_args()
# Import training_script as a module.
SCREAMING_SNAKE_CASE__ : Tuple = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
SCREAMING_SNAKE_CASE__ : Any = script_fpath.stem
SCREAMING_SNAKE_CASE__ : int = importlib.import_module(A__ )
# Patch sys.argv
SCREAMING_SNAKE_CASE__ : List[Any] = [args.training_script] + args.training_script_args + ['''--tpu_num_cores''', str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
main()
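# Typical invocation (a sketch; launcher and script names are placeholders):
#   python xla_spawn.py --num_cores 8 train.py --lr 3e-5
# Arguments after the training script are forwarded to it unchanged, and
# --tpu_num_cores is appended automatically (see the sys.argv patch above).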
| 35 |
import os
def a ( A__ = "matrix.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(A__ ) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
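# Worked example (by hand): for the grid [[1, 3], [2, 4]] the dp table fills as
# [[1, 4], [3, 7]], so the cheapest right/down path is 1 -> 2 -> 4 with cost 7.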
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ :str = logging.get_logger(__name__)
a_ :Tuple = {'ctrl': 'https://huggingface.co/ctrl/resolve/main/config.json'}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Tuple = '''ctrl'''
lowerCamelCase : Optional[Any] = ['''past_key_values''']
lowerCamelCase : Optional[Any] = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self : Optional[int] , _lowercase : List[Any]=24_65_34 , _lowercase : Union[str, Any]=2_56 , _lowercase : Tuple=12_80 , _lowercase : Union[str, Any]=81_92 , _lowercase : List[Any]=48 , _lowercase : Any=16 , _lowercase : Dict=0.1 , _lowercase : List[Any]=0.1 , _lowercase : List[str]=1E-6 , _lowercase : List[Any]=0.02 , _lowercase : Union[str, Any]=True , **_lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : str = n_positions
SCREAMING_SNAKE_CASE__ : Any = n_embd
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n_layer
SCREAMING_SNAKE_CASE__ : Optional[int] = n_head
SCREAMING_SNAKE_CASE__ : Optional[int] = dff
SCREAMING_SNAKE_CASE__ : Optional[Any] = resid_pdrop
SCREAMING_SNAKE_CASE__ : int = embd_pdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = layer_norm_epsilon
SCREAMING_SNAKE_CASE__ : int = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[int] = use_cache
super().__init__(**_lowercase )
| 35 |
from math import factorial
def a ( A__ = 2_0 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
SCREAMING_SNAKE_CASE__ : Dict = n // 2
return int(factorial(A__ ) / (factorial(A__ ) * factorial(n - k )) )
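# Sanity check (by hand): for n = 2 the formula gives C(4, 2) = 4! / (2! * 2!) = 6,
# the number of monotone lattice paths through a 2x2 grid; n = 20 gives C(40, 20).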
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ :str = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 | 1 |
def a ( A__ , A__ ) -> str:
'''simple docstring'''
if not isinstance(iterations , int ):
raise ValueError('''iterations must be defined as integers''' )
if not isinstance(number , int ) or not number >= 1:
raise ValueError(
'''starting number must be an integer and be more than 0''' )
if not iterations >= 1:
raise ValueError('''Iterations must be done more than 0 times to play FizzBuzz''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ''''''
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(A__ )
# print(out)
number += 1
out += " "
return out
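# Worked example (by hand): starting at 1 for 7 iterations the function returns
# '1 2 Fizz 4 Buzz Fizz 7 ' (each entry is followed by a trailing space).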
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def a ( A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = hashlib.md5(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# It is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 35 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ :str = logging.get_logger(__name__)
a_ :Any = {
'weiweishi/roc-bert-base-zh': 'https://huggingface.co/weiweishi/roc-bert-base-zh/resolve/main/config.json',
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = '''roc_bert'''
def __init__( self : List[str] , _lowercase : Dict=3_05_22 , _lowercase : Tuple=7_68 , _lowercase : Any=12 , _lowercase : str=12 , _lowercase : Any=30_72 , _lowercase : str="gelu" , _lowercase : Any=0.1 , _lowercase : int=0.1 , _lowercase : Optional[Any]=5_12 , _lowercase : Any=2 , _lowercase : Union[str, Any]=0.02 , _lowercase : Optional[int]=1E-12 , _lowercase : Any=True , _lowercase : Tuple=0 , _lowercase : Dict="absolute" , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=True , _lowercase : Dict=True , _lowercase : Optional[int]=7_68 , _lowercase : Tuple=9_10 , _lowercase : Optional[int]=5_12 , _lowercase : List[str]=2_48_58 , _lowercase : List[Any]=True , **_lowercase : List[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Union[str, Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : Tuple = type_vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = use_cache
SCREAMING_SNAKE_CASE__ : List[Any] = enable_pronunciation
SCREAMING_SNAKE_CASE__ : Tuple = enable_shape
SCREAMING_SNAKE_CASE__ : Dict = pronunciation_embed_dim
SCREAMING_SNAKE_CASE__ : int = pronunciation_vocab_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = shape_embed_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = shape_vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = concat_input
SCREAMING_SNAKE_CASE__ : str = position_embedding_type
SCREAMING_SNAKE_CASE__ : str = classifier_dropout
super().__init__(pad_token_id=_lowercase , **_lowercase )
| 35 |
def a ( A__ ) -> int:
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif not isinstance(A__ , int ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(A__ ).count('''1''' )
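# Worked example (by hand): bin(25) == '0b11001' contains three '1' characters,
# so the function returns 3.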
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ :List[Any] = logging.get_logger(__name__)
a_ :Dict = {
'facebook/deit-base-distilled-patch16-224': (
'https://huggingface.co/facebook/deit-base-patch16-224/resolve/main/config.json'
),
# See all DeiT models at https://huggingface.co/models?filter=deit
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : int = '''deit'''
def __init__( self : int , _lowercase : Optional[int]=7_68 , _lowercase : List[Any]=12 , _lowercase : Optional[Any]=12 , _lowercase : Dict=30_72 , _lowercase : Any="gelu" , _lowercase : Optional[Any]=0.0 , _lowercase : List[Any]=0.0 , _lowercase : Optional[Any]=0.02 , _lowercase : List[str]=1E-12 , _lowercase : Optional[int]=2_24 , _lowercase : Optional[int]=16 , _lowercase : int=3 , _lowercase : int=True , _lowercase : Optional[int]=16 , **_lowercase : Union[str, Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : str = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Tuple = image_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = patch_size
SCREAMING_SNAKE_CASE__ : Dict = num_channels
SCREAMING_SNAKE_CASE__ : int = qkv_bias
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_stride
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Any = version.parse('''1.11''' )
@property
def lowercase__ ( self : Union[str, Any] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase__ ( self : Any ):
return 1E-4
| 35 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def a ( A__ , A__ , A__ , A__ ) -> Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(A__ , A__ , A__=0 , A__=None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE__ : Any = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE__ : Any = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (output_size, output_size) if isinstance(A__ , A__ ) else output_size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = get_image_size(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = output_size
# determine new height and width
SCREAMING_SNAKE_CASE__ : List[str] = output_height / input_height
SCREAMING_SNAKE_CASE__ : Dict = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE__ : List[str] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE__ : Optional[Any] = scale_height
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_height * input_height , multiple=A__ )
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_width * input_width , multiple=A__ )
return (new_height, new_width)
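# Worked example (by hand, with keep_aspect_ratio=True and multiple=32): a 480x640
# input targeted at 384x384 fits the height (scale 0.8 on both sides), and the
# rounded multiples give an output size of (384, 512).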
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
    def preprocess( self : Optional[Any] , images : ImageInput , do_resize : bool = None , size : int = None , keep_aspect_ratio : bool = None , ensure_multiple_of : int = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs : Tuple , ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size )
        keep_aspect_ratio = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
        ensure_multiple_of = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
            raise ValueError('''Size and resample must be specified if do_resize is True.''' )
        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'''pixel_values''': images}
        return BatchFeature(data=data , tensor_type=return_tensors )
    def post_process_semantic_segmentation( self : Tuple , outputs : Optional[Any] , target_sizes : List[Tuple] = None ):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits ) != len(target_sizes ):
                raise ValueError(
                    '''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
            if is_torch_tensor(target_sizes ):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits ) ):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=False )
                semantic_map = resized_logits[0].argmax(dim=0 )
                semantic_segmentation.append(semantic_map )
        else:
            semantic_segmentation = logits.argmax(dim=1 )
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
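
# A minimal standalone sketch (not part of the class above) of the rounding that
# `get_resize_output_image_size` applies when `ensure_multiple_of` is set; the
# helper name `constrain_to_multiple_of` is illustrative, not a library API.
def constrain_to_multiple_of(val: float, multiple: int) -> int:
    # Round to the nearest multiple, e.g. 518 -> 512 for multiple=32.
    return int(round(val / multiple) * multiple)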
from __future__ import annotations
def fractional_knapsack( value , weight , capacity ) -> tuple[float, list[float]]:
    '''Solve the fractional knapsack problem greedily by value/weight ratio.

    >>> fractional_knapsack([60, 100, 120], [10, 20, 30], 50)
    (240.0, [1, 1, 0.6666666666666666])
    '''
    index = list(range(len(value ) ) )
    ratio = [v / w for v, w in zip(value , weight )]
    index.sort(key=lambda i : ratio[i] , reverse=True )

    max_value : float = 0
    fractions : list[float] = [0] * len(value )
    for i in index:
        if weight[i] <= capacity:
            fractions[i] = 1
            max_value += value[i]
            capacity -= weight[i]
        else:
            fractions[i] = capacity / weight[i]
            max_value += value[i] * capacity / weight[i]
            break

    return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
from typing import Any
class Graph :
    def __init__( self : int , num_of_nodes : int ):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges : list[list[int]] = []
        self.m_component : dict[int, int] = {}

    def add_edge( self : Union[str, Any] , u_node : int , v_node : int , weight : int ):
        self.m_edges.append([u_node, v_node, weight] )

    def find_component( self : Optional[int] , u_node : int ):
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )

    def set_component( self : Optional[Any] , u_node : int ):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )

    def union( self : int , component_size : list[int] , u_node : int , v_node : int ):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )

    def boruvka( self : str ):
        component_size = []
        mst_weight = 0
        minimum_weight_edge : list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
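
# Usage sketch (the edge list is illustrative, not from the original file):
#   g = Graph(4)
#   g.add_edge(0, 1, 10); g.add_edge(0, 2, 6); g.add_edge(0, 3, 5)
#   g.add_edge(1, 3, 15); g.add_edge(2, 3, 4)
#   g.boruvka()
# This prints each added MST edge and a total weight of 19 (edges 2-3, 0-3, 0-1).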
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class lowercase ( unittest.TestCase ):
@slow
    def test_output_embeds_base_model( self : List[str] ):
        model = TFXLMRobertaModel.from_pretrained('''jplu/tf-xlm-roberta-base''' )
        inputs = {
            '''input_ids''': tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]] , dtype=tf.int32 ),  # "My dog is cute"
            '''attention_mask''': tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]] , dtype=tf.int32 ),
        }
        output = model(inputs )['''last_hidden_state''']
        expected_shape = tf.TensorShape((1, 6, 768) )
        self.assertEqual(output.shape , expected_shape )
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ] , dtype=tf.float32 , )
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
from __future__ import annotations
def shear_stress( stress , tangential_force , area , ) -> tuple[str, float]:
'''simple docstring'''
if (stress, tangential_force, area).count(0 ) != 1:
raise ValueError('''You cannot supply more or less than 2 values''' )
elif stress < 0:
raise ValueError('''Stress cannot be negative''' )
elif tangential_force < 0:
raise ValueError('''Tangential Force cannot be negative''' )
elif area < 0:
raise ValueError('''Area cannot be negative''' )
elif stress == 0:
return (
"stress",
tangential_force / area,
)
elif tangential_force == 0:
return (
"tangential_force",
stress * area,
)
else:
return (
"area",
tangential_force / stress,
)
if __name__ == "__main__":
import doctest
doctest.testmod()
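
# Worked example (illustrative values): with stress=25, tangential_force=100 and
# the third quantity left as 0, the function solves for the missing one:
#   shear_stress(25, 100, 0) -> ('area', 4.0)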
def base16_encode( data ) -> str:
    '''simple docstring'''
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )


def base16_decode( data ) -> bytes:
    '''simple docstring'''
    if (len(data ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 16 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
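
# Round-trip sketch (illustrative values):
#   base16_encode(b"HELLO")     -> '48454C4C4F'
#   base16_decode('48454C4C4F') -> b'HELLO'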
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_4bit_bnb_available,
    is_8bit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
a_ :Optional[Any] = logging.getLogger(__name__)
def load_and_quantize_model( model , bnb_quantization_config , weights_location = None , device_map = None , no_split_module_classes = None , max_memory = None , offload_folder = None , offload_state_dict = False , ) -> Any:
    '''simple docstring'''
    load_in_4bit = bnb_quantization_config.load_in_4bit
    load_in_8bit = bnb_quantization_config.load_in_8bit

    if load_in_8bit and not is_8bit_bnb_available():
        raise ImportError(
            '''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )
    if load_in_4bit and not is_4bit_bnb_available():
        raise ValueError(
            '''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
            ''' make sure you have the latest version of `bitsandbytes` installed.''' )

    modules_on_cpu = []
    # custom device map
    if isinstance(device_map , dict ) and len(device_map.keys() ) > 1:
        modules_on_cpu = [key for key, value in device_map.items() if value in ['''disk''', '''cpu''']]

    # We keep some modules such as the lm_head in their original dtype for numerical stability reasons
    if bnb_quantization_config.skip_modules is None:
        bnb_quantization_config.skip_modules = get_keys_to_not_convert(model )

    # add cpu modules to skip modules only for 4-bit modules
    if load_in_4bit:
        bnb_quantization_config.skip_modules.extend(modules_on_cpu )
    modules_to_not_convert = bnb_quantization_config.skip_modules

    # We add the modules we want to keep in full precision
    if bnb_quantization_config.keep_in_fp32_modules is None:
        bnb_quantization_config.keep_in_fp32_modules = []
    keep_in_fp32_modules = bnb_quantization_config.keep_in_fp32_modules
    modules_to_not_convert.extend(keep_in_fp32_modules )

    # compatibility with peft
    model.is_loaded_in_4bit = load_in_4bit
    model.is_loaded_in_8bit = load_in_8bit

    model_device = get_parameter_device(model )
    if model_device.type != "meta":
        # quantization of an already loaded model
        logger.warning(
            '''It is not recommended to quantize a loaded model. '''
            '''The model should be instantiated under the `init_empty_weights` context manager.''' )
        model = replace_with_bnb_layers(model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        # convert param to the right dtype
        dtype = bnb_quantization_config.torch_dtype
        for name, param in model.state_dict().items():
            if any(module_to_keep_in_fp32 in name for module_to_keep_in_fp32 in keep_in_fp32_modules ):
                param.to(torch.float32 )
                if param.dtype != torch.float32:
                    name = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
                    param = getattr(model , name , None )
                    if param is not None:
                        param.to(torch.float32 )
            elif torch.is_floating_point(param ):
                param.to(dtype )
        if model_device.type == "cuda":
            # move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
            model.cuda(torch.cuda.current_device() )
            torch.cuda.empty_cache()
        elif torch.cuda.is_available():
            model.to(torch.cuda.current_device() )
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info(
            f"""The model device type is {model_device.type}. However, cuda is needed for quantization."""
            ''' We move the model to cuda.''' )
        return model
    elif weights_location is None:
        raise RuntimeError(
            f"""`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} """ )
    else:
        with init_empty_weights():
            model = replace_with_bnb_layers(
                model , bnb_quantization_config , modules_to_not_convert=modules_to_not_convert )
        device_map = get_quantized_model_device_map(
            model , bnb_quantization_config , device_map , max_memory=max_memory , no_split_module_classes=no_split_module_classes , )
        if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
            offload_state_dict = True
        offload = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
        load_checkpoint_in_model(
            model , weights_location , device_map , dtype=bnb_quantization_config.torch_dtype , offload_folder=offload_folder , offload_state_dict=offload_state_dict , keep_in_fp32_modules=bnb_quantization_config.keep_in_fp32_modules , offload_8bit_bnb=load_in_8bit and offload , )
        return dispatch_model(model , device_map=device_map , offload_dir=offload_folder )
def get_quantized_model_device_map( model , bnb_quantization_config , device_map=None , max_memory=None , no_split_module_classes=None ) -> Union[str, Any]:
    '''simple docstring'''
    if device_map is None:
        if torch.cuda.is_available():
            device_map = {'''''': torch.cuda.current_device()}
        else:
            raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )

    if isinstance(device_map , str ):
        if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
            raise ValueError(
                '''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
                '''\'sequential\'.''' )

        special_dtypes = {}
        special_dtypes.update(
            {
                name: bnb_quantization_config.torch_dtype
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.skip_modules )
            } )
        special_dtypes.update(
            {
                name: torch.float32
                for name, _ in model.named_parameters()
                if any(m in name for m in bnb_quantization_config.keep_in_fp32_modules )
            } )

        kwargs = {}
        kwargs["special_dtypes"] = special_dtypes
        kwargs["no_split_module_classes"] = no_split_module_classes
        kwargs["dtype"] = bnb_quantization_config.target_dtype

        # get max_memory for each device.
        if device_map != "sequential":
            max_memory = get_balanced_memory(
                model , low_zero=(device_map == '''balanced_low_0''') , max_memory=max_memory , **kwargs , )

        kwargs["max_memory"] = max_memory
        device_map = infer_auto_device_map(model , **kwargs )

    if isinstance(device_map , dict ):
        # check if don't have any quantized module on the cpu
        modules_not_to_convert = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fp32_modules

        device_map_without_some_modules = {
            key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
        }
        for device in ["cpu", "disk"]:
            if device in device_map_without_some_modules.values():
                if bnb_quantization_config.load_in_4bit:
                    raise ValueError(
                        '''
                        Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit
                        the quantized model. If you want to dispatch the model on the CPU or the disk while keeping
                        these modules in `torch_dtype`, you need to pass a custom `device_map` to
                        `load_and_quantize_model`. Check
                        https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk
                        for more details.
                        ''' )
                else:
                    logger.info(
                        '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
        del device_map_without_some_modules
    return device_map
def replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None ) -> str:
    '''simple docstring'''
    if modules_to_not_convert is None:
        modules_to_not_convert = []
    model, has_been_replaced = _replace_with_bnb_layers(
        model , bnb_quantization_config , modules_to_not_convert , current_key_name )
    if not has_been_replaced:
        logger.warning(
            '''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
            ''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
            ''' Please double check your model architecture, or submit an issue on github if you think this is'''
            ''' a bug.''' )
    return model
def _replace_with_bnb_layers( model , bnb_quantization_config , modules_to_not_convert=None , current_key_name=None , ) -> Optional[int]:
    '''simple docstring'''
    has_been_replaced = False
    for name, module in model.named_children():
        if current_key_name is None:
            current_key_name = []
        current_key_name.append(name )
        if isinstance(module , nn.Linear ) and name not in modules_to_not_convert:
            # Check if the current key is not in the `modules_to_not_convert`
            current_key_name_str = '''.'''.join(current_key_name )
            proceed = True
            for key in modules_to_not_convert:
                if (
                    (key in current_key_name_str) and (key + "." in current_key_name_str)
                ) or key == current_key_name_str:
                    proceed = False
                    break
            if proceed:
                # Load bnb module with empty weight and replace ``nn.Linear` module
                if bnb_quantization_config.load_in_8bit:
                    bnb_module = bnb.nn.Linear8bitLt(
                        module.in_features , module.out_features , module.bias is not None , has_fp16_weights=False , threshold=bnb_quantization_config.llm_int8_threshold , )
                elif bnb_quantization_config.load_in_4bit:
                    bnb_module = bnb.nn.Linear4bit(
                        module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_4bit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_4bit_use_double_quant , quant_type=bnb_quantization_config.bnb_4bit_quant_type , )
                else:
                    raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
                bnb_module.weight.data = module.weight.data
                if module.bias is not None:
                    bnb_module.bias.data = module.bias.data
                bnb_module.requires_grad_(False )
                setattr(model , name , bnb_module )
                has_been_replaced = True
        if len(list(module.children() ) ) > 0:
            _, _has_been_replaced = _replace_with_bnb_layers(
                module , bnb_quantization_config , modules_to_not_convert , current_key_name )
            has_been_replaced = has_been_replaced | _has_been_replaced
        # Remove the last key for recursion
        current_key_name.pop(-1 )
    return model, has_been_replaced
def get_keys_to_not_convert( model ) -> str:
    '''simple docstring'''
    with init_empty_weights():
        tied_model = deepcopy(model )  # this has 0 cost since it is done inside `init_empty_weights` context manager`
    tied_params = find_tied_parameters(tied_model )
    # For compatibility with Accelerate < 0.18
    if isinstance(tied_params , dict ):
        tied_keys = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
    else:
        tied_keys = sum(tied_params , [] )
    has_tied_params = len(tied_keys ) > 0

    # Check if it is a base model
    is_base_model = False
    if hasattr(model , '''base_model_prefix''' ):
        is_base_model = not hasattr(model , model.base_model_prefix )

    # Ignore this for base models (BertModel, GPT2Model, etc.)
    if (not has_tied_params) and is_base_model:
        return []

    # otherwise they have an attached head
    list_modules = list(model.named_children() )
    list_last_module = [list_modules[-1][0]]

    # add last module together with tied weights
    intersection = set(list_last_module ) - set(tied_keys )
    list_untouched = list(set(tied_keys ) ) + list(intersection )

    # remove ".weight" from the keys
    names_to_remove = ['''.weight''', '''.bias''']
    filtered_module_names = []
    for name in list_untouched:
        for name_to_remove in names_to_remove:
            if name_to_remove in name:
                name = name.replace(name_to_remove , '''''' )
        filtered_module_names.append(name )
    return filtered_module_names
def has_4bit_bnb_layers( model ) -> Optional[Any]:
    '''simple docstring'''
    for m in model.modules():
        if isinstance(m , bnb.nn.Linear4bit ):
            return True
    return False


def get_parameter_device( parameter ) -> Optional[Any]:
    '''simple docstring'''
    return next(parameter.parameters() ).device
def quantize_and_offload( model , param , param_name , new_dtype , offload_folder , offload_index , fp16_statistics ) -> Tuple:
    '''simple docstring'''
    if fp16_statistics is None:
        set_module_tensor_to_device(model , param_name , 0 , dtype=new_dtype , value=param )
    tensor_name = param_name
    module = model
    if "." in tensor_name:
        splits = tensor_name.split('''.''' )
        for split in splits[:-1]:
            new_module = getattr(module , split )
            if new_module is None:
                raise ValueError(f"""{module} has no attribute {split}.""" )
            module = new_module
        tensor_name = splits[-1]
    # offload weights
    module._parameters[tensor_name].requires_grad = False
    offload_weight(module._parameters[tensor_name] , param_name , offload_folder , index=offload_index )
    if hasattr(module._parameters[tensor_name] , '''SCB''' ):
        offload_weight(
            module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index , )
    else:
        offload_weight(param , param_name , offload_folder , index=offload_index )
        offload_weight(fp16_statistics , param_name.replace('''weight''' , '''SCB''' ) , offload_folder , index=offload_index )
    set_module_tensor_to_device(model , tensor_name , '''meta''' , dtype=new_dtype , value=torch.empty(*param.size() ) )
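
# Typical call pattern (a sketch based on the public `accelerate` quantization
# docs; the model class and the paths are placeholders, not from this file):
#
#   from accelerate import init_empty_weights
#   from accelerate.utils import BnbQuantizationConfig
#
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True, llm_int8_threshold=6.0)
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical model class
#   model = load_and_quantize_model(
#       empty_model,
#       bnb_quantization_config=bnb_config,
#       weights_location="path/to/weights",  # placeholder
#       device_map="auto",
#   )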
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
    mod_file = inspect.getfile(accelerate.test_utils )
    test_file_path = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
    base_cmd = ['''accelerate''', '''launch''']
    config_folder = Path.home() / '''.cache/huggingface/accelerate'''
    config_file = '''default_config.yaml'''
    config_path = config_folder / config_file
    changed_path = config_folder / '''_default_config.yaml'''
    test_config_path = Path('''tests/test_configs''' )
    @classmethod
    def setUpClass( cls : Any ):
        if cls.config_path.is_file():
            cls.config_path.rename(cls.changed_path )

    @classmethod
    def tearDownClass( cls : List[Any] ):
        if cls.changed_path.is_file():
            cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
        cmd = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
    tpu_name = '''test-tpu'''
    tpu_zone = '''us-central1-a'''
    command = '''ls'''
    cmd = ['''accelerate''', '''tpu-config''']
    base_output = '''cd /usr/share'''
    command_file = '''tests/test_samples/test_command_file.sh'''
    gcloud = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ :int = logging.get_logger(__name__)
a_ :Tuple = {
'facebook/s2t-small-librispeech-asr': (
'https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class Speech2TextConfig( PretrainedConfig ):
    model_type = '''speech_to_text'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
    def __init__( self , vocab_size=10000 , encoder_layers=12 , encoder_ffn_dim=2048 , encoder_attention_heads=4 , decoder_layers=6 , decoder_ffn_dim=2048 , decoder_attention_heads=4 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , use_cache=True , is_encoder_decoder=True , activation_function="relu" , d_model=256 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , decoder_start_token_id=2 , scale_embedding=True , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , max_source_positions=6000 , max_target_positions=1024 , num_conv_layers=2 , conv_kernel_sizes=(5, 5) , conv_channels=1024 , input_feat_per_channel=80 , input_channels=1 , **kwargs , ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        self.num_conv_layers = num_conv_layers
        self.conv_kernel_sizes = list(conv_kernel_sizes )
        self.conv_channels = conv_channels
        self.input_feat_per_channel = input_feat_per_channel
        self.input_channels = input_channels

        if len(self.conv_kernel_sizes ) != self.num_conv_layers:
            raise ValueError(
                '''Configuration for convolutional module is incorrect. '''
                '''It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '''
                f"""but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, """
                f"""`config.num_conv_layers = {self.num_conv_layers}`.""" )

        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , **kwargs , )
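
# Quick sketch (not part of the original file): the defaults above mirror the
# s2t-small hyper-parameters, so a bare instantiation exposes e.g.:
#   cfg = Speech2TextConfig()
#   (cfg.d_model, cfg.num_conv_layers, cfg.conv_kernel_sizes)  # -> (256, 2, [5, 5])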
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_groupvit"] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_groupvit"] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
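
# With the `_LazyModule` registration above, submodules are only imported on
# first attribute access; e.g. `from transformers import GroupViTModel` triggers
# the actual `modeling_groupvit` import at that point, which keeps a plain
# `import transformers` cheap.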
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class PoolFormerImageProcessingTester( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize_and_center_crop=True , size=None , crop_pct=0.9 , crop_size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {'''shortest_edge''': 30}
        crop_size = crop_size if crop_size is not None else {'''height''': 30, '''width''': 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
    def prepare_image_processor_dict( self : Optional[Any] ):
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class PoolFormerImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None
    def setUp( self : Optional[Any] ):
        self.image_processor_tester = PoolFormerImageProcessingTester(self )
@property
    def image_processor_dict( self : Union[str, Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self : str ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , '''do_resize_and_center_crop''' ) )
        self.assertTrue(hasattr(image_processing , '''size''' ) )
        self.assertTrue(hasattr(image_processing , '''crop_pct''' ) )
        self.assertTrue(hasattr(image_processing , '''do_normalize''' ) )
        self.assertTrue(hasattr(image_processing , '''image_mean''' ) )
        self.assertTrue(hasattr(image_processing , '''image_std''' ) )
    def test_image_processor_from_dict_with_kwargs( self : Dict ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 30} )
        self.assertEqual(image_processor.crop_size , {'''height''': 30, '''width''': 30} )

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
        self.assertEqual(image_processor.size , {'''shortest_edge''': 42} )
        self.assertEqual(image_processor.crop_size , {'''height''': 84, '''width''': 84} )
    def test_batch_feature( self : Optional[int] ):
pass
    def test_call_pil( self : Dict ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_numpy( self : Tuple ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , numpify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
    def test_call_pytorch( self : int ):
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_lowercase , torchify=_lowercase )
for image in image_inputs:
self.assertIsInstance(_lowercase , torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[str] = image_processing(image_inputs[0] , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
# Test batched
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(_lowercase , return_tensors='''pt''' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['''height'''],
self.image_processor_tester.crop_size['''width'''],
) , )
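
# The crop_pct logic exercised above, in brief: the shortest edge is first scaled
# by 1 / crop_pct before the center crop, e.g. (a sketch of the arithmetic):
#   int(30 / 0.9)  # -> 33, from which a 30x30 center crop is then taken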
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class DatasetFromListTest( TestCase ):
    def _create_example_records( self : Optional[int] ):
        return [
            {"col_1": 3, "col_2": "a"},
            {"col_1": 2, "col_2": "b"},
            {"col_1": 1, "col_2": "c"},
            {"col_1": 0, "col_2": "d"},
        ]

    def _create_example_dataset_from_dict( self : Union[str, Any] ):
        data = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
        return Dataset.from_dict(data )

    def test_create( self : Dict ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )

    def test_list_dict_equivalent( self : Optional[Any] ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
        self.assertEqual(dset.info , dset_from_dict.info )

    def test_uneven_records( self : List[Any] ):  # checks what happens with missing columns
        uneven_records = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
        dset = Dataset.from_list(uneven_records )
        self.assertDictEqual(dset[0] , {'''col_1''': 1} )
        self.assertDictEqual(dset[1] , {'''col_1''': None} )  # NB: first record is used for columns

    def test_variable_list_records( self : int ):  # checks if the type can be inferred from the second record
        records = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
        dset = Dataset.from_list(records )
        self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )

    def test_create_empty( self : Optional[Any] ):
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
        self.assertListEqual(dset.column_names , [] )
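
# Behaviour pinned by the tests above: `Dataset.from_list` takes its schema from
# the first record, fills missing keys with None, and infers a column's type from
# the first non-empty value (hence Sequence(Value("int64")) for [[], [1, 2]]).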
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ :List[str] = logging.get_logger(__name__)
a_ :List[str] = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig( PretrainedConfig ):
    model_type = '''owlvit_text_model'''
def __init__( self : List[str] , _lowercase : Optional[Any]=4_94_08 , _lowercase : Tuple=5_12 , _lowercase : List[str]=20_48 , _lowercase : List[Any]=12 , _lowercase : Any=8 , _lowercase : int=16 , _lowercase : int="quick_gelu" , _lowercase : Any=1E-5 , _lowercase : Union[str, Any]=0.0 , _lowercase : Union[str, Any]=0.02 , _lowercase : List[Any]=1.0 , _lowercase : List[str]=0 , _lowercase : List[Any]=4_94_06 , _lowercase : Dict=4_94_07 , **_lowercase : Tuple , ):
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Optional[Any] = attention_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : int = initializer_factor
@classmethod
def lowercase__ ( cls : Any , _lowercase : Union[str, os.PathLike] , **_lowercase : Union[str, Any] ):
cls._set_token_in_kwargs(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = cls.get_config_dict(_lowercase , **_lowercase )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
SCREAMING_SNAKE_CASE__ : Dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_lowercase , **_lowercase )
class OwlViTVisionConfig( PretrainedConfig ):
    model_type = '''owlvit_vision_model'''
def __init__( self : Any , _lowercase : Dict=7_68 , _lowercase : List[str]=30_72 , _lowercase : Tuple=12 , _lowercase : Any=12 , _lowercase : Any=3 , _lowercase : Any=7_68 , _lowercase : Optional[int]=32 , _lowercase : Dict="quick_gelu" , _lowercase : Union[str, Any]=1E-5 , _lowercase : Optional[int]=0.0 , _lowercase : Dict=0.02 , _lowercase : Optional[Any]=1.0 , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Any = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[Any] = num_attention_heads
SCREAMING_SNAKE_CASE__ : Any = num_channels
SCREAMING_SNAKE_CASE__ : int = image_size
SCREAMING_SNAKE_CASE__ : str = patch_size
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Any = attention_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = initializer_factor
@classmethod
def lowercase__ ( cls : Any , _lowercase : Union[str, os.PathLike] , **_lowercase : Union[str, Any] ):
cls._set_token_in_kwargs(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = cls.get_config_dict(_lowercase , **_lowercase )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get('''model_type''' ) == "owlvit":
SCREAMING_SNAKE_CASE__ : str = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_lowercase , **_lowercase )
class OwlViTConfig( PretrainedConfig ):
    model_type = '''owlvit'''
    is_composition = True
def __init__( self : Union[str, Any] , _lowercase : Dict=None , _lowercase : List[str]=None , _lowercase : Union[str, Any]=5_12 , _lowercase : Dict=2.6592 , _lowercase : Union[str, Any]=True , **_lowercase : List[str] , ):
super().__init__(**_lowercase )
if text_config is None:
SCREAMING_SNAKE_CASE__ : List[Any] = {}
logger.info('''text_config is None. Initializing the OwlViTTextConfig with default values.''' )
if vision_config is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = {}
logger.info('''vision_config is None. initializing the OwlViTVisionConfig with default values.''' )
SCREAMING_SNAKE_CASE__ : Any = OwlViTTextConfig(**_lowercase )
SCREAMING_SNAKE_CASE__ : int = OwlViTVisionConfig(**_lowercase )
SCREAMING_SNAKE_CASE__ : Any = projection_dim
SCREAMING_SNAKE_CASE__ : Dict = logit_scale_init_value
SCREAMING_SNAKE_CASE__ : Union[str, Any] = return_dict
SCREAMING_SNAKE_CASE__ : Any = 1.0
@classmethod
def lowercase__ ( cls : List[Any] , _lowercase : Union[str, os.PathLike] , **_lowercase : Dict ):
cls._set_token_in_kwargs(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = cls.get_config_dict(_lowercase , **_lowercase )
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(_lowercase , **_lowercase )
@classmethod
def lowercase__ ( cls : int , _lowercase : Dict , _lowercase : Dict , **_lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : List[str] = text_config
SCREAMING_SNAKE_CASE__ : Tuple = vision_config
return cls.from_dict(_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : str = copy.deepcopy(self.__dict__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.text_config.to_dict()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.vision_config.to_dict()
SCREAMING_SNAKE_CASE__ : int = self.__class__.model_type
return output
class OwlViTOnnxConfig( OnnxConfig ):
@property
def lowercase__ ( self : int ):
return OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''sequence'''}),
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
('''attention_mask''', {0: '''batch''', 1: '''sequence'''}),
] )
@property
def lowercase__ ( self : Dict ):
return OrderedDict(
[
('''logits_per_image''', {0: '''batch'''}),
('''logits_per_text''', {0: '''batch'''}),
('''text_embeds''', {0: '''batch'''}),
('''image_embeds''', {0: '''batch'''}),
] )
@property
def lowercase__ ( self : Any ):
return 1E-4
def lowercase__ ( self : Tuple , _lowercase : "ProcessorMixin" , _lowercase : int = -1 , _lowercase : int = -1 , _lowercase : Optional["TensorType"] = None , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = super().generate_dummy_inputs(
processor.tokenizer , batch_size=_lowercase , seq_length=_lowercase , framework=_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = super().generate_dummy_inputs(
processor.image_processor , batch_size=_lowercase , framework=_lowercase )
return {**text_input_dict, **image_input_dict}
@property
def lowercase__ ( self : int ):
return 14
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE__ : str = conva_get[2]
SCREAMING_SNAKE_CASE__ : Any = size_pa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE__ : Tuple = rate_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1
    def save_model( self : Union[str, Any] , save_path : Any ):
# save model dict with pickle
        model_dic = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
        with open(save_path , '''wb''' ) as f:
            pickle.dump(model_dic , f )
print(f"""Model saved: {save_path}""" )
@classmethod
    def ReadModel( cls : Dict , model_path : int ):
        # read saved model
        with open(model_path , '''rb''' ) as f:
            model_dic = pickle.load(f )  # noqa: S301

        conv_get = model_dic.get('''conv1''' )
        conv_get.append(model_dic.get('''step_conv1''' ) )
        size_p1 = model_dic.get('''size_pooling1''' )
        bp_num1 = model_dic.get('''num_bp1''' )
        bp_num2 = model_dic.get('''num_bp2''' )
        bp_num3 = model_dic.get('''num_bp3''' )
        rate_w = model_dic.get('''rate_weight''' )
        rate_t = model_dic.get('''rate_thre''' )
        # create model instance
        conv_ins = CNN(conv_get , size_p1 , bp_num1 , bp_num2 , bp_num3 , rate_w , rate_t )
        # modify model parameter
        conv_ins.w_conva = model_dic.get('''w_conv1''' )
        conv_ins.wkj = model_dic.get('''wkj''' )
        conv_ins.vji = model_dic.get('''vji''' )
        conv_ins.thre_conva = model_dic.get('''thre_conv1''' )
        conv_ins.thre_bpa = model_dic.get('''thre_bp2''' )
        conv_ins.thre_bpa = model_dic.get('''thre_bp3''' )
        return conv_ins
    def sig( self : str , x : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
    def do_round( self : Union[str, Any] , _lowercase : List[str] ):
return round(_lowercase , 3 )
    def convolute( self : List[str] , data : Union[str, Any] , convs : int , w_convs : Union[str, Any] , thre_convs : Union[str, Any] , conv_step : Union[str, Any] ):
        # convolution process
        size_conv = convs[0]
        num_conv = convs[1]
        size_data = np.shape(data )[0]
        # get the data slice of original image data, data_focus
        data_focus = []
        for i_focus in range(0 , size_data - size_conv + 1 , conv_step ):
            for j_focus in range(0 , size_data - size_conv + 1 , conv_step ):
                focus = data[
                    i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
                ]
                data_focus.append(focus )
        # calculate the feature map of every single kernel, and saved as list of matrix
        data_featuremap = []
        size_feature_map = int((size_data - size_conv) / conv_step + 1 )
        for i_map in range(num_conv ):
            featuremap = []
            for i_focus in range(len(data_focus ) ):
                net_focus = (
                    np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
                    - thre_convs[i_map]
                )
                featuremap.append(self.sig(net_focus ) )
            featuremap = np.asmatrix(featuremap ).reshape(
                size_feature_map , size_feature_map )
            data_featuremap.append(featuremap )
        # expanding the data slice to one dimension
        focusa_list = []
        for each_focus in data_focus:
            focusa_list.extend(self._expand_mat(each_focus ) )
        focus_list = np.asarray(focusa_list )
        return focus_list, data_featuremap
    def pooling( self : List[Any] , featuremaps : Tuple , size_pooling : Union[str, Any] , pooling_type : Optional[Any]="average_pool" ):
        # pooling process
        size_map = len(featuremaps[0] )
        size_pooled = int(size_map / size_pooling )
        featuremap_pooled = []
        for i_map in range(len(featuremaps ) ):
            feature_map = featuremaps[i_map]
            map_pooled = []
            for i_focus in range(0 , size_map , size_pooling ):
                for j_focus in range(0 , size_map , size_pooling ):
                    focus = feature_map[
                        i_focus : i_focus + size_pooling,
                        j_focus : j_focus + size_pooling,
                    ]
                    if pooling_type == "average_pool":
                        # average pooling
                        map_pooled.append(np.average(focus ) )
                    elif pooling_type == "max_pooling":
                        # max pooling
                        map_pooled.append(np.max(focus ) )
            map_pooled = np.asmatrix(map_pooled ).reshape(size_pooled , size_pooled )
            featuremap_pooled.append(map_pooled )
        return featuremap_pooled
    def _expand( self : Optional[Any] , data : Optional[Any] ):
        # expanding three dimension data to one dimension list
        data_expanded = []
        for i in range(len(data ) ):
            shapes = np.shape(data[i] )
            data_listed = data[i].reshape(1 , shapes[0] * shapes[1] )
            data_listed = data_listed.getA().tolist()[0]
            data_expanded.extend(data_listed )
        data_expanded = np.asarray(data_expanded )
        return data_expanded
    def _expand_mat( self : Tuple , data : Optional[int] ):
        # expanding matrix to one dimension list
        data_mat = np.asarray(data )
        shapes = np.shape(data_mat )
        data_expanded = data_mat.reshape(1 , shapes[0] * shapes[1] )
        return data_expanded
    def _calculate_gradient_from_pool( self : List[str] , out_map : List[str] , pd_pool : List[str] , num_map : Any , size_map : Optional[Any] , size_pooling : str ):
        pd_all = []
        i_pool = 0
        for i_map in range(num_map ):
            pd_conva = np.ones((size_map, size_map) )
            for i in range(0 , size_map , size_pooling ):
                for j in range(0 , size_map , size_pooling ):
                    pd_conva[i : i + size_pooling, j : j + size_pooling] = pd_pool[
                        i_pool
                    ]
                    i_pool = i_pool + 1
            pd_conva = np.multiply(
                pd_conva , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
            pd_all.append(pd_conva )
        return pd_all
def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ):
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase )
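                # forward pass complete: convolution -> pooling -> flatten -> two
                # sigmoid dense layers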
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji )
SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
                # fully connected layers
SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the summed absolute error of this single image
SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1
SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '''+-''' )
plt.plot(_lowercase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
        print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self : Union[str, Any] , _lowercase : int ):
        # model prediction
SCREAMING_SNAKE_CASE__ : Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Tuple ):
        # return the image data after the convolution and pooling steps so they can be inspected
SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
| 35 | 1 |
from functools import reduce
a_ :Optional[Any] = (
'73167176531330624919225119674426574742355349194934'
'96983520312774506326239578318016984801869478851843'
'85861560789112949495459501737958331952853208805511'
'12540698747158523863050715693290963295227443043557'
'66896648950445244523161731856403098711121722383113'
'62229893423380308135336276614282806444486645238749'
'30358907296290491560440772390713810515859307960866'
'70172427121883998797908792274921901699720888093776'
'65727333001053367881220235421809751254540594752243'
'52584907711670556013604839586446706324415722155397'
'53697817977846174064955149290862569321978468622482'
'83972241375657056057490261407972968652414535100474'
'82166370484403199890008895243450658541227588666881'
'16427171479924442928230863465674813919123162824586'
'17866458359124566529476545682848912883142607690042'
'24219022671055626321111109370544217506941658960408'
'07198403850962455444362981230987879927244284909188'
'84580156166097919133875499200524063689912560717606'
'05886116467109405077541002256983155200055935729725'
'71636269561882670428252483600823257530420752963450'
)
def a ( A__ = N ) -> int:
'''simple docstring'''
return max(
# mypy cannot properly interpret reduce
int(reduce(lambda A__ , A__ : str(int(A__ ) * int(A__ ) ) , n[i : i + 1_3] ) )
for i in range(len(A__ ) - 1_2 ) )
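# A de-obfuscated sketch of the same sliding-window idea (a hypothetical
# rewrite, assuming the digit string above is named `series`; `math.prod`
# stands in for the string-based reduce):
#
#     import math
#     best = max(
#         math.prod(int(d) for d in series[i : i + 13])
#         for i in range(len(series) - 12)
#     )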
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
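        # with use_cache=True the model additionally returns past_key_values,
        # hence one extra element in the output tuple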
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
        # create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append the new tokens to input_ids to form next_input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
| 35 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..bit import BitConfig
a_ :Optional[int] = logging.get_logger(__name__)
a_ :Any = {
'Intel/dpt-large': 'https://huggingface.co/Intel/dpt-large/resolve/main/config.json',
# See all DPT models at https://huggingface.co/models?filter=dpt
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = '''dpt'''
def __init__( self : int , _lowercase : str=7_68 , _lowercase : Optional[int]=12 , _lowercase : str=12 , _lowercase : List[Any]=30_72 , _lowercase : int="gelu" , _lowercase : int=0.0 , _lowercase : int=0.0 , _lowercase : Optional[int]=0.02 , _lowercase : Optional[int]=1E-12 , _lowercase : Union[str, Any]=3_84 , _lowercase : int=16 , _lowercase : Dict=3 , _lowercase : int=False , _lowercase : str=True , _lowercase : Dict=[2, 5, 8, 11] , _lowercase : Any="project" , _lowercase : Any=[4, 2, 1, 0.5] , _lowercase : Optional[Any]=[96, 1_92, 3_84, 7_68] , _lowercase : Tuple=2_56 , _lowercase : Optional[int]=-1 , _lowercase : Tuple=False , _lowercase : str=True , _lowercase : Dict=0.4 , _lowercase : Union[str, Any]=2_55 , _lowercase : str=0.1 , _lowercase : Union[str, Any]=[1, 10_24, 24, 24] , _lowercase : Union[str, Any]=[0, 1] , _lowercase : Union[str, Any]=None , **_lowercase : Optional[int] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : int = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = is_hybrid
if self.is_hybrid:
if backbone_config is None:
logger.info('''Initializing the config with a `BiT` backbone.''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
}
SCREAMING_SNAKE_CASE__ : List[str] = BitConfig(**_lowercase )
elif isinstance(_lowercase , _lowercase ):
logger.info('''Initializing the config with a `BiT` backbone.''' )
SCREAMING_SNAKE_CASE__ : str = BitConfig(**_lowercase )
elif isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : str = backbone_config
else:
raise ValueError(
f"""backbone_config must be a dictionary or a `PretrainedConfig`, got {backbone_config.__class__}.""" )
SCREAMING_SNAKE_CASE__ : str = backbone_featmap_shape
SCREAMING_SNAKE_CASE__ : Optional[Any] = neck_ignore_stages
if readout_type != "project":
raise ValueError('''Readout type must be \'project\' when using `DPT-hybrid` mode.''' )
else:
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : Optional[int] = None
SCREAMING_SNAKE_CASE__ : str = []
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : Dict = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Dict = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Union[str, Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : Dict = image_size
SCREAMING_SNAKE_CASE__ : Tuple = patch_size
SCREAMING_SNAKE_CASE__ : Dict = num_channels
SCREAMING_SNAKE_CASE__ : List[str] = qkv_bias
SCREAMING_SNAKE_CASE__ : List[Any] = backbone_out_indices
if readout_type not in ["ignore", "add", "project"]:
raise ValueError('''Readout_type must be one of [\'ignore\', \'add\', \'project\']''' )
SCREAMING_SNAKE_CASE__ : str = readout_type
SCREAMING_SNAKE_CASE__ : List[Any] = reassemble_factors
SCREAMING_SNAKE_CASE__ : Optional[int] = neck_hidden_sizes
SCREAMING_SNAKE_CASE__ : List[str] = fusion_hidden_size
SCREAMING_SNAKE_CASE__ : Any = head_in_index
SCREAMING_SNAKE_CASE__ : int = use_batch_norm_in_fusion_residual
# auxiliary head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE__ : List[str] = use_auxiliary_head
SCREAMING_SNAKE_CASE__ : List[str] = auxiliary_loss_weight
SCREAMING_SNAKE_CASE__ : Any = semantic_loss_ignore_index
SCREAMING_SNAKE_CASE__ : Dict = semantic_classifier_dropout
def lowercase__ ( self : int ):
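        # serialize to a plain dict, flattening the nested backbone config so the
        # configuration can round-trip through JSON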
SCREAMING_SNAKE_CASE__ : int = copy.deepcopy(self.__dict__ )
if output["backbone_config"] is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE__ : List[Any] = self.__class__.model_type
return output
| 35 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
| 35 | 1 |
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
a_ :Union[str, Any] = logging.get_logger(__name__)
a_ :Union[str, Any] = 'T5Config'
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : int = '''mt5'''
lowerCamelCase : List[str] = MTaConfig
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = '''mt5'''
lowerCamelCase : Dict = MTaConfig
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Any = '''mt5'''
lowerCamelCase : List[str] = MTaConfig
| 35 |
from __future__ import annotations
def a ( A__ , A__ , A__ ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
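# Example (hypothetical numbers, using the original positional signature
# (voltage, current, resistance)): passing current=0 asks the helper to solve
# for the missing quantity via I = V / R:
#
#     a(10, 0, 5)  # -> {'current': 2.0}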
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import pytest
import datasets
# Import fixture modules as plugins
a_ :Union[str, Any] = ['tests.fixtures.files', 'tests.fixtures.hub', 'tests.fixtures.fsspec']
def a ( A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
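    # any test not explicitly marked `integration` or `unit` is treated as a unit test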
for item in items:
if any(marker in item.keywords for marker in ['''integration''', '''unit'''] ):
continue
item.add_marker(pytest.mark.unit )
def a ( A__ ) -> int:
'''simple docstring'''
config.addinivalue_line('''markers''' , '''torchaudio_latest: mark test to run with torchaudio>=0.12''' )
@pytest.fixture(autouse=A__ )
def a ( A__ , A__ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = tmp_path_factory.getbasetemp() / '''cache'''
SCREAMING_SNAKE_CASE__ : Tuple = test_hf_cache_home / '''datasets'''
SCREAMING_SNAKE_CASE__ : Dict = test_hf_cache_home / '''metrics'''
SCREAMING_SNAKE_CASE__ : int = test_hf_cache_home / '''modules'''
monkeypatch.setattr('''datasets.config.HF_DATASETS_CACHE''' , str(A__ ) )
monkeypatch.setattr('''datasets.config.HF_METRICS_CACHE''' , str(A__ ) )
monkeypatch.setattr('''datasets.config.HF_MODULES_CACHE''' , str(A__ ) )
SCREAMING_SNAKE_CASE__ : Dict = test_hf_datasets_cache / '''downloads'''
monkeypatch.setattr('''datasets.config.DOWNLOADED_DATASETS_PATH''' , str(A__ ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = test_hf_datasets_cache / '''downloads''' / '''extracted'''
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(A__ ) )
@pytest.fixture(autouse=A__ , scope='''session''' )
def a ( ) -> List[str]:
'''simple docstring'''
datasets.disable_progress_bar()
@pytest.fixture(autouse=A__ )
def a ( A__ ) -> Union[str, Any]:
'''simple docstring'''
monkeypatch.setattr('''datasets.config.HF_UPDATE_DOWNLOAD_COUNTS''' , A__ )
@pytest.fixture
def a ( A__ ) -> int:
'''simple docstring'''
monkeypatch.setattr('''sqlalchemy.util.deprecations.SILENCE_UBER_WARNING''' , A__ )
| 35 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
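        # normalize titles/texts/questions to lists; a single question is
        # duplicated once per passage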
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
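        # score every candidate span (up to max_answer_length tokens) as
        # start_logit + end_logit, then keep the top non-overlapping spans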
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
| 35 | 1 |
from sklearn.metrics import mean_squared_error
import datasets
a_ :Tuple = '\\n@article{scikit-learn,\n title={Scikit-learn: Machine Learning in {P}ython},\n author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.\n and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.\n and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and\n Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},\n journal={Journal of Machine Learning Research},\n volume={12},\n pages={2825--2830},\n year={2011}\n}\n'
a_ :Optional[Any] = '\\nMean Squared Error(MSE) is the average of the square of difference between the predicted\nand actual values.\n'
a_ :int = '\nArgs:\n predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Estimated target values.\n references: array-like of shape (n_samples,) or (n_samples, n_outputs)\n Ground truth (correct) target values.\n sample_weight: array-like of shape (n_samples,), default=None\n Sample weights.\n multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"\n Defines aggregating of multiple output values. Array-like value defines weights used to average errors.\n\n "raw_values" : Returns a full set of errors in case of multioutput input.\n\n "uniform_average" : Errors of all outputs are averaged with uniform weight.\n\n squared : bool, default=True\n If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.\n\nReturns:\n mse : mean squared error.\nExamples:\n\n >>> mse_metric = datasets.load_metric("mse")\n >>> predictions = [2.5, 0.0, 2, 8]\n >>> references = [3, -0.5, 2, 7]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.375}\n >>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)\n >>> print(rmse_result)\n {\'mse\': 0.6123724356957945}\n\n If you\'re using multi-dimensional lists, then set the config as follows :\n\n >>> mse_metric = datasets.load_metric("mse", "multilist")\n >>> predictions = [[0.5, 1], [-1, 1], [7, -6]]\n >>> references = [[0, 2], [-1, 2], [8, -5]]\n >>> results = mse_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'mse\': 0.7083333333333334}\n >>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')\n >>> print(results) # doctest: +NORMALIZE_WHITESPACE\n {\'mse\': array([0.41666667, 1. ])}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'''
] , )
def lowercase__ ( self : Dict ):
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('''float''' ) ),
"references": datasets.Sequence(datasets.Value('''float''' ) ),
}
else:
return {
"predictions": datasets.Value('''float''' ),
"references": datasets.Value('''float''' ),
}
def lowercase__ ( self : str , _lowercase : str , _lowercase : int , _lowercase : int=None , _lowercase : Dict="uniform_average" , _lowercase : List[str]=True ):
SCREAMING_SNAKE_CASE__ : str = mean_squared_error(
_lowercase , _lowercase , sample_weight=_lowercase , multioutput=_lowercase , squared=_lowercase )
return {"mse": mse}
| 35 |
import random
def a ( A__ ) -> bool:
'''simple docstring'''
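    # write num - 1 as 2**t * s with s odd (the Miller-Rabin decomposition)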
SCREAMING_SNAKE_CASE__ : str = num - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE__ : int = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pow(A__ , A__ , A__ )
if v != 1:
SCREAMING_SNAKE_CASE__ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE__ : Any = i + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (v**2) % num
return True
def a ( A__ ) -> bool:
'''simple docstring'''
if num < 2:
return False
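    # quick screen with the primes below 1000 before falling back to Miller-Rabin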
SCREAMING_SNAKE_CASE__ : Optional[int] = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A__ )
def a ( A__ = 1_0_2_4 ) -> int:
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE__ : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A__ ):
return num
if __name__ == "__main__":
a_ :Dict = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 35 | 1 |
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
a_ :Tuple = logging.get_logger(__name__)
logging.set_verbosity_info()
def a ( A__ , A__ ) -> List[str]:
'''simple docstring'''
if "xprophetnet" in prophetnet_checkpoint_path:
SCREAMING_SNAKE_CASE__ : Tuple = XLMProphetNetForConditionalGenerationOld.from_pretrained(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = XLMProphetNetForConditionalGeneration.from_pretrained(
A__ , output_loading_info=A__ )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = ProphetNetForConditionalGenerationOld.from_pretrained(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = ProphetNetForConditionalGeneration.from_pretrained(
A__ , output_loading_info=A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''key_proj''', '''value_proj''', '''query_proj''']
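    # map parameter-name fragments from the old model structure to the new one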
SCREAMING_SNAKE_CASE__ : Tuple = {
'''self_attn''': '''ngram_self_attn''',
'''cross_attn''': '''encoder_attn''',
'''cross_attn_layer_norm''': '''encoder_attn_layer_norm''',
'''feed_forward_layer_norm''': '''final_layer_norm''',
'''feed_forward''': '''''',
'''intermediate''': '''fc1''',
'''output''': '''fc2''',
'''key_proj''': '''k_proj''',
'''query_proj''': '''q_proj''',
'''value_proj''': '''v_proj''',
'''word_embeddings''': '''embed_tokens''',
'''embeddings_layer_norm''': '''emb_layer_norm''',
'''relative_pos_embeddings''': '''relative_linear''',
'''ngram_embeddings''': '''ngram_input_embed''',
'''position_embeddings''': '''embed_positions''',
}
for key in loading_info["missing_keys"]:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = key.split('''.''' )
if attributes[0] == "lm_head":
SCREAMING_SNAKE_CASE__ : Optional[Any] = prophet
SCREAMING_SNAKE_CASE__ : Dict = prophet_old
else:
SCREAMING_SNAKE_CASE__ : List[Any] = prophet.prophetnet
SCREAMING_SNAKE_CASE__ : Any = prophet_old.model
SCREAMING_SNAKE_CASE__ : Tuple = False
for attribute in attributes:
if attribute in mapping:
SCREAMING_SNAKE_CASE__ : List[str] = mapping[attribute]
if not hasattr(A__ , A__ ) and len(A__ ) > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = attribute
elif hasattr(A__ , A__ ):
SCREAMING_SNAKE_CASE__ : Any = attribute
if attribute == "weight":
assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
SCREAMING_SNAKE_CASE__ : Optional[Any] = old_model.weight
logger.info(f"""{attribute} is initialized.""" )
SCREAMING_SNAKE_CASE__ : Any = True
break
elif attribute == "bias":
assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
SCREAMING_SNAKE_CASE__ : List[Any] = old_model.bias
logger.info(f"""{attribute} is initialized""" )
SCREAMING_SNAKE_CASE__ : int = True
break
elif attribute in special_keys and hasattr(A__ , '''in_proj_weight''' ):
SCREAMING_SNAKE_CASE__ : Dict = old_model.in_proj_weight.shape[0] // 3
SCREAMING_SNAKE_CASE__ : Dict = getattr(A__ , A__ )
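                    # the old attention stores q/k/v as one fused in_proj matrix;
                    # slice it into three equal chunks of size embed_dim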
                    assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                    assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"
if attribute == "query_proj":
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Parameter(old_model.in_proj_weight[:embed_dim, :] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Parameter(old_model.in_proj_bias[:embed_dim] )
elif attribute == "key_proj":
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :] )
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim] )
elif attribute == "value_proj":
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :] )
SCREAMING_SNAKE_CASE__ : str = True
break
elif attribute == "position_embeddings":
assert (
model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
), "Hidden size has to match"
assert model.position_embeddings.weight.shape[0] == 5_1_2, "We want 512 position_embeddings."
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Parameter(old_model.embed_positions.weight[:5_1_2, :] )
SCREAMING_SNAKE_CASE__ : Optional[Any] = True
break
if attribute.isdigit():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model[int(A__ )]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = old_model[int(A__ )]
else:
SCREAMING_SNAKE_CASE__ : List[Any] = getattr(A__ , A__ )
if old_attribute == "":
SCREAMING_SNAKE_CASE__ : Optional[int] = old_model
else:
if not hasattr(A__ , A__ ):
raise ValueError(f"""{old_model} does not have {old_attribute}""" )
SCREAMING_SNAKE_CASE__ : Optional[int] = getattr(A__ , A__ )
if not is_key_init:
raise ValueError(f"""{key} was not correctly initialized!""" )
print(f"""Saving model to {pytorch_dump_folder_path}""" )
prophet.save_pretrained(A__ )
if __name__ == "__main__":
a_ :Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :int = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
| 35 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a ( A__ ) -> List[Any]:
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def a ( A__ , A__ ) -> Any:
'''simple docstring'''
return (-y * np.log(A__ ) - (1 - y) * np.log(1 - h )).mean()
def a ( A__ , A__ , A__ ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = np.dot(A__ , A__ )
return np.sum(y * scores - np.log(1 + np.exp(A__ ) ) )
def a ( A__ , A__ , A__ , A__=7_0_0_0_0 ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = np.zeros(x.shape[1] )
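    # batch gradient descent on the log-loss: forward pass through the sigmoid,
    # gradient x.T @ (h - y) / n, then a step of size alpha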
for iterations in range(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Dict = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : int = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : int = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cost_function(A__ , A__ )
if iterations % 1_0_0 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def a ( A__ ) -> int:
'''simple docstring'''
return sigmoid_function(
np.dot(A__ , A__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) :str = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) :Tuple = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) :Dict = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ :Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
a_ :Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 35 | 1 |
from ...processing_utils import ProcessorMixin
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = '''SpeechT5FeatureExtractor'''
lowerCamelCase : Optional[Any] = '''SpeechT5Tokenizer'''
def __init__( self : int , _lowercase : List[str] , _lowercase : str ):
super().__init__(_lowercase , _lowercase )
def __call__( self : Dict , *_lowercase : Any , **_lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop('''audio''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop('''text''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Dict = kwargs.pop('''text_target''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = kwargs.pop('''audio_target''' , _lowercase )
SCREAMING_SNAKE_CASE__ : int = kwargs.pop('''sampling_rate''' , _lowercase )
if audio is not None and text is not None:
raise ValueError(
'''Cannot process both `audio` and `text` inputs. Did you mean `audio_target` or `text_target`?''' )
if audio_target is not None and text_target is not None:
raise ValueError(
'''Cannot process both `audio_target` and `text_target` inputs. Did you mean `audio` or `text`?''' )
if audio is None and audio_target is None and text is None and text_target is None:
raise ValueError(
'''You need to specify either an `audio`, `audio_target`, `text`, or `text_target` input to process.''' )
if audio is not None:
SCREAMING_SNAKE_CASE__ : Dict = self.feature_extractor(_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
elif text is not None:
SCREAMING_SNAKE_CASE__ : int = self.tokenizer(_lowercase , **_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Tuple = None
if audio_target is not None:
SCREAMING_SNAKE_CASE__ : str = self.feature_extractor(audio_target=_lowercase , *_lowercase , sampling_rate=_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = targets['''input_values''']
elif text_target is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : str = targets['''input_ids''']
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE__ : List[str] = labels
SCREAMING_SNAKE_CASE__ : List[Any] = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE__ : List[str] = decoder_attention_mask
return inputs
def lowercase__ ( self : List[str] , *_lowercase : Optional[Any] , **_lowercase : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop('''input_values''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Any = kwargs.pop('''input_ids''' , _lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = kwargs.pop('''labels''' , _lowercase )
if input_values is not None and input_ids is not None:
raise ValueError('''Cannot process both `input_values` and `input_ids` inputs.''' )
if input_values is None and input_ids is None and labels is None:
raise ValueError(
'''You need to specify either an `input_values`, `input_ids`, or `labels` input to be padded.''' )
if input_values is not None:
SCREAMING_SNAKE_CASE__ : str = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
elif input_ids is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer.pad(_lowercase , **_lowercase )
else:
SCREAMING_SNAKE_CASE__ : str = None
if labels is not None:
if "input_ids" in labels or (isinstance(_lowercase , _lowercase ) and "input_ids" in labels[0]):
SCREAMING_SNAKE_CASE__ : int = self.tokenizer.pad(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = targets['''input_ids''']
else:
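                # temporarily swap the extractor's feature_size to num_mel_bins so
                # pad() treats the targets as spectrogram frames, then restore it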
SCREAMING_SNAKE_CASE__ : List[Any] = self.feature_extractor.feature_size
SCREAMING_SNAKE_CASE__ : str = self.feature_extractor.num_mel_bins
SCREAMING_SNAKE_CASE__ : Optional[int] = self.feature_extractor.pad(_lowercase , *_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = feature_size_hack
SCREAMING_SNAKE_CASE__ : Optional[Any] = targets['''input_values''']
else:
SCREAMING_SNAKE_CASE__ : Dict = None
if inputs is None:
return targets
if targets is not None:
SCREAMING_SNAKE_CASE__ : int = labels
SCREAMING_SNAKE_CASE__ : Dict = targets.get('''attention_mask''' )
if decoder_attention_mask is not None:
SCREAMING_SNAKE_CASE__ : Any = decoder_attention_mask
return inputs
def lowercase__ ( self : Optional[int] , *_lowercase : Any , **_lowercase : Optional[Any] ):
return self.tokenizer.batch_decode(*_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , *_lowercase : Any , **_lowercase : List[str] ):
return self.tokenizer.decode(*_lowercase , **_lowercase )
| 35 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 1 |
import argparse
import math
import traceback
import dateutil.parser as date_parser
import requests
def a ( A__ ) -> List[str]:
'''simple docstring'''
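    # Parse the job's started_at/completed_at timestamps and compute its duration in minutes.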
SCREAMING_SNAKE_CASE__ : List[Any] = {}
SCREAMING_SNAKE_CASE__ : int = job['''started_at''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = job['''completed_at''']
SCREAMING_SNAKE_CASE__ : Dict = date_parser.parse(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = date_parser.parse(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = round((end_datetime - start_datetime).total_seconds() / 6_0.0 )
SCREAMING_SNAKE_CASE__ : Any = start
SCREAMING_SNAKE_CASE__ : Dict = end
SCREAMING_SNAKE_CASE__ : Optional[int] = duration_in_min
return job_info
def a ( A__ , A__=None ) -> Dict:
'''simple docstring'''
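    # Query the GitHub Actions API for all jobs of the workflow run, paginated 100 jobs at a time.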
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if token is not None:
SCREAMING_SNAKE_CASE__ : int = {'''Accept''': '''application/vnd.github+json''', '''Authorization''': f"""Bearer {token}"""}
SCREAMING_SNAKE_CASE__ : Dict = f"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
SCREAMING_SNAKE_CASE__ : List[Any] = requests.get(A__ , headers=A__ ).json()
SCREAMING_SNAKE_CASE__ : Optional[int] = {}
try:
job_time.update({job['''name''']: extract_time_from_single_job(A__ ) for job in result['''jobs''']} )
SCREAMING_SNAKE_CASE__ : Optional[Any] = math.ceil((result['''total_count'''] - 1_0_0) / 1_0_0 )
for i in range(A__ ):
SCREAMING_SNAKE_CASE__ : int = requests.get(url + f"""&page={i + 2}""" , headers=A__ ).json()
job_time.update({job['''name''']: extract_time_from_single_job(A__ ) for job in result['''jobs''']} )
return job_time
except Exception:
print(f"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
if __name__ == "__main__":
a_ :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('--workflow_run_id', type=str, required=True, help='A GitHub Actions workflow run id.')
a_ :Any = parser.parse_args()
a_ :str = get_job_time(args.workflow_run_id)
a_ :Optional[int] = dict(sorted(job_time.items(), key=lambda item: item[1]["duration"], reverse=True))
for k, v in job_time.items():
print(F'''{k}: {v['duration']}''')
| 35 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( A__ , A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = RemBertConfig.from_json_file(A__ )
print('''Building PyTorch model from configuration: {}'''.format(str(A__ ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = RemBertModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A__ , A__ , A__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(A__ ) )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 35 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : jnp.ndarray
lowerCamelCase : jnp.ndarray
class lowercase ( nn.Module ):
lowerCamelCase : int
lowerCamelCase : Tuple[int] = (16, 32, 96, 256)
lowerCamelCase : jnp.dtype = jnp.floataa
def lowercase__ ( self : Union[str, Any] ):
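        # Alternating plain and strided 3x3 convolutions embed the conditioning image down to the
        # latent resolution; the final convolution is zero-initialised (ControlNet's "zero convolution").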
SCREAMING_SNAKE_CASE__ : Dict = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
SCREAMING_SNAKE_CASE__ : str = []
for i in range(len(self.block_out_channels ) - 1 ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.block_out_channels[i]
SCREAMING_SNAKE_CASE__ : Any = self.block_out_channels[i + 1]
SCREAMING_SNAKE_CASE__ : str = nn.Conv(
_lowercase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = nn.Conv(
_lowercase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = blocks
SCREAMING_SNAKE_CASE__ : List[str] = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Union[str, Any] , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.conv_in(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = nn.silu(_lowercase )
for block in self.blocks:
SCREAMING_SNAKE_CASE__ : Dict = block(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.silu(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.conv_out(_lowercase )
return embedding
@flax_register_to_config
class lowercase ( nn.Module , _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : int = 32
lowerCamelCase : int = 4
lowerCamelCase : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCamelCase : Union[bool, Tuple[bool]] = False
lowerCamelCase : Tuple[int] = (320, 640, 1_280, 1_280)
lowerCamelCase : int = 2
lowerCamelCase : Union[int, Tuple[int]] = 8
lowerCamelCase : Optional[Union[int, Tuple[int]]] = None
lowerCamelCase : int = 1_280
lowerCamelCase : float = 0.0
lowerCamelCase : bool = False
lowerCamelCase : jnp.dtype = jnp.floataa
lowerCamelCase : bool = True
lowerCamelCase : int = 0
lowerCamelCase : str = "rgb"
lowerCamelCase : Tuple[int] = (16, 32, 96, 256)
def lowercase__ ( self : int , _lowercase : jax.random.KeyArray ):
# init input tensors
SCREAMING_SNAKE_CASE__ : Any = (1, self.in_channels, self.sample_size, self.sample_size)
SCREAMING_SNAKE_CASE__ : Tuple = jnp.zeros(_lowercase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.ones((1,) , dtype=jnp.intaa )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ : Any = (1, 3, self.sample_size * 8, self.sample_size * 8)
SCREAMING_SNAKE_CASE__ : List[Any] = jnp.zeros(_lowercase , dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.split(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = {'''params''': params_rng, '''dropout''': dropout_rng}
return self.init(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase )["params"]
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = self.block_out_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
SCREAMING_SNAKE_CASE__ : Optional[int] = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
SCREAMING_SNAKE_CASE__ : Tuple = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxTimestepEmbedding(_lowercase , dtype=self.dtype )
SCREAMING_SNAKE_CASE__ : List[Any] = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
SCREAMING_SNAKE_CASE__ : Any = self.only_cross_attention
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = (num_attention_heads,) * len(self.down_block_types )
# down
SCREAMING_SNAKE_CASE__ : Any = []
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = block_out_channels[0]
SCREAMING_SNAKE_CASE__ : List[str] = nn.Conv(
_lowercase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowercase )
for i, down_block_type in enumerate(self.down_block_types ):
SCREAMING_SNAKE_CASE__ : Tuple = output_channel
SCREAMING_SNAKE_CASE__ : Any = block_out_channels[i]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = i == len(_lowercase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
SCREAMING_SNAKE_CASE__ : int = FlaxCrossAttnDownBlockaD(
in_channels=_lowercase , out_channels=_lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
SCREAMING_SNAKE_CASE__ : Dict = FlaxDownBlockaD(
in_channels=_lowercase , out_channels=_lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowercase )
for _ in range(self.layers_per_block ):
SCREAMING_SNAKE_CASE__ : int = nn.Conv(
_lowercase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowercase )
if not is_final_block:
SCREAMING_SNAKE_CASE__ : str = nn.Conv(
_lowercase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = down_blocks
SCREAMING_SNAKE_CASE__ : List[str] = controlnet_down_blocks
# mid
SCREAMING_SNAKE_CASE__ : List[Any] = block_out_channels[-1]
SCREAMING_SNAKE_CASE__ : List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=_lowercase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = nn.Conv(
_lowercase , kernel_size=(1, 1) , padding='''VALID''' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self : Tuple , _lowercase : Dict , _lowercase : List[Any] , _lowercase : str , _lowercase : Union[str, Any] , _lowercase : float = 1.0 , _lowercase : bool = True , _lowercase : bool = False , ):
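        # Flip the conditioning image channels when the checkpoint expects BGR rather than RGB order.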
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.flip(_lowercase , axis=1 )
# 1. time
if not isinstance(_lowercase , jnp.ndarray ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0:
SCREAMING_SNAKE_CASE__ : int = timesteps.astype(dtype=jnp.floataa )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.expand_dims(_lowercase , 0 )
SCREAMING_SNAKE_CASE__ : Dict = self.time_proj(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.time_embedding(_lowercase )
# 2. pre-process
SCREAMING_SNAKE_CASE__ : Any = jnp.transpose(_lowercase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE__ : str = self.conv_in(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = jnp.transpose(_lowercase , (0, 2, 3, 1) )
SCREAMING_SNAKE_CASE__ : Dict = self.controlnet_cond_embedding(_lowercase )
sample += controlnet_cond
# 3. down
SCREAMING_SNAKE_CASE__ : Dict = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = down_block(_lowercase , _lowercase , _lowercase , deterministic=not train )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = down_block(_lowercase , _lowercase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
SCREAMING_SNAKE_CASE__ : List[str] = self.mid_block(_lowercase , _lowercase , _lowercase , deterministic=not train )
        # 5. controlnet blocks
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ()
for down_block_res_sample, controlnet_block in zip(_lowercase , self.controlnet_down_blocks ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = controlnet_block(_lowercase )
controlnet_down_block_res_samples += (down_block_res_sample,)
SCREAMING_SNAKE_CASE__ : List[str] = controlnet_down_block_res_samples
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.controlnet_mid_block(_lowercase )
# 6. scaling
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_lowercase , mid_block_res_sample=_lowercase )
| 35 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n    Example 1-A simple example with some errors\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n        >>> print(results)\n        {\'recall\': 0.6666666666666666}\n\n    Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n        >>> print(results)\n        {\'recall\': 0.5}\n\n    Example 3-The same example as Example 1, but with `sample_weight` included.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n        >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n        >>> print(results)\n        {\'recall\': 0.55}\n\n    Example 4-A multiclass example, using different averages.\n        >>> recall_metric = datasets.load_metric(\'recall\')\n        >>> predictions = [0, 2, 1, 0, 0, 1]\n        >>> references = [0, 1, 2, 0, 1, 2]\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n        >>> print(results)\n        {\'recall\': 0.3333333333333333}\n        >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n        >>> print(results)\n        {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=None , _lowercase : Tuple=1 , _lowercase : List[Any]="binary" , _lowercase : Any=None , _lowercase : Optional[int]="warn" , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
_lowercase , _lowercase , labels=_lowercase , pos_label=_lowercase , average=_lowercase , sample_weight=_lowercase , zero_division=_lowercase , )
return {"recall": float(_lowercase ) if score.size == 1 else score}
| 35 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
a_ :Union[str, Any] = logging.get_logger(__name__)
a_ :Union[str, Any] = OrderedDict(
[
('audio-spectrogram-transformer', 'ASTFeatureExtractor'),
('beit', 'BeitFeatureExtractor'),
('chinese_clip', 'ChineseCLIPFeatureExtractor'),
('clap', 'ClapFeatureExtractor'),
('clip', 'CLIPFeatureExtractor'),
('clipseg', 'ViTFeatureExtractor'),
('conditional_detr', 'ConditionalDetrFeatureExtractor'),
('convnext', 'ConvNextFeatureExtractor'),
('cvt', 'ConvNextFeatureExtractor'),
('data2vec-audio', 'Wav2Vec2FeatureExtractor'),
('data2vec-vision', 'BeitFeatureExtractor'),
('deformable_detr', 'DeformableDetrFeatureExtractor'),
('deit', 'DeiTFeatureExtractor'),
('detr', 'DetrFeatureExtractor'),
('dinat', 'ViTFeatureExtractor'),
('donut-swin', 'DonutFeatureExtractor'),
('dpt', 'DPTFeatureExtractor'),
('encodec', 'EncodecFeatureExtractor'),
('flava', 'FlavaFeatureExtractor'),
('glpn', 'GLPNFeatureExtractor'),
('groupvit', 'CLIPFeatureExtractor'),
('hubert', 'Wav2Vec2FeatureExtractor'),
('imagegpt', 'ImageGPTFeatureExtractor'),
('layoutlmv2', 'LayoutLMv2FeatureExtractor'),
('layoutlmv3', 'LayoutLMv3FeatureExtractor'),
('levit', 'LevitFeatureExtractor'),
('maskformer', 'MaskFormerFeatureExtractor'),
('mctct', 'MCTCTFeatureExtractor'),
('mobilenet_v1', 'MobileNetV1FeatureExtractor'),
('mobilenet_v2', 'MobileNetV2FeatureExtractor'),
('mobilevit', 'MobileViTFeatureExtractor'),
('nat', 'ViTFeatureExtractor'),
('owlvit', 'OwlViTFeatureExtractor'),
('perceiver', 'PerceiverFeatureExtractor'),
('poolformer', 'PoolFormerFeatureExtractor'),
('regnet', 'ConvNextFeatureExtractor'),
('resnet', 'ConvNextFeatureExtractor'),
('segformer', 'SegformerFeatureExtractor'),
('sew', 'Wav2Vec2FeatureExtractor'),
('sew-d', 'Wav2Vec2FeatureExtractor'),
('speech_to_text', 'Speech2TextFeatureExtractor'),
('speecht5', 'SpeechT5FeatureExtractor'),
('swiftformer', 'ViTFeatureExtractor'),
('swin', 'ViTFeatureExtractor'),
('swinv2', 'ViTFeatureExtractor'),
('table-transformer', 'DetrFeatureExtractor'),
('timesformer', 'VideoMAEFeatureExtractor'),
('tvlt', 'TvltFeatureExtractor'),
('unispeech', 'Wav2Vec2FeatureExtractor'),
('unispeech-sat', 'Wav2Vec2FeatureExtractor'),
('van', 'ConvNextFeatureExtractor'),
('videomae', 'VideoMAEFeatureExtractor'),
('vilt', 'ViltFeatureExtractor'),
('vit', 'ViTFeatureExtractor'),
('vit_mae', 'ViTFeatureExtractor'),
('vit_msn', 'ViTFeatureExtractor'),
('wav2vec2', 'Wav2Vec2FeatureExtractor'),
('wav2vec2-conformer', 'Wav2Vec2FeatureExtractor'),
('wavlm', 'Wav2Vec2FeatureExtractor'),
('whisper', 'WhisperFeatureExtractor'),
('xclip', 'CLIPFeatureExtractor'),
('yolos', 'YolosFeatureExtractor'),
]
)
a_ :int = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def a ( A__ ) -> Dict:
'''simple docstring'''
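    # Resolve a feature extractor class from its name by importing the matching model module.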
for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
if class_name in extractors:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_type_to_module_name(A__ )
SCREAMING_SNAKE_CASE__ : str = importlib.import_module(f""".{module_name}""" , '''transformers.models''' )
try:
return getattr(A__ , A__ )
except AttributeError:
continue
for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
if getattr(A__ , '''__name__''' , A__ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
SCREAMING_SNAKE_CASE__ : List[str] = importlib.import_module('''transformers''' )
if hasattr(A__ , A__ ):
return getattr(A__ , A__ )
return None
def a ( A__ , A__ = None , A__ = False , A__ = False , A__ = None , A__ = None , A__ = None , A__ = False , **A__ , ) -> List[Any]:
'''simple docstring'''
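    # Fetch the feature extractor configuration file from a repo (or local cache) and return it as a dict.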
SCREAMING_SNAKE_CASE__ : int = get_file_from_repo(
A__ , A__ , cache_dir=A__ , force_download=A__ , resume_download=A__ , proxies=A__ , use_auth_token=A__ , revision=A__ , local_files_only=A__ , )
if resolved_config_file is None:
logger.info(
'''Could not locate the feature extractor configuration file, will try to use the model config instead.''' )
return {}
with open(A__ , encoding='''utf-8''' ) as reader:
return json.load(A__ )
class lowercase :
def __init__( self : Any ):
raise EnvironmentError(
'''AutoFeatureExtractor is designed to be instantiated '''
'''using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method.''' )
@classmethod
@replace_list_option_in_docstrings(_lowercase )
def lowercase__ ( cls : List[str] , _lowercase : Optional[int] , **_lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = kwargs.pop('''config''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = kwargs.pop('''trust_remote_code''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = FeatureExtractionMixin.get_feature_extractor_dict(_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Any = config_dict.get('''feature_extractor_type''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Any = None
if "AutoFeatureExtractor" in config_dict.get('''auto_map''' , {} ):
SCREAMING_SNAKE_CASE__ : List[Any] = config_dict['''auto_map''']['''AutoFeatureExtractor''']
# If we don't find the feature extractor class in the feature extractor config, let's try the model config.
if feature_extractor_class is None and feature_extractor_auto_map is None:
if not isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoConfig.from_pretrained(_lowercase , **_lowercase )
# It could be in `config.feature_extractor_type``
SCREAMING_SNAKE_CASE__ : List[str] = getattr(_lowercase , '''feature_extractor_type''' , _lowercase )
if hasattr(_lowercase , '''auto_map''' ) and "AutoFeatureExtractor" in config.auto_map:
SCREAMING_SNAKE_CASE__ : Dict = config.auto_map['''AutoFeatureExtractor''']
if feature_extractor_class is not None:
SCREAMING_SNAKE_CASE__ : Any = feature_extractor_class_from_name(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = feature_extractor_auto_map is not None
SCREAMING_SNAKE_CASE__ : List[Any] = feature_extractor_class is not None or type(_lowercase ) in FEATURE_EXTRACTOR_MAPPING
SCREAMING_SNAKE_CASE__ : Dict = resolve_trust_remote_code(
_lowercase , _lowercase , _lowercase , _lowercase )
if has_remote_code and trust_remote_code:
SCREAMING_SNAKE_CASE__ : List[Any] = get_class_from_dynamic_module(
_lowercase , _lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = kwargs.pop('''code_revision''' , _lowercase )
if os.path.isdir(_lowercase ):
feature_extractor_class.register_for_auto_class()
return feature_extractor_class.from_dict(_lowercase , **_lowercase )
elif feature_extractor_class is not None:
return feature_extractor_class.from_dict(_lowercase , **_lowercase )
# Last try: we use the FEATURE_EXTRACTOR_MAPPING.
elif type(_lowercase ) in FEATURE_EXTRACTOR_MAPPING:
SCREAMING_SNAKE_CASE__ : Tuple = FEATURE_EXTRACTOR_MAPPING[type(_lowercase )]
return feature_extractor_class.from_dict(_lowercase , **_lowercase )
raise ValueError(
f"""Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a """
f"""`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following """
f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def lowercase__ ( _lowercase : Dict , _lowercase : Optional[Any] ):
FEATURE_EXTRACTOR_MAPPING.register(_lowercase , _lowercase )
| 35 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
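        # Convert word-level examples into padded WordPiece inputs with label ids aligned to the first sub-token of each word.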
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
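            # Yield one example at a time so tf.data.Dataset.from_generator can stream the features.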
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
return self.features[i]
| 35 | 1 |
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
IMAGE_PROCESSOR_MAPPING,
AutoConfig,
AutoImageProcessor,
CLIPConfig,
CLIPImageProcessor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_image_processing import CustomImageProcessor # noqa E402
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = 0
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : int = AutoImageProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : Any ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : List[Any] = Path(_lowercase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : Dict ):
# Ensure we can load the image processor from the feature extractor config
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[str] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Path(_lowercase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
SCREAMING_SNAKE_CASE__ : Any = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : List[str] ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : int = CLIPConfig()
            # Create a dummy config file with image_processor_type
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : Optional[int] = Path(_lowercase ) / '''config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
# remove image_processor_type to make sure config.json alone is enough to load image processor locally
SCREAMING_SNAKE_CASE__ : str = AutoImageProcessor.from_pretrained(_lowercase ).to_dict()
config_dict.pop('''image_processor_type''' )
SCREAMING_SNAKE_CASE__ : List[str] = CLIPImageProcessor(**_lowercase )
# save in new folder
model_config.save_pretrained(_lowercase )
config.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoImageProcessor.from_pretrained(_lowercase )
# make sure private variable is not incorrectly saved
SCREAMING_SNAKE_CASE__ : List[str] = json.loads(config.to_json_string() )
self.assertTrue('''_processor_class''' not in dict_as_saved )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : int ):
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : List[str] = Path(_lowercase ) / '''preprocessor_config.json'''
json.dump(
{'''image_processor_type''': '''CLIPImageProcessor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
def lowercase__ ( self : Union[str, Any] ):
with self.assertRaisesRegex(
_lowercase , '''clip-base is not a local folder and is not a valid model identifier''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained('''clip-base''' )
def lowercase__ ( self : str ):
with self.assertRaisesRegex(
_lowercase , R'''aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)''' ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoImageProcessor.from_pretrained(_lowercase , revision='''aaaaaa''' )
def lowercase__ ( self : List[str] ):
with self.assertRaisesRegex(
_lowercase , '''hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.''' , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/config-no-model''' )
def lowercase__ ( self : Any ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : List[str] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
# Test image processor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = AutoImageProcessor.from_pretrained(_lowercase , trust_remote_code=_lowercase )
self.assertEqual(reloaded_image_processor.__class__.__name__ , '''NewImageProcessor''' )
def lowercase__ ( self : List[str] ):
try:
AutoConfig.register('''custom''' , _lowercase )
AutoImageProcessor.register(_lowercase , _lowercase )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(_lowercase ):
AutoImageProcessor.register(_lowercase , _lowercase )
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Path(_lowercase ) / '''preprocessor_config.json'''
SCREAMING_SNAKE_CASE__ : int = Path(_lowercase ) / '''config.json'''
json.dump(
{'''feature_extractor_type''': '''CLIPFeatureExtractor''', '''processor_class''': '''CLIPProcessor'''} , open(_lowercase , '''w''' ) , )
json.dump({'''model_type''': '''clip'''} , open(_lowercase , '''w''' ) )
SCREAMING_SNAKE_CASE__ : List[Any] = CustomImageProcessor.from_pretrained(_lowercase )
# Now that the config is registered, it can be used as any other config with the auto-API
with tempfile.TemporaryDirectory() as tmp_dir:
image_processor.save_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained(_lowercase )
self.assertIsInstance(_lowercase , _lowercase )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
def lowercase__ ( self : Optional[Any] ):
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Union[str, Any] = True
try:
AutoConfig.register('''custom''' , _lowercase )
AutoImageProcessor.register(_lowercase , _lowercase )
# If remote code is not set, the default is to use local
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoImageProcessor.from_pretrained('''hf-internal-testing/test_dynamic_image_processor''' )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote code is disabled, we load the local one.
SCREAMING_SNAKE_CASE__ : List[Any] = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(image_processor.is_local )
# If remote is enabled, we load from the Hub
SCREAMING_SNAKE_CASE__ : Any = AutoImageProcessor.from_pretrained(
'''hf-internal-testing/test_dynamic_image_processor''' , trust_remote_code=_lowercase )
self.assertEqual(image_processor.__class__.__name__ , '''NewImageProcessor''' )
self.assertTrue(not hasattr(_lowercase , '''is_local''' ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in IMAGE_PROCESSOR_MAPPING._extra_content:
del IMAGE_PROCESSOR_MAPPING._extra_content[CustomConfig]
| 35 |
import os
def a ( A__ = "matrix.txt" ) -> int:
'''simple docstring'''
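    # Dynamic programming: dp[i][j] holds the minimal path sum from the top-left cell,
    # moving only right or down.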
with open(os.path.join(os.path.dirname(A__ ) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 | 1 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class lowercase ( unittest.TestCase ):
@slow
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[str] = XLMRobertaModel.from_pretrained('''xlm-roberta-base''' )
SCREAMING_SNAKE_CASE__ : Tuple = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
SCREAMING_SNAKE_CASE__ : List[str] = torch.Size((1, 12, 7_68) ) # batch_size, sequence_length, embedding_vector_dim
SCREAMING_SNAKE_CASE__ : List[Any] = torch.tensor(
[[-0.0101, 0.1218, -0.0803, 0.0801, 0.1327, 0.0776, -0.1215, 0.2383, 0.3338, 0.3106, 0.0300, 0.0252]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) )
@slow
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = XLMRobertaModel.from_pretrained('''xlm-roberta-large''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor([[0, 5_81, 1_02_69, 83, 9_99_42, 1_36, 6_07_42, 23, 70, 8_05_83, 1_82_76, 2]] )
# The dog is cute and lives in the garden house
SCREAMING_SNAKE_CASE__ : List[str] = torch.Size((1, 12, 10_24) ) # batch_size, sequence_length, embedding_vector_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.tensor(
[[-0.0699, -0.0318, 0.0705, -0.1241, 0.0999, -0.0520, 0.1004, -0.1838, -0.4704, 0.1437, 0.0821, 0.0126]] )
# xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
# xlmr.eval()
# expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )['''last_hidden_state'''].detach()
self.assertEqual(output.shape , _lowercase )
# compare the actual values for a slice of last dim
self.assertTrue(torch.allclose(output[:, :, -1] , _lowercase , atol=1E-3 ) )
| 35 |
from math import factorial
def a ( A__ = 2_0 ) -> int:
'''simple docstring'''
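    # The answer is the central binomial coefficient C(2n, n) = (2n)! / (n! * n!),
    # which counts the lattice paths through an n x n grid moving only right or down.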
SCREAMING_SNAKE_CASE__ : Tuple = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
SCREAMING_SNAKE_CASE__ : Dict = n // 2
return int(factorial(A__ ) / (factorial(A__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ :str = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 | 1 |
import argparse
import json
import logging
import os
import shutil
import sys
import tempfile
import unittest
from unittest import mock
import torch
from accelerate.utils import write_basic_config
from transformers.testing_utils import TestCasePlus, get_gpu_count, run_command, slow, torch_device
from transformers.utils import is_apex_available
logging.basicConfig(level=logging.DEBUG)
a_ :Optional[int] = logging.getLogger()
def a ( ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = argparse.ArgumentParser()
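    # Accept the "-f" flag that notebook/test runners pass through so argparse does not error out.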
parser.add_argument('''-f''' )
SCREAMING_SNAKE_CASE__ : int = parser.parse_args()
return args.f
def a ( A__ ) -> List[Any]:
'''simple docstring'''
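    # Load the metrics that the example script wrote to all_results.json in the output directory.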
SCREAMING_SNAKE_CASE__ : str = {}
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(A__ , '''all_results.json''' )
if os.path.exists(A__ ):
with open(A__ , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : List[str] = json.load(A__ )
else:
raise ValueError(f"""can't find {path}""" )
return results
def a ( ) -> Optional[Any]:
'''simple docstring'''
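    # fp16 runs are only exercised when running on CUDA with apex installed.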
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.is_available() and torch_device == '''cuda'''
return is_using_cuda and is_apex_available()
a_ :Any = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class lowercase ( _UpperCAmelCase ):
@classmethod
def lowercase__ ( cls : int ):
# Write Accelerate config, will pick up on CPU, GPU, and multi-GPU
SCREAMING_SNAKE_CASE__ : List[Any] = tempfile.mkdtemp()
SCREAMING_SNAKE_CASE__ : List[str] = os.path.join(cls.tmpdir , '''default_config.yml''' )
write_basic_config(save_location=cls.configPath )
SCREAMING_SNAKE_CASE__ : Any = ['''accelerate''', '''launch''', '''--config_file''', cls.configPath]
@classmethod
def lowercase__ ( cls : Any ):
shutil.rmtree(cls.tmpdir )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Any = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : str = f"""
{self.examples_dir}/pytorch/text-classification/run_glue_no_trainer.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--seed=42
--checkpointing_steps epoch
--with_tracking
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''glue_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : int = f"""
{self.examples_dir}/pytorch/language-modeling/run_clm_no_trainer.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--checkpointing_steps epoch
--with_tracking
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : List[Any] = get_results(_lowercase )
self.assertLess(result['''perplexity'''] , 1_00 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''clm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f"""
{self.examples_dir}/pytorch/language-modeling/run_mlm_no_trainer.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--num_train_epochs=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_results(_lowercase )
self.assertLess(result['''perplexity'''] , 42 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''mlm_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : List[Any] ):
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
SCREAMING_SNAKE_CASE__ : str = 7 if get_gpu_count() > 1 else 2
SCREAMING_SNAKE_CASE__ : Dict = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Optional[Any] = f"""
{self.examples_dir}/pytorch/token-classification/run_ner_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : List[Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.75 )
self.assertLess(result['''train_loss'''] , 0.5 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''ner_no_trainer''' ) ) )
@unittest.skip(reason='''Fix me @muellerzr''' )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Dict = f"""
{self.examples_dir}/pytorch/question-answering/run_qa_no_trainer.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--seed=42
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Any = get_results(_lowercase )
# Because we use --version_2_with_negative the testing script uses SQuAD v2 metrics.
self.assertGreaterEqual(result['''eval_f1'''] , 28 )
self.assertGreaterEqual(result['''eval_exact'''] , 28 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''qa_no_trainer''' ) ) )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : str = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Any = f"""
{self.examples_dir}/pytorch/multiple-choice/run_swag_no_trainer.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--max_train_steps=20
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Tuple = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.8 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''swag_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : List[Any] = f"""
{self.examples_dir}/pytorch/summarization/run_summarization_no_trainer.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_rouge1'''] , 10 )
self.assertGreaterEqual(result['''eval_rouge2'''] , 2 )
self.assertGreaterEqual(result['''eval_rougeL'''] , 7 )
self.assertGreaterEqual(result['''eval_rougeLsum'''] , 7 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''summarization_no_trainer''' ) ) )
@slow
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : str = f"""
{self.examples_dir}/pytorch/translation/run_translation_no_trainer.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--max_train_steps=50
--num_warmup_steps=8
--num_beams=6
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--source_lang en_XX
--target_lang ro_RO
--checkpointing_steps epoch
--with_tracking
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Any = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_bleu'''] , 30 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''epoch_0''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''translation_no_trainer''' ) ) )
@slow
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : int = logging.StreamHandler(sys.stdout )
logger.addHandler(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : List[str] = f"""
{self.examples_dir}/pytorch/semantic-segmentation/run_semantic_segmentation_no_trainer.py
--dataset_name huggingface/semantic-segmentation-test-sample
--output_dir {tmp_dir}
--max_train_steps=10
--num_warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--checkpointing_steps epoch
""".split()
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Optional[Any] = get_results(_lowercase )
self.assertGreaterEqual(result['''eval_overall_accuracy'''] , 0.10 )
@mock.patch.dict(os.environ , {'''WANDB_MODE''': '''offline'''} )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Any = self.get_auto_remove_tmp_dir()
SCREAMING_SNAKE_CASE__ : Dict = f"""
{self.examples_dir}/pytorch/image-classification/run_image_classification_no_trainer.py
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--max_train_steps 2
--train_val_split 0.1
--seed 42
--output_dir {tmp_dir}
--with_tracking
--checkpointing_steps 1
""".split()
if is_cuda_and_apex_available():
testargs.append('''--fp16''' )
run_command(self._launch_args + testargs )
SCREAMING_SNAKE_CASE__ : Optional[int] = get_results(_lowercase )
# The base model scores a 25%
self.assertGreaterEqual(result['''eval_accuracy'''] , 0.6 )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''step_1''' ) ) )
self.assertTrue(os.path.exists(os.path.join(_lowercase , '''image_classification_no_trainer''' ) ) )
| 35 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def a ( A__ ) -> str:
'''simple docstring'''
m = hashlib.md5(A__.tobytes() )
return m.hexdigest()
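# Usage sketch for the hash helper above (illustrative only; the image path is an
# example and Pillow is assumed to be installed):
#
#   from PIL import Image
#   digest = a(Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ) )
#   print(digest )  # a 32-character MD5 hex digest of the raw pixel bytes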
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is no hf-internal-testing tiny model for either GLPN or DPT''' )
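# Usage sketch mirroring the slow test above (the model name and URL are taken from
# that test; treat this as illustrative, not part of the test suite):
#
#   depth_estimator = pipeline('''depth-estimation''' , model='''Intel/dpt-large''' )
#   out = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
#   out['''depth''']            # a PIL.Image.Image depth map
#   out['''predicted_depth''']  # the raw torch.Tensor of predicted depths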
| 35 | 1 |
def a ( A__ = 1_0_0 ) -> int:
'''simple docstring'''
sum_cubes = (A__ * (A__ + 1) // 2) ** 2
sum_squares = A__ * (A__ + 1) * (2 * A__ + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F'''{a() = }''')
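# Minimal sanity check, assuming the fixes above: for n = 3 the square of the sum is
# (1 + 2 + 3) ** 2 = 36 and the sum of the squares is 1 + 4 + 9 = 14, so a(3) == 22.
assert a(3 ) == 22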
| 35 |
def a ( A__ ) -> int:
'''simple docstring'''
if not isinstance(A__ , int ):
raise TypeError('''Input value must be an \'int\' type''' )
elif A__ < 0:
raise ValueError('''Input value must be a positive integer''' )
return bin(A__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
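# Quick sanity check, assuming the fixes above: bin(25) == '0b11001' has three set bits.
assert a(25 ) == 3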
| 35 | 1 |
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a_ :Optional[Any] = logging.get_logger(__name__)
a_ :Dict = {
'SenseTime/deformable-detr': 'https://huggingface.co/sensetime/deformable-detr/resolve/main/config.json',
# See all Deformable DETR models at https://huggingface.co/models?filter=deformable-detr
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Tuple = '''deformable_detr'''
lowerCamelCase : Optional[int] = {
'''hidden_size''': '''d_model''',
'''num_attention_heads''': '''encoder_attention_heads''',
}
def __init__( self : Dict , _lowercase : int=True , _lowercase : Tuple=None , _lowercase : str=3 , _lowercase : str=3_00 , _lowercase : Any=10_24 , _lowercase : Optional[int]=6 , _lowercase : str=10_24 , _lowercase : List[str]=8 , _lowercase : List[str]=6 , _lowercase : Tuple=10_24 , _lowercase : int=8 , _lowercase : Union[str, Any]=0.0 , _lowercase : Tuple=True , _lowercase : Optional[Any]="relu" , _lowercase : List[str]=2_56 , _lowercase : Dict=0.1 , _lowercase : Tuple=0.0 , _lowercase : Tuple=0.0 , _lowercase : Tuple=0.02 , _lowercase : Optional[int]=1.0 , _lowercase : Dict=True , _lowercase : List[str]=False , _lowercase : Dict="sine" , _lowercase : Any="resnet50" , _lowercase : Any=True , _lowercase : List[str]=False , _lowercase : int=4 , _lowercase : Optional[Any]=4 , _lowercase : int=4 , _lowercase : Optional[Any]=False , _lowercase : Dict=3_00 , _lowercase : List[Any]=False , _lowercase : List[str]=1 , _lowercase : Tuple=5 , _lowercase : int=2 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=1 , _lowercase : Optional[Any]=5 , _lowercase : Optional[Any]=2 , _lowercase : str=0.1 , _lowercase : List[Any]=0.25 , _lowercase : Optional[int]=False , **_lowercase : Any , ):
if backbone_config is not None and use_timm_backbone:
raise ValueError('''You can\'t specify both `backbone_config` and `use_timm_backbone`.''' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('''`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.''' )
SCREAMING_SNAKE_CASE__ : Any = CONFIG_MAPPING['''resnet'''](out_features=['''stage4'''] )
elif isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : int = backbone_config.get('''model_type''' )
SCREAMING_SNAKE_CASE__ : List[str] = CONFIG_MAPPING[backbone_model_type]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = config_class.from_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = use_timm_backbone
SCREAMING_SNAKE_CASE__ : Optional[int] = backbone_config
SCREAMING_SNAKE_CASE__ : Union[str, Any] = num_channels
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE__ : List[Any] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Any = d_model
SCREAMING_SNAKE_CASE__ : Any = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_layers
SCREAMING_SNAKE_CASE__ : Dict = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : Any = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Any = decoder_layers
SCREAMING_SNAKE_CASE__ : Union[str, Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = dropout
SCREAMING_SNAKE_CASE__ : Dict = attention_dropout
SCREAMING_SNAKE_CASE__ : Optional[int] = activation_dropout
SCREAMING_SNAKE_CASE__ : Tuple = activation_function
SCREAMING_SNAKE_CASE__ : Optional[int] = init_std
SCREAMING_SNAKE_CASE__ : Tuple = init_xavier_std
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Optional[Any] = auxiliary_loss
SCREAMING_SNAKE_CASE__ : Optional[int] = position_embedding_type
SCREAMING_SNAKE_CASE__ : Tuple = backbone
SCREAMING_SNAKE_CASE__ : int = use_pretrained_backbone
SCREAMING_SNAKE_CASE__ : Any = dilation
# deformable attributes
SCREAMING_SNAKE_CASE__ : Optional[int] = num_feature_levels
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_n_points
SCREAMING_SNAKE_CASE__ : Any = decoder_n_points
SCREAMING_SNAKE_CASE__ : Tuple = two_stage
SCREAMING_SNAKE_CASE__ : Dict = two_stage_num_proposals
SCREAMING_SNAKE_CASE__ : Tuple = with_box_refine
if two_stage is True and with_box_refine is False:
raise ValueError('''If two_stage is True, with_box_refine must be True.''' )
# Hungarian matcher
SCREAMING_SNAKE_CASE__ : int = class_cost
SCREAMING_SNAKE_CASE__ : Optional[int] = bbox_cost
SCREAMING_SNAKE_CASE__ : int = giou_cost
# Loss coefficients
SCREAMING_SNAKE_CASE__ : Union[str, Any] = mask_loss_coefficient
SCREAMING_SNAKE_CASE__ : List[Any] = dice_loss_coefficient
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bbox_loss_coefficient
SCREAMING_SNAKE_CASE__ : List[Any] = giou_loss_coefficient
SCREAMING_SNAKE_CASE__ : Optional[Any] = eos_coefficient
SCREAMING_SNAKE_CASE__ : Union[str, Any] = focal_alpha
SCREAMING_SNAKE_CASE__ : List[Any] = disable_custom_kernels
super().__init__(is_encoder_decoder=_lowercase , **_lowercase )
@property
def lowercase__ ( self : Optional[int] ):
return self.encoder_attention_heads
@property
def lowercase__ ( self : Optional[int] ):
return self.d_model
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
SCREAMING_SNAKE_CASE__ : List[str] = self.backbone_config.to_dict()
SCREAMING_SNAKE_CASE__ : str = self.__class__.model_type
return output
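# Usage sketch (illustrative only; it assumes the de-obfuscated original class, whose
# attribute map exposes `num_attention_heads` as an alias for `encoder_attention_heads`):
#
#   config = lowercase(d_model=2_56 , encoder_layers=6 )
#   print(config.d_model )  # 256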
| 35 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def a ( A__ , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
x = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
x = math.floor(val / multiple ) * multiple
if x < min_val:
x = math.ceil(val / multiple ) * multiple
return x
output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
input_height , input_width = get_image_size(A__ )
output_height , output_width = output_size
# determine new height and width
scale_height = output_height / input_height
scale_width = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
scale_height = scale_width
else:
# fit height
scale_width = scale_height
new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
return (new_height, new_width)
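# Worked example (hypothetical sizes): for a 480x640 input with output_size=(3_84, 3_84),
# keep_aspect_ratio=True and multiple=32, the scales are 0.8 and 0.6; the height has the
# smaller deviation from 1, so both dimensions use scale 0.8, giving 0.8 * 640 = 512 for
# the width, and the function returns (3_84, 5_12).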
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
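# Usage sketch (assumes `image_processor` is an instance of the de-obfuscated original
# class and `image` is a PIL image; names are illustrative):
#
#   inputs = image_processor(image , return_tensors='''pt''' )
#   inputs['''pixel_values'''].shape  # (1, 3, 3_84, 3_84) with the default size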
| 35 | 1 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[Any] = (DDIMParallelScheduler,)
lowerCamelCase : Union[str, Any] = (('''eta''', 0.0), ('''num_inference_steps''', 50))
def lowercase__ ( self : Optional[int] , **_lowercase : Any ):
SCREAMING_SNAKE_CASE__ : int = {
'''num_train_timesteps''': 10_00,
'''beta_start''': 0.0001,
'''beta_end''': 0.02,
'''beta_schedule''': '''linear''',
'''clip_sample''': True,
}
config.update(**_lowercase )
return config
def lowercase__ ( self : Optional[Any] , **_lowercase : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Dict = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : str = self.get_scheduler_config(**_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler_class(**_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = 10, 0.0
SCREAMING_SNAKE_CASE__ : str = self.dummy_model()
SCREAMING_SNAKE_CASE__ : str = self.dummy_sample_deter
scheduler.set_timesteps(_lowercase )
for t in scheduler.timesteps:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = scheduler.step(_lowercase , _lowercase , _lowercase , _lowercase ).prev_sample
return sample
def lowercase__ ( self : List[str] ):
for timesteps in [1_00, 5_00, 10_00]:
self.check_over_configs(num_train_timesteps=_lowercase )
def lowercase__ ( self : Optional[Any] ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Any = self.get_scheduler_config(steps_offset=1 )
SCREAMING_SNAKE_CASE__ : Optional[int] = scheduler_class(**_lowercase )
scheduler.set_timesteps(5 )
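# With 10_00 training timesteps and 5 inference steps the stride is 2_00, so the raw
# schedule is [8_00, 6_00, 4_00, 2_00, 0]; steps_offset=1 shifts every entry up by one.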
assert torch.equal(scheduler.timesteps , torch.LongTensor([8_01, 6_01, 4_01, 2_01, 1] ) )
def lowercase__ ( self : Optional[Any] ):
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_lowercase , beta_end=_lowercase )
def lowercase__ ( self : Optional[Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_lowercase )
def lowercase__ ( self : List[Any] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_lowercase )
def lowercase__ ( self : List[str] ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_lowercase )
def lowercase__ ( self : Optional[int] ):
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=_lowercase )
def lowercase__ ( self : Union[str, Any] ):
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=_lowercase )
def lowercase__ ( self : str ):
self.check_over_configs(thresholding=_lowercase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=_lowercase , prediction_type=_lowercase , sample_max_value=_lowercase , )
def lowercase__ ( self : List[Any] ):
for t in [1, 10, 49]:
self.check_over_forward(time_step=_lowercase )
def lowercase__ ( self : Union[str, Any] ):
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 5_00] ):
self.check_over_forward(time_step=_lowercase , num_inference_steps=_lowercase )
def lowercase__ ( self : Any ):
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=_lowercase , eta=_lowercase )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : Tuple = scheduler_class(**_lowercase )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_20 , 4_00 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_80 , 9_60 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 , 4_86 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 , 9_98 ) - 0.02 ) ) < 1E-5
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.scheduler_classes[0]
SCREAMING_SNAKE_CASE__ : int = self.get_scheduler_config()
SCREAMING_SNAKE_CASE__ : int = scheduler_class(**_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = 10, 0.0
scheduler.set_timesteps(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = self.dummy_model()
SCREAMING_SNAKE_CASE__ : str = self.dummy_sample_deter
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.dummy_sample_deter + 0.1
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_sample_deter - 0.1
SCREAMING_SNAKE_CASE__ : List[str] = samplea.shape[0]
SCREAMING_SNAKE_CASE__ : int = torch.stack([samplea, samplea, samplea] , dim=0 )
SCREAMING_SNAKE_CASE__ : Tuple = torch.arange(_lowercase )[0:3, None].repeat(1 , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scheduler.batch_step_no_noise(_lowercase , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , _lowercase )
SCREAMING_SNAKE_CASE__ : Dict = torch.sum(torch.abs(_lowercase ) )
SCREAMING_SNAKE_CASE__ : int = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.full_loop()
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.sum(torch.abs(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 172.0067 ) < 1E-2
assert abs(result_mean.item() - 0.223967 ) < 1E-3
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Any = self.full_loop(prediction_type='''v_prediction''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.sum(torch.abs(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 52.5302 ) < 1E-2
assert abs(result_mean.item() - 0.0684 ) < 1E-3
def lowercase__ ( self : Optional[Any] ):
# We specify different beta, so that the first alpha is 0.99
SCREAMING_SNAKE_CASE__ : List[str] = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01 )
SCREAMING_SNAKE_CASE__ : Dict = torch.sum(torch.abs(_lowercase ) )
SCREAMING_SNAKE_CASE__ : int = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 149.8295 ) < 1E-2
assert abs(result_mean.item() - 0.1951 ) < 1E-3
def lowercase__ ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
SCREAMING_SNAKE_CASE__ : Dict = self.full_loop(set_alpha_to_one=_lowercase , beta_start=0.01 )
SCREAMING_SNAKE_CASE__ : List[str] = torch.sum(torch.abs(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Any = torch.mean(torch.abs(_lowercase ) )
assert abs(result_sum.item() - 149.0784 ) < 1E-2
assert abs(result_mean.item() - 0.1941 ) < 1E-3
| 35 |
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[str] = num_of_nodes
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
def lowercase__ ( self : Union[str, Any] , u_node : int , v_node : int , weight : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Optional[int] , _lowercase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[Any] , _lowercase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ : Any = self.find_component(_lowercase )
def lowercase__ ( self : int , component_size : list[int] , u_node : int , v_node : int ):
if component_size[u_node] <= component_size[v_node]:
self.m_component[u_node] = v_node
component_size[v_node] += component_size[u_node]
self.set_component(v_node )
elif component_size[u_node] >= component_size[v_node]:
self.m_component[v_node] = self.find_component(u_node )
component_size[u_node] += component_size[v_node]
self.set_component(u_node )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE__ : Tuple = self.m_component[u]
SCREAMING_SNAKE_CASE__ : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : int = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(edge , list ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = edge
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(component_size , u_component , v_component )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
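# Usage sketch (illustrative only: the obfuscated methods above all share the name
# `lowercase__`; in the original they are assumed to be `add_edge`, `find_component`,
# `set_component`, `union` and `boruvka`):
#
#   g = lowercase(3 )
#   g.add_edge(0 , 1 , 5 )
#   g.add_edge(1 , 2 , 1 )
#   g.add_edge(2 , 0 , 3 )
#   g.boruvka()  # prints the chosen edges and a total MST weight of 4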
| 35 | 1 |
# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
# This gives ~3MB in total for all files.
#
# If you want one 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
#
#
# It will be used then as "stas/tiny-wmt19-en-de"
# Build
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
a_ :Any = 'facebook/wmt19-en-de'
a_ :Tuple = FSMTTokenizer.from_pretrained(mname)
# get the correct vocab sizes, etc. from the master model
a_ :List[str] = FSMTConfig.from_pretrained(mname)
config.update(
dict(
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
)
a_ :Union[str, Any] = FSMTForConditionalGeneration(config)
print(F'''num of params {tiny_model.num_parameters()}''')
# Test
a_ :Any = tokenizer(['Making tiny model'], return_tensors='pt')
a_ :Tuple = tiny_model(**batch)
print('test output:', len(outputs.logits[0]))
# Save
a_ :str = 'tiny-wmt19-en-de'
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(F'''Generated {mname_tiny}''')
# Upload
# transformers-cli upload tiny-wmt19-en-de
| 35 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
from __future__ import annotations
def a ( A__ ) -> int:
'''simple docstring'''
# preprocessing the first row
for i in range(1 , len(A__[0] ) ):
A__[0][i] += A__[0][i - 1]
# preprocessing the first column
for i in range(1 , len(A__ ) ):
A__[i][0] += A__[i - 1][0]
# updating the path cost for current position
for i in range(1 , len(A__ ) ):
for j in range(1 , len(A__[0] ) ):
A__[i][j] += min(A__[i - 1][j] , A__[i][j - 1] )
return A__[-1][-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
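# Minimal worked example, assuming the fixes above: the cheapest monotone
# (right/down) path through this grid is 1 -> 3 -> 1 -> 1 -> 1, with cost 7.
assert a([[1, 3, 1], [1, 5, 1], [4, 2, 1]] ) == 7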
| 35 |
def a ( A__ ) -> str:
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def a ( A__ ) -> bytes:
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError(
'''Base16 encoded data is invalid:
Data does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set('''0123456789ABCDEF''' ):
raise ValueError(
'''Base16 encoded data is invalid:
Data is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(A__[i] + A__[i + 1] , 1_6 ) for i in range(0 , len(A__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
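# Sanity check, assuming the fixes above (note that the second definition of `a`
# shadows the first, so only the decoder is reachable at this point): '48656C6C6F'
# is the Base16 encoding of b'Hello'.
assert a('''48656C6C6F''' ) == b'''Hello'''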
| 35 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 35 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 1 |
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowercase :
def __init__( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : Any=2 , _lowercase : str=True , _lowercase : List[Any]=False , _lowercase : Optional[Any]=10 , _lowercase : Tuple=3 , _lowercase : int=32 * 8 , _lowercase : str=32 * 8 , _lowercase : Optional[Any]=4 , _lowercase : Optional[int]=64 , ):
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[Any] = is_training
SCREAMING_SNAKE_CASE__ : Any = use_auxiliary_loss
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_queries
SCREAMING_SNAKE_CASE__ : int = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = min_size
SCREAMING_SNAKE_CASE__ : Dict = max_size
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : List[Any] = hidden_dim
SCREAMING_SNAKE_CASE__ : str = hidden_dim
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : str = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_lowercase )
SCREAMING_SNAKE_CASE__ : str = torch.ones([self.batch_size, self.min_size, self.max_size] , device=_lowercase )
SCREAMING_SNAKE_CASE__ : int = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] , device=_lowercase ) > 0.5
).float()
SCREAMING_SNAKE_CASE__ : List[Any] = (torch.rand((self.batch_size, self.num_labels) , device=_lowercase ) > 0.5).long()
SCREAMING_SNAKE_CASE__ : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : str = MaskaFormerConfig(
hidden_size=self.hidden_dim , )
SCREAMING_SNAKE_CASE__ : List[str] = self.num_queries
SCREAMING_SNAKE_CASE__ : List[Any] = self.num_labels
SCREAMING_SNAKE_CASE__ : Any = [1, 1, 1, 1]
SCREAMING_SNAKE_CASE__ : int = self.num_channels
SCREAMING_SNAKE_CASE__ : List[str] = 64
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_28
SCREAMING_SNAKE_CASE__ : int = self.hidden_dim
SCREAMING_SNAKE_CASE__ : int = self.hidden_dim
SCREAMING_SNAKE_CASE__ : Optional[int] = self.hidden_dim
return config
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : List[Any] = {'''pixel_values''': pixel_values, '''pixel_mask''': pixel_mask}
return config, inputs_dict
def lowercase__ ( self : List[Any] , _lowercase : int , _lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = output.encoder_hidden_states
SCREAMING_SNAKE_CASE__ : Any = output.pixel_decoder_hidden_states
SCREAMING_SNAKE_CASE__ : Tuple = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowercase ) , len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_lowercase ) , config.decoder_layers )
def lowercase__ ( self : str , _lowercase : Optional[int] , _lowercase : Optional[int] , _lowercase : Tuple , _lowercase : Any=False ):
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MaskaFormerModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(pixel_values=_lowercase , pixel_mask=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_lowercase , output_hidden_states=_lowercase )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape , (self.batch_size, self.num_queries, self.hidden_dim) , )
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_lowercase , _lowercase )
def lowercase__ ( self : str , _lowercase : Union[str, Any] , _lowercase : List[Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Any = MaskaFormerForUniversalSegmentation(config=_lowercase )
model.to(_lowercase )
model.eval()
def comm_check_on_output(_lowercase : Optional[int] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape , (self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) , )
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape , (self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Any = model(pixel_values=_lowercase , pixel_mask=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
comm_check_on_output(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = model(
pixel_values=_lowercase , pixel_mask=_lowercase , mask_labels=_lowercase , class_labels=_lowercase )
comm_check_on_output(_lowercase )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape , torch.Size([1] ) )
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Any = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
lowerCamelCase : Dict = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
lowerCamelCase : str = False
lowerCamelCase : Dict = False
lowerCamelCase : Dict = False
lowerCamelCase : Any = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = MaskaFormerModelTester(self )
SCREAMING_SNAKE_CASE__ : List[Any] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def lowercase__ ( self : str ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowercase , **_lowercase , output_hidden_states=_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*_lowercase )
@unittest.skip(reason='''Mask2Former does not use inputs_embeds''' )
def lowercase__ ( self : int ):
pass
@unittest.skip(reason='''Mask2Former does not have a get_input_embeddings method''' )
def lowercase__ ( self : List[str] ):
pass
@unittest.skip(reason='''Mask2Former is not a generative model''' )
def lowercase__ ( self : List[Any] ):
pass
@unittest.skip(reason='''Mask2Former does not use token embeddings''' )
def lowercase__ ( self : Optional[int] ):
pass
@require_torch_multi_gpu
@unittest.skip(
reason='''Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`''' )
def lowercase__ ( self : Union[str, Any] ):
pass
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[str] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
@slow
def lowercase__ ( self : Dict ):
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
SCREAMING_SNAKE_CASE__ : Optional[Any] = MaskaFormerModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[str] = (self.model_tester.min_size,) * 2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''pixel_values''': torch.randn((2, 3, *size) , device=_lowercase ),
'''mask_labels''': torch.randn((2, 10, *size) , device=_lowercase ),
'''class_labels''': torch.zeros(2 , 10 , device=_lowercase ).long(),
}
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.get_config()
SCREAMING_SNAKE_CASE__ : List[str] = MaskaFormerForUniversalSegmentation(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(_lowercase , **_lowercase , output_hidden_states=_lowercase )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[int] = model_class(_lowercase ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase , output_attentions=_lowercase )
self.assertTrue(outputs.attentions is not None )
def lowercase__ ( self : List[Any] ):
if not self.model_tester.is_training:
return
SCREAMING_SNAKE_CASE__ : Optional[int] = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : str = model_class(_lowercase )
model.to(_lowercase )
model.train()
SCREAMING_SNAKE_CASE__ : int = model(_lowercase , mask_labels=_lowercase , class_labels=_lowercase ).loss
loss.backward()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : str = self.all_model_classes[1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Union[str, Any] = True
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_lowercase ).to(_lowercase )
model.train()
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , mask_labels=_lowercase , class_labels=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Dict = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Any = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_lowercase )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
a_ :Optional[int] = 1e-4
def a ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_vision
@slow
class lowercase ( unittest.TestCase ):
@cached_property
def model_checkpoints( self : Tuple ):
    return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def default_image_processor( self : Any ):
    return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : int = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Tuple = prepare_img()
SCREAMING_SNAKE_CASE__ : List[str] = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : int = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 3_84, 3_84) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_lowercase )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : str = image_processor(_lowercase , return_tensors='''pt''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['''pixel_values'''].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_lowercase , (1, 3, 3_84, 3_84) )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Tuple = model(**_lowercase )
# masks_queries_logits
SCREAMING_SNAKE_CASE__ : Optional[Any] = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape , (1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
SCREAMING_SNAKE_CASE__ : int = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
SCREAMING_SNAKE_CASE__ : str = torch.tensor(_lowercase ).to(_lowercase )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] , _lowercase , atol=_lowercase ) )
# class_queries_logits
SCREAMING_SNAKE_CASE__ : Tuple = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape , (1, model.config.num_queries, model.config.num_labels + 1) )
SCREAMING_SNAKE_CASE__ : Dict = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] , _lowercase , atol=_lowercase ) )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[Any] = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processor(
[np.zeros((3, 8_00, 13_33) ), np.zeros((3, 8_00, 13_33) )] , segmentation_maps=[np.zeros((3_84, 3_84) ).astype(np.float32 ), np.zeros((3_84, 3_84) ).astype(np.float32 )] , return_tensors='''pt''' , )
SCREAMING_SNAKE_CASE__ : Tuple = inputs['''pixel_values'''].to(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = [el.to(_lowercase ) for el in inputs['''mask_labels''']]
SCREAMING_SNAKE_CASE__ : List[Any] = [el.to(_lowercase ) for el in inputs['''class_labels''']]
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**_lowercase )
self.assertTrue(outputs.loss is not None )
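# A minimal sketch of the training-style call exercised above (hypothetical
# variable names; assumes a fine-tunable checkpoint and a vision backend):
#     processor = MaskaFormerImageProcessor.from_pretrained(checkpoint)
#     inputs = processor(images, segmentation_maps=maps, return_tensors="pt")
#     loss = model(**inputs).loss  # combined mask + class loss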
| 35 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_groupvit'] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure['modeling_tf_groupvit'] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
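# With the lazy module installed in sys.modules, `from ... import GroupViTModel`
# only pulls in the heavy torch/TF code on first attribute access -- a sketch of
# the intended behaviour of _LazyModule, not an additional code path.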
| 35 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
os.environ['XLA_PYTHON_CLIENT_ALLOCATOR'] = 'platform'
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester :
lowerCamelCase : int = PegasusConfig
lowerCamelCase : int = {}
lowerCamelCase : Optional[Any] = '''gelu'''
def __init__( self : Tuple , _lowercase : Dict , _lowercase : Union[str, Any]=13 , _lowercase : Dict=7 , _lowercase : Optional[int]=True , _lowercase : Dict=False , _lowercase : Optional[int]=99 , _lowercase : Optional[int]=32 , _lowercase : List[str]=5 , _lowercase : Optional[Any]=4 , _lowercase : Union[str, Any]=37 , _lowercase : Optional[int]=0.1 , _lowercase : Dict=0.1 , _lowercase : Dict=20 , _lowercase : Dict=2 , _lowercase : Union[str, Any]=1 , _lowercase : List[Any]=0 , ):
SCREAMING_SNAKE_CASE__ : int = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = seq_length
SCREAMING_SNAKE_CASE__ : List[Any] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = vocab_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : str = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Tuple = num_attention_heads
SCREAMING_SNAKE_CASE__ : int = intermediate_size
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Dict = eos_token_id
SCREAMING_SNAKE_CASE__ : Any = pad_token_id
SCREAMING_SNAKE_CASE__ : Optional[int] = bos_token_id
def prepare_config_and_inputs_for_common( self : str ):
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
SCREAMING_SNAKE_CASE__ : Dict = np.concatenate([input_ids, eos_tensor] , axis=1 )
SCREAMING_SNAKE_CASE__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Any = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
SCREAMING_SNAKE_CASE__ : List[str] = prepare_pegasus_inputs_dict(_lowercase , _lowercase , _lowercase )
return config, inputs_dict
def check_use_cache_forward( self : List[Any] , _lowercase : Dict , _lowercase : Dict , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = 20
SCREAMING_SNAKE_CASE__ : List[Any] = model_class_name(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.encode(inputs_dict['''input_ids'''] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
SCREAMING_SNAKE_CASE__ : List[str] = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : int = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='''i4''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE__ : Any = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
SCREAMING_SNAKE_CASE__ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.decode(
decoder_input_ids[:, -1:] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=_lowercase , )
SCREAMING_SNAKE_CASE__ : str = model.decode(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def check_use_cache_forward_with_attn_mask( self : str , _lowercase : Any , _lowercase : str , _lowercase : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = 20
SCREAMING_SNAKE_CASE__ : List[Any] = model_class_name(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = (
inputs_dict['''decoder_input_ids'''],
inputs_dict['''decoder_attention_mask'''],
)
SCREAMING_SNAKE_CASE__ : Tuple = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
SCREAMING_SNAKE_CASE__ : int = model.init_cache(decoder_input_ids.shape[0] , _lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
SCREAMING_SNAKE_CASE__ : str = model.decode(
decoder_input_ids[:, :-1] , _lowercase , decoder_attention_mask=_lowercase , past_key_values=_lowercase , decoder_position_ids=_lowercase , )
SCREAMING_SNAKE_CASE__ : str = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='''i4''' )
SCREAMING_SNAKE_CASE__ : Tuple = model.decode(
decoder_input_ids[:, -1:] , _lowercase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=_lowercase , decoder_position_ids=_lowercase , )
SCREAMING_SNAKE_CASE__ : List[Any] = model.decode(_lowercase , _lowercase , decoder_attention_mask=_lowercase )
SCREAMING_SNAKE_CASE__ : str = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"""Max diff is {diff}""" )
def prepare_pegasus_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , ) -> dict:
    """Build the Pegasus input dict, deriving attention masks from pad tokens when absent."""
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids , config.pad_token_id ).astype(np.int8 )
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape , dtype=np.int8 ),
                np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.int8 ),
            ] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : str = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowerCamelCase : List[str] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowerCamelCase : str = True
lowerCamelCase : str = False
lowerCamelCase : List[str] = False
lowerCamelCase : Optional[Any] = False
def setUp( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxPegasusModelTester(self )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : List[str] ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(_lowercase , _lowercase , _lowercase )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(_lowercase , _lowercase , _lowercase )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE__ : List[str] = self._prepare_for_class(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model_class(_lowercase )
@jax.jit
def encode_jitted(_lowercase : Tuple , _lowercase : Any=None , **_lowercase : List[Any] ):
return model.encode(input_ids=_lowercase , attention_mask=_lowercase )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE__ : List[str] = encode_jitted(**_lowercase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE__ : List[str] = encode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
SCREAMING_SNAKE_CASE__ : Dict = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model.encode(inputs_dict['''input_ids'''] , inputs_dict['''attention_mask'''] )
SCREAMING_SNAKE_CASE__ : Dict = {
'''decoder_input_ids''': inputs_dict['''decoder_input_ids'''],
'''decoder_attention_mask''': inputs_dict['''decoder_attention_mask'''],
'''encoder_outputs''': encoder_outputs,
}
@jax.jit
def decode_jitted(_lowercase : str , _lowercase : List[Any] , _lowercase : Dict ):
return model.decode(
decoder_input_ids=_lowercase , decoder_attention_mask=_lowercase , encoder_outputs=_lowercase , )
with self.subTest('''JIT Enabled''' ):
SCREAMING_SNAKE_CASE__ : List[Any] = decode_jitted(**_lowercase ).to_tuple()
with self.subTest('''JIT Disabled''' ):
with jax.disable_jit():
SCREAMING_SNAKE_CASE__ : Dict = decode_jitted(**_lowercase ).to_tuple()
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for jitted_output, output in zip(_lowercase , _lowercase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def lowercase__ ( self : Optional[int] ):
for model_class_name in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Tuple = model_class_name.from_pretrained('''google/pegasus-large''' , from_pt=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.ones((1, 1) )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase )
self.assertIsNotNone(_lowercase )
@slow
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxPegasusForConditionalGeneration.from_pretrained('''google/pegasus-xsum''' )
SCREAMING_SNAKE_CASE__ : Any = PegasusTokenizer.from_pretrained('''google/pegasus-xsum''' )
SCREAMING_SNAKE_CASE__ : int = [
''' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.''',
''' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ''',
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
'''California\'s largest electricity provider has turned off power to hundreds of thousands of customers.''',
'''Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.''',
]
SCREAMING_SNAKE_CASE__ : int = tokenizer(_lowercase , return_tensors='''np''' , truncation=_lowercase , max_length=5_12 , padding=_lowercase )
SCREAMING_SNAKE_CASE__ : int = model.generate(**_lowercase , num_beams=2 ).sequences
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.batch_decode(_lowercase , skip_special_tokens=_lowercase )
assert tgt_text == decoded
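# Minimal standalone sketch of the generate/decode round trip exercised above
# (downloads the public google/pegasus-xsum checkpoint; article text invented):
#     model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
#     tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")
#     batch = tokenizer(["Some long article ..."], return_tensors="np", truncation=True)
#     summary_ids = model.generate(**batch, num_beams=2).sequences
#     tokenizer.batch_decode(summary_ids, skip_special_tokens=True)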
| 35 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def _create_example_records( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _create_example_dict( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
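# Sketch of the schema rules asserted above (assumes `datasets` is installed):
#     dset = Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
#     dset[0]  # {'col_1': 1}     -- the first record fixes the column set
#     dset[1]  # {'col_1': None}  -- missing values are filled with None
#     Dataset.from_list([{"col_1": []}, {"col_1": [1, 2]}]).features
#     # {'col_1': Sequence(Value('int64'))} -- type inferred from the second record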
| 35 | 1 |
# Imports
import numpy as np
class IndexCalculation :
def __init__( self : Dict , _lowercase : List[str]=None , _lowercase : int=None , _lowercase : int=None , _lowercase : Dict=None , _lowercase : Optional[int]=None ):
self.set_matricies(red=_lowercase , green=_lowercase , blue=_lowercase , red_edge=_lowercase , nir=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Any=None , _lowercase : Any=None , _lowercase : Optional[Any]=None , _lowercase : Union[str, Any]=None , _lowercase : Optional[int]=None ):
if red is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = red
if green is not None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = green
if blue is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = blue
if red_edge is not None:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = red_edge
if nir is not None:
SCREAMING_SNAKE_CASE__ : Any = nir
return True
def calculation( self : Union[str, Any] , _lowercase : List[str]="" , _lowercase : Any=None , _lowercase : Tuple=None , _lowercase : int=None , _lowercase : Dict=None , _lowercase : Any=None ):
self.set_matricies(red=_lowercase , green=_lowercase , blue=_lowercase , red_edge=_lowercase , nir=_lowercase )
SCREAMING_SNAKE_CASE__ : int = {
'''ARVI2''': self.arvaa,
'''CCCI''': self.ccci,
'''CVI''': self.cvi,
'''GLI''': self.gli,
'''NDVI''': self.ndvi,
'''BNDVI''': self.bndvi,
'''redEdgeNDVI''': self.red_edge_ndvi,
'''GNDVI''': self.gndvi,
'''GBNDVI''': self.gbndvi,
'''GRNDVI''': self.grndvi,
'''RBNDVI''': self.rbndvi,
'''PNDVI''': self.pndvi,
'''ATSAVI''': self.atsavi,
'''BWDRVI''': self.bwdrvi,
'''CIgreen''': self.ci_green,
'''CIrededge''': self.ci_rededge,
'''CI''': self.ci,
'''CTVI''': self.ctvi,
'''GDVI''': self.gdvi,
'''EVI''': self.evi,
'''GEMI''': self.gemi,
'''GOSAVI''': self.gosavi,
'''GSAVI''': self.gsavi,
'''Hue''': self.hue,
'''IVI''': self.ivi,
'''IPVI''': self.ipvi,
'''I''': self.i,
'''RVI''': self.rvi,
'''MRVI''': self.mrvi,
'''MSAVI''': self.m_savi,
'''NormG''': self.norm_g,
'''NormNIR''': self.norm_nir,
'''NormR''': self.norm_r,
'''NGRDI''': self.ngrdi,
'''RI''': self.ri,
'''S''': self.s,
'''IF''': self._if,
'''DVI''': self.dvi,
'''TVI''': self.tvi,
'''NDRE''': self.ndre,
}
try:
return funcs[index]()
except KeyError:
print('''Index not in the list!''' )
return False
def lowercase__ ( self : int ):
return -0.18 + (1.17 * ((self.nir - self.red) / (self.nir + self.red)))
def lowercase__ ( self : Any ):
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def lowercase__ ( self : Dict ):
return self.nir * (self.red / (self.green**2))
def lowercase__ ( self : List[str] ):
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def lowercase__ ( self : int ):
return (self.nir - self.red) / (self.nir + self.red)
def lowercase__ ( self : List[Any] ):
return (self.nir - self.blue) / (self.nir + self.blue)
def lowercase__ ( self : Any ):
return (self.redEdge - self.red) / (self.redEdge + self.red)
def lowercase__ ( self : str ):
return (self.nir - self.green) / (self.nir + self.green)
def lowercase__ ( self : List[str] ):
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def lowercase__ ( self : int ):
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def lowercase__ ( self : Union[str, Any] ):
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def lowercase__ ( self : int ):
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def lowercase__ ( self : Dict , _lowercase : Optional[Any]=0.08 , _lowercase : Tuple=1.22 , _lowercase : Dict=0.03 ):
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def lowercase__ ( self : Dict ):
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def lowercase__ ( self : List[Any] ):
return (self.nir / self.green) - 1
def lowercase__ ( self : Any ):
return (self.nir / self.redEdge) - 1
def lowercase__ ( self : List[str] ):
return (self.red - self.blue) / self.red
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Dict = self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def lowercase__ ( self : Optional[int] ):
return self.nir - self.green
def lowercase__ ( self : List[str] ):
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.25 * n) - (self.red - 0.125) / (1 - self.red)
def lowercase__ ( self : int , _lowercase : Any=0.16 ):
return (self.nir - self.green) / (self.nir + self.green + y)
def lowercase__ ( self : Tuple , _lowercase : Union[str, Any]=0.5 ):
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def lowercase__ ( self : Dict ):
return np.arctan(
((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
def lowercase__ ( self : Any , _lowercase : int=None , _lowercase : Optional[int]=None ):
return (self.nir - b) / (a * self.red)
def lowercase__ ( self : Dict ):
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def lowercase__ ( self : Dict ):
return (self.red + self.green + self.blue) / 30.5
def lowercase__ ( self : List[str] ):
return self.nir / self.red
def lowercase__ ( self : List[str] ):
return (self.rvi() - 1) / (self.rvi() + 1)
def lowercase__ ( self : List[str] ):
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def lowercase__ ( self : int ):
return self.green / (self.nir + self.red + self.green)
def lowercase__ ( self : List[Any] ):
return self.nir / (self.nir + self.red + self.green)
def lowercase__ ( self : Optional[Any] ):
return self.red / (self.nir + self.red + self.green)
def lowercase__ ( self : Union[str, Any] ):
return (self.green - self.red) / (self.green + self.red)
def lowercase__ ( self : Optional[Any] ):
return (self.red - self.green) / (self.red + self.green)
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Dict = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
SCREAMING_SNAKE_CASE__ : Dict = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def lowercase__ ( self : List[str] ):
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def lowercase__ ( self : List[str] ):
return self.nir / self.red
def lowercase__ ( self : List[Any] ):
return (self.ndvi() + 0.5) ** (1 / 2)
def lowercase__ ( self : Any ):
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
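if __name__ == "__main__":
    # Standalone sketch of the core idea (pure numpy, independent of the class
    # above): NDVI = (NIR - RED) / (NIR + RED); healthy vegetation tends toward 1.
    example_nir = np.array([[0.8, 0.6], [0.7, 0.9]])
    example_red = np.array([[0.1, 0.2], [0.3, 0.1]])
    print((example_nir - example_red) / (example_nir + example_red))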
| 35 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class CNN :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE__ : str = conva_get[2]
SCREAMING_SNAKE_CASE__ : Any = size_pa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE__ : Tuple = rate_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1
def save_model( self : Union[str, Any] , _lowercase : Any ):
# save model dict with pickle
SCREAMING_SNAKE_CASE__ : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_lowercase , '''wb''' ) as f:
pickle.dump(_lowercase , _lowercase )
print(f"""Model saved: {save_path}""" )
@classmethod
def read_model( cls : Dict , _lowercase : int ):
# read saved model
with open(_lowercase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pickle.load(_lowercase ) # noqa: S301
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE__ : Dict = CNN(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
# modify model parameter
SCREAMING_SNAKE_CASE__ : List[str] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE__ : Any = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def sig( self : str , x : Optional[int] ):
    return 1 / (1 + np.exp(-1 * x ))
def do_round( self : Union[str, Any] , _lowercase : List[str] ):
    return round(_lowercase , 3 )
def convolute( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ):
# convolution process
SCREAMING_SNAKE_CASE__ : Tuple = convs[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = convs[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.shape(_lowercase )[0]
# get the data slice of original image data, data_focus
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowercase )
# calculate the feature map of every kernel and save the results as a list of matrices
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(_lowercase ).reshape(
_lowercase , _lowercase )
data_featuremap.append(_lowercase )
# expand each data slice to one dimension
SCREAMING_SNAKE_CASE__ : int = []
for each_focus in data_focus:
focusa_list.extend(self._expand_mat(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(_lowercase )
return focus_list, data_featuremap
def pooling( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]="average_pool" ):
# pooling process
SCREAMING_SNAKE_CASE__ : List[str] = len(featuremaps[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_map in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Any = featuremaps[i_map]
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(0 , _lowercase , _lowercase ):
for j_focus in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase )
featuremap_pooled.append(_lowercase )
return featuremap_pooled
def _expand( self : Optional[Any] , _lowercase : Optional[Any] ):
    # expand three-dimensional data into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.shape(data[i] )
SCREAMING_SNAKE_CASE__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE__ : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_lowercase )
return data_expanded
def _expand_mat( self : Tuple , _lowercase : Optional[int] ):
    # expand a matrix into a one-dimensional list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def _calculate_gradient_from_pool( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Dict = 0
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : Any = np.ones((size_map, size_map) )
for i in range(0 , _lowercase , _lowercase ):
for j in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE__ : Dict = i_pool + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.multiply(
_lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowercase )
return pd_all
def train( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ):
    # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase )
# --------------Model Learning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji )
SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# fully connected layer
SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
# accumulate the error over all training images
SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1
SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '''+-''' )
plt.plot(_lowercase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
print('''------------------Training Completed---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def predict( self : Union[str, Any] , _lowercase : int ):
    # model prediction
SCREAMING_SNAKE_CASE__ : Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def convolution( self : Optional[int] , _lowercase : Tuple ):
    # return the image data after the convolution and pooling steps so we can inspect them
SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
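# Hypothetical usage sketch (argument values invented for illustration; the
# constructor signature in the original source is
# CNN(conv1_get, size_p1, bp_num1, bp_num2, bp_num3, rate_w=0.2, rate_t=0.2)):
#     cnn = CNN([3, 2, 1], 2, 9, 20, 10)
#     mse = cnn.train(patterns, datas_train, datas_teach, n_repeat=50,
#                     error_accuracy=0.1, draw_e=True)
#     predictions = cnn.predict(datas_test)
#     cnn.save_model("model.pkl")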
| 35 | 1 |
import os
from typing import BinaryIO, Optional, Union
import numpy as np
import pyarrow.parquet as pq
from .. import Audio, Dataset, Features, Image, NamedSplit, Value, config
from ..features.features import FeatureType, _visit
from ..formatting import query_table
from ..packaged_modules import _PACKAGED_DATASETS_MODULES
from ..packaged_modules.parquet.parquet import Parquet
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
def get_writer_batch_size(features: Features) -> Optional[int]:
    """Return the writer batch size implied by the feature types, if any."""
    batch_size = np.inf

    def set_batch_size(feature: FeatureType) -> None:
        nonlocal batch_size
        if isinstance(feature, Image):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_IMAGE_DATASETS)
        elif isinstance(feature, Audio):
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_AUDIO_DATASETS)
        elif isinstance(feature, Value) and feature.dtype == "binary":
            batch_size = min(batch_size, config.PARQUET_ROW_GROUP_SIZE_FOR_BINARY_DATASETS)

    _visit(features, set_batch_size)
return None if batch_size is np.inf else batch_size
class ParquetDatasetReader( _UpperCAmelCase ):
def __init__( self : Optional[Any] , _lowercase : NestedDataStructureLike[PathLike] , _lowercase : Optional[NamedSplit] = None , _lowercase : Optional[Features] = None , _lowercase : str = None , _lowercase : bool = False , _lowercase : bool = False , _lowercase : Optional[int] = None , **_lowercase : Tuple , ):
super().__init__(
_lowercase , split=_lowercase , features=_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase , streaming=_lowercase , num_proc=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = path_or_paths if isinstance(_lowercase , _lowercase ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE__ : str = _PACKAGED_DATASETS_MODULES['''parquet'''][1]
SCREAMING_SNAKE_CASE__ : Optional[int] = Parquet(
cache_dir=_lowercase , data_files=_lowercase , features=_lowercase , hash=_lowercase , **_lowercase , )
def read( self : Any ):
# Build iterable dataset
if self.streaming:
SCREAMING_SNAKE_CASE__ : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
self.builder.download_and_prepare(
download_config=_lowercase , download_mode=_lowercase , verification_mode=_lowercase , base_path=_lowercase , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE__ : Any = self.builder.as_dataset(
split=self.split , verification_mode=_lowercase , in_memory=self.keep_in_memory )
return dataset
class ParquetDatasetWriter :
def __init__( self : Dict , _lowercase : Dataset , _lowercase : Union[PathLike, BinaryIO] , _lowercase : Optional[int] = None , **_lowercase : Any , ):
SCREAMING_SNAKE_CASE__ : List[str] = dataset
SCREAMING_SNAKE_CASE__ : List[str] = path_or_buf
SCREAMING_SNAKE_CASE__ : int = batch_size or get_writer_batch_size(dataset.features )
SCREAMING_SNAKE_CASE__ : str = parquet_writer_kwargs
def write( self : List[str] ):
SCREAMING_SNAKE_CASE__ : int = self.batch_size if self.batch_size else config.DEFAULT_MAX_BATCH_SIZE
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with open(self.path_or_buf , '''wb+''' ) as buffer:
SCREAMING_SNAKE_CASE__ : Any = self._write(file_obj=_lowercase , batch_size=_lowercase , **self.parquet_writer_kwargs )
else:
SCREAMING_SNAKE_CASE__ : Optional[int] = self._write(file_obj=self.path_or_buf , batch_size=_lowercase , **self.parquet_writer_kwargs )
return written
def _write( self : Optional[int] , _lowercase : BinaryIO , _lowercase : int , **_lowercase : str ):
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Any = parquet_writer_kwargs.pop('''path_or_buf''' , _lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = self.dataset.features.arrow_schema
SCREAMING_SNAKE_CASE__ : Any = pq.ParquetWriter(_lowercase , schema=_lowercase , **_lowercase )
for offset in logging.tqdm(
range(0 , len(self.dataset ) , _lowercase ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating parquet from Arrow format''' , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = query_table(
table=self.dataset._data , key=slice(_lowercase , offset + batch_size ) , indices=self.dataset._indices if self.dataset._indices is not None else None , )
writer.write_table(_lowercase )
written += batch.nbytes
writer.close()
return written
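# Minimal sketch of the round trip these classes implement (hypothetical paths;
# assumes the dataset fits in memory):
#     ds = Dataset.from_dict({"a": [1, 2, 3]})
#     ParquetDatasetWriter(ds, "out.parquet", batch_size=2).write()
#     ds2 = ParquetDatasetReader("out.parquet").read()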
| 35 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class TrOCRStandaloneDecoderModelTester :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def prepare_config_and_inputs( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def create_and_check_decoder_model_past( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
# create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
# append to next input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def prepare_config_and_inputs_for_common( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def setUp( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
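# Sketch of the KV-cache equivalence checked by create_and_check_decoder_model_past:
# decoding the full sequence at once and decoding the last token with
# past_key_values must agree on the final hidden state, e.g.
#     full = model(input_ids)["last_hidden_state"]
#     step = model(next_tokens, past_key_values=past)["last_hidden_state"]
#     torch.allclose(full[:, -1], step[:, 0], atol=1e-3)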
| 35 | 1 |
def _modexpt(base: int, exponent: int, modulo_value: int) -> int:
    """Recursive fast modular exponentiation: (base ** exponent) % modulo_value."""
    if exponent == 1:
        return base
    if exponent % 2 == 0:
        x = _modexpt(base, exponent // 2, modulo_value) % modulo_value
        return (x * x) % modulo_value
    else:
        return (base * _modexpt(base, exponent - 1, modulo_value)) % modulo_value
def solution(base: int = 1_7_7_7, height: int = 1_8_5_5, digits: int = 8) -> int:
    """Return the last `digits` digits of the hyperexponentiation tower base^^height."""
    result = base
    for _ in range(1, height):
        result = _modexpt(base, result, 1_0**digits)
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
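# Sanity-check sketch: _modexpt agrees with Python's built-in three-argument pow,
# e.g. _modexpt(2, 10, 1000) == pow(2, 10, 1000) == 24.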
| 35 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
| 35 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Optional[Any] = KandinskyImgaImgPipeline
lowerCamelCase : Union[str, Any] = ['''prompt''', '''image_embeds''', '''negative_image_embeds''', '''image''']
lowerCamelCase : Optional[Any] = [
'''prompt''',
'''negative_prompt''',
'''image_embeds''',
'''negative_image_embeds''',
'''image''',
]
lowerCamelCase : List[Any] = [
'''generator''',
'''height''',
'''width''',
'''strength''',
'''guidance_scale''',
'''negative_prompt''',
'''num_inference_steps''',
'''return_dict''',
'''guidance_scale''',
'''num_images_per_prompt''',
'''output_type''',
'''return_dict''',
]
lowerCamelCase : int = False
@property
def lowercase__ ( self : Any ):
return 32
@property
def lowercase__ ( self : Dict ):
return 32
@property
def lowercase__ ( self : int ):
return self.time_input_dim
@property
def lowercase__ ( self : Optional[int] ):
return self.time_input_dim * 4
@property
def lowercase__ ( self : List[str] ):
return 1_00
@property
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def lowercase__ ( self : Dict ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : str = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=10_05 , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = MultilingualCLIP(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = text_encoder.eval()
return text_encoder
@property
def lowercase__ ( self : int ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : Tuple = {
'''in_channels''': 4,
# Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
SCREAMING_SNAKE_CASE__ : Optional[Any] = UNetaDConditionModel(**_lowercase )
return model
@property
def lowercase__ ( self : List[str] ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def lowercase__ ( self : Union[str, Any] ):
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ : List[str] = VQModel(**self.dummy_movq_kwargs )
return model
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Dict = self.dummy_text_encoder
SCREAMING_SNAKE_CASE__ : str = self.dummy_tokenizer
SCREAMING_SNAKE_CASE__ : List[str] = self.dummy_unet
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.dummy_movq
SCREAMING_SNAKE_CASE__ : List[Any] = {
'''num_train_timesteps''': 10_00,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.00085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
SCREAMING_SNAKE_CASE__ : Any = DDIMScheduler(**_lowercase )
SCREAMING_SNAKE_CASE__ : str = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def lowercase__ ( self : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Optional[int]=0 ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(_lowercase ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(_lowercase )
# create init_image
SCREAMING_SNAKE_CASE__ : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_lowercase ) ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ : Any = Image.fromarray(np.uinta(_lowercase ) ).convert('''RGB''' ).resize((2_56, 2_56) )
if str(_lowercase ).startswith('''mps''' ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.manual_seed(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : int = torch.Generator(device=_lowercase ).manual_seed(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 64,
'''width''': 64,
'''num_inference_steps''': 10,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : Dict = '''cpu'''
SCREAMING_SNAKE_CASE__ : Any = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ : List[Any] = self.pipeline_class(**_lowercase )
SCREAMING_SNAKE_CASE__ : int = pipe.to(_lowercase )
pipe.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = pipe(**self.get_dummy_inputs(_lowercase ) )
SCREAMING_SNAKE_CASE__ : List[Any] = output.images
SCREAMING_SNAKE_CASE__ : List[Any] = pipe(
**self.get_dummy_inputs(_lowercase ) , return_dict=_lowercase , )[0]
SCREAMING_SNAKE_CASE__ : Any = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
SCREAMING_SNAKE_CASE__ : Tuple = np.array(
[0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : Optional[int] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : int = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
SCREAMING_SNAKE_CASE__ : int = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''A red cartoon frog, 4k'''
SCREAMING_SNAKE_CASE__ : int = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ : Dict = pipeline.to(_lowercase )
pipeline.set_progress_bar_config(disable=_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Generator(device='''cpu''' ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = pipe_prior(
_lowercase , generator=_lowercase , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
SCREAMING_SNAKE_CASE__ : Dict = pipeline(
_lowercase , image=_lowercase , image_embeds=_lowercase , negative_image_embeds=_lowercase , generator=_lowercase , num_inference_steps=1_00 , height=7_68 , width=7_68 , strength=0.2 , output_type='''np''' , )
SCREAMING_SNAKE_CASE__ : Dict = output.images[0]
assert image.shape == (7_68, 7_68, 3)
assert_mean_pixel_difference(_lowercase , _lowercase )
| 35 |
from __future__ import annotations
def a ( A__ , A__ , A__ ) -> dict[str, float]:
'''simple docstring'''
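# Ohm's law (V = I * R): exactly one of voltage, current, resistance must be passed as 0,
# and the function solves for that missing quantity.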
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
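# Example (positional args are voltage, current, resistance):
#   a(10, 0, 5) solves for the missing current -> {"current": 2.0}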
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class lowercase :
def __init__( self : Any , _lowercase : Dict , _lowercase : int=2 , _lowercase : Any=32 , _lowercase : Tuple=16 , _lowercase : List[Any]=3 , _lowercase : List[Any]=True , _lowercase : str=True , _lowercase : int=32 , _lowercase : Optional[Any]=4 , _lowercase : Optional[Any]=[0, 1, 2, 3] , _lowercase : Union[str, Any]=4 , _lowercase : List[Any]=37 , _lowercase : int="gelu" , _lowercase : List[Any]=0.1 , _lowercase : str=0.1 , _lowercase : List[str]=0.02 , _lowercase : Union[str, Any]=3 , _lowercase : List[Any]=[1, 3_84, 24, 24] , _lowercase : Union[str, Any]=True , _lowercase : int=None , ):
SCREAMING_SNAKE_CASE__ : List[Any] = parent
SCREAMING_SNAKE_CASE__ : str = batch_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = patch_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_channels
SCREAMING_SNAKE_CASE__ : List[Any] = is_training
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Tuple = num_hidden_layers
SCREAMING_SNAKE_CASE__ : int = backbone_out_indices
SCREAMING_SNAKE_CASE__ : Any = num_attention_heads
SCREAMING_SNAKE_CASE__ : str = intermediate_size
SCREAMING_SNAKE_CASE__ : Any = hidden_act
SCREAMING_SNAKE_CASE__ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : Dict = num_labels
SCREAMING_SNAKE_CASE__ : List[str] = backbone_featmap_shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = scope
SCREAMING_SNAKE_CASE__ : Tuple = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ : List[str] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Optional[int] = num_patches + 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : List[Any] = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''global_padding''': '''same''',
'''layer_type''': '''bottleneck''',
'''depths''': [3, 4, 9],
'''out_features''': ['''stage1''', '''stage2''', '''stage3'''],
'''embedding_dynamic_padding''': True,
'''hidden_sizes''': [96, 1_92, 3_84, 7_68],
'''num_groups''': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_lowercase , backbone_featmap_shape=self.backbone_featmap_shape , )
def lowercase__ ( self : str , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = DPTModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : List[str] , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE__ : List[Any] = DPTForDepthEstimation(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def lowercase__ ( self : Tuple , _lowercase : List[str] , _lowercase : Optional[int] , _lowercase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = self.num_labels
SCREAMING_SNAKE_CASE__ : int = DPTForSemanticSegmentation(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = config_and_inputs
SCREAMING_SNAKE_CASE__ : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Optional[int] = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCamelCase : Optional[int] = (
{
'''depth-estimation''': DPTForDepthEstimation,
'''feature-extraction''': DPTModel,
'''image-segmentation''': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCamelCase : List[str] = False
lowerCamelCase : List[str] = False
lowerCamelCase : str = False
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Dict = DPTModelTester(self )
SCREAMING_SNAKE_CASE__ : Tuple = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def lowercase__ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DPT does not use inputs_embeds''' )
def lowercase__ ( self : List[str] ):
pass
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , nn.Linear ) )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_lowercase )
def lowercase__ ( self : List[Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : int = True
if model_class in get_values(_lowercase ):
continue
SCREAMING_SNAKE_CASE__ : Dict = model_class(_lowercase )
model.to(_lowercase )
model.train()
SCREAMING_SNAKE_CASE__ : Dict = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(**_lowercase ).loss
loss.backward()
def lowercase__ ( self : Optional[Any] ):
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[Any] = False
SCREAMING_SNAKE_CASE__ : List[Any] = True
if model_class in get_values(_lowercase ) or not model_class.supports_gradient_checkpointing:
continue
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_lowercase )
model.to(_lowercase )
model.gradient_checkpointing_enable()
model.train()
SCREAMING_SNAKE_CASE__ : Any = self._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(**_lowercase ).loss
loss.backward()
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : List[str] = _config_zero_init(_lowercase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : str = model_class(config=_lowercase )
# Skip the check for the backbone
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
SCREAMING_SNAKE_CASE__ : Dict = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip('''Will be fixed soon by reducing the size of the model used for common tests.''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
def lowercase__ ( self : Tuple ):
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
SCREAMING_SNAKE_CASE__ : List[Any] = DPTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def lowercase__ ( self : Optional[int] ):
# We do this test only for DPTForDepthEstimation since it is the only model that uses readout_type
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''add'''
with self.assertRaises(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = DPTForDepthEstimation(_lowercase )
def a ( ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
@slow
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = DPTImageProcessor.from_pretrained('''Intel/dpt-hybrid-midas''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = DPTForDepthEstimation.from_pretrained('''Intel/dpt-hybrid-midas''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = prepare_img()
SCREAMING_SNAKE_CASE__ : Optional[Any] = image_processor(images=_lowercase , return_tensors='''pt''' ).to(_lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Optional[int] = model(**_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = outputs.predicted_depth
# verify the predicted depth
SCREAMING_SNAKE_CASE__ : List[Any] = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , _lowercase )
SCREAMING_SNAKE_CASE__ : int = torch.tensor(
[[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , _lowercase , atol=1E-4 ) )
| 35 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
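# Rank passages by their relevance logit, then pull the highest-scoring answer spans out of
# each passage in turn until num_spans predictions have been collected.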
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
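# Score every (start, end) candidate by start_logit + end_logit, then greedily keep the
# top-scoring spans that do not overlap any span already chosen.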
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
| 35 | 1 |
from heapq import heappop, heappush
import numpy as np
def a ( A__ , A__ , A__ , A__ , ) -> tuple[float | int, list[tuple[int, int]]]:
'''simple docstring'''
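# Dijkstra / uniform-cost search on a unit-weight grid: repeatedly pop the closest unvisited
# cell, relax its neighbours, and backtrack through the predecessor matrix once the
# destination is reached.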
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = grid.shape
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [-1, 1, 0, 0]
SCREAMING_SNAKE_CASE__ : List[str] = [0, 0, -1, 1]
if allow_diagonal:
dx += [-1, -1, 1, 1]
dy += [-1, 1, -1, 1]
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = [(0, source)], set()
SCREAMING_SNAKE_CASE__ : Optional[int] = np.full((rows, cols) , np.inf )
SCREAMING_SNAKE_CASE__ : List[Any] = 0
SCREAMING_SNAKE_CASE__ : Any = np.empty((rows, cols) , dtype=A__ )
SCREAMING_SNAKE_CASE__ : List[str] = None
while queue:
((SCREAMING_SNAKE_CASE__) , (SCREAMING_SNAKE_CASE__)) : Any = heappop(A__ )
if (x, y) in visited:
continue
visited.add((x, y) )
if (x, y) == destination:
SCREAMING_SNAKE_CASE__ : Any = []
while (x, y) != source:
path.append((x, y) )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = predecessors[x, y]
path.append(A__ ) # add the source manually
path.reverse()
return matrix[destination], path
for i in range(len(A__ ) ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = x + dx[i], y + dy[i]
if 0 <= nx < rows and 0 <= ny < cols:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = grid[nx][ny]
if next_node == 1 and matrix[nx, ny] > dist + 1:
heappush(A__ , (dist + 1, (nx, ny)) )
SCREAMING_SNAKE_CASE__ : List[Any] = dist + 1
SCREAMING_SNAKE_CASE__ : List[Any] = (x, y)
return np.inf, []
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
import random
def a ( A__ ) -> bool:
'''simple docstring'''
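# Miller-Rabin probabilistic primality test: write num - 1 = 2**t * s with s odd, then
# check 5 random witnesses; composites are rejected with high probability.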
SCREAMING_SNAKE_CASE__ : str = num - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE__ : int = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pow(A__ , A__ , A__ )
if v != 1:
SCREAMING_SNAKE_CASE__ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE__ : Any = i + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (v**2) % num
return True
def a ( A__ ) -> bool:
'''simple docstring'''
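# Cheap pre-checks: trial division against all primes below 1000 before falling back to
# the probabilistic Miller-Rabin test.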
if num < 2:
return False
SCREAMING_SNAKE_CASE__ : Optional[int] = [
2,
3,
5,
7,
1_1,
1_3,
1_7,
1_9,
2_3,
2_9,
3_1,
3_7,
4_1,
4_3,
4_7,
5_3,
5_9,
6_1,
6_7,
7_1,
7_3,
7_9,
8_3,
8_9,
9_7,
1_0_1,
1_0_3,
1_0_7,
1_0_9,
1_1_3,
1_2_7,
1_3_1,
1_3_7,
1_3_9,
1_4_9,
1_5_1,
1_5_7,
1_6_3,
1_6_7,
1_7_3,
1_7_9,
1_8_1,
1_9_1,
1_9_3,
1_9_7,
1_9_9,
2_1_1,
2_2_3,
2_2_7,
2_2_9,
2_3_3,
2_3_9,
2_4_1,
2_5_1,
2_5_7,
2_6_3,
2_6_9,
2_7_1,
2_7_7,
2_8_1,
2_8_3,
2_9_3,
3_0_7,
3_1_1,
3_1_3,
3_1_7,
3_3_1,
3_3_7,
3_4_7,
3_4_9,
3_5_3,
3_5_9,
3_6_7,
3_7_3,
3_7_9,
3_8_3,
3_8_9,
3_9_7,
4_0_1,
4_0_9,
4_1_9,
4_2_1,
4_3_1,
4_3_3,
4_3_9,
4_4_3,
4_4_9,
4_5_7,
4_6_1,
4_6_3,
4_6_7,
4_7_9,
4_8_7,
4_9_1,
4_9_9,
5_0_3,
5_0_9,
5_2_1,
5_2_3,
5_4_1,
5_4_7,
5_5_7,
5_6_3,
5_6_9,
5_7_1,
5_7_7,
5_8_7,
5_9_3,
5_9_9,
6_0_1,
6_0_7,
6_1_3,
6_1_7,
6_1_9,
6_3_1,
6_4_1,
6_4_3,
6_4_7,
6_5_3,
6_5_9,
6_6_1,
6_7_3,
6_7_7,
6_8_3,
6_9_1,
7_0_1,
7_0_9,
7_1_9,
7_2_7,
7_3_3,
7_3_9,
7_4_3,
7_5_1,
7_5_7,
7_6_1,
7_6_9,
7_7_3,
7_8_7,
7_9_7,
8_0_9,
8_1_1,
8_2_1,
8_2_3,
8_2_7,
8_2_9,
8_3_9,
8_5_3,
8_5_7,
8_5_9,
8_6_3,
8_7_7,
8_8_1,
8_8_3,
8_8_7,
9_0_7,
9_1_1,
9_1_9,
9_2_9,
9_3_7,
9_4_1,
9_4_7,
9_5_3,
9_6_7,
9_7_1,
9_7_7,
9_8_3,
9_9_1,
9_9_7,
]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A__ )
def a ( A__ = 1_0_2_4 ) -> int:
'''simple docstring'''
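# Keep sampling keysize-bit candidates until one passes the primality test.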
while True:
SCREAMING_SNAKE_CASE__ : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A__ ):
return num
if __name__ == "__main__":
a_ :Dict = generate_large_prime()
print(('Prime number:', num))
print(('is_prime_low_num:', is_prime_low_num(num)))
| 35 | 1 |
from __future__ import annotations
def a ( A__ ) -> list[int]:
'''simple docstring'''
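# Pigeonhole sort: count each value's occurrences in a hole indexed by its offset from the
# minimum, then write the holes back into the array in order (O(n + value range)).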
if len(A__ ) == 0:
return array
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = min(A__ ), max(A__ )
# Compute the variables
SCREAMING_SNAKE_CASE__ : Dict = _max - _min + 1
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = [0] * holes_range, [0] * holes_range
# Make the sorting.
for i in array:
SCREAMING_SNAKE_CASE__ : List[str] = i - _min
SCREAMING_SNAKE_CASE__ : Optional[Any] = i
holes_repeat[index] += 1
# Makes the array back by replacing the numbers.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 0
for i in range(A__ ):
while holes_repeat[i] > 0:
SCREAMING_SNAKE_CASE__ : str = holes[i]
index += 1
holes_repeat[i] -= 1
# Returns the sorted array.
return array
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ :Any = input('Enter numbers separated by comma:\n')
a_ :Any = [int(x) for x in user_input.split(',')]
print(pigeon_sort(unsorted))
| 35 |
# Logistic Regression from scratch
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a ( A__ ) -> List[Any]:
'''simple docstring'''
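# Logistic (sigmoid) function: squashes any real z into the open interval (0, 1).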
return 1 / (1 + np.exp(-z ))
def a ( A__ , A__ ) -> Any:
'''simple docstring'''
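# Binary cross-entropy between labels y and predicted probabilities h, averaged over the batch.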
return (-y * np.log(A__ ) - (1 - y) * np.log(1 - h )).mean()
def a ( A__ , A__ , A__ ) -> Tuple:
'''simple docstring'''
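# Log-likelihood of the logistic model given the weights and data (useful for monitoring fit).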
SCREAMING_SNAKE_CASE__ : str = np.dot(A__ , A__ )
return np.sum(y * scores - np.log(1 + np.exp(A__ ) ) )
def a ( A__ , A__ , A__ , A__=7_0_0_0_0 ) -> Tuple:
'''simple docstring'''
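# Batch gradient descent on the cross-entropy loss: the first argument is the learning rate;
# the weight vector starts at zero and is updated for max_iterations steps.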
SCREAMING_SNAKE_CASE__ : int = np.zeros(x.shape[1] )
for iterations in range(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Dict = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : int = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : int = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cost_function(A__ , A__ )
if iterations % 1_0_0 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def a ( A__ ) -> int:
'''simple docstring'''
return sigmoid_function(
np.dot(A__ , A__ ) ) # predict the class-1 probability under the fitted logistic regression model
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) :str = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) :Tuple = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) :Dict = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ :Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
a_ :Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 35 | 1 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def a ( A__ ) -> str:
'''simple docstring'''
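# Hash the raw image bytes so test outputs can be compared by digest instead of by pixel values.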
SCREAMING_SNAKE_CASE__ : Optional[int] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 35 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 1 |
def a ( A__ = 1_0_0_0 ) -> int:
'''simple docstring'''
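# Sum the multiples of 3 or 5 below n (as in Project Euler problem 1).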
SCREAMING_SNAKE_CASE__ : Optional[int] = 3
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while a < n:
if a % 3 == 0 or a % 5 == 0: # counts multiples of 15 exactly once
result += a
a += 1
return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( A__ , A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = RemBertConfig.from_json_file(A__ )
print('''Building PyTorch model from configuration: {}'''.format(str(A__ ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = RemBertModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A__ , A__ , A__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(A__ ) )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 35 | 1 |
import json
import os
import unittest
from transformers import AutoTokenizer, GPTaTokenizer, GPTaTokenizerFast
from transformers.models.gpta.tokenization_gpta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = GPTaTokenizer
lowerCamelCase : Dict = GPTaTokenizerFast
lowerCamelCase : Any = True
lowerCamelCase : Tuple = {'''add_prefix_space''': True}
lowerCamelCase : Dict = False
def lowercase__ ( self : str ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
SCREAMING_SNAKE_CASE__ : int = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
'''<|endoftext|>''',
]
SCREAMING_SNAKE_CASE__ : List[str] = dict(zip(_lowercase , range(len(_lowercase ) ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE__ : Tuple = {'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowercase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowercase ) )
def lowercase__ ( self : int , **_lowercase : Any ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[int] , **_lowercase : Tuple ):
kwargs.update(self.special_tokens_map )
return GPTaTokenizerFast.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Any , _lowercase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : str = '''lower newer'''
SCREAMING_SNAKE_CASE__ : Optional[int] = '''lower newer'''
return input_text, output_text
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : int = GPTaTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
SCREAMING_SNAKE_CASE__ : int = '''lower newer'''
SCREAMING_SNAKE_CASE__ : int = ['''\u0120low''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
SCREAMING_SNAKE_CASE__ : int = tokenizer.tokenize(_lowercase , add_prefix_space=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = tokens + [tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Tuple = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def lowercase__ ( self : int ):
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE__ : Dict = self.get_tokenizer()
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_rust_tokenizer(add_prefix_space=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = '''lower newer'''
# Testing tokenization
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.tokenize(_lowercase , add_prefix_space=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.tokenize(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Testing conversion to ids without special tokens
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase , add_prefix_space=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = rust_tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Testing conversion to ids with special tokens
SCREAMING_SNAKE_CASE__ : Any = self.get_rust_tokenizer(add_prefix_space=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(_lowercase , add_prefix_space=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = rust_tokenizer.encode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
# Testing the unknown token
SCREAMING_SNAKE_CASE__ : Optional[int] = tokens + [rust_tokenizer.unk_token]
SCREAMING_SNAKE_CASE__ : Dict = [14, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(rust_tokenizer.convert_tokens_to_ids(_lowercase ) , _lowercase )
def lowercase__ ( self : List[str] , *_lowercase : Union[str, Any] , **_lowercase : str ):
# It's very difficult to mix/test pretokenization with byte-level
# And get both GPT2 and Roberta to work at the same time (mostly an issue of adding a space before the string)
pass
def lowercase__ ( self : str , _lowercase : Optional[int]=15 ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
SCREAMING_SNAKE_CASE__ : Dict = self.rust_tokenizer_class.from_pretrained(_lowercase , **_lowercase )
# Simple input
SCREAMING_SNAKE_CASE__ : List[str] = '''This is a simple input'''
SCREAMING_SNAKE_CASE__ : List[str] = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE__ : Dict = [
('''This is a simple input 1''', '''This is a simple input 2'''),
('''This is a simple pair 1''', '''This is a simple pair 2'''),
]
# Simple input tests
self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='''max_length''' )
# Simple input
self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' )
# Simple input
self.assertRaises(
_lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' , )
# Pair input
self.assertRaises(_lowercase , tokenizer_r.encode , _lowercase , max_length=_lowercase , padding='''max_length''' )
# Pair input
self.assertRaises(_lowercase , tokenizer_r.encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' )
# Pair input
self.assertRaises(
_lowercase , tokenizer_r.batch_encode_plus , _lowercase , max_length=_lowercase , padding='''max_length''' , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = GPTaTokenizer.from_pretrained(self.tmpdirname , pad_token='''<pad>''' )
# Simple input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''This is a simple input'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''This is a simple input looooooooong''', '''This is a simple input''']
SCREAMING_SNAKE_CASE__ : str = ('''This is a simple input''', '''This is a pair''')
SCREAMING_SNAKE_CASE__ : List[Any] = [
('''This is a simple input loooooong''', '''This is a simple input'''),
('''This is a simple pair loooooong''', '''This is a simple pair'''),
]
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.pad_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer(_lowercase , padding='''max_length''' , max_length=30 , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ : int = tokenizer(_lowercase , padding=_lowercase , truncate=_lowercase , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ : int = tokenizer(*_lowercase , padding='''max_length''' , max_length=60 , return_tensors='''np''' )
SCREAMING_SNAKE_CASE__ : str = tokenizer(_lowercase , padding=_lowercase , truncate=_lowercase , return_tensors='''np''' )
# s
# test single string max_length padding
self.assertEqual(out_s['''input_ids'''].shape[-1] , 30 )
self.assertTrue(pad_token_id in out_s['''input_ids'''] )
self.assertTrue(0 in out_s['''attention_mask'''] )
# s2
# test automatic padding
self.assertEqual(out_sa['''input_ids'''].shape[-1] , 33 )
# long slice doesn't have padding
self.assertFalse(pad_token_id in out_sa['''input_ids'''][0] )
self.assertFalse(0 in out_sa['''attention_mask'''][0] )
# short slice does have padding
self.assertTrue(pad_token_id in out_sa['''input_ids'''][1] )
self.assertTrue(0 in out_sa['''attention_mask'''][1] )
# p
# test single pair max_length padding
self.assertEqual(out_p['''input_ids'''].shape[-1] , 60 )
self.assertTrue(pad_token_id in out_p['''input_ids'''] )
self.assertTrue(0 in out_p['''attention_mask'''] )
# p2
# test automatic padding pair
self.assertEqual(out_pa['''input_ids'''].shape[-1] , 52 )
# long slice pair doesn't have padding
self.assertFalse(pad_token_id in out_pa['''input_ids'''][0] )
self.assertFalse(0 in out_pa['''attention_mask'''][0] )
# short slice pair does have padding
self.assertTrue(pad_token_id in out_pa['''input_ids'''][1] )
self.assertTrue(0 in out_pa['''attention_mask'''][1] )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''$$$'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = GPTaTokenizer.from_pretrained(self.tmpdirname , bos_token=_lowercase , add_bos_token=_lowercase )
SCREAMING_SNAKE_CASE__ : int = '''This is a simple input'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ['''This is a simple input 1''', '''This is a simple input 2''']
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.bos_token_id
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tokenizer(_lowercase )
self.assertEqual(out_s.input_ids[0] , _lowercase )
self.assertTrue(all(o[0] == bos_token_id for o in out_sa.input_ids ) )
SCREAMING_SNAKE_CASE__ : int = tokenizer.decode(out_s.input_ids )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.batch_decode(out_sa.input_ids )
self.assertEqual(decode_s.split()[0] , _lowercase )
self.assertTrue(all(d.split()[0] == bos_token for d in decode_sa ) )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : int ):
# TODO: change to self.get_tokenizers() when the fast version is implemented
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.get_tokenizer(do_lower_case=_lowercase , add_bos_token=_lowercase )]
for tokenizer in tokenizers:
with self.subTest(f"""{tokenizer.__class__.__name__}""" ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''Encode this.'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''This one too please.'''
SCREAMING_SNAKE_CASE__ : List[Any] = tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
encoded_sequence += tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = tokenizer.encode_plus(
_lowercase , _lowercase , add_special_tokens=_lowercase , return_special_tokens_mask=_lowercase , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoded_sequence_dict['''input_ids''']
SCREAMING_SNAKE_CASE__ : List[str] = encoded_sequence_dict['''special_tokens_mask''']
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Any = [
(x if not special_tokens_mask[i] else None) for i, x in enumerate(_lowercase )
]
SCREAMING_SNAKE_CASE__ : Tuple = [x for x in filtered_sequence if x is not None]
self.assertEqual(_lowercase , _lowercase )
@require_tokenizers
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : List[str] ):
# More context:
# https://huggingface.co/wjmcat/opt-350m-paddle/discussions/1
# https://huggingface.slack.com/archives/C01N44FJDHT/p1653511495183519
# https://github.com/huggingface/transformers/pull/17088#discussion_r871246439
SCREAMING_SNAKE_CASE__ : Tuple = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = '''A photo of a cat'''
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_lowercase , )
self.assertEqual(_lowercase , [2, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''test_opt''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoTokenizer.from_pretrained('''./test_opt''' )
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.encode(
_lowercase , )
self.assertEqual(_lowercase , [2, 2_50, 13_45, 9, 10, 47_58] )
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , use_slow=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''A photo of a cat'''
SCREAMING_SNAKE_CASE__ : List[str] = tokenizer.encode(
_lowercase , )
# Same as above
self.assertEqual(_lowercase , [2, 2_50, 13_45, 9, 10, 47_58] )
@unittest.skip('''This test is failing because of a bug in the fast tokenizer''' )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = AutoTokenizer.from_pretrained('''facebook/opt-350m''' , from_slow=_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = '''bos'''
SCREAMING_SNAKE_CASE__ : str = tokenizer.get_vocab()['''bos''']
SCREAMING_SNAKE_CASE__ : Optional[int] = '''A photo of a cat'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode(
_lowercase , )
# We changed the bos token
self.assertEqual(_lowercase , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
tokenizer.save_pretrained('''./tok''' )
SCREAMING_SNAKE_CASE__ : List[Any] = AutoTokenizer.from_pretrained('''./tok''' )
self.assertTrue(tokenizer.is_fast )
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.encode(
_lowercase , )
self.assertEqual(_lowercase , [3_19_57, 2_50, 13_45, 9, 10, 47_58] )
| 35 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (`string` or `int`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=None , _lowercase : Tuple=1 , _lowercase : List[Any]="binary" , _lowercase : Any=None , _lowercase : Optional[int]="warn" , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
_lowercase , _lowercase , labels=_lowercase , pos_label=_lowercase , average=_lowercase , sample_weight=_lowercase , zero_division=_lowercase , )
return {"recall": float(_lowercase ) if score.size == 1 else score}
| 35 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ :List[str] = logging.get_logger(__name__)
a_ :List[str] = {
'facebook/data2vec-vision-base-ft': (
'https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json'
),
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : int = '''data2vec-vision'''
def __init__( self : Optional[Any] , _lowercase : int=7_68 , _lowercase : Tuple=12 , _lowercase : int=12 , _lowercase : Any=30_72 , _lowercase : Union[str, Any]="gelu" , _lowercase : Optional[Any]=0.0 , _lowercase : Dict=0.0 , _lowercase : Tuple=0.02 , _lowercase : Optional[Any]=1E-12 , _lowercase : Tuple=2_24 , _lowercase : List[str]=16 , _lowercase : List[Any]=3 , _lowercase : Union[str, Any]=False , _lowercase : Union[str, Any]=False , _lowercase : List[str]=False , _lowercase : List[str]=False , _lowercase : Any=0.1 , _lowercase : Optional[Any]=0.1 , _lowercase : str=True , _lowercase : Dict=[3, 5, 7, 11] , _lowercase : str=[1, 2, 3, 6] , _lowercase : List[str]=True , _lowercase : int=0.4 , _lowercase : Tuple=2_56 , _lowercase : Union[str, Any]=1 , _lowercase : Optional[int]=False , _lowercase : Tuple=2_55 , **_lowercase : Optional[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = hidden_size
SCREAMING_SNAKE_CASE__ : int = num_hidden_layers
SCREAMING_SNAKE_CASE__ : List[str] = num_attention_heads
SCREAMING_SNAKE_CASE__ : List[Any] = intermediate_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = hidden_act
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[str] = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[str] = image_size
SCREAMING_SNAKE_CASE__ : int = patch_size
SCREAMING_SNAKE_CASE__ : List[Any] = num_channels
SCREAMING_SNAKE_CASE__ : int = use_mask_token
SCREAMING_SNAKE_CASE__ : Union[str, Any] = use_absolute_position_embeddings
SCREAMING_SNAKE_CASE__ : List[Any] = use_relative_position_bias
SCREAMING_SNAKE_CASE__ : Optional[int] = use_shared_relative_position_bias
SCREAMING_SNAKE_CASE__ : Any = layer_scale_init_value
SCREAMING_SNAKE_CASE__ : Any = drop_path_rate
SCREAMING_SNAKE_CASE__ : int = use_mean_pooling
# decode head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE__ : List[Any] = out_indices
SCREAMING_SNAKE_CASE__ : List[str] = pool_scales
# auxiliary head attributes (semantic segmentation)
SCREAMING_SNAKE_CASE__ : int = use_auxiliary_head
SCREAMING_SNAKE_CASE__ : str = auxiliary_loss_weight
SCREAMING_SNAKE_CASE__ : List[Any] = auxiliary_channels
SCREAMING_SNAKE_CASE__ : Any = auxiliary_num_convs
SCREAMING_SNAKE_CASE__ : Dict = auxiliary_concat_input
SCREAMING_SNAKE_CASE__ : List[str] = semantic_loss_ignore_index
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Dict = version.parse('''1.11''' )
@property
def lowercase__ ( self : Union[str, Any] ):
return OrderedDict(
[
('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
] )
@property
def lowercase__ ( self : str ):
return 1E-4
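# Hedged usage sketch (assuming this class mirrors transformers' Data2VecVisionConfig):
# config = Data2VecVisionConfig(image_size=384, patch_size=16)  # hypothetical overrides
# would make the ONNX input above expect pixel_values of shape (batch, 3, 384, 384).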
| 35 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
# bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
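# Resulting layout per example (cls_token_at_end=False, no sep_token_extra):
# tokens:    [CLS] tok_1 ... tok_n [SEP] <pad> ...
# label_ids: pad_token_label_id for [CLS], [SEP], padding, and every non-first sub-token,
# so a loss with ignore_index=pad_token_label_id (-100 by default) skips all of them.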
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
return self.features[i]
| 35 | 1 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def a ( ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = ArgumentParser('''Diffusers CLI tool''' , usage='''diffusers-cli <command> [<args>]''' )
SCREAMING_SNAKE_CASE__ : int = parser.add_subparsers(help='''diffusers-cli command helpers''' )
# Register commands
EnvironmentCommand.register_subcommand(A__ )
# Let's go
SCREAMING_SNAKE_CASE__ : Any = parser.parse_args()
if not hasattr(A__ , '''func''' ):
parser.print_help()
exit(1 )
# Run
SCREAMING_SNAKE_CASE__ : List[str] = args.func(A__ )
service.run()
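# On the command line this is exposed as e.g. `diffusers-cli env`; argparse stores each
# subcommand's factory on `args.func` via register_subcommand and dispatches to it here.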
if __name__ == "__main__":
main()
| 35 |
import os
def a ( A__ = "matrix.txt" ) -> int:
'''simple docstring'''
with open(os.path.join(os.path.dirname(A__ ) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
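# The recurrence dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1]) holds because a
# minimal right/down path must enter (i, j) from above or from the left. Tiny check on a
# hypothetical grid [[1, 3], [2, 4]] (not matrix.txt): dp = [[1, 4], [3, 7]], answer 7.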
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 | 1 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.local_sgd import LocalSGD
########################################################################
# This is a fully working simple example to use Accelerate
# with LocalSGD, which is a method to synchronize model
# parameters every K batches. It is different, but complementary
# to gradient accumulation.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
a_ :int = 16
a_ :Tuple = 32
def a ( A__ , A__ = 1_6 ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = AutoTokenizer.from_pretrained('''bert-base-cased''' )
SCREAMING_SNAKE_CASE__ : List[str] = load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(A__ ):
# max_length=None => use the model max length (it's actually the default)
SCREAMING_SNAKE_CASE__ : int = tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=A__ , max_length=A__ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
SCREAMING_SNAKE_CASE__ : Dict = datasets.map(
A__ , batched=A__ , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
SCREAMING_SNAKE_CASE__ : Dict = tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(A__ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
SCREAMING_SNAKE_CASE__ : Any = 1_2_8 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
SCREAMING_SNAKE_CASE__ : Dict = 1_6
elif accelerator.mixed_precision != "no":
SCREAMING_SNAKE_CASE__ : Tuple = 8
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
return tokenizer.pad(
A__ , padding='''longest''' , max_length=A__ , pad_to_multiple_of=A__ , return_tensors='''pt''' , )
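# pad_to_multiple_of above rounds each batch's padded length up to the next multiple of
# 8 (fp16/bf16) or 16 (fp8) so tensor shapes align with Tensor Core tile sizes on NVIDIA GPUs.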
# Instantiate dataloaders.
SCREAMING_SNAKE_CASE__ : Tuple = DataLoader(
tokenized_datasets['''train'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
SCREAMING_SNAKE_CASE__ : str = DataLoader(
tokenized_datasets['''validation'''] , shuffle=A__ , collate_fn=A__ , batch_size=A__ )
return train_dataloader, eval_dataloader
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
a_ :Union[str, Any] = mocked_dataloaders # noqa: F811
def a ( A__ , A__ ) -> int:
'''simple docstring'''
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , A__ ) == "1":
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
# New Code #
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(args.gradient_accumulation_steps )
SCREAMING_SNAKE_CASE__ : Optional[Any] = int(args.local_sgd_steps )
# Initialize accelerator
SCREAMING_SNAKE_CASE__ : Any = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=A__ )
if accelerator.distributed_type not in [DistributedType.NO, DistributedType.MULTI_CPU, DistributedType.MULTI_GPU]:
raise NotImplementedError('''LocalSGD is supported only for CPUs and GPUs (no DeepSpeed or MegatronLM)''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
SCREAMING_SNAKE_CASE__ : Any = config['''lr''']
SCREAMING_SNAKE_CASE__ : Any = int(config['''num_epochs'''] )
SCREAMING_SNAKE_CASE__ : List[str] = int(config['''seed'''] )
SCREAMING_SNAKE_CASE__ : Dict = int(config['''batch_size'''] )
SCREAMING_SNAKE_CASE__ : Tuple = evaluate.load('''glue''' , '''mrpc''' )
set_seed(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : int = get_dataloaders(A__ , A__ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
SCREAMING_SNAKE_CASE__ : str = AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=A__ )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
SCREAMING_SNAKE_CASE__ : Optional[Any] = model.to(accelerator.device )
# Instantiate optimizer
SCREAMING_SNAKE_CASE__ : Dict = AdamW(params=model.parameters() , lr=A__ )
# Instantiate scheduler
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=A__ , num_warmup_steps=1_0_0 , num_training_steps=(len(A__ ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = accelerator.prepare(
A__ , A__ , A__ , A__ , A__ )
# Now we train the model
for epoch in range(A__ ):
model.train()
with LocalSGD(
accelerator=A__ , model=A__ , local_sgd_steps=A__ , enabled=local_sgd_steps is not None ) as local_sgd:
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(A__ ):
SCREAMING_SNAKE_CASE__ : Any = model(**A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = output.loss
accelerator.backward(A__ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
# LocalSGD-specific line
local_sgd.step()
model.eval()
for step, batch in enumerate(A__ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[str] = model(**A__ )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.logits.argmax(dim=-1 )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=A__ , references=A__ , )
SCREAMING_SNAKE_CASE__ : List[Any] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f"""epoch {epoch}:""" , A__ )
def a ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=A__ , default=A__ , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=A__ , default=1 , help='''The number of minibatches to be ran before gradients are accumulated.''' , )
parser.add_argument(
'''--local_sgd_steps''' , type=A__ , default=8 , help='''Number of local SGD steps or None to disable local SGD''' )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
SCREAMING_SNAKE_CASE__ : Any = {'''lr''': 2e-5, '''num_epochs''': 3, '''seed''': 4_2, '''batch_size''': 1_6}
training_function(A__ , A__ )
if __name__ == "__main__":
main()
| 35 |
from math import factorial
def a ( A__ = 2_0 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
SCREAMING_SNAKE_CASE__ : Dict = n // 2
return int(factorial(A__ ) / (factorial(A__ ) * factorial(n - k )) )
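# Equivalent closed form: C(2n, n) = (2n)! / (n! * n!). Sanity check: n = 2 gives
# C(4, 2) = 24 / (2 * 2) = 6, the number of right/down lattice paths through a 2x2 grid.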
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ :str = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
| 35 | 1 |
def a ( A__ = 1_0_0 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = set()
SCREAMING_SNAKE_CASE__ : Tuple = 0
SCREAMING_SNAKE_CASE__ : Union[str, Any] = n + 1 # maximum limit
for a in range(2 , A__ ):
for b in range(2 , A__ ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = a**b # calculates the current power
collect_powers.add(A__ ) # adds the result to the set
return len(A__ )
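# Tiny check with a reduced limit: solution(5) counts distinct a**b for 2 <= a, b <= 5,
# i.e. 16 pairs collapsing to 15 values because 2**4 == 4**2 == 16.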
if __name__ == "__main__":
print('Number of terms ', solution(int(str(input()).strip())))
| 35 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def a ( A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is not hf-internal-testing tiny model for either GLPN nor DPT''' )
| 35 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ :Tuple = logging.get_logger(__name__)
a_ :Any = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = '''nllb-moe'''
lowerCamelCase : Union[str, Any] = ['''past_key_values''']
lowerCamelCase : Dict = {'''num_attention_heads''': '''encoder_attention_heads''', '''hidden_size''': '''d_model'''}
def __init__( self : Optional[Any] , _lowercase : Union[str, Any]=12_81_12 , _lowercase : List[Any]=10_24 , _lowercase : str=12 , _lowercase : List[str]=40_96 , _lowercase : Any=16 , _lowercase : Union[str, Any]=12 , _lowercase : Union[str, Any]=40_96 , _lowercase : List[str]=16 , _lowercase : Optional[Any]=0.05 , _lowercase : List[str]=0.05 , _lowercase : Union[str, Any]=True , _lowercase : Dict=True , _lowercase : Any="relu" , _lowercase : int=10_24 , _lowercase : Any=0.1 , _lowercase : int=0.1 , _lowercase : int=0.0 , _lowercase : Dict=0.02 , _lowercase : str=2 , _lowercase : Tuple=True , _lowercase : Any=False , _lowercase : Tuple="float32" , _lowercase : Any=False , _lowercase : Union[str, Any]=1_28 , _lowercase : Union[str, Any]=64 , _lowercase : Dict=4 , _lowercase : str=4 , _lowercase : Dict=0.001 , _lowercase : str=0.001 , _lowercase : Dict="all" , _lowercase : int=False , _lowercase : Union[str, Any]=False , _lowercase : int=1.0 , _lowercase : Optional[int]=0.2 , _lowercase : Any=1 , _lowercase : Optional[Any]=0 , _lowercase : int=2 , _lowercase : List[str]=False , **_lowercase : Union[str, Any] , ):
SCREAMING_SNAKE_CASE__ : str = vocab_size
SCREAMING_SNAKE_CASE__ : Tuple = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[str] = encoder_layers
SCREAMING_SNAKE_CASE__ : Optional[int] = encoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : Any = decoder_layers
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Tuple = dropout
SCREAMING_SNAKE_CASE__ : str = attention_dropout
SCREAMING_SNAKE_CASE__ : Dict = activation_dropout
SCREAMING_SNAKE_CASE__ : List[str] = activation_function
SCREAMING_SNAKE_CASE__ : Union[str, Any] = init_std
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_layerdrop
SCREAMING_SNAKE_CASE__ : Tuple = decoder_layerdrop
SCREAMING_SNAKE_CASE__ : Any = use_cache
SCREAMING_SNAKE_CASE__ : List[Any] = encoder_layers
SCREAMING_SNAKE_CASE__ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
SCREAMING_SNAKE_CASE__ : Dict = router_z_loss_coef
SCREAMING_SNAKE_CASE__ : str = router_aux_loss_coef
SCREAMING_SNAKE_CASE__ : List[str] = decoder_sparse_step
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_sparse_step
SCREAMING_SNAKE_CASE__ : List[str] = num_experts
SCREAMING_SNAKE_CASE__ : str = expert_capacity
SCREAMING_SNAKE_CASE__ : List[Any] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f"""`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}""" )
SCREAMING_SNAKE_CASE__ : List[str] = router_dtype
SCREAMING_SNAKE_CASE__ : Union[str, Any] = router_ignore_padding_tokens
SCREAMING_SNAKE_CASE__ : Optional[int] = batch_prioritized_routing
SCREAMING_SNAKE_CASE__ : List[Any] = second_expert_policy
SCREAMING_SNAKE_CASE__ : Tuple = normalize_router_prob_before_dropping
SCREAMING_SNAKE_CASE__ : Optional[int] = moe_eval_capacity_token_fraction
SCREAMING_SNAKE_CASE__ : List[str] = moe_token_dropout
SCREAMING_SNAKE_CASE__ : Optional[Any] = output_router_logits
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , is_encoder_decoder=_lowercase , decoder_start_token_id=_lowercase , **_lowercase , )
| 35 |
def a ( A__ ) -> int:
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif not isinstance(A__ , int ):
raise TypeError('''Input value must be a \'int\' type''' )
return bin(A__ ).count('''1''' )
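# Example: 25 is 0b11001, so the count of '1' characters (and set bits) is 3; the '0b'
# prefix produced by bin() never contains a '1', so it does not skew the count.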
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
a_ :str = logging.get_logger(__name__)
def a ( A__ ) -> Any:
'''simple docstring'''
if "resnet-50" in model_name:
SCREAMING_SNAKE_CASE__ : str = ResNetConfig.from_pretrained('''microsoft/resnet-50''' )
elif "resnet-101" in model_name:
SCREAMING_SNAKE_CASE__ : List[Any] = ResNetConfig.from_pretrained('''microsoft/resnet-101''' )
else:
raise ValueError('''Model name should include either resnet50 or resnet101''' )
SCREAMING_SNAKE_CASE__ : List[Any] = DetrConfig(use_timm_backbone=A__ , backbone_config=A__ )
# set label attributes
SCREAMING_SNAKE_CASE__ : List[Any] = '''panoptic''' in model_name
if is_panoptic:
SCREAMING_SNAKE_CASE__ : Any = 2_5_0
else:
SCREAMING_SNAKE_CASE__ : List[str] = 9_1
SCREAMING_SNAKE_CASE__ : Any = '''huggingface/label-files'''
SCREAMING_SNAKE_CASE__ : int = '''coco-detection-id2label.json'''
SCREAMING_SNAKE_CASE__ : Any = json.load(open(hf_hub_download(A__ , A__ , repo_type='''dataset''' ) , '''r''' ) )
SCREAMING_SNAKE_CASE__ : Any = {int(A__ ): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ : Dict = idalabel
SCREAMING_SNAKE_CASE__ : Any = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def a ( A__ ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = []
# stem
# fmt: off
rename_keys.append(('''backbone.0.body.conv1.weight''', '''backbone.conv_encoder.model.embedder.embedder.convolution.weight''') )
rename_keys.append(('''backbone.0.body.bn1.weight''', '''backbone.conv_encoder.model.embedder.embedder.normalization.weight''') )
rename_keys.append(('''backbone.0.body.bn1.bias''', '''backbone.conv_encoder.model.embedder.embedder.normalization.bias''') )
rename_keys.append(('''backbone.0.body.bn1.running_mean''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_mean''') )
rename_keys.append(('''backbone.0.body.bn1.running_var''', '''backbone.conv_encoder.model.embedder.embedder.normalization.running_var''') )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var""",
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean""",
) )
rename_keys.append(
(
f"""backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var""",
f"""backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var""",
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f"""transformer.encoder.layers.{i}.self_attn.out_proj.weight""",
f"""encoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.self_attn.out_proj.bias""", f"""encoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.weight""", f"""encoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear1.bias""", f"""encoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.weight""", f"""encoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.linear2.bias""", f"""encoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.weight""", f"""encoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm1.bias""", f"""encoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.encoder.layers.{i}.norm2.weight""", f"""encoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.encoder.layers.{i}.norm2.bias""", f"""encoder.layers.{i}.final_layer_norm.bias""") )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.self_attn.out_proj.weight""",
f"""decoder.layers.{i}.self_attn.out_proj.weight""",
) )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.self_attn.out_proj.bias""", f"""decoder.layers.{i}.self_attn.out_proj.bias""") )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.weight""",
f"""decoder.layers.{i}.encoder_attn.out_proj.weight""",
) )
rename_keys.append(
(
f"""transformer.decoder.layers.{i}.multihead_attn.out_proj.bias""",
f"""decoder.layers.{i}.encoder_attn.out_proj.bias""",
) )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.weight""", f"""decoder.layers.{i}.fc1.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear1.bias""", f"""decoder.layers.{i}.fc1.bias""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.weight""", f"""decoder.layers.{i}.fc2.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.linear2.bias""", f"""decoder.layers.{i}.fc2.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.weight""", f"""decoder.layers.{i}.self_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm1.bias""", f"""decoder.layers.{i}.self_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.weight""", f"""decoder.layers.{i}.encoder_attn_layer_norm.weight""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm2.bias""", f"""decoder.layers.{i}.encoder_attn_layer_norm.bias""") )
rename_keys.append(
(f"""transformer.decoder.layers.{i}.norm3.weight""", f"""decoder.layers.{i}.final_layer_norm.weight""") )
rename_keys.append((f"""transformer.decoder.layers.{i}.norm3.bias""", f"""decoder.layers.{i}.final_layer_norm.bias""") )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
] )
return rename_keys
def a ( A__ , A__ , A__ ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(A__ )
SCREAMING_SNAKE_CASE__ : List[str] = val
def a ( A__ , A__=False ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = ''''''
if is_panoptic:
SCREAMING_SNAKE_CASE__ : str = '''detr.'''
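# DETR's hidden size is 256, so each fused in_proj matrix stacks the query, key and value
# projections as rows [0:256], [256:512] and [-256:], which the slices below unpack.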
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
SCREAMING_SNAKE_CASE__ : List[str] = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[:2_5_6, :]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_bias[:2_5_6]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_weight[2_5_6:5_1_2, :]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_bias[2_5_6:5_1_2]
SCREAMING_SNAKE_CASE__ : str = in_proj_weight[-2_5_6:, :]
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_bias[-2_5_6:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
SCREAMING_SNAKE_CASE__ : Any = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
SCREAMING_SNAKE_CASE__ : Any = in_proj_weight[:2_5_6, :]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_bias[:2_5_6]
SCREAMING_SNAKE_CASE__ : List[str] = in_proj_weight[2_5_6:5_1_2, :]
SCREAMING_SNAKE_CASE__ : Dict = in_proj_bias[2_5_6:5_1_2]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_weight[-2_5_6:, :]
SCREAMING_SNAKE_CASE__ : Any = in_proj_bias[-2_5_6:]
# read in weights + bias of input projection layer of cross-attention
SCREAMING_SNAKE_CASE__ : Dict = state_dict.pop(
f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight""" )
SCREAMING_SNAKE_CASE__ : Tuple = state_dict.pop(f"""{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) of cross-attention to the state dict
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_proj_weight_cross_attn[:2_5_6, :]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = in_proj_bias_cross_attn[:2_5_6]
SCREAMING_SNAKE_CASE__ : str = in_proj_weight_cross_attn[2_5_6:5_1_2, :]
SCREAMING_SNAKE_CASE__ : Optional[int] = in_proj_bias_cross_attn[2_5_6:5_1_2]
SCREAMING_SNAKE_CASE__ : Dict = in_proj_weight_cross_attn[-2_5_6:, :]
SCREAMING_SNAKE_CASE__ : Any = in_proj_bias_cross_attn[-2_5_6:]
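# --- Editor sketch: PyTorch's nn.MultiheadAttention fuses the query, key and value
# projections into a single `in_proj_weight` of shape (3 * hidden, hidden); the loops
# above slice it into thirds (hidden = 256 for DETR). Standalone sanity check:
def _demo_split_in_proj() -> None:
    import torch
    hidden = 2_5_6
    in_proj_weight = torch.randn(3 * hidden, hidden)
    q = in_proj_weight[:hidden, :]
    k = in_proj_weight[hidden : 2 * hidden, :]
    v = in_proj_weight[-hidden:, :]
    assert q.shape == k.shape == v.shape == (hidden, hidden)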
def prepare_img( ) -> Dict:
    '''simple docstring'''
    url = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_detr_checkpoint(model_name , pytorch_dump_folder_path=None , push_to_hub=False ) -> int:
    '''simple docstring'''
    config, is_panoptic = get_detr_config(model_name )
    # load original model from torch hub
    model_name_to_original_name = {
        '''detr-resnet-50''': '''detr_resnet50''',
        '''detr-resnet-101''': '''detr_resnet101''',
    }
    logger.info(f"""Converting model {model_name}...""" )
    detr = torch.hub.load('''facebookresearch/detr''' , model_name_to_original_name[model_name] , pretrained=True ).eval()
    state_dict = detr.state_dict()
    # rename keys
    for src, dest in create_rename_keys(config ):
        if is_panoptic:
            src = '''detr.''' + src
        rename_key(state_dict , src , dest )
    # query, key and value matrices need special treatment
    read_in_q_k_v(state_dict , is_panoptic=is_panoptic )
    # important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
    prefix = '''detr.model.''' if is_panoptic else '''model.'''
    for key in state_dict.copy().keys():
        if is_panoptic:
            if (
                key.startswith('''detr''' )
                and not key.startswith('''class_labels_classifier''' )
                and not key.startswith('''bbox_predictor''' )
            ):
                val = state_dict.pop(key )
                state_dict['''detr.model''' + key[4:]] = val
            elif "class_labels_classifier" in key or "bbox_predictor" in key:
                val = state_dict.pop(key )
                state_dict['''detr.''' + key] = val
            elif key.startswith('''bbox_attention''' ) or key.startswith('''mask_head''' ):
                continue
            else:
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
        else:
            if not key.startswith('''class_labels_classifier''' ) and not key.startswith('''bbox_predictor''' ):
                val = state_dict.pop(key )
                state_dict[prefix + key] = val
    # finally, create HuggingFace model and load state dict
    model = DetrForSegmentation(config ) if is_panoptic else DetrForObjectDetection(config )
    model.load_state_dict(state_dict )
    model.eval()
    # verify our conversion on an image
    format = '''coco_panoptic''' if is_panoptic else '''coco_detection'''
    processor = DetrImageProcessor(format=format )
    encoding = processor(images=prepare_img() , return_tensors='''pt''' )
    pixel_values = encoding['''pixel_values''']
    original_outputs = detr(pixel_values )
    outputs = model(pixel_values )
    assert torch.allclose(outputs.logits , original_outputs['''pred_logits'''] , atol=1e-3 )
    assert torch.allclose(outputs.pred_boxes , original_outputs['''pred_boxes'''] , atol=1e-3 )
    if is_panoptic:
        assert torch.allclose(outputs.pred_masks , original_outputs['''pred_masks'''] , atol=1e-4 )
    print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        # Save model and image processor
        logger.info(f"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
        Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
        model.save_pretrained(pytorch_dump_folder_path )
        processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        # Upload model and image processor to the hub
        logger.info('''Uploading PyTorch model and image processor to the hub...''' )
        model.push_to_hub(f"""nielsr/{model_name}""" )
        processor.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='detr-resnet-50',
type=str,
choices=['detr-resnet-50', 'detr-resnet-101'],
help='Name of the DETR model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the model to the hub or not.')
    args = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
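# Example invocation (editor note; the file name is an assumption, the flags are the
# ones defined by the parser above):
#   python convert_detr_checkpoint.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50 --push_to_hub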
| 35 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def get_resize_output_image_size(input_image , output_size , keep_aspect_ratio , multiple ) -> Tuple[int, int]:
    '''simple docstring'''
    def constraint_to_multiple_of(val , multiple , min_val=0 , max_val=None ):
        x = round(val / multiple ) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple ) * multiple
        if x < min_val:
            x = math.ceil(val / multiple ) * multiple
        return x
    output_size = (output_size, output_size) if isinstance(output_size , int ) else output_size
    input_height, input_width = get_image_size(input_image )
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width ) < abs(1 - scale_height ):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height , multiple=multiple )
    new_width = constraint_to_multiple_of(scale_width * input_width , multiple=multiple )
    return (new_height, new_width)
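# --- Editor sketch: the nested `constraint_to_multiple_of` snaps a float size to the
# nearest multiple (rounding up if the result would fall below `min_val`). A small
# standalone re-implementation with spot checks:
def _demo_constraint_to_multiple_of() -> None:
    def snap(val, multiple=32, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x
    assert snap(383) == 384  # 383 / 32 ≈ 11.97 rounds to 12 -> 384
    assert snap(500) == 512  # 500 / 32 ≈ 15.63 rounds to 16 -> 512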
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
        if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
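# Usage sketch (editor addition): the class above is a DPT-style image processor; the
# processor name and variables below are assumptions for illustration only.
#   processor = DPTImageProcessor(size={"height": 384, "width": 384})
#   inputs = processor(images=image, return_tensors="pt")            # preprocess
#   maps = processor.post_process_semantic_segmentation(outputs)     # one map per image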
| 35 | 1 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def sigmoid_function(z ) -> List[Any]:
    '''simple docstring'''
    return 1 / (1 + np.exp(-z ))
def cost_function(h , y ) -> Any:
    '''simple docstring'''
    return (-y * np.log(h ) - (1 - y) * np.log(1 - h )).mean()
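# Editor note: the cost above is binary cross-entropy,
#   J(h, y) = -mean(y * log(h) + (1 - y) * log(1 - h)),
# e.g. a confident correct prediction h = 0.9 with y = 1 costs -log(0.9) ≈ 0.105.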
def log_likelihood(x , y , weights ) -> Tuple:
    '''simple docstring'''
    scores = np.dot(x , weights )
    return np.sum(y * scores - np.log(1 + np.exp(scores ) ) )
def logistic_reg(alpha , x , y , max_iterations=7_0_0_0_0 ) -> Tuple:
    '''simple docstring'''
    theta = np.zeros(x.shape[1] )
    for iterations in range(max_iterations ):
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        gradient = np.dot(x.T , h - y ) / y.size
        theta = theta - alpha * gradient  # updating the weights
        z = np.dot(x , theta )
        h = sigmoid_function(z )
        j = cost_function(h , y )
        if iterations % 1_0_0 == 0:
            print(f"""loss: {j} \t""" )  # printing the loss after every 100 iterations
    return theta
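# Editor recap of the update rule implemented above: each iteration computes
#   h = sigmoid(x @ theta), gradient = x.T @ (h - y) / y.size, theta <- theta - alpha * gradient,
# i.e. plain batch gradient descent on the cross-entropy loss.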
# In[68]:
if __name__ == "__main__":
    iris = datasets.load_iris()
    x = iris.data[:, :2]
    y = (iris.target != 0) * 1
    alpha = 0.1
    theta = logistic_reg(alpha, x, y, max_iterations=7_00_00)
    print('theta: ', theta)  # printing the theta, i.e. our weights vector
    def predict_prob(x ) -> int:
        '''simple docstring'''
        return sigmoid_function(
            np.dot(x , theta ) )  # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
    (xa_min, xa_max) = (x[:, 0].min(), x[:, 0].max())
    (xb_min, xb_max) = (x[:, 1].min(), x[:, 1].max())
    (xxa, xxb) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xb_min, xb_max))
    grid = np.c_[xxa.ravel(), xxb.ravel()]
    probs = predict_prob(grid).reshape(xxa.shape)
    plt.contour(xxa, xxb, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 35 |
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[str] = num_of_nodes
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Optional[int] , _lowercase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[Any] , _lowercase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ : Any = self.find_component(_lowercase )
def lowercase__ ( self : int , _lowercase : list[int] , _lowercase : int , _lowercase : int ):
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.find_component(_lowercase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE__ : Tuple = self.m_component[u]
SCREAMING_SNAKE_CASE__ : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : int = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = edge
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase , _lowercase , _lowercase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
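# --- Editor sketch: the class above implements Boruvka's MST algorithm (pick the
# cheapest edge leaving each component, then merge components). For brevity this
# standalone demo builds the same MST with a Kruskal-style union-find instead; all
# names here are hypothetical.
def _demo_mst() -> None:
    edges = [(0, 1, 1), (1, 2, 2), (0, 2, 3)]
    parent = list(range(3))
    def find(node: int) -> int:
        while parent[node] != node:
            node = parent[node]
        return node
    mst_weight = 0
    for u, v, w in sorted(edges, key=lambda e: e[2]):
        root_u, root_v = find(u), find(v)
        if root_u != root_v:  # edge connects two components -> take it
            parent[root_u] = root_v
            mst_weight += w
    assert mst_weight == 3  # edges (0-1, w=1) and (1-2, w=2)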
| 35 | 1 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('>=', '4.25.0')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 35 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
    from .tokenization_wav2vec2_phoneme import Wav2Vec2PhonemeCTCTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
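# --- Editor sketch: `_LazyModule` defers the real import until first attribute
# access. A minimal standalone illustration of the same idea (a toy class, not the
# actual implementation):
import importlib

class _DemoLazyModule:
    def __init__(self, module_name: str) -> None:
        self._module_name = module_name
        self._module = None

    def __getattr__(self, attr: str):
        # only reached when normal attribute lookup fails
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, attr)

# _DemoLazyModule("math").sqrt(4.0) == 2.0, importing `math` only on first use.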
| 35 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = 'Run commands across TPU VMs for initial setup before running `accelerate launch`.'
def tpu_command_parser(subparsers=None ) -> Tuple:
    '''simple docstring'''
    if subparsers is not None:
        parser = subparsers.add_parser('''tpu-config''' , description=_description )
    else:
        parser = argparse.ArgumentParser('''Accelerate tpu-config command''' , description=_description )
    # Core arguments
    config_args = parser.add_argument_group(
        '''Config Arguments''' , '''Arguments that can be configured through `accelerate config`.''' )
config_args.add_argument(
'''--config_file''' , type=A__ , default=A__ , help='''Path to the config file to use for accelerate.''' , )
config_args.add_argument(
'''--tpu_name''' , default=A__ , help='''The name of the TPU to use. If not specified, will use the TPU specified in the config file.''' , )
config_args.add_argument(
'''--tpu_zone''' , default=A__ , help='''The zone of the TPU to use. If not specified, will use the zone specified in the config file.''' , )
    pod_args = parser.add_argument_group('''TPU Arguments''' , '''Arguments for options run inside the TPU.''' )
pod_args.add_argument(
'''--use_alpha''' , action='''store_true''' , help='''Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.''' , )
pod_args.add_argument(
'''--command_file''' , default=A__ , help='''The path to the file containing the commands to run on the pod on startup.''' , )
pod_args.add_argument(
'''--command''' , action='''append''' , nargs='''+''' , help='''A command to run on the pod. Can be passed multiple times.''' , )
pod_args.add_argument(
'''--install_accelerate''' , action='''store_true''' , help='''Whether to install accelerate on the pod. Defaults to False.''' , )
pod_args.add_argument(
'''--accelerate_version''' , default='''latest''' , help='''The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.''' , )
pod_args.add_argument(
'''--debug''' , action='''store_true''' , help='''If set, will print the command that would be run instead of running it.''' )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def tpu_command_launcher(args ) -> List[str]:
    '''simple docstring'''
    defaults = None
    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file ):
        defaults = load_config_from_file(args.config_file )
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = '''git+https://github.com/huggingface/accelerate.git'''
    elif args.accelerate_version == "latest":
        args.accelerate_version = '''accelerate -U'''
    elif isinstance(parse(args.accelerate_version ) , Version ):
        args.accelerate_version = f"""accelerate=={args.accelerate_version}"""
    if not args.command_file and not args.command:
        raise ValueError('''You must specify either a command file or a command to run on the pod.''' )
    if args.command_file:
        with open(args.command_file , '''r''' ) as f:
            args.command = [f.read().splitlines()]
    # To turn list of lists into list of strings
    if isinstance(args.command[0] , list ):
        args.command = [line for cmd in args.command for line in cmd]
    # Default to the shared folder and install accelerate
    new_cmd = ['''cd /usr/share''']
    if args.install_accelerate:
        new_cmd += [f"""pip install {args.accelerate_version}"""]
    new_cmd += args.command
    args.command = '''; '''.join(new_cmd )
    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ['''gcloud''']
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"""Running {' '.join(cmd )}""" )
        return
    subprocess.run(cmd )
    print('''Successfully setup pod.''' )
def main( ) -> int:
    '''simple docstring'''
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args )
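# Example invocation (editor note; the flags are the ones defined by the parser above):
#   accelerate tpu-config --tpu_name my-tpu --tpu_zone us-central1-a \
#       --command "echo hello" --install_accelerate --debug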
| 35 |
def base16_encode(data ) -> str:
    '''simple docstring'''
    return "".join([hex(byte )[2:].zfill(2 ).upper() for byte in list(data )] )
def base16_decode(data ) -> bytes:
    '''simple docstring'''
    if (len(data ) % 2) != 0:
        raise ValueError(
            '''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
    # Check the character set - the standard base16 alphabet
    # is uppercase according to RFC3548 section 6
    if not set(data ) <= set('''0123456789ABCDEF''' ):
        raise ValueError(
            '''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
    # For every two hexadecimal digits (= a byte), turn it into an integer.
    # Then, string the result together into bytes, and return it.
    return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(data ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
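# Editor check: a quick round trip of the two helpers above.
def _demo_base16_roundtrip() -> None:
    assert base16_encode(b"Hello") == "48656C6C6F"
    assert base16_decode("48656C6C6F") == b"Hello"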
| 35 | 1 |
from __future__ import annotations
def a ( A__ ) -> float:
'''simple docstring'''
if not nums:
raise ValueError('''List is empty''' )
return sum(A__ ) / len(A__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowercase ( unittest.TestCase ):
lowerCamelCase : List[Any] = inspect.getfile(accelerate.test_utils )
lowerCamelCase : Optional[int] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['''scripts''', '''test_cli.py'''] )
lowerCamelCase : Any = ['''accelerate''', '''launch''']
lowerCamelCase : Dict = Path.home() / '''.cache/huggingface/accelerate'''
lowerCamelCase : Optional[int] = '''default_config.yaml'''
lowerCamelCase : Optional[Any] = config_folder / config_file
lowerCamelCase : Optional[Any] = config_folder / '''_default_config.yaml'''
lowerCamelCase : Optional[Any] = Path('''tests/test_configs''' )
@classmethod
def lowercase__ ( cls : Any ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def lowercase__ ( cls : List[Any] ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Dict = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Tuple ):
for config in sorted(self.test_config_path.glob('''**/*.yaml''' ) ):
with self.subTest(config_file=_lowercase ):
execute_subprocess_async(
self.base_cmd + ['''--config_file''', str(_lowercase ), self.test_file_path] , env=os.environ.copy() )
def lowercase__ ( self : Optional[int] ):
execute_subprocess_async(['''accelerate''', '''test'''] , env=os.environ.copy() )
class lowercase ( unittest.TestCase ):
lowerCamelCase : str = '''test-tpu'''
lowerCamelCase : Tuple = '''us-central1-a'''
lowerCamelCase : Optional[int] = '''ls'''
lowerCamelCase : Dict = ['''accelerate''', '''tpu-config''']
lowerCamelCase : Tuple = '''cd /usr/share'''
lowerCamelCase : List[Any] = '''tests/test_samples/test_command_file.sh'''
lowerCamelCase : Any = '''Running gcloud compute tpus tpu-vm ssh'''
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = run_command(
self.cmd
+ ['''--command''', self.command, '''--tpu_zone''', self.tpu_zone, '''--tpu_name''', self.tpu_name, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command''',
self.command,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--debug'''] , return_stdout=_lowercase )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command''', self.command, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--command''',
self.command,
'''--command''',
'''echo "Hello World"''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all""" , _lowercase , )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Any = run_command(
self.cmd
+ ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--command_file''', self.command_file, '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/0_12_0.yaml''',
'''--command_file''',
self.command_file,
'''--tpu_zone''',
self.tpu_zone,
'''--tpu_name''',
self.tpu_name,
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : List[Any] = run_command(
self.cmd + ['''--config_file''', '''tests/test_configs/latest.yaml''', '''--install_accelerate''', '''--debug'''] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = run_command(
self.cmd
+ [
'''--config_file''',
'''tests/test_configs/latest.yaml''',
'''--install_accelerate''',
'''--accelerate_version''',
'''12.0.0''',
'''--debug''',
] , return_stdout=_lowercase , )
self.assertIn(
f"""{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all""" , _lowercase , )
| 35 | 1 |
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowercase ( unittest.TestCase ):
def lowercase__ ( self : Any ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = FlaxStableDiffusionPipeline.from_pretrained(
'''stabilityai/stable-diffusion-2''' , revision='''bf16''' , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE__ : Dict = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : Optional[int] = num_samples * [prompt]
SCREAMING_SNAKE_CASE__ : Optional[int] = sd_pipe.prepare_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : int = replicate(_lowercase )
SCREAMING_SNAKE_CASE__ : str = shard(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : Any = jax.random.split(_lowercase , jax.device_count() )
SCREAMING_SNAKE_CASE__ : List[str] = sd_pipe(_lowercase , _lowercase , _lowercase , num_inference_steps=25 , jit=_lowercase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
SCREAMING_SNAKE_CASE__ : Optional[int] = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : Tuple = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : List[str] = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : str = '''stabilityai/stable-diffusion-2'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = FlaxDPMSolverMultistepScheduler.from_pretrained(_lowercase , subfolder='''scheduler''' )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = FlaxStableDiffusionPipeline.from_pretrained(
_lowercase , scheduler=_lowercase , revision='''bf16''' , dtype=jnp.bfloataa , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = scheduler_params
SCREAMING_SNAKE_CASE__ : List[Any] = '''A painting of a squirrel eating a burger'''
SCREAMING_SNAKE_CASE__ : List[Any] = jax.device_count()
SCREAMING_SNAKE_CASE__ : str = num_samples * [prompt]
SCREAMING_SNAKE_CASE__ : List[Any] = sd_pipe.prepare_inputs(_lowercase )
SCREAMING_SNAKE_CASE__ : str = replicate(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = shard(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = jax.random.PRNGKey(0 )
SCREAMING_SNAKE_CASE__ : int = jax.random.split(_lowercase , jax.device_count() )
SCREAMING_SNAKE_CASE__ : int = sd_pipe(_lowercase , _lowercase , _lowercase , num_inference_steps=25 , jit=_lowercase )[0]
assert images.shape == (jax.device_count(), 1, 7_68, 7_68, 3)
SCREAMING_SNAKE_CASE__ : str = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
SCREAMING_SNAKE_CASE__ : str = images[0, 2_53:2_56, 2_53:2_56, -1]
SCREAMING_SNAKE_CASE__ : List[Any] = jnp.asarray(jax.device_get(image_slice.flatten() ) )
SCREAMING_SNAKE_CASE__ : List[Any] = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f"""output_slice: {output_slice}""" )
assert jnp.abs(output_slice - expected_slice ).max() < 1E-2
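# Editor note: in the tests above, `replicate` copies the pipeline params to every
# local device and `shard` reshapes host arrays from (batch, ...) to
# (num_devices, batch // num_devices, ...) so the jitted pipeline runs data-parallel.
# Shape-only illustration (assumes batch is divisible by the device count):
#   x = jnp.zeros((8, 77))
#   shard(x).shape == (jax.device_count(), 8 // jax.device_count(), 77)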
| 35 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_groupvit': [
'GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'GroupViTConfig',
'GroupViTOnnxConfig',
'GroupViTTextConfig',
'GroupViTVisionConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_groupvit'''] = [
'GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'GroupViTModel',
'GroupViTPreTrainedModel',
'GroupViTTextModel',
'GroupViTVisionModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_groupvit'''] = [
'TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFGroupViTModel',
'TFGroupViTPreTrainedModel',
'TFGroupViTTextModel',
'TFGroupViTVisionModel',
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
import collections
import os
import re
from pathlib import Path
a_ :Union[str, Any] = 'src/transformers'
# Matches is_xxx_available()
a_ :int = re.compile(r'is\_([a-z_]*)_available()')
# Catches a one-line _import_struct = {xxx}
a_ :List[Any] = re.compile(r'^_import_structure\s+=\s+\{([^\}]+)\}')
# Catches a line with a key-values pattern: "bla": ["foo", "bar"]
a_ :Any = re.compile(r'\s+"\S*":\s+\[([^\]]*)\]')
# Catches a line if not is_foo_available
a_ :int = re.compile(r'^\s*if\s+not\s+is\_[a-z_]*\_available\(\)')
# Catches a line _import_struct["bla"].append("foo")
a_ :Union[str, Any] = re.compile(r'^\s*_import_structure\["\S*"\]\.append\("(\S*)"\)')
# Catches a line _import_struct["bla"].extend(["foo", "bar"]) or _import_struct["bla"] = ["foo", "bar"]
a_ :Union[str, Any] = re.compile(r'^\s*_import_structure\[\S*\](?:\.extend\(|\s*=\s+)\[([^\]]*)\]')
# Catches a line with an object between quotes and a comma: "MyModel",
a_ :Optional[int] = re.compile(r'^\s+"([^"]+)",')
# Catches a line with objects between brackets only: ["foo", "bar"],
a_ :Union[str, Any] = re.compile(r'^\s+\[([^\]]+)\]')
# Catches a line with from foo import bar, bla, boo
a_ :Optional[Any] = re.compile(r'\s+from\s+\S*\s+import\s+([^\(\s].*)\n')
# Catches a line with try:
a_ :Tuple = re.compile(r'^\s*try:')
# Catches a line with else:
a_ :Union[str, Any] = re.compile(r'^\s*else:')
def find_backend(line ) -> int:
    '''simple docstring'''
    if _re_test_backend.search(line ) is None:
        return None
    backends = [b[0] for b in _re_backend.findall(line )]
    backends.sort()
    return "_and_".join(backends )
def a ( A__ ) -> Any:
'''simple docstring'''
with open(A__ , '''r''' , encoding='''utf-8''' , newline='''\n''' ) as f:
SCREAMING_SNAKE_CASE__ : Tuple = f.readlines()
SCREAMING_SNAKE_CASE__ : int = 0
while line_index < len(A__ ) and not lines[line_index].startswith('''_import_structure = {''' ):
line_index += 1
# If this is a traditional init, just return.
if line_index >= len(A__ ):
return None
# First grab the objects without a specific backend in _import_structure
SCREAMING_SNAKE_CASE__ : Any = []
while not lines[line_index].startswith('''if TYPE_CHECKING''' ) and find_backend(lines[line_index] ) is None:
SCREAMING_SNAKE_CASE__ : List[str] = lines[line_index]
# If we have everything on a single line, let's deal with it.
if _re_one_line_import_struct.search(A__ ):
SCREAMING_SNAKE_CASE__ : Any = _re_one_line_import_struct.search(A__ ).groups()[0]
SCREAMING_SNAKE_CASE__ : Tuple = re.findall(r'''\[([^\]]+)\]''' , A__ )
for imp in imports:
objects.extend([obj[1:-1] for obj in imp.split(''', ''' )] )
line_index += 1
continue
SCREAMING_SNAKE_CASE__ : str = _re_import_struct_key_value.search(A__ )
if single_line_import_search is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = [obj[1:-1] for obj in single_line_import_search.groups()[0].split(''', ''' ) if len(A__ ) > 0]
objects.extend(A__ )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
line_index += 1
SCREAMING_SNAKE_CASE__ : List[str] = {'''none''': objects}
# Let's continue with backend-specific objects in _import_structure
while not lines[line_index].startswith('''if TYPE_CHECKING''' ):
# If the line is an if not is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE__ : List[str] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE__ : int = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 4 ):
SCREAMING_SNAKE_CASE__ : List[Any] = lines[line_index]
if _re_import_struct_add_one.search(A__ ) is not None:
objects.append(_re_import_struct_add_one.search(A__ ).groups()[0] )
elif _re_import_struct_add_many.search(A__ ) is not None:
SCREAMING_SNAKE_CASE__ : Dict = _re_import_struct_add_many.search(A__ ).groups()[0].split(''', ''' )
SCREAMING_SNAKE_CASE__ : int = [obj[1:-1] for obj in imports if len(A__ ) > 0]
objects.extend(A__ )
elif _re_between_brackets.search(A__ ) is not None:
SCREAMING_SNAKE_CASE__ : str = _re_between_brackets.search(A__ ).groups()[0].split(''', ''' )
SCREAMING_SNAKE_CASE__ : str = [obj[1:-1] for obj in imports if len(A__ ) > 0]
objects.extend(A__ )
elif _re_quote_object.search(A__ ) is not None:
objects.append(_re_quote_object.search(A__ ).groups()[0] )
elif line.startswith(''' ''' * 8 + '''"''' ):
objects.append(line[9:-3] )
elif line.startswith(''' ''' * 1_2 + '''"''' ):
objects.append(line[1_3:-3] )
line_index += 1
SCREAMING_SNAKE_CASE__ : List[str] = objects
else:
line_index += 1
# At this stage we are in the TYPE_CHECKING part, first grab the objects without a specific backend
SCREAMING_SNAKE_CASE__ : Optional[int] = []
while (
line_index < len(A__ )
and find_backend(lines[line_index] ) is None
and not lines[line_index].startswith('''else''' )
):
SCREAMING_SNAKE_CASE__ : Any = lines[line_index]
SCREAMING_SNAKE_CASE__ : Optional[Any] = _re_import.search(A__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 8 ):
objects.append(line[8:-2] )
line_index += 1
SCREAMING_SNAKE_CASE__ : List[str] = {'''none''': objects}
# Let's continue with backend-specific objects
while line_index < len(A__ ):
# If the line is an if is_backend_available, we grab all objects associated.
SCREAMING_SNAKE_CASE__ : Union[str, Any] = find_backend(lines[line_index] )
# Check if the backend declaration is inside a try block:
if _re_try.search(lines[line_index - 1] ) is None:
SCREAMING_SNAKE_CASE__ : Dict = None
if backend is not None:
line_index += 1
# Scroll until we hit the else block of try-except-else
while _re_else.search(lines[line_index] ) is None:
line_index += 1
line_index += 1
SCREAMING_SNAKE_CASE__ : List[str] = []
# Until we unindent, add backend objects to the list
while len(lines[line_index] ) <= 1 or lines[line_index].startswith(''' ''' * 8 ):
SCREAMING_SNAKE_CASE__ : Tuple = lines[line_index]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = _re_import.search(A__ )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(''', ''' ) )
elif line.startswith(''' ''' * 1_2 ):
objects.append(line[1_2:-2] )
line_index += 1
SCREAMING_SNAKE_CASE__ : Any = objects
else:
line_index += 1
return import_dict_objects, type_hint_objects
def a ( A__ , A__ ) -> List[str]:
'''simple docstring'''
def find_duplicates(A__ ):
return [k for k, v in collections.Counter(A__ ).items() if v > 1]
if list(import_dict_objects.keys() ) != list(type_hint_objects.keys() ):
return ["Both sides of the init do not have the same backends!"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for key in import_dict_objects.keys():
SCREAMING_SNAKE_CASE__ : Dict = find_duplicates(import_dict_objects[key] )
if duplicate_imports:
errors.append(f"""Duplicate _import_structure definitions for: {duplicate_imports}""" )
SCREAMING_SNAKE_CASE__ : List[str] = find_duplicates(type_hint_objects[key] )
if duplicate_type_hints:
errors.append(f"""Duplicate TYPE_CHECKING objects for: {duplicate_type_hints}""" )
if sorted(set(import_dict_objects[key] ) ) != sorted(set(type_hint_objects[key] ) ):
SCREAMING_SNAKE_CASE__ : int = '''base imports''' if key == '''none''' else f"""{key} backend"""
errors.append(f"""Differences for {name}:""" )
for a in type_hint_objects[key]:
if a not in import_dict_objects[key]:
errors.append(f""" {a} in TYPE_HINT but not in _import_structure.""" )
for a in import_dict_objects[key]:
if a not in type_hint_objects[key]:
errors.append(f""" {a} in _import_structure but not in TYPE_HINT.""" )
return errors
def a ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = []
for root, _, files in os.walk(A__ ):
if "__init__.py" in files:
SCREAMING_SNAKE_CASE__ : Any = os.path.join(A__ , '''__init__.py''' )
SCREAMING_SNAKE_CASE__ : int = parse_init(A__ )
if objects is not None:
SCREAMING_SNAKE_CASE__ : List[Any] = analyze_results(*A__ )
if len(A__ ) > 0:
SCREAMING_SNAKE_CASE__ : Tuple = f"""Problem in {fname}, both halves do not define the same objects.\n{errors[0]}"""
failures.append('''\n'''.join(A__ ) )
if len(A__ ) > 0:
raise ValueError('''\n\n'''.join(A__ ) )
def a ( ) -> Dict:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for path, directories, files in os.walk(A__ ):
for folder in directories:
# Ignore private modules
if folder.startswith('''_''' ):
directories.remove(A__ )
continue
# Ignore leftovers from branches (empty folders apart from pycache)
if len(list((Path(A__ ) / folder).glob('''*.py''' ) ) ) == 0:
continue
SCREAMING_SNAKE_CASE__ : int = str((Path(A__ ) / folder).relative_to(A__ ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = short_path.replace(os.path.sep , '''.''' )
submodules.append(A__ )
for fname in files:
if fname == "__init__.py":
continue
SCREAMING_SNAKE_CASE__ : Optional[Any] = str((Path(A__ ) / fname).relative_to(A__ ) )
SCREAMING_SNAKE_CASE__ : str = short_path.replace('''.py''' , '''''' ).replace(os.path.sep , '''.''' )
if len(submodule.split('''.''' ) ) == 1:
submodules.append(A__ )
return submodules
a_ :Any = [
'convert_pytorch_checkpoint_to_tf2',
'modeling_flax_pytorch_utils',
'models.esm.openfold_utils',
]
def a ( ) -> Union[str, Any]:
'''simple docstring'''
from transformers.utils import direct_transformers_import
SCREAMING_SNAKE_CASE__ : Any = direct_transformers_import(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = set(transformers._import_structure.keys() )
# This contains all the base keys of the _import_structure object defined in the init, but if the user is missing
# some optional dependencies, they may not have all of them. Thus we read the init to read all additions and
    # (potentially re-)add them.
with open(os.path.join(A__ , '''__init__.py''' ) , '''r''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = f.read()
import_structure_keys.update(set(re.findall(r'''import_structure\[\"([^\"]*)\"\]''' , A__ ) ) )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
module
for module in get_transformers_submodules()
if module not in IGNORE_SUBMODULES and module not in import_structure_keys
]
if len(A__ ) > 0:
SCREAMING_SNAKE_CASE__ : List[str] = '''\n'''.join(f"""- {module}""" for module in module_not_registered )
raise ValueError(
            '''The following submodules are not properly registered in the main init of Transformers:\n'''
f"""{list_of_modules}\n"""
'''Make sure they appear somewhere in the keys of `_import_structure` with an empty list as value.''' )
if __name__ == "__main__":
check_all_inits()
check_submodules()
| 35 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Optional[int] ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = {'''col_1''': [3, 2, 1, 0], '''col_2''': ['''a''', '''b''', '''c''', '''d''']}
return Dataset.from_dict(_lowercase )
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._create_example_records()
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertListEqual(dset.column_names , ['''col_1''', '''col_2'''] )
for i, r in enumerate(_lowercase ):
self.assertDictEqual(_lowercase , example_records[i] )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = self._create_example_records()
SCREAMING_SNAKE_CASE__ : Optional[int] = Dataset.from_list(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def lowercase__ ( self : List[Any] ): # checks what happens with missing columns
SCREAMING_SNAKE_CASE__ : List[str] = [{'''col_1''': 1}, {'''col_2''': '''x'''}]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Dataset.from_list(_lowercase )
self.assertDictEqual(dset[0] , {'''col_1''': 1} )
self.assertDictEqual(dset[1] , {'''col_1''': None} ) # NB: first record is used for columns
def lowercase__ ( self : int ): # checks if the type can be inferred from the second record
SCREAMING_SNAKE_CASE__ : int = [{'''col_1''': []}, {'''col_1''': [1, 2]}]
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list(_lowercase )
self.assertEqual(dset.info.features['''col_1'''] , Sequence(Value('''int64''' ) ) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = Dataset.from_list([] )
self.assertEqual(len(_lowercase ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 35 | 1 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class lowercase ( _UpperCAmelCase ):
def __init__( self : Optional[Any] , _lowercase : NestedDataStructureLike[PathLike] , _lowercase : Optional[NamedSplit] = None , _lowercase : Optional[Features] = None , _lowercase : str = None , _lowercase : bool = False , _lowercase : bool = False , _lowercase : Optional[str] = None , _lowercase : Optional[int] = None , **_lowercase : str , ):
super().__init__(
_lowercase , split=_lowercase , features=_lowercase , cache_dir=_lowercase , keep_in_memory=_lowercase , streaming=_lowercase , num_proc=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Tuple = field
SCREAMING_SNAKE_CASE__ : List[Any] = path_or_paths if isinstance(_lowercase , _lowercase ) else {self.split: path_or_paths}
SCREAMING_SNAKE_CASE__ : Any = Json(
cache_dir=_lowercase , data_files=_lowercase , features=_lowercase , field=_lowercase , **_lowercase , )
def lowercase__ ( self : int ):
# Build iterable dataset
if self.streaming:
SCREAMING_SNAKE_CASE__ : List[Any] = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
self.builder.download_and_prepare(
download_config=_lowercase , download_mode=_lowercase , verification_mode=_lowercase , base_path=_lowercase , num_proc=self.num_proc , )
SCREAMING_SNAKE_CASE__ : List[Any] = self.builder.as_dataset(
split=self.split , verification_mode=_lowercase , in_memory=self.keep_in_memory )
return dataset
class lowercase :
def __init__( self : List[str] , _lowercase : Dataset , _lowercase : Union[PathLike, BinaryIO] , _lowercase : Optional[int] = None , _lowercase : Optional[int] = None , **_lowercase : List[Any] , ):
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
SCREAMING_SNAKE_CASE__ : List[str] = dataset
SCREAMING_SNAKE_CASE__ : Tuple = path_or_buf
SCREAMING_SNAKE_CASE__ : Any = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
SCREAMING_SNAKE_CASE__ : Dict = num_proc
SCREAMING_SNAKE_CASE__ : Any = '''utf-8'''
SCREAMING_SNAKE_CASE__ : int = to_json_kwargs
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : Tuple = self.to_json_kwargs.pop('''path_or_buf''' , _lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.to_json_kwargs.pop('''orient''' , '''records''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.to_json_kwargs.pop('''lines''' , True if orient == '''records''' else False )
SCREAMING_SNAKE_CASE__ : List[Any] = self.to_json_kwargs.pop('''index''' , False if orient in ['''split''', '''table'''] else True )
SCREAMING_SNAKE_CASE__ : str = self.to_json_kwargs.pop('''compression''' , _lowercase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf , (str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf , '''wb''' , compression=_lowercase ) as buffer:
SCREAMING_SNAKE_CASE__ : Tuple = self._write(file_obj=_lowercase , orient=_lowercase , lines=_lowercase , index=_lowercase , **self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
''' was passed. Please provide a local path instead.''' )
SCREAMING_SNAKE_CASE__ : int = self._write(
file_obj=self.path_or_buf , orient=_lowercase , lines=_lowercase , index=_lowercase , **self.to_json_kwargs )
return written
def lowercase__ ( self : Dict , _lowercase : Optional[Any] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = args
SCREAMING_SNAKE_CASE__ : Tuple = query_table(
table=self.dataset.data , key=slice(_lowercase , offset + self.batch_size ) , indices=self.dataset._indices , )
SCREAMING_SNAKE_CASE__ : List[Any] = batch.to_pandas().to_json(
path_or_buf=_lowercase , orient=_lowercase , lines=_lowercase , index=_lowercase , **_lowercase )
if not json_str.endswith('''\n''' ):
json_str += "\n"
return json_str.encode(self.encoding )
def lowercase__ ( self : Union[str, Any] , _lowercase : BinaryIO , _lowercase : Dict , _lowercase : str , _lowercase : Any , **_lowercase : Union[str, Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 , len(self.dataset ) , self.batch_size ) , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
SCREAMING_SNAKE_CASE__ : List[Any] = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(_lowercase )
else:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json , [(offset, orient, lines, index, to_json_kwargs) for offset in range(0 , _lowercase , _lowercase )] , ) , total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size , unit='''ba''' , disable=not logging.is_progress_bar_enabled() , desc='''Creating json from Arrow format''' , ):
written += file_obj.write(_lowercase )
return written
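# --- usage sketch (illustrative; not part of the original file) ---------------
# Assuming the de-obfuscated API of `datasets.io.json` (the class above is the
# JSON dataset writer and its first method above is `write`), dumping a dataset
# to JSON Lines could look like this; `JsonDatasetWriter` is that assumed name
# and the output path is an example value.
def _demo_to_jsonl(dataset, path="out.jsonl"):
    # single worker, default batch size; returns the number of bytes written
    return JsonDatasetWriter(dataset, path, num_proc=1).write()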
| 35 |
import pickle
import numpy as np
from matplotlib import pyplot as plt
class lowercase :
def __init__( self : List[str] , _lowercase : Tuple , _lowercase : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : Optional[int] , _lowercase : str=0.2 , _lowercase : str=0.2 ):
SCREAMING_SNAKE_CASE__ : List[Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = bp_numa
SCREAMING_SNAKE_CASE__ : List[str] = conva_get[:2]
SCREAMING_SNAKE_CASE__ : str = conva_get[2]
SCREAMING_SNAKE_CASE__ : Any = size_pa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rate_w
SCREAMING_SNAKE_CASE__ : Tuple = rate_t
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [
np.mat(-1 * np.random.rand(self.conva[0] , self.conva[0] ) + 0.5 )
for i in range(self.conva[1] )
]
SCREAMING_SNAKE_CASE__ : Dict = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : int = np.mat(-1 * np.random.rand(self.num_bpa , self.num_bpa ) + 0.5 )
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.conva[1] ) + 1
SCREAMING_SNAKE_CASE__ : Dict = -2 * np.random.rand(self.num_bpa ) + 1
SCREAMING_SNAKE_CASE__ : str = -2 * np.random.rand(self.num_bpa ) + 1
def lowercase__ ( self : Union[str, Any] , _lowercase : Any ):
# save model dict with pickle
SCREAMING_SNAKE_CASE__ : Dict = {
'''num_bp1''': self.num_bpa,
'''num_bp2''': self.num_bpa,
'''num_bp3''': self.num_bpa,
'''conv1''': self.conva,
'''step_conv1''': self.step_conva,
'''size_pooling1''': self.size_poolinga,
'''rate_weight''': self.rate_weight,
'''rate_thre''': self.rate_thre,
'''w_conv1''': self.w_conva,
'''wkj''': self.wkj,
'''vji''': self.vji,
'''thre_conv1''': self.thre_conva,
'''thre_bp2''': self.thre_bpa,
'''thre_bp3''': self.thre_bpa,
}
with open(_lowercase , '''wb''' ) as f:
pickle.dump(_lowercase , _lowercase )
print(f"""Model saved: {save_path}""" )
@classmethod
def lowercase__ ( cls : Dict , _lowercase : int ):
# read saved model
with open(_lowercase , '''rb''' ) as f:
SCREAMING_SNAKE_CASE__ : Optional[Any] = pickle.load(_lowercase ) # noqa: S301
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''conv1''' )
conv_get.append(model_dic.get('''step_conv1''' ) )
SCREAMING_SNAKE_CASE__ : Tuple = model_dic.get('''size_pooling1''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''num_bp1''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp2''' )
SCREAMING_SNAKE_CASE__ : Dict = model_dic.get('''num_bp3''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''rate_weight''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''rate_thre''' )
# create model instance
SCREAMING_SNAKE_CASE__ : Dict = CNN(_lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase )
        # modify model parameters
SCREAMING_SNAKE_CASE__ : List[str] = model_dic.get('''w_conv1''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = model_dic.get('''wkj''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_dic.get('''vji''' )
SCREAMING_SNAKE_CASE__ : str = model_dic.get('''thre_conv1''' )
SCREAMING_SNAKE_CASE__ : Any = model_dic.get('''thre_bp2''' )
SCREAMING_SNAKE_CASE__ : List[Any] = model_dic.get('''thre_bp3''' )
return conv_ins
def lowercase__ ( self : str , _lowercase : Optional[int] ):
return 1 / (1 + np.exp(-1 * x ))
def lowercase__ ( self : Union[str, Any] , _lowercase : List[str] ):
return round(_lowercase , 3 )
def lowercase__ ( self : List[str] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] , _lowercase : Union[str, Any] ):
# convolution process
SCREAMING_SNAKE_CASE__ : Tuple = convs[0]
SCREAMING_SNAKE_CASE__ : Optional[Any] = convs[1]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.shape(_lowercase )[0]
        # get the data slices of the original image data, data_focus
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
for j_focus in range(0 , size_data - size_conv + 1 , _lowercase ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = data[
i_focus : i_focus + size_conv, j_focus : j_focus + size_conv
]
data_focus.append(_lowercase )
        # calculate the feature map of every single kernel, and save it as a list of matrices
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int((size_data - size_conv) / conv_step + 1 )
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Tuple = (
np.sum(np.multiply(data_focus[i_focus] , w_convs[i_map] ) )
- thre_convs[i_map]
)
featuremap.append(self.sig(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(_lowercase ).reshape(
_lowercase , _lowercase )
data_featuremap.append(_lowercase )
        # expand the data slices to one dimension
SCREAMING_SNAKE_CASE__ : int = []
for each_focus in data_focus:
focusa_list.extend(self.Expand_Mat(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asarray(_lowercase )
return focus_list, data_featuremap
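    # worked example of the feature-map size computed above: with size_data = 28,
    # size_conv = 5 and conv_step = 1, size_feature_map = int((28 - 5) / 1 + 1) = 24,
    # i.e. each kernel yields a 24 x 24 map (assuming a 28 x 28 input, as in MNIST)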
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Union[str, Any] , _lowercase : Optional[Any]="average_pool" ):
# pooling process
SCREAMING_SNAKE_CASE__ : List[str] = len(featuremaps[0] )
SCREAMING_SNAKE_CASE__ : List[Any] = int(size_map / size_pooling )
SCREAMING_SNAKE_CASE__ : List[str] = []
for i_map in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Any = featuremaps[i_map]
SCREAMING_SNAKE_CASE__ : int = []
for i_focus in range(0 , _lowercase , _lowercase ):
for j_focus in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Dict = feature_map[
i_focus : i_focus + size_pooling,
j_focus : j_focus + size_pooling,
]
if pooling_type == "average_pool":
# average pooling
map_pooled.append(np.average(_lowercase ) )
elif pooling_type == "max_pooling":
# max pooling
map_pooled.append(np.max(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.asmatrix(_lowercase ).reshape(_lowercase , _lowercase )
featuremap_pooled.append(_lowercase )
return featuremap_pooled
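    # e.g. pooling a 24 x 24 feature map with size_pooling = 2 yields
    # size_pooled = int(24 / 2) = 12, i.e. a 12 x 12 pooled map per feature map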
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Any] ):
        # expand three-dimensional data to a one-dimensional list
SCREAMING_SNAKE_CASE__ : Dict = []
for i in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = np.shape(data[i] )
SCREAMING_SNAKE_CASE__ : Tuple = data[i].reshape(1 , shapes[0] * shapes[1] )
SCREAMING_SNAKE_CASE__ : Dict = data_listed.getA().tolist()[0]
data_expanded.extend(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = np.asarray(_lowercase )
return data_expanded
def lowercase__ ( self : Tuple , _lowercase : Optional[int] ):
        # expand a matrix to a one-dimensional list
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.asarray(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : str = data_mat.reshape(1 , shapes[0] * shapes[1] )
return data_expanded
def lowercase__ ( self : List[str] , _lowercase : List[str] , _lowercase : List[str] , _lowercase : Any , _lowercase : Optional[Any] , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Dict = 0
for i_map in range(_lowercase ):
SCREAMING_SNAKE_CASE__ : Any = np.ones((size_map, size_map) )
for i in range(0 , _lowercase , _lowercase ):
for j in range(0 , _lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ : Tuple = pd_pool[
i_pool
]
SCREAMING_SNAKE_CASE__ : Dict = i_pool + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = np.multiply(
_lowercase , np.multiply(out_map[i_map] , (1 - out_map[i_map]) ) )
pd_all.append(_lowercase )
return pd_all
def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : Tuple , _lowercase : Optional[int] , _lowercase : Any , _lowercase : Tuple , _lowercase : int=bool ):
        # model training
print('''----------------------Start Training-------------------------''' )
print((''' - - Shape: Train_Data ''', np.shape(_lowercase )) )
print((''' - - Shape: Teach_Data ''', np.shape(_lowercase )) )
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[int] = 1_00_00
while rp < n_repeat and mse >= error_accuracy:
SCREAMING_SNAKE_CASE__ : List[Any] = 0
print(f"""-------------Learning Time {rp}--------------""" )
for p in range(len(_lowercase ) ):
# print('------------Learning Image: %d--------------'%p)
SCREAMING_SNAKE_CASE__ : Any = np.asmatrix(datas_train[p] )
SCREAMING_SNAKE_CASE__ : str = np.asarray(datas_teach[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : int = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.shape(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.vji.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Any = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(_lowercase , self.wkj.T ) - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sig(_lowercase )
                # --------------Model Learning ------------------------
# calculate error and gradient---------------
SCREAMING_SNAKE_CASE__ : Tuple = np.multiply(
(data_teach - bp_outa) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.multiply(
np.dot(_lowercase , self.wkj ) , np.multiply(_lowercase , (1 - bp_outa) ) )
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(_lowercase , self.vji )
SCREAMING_SNAKE_CASE__ : Dict = pd_i_all / (self.size_poolinga * self.size_poolinga)
SCREAMING_SNAKE_CASE__ : List[str] = pd_conva_pooled.T.getA().tolist()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self._calculate_gradient_from_pool(
_lowercase , _lowercase , shape_featuremapa[0] , shape_featuremapa[1] , self.size_poolinga , )
# weight and threshold learning process---------
# convolution layer
for k_conv in range(self.conva[1] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand_mat(pd_conva_all[k_conv] )
SCREAMING_SNAKE_CASE__ : Dict = self.rate_weight * np.dot(_lowercase , _lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.w_conva[k_conv] + delta_w.reshape(
(self.conva[0], self.conva[0]) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = (
self.thre_conva[k_conv]
- np.sum(pd_conva_all[k_conv] ) * self.rate_thre
)
# all connected layer
SCREAMING_SNAKE_CASE__ : List[str] = self.wkj + pd_k_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.vji + pd_j_all.T * bp_outa * self.rate_weight
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.thre_bpa - pd_k_all * self.rate_thre
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.thre_bpa - pd_j_all * self.rate_thre
                # accumulate the error of each single image
SCREAMING_SNAKE_CASE__ : int = np.sum(abs(data_teach - bp_outa ) )
error_count += errors
# print(' ----Teach ',data_teach)
# print(' ----BP_output ',bp_out3)
SCREAMING_SNAKE_CASE__ : Optional[Any] = rp + 1
SCREAMING_SNAKE_CASE__ : List[str] = error_count / patterns
all_mse.append(_lowercase )
def draw_error():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [error_accuracy for i in range(int(n_repeat * 1.2 ) )]
plt.plot(_lowercase , '''+-''' )
plt.plot(_lowercase , '''r--''' )
plt.xlabel('''Learning Times''' )
plt.ylabel('''All_mse''' )
plt.grid(_lowercase , alpha=0.5 )
plt.show()
        print('''------------------Training Complete---------------------''' )
print((''' - - Training epoch: ''', rp, f""" - - Mse: {mse:.6f}""") )
if draw_e:
draw_error()
return mse
def lowercase__ ( self : Union[str, Any] , _lowercase : int ):
        # model prediction
SCREAMING_SNAKE_CASE__ : Dict = []
print('''-------------------Start Testing-------------------------''' )
print((''' - - Shape: Test_Data ''', np.shape(_lowercase )) )
for p in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Optional[int] = np.asmatrix(datas_test[p] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Any = self.pooling(_lowercase , self.size_poolinga )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self._expand(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = data_bp_input
SCREAMING_SNAKE_CASE__ : Optional[int] = bp_outa * self.vji.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Tuple = self.sig(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = bp_outa * self.wkj.T - self.thre_bpa
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.sig(_lowercase )
produce_out.extend(bp_outa.getA().tolist() )
SCREAMING_SNAKE_CASE__ : str = [list(map(self.do_round , _lowercase ) ) for each in produce_out]
return np.asarray(_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Tuple ):
        # return the image data after the convolution process so we can inspect it
SCREAMING_SNAKE_CASE__ : str = np.asmatrix(_lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Any = self.convolute(
_lowercase , self.conva , self.w_conva , self.thre_conva , conv_step=self.step_conva , )
SCREAMING_SNAKE_CASE__ : Dict = self.pooling(_lowercase , self.size_poolinga )
return data_conveda, data_pooleda
if __name__ == "__main__":
pass
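# --- standalone sketch (not part of the original file) ------------------------
# The save/read methods above are a plain pickle round trip of a parameter
# dict; the mechanism in isolation (`pickle` is already imported at the top,
# and the keys below are stand-in values):
def _demo_pickle_roundtrip(path="params.pkl"):
    params = {"w_conv1": [[0.1, -0.2]], "thre_bp2": 0.5}
    with open(path, "wb") as f:
        pickle.dump(params, f)
    with open(path, "rb") as f:
        restored = pickle.load(f)  # noqa: S301 - trusted local file
    assert restored == params
    return restored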
| 35 | 1 |
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowercase :
def __init__( self : List[Any] , _lowercase : List[str] , _lowercase : Dict=13 , _lowercase : Any=30 , _lowercase : Optional[int]=2 , _lowercase : List[str]=3 , _lowercase : Optional[Any]=True , _lowercase : Optional[Any]=True , _lowercase : Tuple=32 , _lowercase : Any=2 , _lowercase : int=4 , _lowercase : str=37 , _lowercase : str="gelu" , _lowercase : int=0.1 , _lowercase : Tuple=0.1 , _lowercase : int=10 , _lowercase : Union[str, Any]=0.02 , _lowercase : Any=3 , _lowercase : str=None , _lowercase : int=2 , ):
SCREAMING_SNAKE_CASE__ : List[Any] = parent
SCREAMING_SNAKE_CASE__ : Any = batch_size
SCREAMING_SNAKE_CASE__ : Optional[int] = image_size
SCREAMING_SNAKE_CASE__ : str = patch_size
SCREAMING_SNAKE_CASE__ : str = num_channels
SCREAMING_SNAKE_CASE__ : Dict = is_training
SCREAMING_SNAKE_CASE__ : str = use_labels
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_size
SCREAMING_SNAKE_CASE__ : Any = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = intermediate_size
SCREAMING_SNAKE_CASE__ : int = hidden_act
SCREAMING_SNAKE_CASE__ : str = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : str = type_sequence_label_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = initializer_range
SCREAMING_SNAKE_CASE__ : List[Any] = scope
SCREAMING_SNAKE_CASE__ : Union[str, Any] = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
SCREAMING_SNAKE_CASE__ : Optional[int] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ : Dict = num_patches + 2
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
SCREAMING_SNAKE_CASE__ : Dict = self.get_config()
return config, pixel_values, labels
def lowercase__ ( self : str ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_lowercase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def lowercase__ ( self : List[Any] , _lowercase : Dict , _lowercase : Any , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Tuple = TFDeiTModel(config=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = model(_lowercase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowercase__ ( self : Optional[int] , _lowercase : Union[str, Any] , _lowercase : int , _lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : Tuple = TFDeiTForMaskedImageModeling(config=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = model(_lowercase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : Tuple = 1
SCREAMING_SNAKE_CASE__ : Optional[Any] = TFDeiTForMaskedImageModeling(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowercase__ ( self : List[Any] , _lowercase : Any , _lowercase : List[Any] , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.type_sequence_label_size
SCREAMING_SNAKE_CASE__ : List[Any] = TFDeiTForImageClassification(_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
SCREAMING_SNAKE_CASE__ : str = 1
SCREAMING_SNAKE_CASE__ : Tuple = TFDeiTForImageClassification(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = config_and_inputs
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_tf
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Optional[Any] = (
(
TFDeiTModel,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
)
if is_tf_available()
else ()
)
lowerCamelCase : Optional[Any] = (
{
'''feature-extraction''': TFDeiTModel,
'''image-classification''': (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
}
if is_tf_available()
else {}
)
lowerCamelCase : Optional[Any] = False
lowerCamelCase : Optional[Any] = False
lowerCamelCase : str = False
lowerCamelCase : List[Any] = False
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Dict = TFDeiTModelTester(self )
SCREAMING_SNAKE_CASE__ : List[str] = ConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase , hidden_size=37 )
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''DeiT does not use inputs_embeds''' )
def lowercase__ ( self : Any ):
pass
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Any = model_class(_lowercase )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
SCREAMING_SNAKE_CASE__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowercase , tf.keras.layers.Dense ) )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[str] = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : str = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : int = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*_lowercase )
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Dict , _lowercase : Dict , _lowercase : List[Any]=False ):
SCREAMING_SNAKE_CASE__ : int = super()._prepare_for_class(_lowercase , _lowercase , return_labels=_lowercase )
if return_labels:
if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call ).parameters:
del inputs_dict["labels"]
return inputs_dict
@slow
def lowercase__ ( self : str ):
for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : List[str] = TFDeiTModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def a ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : int ):
return (
DeiTImageProcessor.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
if is_vision_available()
else None
)
@slow
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : List[str] = TFDeiTForImageClassificationWithTeacher.from_pretrained('''facebook/deit-base-distilled-patch16-224''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processor(images=_lowercase , return_tensors='''tf''' )
# forward pass
SCREAMING_SNAKE_CASE__ : str = model(**_lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : Tuple = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , _lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = tf.constant([-1.0266, 0.1912, -1.2861] )
self.assertTrue(np.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
| 35 |
import unittest
from transformers import TrOCRConfig
from transformers.testing_utils import is_torch_available, require_torch, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers.models.trocr.modeling_trocr import TrOCRDecoder, TrOCRForCausalLM
@require_torch
class lowercase :
def __init__( self : Any , _lowercase : List[Any] , _lowercase : Optional[Any]=99 , _lowercase : Optional[int]=13 , _lowercase : Tuple=16 , _lowercase : Union[str, Any]=7 , _lowercase : Optional[Any]=True , _lowercase : int=True , _lowercase : Optional[Any]=True , _lowercase : str=False , _lowercase : Union[str, Any]=True , _lowercase : Tuple=2 , _lowercase : Any=32 , _lowercase : int=4 , _lowercase : Dict=4 , _lowercase : Dict=30 , _lowercase : Union[str, Any]=0 , _lowercase : List[str]=1 , _lowercase : Optional[Any]=2 , _lowercase : Tuple=None , ):
SCREAMING_SNAKE_CASE__ : Any = parent
SCREAMING_SNAKE_CASE__ : List[Any] = batch_size
SCREAMING_SNAKE_CASE__ : List[str] = decoder_seq_length
# For common tests
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = is_training
SCREAMING_SNAKE_CASE__ : Tuple = use_attention_mask
SCREAMING_SNAKE_CASE__ : Any = use_labels
SCREAMING_SNAKE_CASE__ : Any = vocab_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = d_model
SCREAMING_SNAKE_CASE__ : Tuple = d_model
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_layers
SCREAMING_SNAKE_CASE__ : List[str] = decoder_layers
SCREAMING_SNAKE_CASE__ : Optional[Any] = decoder_ffn_dim
SCREAMING_SNAKE_CASE__ : List[Any] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : Optional[int] = decoder_attention_heads
SCREAMING_SNAKE_CASE__ : str = eos_token_id
SCREAMING_SNAKE_CASE__ : List[Any] = bos_token_id
SCREAMING_SNAKE_CASE__ : str = pad_token_id
SCREAMING_SNAKE_CASE__ : str = decoder_start_token_id
SCREAMING_SNAKE_CASE__ : Optional[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = None
SCREAMING_SNAKE_CASE__ : int = decoder_seq_length
SCREAMING_SNAKE_CASE__ : Optional[int] = 2
SCREAMING_SNAKE_CASE__ : Tuple = 1
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[Any] = None
if self.use_attention_mask:
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor([self.batch_size, self.decoder_seq_length] , vocab_size=2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : List[Any] = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRConfig(
vocab_size=self.vocab_size , d_model=self.d_model , decoder_layers=self.decoder_layers , decoder_ffn_dim=self.decoder_ffn_dim , decoder_attention_heads=self.decoder_attention_heads , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , use_cache=self.use_cache , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , max_position_embeddings=self.max_position_embeddings , )
return (config, input_ids, attention_mask, lm_labels)
def lowercase__ ( self : Dict , _lowercase : Any , _lowercase : Dict , _lowercase : Optional[Any] , _lowercase : Optional[Any] , ):
SCREAMING_SNAKE_CASE__ : Dict = True
SCREAMING_SNAKE_CASE__ : Optional[int] = TrOCRDecoder(config=_lowercase ).to(_lowercase ).eval()
SCREAMING_SNAKE_CASE__ : Optional[int] = input_ids[:2]
input_ids[input_ids == 0] += 1
# first forward pass
SCREAMING_SNAKE_CASE__ : Optional[Any] = model(_lowercase , use_cache=_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = model(_lowercase , use_cache=_lowercase )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) )
self.parent.assertTrue(len(_lowercase ) == len(_lowercase ) + 1 )
SCREAMING_SNAKE_CASE__ : int = outputs['''past_key_values''']
        # create hypothetical next token and extend to next_input_ids
SCREAMING_SNAKE_CASE__ : List[str] = ids_tensor((2, 1) , config.vocab_size - 1 ) + 1
        # append next tokens to input_ids
SCREAMING_SNAKE_CASE__ : Tuple = torch.cat([input_ids, next_tokens] , dim=-1 )
SCREAMING_SNAKE_CASE__ : int = model(_lowercase )['''last_hidden_state''']
SCREAMING_SNAKE_CASE__ : List[Any] = model(_lowercase , past_key_values=_lowercase )['''last_hidden_state''']
# select random slice
SCREAMING_SNAKE_CASE__ : int = ids_tensor((1,) , output_from_past.shape[-1] ).item()
SCREAMING_SNAKE_CASE__ : Dict = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
SCREAMING_SNAKE_CASE__ : str = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(_lowercase , _lowercase , atol=1E-3 )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Optional[int] = config_and_inputs
SCREAMING_SNAKE_CASE__ : int = {'''input_ids''': input_ids, '''attention_mask''': attention_mask}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : List[str] = (TrOCRDecoder, TrOCRForCausalLM) if is_torch_available() else ()
lowerCamelCase : Dict = (TrOCRForCausalLM,) if is_torch_available() else ()
lowerCamelCase : Tuple = {'''text-generation''': TrOCRForCausalLM} if is_torch_available() else {}
lowerCamelCase : Any = True
lowerCamelCase : int = False
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = TrOCRStandaloneDecoderModelTester(self , is_training=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ConfigTester(self , config_class=_lowercase )
def lowercase__ ( self : Optional[Any] ):
pass
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : str ):
pass
def lowercase__ ( self : Dict ):
self.config_tester.run_common_tests()
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
return
@unittest.skip('''The model doesn\'t support left padding''' ) # and it's not used enough to be worth fixing :)
def lowercase__ ( self : Tuple ):
pass
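# --- standalone sketch (illustrative; not part of the original tests) ---------
# The caching pattern verified by `create_and_check_decoder_model_past` above,
# in isolation: run the prefix once with use_cache=True, then feed only the
# newest token together with the returned cache. The config values below are
# small example numbers chosen for speed.
def _demo_cached_decoder_step():
    import torch
    from transformers import TrOCRConfig, TrOCRForCausalLM

    config = TrOCRConfig(
        vocab_size=99, d_model=32, decoder_layers=2, decoder_attention_heads=4, decoder_ffn_dim=32
    )
    model = TrOCRForCausalLM(config).eval()
    input_ids = torch.tensor([[1, 5, 7]])
    with torch.no_grad():
        out = model(input_ids, use_cache=True)
        # feed only the new token plus the cache instead of the full prefix
        step = model(torch.tensor([[9]]), past_key_values=out.past_key_values)
    return step.logits.shape  # (batch=1, new_tokens=1, vocab_size=99)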
| 35 | 1 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=None , _lowercase : Tuple=1 , _lowercase : List[Any]="binary" , _lowercase : Any=None , _lowercase : Optional[int]="warn" , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
_lowercase , _lowercase , labels=_lowercase , pos_label=_lowercase , average=_lowercase , sample_weight=_lowercase , zero_division=_lowercase , )
return {"recall": float(_lowercase ) if score.size == 1 else score}
| 35 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
| 35 | 1 |
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowercase ( _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Tuple = LayoutLMTokenizer
lowerCamelCase : Any = LayoutLMTokenizerFast
lowerCamelCase : Tuple = True
lowerCamelCase : List[Any] = True
def lowercase__ ( self : Optional[int] ):
super().setUp()
SCREAMING_SNAKE_CASE__ : Optional[int] = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
SCREAMING_SNAKE_CASE__ : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
def lowercase__ ( self : Optional[int] , **_lowercase : str ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : Any ):
SCREAMING_SNAKE_CASE__ : str = '''UNwant\u00E9d,running'''
SCREAMING_SNAKE_CASE__ : Any = '''unwanted, running'''
return input_text, output_text
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer_class(self.vocab_file )
SCREAMING_SNAKE_CASE__ : Any = tokenizer.tokenize('''UNwant\u00E9d,running''' )
self.assertListEqual(_lowercase , ['''un''', '''##want''', '''##ed''', ''',''', '''runn''', '''##ing'''] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowercase ) , [7, 4, 5, 10, 8, 9] )
def lowercase__ ( self : str ):
pass
| 35 |
from __future__ import annotations
def a ( A__ , A__ , A__ ) -> dict[str, float]:
'''simple docstring'''
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if resistance < 0:
raise ValueError('''Resistance cannot be negative''' )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
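# --- readable restatement (not part of the original file) ---------------------
# The obfuscated signature above takes (voltage, current, resistance); exactly
# one of the three must be passed as 0 and is solved for via V = I * R. A named
# sketch of the same rule, with an example call in the trailing comment:
def _ohms_law_demo(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    if current == 0:
        return {"current": voltage / resistance}
    return {"resistance": voltage / current}

# _ohms_law_demo(voltage=10, current=0, resistance=5) == {"current": 2.0}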
| 35 | 1 |
def a ( A__ , A__ , A__ , A__ , A__ , ) -> float:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
        raise ValueError('''Input parameters cannot be negative''' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('''Relative densities cannot be greater than one''' )
else:
SCREAMING_SNAKE_CASE__ : Any = 1 - (matter_density + radiation_density + dark_energy)
SCREAMING_SNAKE_CASE__ : Optional[int] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
SCREAMING_SNAKE_CASE__ : Tuple = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
a_ :int = 0.3
print(
hubble_parameter(
hubble_constant=6_8.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 35 |
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
a_ :Tuple = logging.get_logger(__name__)
a_ :Optional[Any] = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
a_ :Optional[int] = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Dict = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Any = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
a_ :Optional[int] = {
'facebook/dpr-ctx_encoder-single-nq-base': 5_12,
'facebook/dpr-ctx_encoder-multiset-base': 5_12,
}
a_ :List[Any] = {
'facebook/dpr-question_encoder-single-nq-base': 5_12,
'facebook/dpr-question_encoder-multiset-base': 5_12,
}
a_ :Tuple = {
'facebook/dpr-reader-single-nq-base': 5_12,
'facebook/dpr-reader-multiset-base': 5_12,
}
a_ :str = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Optional[int] = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
a_ :Any = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = VOCAB_FILES_NAMES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : int = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : Optional[Any] = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : Optional[int] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ :List[str] = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
a_ :Optional[int] = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
a_ :Tuple = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(_UpperCAmelCase )
class lowercase :
def __call__( self : List[Any] , _lowercase : Any , _lowercase : Optional[str] = None , _lowercase : Optional[str] = None , _lowercase : Union[bool, str] = False , _lowercase : Union[bool, str] = False , _lowercase : Optional[int] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : Optional[bool] = None , **_lowercase : str , ):
if titles is None and texts is None:
return super().__call__(
_lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
elif titles is None or texts is None:
SCREAMING_SNAKE_CASE__ : List[str] = titles if texts is None else texts
return super().__call__(
_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase , max_length=_lowercase , return_tensors=_lowercase , return_attention_mask=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Optional[Any] = titles if not isinstance(_lowercase , _lowercase ) else [titles]
SCREAMING_SNAKE_CASE__ : Optional[int] = texts if not isinstance(_lowercase , _lowercase ) else [texts]
SCREAMING_SNAKE_CASE__ : List[Any] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : str = questions if not isinstance(_lowercase , _lowercase ) else [questions] * n_passages
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
f"""There should be as many titles than texts but got {len(_lowercase )} titles and {len(_lowercase )} texts.""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = super().__call__(_lowercase , _lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Tuple = super().__call__(_lowercase , add_special_tokens=_lowercase , padding=_lowercase , truncation=_lowercase )['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''input_ids''': [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_lowercase , _lowercase )
]
}
if return_attention_mask is not False:
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
SCREAMING_SNAKE_CASE__ : Dict = attention_mask
return self.pad(_lowercase , padding=_lowercase , max_length=_lowercase , return_tensors=_lowercase )
def lowercase__ ( self : List[Any] , _lowercase : BatchEncoding , _lowercase : DPRReaderOutput , _lowercase : int = 16 , _lowercase : int = 64 , _lowercase : int = 4 , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = reader_input['''input_ids''']
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = reader_output[:3]
SCREAMING_SNAKE_CASE__ : Any = len(_lowercase )
SCREAMING_SNAKE_CASE__ : int = sorted(range(_lowercase ) , reverse=_lowercase , key=relevance_logits.__getitem__ )
SCREAMING_SNAKE_CASE__ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
SCREAMING_SNAKE_CASE__ : Optional[int] = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
SCREAMING_SNAKE_CASE__ : Any = sequence_ids.index(self.sep_token_id , 2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
SCREAMING_SNAKE_CASE__ : Dict = sequence_ids.index(self.pad_token_id )
else:
SCREAMING_SNAKE_CASE__ : List[str] = len(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=_lowercase , top_spans=_lowercase , )
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=_lowercase , start_index=_lowercase , end_index=_lowercase , text=self.decode(sequence_ids[start_index : end_index + 1] ) , ) )
if len(_lowercase ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def lowercase__ ( self : Dict , _lowercase : List[int] , _lowercase : List[int] , _lowercase : int , _lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
for start_index, start_score in enumerate(_lowercase ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
SCREAMING_SNAKE_CASE__ : Optional[int] = sorted(_lowercase , key=lambda _lowercase : x[1] , reverse=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for (start_index, end_index), score in scores:
if start_index > end_index:
raise ValueError(f"""Wrong span indices: [{start_index}:{end_index}]""" )
SCREAMING_SNAKE_CASE__ : Tuple = end_index - start_index + 1
if length > max_answer_length:
raise ValueError(f"""Span is too long: {length} > {max_answer_length}""" )
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_lowercase ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase , _UpperCAmelCase ):
lowerCamelCase : Dict = VOCAB_FILES_NAMES
lowerCamelCase : Union[str, Any] = READER_PRETRAINED_VOCAB_FILES_MAP
lowerCamelCase : List[str] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCamelCase : str = READER_PRETRAINED_INIT_CONFIGURATION
lowerCamelCase : List[Any] = ['''input_ids''', '''attention_mask''']
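# --- standalone sketch (illustrative; not part of the original file) ----------
# The span-selection idea implemented by `_get_best_spans` above: score every
# candidate span as start_logit + end_logit, sort candidates by score, and keep
# the highest-scoring spans that do not overlap an already chosen span.
def _demo_best_spans(start_logits, end_logits, max_answer_length=3, top_spans=2):
    scores = []
    for start, s_score in enumerate(start_logits):
        for length, e_score in enumerate(end_logits[start : start + max_answer_length]):
            scores.append(((start, start + length), s_score + e_score))
    scores.sort(key=lambda item: item[1], reverse=True)
    chosen = []
    for (start, end), _ in scores:
        if any(s <= start <= end <= e or start <= s <= e <= end for s, e in chosen):
            continue  # overlaps a previously chosen span
        chosen.append((start, end))
        if len(chosen) == top_spans:
            break
    return chosen

# _demo_best_spans([0.1, 2.0, 0.3], [0.2, 0.1, 1.5]) -> [(1, 2), (0, 0)]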
| 35 | 1 |
import unittest
from transformers import load_tool
from .test_tools_common import ToolTesterMixin
a_ :Union[str, Any] = '\nHugging Face was founded in 2016 by French entrepreneurs Clément Delangue, Julien Chaumond, and Thomas Wolf originally as a company that developed a chatbot app targeted at teenagers.[2] After open-sourcing the model behind the chatbot, the company pivoted to focus on being a platform for machine learning.\n\nIn March 2021, Hugging Face raised $40 million in a Series B funding round.[3]\n\nOn April 28, 2021, the company launched the BigScience Research Workshop in collaboration with several other research groups to release an open large language model.[4] In 2022, the workshop concluded with the announcement of BLOOM, a multilingual large language model with 176 billion parameters.[5]\n'
class lowercase ( unittest.TestCase , _UpperCAmelCase ):
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ : Dict = load_tool('''text-question-answering''' )
self.tool.setup()
SCREAMING_SNAKE_CASE__ : Optional[Any] = load_tool('''text-question-answering''' , remote=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tool(_lowercase , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_lowercase , '''launched the BigScience Research Workshop''' )
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = self.remote_tool(_lowercase , '''What did Hugging Face do in April 2021?''' )
self.assertEqual(_lowercase , '''launched the BigScience Research Workshop''' )
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : Dict = self.tool(text=_lowercase , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_lowercase , '''launched the BigScience Research Workshop''' )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : int = self.remote_tool(text=_lowercase , question='''What did Hugging Face do in April 2021?''' )
self.assertEqual(_lowercase , '''launched the BigScience Research Workshop''' )
| 35 |
import random
def a ( A__ ) -> bool:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = num - 1
SCREAMING_SNAKE_CASE__ : Optional[int] = 0
while s % 2 == 0:
SCREAMING_SNAKE_CASE__ : Optional[Any] = s // 2
t += 1
for _ in range(5 ):
SCREAMING_SNAKE_CASE__ : int = random.randrange(2 , num - 1 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pow(A__ , A__ , A__ )
if v != 1:
SCREAMING_SNAKE_CASE__ : List[str] = 0
while v != (num - 1):
if i == t - 1:
return False
else:
SCREAMING_SNAKE_CASE__ : Any = i + 1
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (v**2) % num
return True
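# Self-contained reference version of the probabilistic test above (assumption:
# the obfuscated function is the classic Miller-Rabin test with 5 random
# witnesses). Error probability is at most 4**-rounds for composite inputs.
def miller_rabin_reference(num: int, rounds: int = 5) -> bool:
    if num < 2:
        return False
    if num in (2, 3):
        return True
    if num % 2 == 0:
        return False
    s, t = num - 1, 0
    while s % 2 == 0:  # write num - 1 as s * 2**t with s odd
        s //= 2
        t += 1
    for _ in range(rounds):
        a = random.randrange(2, num - 1)
        v = pow(a, s, num)
        if v == 1:
            continue
        for _ in range(t):  # square v up to t times, looking for num - 1
            if v == num - 1:
                break
            v = (v * v) % num
        else:
            return False  # no witness sequence reached num - 1 -> composite
    return True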
def a ( A__ ) -> bool:
'''simple docstring'''
if num < 2:
return False
    SCREAMING_SNAKE_CASE__ : Optional[int] = [
        2, 3, 5, 7, 1_1, 1_3, 1_7, 1_9, 2_3, 2_9, 3_1, 3_7, 4_1, 4_3, 4_7, 5_3, 5_9, 6_1, 6_7, 7_1,
        7_3, 7_9, 8_3, 8_9, 9_7, 1_0_1, 1_0_3, 1_0_7, 1_0_9, 1_1_3, 1_2_7, 1_3_1, 1_3_7, 1_3_9, 1_4_9, 1_5_1, 1_5_7, 1_6_3, 1_6_7, 1_7_3,
        1_7_9, 1_8_1, 1_9_1, 1_9_3, 1_9_7, 1_9_9, 2_1_1, 2_2_3, 2_2_7, 2_2_9, 2_3_3, 2_3_9, 2_4_1, 2_5_1, 2_5_7, 2_6_3, 2_6_9, 2_7_1, 2_7_7, 2_8_1,
        2_8_3, 2_9_3, 3_0_7, 3_1_1, 3_1_3, 3_1_7, 3_3_1, 3_3_7, 3_4_7, 3_4_9, 3_5_3, 3_5_9, 3_6_7, 3_7_3, 3_7_9, 3_8_3, 3_8_9, 3_9_7, 4_0_1, 4_0_9,
        4_1_9, 4_2_1, 4_3_1, 4_3_3, 4_3_9, 4_4_3, 4_4_9, 4_5_7, 4_6_1, 4_6_3, 4_6_7, 4_7_9, 4_8_7, 4_9_1, 4_9_9, 5_0_3, 5_0_9, 5_2_1, 5_2_3, 5_4_1,
        5_4_7, 5_5_7, 5_6_3, 5_6_9, 5_7_1, 5_7_7, 5_8_7, 5_9_3, 5_9_9, 6_0_1, 6_0_7, 6_1_3, 6_1_7, 6_1_9, 6_3_1, 6_4_1, 6_4_3, 6_4_7, 6_5_3, 6_5_9,
        6_6_1, 6_7_3, 6_7_7, 6_8_3, 6_9_1, 7_0_1, 7_0_9, 7_1_9, 7_2_7, 7_3_3, 7_3_9, 7_4_3, 7_5_1, 7_5_7, 7_6_1, 7_6_9, 7_7_3, 7_8_7, 7_9_7, 8_0_9,
        8_1_1, 8_2_1, 8_2_3, 8_2_7, 8_2_9, 8_3_9, 8_5_3, 8_5_7, 8_5_9, 8_6_3, 8_7_7, 8_8_1, 8_8_3, 8_8_7, 9_0_7, 9_1_1, 9_1_9, 9_2_9, 9_3_7, 9_4_1,
        9_4_7, 9_5_3, 9_6_7, 9_7_1, 9_7_7, 9_8_3, 9_9_1, 9_9_7,
    ]
if num in low_primes:
return True
for prime in low_primes:
if (num % prime) == 0:
return False
return rabin_miller(A__ )
def a ( A__ = 1_0_2_4 ) -> int:
'''simple docstring'''
while True:
SCREAMING_SNAKE_CASE__ : Any = random.randrange(2 ** (keysize - 1) , 2 ** (keysize) )
if is_prime_low_num(A__ ):
return num
if __name__ == "__main__":
a_ :Dict = generate_large_prime()
    print('Prime number:', num)
    print('is_prime_low_num:', is_prime_low_num(num))
| 35 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def a ( A__ ) -> Optional[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[Any] = [
'''encoder.version''',
'''decoder.version''',
'''model.encoder.version''',
'''model.decoder.version''',
'''decoder.output_projection.weight''',
'''_float_tensor''',
'''encoder.embed_positions._float_tensor''',
'''decoder.embed_positions._float_tensor''',
]
for k in ignore_keys:
state_dict.pop(A__ , A__ )
def a ( A__ ) -> List[Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = emb.weight.shape
SCREAMING_SNAKE_CASE__ : List[Any] = nn.Linear(A__ , A__ , bias=A__ )
SCREAMING_SNAKE_CASE__ : Tuple = emb.weight.data
return lin_layer
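# Shape-explicit sketch of the same weight-tying idea (hypothetical name). Note
# the function above writes the (vocab_size, emb_size) embedding matrix straight
# into the layer's .data; the variant below keeps nn.Linear's (out, in) layout.
def tie_lm_head(emb: nn.Embedding) -> nn.Linear:
    vocab_size, emb_size = emb.weight.shape
    lin = nn.Linear(emb_size, vocab_size, bias=False)  # weight: (vocab_size, emb_size)
    lin.weight.data = emb.weight.data  # reuse the embedding tensor as the LM head
    return lin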
def a ( A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = torch.load(A__ , map_location='''cpu''' )
SCREAMING_SNAKE_CASE__ : Tuple = mam_aaa['''args'''] or mam_aaa['''cfg''']['''model''']
SCREAMING_SNAKE_CASE__ : Any = mam_aaa['''model''']
remove_ignore_keys_(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = state_dict['''encoder.embed_tokens.weight'''].shape[0]
SCREAMING_SNAKE_CASE__ : Any = MaMaaaConfig(
vocab_size=A__ , max_position_embeddings=1_0_2_4 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='''relu''' , )
SCREAMING_SNAKE_CASE__ : List[str] = state_dict['''decoder.embed_tokens.weight''']
SCREAMING_SNAKE_CASE__ : Dict = MaMaaaForConditionalGeneration(A__ )
model.model.load_state_dict(A__ , strict=A__ )
SCREAMING_SNAKE_CASE__ : int = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
a_ :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
a_ :Any = parser.parse_args()
    a_ :int = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 35 |
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def a ( A__ ) -> List[Any]:
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def a ( A__ , A__ ) -> Any:
'''simple docstring'''
return (-y * np.log(A__ ) - (1 - y) * np.log(1 - h )).mean()
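# The value above is the mean binary cross-entropy
#   J(theta) = -(1/m) * sum_i [ y_i * log(h_i) + (1 - y_i) * log(1 - h_i) ],
# where h_i = sigmoid(x_i . theta); its gradient, (1/m) * X^T (h - y), is exactly
# what the training loop below descends along.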
def a ( A__ , A__ , A__ ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = np.dot(A__ , A__ )
return np.sum(y * scores - np.log(1 + np.exp(A__ ) ) )
def a ( A__ , A__ , A__ , A__=7_0_0_0_0 ) -> Tuple:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : int = np.zeros(x.shape[1] )
for iterations in range(A__ ):
SCREAMING_SNAKE_CASE__ : List[Any] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : Dict = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : int = np.dot(x.T , h - y ) / y.size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = theta - alpha * gradient # updating the weights
SCREAMING_SNAKE_CASE__ : Optional[int] = np.dot(A__ , A__ )
SCREAMING_SNAKE_CASE__ : int = sigmoid_function(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = cost_function(A__ , A__ )
if iterations % 1_0_0 == 0:
print(f"""loss: {j} \t""" ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
a_ :str = datasets.load_iris()
a_ :Dict = iris.data[:, :2]
a_ :int = (iris.target != 0) * 1
a_ :Dict = 0.1
a_ :str = logistic_reg(alpha, x, y, max_iterations=7_00_00)
print('theta: ', theta) # printing the theta i.e our weights vector
def a ( A__ ) -> int:
'''simple docstring'''
return sigmoid_function(
np.dot(A__ , A__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color='b', label='0')
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color='r', label='1')
((a_) , (a_)) :str = (x[:, 0].min(), x[:, 0].max())
((a_) , (a_)) :Tuple = (x[:, 1].min(), x[:, 1].max())
((a_) , (a_)) :Dict = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
a_ :Optional[int] = np.c_[xxa.ravel(), xxa.ravel()]
a_ :Optional[int] = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors='black')
plt.legend()
plt.show()
| 35 | 1 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
a_ :Dict = 'CompVis/stable-diffusion-v1-1'
a_ :str = 'CompVis/stable-diffusion-v1-2'
a_ :List[str] = 'CompVis/stable-diffusion-v1-3'
a_ :Dict = 'CompVis/stable-diffusion-v1-4'
class lowercase ( _UpperCAmelCase ):
def __init__( self : Any , _lowercase : AutoencoderKL , _lowercase : CLIPTextModel , _lowercase : CLIPTokenizer , _lowercase : UNetaDConditionModel , _lowercase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , _lowercase : StableDiffusionSafetyChecker , _lowercase : CLIPImageProcessor , _lowercase : bool = True , ):
        super().__init__()
SCREAMING_SNAKE_CASE__ : int = StableDiffusionPipeline.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = StableDiffusionPipeline.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = StableDiffusionPipeline.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[int] = StableDiffusionPipeline(
vae=_lowercase , text_encoder=_lowercase , tokenizer=_lowercase , unet=_lowercase , scheduler=_lowercase , safety_checker=_lowercase , feature_extractor=_lowercase , requires_safety_checker=_lowercase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def lowercase__ ( self : Any ):
return {k: getattr(self , _lowercase ) for k in self.config.keys() if not k.startswith('''_''' )}
def lowercase__ ( self : Optional[Any] , _lowercase : Optional[Union[str, int]] = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(_lowercase )
def lowercase__ ( self : List[str] ):
self.enable_attention_slicing(_lowercase )
@torch.no_grad()
def lowercase__ ( self : int , _lowercase : Union[str, List[str]] , _lowercase : int = 5_12 , _lowercase : int = 5_12 , _lowercase : int = 50 , _lowercase : float = 7.5 , _lowercase : Optional[Union[str, List[str]]] = None , _lowercase : Optional[int] = 1 , _lowercase : float = 0.0 , _lowercase : Optional[torch.Generator] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , _lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowercase : int = 1 , **_lowercase : Optional[int] , ):
return self.pipea(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
@torch.no_grad()
def lowercase__ ( self : int , _lowercase : Union[str, List[str]] , _lowercase : int = 5_12 , _lowercase : int = 5_12 , _lowercase : int = 50 , _lowercase : float = 7.5 , _lowercase : Optional[Union[str, List[str]]] = None , _lowercase : Optional[int] = 1 , _lowercase : float = 0.0 , _lowercase : Optional[torch.Generator] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , _lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowercase : int = 1 , **_lowercase : int , ):
return self.pipea(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
@torch.no_grad()
def lowercase__ ( self : Tuple , _lowercase : Union[str, List[str]] , _lowercase : int = 5_12 , _lowercase : int = 5_12 , _lowercase : int = 50 , _lowercase : float = 7.5 , _lowercase : Optional[Union[str, List[str]]] = None , _lowercase : Optional[int] = 1 , _lowercase : float = 0.0 , _lowercase : Optional[torch.Generator] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , _lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowercase : int = 1 , **_lowercase : Union[str, Any] , ):
return self.pipea(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
@torch.no_grad()
def lowercase__ ( self : Optional[int] , _lowercase : Union[str, List[str]] , _lowercase : int = 5_12 , _lowercase : int = 5_12 , _lowercase : int = 50 , _lowercase : float = 7.5 , _lowercase : Optional[Union[str, List[str]]] = None , _lowercase : Optional[int] = 1 , _lowercase : float = 0.0 , _lowercase : Optional[torch.Generator] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , _lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowercase : int = 1 , **_lowercase : Optional[Any] , ):
return self.pipea(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
@torch.no_grad()
def lowercase__ ( self : Optional[int] , _lowercase : Union[str, List[str]] , _lowercase : int = 5_12 , _lowercase : int = 5_12 , _lowercase : int = 50 , _lowercase : float = 7.5 , _lowercase : Optional[Union[str, List[str]]] = None , _lowercase : Optional[int] = 1 , _lowercase : float = 0.0 , _lowercase : Optional[torch.Generator] = None , _lowercase : Optional[torch.FloatTensor] = None , _lowercase : Optional[str] = "pil" , _lowercase : bool = True , _lowercase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowercase : int = 1 , **_lowercase : int , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''cuda''' if torch.cuda.is_available() else '''cpu'''
self.to(_lowercase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` must be divisible by 8 but are {height} and {width}.""" )
# Get first result from Stable Diffusion Checkpoint v1.1
SCREAMING_SNAKE_CASE__ : Any = self.textaimg_sda_a(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
        # Get the result from Stable Diffusion Checkpoint v1.2
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.textaimg_sda_a(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
        # Get the result from Stable Diffusion Checkpoint v1.3
SCREAMING_SNAKE_CASE__ : str = self.textaimg_sda_a(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
        # Get the result from Stable Diffusion Checkpoint v1.4
SCREAMING_SNAKE_CASE__ : List[Any] = self.textaimg_sda_a(
prompt=_lowercase , height=_lowercase , width=_lowercase , num_inference_steps=_lowercase , guidance_scale=_lowercase , negative_prompt=_lowercase , num_images_per_prompt=_lowercase , eta=_lowercase , generator=_lowercase , latents=_lowercase , output_type=_lowercase , return_dict=_lowercase , callback=_lowercase , callback_steps=_lowercase , **_lowercase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
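# Hypothetical usage sketch (argument plumbing mirrors __call__ above; names and
# values are illustrative, not a tested invocation):
#   out = pipe(prompt="a photo of an astronaut riding a horse", num_inference_steps=25)
#   img_v11, img_v12, img_v13, img_v14 = out.images  # one result per checkpoint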
| 35 |
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def a ( A__ ) -> Tuple:
'''simple docstring'''
return EnvironmentCommand()
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : Optional[int] = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Tuple = huggingface_hub.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : int = torch.__version__
SCREAMING_SNAKE_CASE__ : List[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_transformers_available():
import transformers
SCREAMING_SNAKE_CASE__ : Optional[Any] = transformers.__version__
SCREAMING_SNAKE_CASE__ : Any = '''not installed'''
if is_accelerate_available():
import accelerate
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
SCREAMING_SNAKE_CASE__ : Tuple = '''not installed'''
if is_xformers_available():
import xformers
SCREAMING_SNAKE_CASE__ : Tuple = xformers.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`diffusers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Huggingface_hub version''': hub_version,
'''Transformers version''': transformers_version,
'''Accelerate version''': accelerate_version,
'''xFormers version''': xformers_version,
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Dict ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 | 1 |
from ..utils import DummyObject, requires_backends
class lowercase ( metaclass=_UpperCAmelCase ):
lowerCamelCase : Dict = ['''speech''']
def __init__( self : Any , *_lowercase : Tuple , **_lowercase : Optional[Any] ):
requires_backends(self , ['''speech'''] )
class lowercase ( metaclass=_UpperCAmelCase ):
lowerCamelCase : str = ['''speech''']
def __init__( self : List[str] , *_lowercase : Optional[int] , **_lowercase : str ):
requires_backends(self , ['''speech'''] )
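# Pattern note: the DummyObject metaclass lets `from transformers import <name>`
# succeed even when the optional "speech" backend is missing; only instantiating
# the class reaches requires_backends, which then raises an informative ImportError.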
| 35 |
import argparse
import torch
from transformers import RemBertConfig, RemBertModel, load_tf_weights_in_rembert
from transformers.utils import logging
logging.set_verbosity_info()
def a ( A__ , A__ , A__ ) -> Union[str, Any]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = RemBertConfig.from_json_file(A__ )
print('''Building PyTorch model from configuration: {}'''.format(str(A__ ) ) )
SCREAMING_SNAKE_CASE__ : Optional[Any] = RemBertModel(A__ )
# Load weights from tf checkpoint
load_tf_weights_in_rembert(A__ , A__ , A__ )
# Save pytorch-model
print('''Save PyTorch model to {}'''.format(A__ ) )
torch.save(model.state_dict() , A__ )
if __name__ == "__main__":
a_ :Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--tf_checkpoint_path', default=None, type=str, required=True, help='Path to the TensorFlow checkpoint path.'
)
parser.add_argument(
'--rembert_config_file',
default=None,
type=str,
required=True,
help=(
'The config json file corresponding to the pre-trained RemBERT model. \n'
'This specifies the model architecture.'
),
)
parser.add_argument(
'--pytorch_dump_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
a_ :Optional[Any] = parser.parse_args()
convert_rembert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.rembert_config_file, args.pytorch_dump_path)
| 35 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a_ :Dict = logging.get_logger(__name__)
a_ :List[str] = {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/config.json',
'google/bigbird-roberta-large': 'https://huggingface.co/google/bigbird-roberta-large/resolve/main/config.json',
'google/bigbird-base-trivia-itc': 'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/config.json',
# See all BigBird models at https://huggingface.co/models?filter=big_bird
}
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[int] = '''big_bird'''
def __init__( self : Any , _lowercase : str=5_03_58 , _lowercase : Optional[int]=7_68 , _lowercase : List[Any]=12 , _lowercase : Any=12 , _lowercase : Tuple=30_72 , _lowercase : int="gelu_new" , _lowercase : Any=0.1 , _lowercase : Optional[int]=0.1 , _lowercase : str=40_96 , _lowercase : Dict=2 , _lowercase : Union[str, Any]=0.02 , _lowercase : Tuple=1E-12 , _lowercase : Any=True , _lowercase : List[Any]=0 , _lowercase : Optional[int]=1 , _lowercase : List[Any]=2 , _lowercase : Union[str, Any]=66 , _lowercase : int="block_sparse" , _lowercase : str=True , _lowercase : Any=False , _lowercase : Tuple=64 , _lowercase : List[str]=3 , _lowercase : int=None , **_lowercase : List[str] , ):
super().__init__(
pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , sep_token_id=_lowercase , **_lowercase , )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = vocab_size
SCREAMING_SNAKE_CASE__ : Dict = max_position_embeddings
SCREAMING_SNAKE_CASE__ : Tuple = hidden_size
SCREAMING_SNAKE_CASE__ : Optional[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE__ : Dict = num_attention_heads
SCREAMING_SNAKE_CASE__ : Dict = intermediate_size
SCREAMING_SNAKE_CASE__ : Optional[int] = hidden_act
SCREAMING_SNAKE_CASE__ : Any = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ : List[Any] = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ : List[str] = initializer_range
SCREAMING_SNAKE_CASE__ : Any = type_vocab_size
SCREAMING_SNAKE_CASE__ : str = layer_norm_eps
SCREAMING_SNAKE_CASE__ : List[Any] = use_cache
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_embeddings
SCREAMING_SNAKE_CASE__ : str = attention_type
SCREAMING_SNAKE_CASE__ : List[str] = use_bias
SCREAMING_SNAKE_CASE__ : Dict = block_size
SCREAMING_SNAKE_CASE__ : Tuple = num_random_blocks
SCREAMING_SNAKE_CASE__ : Optional[int] = classifier_dropout
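# Minimal instantiation sketch (assuming the class above is exported as
# BigBirdConfig; keyword names mirror the signature):
#   config = BigBirdConfig(attention_type="block_sparse", block_size=64, num_random_blocks=3)
#   config.attention_type  # -> "block_sparse"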
class lowercase ( _UpperCAmelCase ):
@property
def lowercase__ ( self : Tuple ):
if self.task == "multiple-choice":
SCREAMING_SNAKE_CASE__ : List[str] = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
SCREAMING_SNAKE_CASE__ : str = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('''input_ids''', dynamic_axis),
('''attention_mask''', dynamic_axis),
] )
| 35 |
from sklearn.metrics import recall_score
import datasets
a_ :int = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
a_ :Union[str, Any] = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights. Defaults to `None`.\n- **zero_division** (`\'warn\'`, `0` or `1`): Sets the value to return when there is a zero division. Defaults to `\'warn\'`.\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
a_ :Optional[Any] = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class lowercase ( datasets.Metric ):
def lowercase__ ( self : int ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : Optional[Any] , _lowercase : Optional[int]=None , _lowercase : Tuple=1 , _lowercase : List[Any]="binary" , _lowercase : Any=None , _lowercase : Optional[int]="warn" , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = recall_score(
_lowercase , _lowercase , labels=_lowercase , pos_label=_lowercase , average=_lowercase , sample_weight=_lowercase , zero_division=_lowercase , )
return {"recall": float(_lowercase ) if score.size == 1 else score}
| 35 | 1 |
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class lowercase ( _UpperCAmelCase ):
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : str = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_lowercase , '''tf_padding''' ) )
self.parent.assertTrue(hasattr(_lowercase , '''depth_multiplier''' ) )
class lowercase :
def __init__( self : List[Any] , _lowercase : List[str] , _lowercase : str=13 , _lowercase : List[str]=3 , _lowercase : Dict=32 , _lowercase : List[Any]=0.25 , _lowercase : List[str]=8 , _lowercase : Tuple=True , _lowercase : Tuple=10_24 , _lowercase : Tuple=32 , _lowercase : Union[str, Any]="relu6" , _lowercase : Dict=0.1 , _lowercase : Optional[int]=0.02 , _lowercase : Optional[int]=True , _lowercase : int=True , _lowercase : int=10 , _lowercase : Dict=None , ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = parent
SCREAMING_SNAKE_CASE__ : Any = batch_size
SCREAMING_SNAKE_CASE__ : Any = num_channels
SCREAMING_SNAKE_CASE__ : Optional[int] = image_size
SCREAMING_SNAKE_CASE__ : List[str] = depth_multiplier
SCREAMING_SNAKE_CASE__ : str = min_depth
SCREAMING_SNAKE_CASE__ : Dict = tf_padding
SCREAMING_SNAKE_CASE__ : List[str] = int(last_hidden_size * depth_multiplier )
SCREAMING_SNAKE_CASE__ : List[str] = output_stride
SCREAMING_SNAKE_CASE__ : Optional[Any] = hidden_act
SCREAMING_SNAKE_CASE__ : int = classifier_dropout_prob
SCREAMING_SNAKE_CASE__ : Optional[int] = use_labels
SCREAMING_SNAKE_CASE__ : Any = is_training
SCREAMING_SNAKE_CASE__ : int = num_labels
SCREAMING_SNAKE_CASE__ : Dict = initializer_range
SCREAMING_SNAKE_CASE__ : Optional[int] = scope
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : int = None
SCREAMING_SNAKE_CASE__ : List[Any] = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Any = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : int = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.get_config()
return config, pixel_values, labels, pixel_labels
def lowercase__ ( self : int ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , min_depth=self.min_depth , tf_padding=self.tf_padding , hidden_act=self.hidden_act , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
def lowercase__ ( self : List[Any] , _lowercase : List[str] , _lowercase : int , _lowercase : int , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : str = MobileNetVaModel(config=_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
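        # With the tester defaults above (batch_size=13, depth_multiplier=0.25,
        # last_hidden_size=1024, image_size=32, output_stride=8) this asserts the
        # shape (13, 256, 4, 4): int(1024 * 0.25) = 256 channels, 32 // 8 = 4 per side.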
def lowercase__ ( self : Union[str, Any] , _lowercase : Optional[Any] , _lowercase : Dict , _lowercase : Any , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : int = self.num_labels
SCREAMING_SNAKE_CASE__ : Dict = MobileNetVaForImageClassification(_lowercase )
model.to(_lowercase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] = model(_lowercase , labels=_lowercase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase__ ( self : Optional[Any] ):
SCREAMING_SNAKE_CASE__ : str = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : str = config_and_inputs
SCREAMING_SNAKE_CASE__ : List[str] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowercase ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
lowerCamelCase : Optional[int] = (MobileNetVaModel, MobileNetVaForImageClassification) if is_torch_available() else ()
lowerCamelCase : Tuple = (
{'''feature-extraction''': MobileNetVaModel, '''image-classification''': MobileNetVaForImageClassification}
if is_torch_available()
else {}
)
lowerCamelCase : Union[str, Any] = False
lowerCamelCase : List[str] = False
lowerCamelCase : Optional[Any] = False
lowerCamelCase : int = False
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Tuple = MobileNetVaModelTester(self )
SCREAMING_SNAKE_CASE__ : str = MobileNetVaConfigTester(self , config_class=_lowercase , has_text_modality=_lowercase )
def lowercase__ ( self : str ):
self.config_tester.run_common_tests()
@unittest.skip(reason='''MobileNetV1 does not use inputs_embeds''' )
def lowercase__ ( self : List[str] ):
pass
@unittest.skip(reason='''MobileNetV1 does not support input and output embeddings''' )
def lowercase__ ( self : Tuple ):
pass
@unittest.skip(reason='''MobileNetV1 does not output attentions''' )
def lowercase__ ( self : List[Any] ):
pass
def lowercase__ ( self : Dict ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_class(_lowercase )
SCREAMING_SNAKE_CASE__ : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : List[Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : Tuple = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , _lowercase )
def lowercase__ ( self : Optional[int] ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowercase )
def lowercase__ ( self : Optional[Any] ):
def check_hidden_states_output(_lowercase : Optional[int] , _lowercase : str , _lowercase : Tuple ):
SCREAMING_SNAKE_CASE__ : int = model_class(_lowercase )
model.to(_lowercase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model(**self._prepare_for_class(_lowercase , _lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = outputs.hidden_states
SCREAMING_SNAKE_CASE__ : Union[str, Any] = 26
self.assertEqual(len(_lowercase ) , _lowercase )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : List[str] = True
check_hidden_states_output(_lowercase , _lowercase , _lowercase )
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowercase )
@slow
def lowercase__ ( self : Union[str, Any] ):
for model_name in MOBILENET_V1_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : int = MobileNetVaModel.from_pretrained(_lowercase )
self.assertIsNotNone(_lowercase )
def a ( ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_torch
@require_vision
class lowercase ( unittest.TestCase ):
@cached_property
def lowercase__ ( self : Tuple ):
return (
MobileNetVaImageProcessor.from_pretrained('''google/mobilenet_v1_1.0_224''' ) if is_vision_available() else None
)
@slow
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : str = MobileNetVaForImageClassification.from_pretrained('''google/mobilenet_v1_1.0_224''' ).to(_lowercase )
SCREAMING_SNAKE_CASE__ : Any = self.default_image_processor
SCREAMING_SNAKE_CASE__ : Optional[int] = prepare_img()
SCREAMING_SNAKE_CASE__ : Tuple = image_processor(images=_lowercase , return_tensors='''pt''' ).to(_lowercase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : str = model(**_lowercase )
# verify the logits
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.Size((1, 10_01) )
self.assertEqual(outputs.logits.shape , _lowercase )
SCREAMING_SNAKE_CASE__ : Any = torch.tensor([-4.1739, -1.1233, 3.1205] ).to(_lowercase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowercase , atol=1E-4 ) )
| 35 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
a_ :List[Any] = logging.getLogger(__name__)
@dataclass
class lowercase :
lowerCamelCase : str
lowerCamelCase : List[str]
lowerCamelCase : Optional[List[str]]
@dataclass
class lowercase :
lowerCamelCase : List[int]
lowerCamelCase : List[int]
lowerCamelCase : Optional[List[int]] = None
lowerCamelCase : Optional[List[int]] = None
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Optional[Any] = '''train'''
lowerCamelCase : Tuple = '''dev'''
lowerCamelCase : Any = '''test'''
class lowercase :
@staticmethod
def lowercase__ ( _lowercase : Any , _lowercase : Union[Split, str] ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : str ):
raise NotImplementedError
@staticmethod
def lowercase__ ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : int=False , _lowercase : Optional[Any]="[CLS]" , _lowercase : Tuple=1 , _lowercase : Optional[Any]="[SEP]" , _lowercase : Tuple=False , _lowercase : Optional[Any]=False , _lowercase : List[Any]=0 , _lowercase : Optional[int]=0 , _lowercase : Optional[Any]=-1_00 , _lowercase : Tuple=0 , _lowercase : Union[str, Any]=True , ):
SCREAMING_SNAKE_CASE__ : Tuple = {label: i for i, label in enumerate(_lowercase )}
SCREAMING_SNAKE_CASE__ : Dict = []
for ex_index, example in enumerate(_lowercase ):
if ex_index % 1_00_00 == 0:
logger.info('''Writing example %d of %d''' , _lowercase , len(_lowercase ) )
SCREAMING_SNAKE_CASE__ : Tuple = []
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = tokenizer.tokenize(_lowercase )
                # bert-base-multilingual-cased sometimes outputs nothing ([]) when calling tokenize with just a space.
if len(_lowercase ) > 0:
tokens.extend(_lowercase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_lowercase ) - 1) )
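                    # e.g. a word labeled B-LOC tokenized to ["jack", "##son", "##ville"]
                    # yields label ids [label_map["B-LOC"], pad_token_label_id, pad_token_label_id],
                    # so the loss is computed once per word; pad_token_label_id is the
                    # CrossEntropyLoss ignore_index (-100) in the torch dataset below.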
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ : Tuple = tokenizer.num_special_tokens_to_add()
if len(_lowercase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ : List[str] = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ : Any = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [sequence_a_segment_id] * len(_lowercase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ : Optional[Any] = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ : Tuple = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ : Tuple = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ : Optional[int] = tokenizer.convert_tokens_to_ids(_lowercase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ : str = [1 if mask_padding_with_zero else 0] * len(_lowercase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ : List[str] = max_seq_length - len(_lowercase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ : str = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ : Tuple = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ : int = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
assert len(_lowercase ) == max_seq_length
if ex_index < 5:
logger.info('''*** Example ***''' )
logger.info('''guid: %s''' , example.guid )
logger.info('''tokens: %s''' , ''' '''.join([str(_lowercase ) for x in tokens] ) )
logger.info('''input_ids: %s''' , ''' '''.join([str(_lowercase ) for x in input_ids] ) )
logger.info('''input_mask: %s''' , ''' '''.join([str(_lowercase ) for x in input_mask] ) )
logger.info('''segment_ids: %s''' , ''' '''.join([str(_lowercase ) for x in segment_ids] ) )
logger.info('''label_ids: %s''' , ''' '''.join([str(_lowercase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : List[Any] = None
features.append(
InputFeatures(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , label_ids=_lowercase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : Optional[int]=False , _lowercase : Split = Split.train , ):
# Load data features from cache or dataset file
SCREAMING_SNAKE_CASE__ : List[Any] = os.path.join(
_lowercase , '''cached_{}_{}_{}'''.format(mode.value , tokenizer.__class__.__name__ , str(_lowercase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ : Optional[int] = cached_features_file + '''.lock'''
with FileLock(_lowercase ):
if os.path.exists(_lowercase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ : Any = torch.load(_lowercase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ : str = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : Any = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _lowercase )
def __len__( self : Tuple ):
return len(self.features )
def __getitem__( self : Optional[int] , _lowercase : List[str] ):
return self.features[i]
if is_tf_available():
import tensorflow as tf
class lowercase :
lowerCamelCase : List[InputFeatures]
lowerCamelCase : int = -100
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[str]=False , _lowercase : Split = Split.train , ):
SCREAMING_SNAKE_CASE__ : Optional[int] = token_classification_task.read_examples_from_file(_lowercase , _lowercase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ : List[str] = token_classification_task.convert_examples_to_features(
_lowercase , _lowercase , _lowercase , _lowercase , cls_token_at_end=bool(model_type in ['''xlnet'''] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ['''xlnet'''] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_lowercase , pad_on_left=bool(tokenizer.padding_side == '''left''' ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa}, tf.intaa) , (
{'''input_ids''': tf.TensorShape([None] ), '''attention_mask''': tf.TensorShape([None] )},
tf.TensorShape([None] ),
) , )
else:
SCREAMING_SNAKE_CASE__ : int = tf.data.Dataset.from_generator(
_lowercase , ({'''input_ids''': tf.intaa, '''attention_mask''': tf.intaa, '''token_type_ids''': tf.intaa}, tf.intaa) , (
{
'''input_ids''': tf.TensorShape([None] ),
'''attention_mask''': tf.TensorShape([None] ),
'''token_type_ids''': tf.TensorShape([None] ),
},
tf.TensorShape([None] ),
) , )
def lowercase__ ( self : Tuple ):
SCREAMING_SNAKE_CASE__ : Optional[int] = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features ) ) )
return self.dataset
def __len__( self : Dict ):
return len(self.features )
def __getitem__( self : Optional[Any] , _lowercase : Union[str, Any] ):
return self.features[i]
| 35 | 1 |
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def a ( A__ ) -> int:
'''simple docstring'''
return EnvironmentCommand()
def a ( A__ ) -> int:
'''simple docstring'''
return EnvironmentCommand(args.accelerate_config_file )
class lowercase ( _UpperCAmelCase ):
@staticmethod
def lowercase__ ( _lowercase : ArgumentParser ):
SCREAMING_SNAKE_CASE__ : int = parser.add_parser('''env''' )
download_parser.set_defaults(func=_lowercase )
download_parser.add_argument(
'''--accelerate-config_file''' , default=_lowercase , help='''The accelerate config file to use for the default values in the launching script.''' , )
download_parser.set_defaults(func=_lowercase )
def __init__( self : Any , _lowercase : int , *_lowercase : int ):
SCREAMING_SNAKE_CASE__ : Any = accelerate_config_file
def lowercase__ ( self : List[str] ):
SCREAMING_SNAKE_CASE__ : str = '''not installed'''
if is_safetensors_available():
import safetensors
SCREAMING_SNAKE_CASE__ : int = safetensors.__version__
elif importlib.util.find_spec('''safetensors''' ) is not None:
import safetensors
SCREAMING_SNAKE_CASE__ : Any = f"""{safetensors.__version__} but is ignored because of PyTorch version too old."""
SCREAMING_SNAKE_CASE__ : int = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[str] = '''not found'''
if is_accelerate_available():
import accelerate
from accelerate.commands.config import default_config_file, load_config_from_file
SCREAMING_SNAKE_CASE__ : Union[str, Any] = accelerate.__version__
# Get the default from the config file.
if self._accelerate_config_file is not None or os.path.isfile(_lowercase ):
SCREAMING_SNAKE_CASE__ : int = load_config_from_file(self._accelerate_config_file ).to_dict()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
'''\n'''.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(_lowercase , _lowercase )
else f"""\t{accelerate_config}"""
)
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : int = '''NA'''
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE__ : Tuple = torch.__version__
SCREAMING_SNAKE_CASE__ : int = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = '''NA'''
if is_tf_available():
import tensorflow as tf
SCREAMING_SNAKE_CASE__ : int = tf.__version__
try:
# deprecated in v2.1
SCREAMING_SNAKE_CASE__ : Tuple = tf.test.is_gpu_available()
except AttributeError:
# returns list of devices, convert to bool
SCREAMING_SNAKE_CASE__ : List[Any] = bool(tf.config.list_physical_devices('''GPU''' ) )
SCREAMING_SNAKE_CASE__ : int = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[Any] = '''not installed'''
SCREAMING_SNAKE_CASE__ : List[str] = '''not installed'''
SCREAMING_SNAKE_CASE__ : int = '''NA'''
if is_flax_available():
import flax
import jax
import jaxlib
SCREAMING_SNAKE_CASE__ : Optional[int] = flax.__version__
SCREAMING_SNAKE_CASE__ : Optional[int] = jax.__version__
SCREAMING_SNAKE_CASE__ : int = jaxlib.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = jax.lib.xla_bridge.get_backend().platform
SCREAMING_SNAKE_CASE__ : Optional[Any] = {
'''`transformers` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Huggingface_hub version''': huggingface_hub.__version__,
'''Safetensors version''': f"""{safetensors_version}""",
'''Accelerate version''': f"""{accelerate_version}""",
'''Accelerate config''': f"""{accelerate_config_str}""",
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''Tensorflow version (GPU?)''': f"""{tf_version} ({tf_cuda_available})""",
'''Flax version (CPU?/GPU?/TPU?)''': f"""{flax_version} ({jax_backend})""",
'''Jax version''': f"""{jax_version}""",
'''JaxLib version''': f"""{jaxlib_version}""",
'''Using GPU in script?''': '''<fill in>''',
'''Using distributed or parallel set-up in script?''': '''<fill in>''',
}
print('''\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n''' )
print(self.format_dict(_lowercase ) )
return info
@staticmethod
def lowercase__ ( _lowercase : Any ):
return "\n".join([f"""- {prop}: {val}""" for prop, val in d.items()] ) + "\n"
| 35 |
import os
def a ( A__ = "matrix.txt" ) -> int:
'''simple docstring'''
    with open(os.path.join(os.path.dirname(__file__) , A__ ) ) as in_file:
SCREAMING_SNAKE_CASE__ : Optional[Any] = in_file.read()
SCREAMING_SNAKE_CASE__ : Optional[Any] = [[int(A__ ) for cell in row.split(''',''' )] for row in data.strip().splitlines()]
SCREAMING_SNAKE_CASE__ : Dict = [[0 for cell in row] for row in grid]
SCREAMING_SNAKE_CASE__ : Any = len(grid[0] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [[0 for i in range(A__ )] for j in range(A__ )]
SCREAMING_SNAKE_CASE__ : Tuple = grid[0][0]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[0][i] + dp[0][i - 1]
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = grid[i][0] + dp[i - 1][0]
for i in range(1 , A__ ):
for j in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = grid[i][j] + min(dp[i - 1][j] , dp[i][j - 1] )
return dp[-1][-1]
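# Worked example of the recurrence dp[i][j] = grid[i][j] + min(dp[i-1][j], dp[i][j-1])
# on a small grid (illustrative; not the Project Euler input):
#   grid = [[1, 3, 1],        dp = [[1, 4, 5],
#           [1, 5, 1],              [2, 7, 6],
#           [4, 2, 1]]              [6, 8, 7]]   -> minimal right/down path sum: 7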
if __name__ == "__main__":
print(F'''{solution() = }''')
| 35 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[int] , **_lowercase : Optional[Any] ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : Union[str, Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def lowercase__ ( self : Optional[int] , _lowercase : Any , _lowercase : Dict , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : str = ObjectDetectionPipeline(model=_lowercase , image_processor=_lowercase )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def lowercase__ ( self : Optional[int] , _lowercase : Any , _lowercase : str ):
SCREAMING_SNAKE_CASE__ : Tuple = object_detector('''./tests/fixtures/tests_samples/COCO/000000039769.png''' , threshold=0.0 )
self.assertGreater(len(_lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_lowercase , {
'''score''': ANY(_lowercase ),
'''label''': ANY(_lowercase ),
'''box''': {'''xmin''': ANY(_lowercase ), '''ymin''': ANY(_lowercase ), '''xmax''': ANY(_lowercase ), '''ymax''': ANY(_lowercase )},
} , )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Optional[Any] = [
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
]
SCREAMING_SNAKE_CASE__ : Dict = object_detector(_lowercase , threshold=0.0 )
self.assertEqual(len(_lowercase ) , len(_lowercase ) )
for outputs in batch_outputs:
self.assertGreater(len(_lowercase ) , 0 )
for detected_object in outputs:
self.assertEqual(
_lowercase , {
'''score''': ANY(_lowercase ),
'''label''': ANY(_lowercase ),
'''box''': {'''xmin''': ANY(_lowercase ), '''ymin''': ANY(_lowercase ), '''xmax''': ANY(_lowercase ), '''ymax''': ANY(_lowercase )},
} , )
@require_tf
@unittest.skip('''Object detection not implemented in TF''' )
def lowercase__ ( self : int ):
pass
@require_torch
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = '''hf-internal-testing/tiny-detr-mobilenetsv3'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = AutoModelForObjectDetection.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = AutoFeatureExtractor.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : str = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=0.0 )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
] , )
SCREAMING_SNAKE_CASE__ : str = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
],
[
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.3376, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
],
] , )
@require_torch
@slow
def lowercase__ ( self : List[Any] ):
SCREAMING_SNAKE_CASE__ : Optional[int] = '''facebook/detr-resnet-50'''
SCREAMING_SNAKE_CASE__ : Dict = AutoModelForObjectDetection.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = ObjectDetectionPipeline(model=_lowercase , feature_extractor=_lowercase )
SCREAMING_SNAKE_CASE__ : Optional[Any] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
SCREAMING_SNAKE_CASE__ : str = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
] , )
@require_torch
@slow
def lowercase__ ( self : int ):
SCREAMING_SNAKE_CASE__ : List[Any] = '''facebook/detr-resnet-50'''
SCREAMING_SNAKE_CASE__ : int = pipeline('''object-detection''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
SCREAMING_SNAKE_CASE__ : Tuple = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
[
{'''score''': 0.9982, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.9960, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.9955, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
] , )
@require_torch
@slow
def lowercase__ ( self : Any ):
SCREAMING_SNAKE_CASE__ : Tuple = 0.9985
SCREAMING_SNAKE_CASE__ : Tuple = '''facebook/detr-resnet-50'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''object-detection''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = object_detector('''http://images.cocodataset.org/val2017/000000039769.jpg''' , threshold=_lowercase )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.9988, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.9987, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Narsil/layoutlmv3-finetuned-funsd'''
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0.9993
SCREAMING_SNAKE_CASE__ : str = pipeline('''object-detection''' , model=_lowercase , threshold=_lowercase )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = object_detector(
'''https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png''' )
self.assertEqual(
nested_simplify(_lowercase , decimals=4 ) , [
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}},
{'''score''': 0.9993, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}},
] , )
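Outside the test harness, the pipeline exercised above reduces to a few lines; facebook/detr-resnet-50 is the public checkpoint the slow tests load, and exact scores vary with the model revision (a usage sketch, not part of the suite):

from transformers import pipeline

detector = pipeline("object-detection", model="facebook/detr-resnet-50")
for r in detector("http://images.cocodataset.org/val2017/000000039769.jpg"):
    # Each result is {"score": float, "label": str, "box": {xmin, ymin, xmax, ymax}}.
    print(f"{r['label']}: {r['score']:.3f} at {r['box']}")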
| 35 |
from math import factorial
def a ( A__ = 2_0 ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
SCREAMING_SNAKE_CASE__ : Dict = n // 2
return int(factorial(A__ ) / (factorial(A__ ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
a_ :str = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
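The function computes the central binomial coefficient C(2n, n), which counts the monotonic lattice paths through an n x n grid (Project Euler 15); for n = 20 that is 137846528820. Because the factorial ratio above goes through float division, math.comb is the safer exact equivalent (a minimal sketch):

from math import comb

def solution(n: int = 20) -> int:
    # Right/down paths across an n x n lattice: choose n of the 2n steps.
    return comb(2 * n, n)

assert solution(20) == 137846528820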
| 35 | 1 |
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def a ( A__ , A__=1 ) -> int:
'''simple docstring'''
if n_shave_prefix_segments >= 0:
return ".".join(path.split('''.''' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('''.''' )[:n_shave_prefix_segments] )
def a ( A__ , A__=0 ) -> List[str]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Any = []
for old_item in old_list:
SCREAMING_SNAKE_CASE__ : int = old_item.replace('''in_layers.0''' , '''norm1''' )
SCREAMING_SNAKE_CASE__ : Tuple = new_item.replace('''in_layers.2''' , '''conv1''' )
SCREAMING_SNAKE_CASE__ : Dict = new_item.replace('''out_layers.0''' , '''norm2''' )
SCREAMING_SNAKE_CASE__ : List[str] = new_item.replace('''out_layers.3''' , '''conv2''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_item.replace('''emb_layers.1''' , '''time_emb_proj''' )
SCREAMING_SNAKE_CASE__ : Dict = new_item.replace('''skip_connection''' , '''conv_shortcut''' )
SCREAMING_SNAKE_CASE__ : int = shave_segments(A__ , n_shave_prefix_segments=A__ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def a ( A__ , A__=0 ) -> Optional[int]:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = []
for old_item in old_list:
SCREAMING_SNAKE_CASE__ : Optional[Any] = old_item
SCREAMING_SNAKE_CASE__ : List[Any] = new_item.replace('''norm.weight''' , '''group_norm.weight''' )
SCREAMING_SNAKE_CASE__ : Dict = new_item.replace('''norm.bias''' , '''group_norm.bias''' )
SCREAMING_SNAKE_CASE__ : List[Any] = new_item.replace('''proj_out.weight''' , '''proj_attn.weight''' )
SCREAMING_SNAKE_CASE__ : int = new_item.replace('''proj_out.bias''' , '''proj_attn.bias''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = shave_segments(A__ , n_shave_prefix_segments=A__ )
mapping.append({'''old''': old_item, '''new''': new_item} )
return mapping
def a ( A__ , A__ , A__ , A__=None , A__=None , A__=None ) -> Optional[int]:
'''simple docstring'''
assert isinstance(A__ , A__ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
SCREAMING_SNAKE_CASE__ : Optional[Any] = old_checkpoint[path]
SCREAMING_SNAKE_CASE__ : Optional[Any] = old_tensor.shape[0] // 3
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
SCREAMING_SNAKE_CASE__ : Optional[int] = old_tensor.shape[0] // config['''num_head_channels'''] // 3
SCREAMING_SNAKE_CASE__ : List[Any] = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = old_tensor.split(channels // num_heads , dim=1 )
SCREAMING_SNAKE_CASE__ : Tuple = query.reshape(A__ )
SCREAMING_SNAKE_CASE__ : Optional[Any] = key.reshape(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = value.reshape(A__ )
for path in paths:
SCREAMING_SNAKE_CASE__ : Optional[int] = path['''new''']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
SCREAMING_SNAKE_CASE__ : Any = new_path.replace('''middle_block.0''' , '''mid_block.resnets.0''' )
SCREAMING_SNAKE_CASE__ : int = new_path.replace('''middle_block.1''' , '''mid_block.attentions.0''' )
SCREAMING_SNAKE_CASE__ : Tuple = new_path.replace('''middle_block.2''' , '''mid_block.resnets.1''' )
if additional_replacements is not None:
for replacement in additional_replacements:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = new_path.replace(replacement['''old'''] , replacement['''new'''] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
SCREAMING_SNAKE_CASE__ : Tuple = old_checkpoint[path['''old''']][:, :, 0]
else:
SCREAMING_SNAKE_CASE__ : int = old_checkpoint[path['''old''']]
def a ( A__ , A__ ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {}
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint['''time_embed.0.weight''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint['''time_embed.0.bias''']
SCREAMING_SNAKE_CASE__ : int = checkpoint['''time_embed.2.weight''']
SCREAMING_SNAKE_CASE__ : List[Any] = checkpoint['''time_embed.2.bias''']
SCREAMING_SNAKE_CASE__ : List[str] = checkpoint['''input_blocks.0.0.weight''']
SCREAMING_SNAKE_CASE__ : Tuple = checkpoint['''input_blocks.0.0.bias''']
SCREAMING_SNAKE_CASE__ : int = checkpoint['''out.0.weight''']
SCREAMING_SNAKE_CASE__ : Dict = checkpoint['''out.0.bias''']
SCREAMING_SNAKE_CASE__ : List[str] = checkpoint['''out.2.weight''']
SCREAMING_SNAKE_CASE__ : Optional[int] = checkpoint['''out.2.bias''']
# Retrieves the keys for the input blocks only
SCREAMING_SNAKE_CASE__ : str = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''input_blocks''' in layer} )
SCREAMING_SNAKE_CASE__ : Tuple = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(A__ )
}
# Retrieves the keys for the middle blocks only
SCREAMING_SNAKE_CASE__ : str = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''middle_block''' in layer} )
SCREAMING_SNAKE_CASE__ : List[str] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(A__ )
}
# Retrieves the keys for the output blocks only
SCREAMING_SNAKE_CASE__ : Dict = len({'''.'''.join(layer.split('''.''' )[:2] ) for layer in checkpoint if '''output_blocks''' in layer} )
SCREAMING_SNAKE_CASE__ : Any = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(A__ )
}
for i in range(1 , A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = (i - 1) // (config['''num_res_blocks'''] + 1)
SCREAMING_SNAKE_CASE__ : Optional[int] = (i - 1) % (config['''num_res_blocks'''] + 1)
SCREAMING_SNAKE_CASE__ : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
SCREAMING_SNAKE_CASE__ : Any = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
SCREAMING_SNAKE_CASE__ : List[str] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
SCREAMING_SNAKE_CASE__ : str = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
SCREAMING_SNAKE_CASE__ : Union[str, Any] = renew_resnet_paths(A__ )
SCREAMING_SNAKE_CASE__ : str = {'''old''': f"""input_blocks.{i}.0""", '''new''': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
SCREAMING_SNAKE_CASE__ : Any = {'''old''': '''resnets.2.op''', '''new''': '''downsamplers.0.op'''}
assign_to_checkpoint(
A__ , A__ , A__ , additional_replacements=[meta_path, resnet_op] , config=A__ )
if len(A__ ):
SCREAMING_SNAKE_CASE__ : int = renew_attention_paths(A__ )
SCREAMING_SNAKE_CASE__ : Any = {
'''old''': f"""input_blocks.{i}.1""",
'''new''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
SCREAMING_SNAKE_CASE__ : Any = {
f"""input_blocks.{i}.1.qkv.bias""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'''key''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A__ , A__ , A__ , additional_replacements=[meta_path] , attention_paths_to_split=A__ , config=A__ , )
SCREAMING_SNAKE_CASE__ : Dict = middle_blocks[0]
SCREAMING_SNAKE_CASE__ : Any = middle_blocks[1]
SCREAMING_SNAKE_CASE__ : int = middle_blocks[2]
SCREAMING_SNAKE_CASE__ : Optional[int] = renew_resnet_paths(A__ )
assign_to_checkpoint(A__ , A__ , A__ , config=A__ )
SCREAMING_SNAKE_CASE__ : str = renew_resnet_paths(A__ )
assign_to_checkpoint(A__ , A__ , A__ , config=A__ )
SCREAMING_SNAKE_CASE__ : Dict = renew_attention_paths(A__ )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = {
'''middle_block.1.qkv.bias''': {
'''key''': '''mid_block.attentions.0.key.bias''',
'''query''': '''mid_block.attentions.0.query.bias''',
'''value''': '''mid_block.attentions.0.value.bias''',
},
'''middle_block.1.qkv.weight''': {
'''key''': '''mid_block.attentions.0.key.weight''',
'''query''': '''mid_block.attentions.0.query.weight''',
'''value''': '''mid_block.attentions.0.value.weight''',
},
}
assign_to_checkpoint(
A__ , A__ , A__ , attention_paths_to_split=A__ , config=A__ )
for i in range(A__ ):
SCREAMING_SNAKE_CASE__ : Optional[int] = i // (config['''num_res_blocks'''] + 1)
SCREAMING_SNAKE_CASE__ : Optional[Any] = i % (config['''num_res_blocks'''] + 1)
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [shave_segments(A__ , 2 ) for name in output_blocks[i]]
SCREAMING_SNAKE_CASE__ : Any = {}
for layer in output_block_layers:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = layer.split('''.''' )[0], shave_segments(A__ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(A__ )
else:
SCREAMING_SNAKE_CASE__ : Any = [layer_name]
if len(A__ ) > 1:
SCREAMING_SNAKE_CASE__ : Any = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
SCREAMING_SNAKE_CASE__ : List[str] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
SCREAMING_SNAKE_CASE__ : Any = renew_resnet_paths(A__ )
SCREAMING_SNAKE_CASE__ : Any = renew_resnet_paths(A__ )
SCREAMING_SNAKE_CASE__ : Tuple = {'''old''': f"""output_blocks.{i}.0""", '''new''': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(A__ , A__ , A__ , additional_replacements=[meta_path] , config=A__ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
SCREAMING_SNAKE_CASE__ : List[str] = list(output_block_list.values() ).index(['''conv.weight''', '''conv.bias'''] )
SCREAMING_SNAKE_CASE__ : List[str] = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
SCREAMING_SNAKE_CASE__ : Optional[Any] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(A__ ) == 2:
SCREAMING_SNAKE_CASE__ : List[Any] = []
if len(A__ ):
SCREAMING_SNAKE_CASE__ : Any = renew_attention_paths(A__ )
SCREAMING_SNAKE_CASE__ : str = {
'''old''': f"""output_blocks.{i}.1""",
'''new''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
SCREAMING_SNAKE_CASE__ : Dict = {
f"""output_blocks.{i}.1.qkv.bias""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'''key''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'''query''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'''value''': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
A__ , A__ , A__ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('''qkv''' in key for key in attentions ) else None , config=A__ , )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = renew_resnet_paths(A__ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''.'''.join(['''output_blocks''', str(A__ ), path['''old''']] )
SCREAMING_SNAKE_CASE__ : int = '''.'''.join(['''up_blocks''', str(A__ ), '''resnets''', str(A__ ), path['''new''']] )
SCREAMING_SNAKE_CASE__ : Tuple = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
a_ :Dict = argparse.ArgumentParser()
parser.add_argument(
'--checkpoint_path', default=None, type=str, required=True, help='Path to the checkpoint to convert.'
)
parser.add_argument(
'--config_file',
default=None,
type=str,
required=True,
help='The config json file corresponding to the architecture.',
)
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
a_ :Optional[Any] = parser.parse_args()
a_ :Any = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
a_ :List[str] = json.loads(f.read())
a_ :Tuple = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
a_ :int = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
a_ :Optional[int] = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
a_ :Dict = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
a_ :Optional[Any] = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
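The subtlest step in this conversion script is attention_paths_to_split: LDM checkpoints store attention projections as one fused qkv tensor, which assign_to_checkpoint reshapes per head and splits into separate query/key/value tensors. A standalone sketch of that split (shapes are illustrative, not read from a real checkpoint):

import torch

num_heads, channels = 4, 32                    # illustrative sizes
qkv = torch.randn(3 * channels, channels)      # fused [3C, C] projection weight

# Group the 3C rows per head, then split each head's rows into q/k/v thirds.
per_head = qkv.reshape(num_heads, 3 * channels // num_heads, channels)
q, k, v = per_head.split(channels // num_heads, dim=1)

assert q.reshape(-1, channels).shape == (channels, channels)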
| 35 |
import hashlib
import unittest
from transformers import MODEL_FOR_DEPTH_ESTIMATION_MAPPING, is_torch_available, is_vision_available
from transformers.pipelines import DepthEstimationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
else:
class lowercase :
@staticmethod
def lowercase__ ( *_lowercase : Optional[Any] , **_lowercase : str ):
pass
def a ( A__ ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Optional[int] = hashlib.mda(image.tobytes() )
return m.hexdigest()
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class lowercase ( unittest.TestCase ):
lowerCamelCase : int = MODEL_FOR_DEPTH_ESTIMATION_MAPPING
def lowercase__ ( self : List[Any] , _lowercase : Tuple , _lowercase : Any , _lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : List[str] = DepthEstimationPipeline(model=_lowercase , image_processor=_lowercase )
return depth_estimator, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : Optional[int] = depth_estimator('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
self.assertEqual({'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )} , _lowercase )
import datasets
SCREAMING_SNAKE_CASE__ : List[str] = datasets.load_dataset('''hf-internal-testing/fixtures_image_utils''' , '''image''' , split='''test''' )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator(
[
Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' ),
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
# RGBA
dataset[0]['''file'''],
# LA
dataset[1]['''file'''],
# L
dataset[2]['''file'''],
] )
self.assertEqual(
[
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
{'''predicted_depth''': ANY(torch.Tensor ), '''depth''': ANY(Image.Image )},
] , _lowercase , )
@require_tf
@unittest.skip('''Depth estimation is not implemented in TF''' )
def lowercase__ ( self : Optional[int] ):
pass
@slow
@require_torch
def lowercase__ ( self : Union[str, Any] ):
SCREAMING_SNAKE_CASE__ : List[str] = '''Intel/dpt-large'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = pipeline('''depth-estimation''' , model=_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = depth_estimator('''http://images.cocodataset.org/val2017/000000039769.jpg''' )
SCREAMING_SNAKE_CASE__ : List[str] = hashimage(outputs['''depth'''] )
# This seems flaky.
# self.assertEqual(outputs["depth"], "1a39394e282e9f3b0741a90b9f108977")
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].max().item() ) , 29.304 )
self.assertEqual(nested_simplify(outputs['''predicted_depth'''].min().item() ) , 2.662 )
@require_torch
def lowercase__ ( self : str ):
# This is highly irregular to have no small tests.
self.skipTest('''There is no hf-internal-testing tiny model for either GLPN or DPT''' )
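For reference, the slow test above corresponds to roughly this user-facing call, with Intel/dpt-large being the public checkpoint it loads (a usage sketch):

from transformers import pipeline

depth = pipeline("depth-estimation", model="Intel/dpt-large")
out = depth("http://images.cocodataset.org/val2017/000000039769.jpg")
out["depth"].save("depth.png")       # PIL image of the rendered depth map
print(out["predicted_depth"].shape)  # raw torch.Tensor of predictions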
| 35 | 1 |
def a ( A__ ) -> str:
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def a ( A__ ) -> bytes:
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError('''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set('''0123456789ABCDEF''' ):
raise ValueError('''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(A__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
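These helpers mirror the standard library's Base16 codec, so their behavior can be cross-checked against base64.b16encode/b16decode; like the validator above, the stdlib decoder is strict about uppercase per RFC 3548 section 6 (a minimal sketch):

import base64

data = b"Hello World!"
encoded = base64.b16encode(data).decode()   # '48656C6C6F20576F726C6421'
assert base64.b16decode(encoded) == data
# b16decode likewise rejects lowercase input unless casefold=True is passed.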
| 35 |
def a ( A__ ) -> int:
'''simple docstring'''
if a < 0:
raise ValueError('''Input value must be a positive integer''' )
elif isinstance(A__ , A__ ):
raise TypeError('''Input value must be an \'int\' type''' )
return bin(A__ ).count('''1''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
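Note that the renaming has mangled the guards here: isinstance(A__, A__) was presumably a float type check in the original, and as written it can never behave as intended. The underlying operation is a plain population count (a minimal de-obfuscated sketch):

def count_set_bits(number: int) -> int:
    if not isinstance(number, int):
        raise TypeError("Input value must be an 'int' type")
    if number < 0:
        raise ValueError("Input value must be a positive integer")
    return bin(number).count("1")   # or number.bit_count() on Python >= 3.10

assert count_set_bits(0b101101) == 4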
| 35 | 1 |
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class lowercase ( enum.Enum ):
lowerCamelCase : int = 0
lowerCamelCase : Tuple = 1
lowerCamelCase : List[str] = 2
@add_end_docstrings(_UpperCAmelCase )
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : Dict = '''
In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
begging for his blessing. <eod> </s> <eos>
'''
def __init__( self : Optional[int] , *_lowercase : Optional[Any] , **_lowercase : Dict ):
super().__init__(*_lowercase , **_lowercase )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == '''tf''' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
SCREAMING_SNAKE_CASE__ : Dict = None
if self.model.config.prefix is not None:
SCREAMING_SNAKE_CASE__ : Any = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
SCREAMING_SNAKE_CASE__ : Any = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = self._sanitize_parameters(prefix=_lowercase , **self._forward_params )
SCREAMING_SNAKE_CASE__ : str = {**self._preprocess_params, **preprocess_params}
SCREAMING_SNAKE_CASE__ : str = {**self._forward_params, **forward_params}
def lowercase__ ( self : Optional[int] , _lowercase : Any=None , _lowercase : Optional[int]=None , _lowercase : List[Any]=None , _lowercase : List[Any]=None , _lowercase : Optional[Any]=None , _lowercase : Optional[int]=None , _lowercase : List[Any]=None , _lowercase : Tuple=None , **_lowercase : Union[str, Any] , ):
SCREAMING_SNAKE_CASE__ : int = {}
if prefix is not None:
SCREAMING_SNAKE_CASE__ : Tuple = prefix
if prefix:
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(
_lowercase , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Dict = prefix_inputs['''input_ids'''].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"""{handle_long_generation} is not a valid value for the `handle_long_generation` parameter; expected"""
''' one of [None, \'hole\']''' )
SCREAMING_SNAKE_CASE__ : Any = handle_long_generation
preprocess_params.update(_lowercase )
SCREAMING_SNAKE_CASE__ : int = generate_kwargs
SCREAMING_SNAKE_CASE__ : str = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_full_text`''' )
if return_tensors is not None:
raise ValueError('''`return_full_text` is mutually exclusive with `return_tensors`''' )
SCREAMING_SNAKE_CASE__ : List[str] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('''`return_text` is mutually exclusive with `return_tensors`''' )
SCREAMING_SNAKE_CASE__ : Optional[int] = ReturnType.TENSORS
if return_type is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = return_type
if clean_up_tokenization_spaces is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = clean_up_tokenization_spaces
if stop_sequence is not None:
SCREAMING_SNAKE_CASE__ : Optional[int] = self.tokenizer.encode(_lowercase , add_special_tokens=_lowercase )
if len(_lowercase ) > 1:
warnings.warn(
'''Stopping on a multiple token sequence is not yet supported on transformers. The first token of'''
''' the stop sequence will be used as the stop sequence string in the interim.''' )
SCREAMING_SNAKE_CASE__ : int = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def lowercase__ ( self : Union[str, Any] , *_lowercase : Tuple , **_lowercase : Any ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'''add_space_before_punct_symbol''': True} )
return super()._parse_and_tokenize(*_lowercase , **_lowercase )
def __call__( self : List[str] , _lowercase : Tuple , **_lowercase : Dict ):
return super().__call__(_lowercase , **_lowercase )
def lowercase__ ( self : Optional[int] , _lowercase : Optional[int] , _lowercase : int="" , _lowercase : int=None , **_lowercase : List[str] ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.tokenizer(
prefix + prompt_text , padding=_lowercase , add_special_tokens=_lowercase , return_tensors=self.framework )
SCREAMING_SNAKE_CASE__ : Tuple = prompt_text
if handle_long_generation == "hole":
SCREAMING_SNAKE_CASE__ : Optional[Any] = inputs['''input_ids'''].shape[-1]
if "max_new_tokens" in generate_kwargs:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = generate_kwargs['''max_new_tokens''']
else:
SCREAMING_SNAKE_CASE__ : Any = generate_kwargs.get('''max_length''' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('''We cannot infer how many new tokens are expected''' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'''We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'''
''' model\'s max length''' )
SCREAMING_SNAKE_CASE__ : Any = inputs['''input_ids'''][:, -keep_length:]
if "attention_mask" in inputs:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = inputs['''attention_mask'''][:, -keep_length:]
return inputs
def lowercase__ ( self : Optional[Any] , _lowercase : Any , **_lowercase : Dict ):
SCREAMING_SNAKE_CASE__ : str = model_inputs['''input_ids''']
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_inputs.get('''attention_mask''' , _lowercase )
# Allow empty prompts
if input_ids.shape[1] == 0:
SCREAMING_SNAKE_CASE__ : Dict = None
SCREAMING_SNAKE_CASE__ : Any = None
SCREAMING_SNAKE_CASE__ : Optional[int] = 1
else:
SCREAMING_SNAKE_CASE__ : Dict = input_ids.shape[0]
SCREAMING_SNAKE_CASE__ : Tuple = model_inputs.pop('''prompt_text''' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
SCREAMING_SNAKE_CASE__ : Optional[Any] = generate_kwargs.pop('''prefix_length''' , 0 )
if prefix_length > 0:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = '''max_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].max_new_tokens is not None
)
if not has_max_new_tokens:
SCREAMING_SNAKE_CASE__ : Dict = generate_kwargs.get('''max_length''' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
SCREAMING_SNAKE_CASE__ : str = '''min_new_tokens''' in generate_kwargs or (
'''generation_config''' in generate_kwargs
and generate_kwargs['''generation_config'''].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
SCREAMING_SNAKE_CASE__ : Dict = self.model.generate(input_ids=_lowercase , attention_mask=_lowercase , **_lowercase )
SCREAMING_SNAKE_CASE__ : Tuple = generated_sequence.shape[0]
if self.framework == "pt":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = generated_sequence.reshape(_lowercase , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
SCREAMING_SNAKE_CASE__ : Tuple = tf.reshape(_lowercase , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def lowercase__ ( self : Optional[int] , _lowercase : Any , _lowercase : int=ReturnType.FULL_TEXT , _lowercase : int=True ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = model_outputs['''generated_sequence'''][0]
SCREAMING_SNAKE_CASE__ : List[Any] = model_outputs['''input_ids''']
SCREAMING_SNAKE_CASE__ : Union[str, Any] = model_outputs['''prompt_text''']
SCREAMING_SNAKE_CASE__ : Optional[int] = generated_sequence.numpy().tolist()
SCREAMING_SNAKE_CASE__ : Optional[Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
SCREAMING_SNAKE_CASE__ : Dict = {'''generated_token_ids''': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
SCREAMING_SNAKE_CASE__ : List[str] = self.tokenizer.decode(
_lowercase , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
SCREAMING_SNAKE_CASE__ : Optional[Any] = 0
else:
SCREAMING_SNAKE_CASE__ : List[Any] = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_lowercase , clean_up_tokenization_spaces=_lowercase , ) )
if return_type == ReturnType.FULL_TEXT:
SCREAMING_SNAKE_CASE__ : Optional[int] = prompt_text + text[prompt_length:]
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = text[prompt_length:]
SCREAMING_SNAKE_CASE__ : int = {'''generated_text''': all_text}
records.append(_lowercase )
return records
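End to end, this class backs the text-generation pipeline; the handle_long_generation="hole" path prepared above left-truncates the prompt so that prompt plus new tokens still fit the model window (a usage sketch with the public gpt2 checkpoint):

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
out = generator(
    "In 1991, the remains of",
    max_new_tokens=20,
    handle_long_generation="hole",   # left-truncate the prompt if it is too long
)
print(out[0]["generated_text"])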
| 35 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
a_ :str = logging.get_logger(__name__)
def a ( A__ , A__ , A__ , A__ ) -> Tuple[int, int]:
'''simple docstring'''
def constraint_to_multiple_of(A__ , A__ , A__=0 , A__=None ):
SCREAMING_SNAKE_CASE__ : Optional[int] = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
SCREAMING_SNAKE_CASE__ : Any = math.floor(val / multiple ) * multiple
if x < min_val:
SCREAMING_SNAKE_CASE__ : Any = math.ceil(val / multiple ) * multiple
return x
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (output_size, output_size) if isinstance(A__ , A__ ) else output_size
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Tuple = get_image_size(A__ )
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = output_size
# determine new height and width
SCREAMING_SNAKE_CASE__ : List[str] = output_height / input_height
SCREAMING_SNAKE_CASE__ : Dict = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
SCREAMING_SNAKE_CASE__ : List[str] = scale_width
else:
# fit height
SCREAMING_SNAKE_CASE__ : Optional[Any] = scale_height
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_height * input_height , multiple=A__ )
SCREAMING_SNAKE_CASE__ : int = constraint_to_multiple_of(scale_width * input_width , multiple=A__ )
return (new_height, new_width)
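# Worked example of the rule above (illustrative numbers): input 480x640,
# target 384x384, keep_aspect_ratio=True, ensure_multiple_of=32.
#   scale_height = 384/480 = 0.8, scale_width = 384/640 = 0.6
#   |1 - 0.6| > |1 - 0.8|, so we "fit height" and reuse 0.8 for both sides.
#   new_height = round(0.8*480/32)*32 = 384, new_width = round(0.8*640/32)*32 = 512
# The image is resized to (384, 512): aspect ratio kept, both sides multiples of 32.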
class lowercase ( _UpperCAmelCase ):
lowerCamelCase : List[str] = ['''pixel_values''']
def __init__( self : List[Any] , _lowercase : bool = True , _lowercase : Dict[str, int] = None , _lowercase : PILImageResampling = PILImageResampling.BILINEAR , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : bool = True , _lowercase : Union[int, float] = 1 / 2_55 , _lowercase : bool = True , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , **_lowercase : List[Any] , ):
super().__init__(**_lowercase )
SCREAMING_SNAKE_CASE__ : Dict = size if size is not None else {'''height''': 3_84, '''width''': 3_84}
SCREAMING_SNAKE_CASE__ : Optional[int] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[str] = do_resize
SCREAMING_SNAKE_CASE__ : Optional[int] = size
SCREAMING_SNAKE_CASE__ : int = keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : Optional[Any] = ensure_multiple_of
SCREAMING_SNAKE_CASE__ : List[str] = resample
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_rescale
SCREAMING_SNAKE_CASE__ : Optional[int] = rescale_factor
SCREAMING_SNAKE_CASE__ : List[Any] = do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE__ : Any = image_std if image_std is not None else IMAGENET_STANDARD_STD
def lowercase__ ( self : Optional[int] , _lowercase : np.ndarray , _lowercase : Dict[str, int] , _lowercase : bool = False , _lowercase : int = 1 , _lowercase : PILImageResampling = PILImageResampling.BICUBIC , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[int] , ):
SCREAMING_SNAKE_CASE__ : List[Any] = get_size_dict(_lowercase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = get_resize_output_image_size(
_lowercase , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=_lowercase , multiple=_lowercase , )
return resize(_lowercase , size=_lowercase , resample=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[int, float] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : str , ):
return rescale(_lowercase , scale=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : List[str] , _lowercase : np.ndarray , _lowercase : Union[float, List[float]] , _lowercase : Union[float, List[float]] , _lowercase : Optional[Union[str, ChannelDimension]] = None , **_lowercase : Optional[Any] , ):
return normalize(_lowercase , mean=_lowercase , std=_lowercase , data_format=_lowercase , **_lowercase )
def lowercase__ ( self : Optional[Any] , _lowercase : ImageInput , _lowercase : bool = None , _lowercase : int = None , _lowercase : bool = None , _lowercase : int = None , _lowercase : PILImageResampling = None , _lowercase : bool = None , _lowercase : float = None , _lowercase : bool = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[float, List[float]]] = None , _lowercase : Optional[Union[str, TensorType]] = None , _lowercase : ChannelDimension = ChannelDimension.FIRST , **_lowercase : Tuple , ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE__ : List[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE__ : List[str] = get_size_dict(_lowercase )
SCREAMING_SNAKE_CASE__ : List[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE__ : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE__ : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE__ : str = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE__ : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE__ : Tuple = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE__ : str = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE__ : Optional[Any] = make_list_of_images(_lowercase )
if not valid_images(_lowercase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and (size is None or resample is None):
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE__ : str = [to_numpy_array(_lowercase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE__ : Any = [self.resize(image=_lowercase , size=_lowercase , resample=_lowercase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE__ : Tuple = [self.rescale(image=_lowercase , scale=_lowercase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE__ : Any = [self.normalize(image=_lowercase , mean=_lowercase , std=_lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [to_channel_dimension_format(_lowercase , _lowercase ) for image in images]
SCREAMING_SNAKE_CASE__ : str = {'''pixel_values''': images}
return BatchFeature(data=_lowercase , tensor_type=_lowercase )
def lowercase__ ( self : Tuple , _lowercase : Optional[Any] , _lowercase : List[Tuple] = None ):
SCREAMING_SNAKE_CASE__ : str = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(_lowercase ) != len(_lowercase ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(_lowercase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE__ : Tuple = []
for idx in range(len(_lowercase ) ):
SCREAMING_SNAKE_CASE__ : Dict = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=_lowercase )
SCREAMING_SNAKE_CASE__ : Any = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(_lowercase )
else:
SCREAMING_SNAKE_CASE__ : Any = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE__ : Dict = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 35 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a_ :List[str] = {
'configuration_swiftformer': [
'SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'SwiftFormerConfig',
'SwiftFormerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ :List[str] = [
'SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'SwiftFormerForImageClassification',
'SwiftFormerModel',
'SwiftFormerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
a_ :Optional[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
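This module, like the wav2vec2_phoneme __init__ further down, uses the lazy-import pattern: heavy submodules are imported only when one of their attributes is first accessed, which keeps import transformers cheap. The core idea in plain Python (a minimal sketch of the pattern, not HF's actual _LazyModule):

import importlib
import types

class LazyModule(types.ModuleType):
    # Resolve attributes to their defining module on first access.
    def __init__(self, name: str, import_structure: dict[str, list[str]]):
        super().__init__(name)
        # attribute name -> fully qualified module that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr: str):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)   # cache so the import happens only once
        return value

# e.g. LazyModule("shim", {"json": ["dumps"]}).dumps({"a": 1}) == '{"a": 1}'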
| 35 |
from __future__ import annotations
from typing import Any
class lowercase :
def __init__( self : int , _lowercase : int ):
SCREAMING_SNAKE_CASE__ : List[str] = num_of_nodes
SCREAMING_SNAKE_CASE__ : list[list[int]] = []
SCREAMING_SNAKE_CASE__ : dict[int, int] = {}
def lowercase__ ( self : Union[str, Any] , _lowercase : int , _lowercase : int , _lowercase : int ):
self.m_edges.append([u_node, v_node, weight] )
def lowercase__ ( self : Optional[int] , _lowercase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def lowercase__ ( self : Optional[Any] , _lowercase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
SCREAMING_SNAKE_CASE__ : Any = self.find_component(_lowercase )
def lowercase__ ( self : int , _lowercase : list[int] , _lowercase : int , _lowercase : int ):
if component_size[u_node] <= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : Dict = v_node
component_size[v_node] += component_size[u_node]
self.set_component(_lowercase )
elif component_size[u_node] >= component_size[v_node]:
SCREAMING_SNAKE_CASE__ : List[Any] = self.find_component(_lowercase )
component_size[u_node] += component_size[v_node]
self.set_component(_lowercase )
def lowercase__ ( self : str ):
SCREAMING_SNAKE_CASE__ : Optional[int] = []
SCREAMING_SNAKE_CASE__ : Any = 0
SCREAMING_SNAKE_CASE__ : list[Any] = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
SCREAMING_SNAKE_CASE__ : List[str] = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : Union[str, Any] = edge
SCREAMING_SNAKE_CASE__ : Tuple = self.m_component[u]
SCREAMING_SNAKE_CASE__ : List[str] = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
SCREAMING_SNAKE_CASE__ : int = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(_lowercase , _lowercase ):
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ : List[str] = edge
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[u]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(_lowercase , _lowercase , _lowercase )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
SCREAMING_SNAKE_CASE__ : List[Any] = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
def a ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
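The class above implements Boruvka's algorithm: every pass records the cheapest edge leaving each component, then merges across those edges, so the component count at least halves per pass. A self-contained de-obfuscated sketch of the same idea (the name boruvka_mst is mine; in the mangled listing every method is literally called lowercase__):

def boruvka_mst(n: int, edges: list[tuple[int, int, int]]) -> int:
    comp = list(range(n))                        # flat component labels

    def find(x: int) -> int:
        while comp[x] != x:
            x = comp[x]
        return x

    total, components = 0, n
    while components > 1:
        cheapest: dict[int, tuple[int, int, int]] = {}
        for u, v, w in edges:                    # cheapest edge out of each component
            cu, cv = find(u), find(v)
            if cu != cv:
                for c in (cu, cv):
                    if c not in cheapest or cheapest[c][2] > w:
                        cheapest[c] = (u, v, w)
        for u, v, w in cheapest.values():
            cu, cv = find(u), find(v)
            if cu != cv:                         # still distinct: merge, keep edge
                comp[cu] = cv
                total += w
                components -= 1
    return total

assert boruvka_mst(4, [(0, 1, 1), (1, 2, 2), (2, 3, 1), (0, 3, 3)]) == 4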
| 35 | 1 |
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def a ( A__=None ) -> Union[str, Any]:
'''simple docstring'''
if subparsers is not None:
SCREAMING_SNAKE_CASE__ : int = subparsers.add_parser('''env''' )
else:
SCREAMING_SNAKE_CASE__ : List[Any] = argparse.ArgumentParser('''Accelerate env command''' )
parser.add_argument(
'''--config_file''' , default=A__ , help='''The config file to use for the default values in the launching script.''' )
if subparsers is not None:
parser.set_defaults(func=A__ )
return parser
def a ( A__ ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.__version__
SCREAMING_SNAKE_CASE__ : Optional[Any] = torch.cuda.is_available()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = is_xpu_available()
SCREAMING_SNAKE_CASE__ : List[Any] = is_npu_available()
SCREAMING_SNAKE_CASE__ : Tuple = '''Not found'''
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(A__ ):
SCREAMING_SNAKE_CASE__ : List[str] = load_config_from_file(args.config_file ).to_dict()
SCREAMING_SNAKE_CASE__ : int = {
'''`Accelerate` version''': version,
'''Platform''': platform.platform(),
'''Python version''': platform.python_version(),
'''Numpy version''': np.__version__,
'''PyTorch version (GPU?)''': f"""{pt_version} ({pt_cuda_available})""",
'''PyTorch XPU available''': str(A__ ),
'''PyTorch NPU available''': str(A__ ),
'''System RAM''': f"""{psutil.virtual_memory().total / 1_0_2_4 ** 3:.2f} GB""",
}
if pt_cuda_available:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = torch.cuda.get_device_name()
print('''\nCopy-and-paste the text below in your GitHub issue\n''' )
print('''\n'''.join([f"""- {prop}: {val}""" for prop, val in info.items()] ) )
print('''- `Accelerate` default config:''' if args.config_file is None else '''- `Accelerate` config passed:''' )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = (
'''\n'''.join([f"""\t- {prop}: {val}""" for prop, val in accelerate_config.items()] )
if isinstance(A__ , A__ )
else f"""\t{accelerate_config}"""
)
print(A__ )
SCREAMING_SNAKE_CASE__ : Optional[int] = accelerate_config
return info
def a ( ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : List[str] = env_command_parser()
SCREAMING_SNAKE_CASE__ : List[str] = parser.parse_args()
env_command(A__ )
return 0
if __name__ == "__main__":
raise SystemExit(main())
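This module backs the accelerate env subcommand: running it prints the bullet list built above plus the parsed default config, which is what the Accelerate issue template asks for. Invoked from Python rather than the shell (a sketch; the import path assumes this file lives at accelerate/commands/env.py as upstream):

from accelerate.commands.env import env_command, env_command_parser

args = env_command_parser().parse_args([])   # no --config_file: use the default
env_command(args)                            # prints the environment report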
| 35 |
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a_ :Tuple = {'tokenization_wav2vec2_phoneme': ['Wav2Vec2PhonemeCTCTokenizer']}
if TYPE_CHECKING:
from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
import sys
a_ :Optional[int] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 35 | 1 |
a_ :Dict = 8.3_1_4_4_6_2 # Unit - J mol-1 K-1
def a ( A__ , A__ , A__ ) -> float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or volume < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / volume
def a ( A__ , A__ , A__ ) -> float:
'''simple docstring'''
if moles < 0 or kelvin < 0 or pressure < 0:
raise ValueError('''Invalid inputs. Enter positive value.''' )
return moles * kelvin * UNIVERSAL_GAS_CONSTANT / pressure
if __name__ == "__main__":
from doctest import testmod
testmod()
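Both helpers rearrange the ideal gas law PV = nRT with R = 8.314462 J mol^-1 K^-1: the first returns pressure given volume, the second volume given pressure. A worked check of the arithmetic (a minimal sketch):

R = 8.314462  # J mol^-1 K^-1

# P = nRT / V: one mole at 273.15 K in 22.4 L (0.0224 m^3).
pressure = 1 * 273.15 * R / 0.0224
print(f"{pressure:.0f} Pa")   # ~101388 Pa, i.e. about 1 atm (101325 Pa)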
| 35 |
def a ( A__ ) -> str:
'''simple docstring'''
return "".join([hex(A__ )[2:].zfill(2 ).upper() for byte in list(A__ )] )
def a ( A__ ) -> bytes:
'''simple docstring'''
if (len(A__ ) % 2) != 0:
raise ValueError('''Base16 encoded data is invalid:\nData does not have an even number of hex digits.''' )
# Check the character set - the standard base16 alphabet
# is uppercase according to RFC3548 section 6
if not set(A__ ) <= set('''0123456789ABCDEF''' ):
raise ValueError('''Base16 encoded data is invalid:\nData is not uppercase hex or it contains invalid characters.''' )
# For every two hexadecimal digits (= a byte), turn it into an integer.
# Then, string the result together into bytes, and return it.
return bytes(int(data[i] + data[i + 1] , 1_6 ) for i in range(0 , len(A__ ) , 2 ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 | 1 |