| text (string, 7–318k chars) | id (string, 14–166 chars) | metadata (dict) | __index_level_0__ (int64, 0–439) |
---|---|---|---|
# flake8: noqa
# There's no way to ignore "F401 '...' imported but unused" warnings in this
# module, but to preserve other warnings. So, don't check this module at all.
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.8.2"
from .auto import (
AutoPeftModel,
AutoPeftModelForCausalLM,
AutoPeftModelForSequenceClassification,
AutoPeftModelForSeq2SeqLM,
AutoPeftModelForTokenClassification,
AutoPeftModelForQuestionAnswering,
AutoPeftModelForFeatureExtraction,
)
from .mapping import (
MODEL_TYPE_TO_PEFT_MODEL_MAPPING,
PEFT_TYPE_TO_CONFIG_MAPPING,
get_peft_config,
get_peft_model,
inject_adapter_in_model,
)
from .mixed_model import PeftMixedModel
from .peft_model import (
PeftModel,
PeftModelForCausalLM,
PeftModelForSeq2SeqLM,
PeftModelForSequenceClassification,
PeftModelForTokenClassification,
PeftModelForQuestionAnswering,
PeftModelForFeatureExtraction,
)
from .tuners import (
AdaptionPromptConfig,
AdaptionPromptModel,
LoraConfig,
LoftQConfig,
LoraModel,
LoHaConfig,
LoHaModel,
LoKrConfig,
LoKrModel,
IA3Config,
IA3Model,
AdaLoraConfig,
AdaLoraModel,
PrefixEncoder,
PrefixTuningConfig,
PromptEmbedding,
PromptEncoder,
PromptEncoderConfig,
PromptEncoderReparameterizationType,
PromptTuningConfig,
PromptTuningInit,
MultitaskPromptTuningConfig,
MultitaskPromptTuningInit,
OFTConfig,
OFTModel,
PolyConfig,
PolyModel,
)
from .utils import (
TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING,
PeftType,
TaskType,
bloom_model_postprocess_past_key_value,
get_peft_model_state_dict,
prepare_model_for_int8_training,
prepare_model_for_kbit_training,
set_peft_model_state_dict,
shift_tokens_right,
load_peft_weights,
cast_mixed_precision_params,
)
from .config import PeftConfig, PromptLearningConfig
| peft/src/peft/__init__.py/0 | {
"file_path": "peft/src/peft/__init__.py",
"repo_id": "peft",
"token_count": 952
} | 171 |
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
from typing import Any, Optional, Set, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from peft.tuners.lycoris_utils import LycorisLayer
class LoKrLayer(nn.Module, LycorisLayer):
# All names of layers that may contain adapter weights
adapter_layer_names = (
"lokr_w1",
"lokr_w1_a",
"lokr_w1_b",
"lokr_w2",
"lokr_w2_a",
"lokr_w2_b",
"lokr_t2",
)
# other_param_names is defined on parent class
def __init__(self, base_layer: nn.Module) -> None:
super().__init__()
LycorisLayer.__init__(self, base_layer)
# LoKr info
self.lokr_w1 = nn.ParameterDict({})
self.lokr_w1_a = nn.ParameterDict({})
self.lokr_w1_b = nn.ParameterDict({})
self.lokr_w2 = nn.ParameterDict({})
self.lokr_w2_a = nn.ParameterDict({})
self.lokr_w2_b = nn.ParameterDict({})
self.lokr_t2 = nn.ParameterDict({})
@property
def _available_adapters(self) -> Set[str]:
return {
*self.lokr_w1,
*self.lokr_w1_a,
*self.lokr_w1_b,
*self.lokr_w2,
*self.lokr_w2_a,
*self.lokr_w2_b,
*self.lokr_t2,
}
def create_adapter_parameters(
self,
adapter_name: str,
r: int,
shape,
use_w1: bool,
use_w2: bool,
use_effective_conv2d: bool,
):
if use_w1:
self.lokr_w1[adapter_name] = nn.Parameter(torch.empty(shape[0][0], shape[1][0]))
else:
self.lokr_w1_a[adapter_name] = nn.Parameter(torch.empty(shape[0][0], r))
self.lokr_w1_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][0]))
if len(shape) == 4:
# Conv2d
if use_w2:
self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1], *shape[2:]))
elif use_effective_conv2d:
self.lokr_t2[adapter_name] = nn.Parameter(torch.empty(r, r, shape[2], shape[3]))
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(r, shape[0][1])) # b, 1-mode
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1])) # d, 2-mode
else:
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1] * shape[2] * shape[3]))
else:
# Linear
if use_w2:
self.lokr_w2[adapter_name] = nn.Parameter(torch.empty(shape[0][1], shape[1][1]))
else:
self.lokr_w2_a[adapter_name] = nn.Parameter(torch.empty(shape[0][1], r))
self.lokr_w2_b[adapter_name] = nn.Parameter(torch.empty(r, shape[1][1]))
def reset_adapter_parameters(self, adapter_name: str):
if adapter_name in self.lokr_w1:
nn.init.zeros_(self.lokr_w1[adapter_name])
else:
nn.init.zeros_(self.lokr_w1_a[adapter_name])
nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_w2:
nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_t2:
nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
def reset_adapter_parameters_random(self, adapter_name: str):
if adapter_name in self.lokr_w1:
nn.init.kaiming_uniform_(self.lokr_w1[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w1_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w1_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_w2:
nn.init.kaiming_uniform_(self.lokr_w2[adapter_name], a=math.sqrt(5))
else:
nn.init.kaiming_uniform_(self.lokr_w2_a[adapter_name], a=math.sqrt(5))
nn.init.kaiming_uniform_(self.lokr_w2_b[adapter_name], a=math.sqrt(5))
if adapter_name in self.lokr_t2:
nn.init.kaiming_uniform_(self.lokr_t2[adapter_name], a=math.sqrt(5))
def update_layer(
self,
adapter_name: str,
r: int,
alpha: float,
rank_dropout: float,
module_dropout: float,
init_weights: bool,
use_effective_conv2d: bool,
decompose_both: bool,
decompose_factor: int,
**kwargs,
) -> None:
"""Internal function to create lokr adapter
Args:
adapter_name (`str`): Name for the adapter to add.
r (`int`): Rank for the added adapter.
alpha (`float`): Alpha for the added adapter.
rank_dropout (`float`): The dropout probability for rank dimension during training
module_dropout (`float`): The dropout probability for disabling adapter during training.
init_weights (`bool`): Whether to initialize adapter weights.
use_effective_conv2d (`bool`): Use parameter effective decomposition for Conv2d with ksize > 1.
            decompose_both (`bool`): Whether to perform rank decomposition of the left Kronecker product matrix.
decompose_factor (`int`): Kronecker product decomposition factor.
"""
if r <= 0:
raise ValueError(f"`r` should be a positive integer value but the value passed is {r}")
self.r[adapter_name] = r
self.alpha[adapter_name] = alpha
self.scaling[adapter_name] = alpha / r
self.rank_dropout[adapter_name] = rank_dropout
self.module_dropout[adapter_name] = module_dropout
base_layer = self.get_base_layer()
# Determine shape of LoKr weights
if isinstance(base_layer, nn.Linear):
in_dim, out_dim = base_layer.in_features, base_layer.out_features
in_m, in_n = factorization(in_dim, decompose_factor)
out_l, out_k = factorization(out_dim, decompose_factor)
shape = ((out_l, out_k), (in_m, in_n)) # ((a, b), (c, d)), out_dim = a*c, in_dim = b*d
use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
use_w2 = not (r < max(shape[0][1], shape[1][1]) / 2)
use_effective_conv2d = False
elif isinstance(base_layer, nn.Conv2d):
in_dim, out_dim = base_layer.in_channels, base_layer.out_channels
k_size = base_layer.kernel_size
in_m, in_n = factorization(in_dim, decompose_factor)
out_l, out_k = factorization(out_dim, decompose_factor)
shape = ((out_l, out_k), (in_m, in_n), *k_size) # ((a, b), (c, d), *k_size)
use_w1 = not (decompose_both and r < max(shape[0][0], shape[1][0]) / 2)
use_w2 = r >= max(shape[0][1], shape[1][1]) / 2
use_effective_conv2d = use_effective_conv2d and base_layer.kernel_size != (1, 1)
else:
raise TypeError(f"LoKr is not implemented for base layers of type {type(base_layer).__name__}")
# Create weights with provided shape
self.create_adapter_parameters(adapter_name, r, shape, use_w1, use_w2, use_effective_conv2d)
# Initialize weights
if init_weights:
self.reset_adapter_parameters(adapter_name)
else:
self.reset_adapter_parameters_random(adapter_name)
# Move new weights to device
weight = getattr(self.get_base_layer(), "weight", None)
if weight is not None:
# the layer is already completely initialized, this is an update
if weight.dtype.is_floating_point or weight.dtype.is_complex:
self.to(weight.device, dtype=weight.dtype)
else:
self.to(weight.device)
self.set_adapter(self.active_adapters)
def get_delta_weight(self, adapter_name: str) -> torch.Tensor:
# https://github.com/KohakuBlueleaf/LyCORIS/blob/e4259b870d3354a9615a96be61cb5d07455c58ea/lycoris/modules/lokr.py#L224
if adapter_name in self.lokr_w1:
w1 = self.lokr_w1[adapter_name]
else:
w1 = self.lokr_w1_a[adapter_name] @ self.lokr_w1_b[adapter_name]
if adapter_name in self.lokr_w2:
w2 = self.lokr_w2[adapter_name]
elif adapter_name in self.lokr_t2:
w2 = make_weight_cp(self.lokr_t2[adapter_name], self.lokr_w2_a[adapter_name], self.lokr_w2_b[adapter_name])
else:
w2 = self.lokr_w2_a[adapter_name] @ self.lokr_w2_b[adapter_name]
# Make weights with Kronecker product
weight = make_kron(w1, w2)
weight = weight.reshape(self.get_base_layer().weight.shape)
# Perform rank dropout during training - drop rows of addition weights
rank_dropout = self.rank_dropout[adapter_name]
if self.training and rank_dropout:
drop = (torch.rand(weight.size(0)) > rank_dropout).float()
drop = drop.view(-1, *[1] * len(weight.shape[1:])).to(weight.device)
drop /= drop.mean()
weight *= drop
return weight
def forward(self, x: torch.Tensor, *args, **kwargs) -> torch.Tensor:
previous_dtype = x.dtype
if self.disable_adapters:
if self.merged:
self.unmerge()
result = self.base_layer(x, *args, **kwargs)
elif self.merged:
result = self.base_layer(x, *args, **kwargs)
else:
result = self.base_layer(x, *args, **kwargs)
# Execute all the adapters
for active_adapter in self.active_adapters:
if active_adapter not in self._available_adapters:
continue
module_dropout = self.module_dropout[active_adapter]
# Modify current execution weights
if (not self.training) or (self.training and torch.rand(1) > module_dropout):
result = result + self._get_delta_activations(active_adapter, x, *args, **kwargs)
result = result.to(previous_dtype)
return result
class Linear(LoKrLayer):
"""LoKr implemented in Linear layer"""
def __init__(
self,
base_layer: nn.Module,
device: Optional[Union[str, torch.device]] = None,
dtype: Optional[torch.dtype] = None,
adapter_name: str = "default",
r: int = 0,
alpha: float = 0.0,
rank_dropout: float = 0.0,
module_dropout: float = 0.0,
init_weights: bool = True,
**kwargs,
):
super().__init__(base_layer)
# Create adapter and set it active
self._active_adapter = adapter_name
self.update_layer(adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, **kwargs)
def _get_delta_activations(
self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
# don't add bias here, because the bias is already included in the output of the base_layer
return F.linear(input, delta_weight)
def __repr__(self) -> str:
rep = super().__repr__()
return "lokr." + rep
class Conv2d(LoKrLayer):
"""LoKr implemented in Conv2d layer"""
def __init__(
self,
base_layer: nn.Module,
device: Optional[Union[str, torch.device]] = None,
dtype: Optional[torch.dtype] = None,
adapter_name: str = "default",
r: int = 0,
alpha: float = 0.0,
rank_dropout: float = 0.0,
module_dropout: float = 0.0,
use_effective_conv2d: bool = False,
init_weights: bool = True,
**kwargs,
):
super().__init__(base_layer)
# Create adapter and set it active
self._active_adapter = adapter_name
self.update_layer(
adapter_name, r, alpha, rank_dropout, module_dropout, init_weights, use_effective_conv2d, **kwargs
)
def _get_delta_activations(
self, adapter_name: str, input: torch.Tensor, *args: Any, **kwargs: Any
) -> torch.Tensor:
delta_weight = self.get_delta_weight(adapter_name)
# don't add bias here, because the bias is already included in the output of the base_layer
base_layer = self.get_base_layer()
return F.conv2d(
input,
delta_weight,
stride=base_layer.stride,
padding=base_layer.padding,
dilation=base_layer.dilation,
groups=base_layer.groups,
)
def __repr__(self) -> str:
rep = super().__repr__()
return "lokr." + rep
# Below code is a direct copy from https://github.com/KohakuBlueleaf/LyCORIS/blob/eb460098187f752a5d66406d3affade6f0a07ece/lycoris/modules/lokr.py#L11
def factorization(dimension: int, factor: int = -1) -> Tuple[int, int]:
"""Factorizes the provided number into the product of two numbers
Args:
dimension (`int`): The number that needs to be factorized.
factor (`int`, optional):
Factorization divider. The algorithm will try to output two numbers, one of each will be as close to the
factor as possible. If -1 is provided, the decomposition algorithm would try to search dividers near the
square root of the dimension. Defaults to -1.
Returns:
Tuple[`int`, `int`]: A tuple of two numbers, whose product is equal to the provided number. The first number is
always less than or equal to the second.
Example:
```py
>>> factorization(256, factor=-1)
(16, 16)
>>> factorization(128, factor=-1)
(8, 16)
>>> factorization(127, factor=-1)
(1, 127)
>>> factorization(128, factor=4)
(4, 32)
```
"""
if factor > 0 and (dimension % factor) == 0:
m = factor
n = dimension // factor
return m, n
if factor == -1:
factor = dimension
m, n = 1, dimension
length = m + n
while m < n:
new_m = m + 1
while dimension % new_m != 0:
new_m += 1
new_n = dimension // new_m
if new_m + new_n > length or new_m > factor:
break
else:
m, n = new_m, new_n
if m > n:
n, m = m, n
return m, n
def make_weight_cp(t, wa, wb):
rebuild2 = torch.einsum("i j k l, i p, j r -> p r k l", t, wa, wb) # [c, d, k1, k2]
return rebuild2
def make_kron(w1, w2, scale=1.0):
if len(w2.shape) == 4:
w1 = w1.unsqueeze(2).unsqueeze(2)
w2 = w2.contiguous()
rebuild = torch.kron(w1, w2)
return rebuild * scale
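# Illustrative shape check (comment only, not library code): for a Linear base layer with
# in_features=768, out_features=768 and decompose_factor=-1, factorization(768) returns (24, 32),
# so w1 (or w1_a @ w1_b) has shape (24, 24) and w2 (or w2_a @ w2_b) has shape (32, 32);
# torch.kron(w1, w2) then rebuilds a (768, 768) delta weight matching the base layer's weight
# shape, which is what LoKrLayer.get_delta_weight relies on.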
| peft/src/peft/tuners/lokr/layer.py/0 | {
"file_path": "peft/src/peft/tuners/lokr/layer.py",
"repo_id": "peft",
"token_count": 7543
} | 172 |
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import List, Optional, Union
from peft.tuners.lycoris_utils import LycorisConfig
from peft.utils import PeftType
@dataclass
class OFTConfig(LycorisConfig):
"""
This is the configuration class to store the configuration of a [`OFTModel`].
Args:
r (`int`): OFT rank.
module_dropout (`int`): The dropout probability for disabling OFT modules during training.
target_modules (`Optional[Union[List[str], str]]`):
The names of the modules to apply the adapter to. If this is specified, only the modules with the specified
names will be replaced. When passing a string, a regex match will be performed. When passing a list of
strings, either an exact match will be performed or it is checked if the name of the module ends with any
of the passed strings. If this is specified as 'all-linear', then all linear modules are chosen, excluding
the output layer. If this is not specified, modules will be chosen according to the model architecture. If
the architecture is not known, an error will be raised -- in this case, you should specify the target
modules manually.
init_weights (`bool`):
Whether to perform initialization of OFT weights.
layers_to_transform (`Union[List[int], int]`):
The layer indices to transform. If a list of ints is passed, it will apply the adapter to the layer indices
that are specified in this list. If a single integer is passed, it will apply the transformations on the
layer at this index.
layers_pattern (`str`):
The layer pattern name, used only if `layers_to_transform` is different from `None`.
rank_pattern (`dict`):
            The mapping from layer names or regex expressions to ranks which are different from the default rank
            specified by `r`.
modules_to_save (`List[str]`):
List of modules apart from adapter layers to be set as trainable and saved in the final checkpoint.
coft (`bool`):
            Whether to use the constrained variant of OFT or not, off by default.
eps (`float`):
            The control strength of COFT (i.e. the freedom of rotation). Only has an effect if `coft` is set to True.
block_share (`bool`):
Whether to share the OFT parameters between blocks or not. This is `False` by default.
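    Example (an illustrative, minimal configuration; the target module names are placeholders that must exist in
    your base model):

    ```py
    >>> from peft import OFTConfig

    >>> config = OFTConfig(r=8, module_dropout=0.0, target_modules=["q", "v"], coft=False)
    ```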
"""
r: int = field(default=8, metadata={"help": "OFT rank"})
module_dropout: float = field(
default=0.0, metadata={"help": "The dropout probability for disabling OFT modules during training"}
)
target_modules: Optional[Union[List[str], str]] = field(
default=None,
metadata={
"help": "List of module names or regex expression of the module names to replace with OFT."
"For example, ['q', 'v'] or '.*decoder.*(SelfAttention|EncDecAttention).*(q|v)$' "
"This can also be a wildcard 'all-linear' which matches all linear/Conv1D layers except the output layer."
},
)
init_weights: bool = field(
default=True,
metadata={
"help": (
"Whether to initialize the weights of the OFT layers with their default initialization. Don't change "
"this setting, except if you know exactly what you're doing."
),
},
)
layers_to_transform: Optional[Union[List[int], int]] = field(
default=None,
metadata={
"help": "The layer indexes to transform, is this argument is specified, PEFT will transform only the layers indexes that are specified inside this list. If a single integer is passed, PEFT will transform only the layer at this index."
},
)
layers_pattern: Optional[str] = field(
default=None,
metadata={
"help": "The layer pattern name, used only if `layers_to_transform` is different to None and if the layer pattern is not in the common layers pattern."
},
)
modules_to_save: Optional[List[str]] = field(
default=None,
metadata={
"help": "List of modules apart from OFT layers to be set as trainable and saved in the final checkpoint. "
"For example, in Sequence Classification or Token Classification tasks, "
"the final layer `classifier/score` are randomly initialized and as such need to be trainable and saved."
},
)
coft: bool = field(
default=False,
metadata={"help": "Whether to use the constrainted variant of OFT or not."},
)
eps: float = field(
default=6e-5,
metadata={
"help": "The control strength of COFT. The freedom of rotation. Only has an effect if `coft` is set to True."
},
)
block_share: bool = field(
default=False,
metadata={"help": "Whether to share the OFT parameters between blocks or not."},
)
def __post_init__(self):
self.peft_type = PeftType.OFT
self.target_modules = (
set(self.target_modules) if isinstance(self.target_modules, list) else self.target_modules
)
| peft/src/peft/tuners/oft/config.py/0 | {
"file_path": "peft/src/peft/tuners/oft/config.py",
"repo_id": "peft",
"token_count": 2090
} | 173 |
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import torch
from .config import PromptTuningInit
class PromptEmbedding(torch.nn.Module):
"""
The model to encode virtual tokens into prompt embeddings.
Args:
config ([`PromptTuningConfig`]): The configuration of the prompt embedding.
word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model.
**Attributes**:
- **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding.
Example:
```py
>>> from peft import PromptEmbedding, PromptTuningConfig
>>> config = PromptTuningConfig(
... peft_type="PROMPT_TUNING",
... task_type="SEQ_2_SEQ_LM",
... num_virtual_tokens=20,
... token_dim=768,
... num_transformer_submodules=1,
... num_attention_heads=12,
... num_layers=12,
... prompt_tuning_init="TEXT",
... prompt_tuning_init_text="Predict if sentiment of this review is positive, negative or neutral",
... tokenizer_name_or_path="t5-base",
... )
>>> # t5_model.shared is the word embeddings of the base model
>>> prompt_embedding = PromptEmbedding(config, t5_model.shared)
```
Input Shape: (`batch_size`, `total_virtual_tokens`)
Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)
"""
def __init__(self, config, word_embeddings):
super().__init__()
total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules
self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)
if config.prompt_tuning_init == PromptTuningInit.TEXT and not config.inference_mode:
from transformers import AutoTokenizer
tokenizer_kwargs = config.tokenizer_kwargs or {}
tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path, **tokenizer_kwargs)
init_text = config.prompt_tuning_init_text
init_token_ids = tokenizer(init_text)["input_ids"]
# Trim or iterate until num_text_tokens matches total_virtual_tokens
num_text_tokens = len(init_token_ids)
if num_text_tokens > total_virtual_tokens:
init_token_ids = init_token_ids[:total_virtual_tokens]
elif num_text_tokens < total_virtual_tokens:
num_reps = math.ceil(total_virtual_tokens / num_text_tokens)
init_token_ids = init_token_ids * num_reps
init_token_ids = init_token_ids[:total_virtual_tokens]
init_token_ids = torch.LongTensor(init_token_ids).to(word_embeddings.weight.device)
word_embedding_weights = word_embeddings(init_token_ids).detach().clone()
word_embedding_weights = word_embedding_weights.to(torch.float32)
self.embedding.weight = torch.nn.Parameter(word_embedding_weights)
def forward(self, indices):
# Just get embeddings
prompt_embeddings = self.embedding(indices)
return prompt_embeddings
| peft/src/peft/tuners/prompt_tuning/model.py/0 | {
"file_path": "peft/src/peft/tuners/prompt_tuning/model.py",
"repo_id": "peft",
"token_count": 1444
} | 174 |
# coding=utf-8
# Copyright 2023-present the HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest.mock import Mock, call, patch
import torch
from parameterized import parameterized
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import AdaLoraConfig, PromptTuningConfig, PromptTuningInit, get_peft_model
from .testing_common import PeftCommonTester, PeftTestConfigManager
PEFT_DECODER_MODELS_TO_TEST = [
"hf-internal-testing/tiny-random-OPTForCausalLM",
"hf-internal-testing/tiny-random-GPTNeoXForCausalLM",
"hf-internal-testing/tiny-random-GPT2LMHeadModel",
"hf-internal-testing/tiny-random-BloomForCausalLM",
"hf-internal-testing/tiny-random-gpt_neo",
"hf-internal-testing/tiny-random-GPTJForCausalLM",
"hf-internal-testing/tiny-random-GPTBigCodeForCausalLM",
"HuggingFaceM4/tiny-random-LlamaForCausalLM",
]
FULL_GRID = {
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"task_type": "CAUSAL_LM",
}
def skip_adalora_and_gpt2(test_list):
return [test for test in test_list if not (("GPT2LMHeadModel" in test[1]) and (test[2] == AdaLoraConfig))]
class PeftDecoderModelTester(unittest.TestCase, PeftCommonTester):
r"""
Test if the PeftModel behaves as expected. This includes:
- test if the model has the expected methods
    We use `parameterized.expand` for debugging purposes to test each model individually.
"""
transformers_class = AutoModelForCausalLM
def prepare_inputs_for_testing(self):
input_ids = torch.tensor([[1, 1, 1], [1, 2, 1]]).to(self.torch_device)
attention_mask = torch.tensor([[1, 1, 1], [1, 0, 1]]).to(self.torch_device)
input_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return input_dict
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_attributes_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_model_attr(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_adapter_name(self, test_name, model_id, config_cls, config_kwargs):
self._test_adapter_name(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_prepare_for_training_parametrized(self, test_name, model_id, config_cls, config_kwargs):
self._test_prepare_for_training(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_prompt_tuning_text_prepare_for_training(self, test_name, model_id, config_cls, config_kwargs):
# Test that prompt tuning works with text init
if config_cls != PromptTuningConfig:
return
config_kwargs = config_kwargs.copy()
config_kwargs["prompt_tuning_init"] = PromptTuningInit.TEXT
config_kwargs["prompt_tuning_init_text"] = "This is a test prompt."
config_kwargs["tokenizer_name_or_path"] = model_id
self._test_prepare_for_training(model_id, config_cls, config_kwargs)
def test_prompt_tuning_text_tokenizer_kwargs(self):
# Allow users to pass additional arguments to Tokenizer.from_pretrained
# Fix for #1032
mock = Mock()
orig_from_pretrained = AutoTokenizer.from_pretrained
def mock_autotokenizer_from_pretrained(*args, **kwargs):
mock(*args, **kwargs)
return orig_from_pretrained(config.tokenizer_name_or_path)
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
config = PromptTuningConfig(
base_model_name_or_path=model_id,
tokenizer_name_or_path=model_id,
num_virtual_tokens=10,
prompt_tuning_init=PromptTuningInit.TEXT,
task_type="CAUSAL_LM",
prompt_tuning_init_text="This is a test prompt.",
tokenizer_kwargs={"trust_remote_code": True, "foo": "bar"},
)
model = self.transformers_class.from_pretrained(model_id).to(self.torch_device)
with patch("transformers.AutoTokenizer.from_pretrained", mock_autotokenizer_from_pretrained):
model = get_peft_model(model, config)
expected_call = call(model_id, trust_remote_code=True, foo="bar")
self.assertEqual(mock.call_args, expected_call)
def test_prompt_tuning_config_invalid_args(self):
# Raise an error when tokenizer_kwargs is used with prompt_tuning_init!='TEXT', because this argument has no
# function in that case
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
msg = "tokenizer_kwargs only valid when using prompt_tuning_init='TEXT'."
with self.assertRaisesRegex(ValueError, expected_regex=msg):
PromptTuningConfig(
base_model_name_or_path=model_id,
tokenizer_name_or_path=model_id,
num_virtual_tokens=10,
task_type="CAUSAL_LM",
prompt_tuning_init_text="This is a test prompt.",
prompt_tuning_init=PromptTuningInit.RANDOM, # <= should not be used together with tokenizer_kwargs
tokenizer_kwargs={"trust_remote_code": True, "foo": "bar"},
)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained_pickle(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained(model_id, config_cls, config_kwargs, safe_serialization=False)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained_selected_adapters(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_save_pretrained_selected_adapters_pickle(self, test_name, model_id, config_cls, config_kwargs):
self._test_save_pretrained_selected_adapters(model_id, config_cls, config_kwargs, safe_serialization=False)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_from_pretrained_config_construction(self, test_name, model_id, config_cls, config_kwargs):
self._test_from_pretrained_config_construction(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"task_type": "CAUSAL_LM",
},
)
)
def test_merge_layers(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"task_type": "CAUSAL_LM",
},
)
)
def test_merge_layers_multi(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers_multi(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"task_type": "CAUSAL_LM",
},
)
)
def test_merge_layers_nan(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers_nan(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_generate(self, test_name, model_id, config_cls, config_kwargs):
self._test_generate(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_generate_pos_args(self, test_name, model_id, config_cls, config_kwargs):
# positional args are supported for PeftModelForCausalLM
self._test_generate_pos_args(model_id, config_cls, config_kwargs, raises_err=False)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_merge_layers_fp16(self, test_name, model_id, config_cls, config_kwargs):
self._test_merge_layers_fp16(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_generate_half_prec(self, test_name, model_id, config_cls, config_kwargs):
self._test_generate_half_prec(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_prefix_tuning_half_prec_conversion(self, test_name, model_id, config_cls, config_kwargs):
self._test_prefix_tuning_half_prec_conversion(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_decoders(self, test_name, model_id, config_cls, config_kwargs):
self._test_training(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_decoders_layer_indexing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_layer_indexing(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_decoders_gradient_checkpointing(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_gradient_checkpointing(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_inference_safetensors(self, test_name, model_id, config_cls, config_kwargs):
self._test_inference_safetensors(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_peft_model_device_map(self, test_name, model_id, config_cls, config_kwargs):
self._test_peft_model_device_map(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_delete_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_delete_inactive_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_delete_inactive_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_adding_multiple_adapters_with_bias_raises(self, test_name, model_id, config_cls, config_kwargs):
self._test_adding_multiple_adapters_with_bias_raises(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"task_type": "CAUSAL_LM",
},
filter_params_func=skip_adalora_and_gpt2,
)
)
def test_unload_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_unload_adapter(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"task_type": "CAUSAL_LM",
},
)
)
def test_weighted_combination_of_adapters(self, test_name, model_id, config_cls, config_kwargs):
self._test_weighted_combination_of_adapters(model_id, config_cls, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_training_prompt_learning_tasks(self, test_name, model_id, config_cls, config_kwargs):
self._test_training_prompt_learning_tasks(model_id, config_cls, config_kwargs)
@parameterized.expand(
PeftTestConfigManager.get_grid_parameters(
{
"model_ids": PEFT_DECODER_MODELS_TO_TEST,
"lora_kwargs": {"init_lora_weights": [False]},
"ia3_kwargs": {"init_ia3_weights": [False]},
"adalora_kwargs": {"init_lora_weights": [False]},
"task_type": "CAUSAL_LM",
},
)
)
def test_disable_adapter(self, test_name, model_id, config_cls, config_kwargs):
self._test_disable_adapter(model_id, config_cls, config_kwargs)
def test_generate_adalora_no_dropout(self):
# test for issue #730
model_id = "hf-internal-testing/tiny-random-OPTForCausalLM"
config_kwargs = {
"target_modules": None,
"task_type": "CAUSAL_LM",
"lora_dropout": 0.0,
}
self._test_generate(model_id, AdaLoraConfig, config_kwargs)
@parameterized.expand(PeftTestConfigManager.get_grid_parameters(FULL_GRID))
def test_passing_input_embeds_works(self, test_name, model_id, config_cls, config_kwargs):
self._test_passing_input_embeds_works(test_name, model_id, config_cls, config_kwargs)
| peft/tests/test_decoder_models.py/0 | {
"file_path": "peft/tests/test_decoder_models.py",
"repo_id": "peft",
"token_count": 6340
} | 175 |
# Getting Started
## Welcome
Welcome to the `timm` documentation, a lean set of docs that covers the basics of `timm`.
For a more comprehensive set of docs (currently under development), please visit [timmdocs](http://timm.fast.ai) by [Aman Arora](https://github.com/amaarora).
## Install
The library can be installed with pip:
```
pip install timm
```
I update the PyPI (pip) packages when I'm confident there are no significant model regressions from previous releases. If you want to pip install the bleeding edge from GitHub, use:
```
pip install git+https://github.com/rwightman/pytorch-image-models.git
```
!!! info "Conda Environment"
All development and testing has been done in Conda Python 3 environments on Linux x86-64 systems, specifically Python 3.7, 3.8, 3.9, and 3.10.
Little to no care has been taken to be Python 2.x friendly, and it will not be supported. If you run into any challenges running on Windows or another OS, I'm definitely open to looking into those issues so long as it's in a reproducible (read: Conda) environment.
PyTorch versions 1.9, 1.10, 1.11 have been tested with the latest versions of this code.
I've tried to keep the dependencies minimal; the setup is as per the PyTorch default install instructions for Conda:
```
conda create -n torch-env
conda activate torch-env
conda install pytorch torchvision cudatoolkit=11.3 -c pytorch
conda install pyyaml
```
## Load a Pretrained Model
Pretrained models can be loaded using `timm.create_model`
```python
import timm
m = timm.create_model('mobilenetv3_large_100', pretrained=True)
m.eval()
```
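To run the model on an image, you can build the matching preprocessing transform from the model's pretrained configuration. Below is a minimal inference sketch (the image path is a placeholder you need to replace):

```python
import torch
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform

config = resolve_data_config({}, model=m)   # pull input size, mean/std, interpolation from the model
transform = create_transform(**config)

img = Image.open('path/to/image.jpg').convert('RGB')  # placeholder path
tensor = transform(img).unsqueeze(0)                   # add a batch dimension
with torch.no_grad():
    probabilities = m(tensor).softmax(dim=1)
```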
## List Models with Pretrained Weights
```python
import timm
from pprint import pprint
model_names = timm.list_models(pretrained=True)
pprint(model_names)
>>> ['adv_inception_v3',
'cspdarknet53',
'cspresnext50',
'densenet121',
'densenet161',
'densenet169',
'densenet201',
'densenetblur121d',
'dla34',
'dla46_c',
...
]
```
## List Model Architectures by Wildcard
```python
import timm
from pprint import pprint
model_names = timm.list_models('*resne*t*')
pprint(model_names)
>>> ['cspresnet50',
'cspresnet50d',
'cspresnet50w',
'cspresnext50',
...
]
```
| pytorch-image-models/docs/index.md/0 | {
"file_path": "pytorch-image-models/docs/index.md",
"repo_id": "pytorch-image-models",
"token_count": 736
} | 176 |
# EfficientNet (Knapsack Pruned)
**EfficientNet** is a convolutional neural network architecture and scaling method that uniformly scales all dimensions of depth/width/resolution using a *compound coefficient*. Unlike conventional practice that arbitrarily scales these factors, the EfficientNet scaling method uniformly scales network width, depth, and resolution with a set of fixed scaling coefficients. For example, if we want to use $2^N$ times more computational resources, then we can simply increase the network depth by $\alpha ^ N$, width by $\beta ^ N$, and image size by $\gamma ^ N$, where $\alpha, \beta, \gamma$ are constant coefficients determined by a small grid search on the original small model. EfficientNet uses a compound coefficient $\phi$ to uniformly scale network width, depth, and resolution in a principled way.
The compound scaling method is justified by the intuition that if the input image is bigger, then the network needs more layers to increase the receptive field and more channels to capture more fine-grained patterns on the bigger image.
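As a concrete illustration of the rule above, the snippet below applies compound scaling with the coefficients reported in the EfficientNet paper ($\alpha = 1.2$, $\beta = 1.1$, $\gamma = 1.15$); the choice of $\phi = 3$ is only an example:

```python
# Compound scaling sketch; alpha, beta, gamma are the grid-searched values from the paper,
# chosen so that alpha * beta**2 * gamma**2 ~= 2 (i.e. each +1 in phi roughly doubles FLOPs).
alpha, beta, gamma = 1.2, 1.1, 1.15
phi = 3  # example compound coefficient

depth_multiplier = alpha ** phi        # ~1.73x more layers
width_multiplier = beta ** phi         # ~1.33x more channels
resolution_multiplier = gamma ** phi   # ~1.52x larger input resolution
```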
The base EfficientNet-B0 network is based on the inverted bottleneck residual blocks of [MobileNetV2](https://paperswithcode.com/method/mobilenetv2), in addition to [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block).
This collection consists of pruned EfficientNet models.
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{tan2020efficientnet,
title={EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks},
author={Mingxing Tan and Quoc V. Le},
year={2020},
eprint={1905.11946},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
```BibTeX
@misc{aflalo2020knapsack,
title={Knapsack Pruning with Inner Distillation},
author={Yonathan Aflalo and Asaf Noy and Ming Lin and Itamar Friedman and Lihi Zelnik},
year={2020},
eprint={2002.08258},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
```
<!--
Type: model-index
Collections:
- Name: EfficientNet Pruned
Paper:
Title: Knapsack Pruning with Inner Distillation
URL: https://paperswithcode.com/paper/knapsack-pruning-with-inner-distillation
Models:
- Name: efficientnet_b1_pruned
In Collection: EfficientNet Pruned
Metadata:
FLOPs: 489653114
Parameters: 6330000
File Size: 25595162
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b1_pruned
Crop Pct: '0.882'
Image Size: '240'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1208
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb1_pruned_9ebb3fe6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.25%
Top 5 Accuracy: 93.84%
- Name: efficientnet_b2_pruned
In Collection: EfficientNet Pruned
Metadata:
FLOPs: 878133915
Parameters: 8310000
File Size: 33555005
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b2_pruned
Crop Pct: '0.89'
Image Size: '260'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1219
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb2_pruned_203f55bc.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.91%
Top 5 Accuracy: 94.86%
- Name: efficientnet_b3_pruned
In Collection: EfficientNet Pruned
Metadata:
FLOPs: 1239590641
Parameters: 9860000
File Size: 39770812
Architecture:
- 1x1 Convolution
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inverted Residual Block
- Squeeze-and-Excitation Block
- Swish
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: efficientnet_b3_pruned
Crop Pct: '0.904'
Image Size: '300'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/efficientnet.py#L1230
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45403/outputs/effnetb3_pruned_5abcc29f.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.86%
Top 5 Accuracy: 95.24%
-->
| pytorch-image-models/docs/models/.templates/models/efficientnet-pruned.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/efficientnet-pruned.md",
"repo_id": "pytorch-image-models",
"token_count": 1945
} | 177 |
# (Legacy) SE-ResNet
**SE ResNet** is a variant of a [ResNet](https://www.paperswithcode.com/method/resnet) that employs [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) to enable the network to perform dynamic channel-wise feature recalibration.
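The core squeeze-and-excitation computation is small enough to sketch directly; the block below is illustrative only (the reduction ratio of 16 and the layer names are assumptions, not the exact `timm` implementation):

```python
import torch
import torch.nn as nn

class SEBlock(nn.Module):
    """Minimal squeeze-and-excitation block: global pooling -> bottleneck MLP -> channel gating."""

    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        self.fc1 = nn.Linear(channels, channels // reduction)
        self.fc2 = nn.Linear(channels // reduction, channels)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        s = x.mean(dim=(2, 3))                                 # squeeze: one descriptor per channel
        s = torch.sigmoid(self.fc2(torch.relu(self.fc1(s))))   # excitation: per-channel weights in (0, 1)
        return x * s.view(x.size(0), -1, 1, 1)                 # recalibrate the input feature map
```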
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{hu2019squeezeandexcitation,
title={Squeeze-and-Excitation Networks},
author={Jie Hu and Li Shen and Samuel Albanie and Gang Sun and Enhua Wu},
year={2019},
eprint={1709.01507},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Legacy SE ResNet
Paper:
Title: Squeeze-and-Excitation Networks
URL: https://paperswithcode.com/paper/squeeze-and-excitation-networks
Models:
- Name: legacy_seresnet101
In Collection: Legacy SE ResNet
Metadata:
FLOPs: 9762614000
Parameters: 49330000
File Size: 197822624
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnet101
LR: 0.6
Epochs: 100
Layers: 101
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L426
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.38%
Top 5 Accuracy: 94.26%
- Name: legacy_seresnet152
In Collection: Legacy SE ResNet
Metadata:
FLOPs: 14553578160
Parameters: 66819999
File Size: 268033864
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnet152
LR: 0.6
Epochs: 100
Layers: 152
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L433
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.67%
Top 5 Accuracy: 94.38%
- Name: legacy_seresnet18
In Collection: Legacy SE ResNet
Metadata:
FLOPs: 2328876024
Parameters: 11780000
File Size: 47175663
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnet18
LR: 0.6
Epochs: 100
Layers: 18
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L405
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 71.74%
Top 5 Accuracy: 90.34%
- Name: legacy_seresnet34
In Collection: Legacy SE ResNet
Metadata:
FLOPs: 4706201004
Parameters: 21960000
File Size: 87958697
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnet34
LR: 0.6
Epochs: 100
Layers: 34
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 1024
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L412
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.79%
Top 5 Accuracy: 92.13%
- Name: legacy_seresnet50
In Collection: Legacy SE ResNet
Metadata:
FLOPs: 4974351024
Parameters: 28090000
File Size: 112611220
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x NVIDIA Titan X GPUs
ID: legacy_seresnet50
LR: 0.6
Epochs: 100
Layers: 50
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '224'
Interpolation: bilinear
Minibatch Size: 1024
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/senet.py#L419
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.64%
Top 5 Accuracy: 93.74%
-->
| pytorch-image-models/docs/models/.templates/models/legacy-se-resnet.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/legacy-se-resnet.md",
"repo_id": "pytorch-image-models",
"token_count": 2886
} | 178 |
# ResNet
**Residual Networks**, or **ResNets**, learn residual functions with reference to the layer inputs, instead of learning unreferenced functions. Instead of hoping each few stacked layers directly fit a desired underlying mapping, residual nets let these layers fit a residual mapping. They stack [residual blocks](https://www.paperswithcode.com/method/residual-block) on top of each other to form a network: e.g. a ResNet-50 has fifty layers using these blocks.
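A minimal residual (basic) block is sketched below for illustration; `timm`'s actual blocks additionally handle striding, downsampling of the identity path, and other options:

```python
import torch.nn as nn

class BasicBlock(nn.Module):
    """Minimal residual block: two 3x3 convs whose output is added back to the input."""

    def __init__(self, channels: int):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(channels)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x                                 # skip connection
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        return self.relu(out + identity)             # residual addition, then activation
```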
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/HeZRS15,
author = {Kaiming He and
Xiangyu Zhang and
Shaoqing Ren and
Jian Sun},
title = {Deep Residual Learning for Image Recognition},
journal = {CoRR},
volume = {abs/1512.03385},
year = {2015},
url = {http://arxiv.org/abs/1512.03385},
archivePrefix = {arXiv},
eprint = {1512.03385},
timestamp = {Wed, 17 Apr 2019 17:23:45 +0200},
biburl = {https://dblp.org/rec/journals/corr/HeZRS15.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: ResNet
Paper:
Title: Deep Residual Learning for Image Recognition
URL: https://paperswithcode.com/paper/deep-residual-learning-for-image-recognition
Models:
- Name: resnet18
In Collection: ResNet
Metadata:
FLOPs: 2337073152
Parameters: 11690000
File Size: 46827520
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet18
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L641
Weights: https://download.pytorch.org/models/resnet18-5c106cde.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 69.74%
Top 5 Accuracy: 89.09%
- Name: resnet26
In Collection: ResNet
Metadata:
FLOPs: 3026804736
Parameters: 16000000
File Size: 64129972
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet26
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L675
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26-9aa10e23.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.29%
Top 5 Accuracy: 92.57%
- Name: resnet34
In Collection: ResNet
Metadata:
FLOPs: 4718469120
Parameters: 21800000
File Size: 87290831
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet34
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L658
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.11%
Top 5 Accuracy: 92.28%
- Name: resnet50
In Collection: ResNet
Metadata:
FLOPs: 5282531328
Parameters: 25560000
File Size: 102488165
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet50
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L691
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.04%
Top 5 Accuracy: 94.39%
- Name: resnetblur50
In Collection: ResNet
Metadata:
FLOPs: 6621606912
Parameters: 25560000
File Size: 102488165
Architecture:
- 1x1 Convolution
- Batch Normalization
- Blur Pooling
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnetblur50
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L1160
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnetblur50-84f4748f.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.29%
Top 5 Accuracy: 94.64%
- Name: tv_resnet101
In Collection: ResNet
Metadata:
FLOPs: 10068547584
Parameters: 44550000
File Size: 178728960
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: tv_resnet101
LR: 0.1
Epochs: 90
Crop Pct: '0.875'
LR Gamma: 0.1
Momentum: 0.9
Batch Size: 32
Image Size: '224'
LR Step Size: 30
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L761
Weights: https://download.pytorch.org/models/resnet101-5d3b4d8f.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.37%
Top 5 Accuracy: 93.56%
- Name: tv_resnet152
In Collection: ResNet
Metadata:
FLOPs: 14857660416
Parameters: 60190000
File Size: 241530880
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: tv_resnet152
LR: 0.1
Epochs: 90
Crop Pct: '0.875'
LR Gamma: 0.1
Momentum: 0.9
Batch Size: 32
Image Size: '224'
LR Step Size: 30
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L769
Weights: https://download.pytorch.org/models/resnet152-b121ed2d.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.32%
Top 5 Accuracy: 94.05%
- Name: tv_resnet34
In Collection: ResNet
Metadata:
FLOPs: 4718469120
Parameters: 21800000
File Size: 87306240
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: tv_resnet34
LR: 0.1
Epochs: 90
Crop Pct: '0.875'
LR Gamma: 0.1
Momentum: 0.9
Batch Size: 32
Image Size: '224'
LR Step Size: 30
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L745
Weights: https://download.pytorch.org/models/resnet34-333f7ec4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 73.3%
Top 5 Accuracy: 91.42%
- Name: tv_resnet50
In Collection: ResNet
Metadata:
FLOPs: 5282531328
Parameters: 25560000
File Size: 102502400
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: tv_resnet50
LR: 0.1
Epochs: 90
Crop Pct: '0.875'
LR Gamma: 0.1
Momentum: 0.9
Batch Size: 32
Image Size: '224'
LR Step Size: 30
Weight Decay: 0.0001
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/resnet.py#L753
Weights: https://download.pytorch.org/models/resnet50-19c8e357.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.16%
Top 5 Accuracy: 92.88%
-->
| pytorch-image-models/docs/models/.templates/models/resnet.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/resnet.md",
"repo_id": "pytorch-image-models",
"token_count": 4271
} | 179 |
# (Tensorflow) Inception v3
**Inception v3** is a convolutional neural network architecture from the Inception family that makes several improvements, including the use of [Label Smoothing](https://paperswithcode.com/method/label-smoothing), factorized 7 x 7 convolutions, and an [auxiliary classifier](https://paperswithcode.com/method/auxiliary-classifier) to propagate label information lower down the network (along with the use of batch normalization for layers in the side head). The key building block is an [Inception Module](https://paperswithcode.com/method/inception-v3-module).
The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
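As a rough, illustrative sketch (not the `timm` implementation, and with arbitrary channel and spatial sizes), the factorization idea mentioned above replaces a single 7 x 7 convolution with a 1 x 7 convolution followed by a 7 x 1 convolution, keeping the same receptive field with fewer parameters:

```python
import torch
import torch.nn as nn

# Illustrative only: a 7x7 conv factorized into a 1x7 followed by a 7x1 conv.
# For C input/output channels this uses 2*7*C*C weights instead of 49*C*C.
factorized = nn.Sequential(
    nn.Conv2d(192, 192, kernel_size=(1, 7), padding=(0, 3)),
    nn.Conv2d(192, 192, kernel_size=(7, 1), padding=(3, 0)),
)
x = torch.randn(1, 192, 17, 17)
print(factorized(x).shape)  # torch.Size([1, 192, 17, 17])
```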
{% include 'code_snippets.md' %}
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@article{DBLP:journals/corr/SzegedyVISW15,
author = {Christian Szegedy and
Vincent Vanhoucke and
Sergey Ioffe and
Jonathon Shlens and
Zbigniew Wojna},
title = {Rethinking the Inception Architecture for Computer Vision},
journal = {CoRR},
volume = {abs/1512.00567},
year = {2015},
url = {http://arxiv.org/abs/1512.00567},
archivePrefix = {arXiv},
eprint = {1512.00567},
timestamp = {Mon, 13 Aug 2018 16:49:07 +0200},
biburl = {https://dblp.org/rec/journals/corr/SzegedyVISW15.bib},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
```
<!--
Type: model-index
Collections:
- Name: TF Inception v3
Paper:
Title: Rethinking the Inception Architecture for Computer Vision
URL: https://paperswithcode.com/paper/rethinking-the-inception-architecture-for
Models:
- Name: tf_inception_v3
In Collection: TF Inception v3
Metadata:
FLOPs: 7352418880
Parameters: 23830000
File Size: 95549439
Architecture:
- 1x1 Convolution
- Auxiliary Classifier
- Average Pooling
- Average Pooling
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Inception-v3 Module
- Max Pooling
- ReLU
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Gradient Clipping
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 50x NVIDIA Kepler GPUs
ID: tf_inception_v3
LR: 0.045
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v3.py#L449
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_inception_v3-e0069de4.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.87%
Top 5 Accuracy: 93.65%
-->
| pytorch-image-models/docs/models/.templates/models/tf-inception-v3.md/0 | {
"file_path": "pytorch-image-models/docs/models/.templates/models/tf-inception-v3.md",
"repo_id": "pytorch-image-models",
"token_count": 1141
} | 180 |
# ECA-ResNet
An **ECA ResNet** is a variant on a [ResNet](https://paperswithcode.com/method/resnet) that utilises an [Efficient Channel Attention module](https://paperswithcode.com/method/efficient-channel-attention). Efficient Channel Attention is an architectural unit based on [squeeze-and-excitation blocks](https://paperswithcode.com/method/squeeze-and-excitation-block) that reduces model complexity without dimensionality reduction.
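As a rough, illustrative sketch (not the `timm` implementation; the fixed kernel size below stands in for the paper's adaptively chosen one), an ECA-style module re-weights channels with a single 1-D convolution over globally pooled features, avoiding the channel reduction used in SE blocks:

```python
import torch
import torch.nn as nn

class ECASketch(nn.Module):
    """Simplified ECA-style channel attention (illustrative only)."""
    def __init__(self, kernel_size: int = 3):
        super().__init__()
        # 1-D conv across the channel dimension; no dimensionality reduction.
        self.conv = nn.Conv1d(1, 1, kernel_size, padding=kernel_size // 2, bias=False)
        self.gate = nn.Sigmoid()

    def forward(self, x):  # x: (B, C, H, W)
        y = x.mean(dim=(2, 3))            # global average pool -> (B, C)
        y = self.conv(y.unsqueeze(1))     # treat channels as a 1-D sequence
        y = self.gate(y).squeeze(1)       # per-channel weights in (0, 1)
        return x * y[:, :, None, None]    # re-weight the feature maps

x = torch.randn(2, 64, 56, 56)
print(ECASketch()(x).shape)  # torch.Size([2, 64, 56, 56])
```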
## How do I use this model on an image?
To load a pretrained model:
```python
import timm
model = timm.create_model('ecaresnet101d', pretrained=True)
model.eval()
```
To load and preprocess the image:
```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```python
import torch
with torch.no_grad():
out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
categories = [s.strip() for s in f.readlines()]
# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `ecaresnet101d`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```python
model = timm.create_model('ecaresnet101d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{wang2020ecanet,
title={ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks},
author={Qilong Wang and Banggu Wu and Pengfei Zhu and Peihua Li and Wangmeng Zuo and Qinghua Hu},
year={2020},
eprint={1910.03151},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: ECAResNet
Paper:
Title: 'ECA-Net: Efficient Channel Attention for Deep Convolutional Neural Networks'
URL: https://paperswithcode.com/paper/eca-net-efficient-channel-attention-for-deep
Models:
- Name: ecaresnet101d
In Collection: ECAResNet
Metadata:
FLOPs: 10377193728
Parameters: 44570000
File Size: 178815067
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Efficient Channel Attention
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x RTX 2080Ti GPUs
ID: ecaresnet101d
LR: 0.1
Epochs: 100
Layers: 101
Crop Pct: '0.875'
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1087
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet101D_281c5844.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.18%
Top 5 Accuracy: 96.06%
- Name: ecaresnet101d_pruned
In Collection: ECAResNet
Metadata:
FLOPs: 4463972081
Parameters: 24880000
File Size: 99852736
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Efficient Channel Attention
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: ecaresnet101d_pruned
Layers: 101
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1097
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45610/outputs/ECAResNet101D_P_75a3370e.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.82%
Top 5 Accuracy: 95.64%
- Name: ecaresnet50d
In Collection: ECAResNet
Metadata:
FLOPs: 5591090432
Parameters: 25580000
File Size: 102579290
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Efficient Channel Attention
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 4x RTX 2080Ti GPUs
ID: ecaresnet50d
LR: 0.1
Epochs: 100
Layers: 50
Crop Pct: '0.875'
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0001
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1045
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNet50D_833caf58.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.61%
Top 5 Accuracy: 95.31%
- Name: ecaresnet50d_pruned
In Collection: ECAResNet
Metadata:
FLOPs: 3250730657
Parameters: 19940000
File Size: 79990436
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Efficient Channel Attention
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: ecaresnet50d_pruned
Layers: 50
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1055
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45899/outputs/ECAResNet50D_P_9c67f710.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.71%
Top 5 Accuracy: 94.88%
- Name: ecaresnetlight
In Collection: ECAResNet
Metadata:
FLOPs: 5276118784
Parameters: 30160000
File Size: 120956612
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Efficient Channel Attention
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
ID: ecaresnetlight
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/a7f95818e44b281137503bcf4b3e3e94d8ffa52f/timm/models/resnet.py#L1077
Weights: https://imvl-automl-sh.oss-cn-shanghai.aliyuncs.com/darts/hyperml/hyperml/job_45402/outputs/ECAResNetLight_4f34b35b.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.46%
Top 5 Accuracy: 95.25%
--> | pytorch-image-models/docs/models/ecaresnet.md/0 | {
"file_path": "pytorch-image-models/docs/models/ecaresnet.md",
"repo_id": "pytorch-image-models",
"token_count": 3638
} | 181 |
# Inception v4
**Inception-v4** is a convolutional neural network architecture that builds on previous iterations of the Inception family by simplifying the architecture and using more inception modules than [Inception-v3](https://paperswithcode.com/method/inception-v3).
## How do I use this model on an image?
To load a pretrained model:
```python
import timm
model = timm.create_model('inception_v4', pretrained=True)
model.eval()
```
To load and preprocess the image:
```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```python
import torch
with torch.no_grad():
out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
categories = [s.strip() for s in f.readlines()]
# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `inception_v4`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```python
model = timm.create_model('inception_v4', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{szegedy2016inceptionv4,
title={Inception-v4, Inception-ResNet and the Impact of Residual Connections on Learning},
author={Christian Szegedy and Sergey Ioffe and Vincent Vanhoucke and Alex Alemi},
year={2016},
eprint={1602.07261},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Inception v4
Paper:
Title: Inception-v4, Inception-ResNet and the Impact of Residual Connections on
Learning
URL: https://paperswithcode.com/paper/inception-v4-inception-resnet-and-the-impact
Models:
- Name: inception_v4
In Collection: Inception v4
Metadata:
FLOPs: 15806527936
Parameters: 42680000
File Size: 171082495
Architecture:
- Average Pooling
- Dropout
- Inception-A
- Inception-B
- Inception-C
- Reduction-A
- Reduction-B
- Softmax
Tasks:
- Image Classification
Training Techniques:
- Label Smoothing
- RMSProp
- Weight Decay
Training Data:
- ImageNet
Training Resources: 20x NVIDIA Kepler GPUs
ID: inception_v4
LR: 0.045
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/inception_v4.py#L313
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/inceptionv4-8e4777a0.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 1.01%
Top 5 Accuracy: 16.85%
--> | pytorch-image-models/docs/models/inception-v4.md/0 | {
"file_path": "pytorch-image-models/docs/models/inception-v4.md",
"repo_id": "pytorch-image-models",
"token_count": 1622
} | 182 |
# ResNet-D
**ResNet-D** is a modification of the [ResNet](https://paperswithcode.com/method/resnet) architecture that utilises an [average pooling](https://paperswithcode.com/method/average-pooling) tweak for downsampling. The motivation is that in the unmodified ResNet, the [1x1 convolution](https://paperswithcode.com/method/1x1-convolution) in the downsampling block ignores 3/4 of the input feature maps, so the block is modified so that no information is ignored.
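As a simplified sketch (not `timm`'s actual ResNet code; the channel counts are arbitrary), the ResNet-D shortcut applies a stride-2 average pool before a stride-1 1x1 convolution, so every input position contributes to the downsampled features:

```python
import torch
import torch.nn as nn

in_ch, out_ch, stride = 256, 512, 2

# Plain ResNet shortcut: a stride-2 1x1 conv reads only 1 of every 4 spatial positions.
downsample_plain = nn.Sequential(
    nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=stride, bias=False),
    nn.BatchNorm2d(out_ch),
)

# ResNet-D shortcut: average pool first, then a stride-1 1x1 conv,
# so all input positions contribute to the downsampled features.
downsample_d = nn.Sequential(
    nn.AvgPool2d(kernel_size=2, stride=stride, ceil_mode=True, count_include_pad=False),
    nn.Conv2d(in_ch, out_ch, kernel_size=1, stride=1, bias=False),
    nn.BatchNorm2d(out_ch),
)

x = torch.randn(1, in_ch, 28, 28)
print(downsample_plain(x).shape, downsample_d(x).shape)  # both torch.Size([1, 512, 14, 14])
```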
## How do I use this model on an image?
To load a pretrained model:
```python
import timm
model = timm.create_model('resnet101d', pretrained=True)
model.eval()
```
To load and preprocess the image:
```python
import urllib
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
config = resolve_data_config({}, model=model)
transform = create_transform(**config)
url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
urllib.request.urlretrieve(url, filename)
img = Image.open(filename).convert('RGB')
tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```python
import torch
with torch.no_grad():
out = model(tensor)
probabilities = torch.nn.functional.softmax(out[0], dim=0)
print(probabilities.shape)
# prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```python
# Get imagenet class mappings
url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
urllib.request.urlretrieve(url, filename)
with open("imagenet_classes.txt", "r") as f:
categories = [s.strip() for s in f.readlines()]
# Print top categories per image
top5_prob, top5_catid = torch.topk(probabilities, 5)
for i in range(top5_prob.size(0)):
print(categories[top5_catid[i]], top5_prob[i].item())
# prints class names and probabilities like:
# [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `resnet101d`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](https://rwightman.github.io/pytorch-image-models/feature_extraction/), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```python
model = timm.create_model('resnet101d', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](https://rwightman.github.io/pytorch-image-models/scripts/) for training a new model afresh.
## Citation
```BibTeX
@misc{he2018bag,
title={Bag of Tricks for Image Classification with Convolutional Neural Networks},
author={Tong He and Zhi Zhang and Hang Zhang and Zhongyue Zhang and Junyuan Xie and Mu Li},
year={2018},
eprint={1812.01187},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: ResNet-D
Paper:
Title: Bag of Tricks for Image Classification with Convolutional Neural Networks
URL: https://paperswithcode.com/paper/bag-of-tricks-for-image-classification-with
Models:
- Name: resnet101d
In Collection: ResNet-D
Metadata:
FLOPs: 13805639680
Parameters: 44570000
File Size: 178791263
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet101d
Crop Pct: '0.94'
Image Size: '256'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L716
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 82.31%
Top 5 Accuracy: 96.06%
- Name: resnet152d
In Collection: ResNet-D
Metadata:
FLOPs: 20155275264
Parameters: 60210000
File Size: 241596837
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet152d
Crop Pct: '0.94'
Image Size: '256'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L724
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet152d_ra2-5cac0439.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.13%
Top 5 Accuracy: 96.35%
- Name: resnet18d
In Collection: ResNet-D
Metadata:
FLOPs: 2645205760
Parameters: 11710000
File Size: 46893231
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet18d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L649
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet18d_ra2-48a79e06.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 72.27%
Top 5 Accuracy: 90.69%
- Name: resnet200d
In Collection: ResNet-D
Metadata:
FLOPs: 26034378752
Parameters: 64690000
File Size: 259662933
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet200d
Crop Pct: '0.94'
Image Size: '256'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L749
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet200d_ra2-bdba9bf9.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 83.24%
Top 5 Accuracy: 96.49%
- Name: resnet26d
In Collection: ResNet-D
Metadata:
FLOPs: 3335276032
Parameters: 16010000
File Size: 64209122
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet26d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L683
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet26d-69e92c46.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 76.69%
Top 5 Accuracy: 93.15%
- Name: resnet34d
In Collection: ResNet-D
Metadata:
FLOPs: 5026601728
Parameters: 21820000
File Size: 87369807
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet34d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L666
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34d_ra2-f8dcfcaf.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.11%
Top 5 Accuracy: 93.38%
- Name: resnet50d
In Collection: ResNet-D
Metadata:
FLOPs: 5591002624
Parameters: 25580000
File Size: 102567109
Architecture:
- 1x1 Convolution
- Batch Normalization
- Bottleneck Residual Block
- Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Block
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: resnet50d
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/resnet.py#L699
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50d_ra2-464e36ba.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 80.55%
Top 5 Accuracy: 95.16%
--> | pytorch-image-models/docs/models/resnet-d.md/0 | {
"file_path": "pytorch-image-models/docs/models/resnet-d.md",
"repo_id": "pytorch-image-models",
"token_count": 3929
} | 183 |
# Installation
Before you start, you'll need to set up your environment and install the appropriate packages. `timm` is tested on **Python 3+**.
## Virtual Environment
You should install `timm` in a [virtual environment](https://docs.python.org/3/library/venv.html) to keep things tidy and avoid dependency conflicts.
1. Create and navigate to your project directory:
```bash
mkdir ~/my-project
cd ~/my-project
```
2. Start a virtual environment inside your directory:
```bash
python -m venv .env
```
3. Activate and deactivate the virtual environment with the following commands:
```bash
# Activate the virtual environment
source .env/bin/activate
# Deactivate the virtual environment
deactivate
```
Once you've created your virtual environment, you can install `timm` in it.
## Using pip
The most straightforward way to install `timm` is with pip:
```bash
pip install timm
```
Alternatively, you can install `timm` from GitHub directly to get the latest, bleeding-edge version:
```bash
pip install git+https://github.com/rwightman/pytorch-image-models.git
```
Run the following command to check if `timm` has been properly installed:
```bash
python -c "from timm import list_models; print(list_models(pretrained=True)[:5])"
```
This command lists the first five pretrained models available in `timm` (which are sorted alphabetically). You should see the following output:
```python
['adv_inception_v3', 'bat_resnext26ts', 'beit_base_patch16_224', 'beit_base_patch16_224_in22k', 'beit_base_patch16_384']
```
## From Source
Building `timm` from source lets you make changes to the code base. To install from the source, clone the repository and install with the following commands:
```bash
git clone https://github.com/rwightman/pytorch-image-models.git
cd pytorch-image-models
pip install -e .
```
Again, you can check if `timm` was properly installed with the following command:
```bash
python -c "from timm import list_models; print(list_models(pretrained=True)[:5])"
```
| pytorch-image-models/hfdocs/source/installation.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/installation.mdx",
"repo_id": "pytorch-image-models",
"token_count": 619
} | 184 |
# FBNet
**FBNet** is a type of convolutional neural architecture discovered through [DNAS](https://paperswithcode.com/method/dnas) neural architecture search. It utilises a basic image model block inspired by [MobileNetV2](https://paperswithcode.com/method/mobilenetv2) that uses depthwise convolutions and an inverted residual structure (see components).
The principal building block is the [FBNet Block](https://paperswithcode.com/method/fbnet-block).
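As a rough, illustrative sketch (not the `timm` implementation; the channel count, expansion factor, and kernel size are arbitrary), a MobileNetV2-style inverted residual block of the kind searched over by FBNet looks like this:

```py
>>> import torch
>>> import torch.nn as nn

>>> class InvertedResidualSketch(nn.Module):
...     """Illustrative inverted residual block as used in FBNet-like search spaces."""
...     def __init__(self, ch: int = 32, expansion: int = 6, kernel_size: int = 3):
...         super().__init__()
...         mid = ch * expansion
...         self.block = nn.Sequential(
...             nn.Conv2d(ch, mid, 1, bias=False), nn.BatchNorm2d(mid), nn.ReLU(inplace=True),       # expand
...             nn.Conv2d(mid, mid, kernel_size, padding=kernel_size // 2, groups=mid, bias=False),  # depthwise
...             nn.BatchNorm2d(mid), nn.ReLU(inplace=True),
...             nn.Conv2d(mid, ch, 1, bias=False), nn.BatchNorm2d(ch),                               # project
...         )
...     def forward(self, x):
...         return x + self.block(x)  # residual connection when stride=1 and channels match

>>> x = torch.randn(1, 32, 56, 56)
>>> print(InvertedResidualSketch()(x).shape)
>>> # prints: torch.Size([1, 32, 56, 56])
```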
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('fbnetc_100', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `fbnetc_100`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('fbnetc_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{wu2019fbnet,
title={FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural Architecture Search},
author={Bichen Wu and Xiaoliang Dai and Peizhao Zhang and Yanghan Wang and Fei Sun and Yiming Wu and Yuandong Tian and Peter Vajda and Yangqing Jia and Kurt Keutzer},
year={2019},
eprint={1812.03443},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: FBNet
Paper:
Title: 'FBNet: Hardware-Aware Efficient ConvNet Design via Differentiable Neural
Architecture Search'
URL: https://paperswithcode.com/paper/fbnet-hardware-aware-efficient-convnet-design
Models:
- Name: fbnetc_100
In Collection: FBNet
Metadata:
FLOPs: 508940064
Parameters: 5570000
File Size: 22525094
Architecture:
- 1x1 Convolution
- Convolution
- Dense Connections
- Dropout
- FBNet Block
- Global Average Pooling
- Softmax
Tasks:
- Image Classification
Training Techniques:
- SGD with Momentum
- Weight Decay
Training Data:
- ImageNet
Training Resources: 8x GPUs
ID: fbnetc_100
LR: 0.1
Epochs: 360
Layers: 22
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 256
Image Size: '224'
Weight Decay: 0.0005
Interpolation: bilinear
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L985
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/fbnetc_100-c345b898.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.12%
Top 5 Accuracy: 92.37%
--> | pytorch-image-models/hfdocs/source/models/fbnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/fbnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 1705
} | 185 |
# MnasNet
**MnasNet** is a type of convolutional neural network optimized for mobile devices that is discovered through mobile neural architecture search, which explicitly incorporates model latency into the main objective so that the search can identify a model that achieves a good trade-off between accuracy and latency. The main building block is an [inverted residual block](https://paperswithcode.com/method/inverted-residual-block) (from [MobileNetV2](https://paperswithcode.com/method/mobilenetv2)).
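As a rough sketch of the search objective (the exponent and latency values below are illustrative, not the exact settings used in the paper), the latency-aware reward scales accuracy by a power of the ratio between measured and target latency:

```py
>>> def mnas_reward(accuracy: float, latency_ms: float, target_ms: float,
...                 alpha: float = -0.07, beta: float = -0.07) -> float:
...     """Soft latency-aware objective in the spirit of MnasNet: ACC * (LAT / T) ** w."""
...     w = alpha if latency_ms <= target_ms else beta
...     return accuracy * (latency_ms / target_ms) ** w

>>> print(mnas_reward(0.75, 80.0, 75.0))  # slightly penalized: over the 75 ms target
>>> print(mnas_reward(0.74, 70.0, 75.0))  # slightly rewarded: under the target
```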
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('mnasnet_100', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `mnasnet_100`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('mnasnet_100', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{tan2019mnasnet,
title={MnasNet: Platform-Aware Neural Architecture Search for Mobile},
author={Mingxing Tan and Bo Chen and Ruoming Pang and Vijay Vasudevan and Mark Sandler and Andrew Howard and Quoc V. Le},
year={2019},
eprint={1807.11626},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: MNASNet
Paper:
Title: 'MnasNet: Platform-Aware Neural Architecture Search for Mobile'
URL: https://paperswithcode.com/paper/mnasnet-platform-aware-neural-architecture
Models:
- Name: mnasnet_100
In Collection: MNASNet
Metadata:
FLOPs: 416415488
Parameters: 4380000
File Size: 17731774
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Inverted Residual Block
- Max Pooling
- ReLU
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Techniques:
- RMSProp
- Weight Decay
Training Data:
- ImageNet
ID: mnasnet_100
Layers: 100
Dropout: 0.2
Crop Pct: '0.875'
Momentum: 0.9
Batch Size: 4000
Image Size: '224'
Interpolation: bicubic
RMSProp Decay: 0.9
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L894
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_b1-74cb7081.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 74.67%
Top 5 Accuracy: 92.1%
- Name: semnasnet_100
In Collection: MNASNet
Metadata:
FLOPs: 414570766
Parameters: 3890000
File Size: 15731489
Architecture:
- 1x1 Convolution
- Batch Normalization
- Convolution
- Depthwise Separable Convolution
- Dropout
- Global Average Pooling
- Inverted Residual Block
- Max Pooling
- ReLU
- Residual Connection
- Softmax
- Squeeze-and-Excitation Block
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: semnasnet_100
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/9a25fdf3ad0414b4d66da443fe60ae0aa14edc84/timm/models/efficientnet.py#L928
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mnasnet_a1-d9418771.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 75.45%
Top 5 Accuracy: 92.61%
--> | pytorch-image-models/hfdocs/source/models/mnasnet.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/mnasnet.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2101
} | 186 |
# SelecSLS
**SelecSLS** uses novel selective long and short range skip connections to improve the information flow, allowing for a drastically faster network without compromising accuracy.
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('selecsls42b', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `selecsls42b`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('selecsls42b', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@article{Mehta_2020,
title={XNect},
volume={39},
ISSN={1557-7368},
url={http://dx.doi.org/10.1145/3386569.3392410},
DOI={10.1145/3386569.3392410},
number={4},
journal={ACM Transactions on Graphics},
publisher={Association for Computing Machinery (ACM)},
author={Mehta, Dushyant and Sotnychenko, Oleksandr and Mueller, Franziska and Xu, Weipeng and Elgharib, Mohamed and Fua, Pascal and Seidel, Hans-Peter and Rhodin, Helge and Pons-Moll, Gerard and Theobalt, Christian},
year={2020},
month={Jul}
}
```
<!--
Type: model-index
Collections:
- Name: SelecSLS
Paper:
Title: 'XNect: Real-time Multi-Person 3D Motion Capture with a Single RGB Camera'
URL: https://paperswithcode.com/paper/xnect-real-time-multi-person-3d-human-pose
Models:
- Name: selecsls42b
In Collection: SelecSLS
Metadata:
FLOPs: 3824022528
Parameters: 32460000
File Size: 129948954
Architecture:
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Global Average Pooling
- ReLU
- SelecSLS Block
Tasks:
- Image Classification
Training Techniques:
- Cosine Annealing
- Random Erasing
Training Data:
- ImageNet
ID: selecsls42b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L335
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls42b-8af30141.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.18%
Top 5 Accuracy: 93.39%
- Name: selecsls60
In Collection: SelecSLS
Metadata:
FLOPs: 4610472600
Parameters: 30670000
File Size: 122839714
Architecture:
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Global Average Pooling
- ReLU
- SelecSLS Block
Tasks:
- Image Classification
Training Techniques:
- Cosine Annealing
- Random Erasing
Training Data:
- ImageNet
ID: selecsls60
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L342
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60-bbf87526.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 77.99%
Top 5 Accuracy: 93.83%
- Name: selecsls60b
In Collection: SelecSLS
Metadata:
FLOPs: 4657653144
Parameters: 32770000
File Size: 131252898
Architecture:
- Batch Normalization
- Convolution
- Dense Connections
- Dropout
- Global Average Pooling
- ReLU
- SelecSLS Block
Tasks:
- Image Classification
Training Techniques:
- Cosine Annealing
- Random Erasing
Training Data:
- ImageNet
ID: selecsls60b
Crop Pct: '0.875'
Image Size: '224'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/b9843f954b0457af2db4f9dea41a8538f51f5d78/timm/models/selecsls.py#L349
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-selecsls/selecsls60b-94e619b5.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.41%
Top 5 Accuracy: 94.18%
--> | pytorch-image-models/hfdocs/source/models/selecsls.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/selecsls.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2420
} | 187 |
# Xception
**Xception** is a convolutional neural network architecture that relies solely on [depthwise separable convolution layers](https://paperswithcode.com/method/depthwise-separable-convolution).
The weights from this model were ported from [Tensorflow/Models](https://github.com/tensorflow/models).
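As a rough, illustrative sketch (not the `timm` implementation; channel and spatial sizes are arbitrary), a depthwise separable convolution factorizes a standard convolution into a per-channel depthwise convolution followed by a 1x1 pointwise convolution:

```py
>>> import torch
>>> import torch.nn as nn

>>> in_ch, out_ch = 128, 256
>>> sep_conv = nn.Sequential(
...     nn.Conv2d(in_ch, in_ch, kernel_size=3, padding=1, groups=in_ch, bias=False),  # depthwise
...     nn.Conv2d(in_ch, out_ch, kernel_size=1, bias=False),                          # pointwise
...     nn.BatchNorm2d(out_ch),
...     nn.ReLU(inplace=True),
... )
>>> x = torch.randn(1, in_ch, 37, 37)
>>> print(sep_conv(x).shape)
>>> # prints: torch.Size([1, 256, 37, 37])
```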
## How do I use this model on an image?
To load a pretrained model:
```py
>>> import timm
>>> model = timm.create_model('xception', pretrained=True)
>>> model.eval()
```
To load and preprocess the image:
```py
>>> import urllib
>>> from PIL import Image
>>> from timm.data import resolve_data_config
>>> from timm.data.transforms_factory import create_transform
>>> config = resolve_data_config({}, model=model)
>>> transform = create_transform(**config)
>>> url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg")
>>> urllib.request.urlretrieve(url, filename)
>>> img = Image.open(filename).convert('RGB')
>>> tensor = transform(img).unsqueeze(0) # transform and add batch dimension
```
To get the model predictions:
```py
>>> import torch
>>> with torch.no_grad():
... out = model(tensor)
>>> probabilities = torch.nn.functional.softmax(out[0], dim=0)
>>> print(probabilities.shape)
>>> # prints: torch.Size([1000])
```
To get the top-5 predictions class names:
```py
>>> # Get imagenet class mappings
>>> url, filename = ("https://raw.githubusercontent.com/pytorch/hub/master/imagenet_classes.txt", "imagenet_classes.txt")
>>> urllib.request.urlretrieve(url, filename)
>>> with open("imagenet_classes.txt", "r") as f:
... categories = [s.strip() for s in f.readlines()]
>>> # Print top categories per image
>>> top5_prob, top5_catid = torch.topk(probabilities, 5)
>>> for i in range(top5_prob.size(0)):
... print(categories[top5_catid[i]], top5_prob[i].item())
>>> # prints class names and probabilities like:
>>> # [('Samoyed', 0.6425196528434753), ('Pomeranian', 0.04062102362513542), ('keeshond', 0.03186424449086189), ('white wolf', 0.01739676296710968), ('Eskimo dog', 0.011717947199940681)]
```
Replace the model name with the variant you want to use, e.g. `xception`. You can find the IDs in the model summaries at the top of this page.
To extract image features with this model, follow the [timm feature extraction examples](../feature_extraction), just change the name of the model you want to use.
## How do I finetune this model?
You can finetune any of the pre-trained models just by changing the classifier (the last layer).
```py
>>> model = timm.create_model('xception', pretrained=True, num_classes=NUM_FINETUNE_CLASSES)
```
To finetune on your own dataset, you have to write a training loop or adapt [timm's training
script](https://github.com/rwightman/pytorch-image-models/blob/master/train.py) to use your dataset.
## How do I train this model?
You can follow the [timm recipe scripts](../scripts) for training a new model afresh.
## Citation
```BibTeX
@misc{chollet2017xception,
title={Xception: Deep Learning with Depthwise Separable Convolutions},
    author={François Chollet},
year={2017},
eprint={1610.02357},
archivePrefix={arXiv},
primaryClass={cs.CV}
}
```
<!--
Type: model-index
Collections:
- Name: Xception
Paper:
Title: 'Xception: Deep Learning with Depthwise Separable Convolutions'
URL: https://paperswithcode.com/paper/xception-deep-learning-with-depthwise
Models:
- Name: xception
In Collection: Xception
Metadata:
FLOPs: 10600506792
Parameters: 22860000
File Size: 91675053
Architecture:
- 1x1 Convolution
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: xception
Crop Pct: '0.897'
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception.py#L229
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/xception-43020ad28.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.05%
Top 5 Accuracy: 94.4%
- Name: xception41
In Collection: Xception
Metadata:
FLOPs: 11681983232
Parameters: 26970000
File Size: 108422028
Architecture:
- 1x1 Convolution
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: xception41
Crop Pct: '0.903'
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L181
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_41-e6439c97.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 78.54%
Top 5 Accuracy: 94.28%
- Name: xception65
In Collection: Xception
Metadata:
FLOPs: 17585702144
Parameters: 39920000
File Size: 160536780
Architecture:
- 1x1 Convolution
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: xception65
Crop Pct: '0.903'
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L200
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_65-c9ae96e8.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.55%
Top 5 Accuracy: 94.66%
- Name: xception71
In Collection: Xception
Metadata:
FLOPs: 22817346560
Parameters: 42340000
File Size: 170295556
Architecture:
- 1x1 Convolution
- Convolution
- Dense Connections
- Depthwise Separable Convolution
- Global Average Pooling
- Max Pooling
- ReLU
- Residual Connection
- Softmax
Tasks:
- Image Classification
Training Data:
- ImageNet
ID: xception71
Crop Pct: '0.903'
Image Size: '299'
Interpolation: bicubic
Code: https://github.com/rwightman/pytorch-image-models/blob/d8e69206be253892b2956341fea09fdebfaae4e3/timm/models/xception_aligned.py#L219
Weights: https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/tf_xception_71-8eec7df1.pth
Results:
- Task: Image Classification
Dataset: ImageNet
Metrics:
Top 1 Accuracy: 79.88%
Top 5 Accuracy: 94.93%
--> | pytorch-image-models/hfdocs/source/models/xception.mdx/0 | {
"file_path": "pytorch-image-models/hfdocs/source/models/xception.mdx",
"repo_id": "pytorch-image-models",
"token_count": 2674
} | 188 |
from .version import __version__
from .layers import is_scriptable, is_exportable, set_scriptable, set_exportable
from .models import create_model, list_models, list_pretrained, is_model, list_modules, model_entrypoint, \
is_model_pretrained, get_pretrained_cfg, get_pretrained_cfg_value
| pytorch-image-models/timm/__init__.py/0 | {
"file_path": "pytorch-image-models/timm/__init__.py",
"repo_id": "pytorch-image-models",
"token_count": 91
} | 189 |
from .reader_factory import create_reader
from .img_extensions import *
| pytorch-image-models/timm/data/readers/__init__.py/0 | {
"file_path": "pytorch-image-models/timm/data/readers/__init__.py",
"repo_id": "pytorch-image-models",
"token_count": 20
} | 190 |
""" Transforms Factory
Factory methods for building image transforms for use with TIMM (PyTorch Image Models)
Hacked together by / Copyright 2019, Ross Wightman
"""
import math
from typing import Optional, Tuple, Union
import torch
from torchvision import transforms
from timm.data.constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, DEFAULT_CROP_PCT
from timm.data.auto_augment import rand_augment_transform, augment_and_mix_transform, auto_augment_transform
from timm.data.transforms import str_to_interp_mode, str_to_pil_interp, RandomResizedCropAndInterpolation,\
ResizeKeepRatio, CenterCropOrPad, RandomCropOrPad, TrimBorder, ToNumpy
from timm.data.random_erasing import RandomErasing
def transforms_noaug_train(
img_size: Union[int, Tuple[int, int]] = 224,
interpolation: str = 'bilinear',
use_prefetcher: bool = False,
mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN,
std: Tuple[float, ...] = IMAGENET_DEFAULT_STD,
):
""" No-augmentation image transforms for training.
Args:
img_size: Target image size.
interpolation: Image interpolation mode.
mean: Image normalization mean.
std: Image normalization standard deviation.
use_prefetcher: Prefetcher enabled. Do not convert image to tensor or normalize.
    Returns:
        A ``torchvision.transforms.Compose`` pipeline of the resulting transforms.
    """
if interpolation == 'random':
# random interpolation not supported with no-aug
interpolation = 'bilinear'
tfl = [
transforms.Resize(img_size, interpolation=str_to_interp_mode(interpolation)),
transforms.CenterCrop(img_size)
]
if use_prefetcher:
# prefetcher and collate will handle tensor conversion and norm
tfl += [ToNumpy()]
else:
tfl += [
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std)
)
]
return transforms.Compose(tfl)
def transforms_imagenet_train(
img_size: Union[int, Tuple[int, int]] = 224,
scale: Optional[Tuple[float, float]] = None,
ratio: Optional[Tuple[float, float]] = None,
train_crop_mode: Optional[str] = None,
hflip: float = 0.5,
vflip: float = 0.,
color_jitter: Union[float, Tuple[float, ...]] = 0.4,
color_jitter_prob: Optional[float] = None,
force_color_jitter: bool = False,
grayscale_prob: float = 0.,
gaussian_blur_prob: float = 0.,
auto_augment: Optional[str] = None,
interpolation: str = 'random',
mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN,
std: Tuple[float, ...] = IMAGENET_DEFAULT_STD,
re_prob: float = 0.,
re_mode: str = 'const',
re_count: int = 1,
re_num_splits: int = 0,
use_prefetcher: bool = False,
separate: bool = False,
):
""" ImageNet-oriented image transforms for training.
Args:
img_size: Target image size.
train_crop_mode: Training random crop mode ('rrc', 'rkrc', 'rkrr').
scale: Random resize scale range (crop area, < 1.0 => zoom in).
ratio: Random aspect ratio range (crop ratio for RRC, ratio adjustment factor for RKR).
hflip: Horizontal flip probability.
vflip: Vertical flip probability.
color_jitter: Random color jitter component factors (brightness, contrast, saturation, hue).
Scalar is applied as (scalar,) * 3 (no hue).
        color_jitter_prob: Apply color jitter with this probability if not None (for SimCLR-like aug).
force_color_jitter: Force color jitter where it is normally disabled (ie with RandAugment on).
grayscale_prob: Probability of converting image to grayscale (for SimCLR-like aug).
gaussian_blur_prob: Probability of applying gaussian blur (for SimCLR-like aug).
auto_augment: Auto augment configuration string (see auto_augment.py).
interpolation: Image interpolation mode.
mean: Image normalization mean.
std: Image normalization standard deviation.
re_prob: Random erasing probability.
re_mode: Random erasing fill mode.
re_count: Number of random erasing regions.
re_num_splits: Control split of random erasing across batch size.
use_prefetcher: Prefetcher enabled. Do not convert image to tensor or normalize.
separate: Output transforms in 3-stage tuple.
Returns:
If separate==True, the transforms are returned as a tuple of 3 separate transforms
for use in a mixing dataset that passes
* all data through the first (primary) transform, called the 'clean' data
* a portion of the data through the secondary transform
* normalizes and converts the branches above with the third, final transform
"""
train_crop_mode = train_crop_mode or 'rrc'
assert train_crop_mode in {'rrc', 'rkrc', 'rkrr'}
if train_crop_mode in ('rkrc', 'rkrr'):
# FIXME integration of RKR is a WIP
scale = tuple(scale or (0.8, 1.00))
ratio = tuple(ratio or (0.9, 1/.9))
primary_tfl = [
ResizeKeepRatio(
img_size,
interpolation=interpolation,
random_scale_prob=0.5,
random_scale_range=scale,
random_scale_area=True, # scale compatible with RRC
random_aspect_prob=0.5,
random_aspect_range=ratio,
),
CenterCropOrPad(img_size, padding_mode='reflect')
if train_crop_mode == 'rkrc' else
RandomCropOrPad(img_size, padding_mode='reflect')
]
else:
scale = tuple(scale or (0.08, 1.0)) # default imagenet scale range
ratio = tuple(ratio or (3. / 4., 4. / 3.)) # default imagenet ratio range
primary_tfl = [
RandomResizedCropAndInterpolation(
img_size,
scale=scale,
ratio=ratio,
interpolation=interpolation,
)
]
if hflip > 0.:
primary_tfl += [transforms.RandomHorizontalFlip(p=hflip)]
if vflip > 0.:
primary_tfl += [transforms.RandomVerticalFlip(p=vflip)]
secondary_tfl = []
disable_color_jitter = False
if auto_augment:
assert isinstance(auto_augment, str)
# color jitter is typically disabled if AA/RA on,
        # this allows override without breaking old hparam cfgs
disable_color_jitter = not (force_color_jitter or '3a' in auto_augment)
if isinstance(img_size, (tuple, list)):
img_size_min = min(img_size)
else:
img_size_min = img_size
aa_params = dict(
translate_const=int(img_size_min * 0.45),
img_mean=tuple([min(255, round(255 * x)) for x in mean]),
)
if interpolation and interpolation != 'random':
aa_params['interpolation'] = str_to_pil_interp(interpolation)
if auto_augment.startswith('rand'):
secondary_tfl += [rand_augment_transform(auto_augment, aa_params)]
elif auto_augment.startswith('augmix'):
aa_params['translate_pct'] = 0.3
secondary_tfl += [augment_and_mix_transform(auto_augment, aa_params)]
else:
secondary_tfl += [auto_augment_transform(auto_augment, aa_params)]
if color_jitter is not None and not disable_color_jitter:
# color jitter is enabled when not using AA or when forced
if isinstance(color_jitter, (list, tuple)):
            # color jitter should be a 3-tuple/list if specifying brightness/contrast/saturation
            # or a 4-tuple if also augmenting hue
assert len(color_jitter) in (3, 4)
else:
# if it's a scalar, duplicate for brightness, contrast, and saturation, no hue
color_jitter = (float(color_jitter),) * 3
if color_jitter_prob is not None:
secondary_tfl += [
transforms.RandomApply([
transforms.ColorJitter(*color_jitter),
],
p=color_jitter_prob
)
]
else:
secondary_tfl += [transforms.ColorJitter(*color_jitter)]
if grayscale_prob:
secondary_tfl += [transforms.RandomGrayscale(p=grayscale_prob)]
if gaussian_blur_prob:
secondary_tfl += [
transforms.RandomApply([
transforms.GaussianBlur(kernel_size=23), # hardcoded for now
],
p=gaussian_blur_prob,
)
]
final_tfl = []
if use_prefetcher:
# prefetcher and collate will handle tensor conversion and norm
final_tfl += [ToNumpy()]
else:
final_tfl += [
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std)
),
]
if re_prob > 0.:
final_tfl += [
RandomErasing(
re_prob,
mode=re_mode,
max_count=re_count,
num_splits=re_num_splits,
device='cpu',
)
]
if separate:
return transforms.Compose(primary_tfl), transforms.Compose(secondary_tfl), transforms.Compose(final_tfl)
else:
return transforms.Compose(primary_tfl + secondary_tfl + final_tfl)
def transforms_imagenet_eval(
img_size: Union[int, Tuple[int, int]] = 224,
crop_pct: Optional[float] = None,
crop_mode: Optional[str] = None,
crop_border_pixels: Optional[int] = None,
interpolation: str = 'bilinear',
mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN,
std: Tuple[float, ...] = IMAGENET_DEFAULT_STD,
use_prefetcher: bool = False,
):
""" ImageNet-oriented image transform for evaluation and inference.
Args:
img_size: Target image size.
crop_pct: Crop percentage. Defaults to 0.875 when None.
crop_mode: Crop mode. One of ['squash', 'border', 'center']. Defaults to 'center' when None.
crop_border_pixels: Trim a border of specified # pixels around edge of original image.
interpolation: Image interpolation mode.
mean: Image normalization mean.
std: Image normalization standard deviation.
use_prefetcher: Prefetcher enabled. Do not convert image to tensor or normalize.
Returns:
Composed transform pipeline
"""
crop_pct = crop_pct or DEFAULT_CROP_PCT
if isinstance(img_size, (tuple, list)):
assert len(img_size) == 2
scale_size = tuple([math.floor(x / crop_pct) for x in img_size])
else:
scale_size = math.floor(img_size / crop_pct)
scale_size = (scale_size, scale_size)
tfl = []
if crop_border_pixels:
tfl += [TrimBorder(crop_border_pixels)]
if crop_mode == 'squash':
# squash mode scales each edge to 1/pct of target, then crops
# aspect ratio is not preserved, no img lost if crop_pct == 1.0
tfl += [
transforms.Resize(scale_size, interpolation=str_to_interp_mode(interpolation)),
transforms.CenterCrop(img_size),
]
elif crop_mode == 'border':
# scale the longest edge of image to 1/pct of target edge, add borders to pad, then crop
# no image lost if crop_pct == 1.0
fill = [round(255 * v) for v in mean]
tfl += [
ResizeKeepRatio(scale_size, interpolation=interpolation, longest=1.0),
CenterCropOrPad(img_size, fill=fill),
]
else:
        # default crop mode is 'center'
        # aspect ratio is preserved, crops center within image, no borders are added,
        # image is lost if crop_pct < 1.0
if scale_size[0] == scale_size[1]:
# simple case, use torchvision built-in Resize w/ shortest edge mode (scalar size arg)
tfl += [
transforms.Resize(scale_size[0], interpolation=str_to_interp_mode(interpolation))
]
else:
# resize the shortest edge to matching target dim for non-square target
tfl += [ResizeKeepRatio(scale_size)]
tfl += [transforms.CenterCrop(img_size)]
if use_prefetcher:
# prefetcher and collate will handle tensor conversion and norm
tfl += [ToNumpy()]
else:
tfl += [
transforms.ToTensor(),
transforms.Normalize(
mean=torch.tensor(mean),
std=torch.tensor(std),
)
]
return transforms.Compose(tfl)
def create_transform(
input_size: Union[int, Tuple[int, int], Tuple[int, int, int]] = 224,
is_training: bool = False,
no_aug: bool = False,
train_crop_mode: Optional[str] = None,
scale: Optional[Tuple[float, float]] = None,
ratio: Optional[Tuple[float, float]] = None,
hflip: float = 0.5,
vflip: float = 0.,
color_jitter: Union[float, Tuple[float, ...]] = 0.4,
color_jitter_prob: Optional[float] = None,
grayscale_prob: float = 0.,
gaussian_blur_prob: float = 0.,
auto_augment: Optional[str] = None,
interpolation: str = 'bilinear',
mean: Tuple[float, ...] = IMAGENET_DEFAULT_MEAN,
std: Tuple[float, ...] = IMAGENET_DEFAULT_STD,
re_prob: float = 0.,
re_mode: str = 'const',
re_count: int = 1,
re_num_splits: int = 0,
crop_pct: Optional[float] = None,
crop_mode: Optional[str] = None,
crop_border_pixels: Optional[int] = None,
tf_preprocessing: bool = False,
use_prefetcher: bool = False,
separate: bool = False,
):
"""
Args:
input_size: Target input size (channels, height, width) tuple or size scalar.
is_training: Return training (random) transforms.
no_aug: Disable augmentation for training (useful for debug).
train_crop_mode: Training random crop mode ('rrc', 'rkrc', 'rkrr').
scale: Random resize scale range (crop area, < 1.0 => zoom in).
ratio: Random aspect ratio range (crop ratio for RRC, ratio adjustment factor for RKR).
hflip: Horizontal flip probability.
vflip: Vertical flip probability.
color_jitter: Random color jitter component factors (brightness, contrast, saturation, hue).
Scalar is applied as (scalar,) * 3 (no hue).
        color_jitter_prob: Apply color jitter with this probability if not None (for SimCLR-like aug).
grayscale_prob: Probability of converting image to grayscale (for SimCLR-like aug).
gaussian_blur_prob: Probability of applying gaussian blur (for SimCLR-like aug).
auto_augment: Auto augment configuration string (see auto_augment.py).
interpolation: Image interpolation mode.
mean: Image normalization mean.
std: Image normalization standard deviation.
re_prob: Random erasing probability.
re_mode: Random erasing fill mode.
re_count: Number of random erasing regions.
re_num_splits: Control split of random erasing across batch size.
crop_pct: Inference crop percentage (output size / resize size).
crop_mode: Inference crop mode. One of ['squash', 'border', 'center']. Defaults to 'center' when None.
crop_border_pixels: Inference crop border of specified # pixels around edge of original image.
tf_preprocessing: Use TF 1.0 inference preprocessing for testing model ports
use_prefetcher: Pre-fetcher enabled. Do not convert image to tensor or normalize.
separate: Output transforms in 3-stage tuple.
Returns:
Composed transforms or tuple thereof
"""
if isinstance(input_size, (tuple, list)):
img_size = input_size[-2:]
else:
img_size = input_size
if tf_preprocessing and use_prefetcher:
assert not separate, "Separate transforms not supported for TF preprocessing"
from timm.data.tf_preprocessing import TfPreprocessTransform
transform = TfPreprocessTransform(
is_training=is_training,
size=img_size,
interpolation=interpolation,
)
else:
if is_training and no_aug:
assert not separate, "Cannot perform split augmentation with no_aug"
transform = transforms_noaug_train(
img_size,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
mean=mean,
std=std,
)
elif is_training:
transform = transforms_imagenet_train(
img_size,
train_crop_mode=train_crop_mode,
scale=scale,
ratio=ratio,
hflip=hflip,
vflip=vflip,
color_jitter=color_jitter,
color_jitter_prob=color_jitter_prob,
grayscale_prob=grayscale_prob,
gaussian_blur_prob=gaussian_blur_prob,
auto_augment=auto_augment,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
mean=mean,
std=std,
re_prob=re_prob,
re_mode=re_mode,
re_count=re_count,
re_num_splits=re_num_splits,
separate=separate,
)
else:
assert not separate, "Separate transforms not supported for validation preprocessing"
transform = transforms_imagenet_eval(
img_size,
interpolation=interpolation,
use_prefetcher=use_prefetcher,
mean=mean,
std=std,
crop_pct=crop_pct,
crop_mode=crop_mode,
crop_border_pixels=crop_border_pixels,
)
return transform
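if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original module): build typical
    # train / eval pipelines with the factory above. The RandAugment string and crop_pct
    # below are arbitrary example values, not recommended settings.
    train_tf = create_transform(224, is_training=True, auto_augment='rand-m9-mstd0.5')
    eval_tf = create_transform(224, is_training=False, crop_pct=0.95)
    print(train_tf)
    print(eval_tf)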
| pytorch-image-models/timm/data/transforms_factory.py/0 | {
"file_path": "pytorch-image-models/timm/data/transforms_factory.py",
"repo_id": "pytorch-image-models",
"token_count": 8112
} | 191 |
""" Activation Factory
Hacked together by / Copyright 2020 Ross Wightman
"""
from typing import Union, Callable, Type
from .activations import *
from .activations_jit import *
from .activations_me import *
from .config import is_exportable, is_scriptable, is_no_jit
# PyTorch has an optimized, native 'silu' (aka 'swish') operator as of PyTorch 1.7.
# Also hardsigmoid, hardswish, and mish. This code will use the native version if present.
# Eventually, the custom SiLU, Mish, Hard* layers will be removed and only native variants will be used.
_has_silu = 'silu' in dir(torch.nn.functional)
_has_hardswish = 'hardswish' in dir(torch.nn.functional)
_has_hardsigmoid = 'hardsigmoid' in dir(torch.nn.functional)
_has_mish = 'mish' in dir(torch.nn.functional)
_ACT_FN_DEFAULT = dict(
silu=F.silu if _has_silu else swish,
swish=F.silu if _has_silu else swish,
mish=F.mish if _has_mish else mish,
relu=F.relu,
relu6=F.relu6,
leaky_relu=F.leaky_relu,
elu=F.elu,
celu=F.celu,
selu=F.selu,
gelu=gelu,
gelu_tanh=gelu_tanh,
quick_gelu=quick_gelu,
sigmoid=sigmoid,
tanh=tanh,
hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid,
hard_swish=F.hardswish if _has_hardswish else hard_swish,
hard_mish=hard_mish,
)
_ACT_FN_JIT = dict(
silu=F.silu if _has_silu else swish_jit,
swish=F.silu if _has_silu else swish_jit,
mish=F.mish if _has_mish else mish_jit,
hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_jit,
hard_swish=F.hardswish if _has_hardswish else hard_swish_jit,
hard_mish=hard_mish_jit,
)
_ACT_FN_ME = dict(
silu=F.silu if _has_silu else swish_me,
swish=F.silu if _has_silu else swish_me,
mish=F.mish if _has_mish else mish_me,
hard_sigmoid=F.hardsigmoid if _has_hardsigmoid else hard_sigmoid_me,
hard_swish=F.hardswish if _has_hardswish else hard_swish_me,
hard_mish=hard_mish_me,
)
_ACT_FNS = (_ACT_FN_ME, _ACT_FN_JIT, _ACT_FN_DEFAULT)
for a in _ACT_FNS:
a.setdefault('hardsigmoid', a.get('hard_sigmoid'))
a.setdefault('hardswish', a.get('hard_swish'))
_ACT_LAYER_DEFAULT = dict(
silu=nn.SiLU if _has_silu else Swish,
swish=nn.SiLU if _has_silu else Swish,
mish=nn.Mish if _has_mish else Mish,
relu=nn.ReLU,
relu6=nn.ReLU6,
leaky_relu=nn.LeakyReLU,
elu=nn.ELU,
prelu=PReLU,
celu=nn.CELU,
selu=nn.SELU,
gelu=GELU,
gelu_tanh=GELUTanh,
quick_gelu=QuickGELU,
sigmoid=Sigmoid,
tanh=Tanh,
hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoid,
hard_swish=nn.Hardswish if _has_hardswish else HardSwish,
hard_mish=HardMish,
identity=nn.Identity,
)
_ACT_LAYER_JIT = dict(
silu=nn.SiLU if _has_silu else SwishJit,
swish=nn.SiLU if _has_silu else SwishJit,
mish=nn.Mish if _has_mish else MishJit,
hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidJit,
hard_swish=nn.Hardswish if _has_hardswish else HardSwishJit,
hard_mish=HardMishJit,
)
_ACT_LAYER_ME = dict(
silu=nn.SiLU if _has_silu else SwishMe,
swish=nn.SiLU if _has_silu else SwishMe,
mish=nn.Mish if _has_mish else MishMe,
hard_sigmoid=nn.Hardsigmoid if _has_hardsigmoid else HardSigmoidMe,
hard_swish=nn.Hardswish if _has_hardswish else HardSwishMe,
hard_mish=HardMishMe,
)
_ACT_LAYERS = (_ACT_LAYER_ME, _ACT_LAYER_JIT, _ACT_LAYER_DEFAULT)
for a in _ACT_LAYERS:
a.setdefault('hardsigmoid', a.get('hard_sigmoid'))
a.setdefault('hardswish', a.get('hard_swish'))
def get_act_fn(name: Union[Callable, str] = 'relu'):
""" Activation Function Factory
Fetching activation fns by name with this function allows export or torch script friendly
functions to be returned dynamically based on current config.
"""
if not name:
return None
if isinstance(name, Callable):
return name
if not (is_no_jit() or is_exportable() or is_scriptable()):
        # If not exporting or scripting the model, first look for a memory-efficient version
        # with custom autograd, then fall back to the JIT scripted or default implementations
if name in _ACT_FN_ME:
return _ACT_FN_ME[name]
if not (is_no_jit() or is_exportable()):
if name in _ACT_FN_JIT:
return _ACT_FN_JIT[name]
return _ACT_FN_DEFAULT[name]
def get_act_layer(name: Union[Type[nn.Module], str] = 'relu'):
""" Activation Layer Factory
Fetching activation layers by name with this function allows export or torch script friendly
functions to be returned dynamically based on current config.
"""
if name is None:
return None
if not isinstance(name, str):
# callable, module, etc
return name
if not name:
return None
if not (is_no_jit() or is_exportable() or is_scriptable()):
if name in _ACT_LAYER_ME:
return _ACT_LAYER_ME[name]
if not (is_no_jit() or is_exportable()):
if name in _ACT_LAYER_JIT:
return _ACT_LAYER_JIT[name]
return _ACT_LAYER_DEFAULT[name]
def create_act_layer(name: Union[nn.Module, str], inplace=None, **kwargs):
act_layer = get_act_layer(name)
if act_layer is None:
return None
if inplace is None:
return act_layer(**kwargs)
try:
return act_layer(inplace=inplace, **kwargs)
except TypeError:
# recover if act layer doesn't have inplace arg
return act_layer(**kwargs)
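if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original module): resolve an
    # activation layer and an activation function by name. 'torch' is available in this
    # namespace via the star imports above.
    act = create_act_layer('hard_swish', inplace=True)
    fn = get_act_fn('gelu')
    x = torch.randn(2, 8)
    print(type(act).__name__, act(x).shape, fn(x).shape)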
| pytorch-image-models/timm/layers/create_act.py/0 | {
"file_path": "pytorch-image-models/timm/layers/create_act.py",
"repo_id": "pytorch-image-models",
"token_count": 2443
} | 192 |
""" Layer/Module Helpers
Hacked together by / Copyright 2020 Ross Wightman
"""
from itertools import repeat
import collections.abc
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable) and not isinstance(x, str):
return tuple(x)
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = _ntuple
def make_divisible(v, divisor=8, min_value=None, round_limit=.9):
min_value = min_value or divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < round_limit * v:
new_v += divisor
return new_v
def extend_tuple(x, n):
# pads a tuple to specified n by padding with last value
if not isinstance(x, (tuple, list)):
x = (x,)
else:
x = tuple(x)
pad_n = n - len(x)
if pad_n <= 0:
return x[:n]
return x + (x[-1],) * pad_n
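if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original module).
    print(to_2tuple(7))                    # (7, 7)
    print(to_2tuple((3, 5)))               # (3, 5)
    print(make_divisible(37, divisor=8))   # 40
    print(extend_tuple((1, 2), 4))         # (1, 2, 2, 2)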
| pytorch-image-models/timm/layers/helpers.py/0 | {
"file_path": "pytorch-image-models/timm/layers/helpers.py",
"repo_id": "pytorch-image-models",
"token_count": 462
} | 193 |
""" Position Embedding Utilities
Hacked together by / Copyright 2022 Ross Wightman
"""
import logging
import math
from typing import List, Tuple, Optional, Union
import torch
import torch.nn.functional as F
from .helpers import to_2tuple
_logger = logging.getLogger(__name__)
def resample_abs_pos_embed(
posemb,
new_size: List[int],
old_size: Optional[List[int]] = None,
num_prefix_tokens: int = 1,
interpolation: str = 'bicubic',
antialias: bool = True,
verbose: bool = False,
):
# sort out sizes, assume square if old size not provided
num_pos_tokens = posemb.shape[1]
num_new_tokens = new_size[0] * new_size[1] + num_prefix_tokens
if num_new_tokens == num_pos_tokens and new_size[0] == new_size[1]:
return posemb
if old_size is None:
hw = int(math.sqrt(num_pos_tokens - num_prefix_tokens))
old_size = hw, hw
if num_prefix_tokens:
posemb_prefix, posemb = posemb[:, :num_prefix_tokens], posemb[:, num_prefix_tokens:]
else:
posemb_prefix, posemb = None, posemb
# do the interpolation
embed_dim = posemb.shape[-1]
orig_dtype = posemb.dtype
posemb = posemb.float() # interpolate needs float32
posemb = posemb.reshape(1, old_size[0], old_size[1], -1).permute(0, 3, 1, 2)
posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias)
posemb = posemb.permute(0, 2, 3, 1).reshape(1, -1, embed_dim)
posemb = posemb.to(orig_dtype)
# add back extra (class, etc) prefix tokens
if posemb_prefix is not None:
posemb = torch.cat([posemb_prefix, posemb], dim=1)
if not torch.jit.is_scripting() and verbose:
_logger.info(f'Resized position embedding: {old_size} to {new_size}.')
return posemb
def resample_abs_pos_embed_nhwc(
posemb,
new_size: List[int],
interpolation: str = 'bicubic',
antialias: bool = True,
verbose: bool = False,
):
if new_size[0] == posemb.shape[-3] and new_size[1] == posemb.shape[-2]:
return posemb
orig_dtype = posemb.dtype
posemb = posemb.float()
# do the interpolation
posemb = posemb.reshape(1, posemb.shape[-3], posemb.shape[-2], posemb.shape[-1]).permute(0, 3, 1, 2)
posemb = F.interpolate(posemb, size=new_size, mode=interpolation, antialias=antialias)
posemb = posemb.permute(0, 2, 3, 1).to(orig_dtype)
if not torch.jit.is_scripting() and verbose:
_logger.info(f'Resized position embedding: {posemb.shape[-3:-1]} to {new_size}.')
return posemb
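if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original module): resize a
    # ViT-style position embedding with a single class-token prefix from a 14x14 grid
    # (196 + 1 tokens) to a 16x16 grid (256 + 1 tokens).
    pe = torch.zeros(1, 1 + 14 * 14, 384)
    pe_resized = resample_abs_pos_embed(pe, new_size=[16, 16], num_prefix_tokens=1)
    print(pe_resized.shape)  # torch.Size([1, 257, 384])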
| pytorch-image-models/timm/layers/pos_embed.py/0 | {
"file_path": "pytorch-image-models/timm/layers/pos_embed.py",
"repo_id": "pytorch-image-models",
"token_count": 1127
} | 194 |
""" Binary Cross Entropy w/ a few extras
Hacked together by / Copyright 2021 Ross Wightman
"""
from typing import Optional, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
class BinaryCrossEntropy(nn.Module):
""" BCE with optional one-hot from dense targets, label smoothing, thresholding
    NOTE for experiments comparing CE to BCE w/ label smoothing, may remove
"""
def __init__(
self,
smoothing=0.1,
target_threshold: Optional[float] = None,
weight: Optional[torch.Tensor] = None,
reduction: str = 'mean',
sum_classes: bool = False,
pos_weight: Optional[Union[torch.Tensor, float]] = None,
):
super(BinaryCrossEntropy, self).__init__()
assert 0. <= smoothing < 1.0
if pos_weight is not None:
if not isinstance(pos_weight, torch.Tensor):
pos_weight = torch.tensor(pos_weight)
self.smoothing = smoothing
self.target_threshold = target_threshold
self.reduction = 'none' if sum_classes else reduction
self.sum_classes = sum_classes
self.register_buffer('weight', weight)
self.register_buffer('pos_weight', pos_weight)
def forward(self, x: torch.Tensor, target: torch.Tensor) -> torch.Tensor:
batch_size = x.shape[0]
assert batch_size == target.shape[0]
if target.shape != x.shape:
            # NOTE assume smoothing or other label softening is applied upstream when targets arrive already one-hot / soft (same shape as x)
num_classes = x.shape[-1]
# FIXME should off/on be different for smoothing w/ BCE? Other impl out there differ
off_value = self.smoothing / num_classes
on_value = 1. - self.smoothing + off_value
target = target.long().view(-1, 1)
target = torch.full(
(batch_size, num_classes),
off_value,
device=x.device, dtype=x.dtype).scatter_(1, target, on_value)
if self.target_threshold is not None:
# Make target 0, or 1 if threshold set
target = target.gt(self.target_threshold).to(dtype=target.dtype)
loss = F.binary_cross_entropy_with_logits(
x, target,
self.weight,
pos_weight=self.pos_weight,
reduction=self.reduction,
)
if self.sum_classes:
loss = loss.sum(-1).mean()
return loss
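if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original module): integer
    # class-index targets are expanded to smoothed one-hot targets internally before
    # BCE-with-logits is applied.
    criterion = BinaryCrossEntropy(smoothing=0.1)
    logits = torch.randn(8, 10)
    targets = torch.randint(0, 10, (8,))
    print(criterion(logits, targets))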
| pytorch-image-models/timm/loss/binary_cross_entropy.py/0 | {
"file_path": "pytorch-image-models/timm/loss/binary_cross_entropy.py",
"repo_id": "pytorch-image-models",
"token_count": 1082
} | 195 |
""" DeiT - Data-efficient Image Transformers
DeiT model defs and weights from https://github.com/facebookresearch/deit, original copyright below
paper: `DeiT: Data-efficient Image Transformers` - https://arxiv.org/abs/2012.12877
paper: `DeiT III: Revenge of the ViT` - https://arxiv.org/abs/2204.07118
Modifications copyright 2021, Ross Wightman
"""
# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
from functools import partial
from typing import Sequence, Union
import torch
from torch import nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import resample_abs_pos_embed
from timm.models.vision_transformer import VisionTransformer, trunc_normal_, checkpoint_filter_fn
from ._builder import build_model_with_cfg
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model, register_model_deprecations
__all__ = ['VisionTransformerDistilled'] # model_registry will add each entrypoint fn to this
class VisionTransformerDistilled(VisionTransformer):
""" Vision Transformer w/ Distillation Token and Head
Distillation token & head support for `DeiT: Data-efficient Image Transformers`
- https://arxiv.org/abs/2012.12877
"""
def __init__(self, *args, **kwargs):
weight_init = kwargs.pop('weight_init', '')
super().__init__(*args, **kwargs, weight_init='skip')
assert self.global_pool in ('token',)
self.num_prefix_tokens = 2
self.dist_token = nn.Parameter(torch.zeros(1, 1, self.embed_dim))
self.pos_embed = nn.Parameter(
torch.zeros(1, self.patch_embed.num_patches + self.num_prefix_tokens, self.embed_dim))
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if self.num_classes > 0 else nn.Identity()
self.distilled_training = False # must set this True to train w/ distillation token
self.init_weights(weight_init)
def init_weights(self, mode=''):
trunc_normal_(self.dist_token, std=.02)
super().init_weights(mode=mode)
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed|dist_token',
blocks=[
(r'^blocks\.(\d+)', None),
(r'^norm', (99999,))] # final norm w/ last block
)
@torch.jit.ignore
def get_classifier(self):
return self.head, self.head_dist
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
self.head_dist = nn.Linear(self.embed_dim, self.num_classes) if num_classes > 0 else nn.Identity()
@torch.jit.ignore
def set_distilled_training(self, enable=True):
self.distilled_training = enable
def _pos_embed(self, x):
if self.dynamic_img_size:
B, H, W, C = x.shape
pos_embed = resample_abs_pos_embed(
self.pos_embed,
(H, W),
num_prefix_tokens=0 if self.no_embed_class else self.num_prefix_tokens,
)
x = x.view(B, -1, C)
else:
pos_embed = self.pos_embed
if self.no_embed_class:
# deit-3, updated JAX (big vision)
# position embedding does not overlap with class token, add then concat
x = x + pos_embed
x = torch.cat((
self.cls_token.expand(x.shape[0], -1, -1),
self.dist_token.expand(x.shape[0], -1, -1),
x),
dim=1)
else:
# original timm, JAX, and deit vit impl
# pos_embed has entry for class token, concat then add
x = torch.cat((
self.cls_token.expand(x.shape[0], -1, -1),
self.dist_token.expand(x.shape[0], -1, -1),
x),
dim=1)
x = x + pos_embed
return self.pos_drop(x)
def forward_head(self, x, pre_logits: bool = False) -> torch.Tensor:
x, x_dist = x[:, 0], x[:, 1]
if pre_logits:
return (x + x_dist) / 2
x = self.head(x)
x_dist = self.head_dist(x_dist)
if self.distilled_training and self.training and not torch.jit.is_scripting():
# only return separate classification predictions when training in distilled mode
return x, x_dist
else:
            # during standard train / finetune or inference, average the two classifier predictions
return (x + x_dist) / 2
def _create_deit(variant, pretrained=False, distilled=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model_cls = VisionTransformerDistilled if distilled else VisionTransformer
model = build_model_with_cfg(
model_cls,
variant,
pretrained,
pretrained_filter_fn=partial(checkpoint_filter_fn, adapt_layer_scale=True),
**kwargs,
)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
# deit models (FB weights)
'deit_tiny_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_tiny_patch16_224-a1311bcf.pth'),
'deit_small_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_small_patch16_224-cd65a155.pth'),
'deit_base_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_224-b5f2ef4d.pth'),
'deit_base_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_patch16_384-8de9b5d1.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit_tiny_distilled_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_tiny_distilled_patch16_224-b40b3cf7.pth',
classifier=('head', 'head_dist')),
'deit_small_distilled_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_small_distilled_patch16_224-649709d9.pth',
classifier=('head', 'head_dist')),
'deit_base_distilled_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_224-df68dfff.pth',
classifier=('head', 'head_dist')),
'deit_base_distilled_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_base_distilled_patch16_384-d0272ac0.pth',
input_size=(3, 384, 384), crop_pct=1.0,
classifier=('head', 'head_dist')),
'deit3_small_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_1k.pth'),
'deit3_small_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_medium_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_1k.pth'),
'deit3_base_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_1k.pth'),
'deit3_base_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_large_patch16_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_1k.pth'),
'deit3_large_patch16_384.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_1k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_huge_patch14_224.fb_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_1k.pth'),
'deit3_small_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_224_21k.pth',
crop_pct=1.0),
'deit3_small_patch16_384.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_small_384_21k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_medium_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_medium_224_21k.pth',
crop_pct=1.0),
'deit3_base_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_224_21k.pth',
crop_pct=1.0),
'deit3_base_patch16_384.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_base_384_21k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_large_patch16_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_224_21k.pth',
crop_pct=1.0),
'deit3_large_patch16_384.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_large_384_21k.pth',
input_size=(3, 384, 384), crop_pct=1.0),
'deit3_huge_patch14_224.fb_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
url='https://dl.fbaipublicfiles.com/deit/deit_3_huge_224_21k_v1.pth',
crop_pct=1.0),
})
@register_model
def deit_tiny_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-tiny model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
model = _create_deit('deit_tiny_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-small model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6)
model = _create_deit('deit_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT base model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit('deit_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT base model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit('deit_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit_tiny_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-tiny distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3)
model = _create_deit(
'deit_tiny_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit_small_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-small distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6)
model = _create_deit(
'deit_small_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_distilled_patch16_224(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-base distilled model @ 224x224 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit(
'deit_base_distilled_patch16_224', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit_base_distilled_patch16_384(pretrained=False, **kwargs) -> VisionTransformerDistilled:
""" DeiT-base distilled model @ 384x384 from paper (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12)
model = _create_deit(
'deit_base_distilled_patch16_384', pretrained=pretrained, distilled=True, **dict(model_args, **kwargs))
return model
@register_model
def deit3_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 small model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_small_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 small model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_small_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 medium model @ 224x224 (https://arxiv.org/abs/2012.12877).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=512, depth=12, num_heads=8, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 base model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_base_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_base_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_large_patch16_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 large model @ 224x224 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_large_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_large_patch16_384(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 large model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_large_patch16_384', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def deit3_huge_patch14_224(pretrained=False, **kwargs) -> VisionTransformer:
""" DeiT-3 base model @ 384x384 from paper (https://arxiv.org/abs/2204.07118).
ImageNet-1k weights from https://github.com/facebookresearch/deit.
"""
model_args = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, no_embed_class=True, init_values=1e-6)
model = _create_deit('deit3_huge_patch14_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
register_model_deprecations(__name__, {
'deit3_small_patch16_224_in21ft1k': 'deit3_small_patch16_224.fb_in22k_ft_in1k',
'deit3_small_patch16_384_in21ft1k': 'deit3_small_patch16_384.fb_in22k_ft_in1k',
'deit3_medium_patch16_224_in21ft1k': 'deit3_medium_patch16_224.fb_in22k_ft_in1k',
'deit3_base_patch16_224_in21ft1k': 'deit3_base_patch16_224.fb_in22k_ft_in1k',
'deit3_base_patch16_384_in21ft1k': 'deit3_base_patch16_384.fb_in22k_ft_in1k',
'deit3_large_patch16_224_in21ft1k': 'deit3_large_patch16_224.fb_in22k_ft_in1k',
'deit3_large_patch16_384_in21ft1k': 'deit3_large_patch16_384.fb_in22k_ft_in1k',
'deit3_huge_patch14_224_in21ft1k': 'deit3_huge_patch14_224.fb_in22k_ft_in1k'
})
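if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original module): distilled DeiT
    # variants return a (class, distillation) logit pair only while training with distillation
    # enabled; otherwise the two heads are averaged.
    model = deit_tiny_distilled_patch16_224(pretrained=False)
    model.set_distilled_training(True)
    model.train()
    cls_logits, dist_logits = model(torch.randn(2, 3, 224, 224))
    print(cls_logits.shape, dist_logits.shape)  # two (2, 1000) tensors
    model.eval()
    print(model(torch.randn(2, 3, 224, 224)).shape)  # averaged head output, (2, 1000)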
| pytorch-image-models/timm/models/deit.py/0 | {
"file_path": "pytorch-image-models/timm/models/deit.py",
"repo_id": "pytorch-image-models",
"token_count": 8300
} | 196 |
""" Global Context ViT
From scratch implementation of GCViT in the style of timm swin_transformer_v2_cr.py
Global Context Vision Transformers -https://arxiv.org/abs/2206.09959
@article{hatamizadeh2022global,
title={Global Context Vision Transformers},
author={Hatamizadeh, Ali and Yin, Hongxu and Kautz, Jan and Molchanov, Pavlo},
journal={arXiv preprint arXiv:2206.09959},
year={2022}
}
Free of any code related to NVIDIA GCVit impl at https://github.com/NVlabs/GCVit.
The license for this code release is Apache 2.0 with no commercial restrictions.
However, weight files adapted from NVIDIA GCVit impl ARE under a non-commercial share-alike license
(https://creativecommons.org/licenses/by-nc-sa/4.0/) until I have a chance to train new ones...
Hacked together by / Copyright 2022, Ross Wightman
"""
import math
from functools import partial
from typing import Callable, List, Optional, Tuple, Union
import torch
import torch.nn as nn
import torch.utils.checkpoint as checkpoint
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import DropPath, to_2tuple, to_ntuple, Mlp, ClassifierHead, LayerNorm2d, \
get_attn, get_act_layer, get_norm_layer, RelPosBias, _assert
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_function
from ._manipulate import named_apply
from ._registry import register_model, generate_default_cfgs
__all__ = ['GlobalContextVit']
class MbConvBlock(nn.Module):
""" A depthwise separable / fused mbconv style residual block with SE, `no norm.
"""
def __init__(
self,
in_chs,
out_chs=None,
expand_ratio=1.0,
attn_layer='se',
bias=False,
act_layer=nn.GELU,
):
super().__init__()
attn_kwargs = dict(act_layer=act_layer)
        if isinstance(attn_layer, str) and attn_layer in ('se', 'eca'):
attn_kwargs['rd_ratio'] = 0.25
attn_kwargs['bias'] = False
attn_layer = get_attn(attn_layer)
out_chs = out_chs or in_chs
mid_chs = int(expand_ratio * in_chs)
self.conv_dw = nn.Conv2d(in_chs, mid_chs, 3, 1, 1, groups=in_chs, bias=bias)
self.act = act_layer()
self.se = attn_layer(mid_chs, **attn_kwargs)
self.conv_pw = nn.Conv2d(mid_chs, out_chs, 1, 1, 0, bias=bias)
def forward(self, x):
shortcut = x
x = self.conv_dw(x)
x = self.act(x)
x = self.se(x)
x = self.conv_pw(x)
x = x + shortcut
return x
class Downsample2d(nn.Module):
def __init__(
self,
dim,
dim_out=None,
reduction='conv',
act_layer=nn.GELU,
norm_layer=LayerNorm2d, # NOTE in NCHW
):
super().__init__()
dim_out = dim_out or dim
self.norm1 = norm_layer(dim) if norm_layer is not None else nn.Identity()
self.conv_block = MbConvBlock(dim, act_layer=act_layer)
assert reduction in ('conv', 'max', 'avg')
if reduction == 'conv':
self.reduction = nn.Conv2d(dim, dim_out, 3, 2, 1, bias=False)
elif reduction == 'max':
assert dim == dim_out
self.reduction = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
else:
assert dim == dim_out
self.reduction = nn.AvgPool2d(kernel_size=2)
self.norm2 = norm_layer(dim_out) if norm_layer is not None else nn.Identity()
def forward(self, x):
x = self.norm1(x)
x = self.conv_block(x)
x = self.reduction(x)
x = self.norm2(x)
return x
class FeatureBlock(nn.Module):
def __init__(
self,
dim,
levels=0,
reduction='max',
act_layer=nn.GELU,
):
super().__init__()
reductions = levels
levels = max(1, levels)
if reduction == 'avg':
pool_fn = partial(nn.AvgPool2d, kernel_size=2)
else:
pool_fn = partial(nn.MaxPool2d, kernel_size=3, stride=2, padding=1)
self.blocks = nn.Sequential()
for i in range(levels):
self.blocks.add_module(f'conv{i+1}', MbConvBlock(dim, act_layer=act_layer))
if reductions:
self.blocks.add_module(f'pool{i+1}', pool_fn())
reductions -= 1
def forward(self, x):
return self.blocks(x)
class Stem(nn.Module):
def __init__(
self,
in_chs: int = 3,
out_chs: int = 96,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm2d, # NOTE stem in NCHW
):
super().__init__()
self.conv1 = nn.Conv2d(in_chs, out_chs, kernel_size=3, stride=2, padding=1)
self.down = Downsample2d(out_chs, act_layer=act_layer, norm_layer=norm_layer)
def forward(self, x):
x = self.conv1(x)
x = self.down(x)
return x
class WindowAttentionGlobal(nn.Module):
def __init__(
self,
dim: int,
num_heads: int,
window_size: Tuple[int, int],
use_global: bool = True,
qkv_bias: bool = True,
attn_drop: float = 0.,
proj_drop: float = 0.,
):
super().__init__()
window_size = to_2tuple(window_size)
self.window_size = window_size
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.use_global = use_global
self.rel_pos = RelPosBias(window_size=window_size, num_heads=num_heads)
if self.use_global:
self.qkv = nn.Linear(dim, dim * 2, bias=qkv_bias)
else:
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, q_global: Optional[torch.Tensor] = None):
B, N, C = x.shape
if self.use_global and q_global is not None:
            _assert(x.shape[-1] == q_global.shape[-1], 'x and q_global channel dims should be equal')
kv = self.qkv(x)
kv = kv.reshape(B, N, 2, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
k, v = kv.unbind(0)
q = q_global.repeat(B // q_global.shape[0], 1, 1, 1)
q = q.reshape(B, N, self.num_heads, self.head_dim).permute(0, 2, 1, 3)
else:
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
q = q * self.scale
attn = q @ k.transpose(-2, -1).contiguous() # NOTE contiguous() fixes an odd jit bug in PyTorch 2.0
attn = self.rel_pos(attn)
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
def window_partition(x, window_size: Tuple[int, int]):
B, H, W, C = x.shape
x = x.view(B, H // window_size[0], window_size[0], W // window_size[1], window_size[1], C)
windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size[0], window_size[1], C)
return windows
@register_notrace_function # reason: int argument is a Proxy
def window_reverse(windows, window_size: Tuple[int, int], img_size: Tuple[int, int]):
H, W = img_size
C = windows.shape[-1]
x = windows.view(-1, H // window_size[0], W // window_size[1], window_size[0], window_size[1], C)
x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, H, W, C)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class GlobalContextVitBlock(nn.Module):
def __init__(
self,
dim: int,
feat_size: Tuple[int, int],
num_heads: int,
window_size: int = 7,
mlp_ratio: float = 4.,
use_global: bool = True,
qkv_bias: bool = True,
layer_scale: Optional[float] = None,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: float = 0.,
attn_layer: Callable = WindowAttentionGlobal,
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
):
super().__init__()
feat_size = to_2tuple(feat_size)
window_size = to_2tuple(window_size)
self.window_size = window_size
self.num_windows = int((feat_size[0] // window_size[0]) * (feat_size[1] // window_size[1]))
self.norm1 = norm_layer(dim)
self.attn = attn_layer(
dim,
num_heads=num_heads,
window_size=window_size,
use_global=use_global,
qkv_bias=qkv_bias,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.ls1 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity()
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=proj_drop)
self.ls2 = LayerScale(dim, layer_scale) if layer_scale is not None else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def _window_attn(self, x, q_global: Optional[torch.Tensor] = None):
B, H, W, C = x.shape
x_win = window_partition(x, self.window_size)
x_win = x_win.view(-1, self.window_size[0] * self.window_size[1], C)
attn_win = self.attn(x_win, q_global)
x = window_reverse(attn_win, self.window_size, (H, W))
return x
def forward(self, x, q_global: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.ls1(self._window_attn(self.norm1(x), q_global)))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class GlobalContextVitStage(nn.Module):
def __init__(
self,
dim,
depth: int,
num_heads: int,
feat_size: Tuple[int, int],
window_size: Tuple[int, int],
downsample: bool = True,
global_norm: bool = False,
stage_norm: bool = False,
mlp_ratio: float = 4.,
qkv_bias: bool = True,
layer_scale: Optional[float] = None,
proj_drop: float = 0.,
attn_drop: float = 0.,
drop_path: Union[List[float], float] = 0.0,
act_layer: Callable = nn.GELU,
norm_layer: Callable = nn.LayerNorm,
norm_layer_cl: Callable = LayerNorm2d,
):
super().__init__()
if downsample:
self.downsample = Downsample2d(
dim=dim,
dim_out=dim * 2,
norm_layer=norm_layer,
)
dim = dim * 2
feat_size = (feat_size[0] // 2, feat_size[1] // 2)
else:
self.downsample = nn.Identity()
self.feat_size = feat_size
window_size = to_2tuple(window_size)
feat_levels = int(math.log2(min(feat_size) / min(window_size)))
self.global_block = FeatureBlock(dim, feat_levels)
self.global_norm = norm_layer_cl(dim) if global_norm else nn.Identity()
self.blocks = nn.ModuleList([
GlobalContextVitBlock(
dim=dim,
num_heads=num_heads,
feat_size=feat_size,
window_size=window_size,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
use_global=(i % 2 != 0),
layer_scale=layer_scale,
proj_drop=proj_drop,
attn_drop=attn_drop,
drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
act_layer=act_layer,
norm_layer=norm_layer_cl,
)
for i in range(depth)
])
self.norm = norm_layer_cl(dim) if stage_norm else nn.Identity()
self.dim = dim
self.feat_size = feat_size
self.grad_checkpointing = False
def forward(self, x):
# input NCHW, downsample & global block are 2d conv + pooling
x = self.downsample(x)
global_query = self.global_block(x)
# reshape NCHW --> NHWC for transformer blocks
x = x.permute(0, 2, 3, 1)
global_query = self.global_norm(global_query.permute(0, 2, 3, 1))
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
                x = checkpoint.checkpoint(blk, x, global_query)
else:
x = blk(x, global_query)
x = self.norm(x)
x = x.permute(0, 3, 1, 2).contiguous() # back to NCHW
return x
class GlobalContextVit(nn.Module):
def __init__(
self,
in_chans: int = 3,
num_classes: int = 1000,
global_pool: str = 'avg',
img_size: Tuple[int, int] = 224,
window_ratio: Tuple[int, ...] = (32, 32, 16, 32),
            window_size: Optional[Tuple[int, ...]] = None,
embed_dim: int = 64,
depths: Tuple[int, ...] = (3, 4, 19, 5),
num_heads: Tuple[int, ...] = (2, 4, 8, 16),
mlp_ratio: float = 3.0,
qkv_bias: bool = True,
layer_scale: Optional[float] = None,
drop_rate: float = 0.,
proj_drop_rate: float = 0.,
attn_drop_rate: float = 0.,
drop_path_rate: float = 0.,
weight_init='',
act_layer: str = 'gelu',
norm_layer: str = 'layernorm2d',
norm_layer_cl: str = 'layernorm',
norm_eps: float = 1e-5,
):
super().__init__()
act_layer = get_act_layer(act_layer)
norm_layer = partial(get_norm_layer(norm_layer), eps=norm_eps)
norm_layer_cl = partial(get_norm_layer(norm_layer_cl), eps=norm_eps)
img_size = to_2tuple(img_size)
feat_size = tuple(d // 4 for d in img_size) # stem reduction by 4
self.global_pool = global_pool
self.num_classes = num_classes
self.drop_rate = drop_rate
num_stages = len(depths)
self.num_features = int(embed_dim * 2 ** (num_stages - 1))
if window_size is not None:
window_size = to_ntuple(num_stages)(window_size)
else:
assert window_ratio is not None
window_size = tuple([(img_size[0] // r, img_size[1] // r) for r in to_ntuple(num_stages)(window_ratio)])
self.stem = Stem(
in_chs=in_chans,
out_chs=embed_dim,
act_layer=act_layer,
norm_layer=norm_layer
)
dpr = [x.tolist() for x in torch.linspace(0, drop_path_rate, sum(depths)).split(depths)]
stages = []
for i in range(num_stages):
last_stage = i == num_stages - 1
stage_scale = 2 ** max(i - 1, 0)
stages.append(GlobalContextVitStage(
dim=embed_dim * stage_scale,
depth=depths[i],
num_heads=num_heads[i],
feat_size=(feat_size[0] // stage_scale, feat_size[1] // stage_scale),
window_size=window_size[i],
downsample=i != 0,
stage_norm=last_stage,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
layer_scale=layer_scale,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
act_layer=act_layer,
norm_layer=norm_layer,
norm_layer_cl=norm_layer_cl,
))
self.stages = nn.Sequential(*stages)
# Classifier head
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=drop_rate)
if weight_init:
named_apply(partial(self._init_weights, scheme=weight_init), self)
def _init_weights(self, module, name, scheme='vit'):
# note Conv2d left as default init
if scheme == 'vit':
if isinstance(module, nn.Linear):
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
if 'mlp' in name:
nn.init.normal_(module.bias, std=1e-6)
else:
nn.init.zeros_(module.bias)
else:
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
@torch.jit.ignore
def no_weight_decay(self):
return {
k for k, _ in self.named_parameters()
if any(n in k for n in ["relative_position_bias_table", "rel_pos.mlp"])}
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem', # stem and embed
blocks=r'^stages\.(\d+)'
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
for s in self.stages:
s.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool=None):
self.num_classes = num_classes
if global_pool is None:
global_pool = self.head.global_pool.pool_type
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x: torch.Tensor) -> torch.Tensor:
x = self.stem(x)
x = self.stages(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits)
def forward(self, x: torch.Tensor) -> torch.Tensor:
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_gcvit(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(GlobalContextVit, variant, pretrained, **kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv1', 'classifier': 'head.fc',
'fixed_input_size': True,
**kwargs
}
default_cfgs = generate_default_cfgs({
'gcvit_xxtiny.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xxtiny_224_nvidia-d1d86009.pth'),
'gcvit_xtiny.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_xtiny_224_nvidia-274b92b7.pth'),
'gcvit_tiny.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_tiny_224_nvidia-ac783954.pth'),
'gcvit_small.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_small_224_nvidia-4e98afa2.pth'),
'gcvit_base.in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights-morevit/gcvit_base_224_nvidia-f009139b.pth'),
})
@register_model
def gcvit_xxtiny(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(2, 2, 6, 2),
num_heads=(2, 4, 8, 16),
**kwargs)
return _create_gcvit('gcvit_xxtiny', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_xtiny(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 6, 5),
num_heads=(2, 4, 8, 16),
**kwargs)
return _create_gcvit('gcvit_xtiny', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_tiny(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 19, 5),
num_heads=(2, 4, 8, 16),
**kwargs)
return _create_gcvit('gcvit_tiny', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_small(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 19, 5),
num_heads=(3, 6, 12, 24),
embed_dim=96,
mlp_ratio=2,
layer_scale=1e-5,
**kwargs)
return _create_gcvit('gcvit_small', pretrained=pretrained, **model_kwargs)
@register_model
def gcvit_base(pretrained=False, **kwargs) -> GlobalContextVit:
model_kwargs = dict(
depths=(3, 4, 19, 5),
num_heads=(4, 8, 16, 32),
embed_dim=128,
mlp_ratio=2,
layer_scale=1e-5,
**kwargs)
return _create_gcvit('gcvit_base', pretrained=pretrained, **model_kwargs)
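if __name__ == '__main__':
    # Usage sketch (illustrative addition, not part of the original module).
    # window_partition / window_reverse are exact inverses when the spatial dims divide
    # evenly by the window size:
    t = torch.randn(2, 14, 14, 32)
    wins = window_partition(t, (7, 7))
    assert torch.equal(window_reverse(wins, (7, 7), (14, 14)), t)
    # Smallest variant defined above, randomly initialized, ImageNet-1k head by default:
    model = gcvit_xxtiny(pretrained=False)
    print(model(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 1000])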
| pytorch-image-models/timm/models/gcvit.py/0 | {
"file_path": "pytorch-image-models/timm/models/gcvit.py",
"repo_id": "pytorch-image-models",
"token_count": 10789
} | 197 |
""" MobileViT
Paper:
V1: `MobileViT: Light-weight, General-purpose, and Mobile-friendly Vision Transformer` - https://arxiv.org/abs/2110.02178
V2: `Separable Self-attention for Mobile Vision Transformers` - https://arxiv.org/abs/2206.02680
MobileVitBlock and checkpoints adapted from https://github.com/apple/ml-cvnets (original copyright below)
License: https://github.com/apple/ml-cvnets/blob/main/LICENSE (Apple open source)
Rest of code, ByobNet, and Transformer block hacked together by / Copyright 2022, Ross Wightman
"""
#
# For licensing see accompanying LICENSE file.
# Copyright (C) 2020 Apple Inc. All Rights Reserved.
#
import math
from typing import Callable, Tuple, Optional
import torch
import torch.nn.functional as F
from torch import nn
from timm.layers import to_2tuple, make_divisible, GroupNorm1, ConvMlp, DropPath, is_exportable
from ._builder import build_model_with_cfg
from ._features_fx import register_notrace_module
from ._registry import register_model, generate_default_cfgs, register_model_deprecations
from .byobnet import register_block, ByoBlockCfg, ByoModelCfg, ByobNet, LayerFn, num_groups
from .vision_transformer import Block as TransformerBlock
__all__ = []
def _inverted_residual_block(d, c, s, br=4.0):
# inverted residual is a bottleneck block with bottle_ratio > 1 applied to in_chs, linear output, gs=1 (depthwise)
return ByoBlockCfg(
type='bottle', d=d, c=c, s=s, gs=1, br=br,
block_kwargs=dict(bottle_in=True, linear_out=True))
def _mobilevit_block(d, c, s, transformer_dim, transformer_depth, patch_size=4, br=4.0):
# inverted residual + mobilevit blocks as per MobileViT network
return (
_inverted_residual_block(d=d, c=c, s=s, br=br),
ByoBlockCfg(
type='mobilevit', d=1, c=c, s=1,
block_kwargs=dict(
transformer_dim=transformer_dim,
transformer_depth=transformer_depth,
patch_size=patch_size)
)
)
def _mobilevitv2_block(d, c, s, transformer_depth, patch_size=2, br=2.0, transformer_br=0.5):
# inverted residual + mobilevit blocks as per MobileViT network
return (
_inverted_residual_block(d=d, c=c, s=s, br=br),
ByoBlockCfg(
type='mobilevit2', d=1, c=c, s=1, br=transformer_br, gs=1,
block_kwargs=dict(
transformer_depth=transformer_depth,
patch_size=patch_size)
)
)
def _mobilevitv2_cfg(multiplier=1.0):
chs = (64, 128, 256, 384, 512)
if multiplier != 1.0:
chs = tuple([int(c * multiplier) for c in chs])
cfg = ByoModelCfg(
blocks=(
_inverted_residual_block(d=1, c=chs[0], s=1, br=2.0),
_inverted_residual_block(d=2, c=chs[1], s=2, br=2.0),
_mobilevitv2_block(d=1, c=chs[2], s=2, transformer_depth=2),
_mobilevitv2_block(d=1, c=chs[3], s=2, transformer_depth=4),
_mobilevitv2_block(d=1, c=chs[4], s=2, transformer_depth=3),
),
stem_chs=int(32 * multiplier),
stem_type='3x3',
stem_pool='',
downsample='',
act_layer='silu',
)
return cfg
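# For example, _mobilevitv2_cfg(0.5) (illustrative arithmetic) yields stage widths
# (32, 64, 128, 192, 256) with a 16-channel stem, i.e. each width above scaled by the multiplier.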
model_cfgs = dict(
mobilevit_xxs=ByoModelCfg(
blocks=(
_inverted_residual_block(d=1, c=16, s=1, br=2.0),
_inverted_residual_block(d=3, c=24, s=2, br=2.0),
_mobilevit_block(d=1, c=48, s=2, transformer_dim=64, transformer_depth=2, patch_size=2, br=2.0),
_mobilevit_block(d=1, c=64, s=2, transformer_dim=80, transformer_depth=4, patch_size=2, br=2.0),
_mobilevit_block(d=1, c=80, s=2, transformer_dim=96, transformer_depth=3, patch_size=2, br=2.0),
),
stem_chs=16,
stem_type='3x3',
stem_pool='',
downsample='',
act_layer='silu',
num_features=320,
),
mobilevit_xs=ByoModelCfg(
blocks=(
_inverted_residual_block(d=1, c=32, s=1),
_inverted_residual_block(d=3, c=48, s=2),
_mobilevit_block(d=1, c=64, s=2, transformer_dim=96, transformer_depth=2, patch_size=2),
_mobilevit_block(d=1, c=80, s=2, transformer_dim=120, transformer_depth=4, patch_size=2),
_mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=3, patch_size=2),
),
stem_chs=16,
stem_type='3x3',
stem_pool='',
downsample='',
act_layer='silu',
num_features=384,
),
mobilevit_s=ByoModelCfg(
blocks=(
_inverted_residual_block(d=1, c=32, s=1),
_inverted_residual_block(d=3, c=64, s=2),
_mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2),
_mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2),
_mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2),
),
stem_chs=16,
stem_type='3x3',
stem_pool='',
downsample='',
act_layer='silu',
num_features=640,
),
semobilevit_s=ByoModelCfg(
blocks=(
_inverted_residual_block(d=1, c=32, s=1),
_inverted_residual_block(d=3, c=64, s=2),
_mobilevit_block(d=1, c=96, s=2, transformer_dim=144, transformer_depth=2, patch_size=2),
_mobilevit_block(d=1, c=128, s=2, transformer_dim=192, transformer_depth=4, patch_size=2),
_mobilevit_block(d=1, c=160, s=2, transformer_dim=240, transformer_depth=3, patch_size=2),
),
stem_chs=16,
stem_type='3x3',
stem_pool='',
downsample='',
attn_layer='se',
attn_kwargs=dict(rd_ratio=1/8),
num_features=640,
),
mobilevitv2_050=_mobilevitv2_cfg(.50),
mobilevitv2_075=_mobilevitv2_cfg(.75),
mobilevitv2_125=_mobilevitv2_cfg(1.25),
mobilevitv2_100=_mobilevitv2_cfg(1.0),
mobilevitv2_150=_mobilevitv2_cfg(1.5),
mobilevitv2_175=_mobilevitv2_cfg(1.75),
mobilevitv2_200=_mobilevitv2_cfg(2.0),
)
@register_notrace_module
class MobileVitBlock(nn.Module):
""" MobileViT block
Paper: https://arxiv.org/abs/2110.02178?context=cs.LG
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
kernel_size: int = 3,
stride: int = 1,
bottle_ratio: float = 1.0,
group_size: Optional[int] = None,
dilation: Tuple[int, int] = (1, 1),
mlp_ratio: float = 2.0,
transformer_dim: Optional[int] = None,
transformer_depth: int = 2,
patch_size: int = 8,
num_heads: int = 4,
attn_drop: float = 0.,
            drop: float = 0.,
no_fusion: bool = False,
drop_path_rate: float = 0.,
layers: LayerFn = None,
transformer_norm_layer: Callable = nn.LayerNorm,
**kwargs, # eat unused args
):
super(MobileVitBlock, self).__init__()
layers = layers or LayerFn()
groups = num_groups(group_size, in_chs)
out_chs = out_chs or in_chs
transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs)
self.conv_kxk = layers.conv_norm_act(
in_chs, in_chs, kernel_size=kernel_size,
stride=stride, groups=groups, dilation=dilation[0])
self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False)
self.transformer = nn.Sequential(*[
TransformerBlock(
transformer_dim,
mlp_ratio=mlp_ratio,
num_heads=num_heads,
qkv_bias=True,
attn_drop=attn_drop,
proj_drop=drop,
drop_path=drop_path_rate,
act_layer=layers.act,
norm_layer=transformer_norm_layer,
)
for _ in range(transformer_depth)
])
self.norm = transformer_norm_layer(transformer_dim)
self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1)
if no_fusion:
self.conv_fusion = None
else:
self.conv_fusion = layers.conv_norm_act(in_chs + out_chs, out_chs, kernel_size=kernel_size, stride=1)
self.patch_size = to_2tuple(patch_size)
self.patch_area = self.patch_size[0] * self.patch_size[1]
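    # forward() below: build a local representation with conv_kxk + conv_1x1, unfold so pixels at
    # the same position within each patch form a sequence over the N patches, run the transformer
    # on those sequences, fold back to a feature map, project, and optionally fuse with the input.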
def forward(self, x: torch.Tensor) -> torch.Tensor:
shortcut = x
# Local representation
x = self.conv_kxk(x)
x = self.conv_1x1(x)
# Unfold (feature map -> patches)
patch_h, patch_w = self.patch_size
B, C, H, W = x.shape
new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w
num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w
num_patches = num_patch_h * num_patch_w # N
interpolate = False
if new_h != H or new_w != W:
# Note: Padding can be done, but then it needs to be handled in attention function.
x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=False)
interpolate = True
# [B, C, H, W] --> [B * C * n_h, n_w, p_h, p_w]
x = x.reshape(B * C * num_patch_h, patch_h, num_patch_w, patch_w).transpose(1, 2)
# [B * C * n_h, n_w, p_h, p_w] --> [BP, N, C] where P = p_h * p_w and N = n_h * n_w
x = x.reshape(B, C, num_patches, self.patch_area).transpose(1, 3).reshape(B * self.patch_area, num_patches, -1)
# Global representations
x = self.transformer(x)
x = self.norm(x)
# Fold (patch -> feature map)
# [B, P, N, C] --> [B*C*n_h, n_w, p_h, p_w]
x = x.contiguous().view(B, self.patch_area, num_patches, -1)
x = x.transpose(1, 3).reshape(B * C * num_patch_h, num_patch_w, patch_h, patch_w)
# [B*C*n_h, n_w, p_h, p_w] --> [B*C*n_h, p_h, n_w, p_w] --> [B, C, H, W]
x = x.transpose(1, 2).reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w)
if interpolate:
x = F.interpolate(x, size=(H, W), mode="bilinear", align_corners=False)
x = self.conv_proj(x)
if self.conv_fusion is not None:
x = self.conv_fusion(torch.cat((shortcut, x), dim=1))
return x
class LinearSelfAttention(nn.Module):
"""
This layer applies a self-attention with linear complexity, as described in `https://arxiv.org/abs/2206.02680`
This layer can be used for self- as well as cross-attention.
Args:
        embed_dim (int): :math:`C` from an expected input of size :math:`(B, C, P, N)`
        attn_drop (float): Dropout value for context scores. Default: 0.0
        proj_drop (float): Dropout value for the output projection. Default: 0.0
        bias (bool): Use bias in learnable layers. Default: True
    Shape:
        - Input: :math:`(B, C, P, N)` where :math:`B` is the batch size, :math:`C` is the input channels,
        :math:`P` is the number of pixels in the patch, and :math:`N` is the number of patches
- Output: same as the input
.. note::
For MobileViTv2, we unfold the feature map [B, C, H, W] into [B, C, P, N] where P is the number of pixels
in a patch and N is the number of patches. Because channel is the first dimension in this unfolded tensor,
we use point-wise convolution (instead of a linear layer). This avoids a transpose operation (which may be
expensive on resource-constrained devices) that may be required to convert the unfolded tensor from
channel-first to channel-last format in case of a linear layer.
"""
def __init__(
self,
embed_dim: int,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
bias: bool = True,
) -> None:
super().__init__()
self.embed_dim = embed_dim
self.qkv_proj = nn.Conv2d(
in_channels=embed_dim,
out_channels=1 + (2 * embed_dim),
bias=bias,
kernel_size=1,
)
self.attn_drop = nn.Dropout(attn_drop)
self.out_proj = nn.Conv2d(
in_channels=embed_dim,
out_channels=embed_dim,
bias=bias,
kernel_size=1,
)
self.out_drop = nn.Dropout(proj_drop)
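    # The single qkv_proj conv emits 1 query-score channel plus 2 * embed_dim key/value channels.
    # Cost is linear in the number of patches N: a softmax over N yields context scores, the keys
    # are pooled into a single context vector, and the (ReLU'd) values are re-weighted by it.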
def _forward_self_attn(self, x: torch.Tensor) -> torch.Tensor:
# [B, C, P, N] --> [B, h + 2d, P, N]
qkv = self.qkv_proj(x)
# Project x into query, key and value
# Query --> [B, 1, P, N]
# value, key --> [B, d, P, N]
query, key, value = qkv.split([1, self.embed_dim, self.embed_dim], dim=1)
# apply softmax along N dimension
context_scores = F.softmax(query, dim=-1)
context_scores = self.attn_drop(context_scores)
# Compute context vector
# [B, d, P, N] x [B, 1, P, N] -> [B, d, P, N] --> [B, d, P, 1]
context_vector = (key * context_scores).sum(dim=-1, keepdim=True)
# combine context vector with values
# [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N]
out = F.relu(value) * context_vector.expand_as(value)
out = self.out_proj(out)
out = self.out_drop(out)
return out
@torch.jit.ignore()
def _forward_cross_attn(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor:
# x --> [B, C, P, N]
# x_prev = [B, C, P, M]
batch_size, in_dim, kv_patch_area, kv_num_patches = x.shape
        q_patch_area, q_num_patches = x_prev.shape[-2:]
assert (
kv_patch_area == q_patch_area
), "The number of pixels in a patch for query and key_value should be the same"
# compute query, key, and value
# [B, C, P, M] --> [B, 1 + d, P, M]
qk = F.conv2d(
x_prev,
weight=self.qkv_proj.weight[:self.embed_dim + 1],
bias=self.qkv_proj.bias[:self.embed_dim + 1],
)
# [B, 1 + d, P, M] --> [B, 1, P, M], [B, d, P, M]
query, key = qk.split([1, self.embed_dim], dim=1)
# [B, C, P, N] --> [B, d, P, N]
value = F.conv2d(
x,
            weight=self.qkv_proj.weight[self.embed_dim + 1:],
            bias=self.qkv_proj.bias[self.embed_dim + 1:] if self.qkv_proj.bias is not None else None,
)
# apply softmax along M dimension
context_scores = F.softmax(query, dim=-1)
context_scores = self.attn_drop(context_scores)
# compute context vector
# [B, d, P, M] * [B, 1, P, M] -> [B, d, P, M] --> [B, d, P, 1]
context_vector = (key * context_scores).sum(dim=-1, keepdim=True)
# combine context vector with values
# [B, d, P, N] * [B, d, P, 1] --> [B, d, P, N]
out = F.relu(value) * context_vector.expand_as(value)
out = self.out_proj(out)
out = self.out_drop(out)
return out
def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor:
if x_prev is None:
return self._forward_self_attn(x)
else:
return self._forward_cross_attn(x, x_prev=x_prev)
class LinearTransformerBlock(nn.Module):
"""
    This class defines the pre-norm transformer encoder with linear self-attention in the `MobileViTv2 paper <https://arxiv.org/abs/2206.02680>`_
Args:
embed_dim (int): :math:`C_{in}` from an expected input of size :math:`(B, C_{in}, P, N)`
mlp_ratio (float): Inner dimension ratio of the FFN relative to embed_dim
drop (float): Dropout rate. Default: 0.0
attn_drop (float): Dropout rate for attention in multi-head attention. Default: 0.0
drop_path (float): Stochastic depth rate Default: 0.0
        norm_layer (Callable): Normalization layer. Default: GroupNorm1
Shape:
- Input: :math:`(B, C_{in}, P, N)` where :math:`B` is batch size, :math:`C_{in}` is input embedding dim,
:math:`P` is number of pixels in a patch, and :math:`N` is number of patches,
- Output: same shape as the input
"""
def __init__(
self,
embed_dim: int,
mlp_ratio: float = 2.0,
drop: float = 0.0,
attn_drop: float = 0.0,
drop_path: float = 0.0,
act_layer=None,
norm_layer=None,
) -> None:
super().__init__()
act_layer = act_layer or nn.SiLU
norm_layer = norm_layer or GroupNorm1
self.norm1 = norm_layer(embed_dim)
self.attn = LinearSelfAttention(embed_dim=embed_dim, attn_drop=attn_drop, proj_drop=drop)
self.drop_path1 = DropPath(drop_path)
self.norm2 = norm_layer(embed_dim)
self.mlp = ConvMlp(
in_features=embed_dim,
hidden_features=int(embed_dim * mlp_ratio),
act_layer=act_layer,
drop=drop)
self.drop_path2 = DropPath(drop_path)
def forward(self, x: torch.Tensor, x_prev: Optional[torch.Tensor] = None) -> torch.Tensor:
if x_prev is None:
# self-attention
x = x + self.drop_path1(self.attn(self.norm1(x)))
else:
# cross-attention
res = x
x = self.norm1(x) # norm
x = self.attn(x, x_prev) # attn
x = self.drop_path1(x) + res # residual
# Feed forward network
x = x + self.drop_path2(self.mlp(self.norm2(x)))
return x
@register_notrace_module
class MobileVitV2Block(nn.Module):
"""
    This class defines the `MobileViTv2 block <https://arxiv.org/abs/2206.02680>`_
"""
def __init__(
self,
in_chs: int,
out_chs: Optional[int] = None,
kernel_size: int = 3,
bottle_ratio: float = 1.0,
group_size: Optional[int] = 1,
dilation: Tuple[int, int] = (1, 1),
mlp_ratio: float = 2.0,
transformer_dim: Optional[int] = None,
transformer_depth: int = 2,
patch_size: int = 8,
attn_drop: float = 0.,
            drop: float = 0.,
drop_path_rate: float = 0.,
layers: LayerFn = None,
transformer_norm_layer: Callable = GroupNorm1,
**kwargs, # eat unused args
):
super(MobileVitV2Block, self).__init__()
layers = layers or LayerFn()
groups = num_groups(group_size, in_chs)
out_chs = out_chs or in_chs
transformer_dim = transformer_dim or make_divisible(bottle_ratio * in_chs)
self.conv_kxk = layers.conv_norm_act(
in_chs, in_chs, kernel_size=kernel_size,
stride=1, groups=groups, dilation=dilation[0])
self.conv_1x1 = nn.Conv2d(in_chs, transformer_dim, kernel_size=1, bias=False)
self.transformer = nn.Sequential(*[
LinearTransformerBlock(
transformer_dim,
mlp_ratio=mlp_ratio,
attn_drop=attn_drop,
drop=drop,
drop_path=drop_path_rate,
act_layer=layers.act,
norm_layer=transformer_norm_layer
)
for _ in range(transformer_depth)
])
self.norm = transformer_norm_layer(transformer_dim)
self.conv_proj = layers.conv_norm_act(transformer_dim, out_chs, kernel_size=1, stride=1, apply_act=False)
self.patch_size = to_2tuple(patch_size)
self.patch_area = self.patch_size[0] * self.patch_size[1]
self.coreml_exportable = is_exportable()
def forward(self, x: torch.Tensor) -> torch.Tensor:
B, C, H, W = x.shape
patch_h, patch_w = self.patch_size
new_h, new_w = math.ceil(H / patch_h) * patch_h, math.ceil(W / patch_w) * patch_w
num_patch_h, num_patch_w = new_h // patch_h, new_w // patch_w # n_h, n_w
num_patches = num_patch_h * num_patch_w # N
if new_h != H or new_w != W:
x = F.interpolate(x, size=(new_h, new_w), mode="bilinear", align_corners=True)
# Local representation
x = self.conv_kxk(x)
x = self.conv_1x1(x)
# Unfold (feature map -> patches), [B, C, H, W] -> [B, C, P, N]
C = x.shape[1]
if self.coreml_exportable:
x = F.unfold(x, kernel_size=(patch_h, patch_w), stride=(patch_h, patch_w))
else:
x = x.reshape(B, C, num_patch_h, patch_h, num_patch_w, patch_w).permute(0, 1, 3, 5, 2, 4)
x = x.reshape(B, C, -1, num_patches)
# Global representations
x = self.transformer(x)
x = self.norm(x)
# Fold (patches -> feature map), [B, C, P, N] --> [B, C, H, W]
if self.coreml_exportable:
# adopted from https://github.com/apple/ml-cvnets/blob/main/cvnets/modules/mobilevit_block.py#L609-L624
x = x.reshape(B, C * patch_h * patch_w, num_patch_h, num_patch_w)
x = F.pixel_shuffle(x, upscale_factor=patch_h)
else:
x = x.reshape(B, C, patch_h, patch_w, num_patch_h, num_patch_w).permute(0, 1, 4, 2, 5, 3)
x = x.reshape(B, C, num_patch_h * patch_h, num_patch_w * patch_w)
x = self.conv_proj(x)
return x
register_block('mobilevit', MobileVitBlock)
register_block('mobilevit2', MobileVitV2Block)
def _create_mobilevit(variant, cfg_variant=None, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs)
def _create_mobilevit2(variant, cfg_variant=None, pretrained=False, **kwargs):
return build_model_with_cfg(
ByobNet, variant, pretrained,
model_cfg=model_cfgs[variant] if not cfg_variant else model_cfgs[cfg_variant],
feature_cfg=dict(flatten_sequential=True),
**kwargs)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 256, 256), 'pool_size': (8, 8),
'crop_pct': 0.9, 'interpolation': 'bicubic',
'mean': (0., 0., 0.), 'std': (1., 1., 1.),
'first_conv': 'stem.conv', 'classifier': 'head.fc',
'fixed_input_size': False,
**kwargs
}
default_cfgs = generate_default_cfgs({
'mobilevit_xxs.cvnets_in1k': _cfg(hf_hub_id='timm/'),
'mobilevit_xs.cvnets_in1k': _cfg(hf_hub_id='timm/'),
'mobilevit_s.cvnets_in1k': _cfg(hf_hub_id='timm/'),
'mobilevitv2_050.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_075.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_100.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_125.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_150.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_175.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_200.cvnets_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_150.cvnets_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_175.cvnets_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_200.cvnets_in22k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.888),
'mobilevitv2_150.cvnets_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'mobilevitv2_175.cvnets_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
'mobilevitv2_200.cvnets_in22k_ft_in1k_384': _cfg(
hf_hub_id='timm/',
input_size=(3, 384, 384), pool_size=(12, 12), crop_pct=1.0),
})
@register_model
def mobilevit_xxs(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevit_xxs', pretrained=pretrained, **kwargs)
@register_model
def mobilevit_xs(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevit_xs', pretrained=pretrained, **kwargs)
@register_model
def mobilevit_s(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevit_s', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_050(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_050', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_075(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_075', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_100(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_100', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_125(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_125', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_150(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_150', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_175(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_175', pretrained=pretrained, **kwargs)
@register_model
def mobilevitv2_200(pretrained=False, **kwargs) -> ByobNet:
return _create_mobilevit('mobilevitv2_200', pretrained=pretrained, **kwargs)
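# Example usage (illustrative):
#   import timm, torch
#   model = timm.create_model('mobilevitv2_100', pretrained=False)
#   logits = model(torch.randn(1, 3, 256, 256))  # default cfgs above use 256x256 inputs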
register_model_deprecations(__name__, {
'mobilevitv2_150_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k',
'mobilevitv2_175_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k',
'mobilevitv2_200_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k',
'mobilevitv2_150_384_in22ft1k': 'mobilevitv2_150.cvnets_in22k_ft_in1k_384',
'mobilevitv2_175_384_in22ft1k': 'mobilevitv2_175.cvnets_in22k_ft_in1k_384',
'mobilevitv2_200_384_in22ft1k': 'mobilevitv2_200.cvnets_in22k_ft_in1k_384',
})
 | pytorch-image-models/timm/models/mobilevit.py/0 | {
"file_path": "pytorch-image-models/timm/models/mobilevit.py",
"repo_id": "pytorch-image-models",
"token_count": 12812
} | 198 |
""" ReXNet
A PyTorch impl of `ReXNet: Diminishing Representational Bottleneck on Convolutional Neural Network` -
https://arxiv.org/abs/2007.00992
Adapted from original impl at https://github.com/clovaai/rexnet
Copyright (c) 2020-present NAVER Corp. MIT license
Changes for timm, feature extraction, and rounded channel variant hacked together by Ross Wightman
Copyright 2020 Ross Wightman
"""
from functools import partial
from math import ceil
import torch
import torch.nn as nn
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
from timm.layers import ClassifierHead, create_act_layer, ConvNormAct, DropPath, make_divisible, SEModule
from ._builder import build_model_with_cfg
from ._efficientnet_builder import efficientnet_init_weights
from ._manipulate import checkpoint_seq
from ._registry import generate_default_cfgs, register_model
__all__ = ['RexNet'] # model_registry will add each entrypoint fn to this
SEWithNorm = partial(SEModule, norm_layer=nn.BatchNorm2d)
class LinearBottleneck(nn.Module):
def __init__(
self,
in_chs,
out_chs,
stride,
dilation=(1, 1),
exp_ratio=1.0,
se_ratio=0.,
ch_div=1,
act_layer='swish',
dw_act_layer='relu6',
drop_path=None,
):
super(LinearBottleneck, self).__init__()
self.use_shortcut = stride == 1 and dilation[0] == dilation[1] and in_chs <= out_chs
self.in_channels = in_chs
self.out_channels = out_chs
if exp_ratio != 1.:
dw_chs = make_divisible(round(in_chs * exp_ratio), divisor=ch_div)
self.conv_exp = ConvNormAct(in_chs, dw_chs, act_layer=act_layer)
else:
dw_chs = in_chs
self.conv_exp = None
self.conv_dw = ConvNormAct(
dw_chs,
dw_chs,
kernel_size=3,
stride=stride,
dilation=dilation[0],
groups=dw_chs,
apply_act=False,
)
if se_ratio > 0:
self.se = SEWithNorm(dw_chs, rd_channels=make_divisible(int(dw_chs * se_ratio), ch_div))
else:
self.se = None
self.act_dw = create_act_layer(dw_act_layer)
self.conv_pwl = ConvNormAct(dw_chs, out_chs, 1, apply_act=False)
self.drop_path = drop_path
def feat_channels(self, exp=False):
return self.conv_dw.out_channels if exp else self.out_channels
def forward(self, x):
shortcut = x
if self.conv_exp is not None:
x = self.conv_exp(x)
x = self.conv_dw(x)
if self.se is not None:
x = self.se(x)
x = self.act_dw(x)
x = self.conv_pwl(x)
if self.use_shortcut:
if self.drop_path is not None:
x = self.drop_path(x)
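            # ReXNet-style partial residual: only the first in_channels of the expanded output
            # receive the shortcut; the remaining (newly added) channels pass through unchanged.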
x = torch.cat([x[:, 0:self.in_channels] + shortcut, x[:, self.in_channels:]], dim=1)
return x
def _block_cfg(
width_mult=1.0,
depth_mult=1.0,
initial_chs=16,
final_chs=180,
se_ratio=0.,
ch_div=1,
):
layers = [1, 2, 2, 3, 3, 5]
strides = [1, 2, 2, 2, 1, 2]
layers = [ceil(element * depth_mult) for element in layers]
strides = sum([[element] + [1] * (layers[idx] - 1) for idx, element in enumerate(strides)], [])
exp_ratios = [1] * layers[0] + [6] * sum(layers[1:])
depth = sum(layers[:]) * 3
base_chs = initial_chs / width_mult if width_mult < 1.0 else initial_chs
# The following channel configuration is a simple instance to make each layer become an expand layer.
out_chs_list = []
for i in range(depth // 3):
out_chs_list.append(make_divisible(round(base_chs * width_mult), divisor=ch_div))
base_chs += final_chs / (depth // 3 * 1.0)
se_ratios = [0.] * (layers[0] + layers[1]) + [se_ratio] * sum(layers[2:])
return list(zip(out_chs_list, exp_ratios, strides, se_ratios))
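# Each config entry above is an (out_chs, exp_ratio, stride, se_ratio) tuple; _build_blocks below
# turns the flat list into LinearBottleneck modules plus feature_info for feature extraction.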
def _build_blocks(
block_cfg,
prev_chs,
width_mult,
ch_div=1,
output_stride=32,
act_layer='swish',
dw_act_layer='relu6',
drop_path_rate=0.,
):
feat_chs = [prev_chs]
feature_info = []
curr_stride = 2
dilation = 1
features = []
num_blocks = len(block_cfg)
for block_idx, (chs, exp_ratio, stride, se_ratio) in enumerate(block_cfg):
next_dilation = dilation
if stride > 1:
fname = 'stem' if block_idx == 0 else f'features.{block_idx - 1}'
feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=fname)]
if curr_stride >= output_stride:
next_dilation = dilation * stride
stride = 1
block_dpr = drop_path_rate * block_idx / (num_blocks - 1) # stochastic depth linear decay rule
drop_path = DropPath(block_dpr) if block_dpr > 0. else None
features.append(LinearBottleneck(
in_chs=prev_chs,
out_chs=chs,
exp_ratio=exp_ratio,
stride=stride,
dilation=(dilation, next_dilation),
se_ratio=se_ratio,
ch_div=ch_div,
act_layer=act_layer,
dw_act_layer=dw_act_layer,
drop_path=drop_path,
))
curr_stride *= stride
dilation = next_dilation
prev_chs = chs
feat_chs += [features[-1].feat_channels()]
pen_chs = make_divisible(1280 * width_mult, divisor=ch_div)
feature_info += [dict(num_chs=feat_chs[-1], reduction=curr_stride, module=f'features.{len(features) - 1}')]
features.append(ConvNormAct(prev_chs, pen_chs, act_layer=act_layer))
return features, feature_info
class RexNet(nn.Module):
def __init__(
self,
in_chans=3,
num_classes=1000,
global_pool='avg',
output_stride=32,
initial_chs=16,
final_chs=180,
width_mult=1.0,
depth_mult=1.0,
se_ratio=1/12.,
ch_div=1,
act_layer='swish',
dw_act_layer='relu6',
drop_rate=0.2,
drop_path_rate=0.,
):
super(RexNet, self).__init__()
self.num_classes = num_classes
self.drop_rate = drop_rate
self.grad_checkpointing = False
assert output_stride in (32, 16, 8)
stem_base_chs = 32 / width_mult if width_mult < 1.0 else 32
stem_chs = make_divisible(round(stem_base_chs * width_mult), divisor=ch_div)
self.stem = ConvNormAct(in_chans, stem_chs, 3, stride=2, act_layer=act_layer)
block_cfg = _block_cfg(width_mult, depth_mult, initial_chs, final_chs, se_ratio, ch_div)
features, self.feature_info = _build_blocks(
block_cfg,
stem_chs,
width_mult,
ch_div,
output_stride,
act_layer,
dw_act_layer,
drop_path_rate,
)
self.num_features = features[-1].out_channels
self.features = nn.Sequential(*features)
self.head = ClassifierHead(self.num_features, num_classes, global_pool, drop_rate)
efficientnet_init_weights(self)
@torch.jit.ignore
def group_matcher(self, coarse=False):
matcher = dict(
stem=r'^stem',
blocks=r'^features\.(\d+)',
)
return matcher
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head.fc
def reset_classifier(self, num_classes, global_pool='avg'):
self.head = ClassifierHead(self.num_features, num_classes, pool_type=global_pool, drop_rate=self.drop_rate)
def forward_features(self, x):
x = self.stem(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.features, x, flatten=True)
else:
x = self.features(x)
return x
def forward_head(self, x, pre_logits: bool = False):
return self.head(x, pre_logits=pre_logits) if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_rexnet(variant, pretrained, **kwargs):
feature_cfg = dict(flatten_sequential=True)
return build_model_with_cfg(
RexNet,
variant,
pretrained,
feature_cfg=feature_cfg,
**kwargs,
)
def _cfg(url='', **kwargs):
return {
'url': url, 'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bicubic',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'stem.conv', 'classifier': 'head.fc',
'license': 'mit', **kwargs
}
default_cfgs = generate_default_cfgs({
'rexnet_100.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_130.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_150.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_200.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnet_300.nav_in1k': _cfg(hf_hub_id='timm/'),
'rexnetr_100.untrained': _cfg(),
'rexnetr_130.untrained': _cfg(),
'rexnetr_150.untrained': _cfg(),
'rexnetr_200.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
'rexnetr_300.sw_in12k_ft_in1k': _cfg(
hf_hub_id='timm/',
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
'rexnetr_200.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
'rexnetr_300.sw_in12k': _cfg(
hf_hub_id='timm/',
num_classes=11821,
crop_pct=0.95, test_crop_pct=1.0, test_input_size=(3, 288, 288), license='apache-2.0'),
})
@register_model
def rexnet_100(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.0x"""
return _create_rexnet('rexnet_100', pretrained, **kwargs)
@register_model
def rexnet_130(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.3x"""
return _create_rexnet('rexnet_130', pretrained, width_mult=1.3, **kwargs)
@register_model
def rexnet_150(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.5x"""
return _create_rexnet('rexnet_150', pretrained, width_mult=1.5, **kwargs)
@register_model
def rexnet_200(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 2.0x"""
return _create_rexnet('rexnet_200', pretrained, width_mult=2.0, **kwargs)
@register_model
def rexnet_300(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 3.0x"""
return _create_rexnet('rexnet_300', pretrained, width_mult=3.0, **kwargs)
@register_model
def rexnetr_100(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.0x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_100', pretrained, ch_div=8, **kwargs)
@register_model
def rexnetr_130(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.3x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_130', pretrained, width_mult=1.3, ch_div=8, **kwargs)
@register_model
def rexnetr_150(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 1.5x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_150', pretrained, width_mult=1.5, ch_div=8, **kwargs)
@register_model
def rexnetr_200(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 2.0x w/ rounded (mod 8) channels"""
return _create_rexnet('rexnetr_200', pretrained, width_mult=2.0, ch_div=8, **kwargs)
@register_model
def rexnetr_300(pretrained=False, **kwargs) -> RexNet:
"""ReXNet V1 3.0x w/ rounded (mod 16) channels"""
return _create_rexnet('rexnetr_300', pretrained, width_mult=3.0, ch_div=16, **kwargs)
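# Example usage (illustrative):
#   import timm, torch
#   model = timm.create_model('rexnet_100', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # -> [1, 1000] with the default cfg above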
| pytorch-image-models/timm/models/rexnet.py/0 | {
"file_path": "pytorch-image-models/timm/models/rexnet.py",
"repo_id": "pytorch-image-models",
"token_count": 5784
} | 199 |
""" Relative Position Vision Transformer (ViT) in PyTorch
NOTE: these models are experimental / WIP, expect changes
Hacked together by / Copyright 2022, Ross Wightman
"""
import logging
import math
from functools import partial
from typing import Optional, Tuple
import torch
import torch.nn as nn
from torch.jit import Final
from torch.utils.checkpoint import checkpoint
from timm.data import IMAGENET_INCEPTION_MEAN, IMAGENET_INCEPTION_STD
from timm.layers import PatchEmbed, Mlp, DropPath, RelPosMlp, RelPosBias, use_fused_attn
from ._builder import build_model_with_cfg
from ._registry import generate_default_cfgs, register_model
__all__ = ['VisionTransformerRelPos'] # model_registry will add each entrypoint fn to this
_logger = logging.getLogger(__name__)
class RelPosAttention(nn.Module):
fused_attn: Final[bool]
def __init__(
self,
dim,
num_heads=8,
qkv_bias=False,
qk_norm=False,
rel_pos_cls=None,
attn_drop=0.,
proj_drop=0.,
norm_layer=nn.LayerNorm,
):
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.fused_attn = use_fused_attn()
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.q_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.k_norm = norm_layer(self.head_dim) if qk_norm else nn.Identity()
self.rel_pos = rel_pos_cls(num_heads=num_heads) if rel_pos_cls else None
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, self.head_dim).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
q = self.q_norm(q)
k = self.k_norm(k)
if self.fused_attn:
if self.rel_pos is not None:
attn_bias = self.rel_pos.get_bias()
elif shared_rel_pos is not None:
attn_bias = shared_rel_pos
else:
attn_bias = None
x = torch.nn.functional.scaled_dot_product_attention(
q, k, v,
attn_mask=attn_bias,
dropout_p=self.attn_drop.p if self.training else 0.,
)
else:
q = q * self.scale
attn = q @ k.transpose(-2, -1)
if self.rel_pos is not None:
attn = self.rel_pos(attn, shared_rel_pos=shared_rel_pos)
elif shared_rel_pos is not None:
attn = attn + shared_rel_pos
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = attn @ v
x = x.transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class RelPosBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_norm=False,
rel_pos_cls=None,
init_values=None,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = RelPosAttention(
dim,
num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
rel_pos_cls=rel_pos_cls,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x), shared_rel_pos=shared_rel_pos)))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
class ResPostRelPosBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
qk_norm=False,
rel_pos_cls=None,
init_values=None,
proj_drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
):
super().__init__()
self.init_values = init_values
self.attn = RelPosAttention(
dim,
num_heads,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
rel_pos_cls=rel_pos_cls,
attn_drop=attn_drop,
proj_drop=proj_drop,
)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = Mlp(
in_features=dim,
hidden_features=int(dim * mlp_ratio),
act_layer=act_layer,
drop=proj_drop,
)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.init_weights()
def init_weights(self):
# NOTE this init overrides that base model init with specific changes for the block type
if self.init_values is not None:
nn.init.constant_(self.norm1.weight, self.init_values)
nn.init.constant_(self.norm2.weight, self.init_values)
def forward(self, x, shared_rel_pos: Optional[torch.Tensor] = None):
x = x + self.drop_path1(self.norm1(self.attn(x, shared_rel_pos=shared_rel_pos)))
x = x + self.drop_path2(self.norm2(self.mlp(x)))
return x
class VisionTransformerRelPos(nn.Module):
""" Vision Transformer w/ Relative Position Bias
Differing from classic vit, this impl
* uses relative position index (swin v1 / beit) or relative log coord + mlp (swin v2) pos embed
* defaults to no class token (can be enabled)
* defaults to global avg pool for head (can be changed)
* layer-scale (residual branch gain) enabled
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='avg',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=True,
qk_norm=False,
init_values=1e-6,
class_token=False,
fc_norm=False,
rel_pos_type='mlp',
rel_pos_dim=None,
shared_rel_pos=False,
drop_rate=0.,
proj_drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
weight_init='skip',
embed_layer=PatchEmbed,
norm_layer=None,
act_layer=None,
block_fn=RelPosBlock
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
global_pool (str): type of global pooling for final sequence (default: 'avg')
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
qk_norm (bool): Enable normalization of query and key in attention
            init_values (float): layer-scale init values
            class_token (bool): use class token (default: False)
            fc_norm (bool): use pre classifier norm instead of pre-pool
            rel_pos_type (str): type of relative position ('mlp' variants use RelPosMlp, otherwise RelPosBias)
            rel_pos_dim (Optional[int]): hidden dim override for the relative position MLP
            shared_rel_pos (bool): share relative pos across all blocks
drop_rate (float): dropout rate
proj_drop_rate (float): projection dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
weight_init (str): weight init scheme
embed_layer (nn.Module): patch embedding layer
            norm_layer (nn.Module): normalization layer
            act_layer (nn.Module): MLP activation layer
"""
super().__init__()
assert global_pool in ('', 'avg', 'token')
assert class_token or global_pool != 'token'
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1 if class_token else 0
self.grad_checkpointing = False
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
)
feat_size = self.patch_embed.grid_size
rel_pos_args = dict(window_size=feat_size, prefix_tokens=self.num_prefix_tokens)
if rel_pos_type.startswith('mlp'):
if rel_pos_dim:
rel_pos_args['hidden_dim'] = rel_pos_dim
if 'swin' in rel_pos_type:
rel_pos_args['mode'] = 'swin'
rel_pos_cls = partial(RelPosMlp, **rel_pos_args)
else:
rel_pos_cls = partial(RelPosBias, **rel_pos_args)
self.shared_rel_pos = None
if shared_rel_pos:
self.shared_rel_pos = rel_pos_cls(num_heads=num_heads)
# NOTE shared rel pos currently mutually exclusive w/ per-block, but could support both...
rel_pos_cls = None
self.cls_token = nn.Parameter(torch.zeros(1, self.num_prefix_tokens, embed_dim)) if class_token else None
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.ModuleList([
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
qk_norm=qk_norm,
rel_pos_cls=rel_pos_cls,
init_values=init_values,
proj_drop=proj_drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
)
for i in range(depth)])
self.norm = norm_layer(embed_dim) if not fc_norm else nn.Identity()
# Classifier Head
self.fc_norm = norm_layer(embed_dim) if fc_norm else nn.Identity()
self.head_drop = nn.Dropout(drop_rate)
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if weight_init != 'skip':
self.init_weights(weight_init)
def init_weights(self, mode=''):
assert mode in ('jax', 'moco', '')
if self.cls_token is not None:
nn.init.normal_(self.cls_token, std=1e-6)
        # FIXME weight init scheme using PyTorch defaults currently
#named_apply(get_init_weights_vit(mode, head_bias), self)
@torch.jit.ignore
def no_weight_decay(self):
return {'cls_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes: int, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'token')
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def forward_features(self, x):
x = self.patch_embed(x)
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
shared_rel_pos = self.shared_rel_pos.get_bias() if self.shared_rel_pos is not None else None
for blk in self.blocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint(blk, x, shared_rel_pos=shared_rel_pos)
else:
x = blk(x, shared_rel_pos=shared_rel_pos)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
x = self.head_drop(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
def _create_vision_transformer_relpos(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
model = build_model_with_cfg(VisionTransformerRelPos, variant, pretrained, **kwargs)
return model
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
'crop_pct': .9, 'interpolation': 'bicubic', 'fixed_input_size': True,
'mean': IMAGENET_INCEPTION_MEAN, 'std': IMAGENET_INCEPTION_STD,
'first_conv': 'patch_embed.proj', 'classifier': 'head',
**kwargs
}
default_cfgs = generate_default_cfgs({
'vit_relpos_base_patch32_plus_rpn_256.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_replos_base_patch32_plus_rpn_256-sw-dd486f51.pth',
hf_hub_id='timm/',
input_size=(3, 256, 256)),
'vit_relpos_base_patch16_plus_240.untrained': _cfg(url='', input_size=(3, 240, 240)),
'vit_relpos_small_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_small_patch16_224-sw-ec2778b4.pth',
hf_hub_id='timm/'),
'vit_relpos_medium_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_224-sw-11c174af.pth',
hf_hub_id='timm/'),
'vit_relpos_base_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_224-sw-49049aed.pth',
hf_hub_id='timm/'),
'vit_srelpos_small_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_small_patch16_224-sw-6cdb8849.pth',
hf_hub_id='timm/'),
'vit_srelpos_medium_patch16_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_srelpos_medium_patch16_224-sw-ad702b8c.pth',
hf_hub_id='timm/'),
'vit_relpos_medium_patch16_cls_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_cls_224-sw-cfe8e259.pth',
hf_hub_id='timm/'),
'vit_relpos_base_patch16_cls_224.untrained': _cfg(),
'vit_relpos_base_patch16_clsgap_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_base_patch16_gapcls_224-sw-1a341d6c.pth',
hf_hub_id='timm/'),
'vit_relpos_small_patch16_rpn_224.untrained': _cfg(),
'vit_relpos_medium_patch16_rpn_224.sw_in1k': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-tpu-weights/vit_relpos_medium_patch16_rpn_224-sw-5d2befd8.pth',
hf_hub_id='timm/'),
'vit_relpos_base_patch16_rpn_224.untrained': _cfg(),
})
@register_model
def vit_relpos_base_patch32_plus_rpn_256(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/32+) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(patch_size=32, embed_dim=896, depth=12, num_heads=14, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch32_plus_rpn_256', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_plus_240(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16+) w/ relative log-coord position, no class token
"""
model_args = dict(patch_size=16, embed_dim=896, depth=12, num_heads=14)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_plus_240', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, no class token
"""
model_args = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=True)
model = _create_vision_transformer_relpos(
'vit_relpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=True)
model = _create_vision_transformer_relpos(
'vit_relpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_srelpos_small_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, fc_norm=False,
rel_pos_dim=384, shared_rel_pos=True)
model = _create_vision_transformer_relpos(
'vit_srelpos_small_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_srelpos_medium_patch16_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ shared relative log-coord position, no class token
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False,
rel_pos_dim=512, shared_rel_pos=True)
model = _create_vision_transformer_relpos(
'vit_srelpos_medium_patch16_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_medium_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-M/16) w/ relative log-coord position, class token present
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, fc_norm=False,
rel_pos_dim=256, class_token=True, global_pool='token')
model = _create_vision_transformer_relpos(
'vit_relpos_medium_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_cls_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, class token present
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, class_token=True, global_pool='token')
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_cls_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_clsgap_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position, class token present
NOTE this config is a bit of a mistake, class token was enabled but global avg-pool w/ fc-norm was not disabled
Leaving here for comparisons w/ a future re-train as it performs quite well.
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, fc_norm=True, class_token=True)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_clsgap_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_small_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(
patch_size=16, embed_dim=384, depth=12, num_heads=6, qkv_bias=False, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_small_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_medium_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(
patch_size=16, embed_dim=512, depth=12, num_heads=8, qkv_bias=False, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_medium_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
@register_model
def vit_relpos_base_patch16_rpn_224(pretrained=False, **kwargs) -> VisionTransformerRelPos:
""" ViT-Base (ViT-B/16) w/ relative log-coord position and residual post-norm, no class token
"""
model_args = dict(
patch_size=16, embed_dim=768, depth=12, num_heads=12, qkv_bias=False, block_fn=ResPostRelPosBlock)
model = _create_vision_transformer_relpos(
'vit_relpos_base_patch16_rpn_224', pretrained=pretrained, **dict(model_args, **kwargs))
return model
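# Example usage (illustrative):
#   import timm, torch
#   model = timm.create_model('vit_relpos_base_patch16_224', pretrained=False)
#   logits = model(torch.randn(1, 3, 224, 224))  # default cfgs above use a fixed 224x224 input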
| pytorch-image-models/timm/models/vision_transformer_relpos.py/0 | {
"file_path": "pytorch-image-models/timm/models/vision_transformer_relpos.py",
"repo_id": "pytorch-image-models",
"token_count": 11278
} | 200 |
""" Lion Optimizer
Paper: `Symbolic Discovery of Optimization Algorithms` - https://arxiv.org/abs/2302.06675
Original Impl: https://github.com/google/automl/tree/master/lion
"""
# Copyright 2023 Google Research. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from typing import List
import torch
from torch.optim.optimizer import Optimizer
class Lion(Optimizer):
r"""Implements Lion algorithm."""
def __init__(
self,
params,
lr=1e-4,
betas=(0.9, 0.99),
weight_decay=0.0,
maximize=False,
foreach=None,
):
"""Initialize the hyperparameters.
Args:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-4)
            betas (Tuple[float, float], optional): beta1 interpolates the gradient and momentum
                for the sign update, beta2 is the momentum EMA decay (default: (0.9, 0.99))
weight_decay (float, optional): weight decay coefficient (default: 0)
"""
if not 0.0 <= lr:
raise ValueError('Invalid learning rate: {}'.format(lr))
if not 0.0 <= betas[0] < 1.0:
raise ValueError('Invalid beta parameter at index 0: {}'.format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError('Invalid beta parameter at index 1: {}'.format(betas[1]))
defaults = dict(
lr=lr,
betas=betas,
weight_decay=weight_decay,
foreach=foreach,
maximize=maximize,
)
super().__init__(params, defaults)
def __setstate__(self, state):
super().__setstate__(state)
for group in self.param_groups:
group.setdefault('maximize', False)
group.setdefault('foreach', None)
@torch.no_grad()
def step(self, closure=None):
"""Performs a single optimization step.
Args:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
Returns:
the loss.
"""
loss = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
params_with_grad = []
grads = []
exp_avgs = []
beta1, beta2 = group['betas']
for p in group['params']:
if p.grad is None:
continue
params_with_grad.append(p)
if p.grad.is_sparse:
raise RuntimeError('Lion does not support sparse gradients')
grads.append(p.grad)
state = self.state[p]
# State initialization
if len(state) == 0:
state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
exp_avgs.append(state['exp_avg'])
lion(
params_with_grad,
grads,
exp_avgs,
beta1=beta1,
beta2=beta2,
lr=group['lr'],
weight_decay=group['weight_decay'],
maximize=group['maximize'],
foreach=group['foreach'],
)
return loss
def lion(
params: List[torch.Tensor],
grads: List[torch.Tensor],
exp_avgs: List[torch.Tensor],
# kwonly args with defaults are not supported by functions compiled with torchscript issue #70627
# setting this as kwarg for now as functional API is compiled by torch/distributed/optim
maximize: bool = False,
foreach: bool = None,
*,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
):
r"""Functional API that performs Lion algorithm computation.
"""
if foreach is None:
# Placeholder for more complex foreach logic to be added when value is not set
foreach = False
if foreach and torch.jit.is_scripting():
raise RuntimeError('torch.jit.script not supported with foreach optimizers')
if foreach and not torch.jit.is_scripting():
func = _multi_tensor_lion
else:
func = _single_tensor_lion
func(
params,
grads,
exp_avgs,
beta1=beta1,
beta2=beta2,
lr=lr,
weight_decay=weight_decay,
maximize=maximize,
)
def _single_tensor_lion(
params: List[torch.Tensor],
grads: List[torch.Tensor],
exp_avgs: List[torch.Tensor],
*,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
maximize: bool,
):
for i, param in enumerate(params):
grad = grads[i] if not maximize else -grads[i]
exp_avg = exp_avgs[i]
if torch.is_complex(param):
grad = torch.view_as_real(grad)
exp_avg = torch.view_as_real(exp_avg)
param = torch.view_as_real(param)
# Perform stepweight decay
param.mul_(1 - lr * weight_decay)
# Weight update
update = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1)
param.add_(torch.sign(update), alpha=-lr)
# Decay the momentum running average coefficient
exp_avg.lerp_(grad, 1 - beta2)
def _multi_tensor_lion(
params: List[torch.Tensor],
grads: List[torch.Tensor],
exp_avgs: List[torch.Tensor],
*,
beta1: float,
beta2: float,
lr: float,
weight_decay: float,
maximize: bool,
):
if len(params) == 0:
return
if maximize:
grads = torch._foreach_neg(tuple(grads)) # type: ignore[assignment]
grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads]
exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs]
params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params]
# Perform stepweight decay
torch._foreach_mul_(params, 1 - lr * weight_decay)
# Weight update
updates = torch._foreach_mul(exp_avgs, beta1)
torch._foreach_add_(updates, grads, alpha=1 - beta1)
updates = [u.sign() for u in updates]
torch._foreach_add_(params, updates, alpha=-lr)
# Decay the momentum running average coefficient
torch._foreach_mul_(exp_avgs, beta2)
torch._foreach_add_(exp_avgs, grads, alpha=1 - beta2)
| pytorch-image-models/timm/optim/lion.py/0 | {
"file_path": "pytorch-image-models/timm/optim/lion.py",
"repo_id": "pytorch-image-models",
"token_count": 3257
} | 201 |
import abc
from abc import ABC
from typing import Any, Dict, Optional
import torch
class Scheduler(ABC):
""" Parameter Scheduler Base Class
A scheduler base class that can be used to schedule any optimizer parameter groups.
Unlike the builtin PyTorch schedulers, this is intended to be consistently called
* At the END of each epoch, before incrementing the epoch count, to calculate next epoch's value
* At the END of each optimizer update, after incrementing the update count, to calculate next update's value
The schedulers built on this should try to remain as stateless as possible (for simplicity).
    This family of schedulers attempts to avoid the confusion around the meaning of 'last_epoch'
    and -1 values for special behaviour. All epoch and update counts must be tracked in the training
code and explicitly passed in to the schedulers on the corresponding step or step_update call.
Based on ideas from:
* https://github.com/pytorch/fairseq/tree/master/fairseq/optim/lr_scheduler
* https://github.com/allenai/allennlp/tree/master/allennlp/training/learning_rate_schedulers
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
param_group_field: str,
t_in_epochs: bool = True,
noise_range_t=None,
noise_type='normal',
noise_pct=0.67,
noise_std=1.0,
noise_seed=None,
initialize: bool = True,
) -> None:
self.optimizer = optimizer
self.param_group_field = param_group_field
self._initial_param_group_field = f"initial_{param_group_field}"
if initialize:
for i, group in enumerate(self.optimizer.param_groups):
if param_group_field not in group:
raise KeyError(f"{param_group_field} missing from param_groups[{i}]")
group.setdefault(self._initial_param_group_field, group[param_group_field])
else:
for i, group in enumerate(self.optimizer.param_groups):
if self._initial_param_group_field not in group:
raise KeyError(f"{self._initial_param_group_field} missing from param_groups[{i}]")
self.base_values = [group[self._initial_param_group_field] for group in self.optimizer.param_groups]
self.metric = None # any point to having this for all?
self.t_in_epochs = t_in_epochs
self.noise_range_t = noise_range_t
self.noise_pct = noise_pct
self.noise_type = noise_type
self.noise_std = noise_std
self.noise_seed = noise_seed if noise_seed is not None else 42
self.update_groups(self.base_values)
def state_dict(self) -> Dict[str, Any]:
return {key: value for key, value in self.__dict__.items() if key != 'optimizer'}
def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
self.__dict__.update(state_dict)
@abc.abstractmethod
def _get_lr(self, t: int) -> float:
pass
def _get_values(self, t: int, on_epoch: bool = True) -> Optional[float]:
proceed = (on_epoch and self.t_in_epochs) or (not on_epoch and not self.t_in_epochs)
if not proceed:
return None
return self._get_lr(t)
def step(self, epoch: int, metric: float = None) -> None:
self.metric = metric
values = self._get_values(epoch, on_epoch=True)
if values is not None:
values = self._add_noise(values, epoch)
self.update_groups(values)
def step_update(self, num_updates: int, metric: float = None):
self.metric = metric
values = self._get_values(num_updates, on_epoch=False)
if values is not None:
values = self._add_noise(values, num_updates)
self.update_groups(values)
def update_groups(self, values):
if not isinstance(values, (list, tuple)):
values = [values] * len(self.optimizer.param_groups)
for param_group, value in zip(self.optimizer.param_groups, values):
if 'lr_scale' in param_group:
param_group[self.param_group_field] = value * param_group['lr_scale']
else:
param_group[self.param_group_field] = value
def _add_noise(self, lrs, t):
if self._is_apply_noise(t):
noise = self._calculate_noise(t)
lrs = [v + v * noise for v in lrs]
return lrs
def _is_apply_noise(self, t) -> bool:
"""Return True if scheduler in noise range."""
apply_noise = False
if self.noise_range_t is not None:
if isinstance(self.noise_range_t, (list, tuple)):
apply_noise = self.noise_range_t[0] <= t < self.noise_range_t[1]
else:
apply_noise = t >= self.noise_range_t
return apply_noise
def _calculate_noise(self, t) -> float:
g = torch.Generator()
g.manual_seed(self.noise_seed + t)
if self.noise_type == 'normal':
while True:
# resample if noise out of percent limit, brute force but shouldn't spin much
noise = torch.randn(1, generator=g).item()
if abs(noise) < self.noise_pct:
return noise
else:
noise = 2 * (torch.rand(1, generator=g).item() - 0.5) * self.noise_pct
return noise
| pytorch-image-models/timm/scheduler/scheduler.py/0 | {
"file_path": "pytorch-image-models/timm/scheduler/scheduler.py",
"repo_id": "pytorch-image-models",
"token_count": 2361
} | 202 |
""" Exponential Moving Average (EMA) of model updates
Hacked together by / Copyright 2020 Ross Wightman
"""
import logging
from collections import OrderedDict
from copy import deepcopy
import torch
import torch.nn as nn
_logger = logging.getLogger(__name__)
class ModelEma:
""" Model Exponential Moving Average (DEPRECATED)
Keep a moving average of everything in the model state_dict (parameters and buffers).
    This version is deprecated: it does not work with scripted models and will be removed eventually.
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
    This class is sensitive to where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, device='', resume=''):
# make a copy of the model for accumulating moving average of weights
self.ema = deepcopy(model)
self.ema.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if device:
self.ema.to(device=device)
self.ema_has_module = hasattr(self.ema, 'module')
if resume:
self._load_checkpoint(resume)
for p in self.ema.parameters():
p.requires_grad_(False)
def _load_checkpoint(self, checkpoint_path):
checkpoint = torch.load(checkpoint_path, map_location='cpu')
assert isinstance(checkpoint, dict)
if 'state_dict_ema' in checkpoint:
new_state_dict = OrderedDict()
for k, v in checkpoint['state_dict_ema'].items():
# ema model may have been wrapped by DataParallel, and need module prefix
if self.ema_has_module:
name = 'module.' + k if not k.startswith('module') else k
else:
name = k
new_state_dict[name] = v
self.ema.load_state_dict(new_state_dict)
_logger.info("Loaded state_dict_ema")
else:
_logger.warning("Failed to find state_dict_ema, starting from loaded model weights")
def update(self, model):
# correct a mismatch in state dict keys
needs_module = hasattr(model, 'module') and not self.ema_has_module
with torch.no_grad():
msd = model.state_dict()
for k, ema_v in self.ema.state_dict().items():
if needs_module:
k = 'module.' + k
model_v = msd[k].detach()
if self.device:
model_v = model_v.to(device=self.device)
ema_v.copy_(ema_v * self.decay + (1. - self.decay) * model_v)
class ModelEmaV2(nn.Module):
""" Model Exponential Moving Average V2
Keep a moving average of everything in the model state_dict (parameters and buffers).
V2 of this module is simpler, it does not match params/buffers based on name but simply
iterates in order. It works with torchscript (JIT of full model).
This is intended to allow functionality like
https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage
A smoothed version of the weights is necessary for some training schemes to perform well.
    E.g. Google's hyper-params for training MNASNet, MobileNet-V3, EfficientNet, etc. that use
    RMSprop with a short 2.4-3 epoch decay period and slow LR decay rate of .96-.99 require EMA
smoothing of weights to match results. Pay attention to the decay constant you are using
relative to your update count per epoch.
To keep EMA from using GPU resources, set device='cpu'. This will save a bit of memory but
disable validation of the EMA weights. Validation will have to be done manually in a separate
process, or after the training stops converging.
    This class is sensitive to where it is initialized in the sequence of model init,
    GPU assignment and distributed training wrappers.
"""
def __init__(self, model, decay=0.9999, device=None):
super(ModelEmaV2, self).__init__()
# make a copy of the model for accumulating moving average of weights
self.module = deepcopy(model)
self.module.eval()
self.decay = decay
self.device = device # perform ema on different device from model if set
if self.device is not None:
self.module.to(device=device)
def _update(self, model, update_fn):
with torch.no_grad():
for ema_v, model_v in zip(self.module.state_dict().values(), model.state_dict().values()):
if self.device is not None:
model_v = model_v.to(device=self.device)
ema_v.copy_(update_fn(ema_v, model_v))
def update(self, model):
self._update(model, update_fn=lambda e, m: self.decay * e + (1. - self.decay) * m)
def set(self, model):
self._update(model, update_fn=lambda e, m: m)
| pytorch-image-models/timm/utils/model_ema.py/0 | {
"file_path": "pytorch-image-models/timm/utils/model_ema.py",
"repo_id": "pytorch-image-models",
"token_count": 2112
} | 203 |
use crate::app::Data;
use tabled::settings::Merge;
use tabled::{builder::Builder, settings::Style, Table};
#[allow(clippy::too_many_arguments)]
pub(crate) fn parameters_table(
tokenizer_name: String,
sequence_length: u32,
decode_length: u32,
top_n_tokens: Option<u32>,
n_runs: usize,
warmups: usize,
temperature: Option<f32>,
top_k: Option<u32>,
top_p: Option<f32>,
typical_p: Option<f32>,
repetition_penalty: Option<f32>,
watermark: bool,
do_sample: bool,
) -> Table {
let mut builder = Builder::default();
builder.set_header(["Parameter", "Value"]);
builder.push_record(["Model", &tokenizer_name]);
builder.push_record(["Sequence Length", &sequence_length.to_string()]);
builder.push_record(["Decode Length", &decode_length.to_string()]);
builder.push_record(["Top N Tokens", &format!("{top_n_tokens:?}")]);
builder.push_record(["N Runs", &n_runs.to_string()]);
builder.push_record(["Warmups", &warmups.to_string()]);
builder.push_record(["Temperature", &format!("{temperature:?}")]);
builder.push_record(["Top K", &format!("{top_k:?}")]);
builder.push_record(["Top P", &format!("{top_p:?}")]);
builder.push_record(["Typical P", &format!("{typical_p:?}")]);
builder.push_record(["Repetition Penalty", &format!("{repetition_penalty:?}")]);
builder.push_record(["Watermark", &watermark.to_string()]);
builder.push_record(["Do Sample", &do_sample.to_string()]);
let mut table = builder.build();
table.with(Style::markdown());
table
}
pub(crate) fn latency_table(data: &Data) -> Table {
let mut builder = Builder::default();
builder.set_header([
"Step",
"Batch Size",
"Average",
"Lowest",
"Highest",
"p50",
"p90",
"p99",
]);
add_latencies(
&mut builder,
"Prefill",
&data.batch_size,
&data.prefill_latencies,
);
add_latencies(
&mut builder,
"Decode (token)",
&data.batch_size,
&data.decode_token_latencies,
);
add_latencies(
&mut builder,
"Decode (total)",
&data.batch_size,
&data.decode_latencies,
);
let mut table = builder.build();
table.with(Style::markdown()).with(Merge::vertical());
table
}
pub(crate) fn throughput_table(data: &Data) -> Table {
let mut builder = Builder::default();
builder.set_header(["Step", "Batch Size", "Average", "Lowest", "Highest"]);
    add_throughputs(
&mut builder,
"Prefill",
&data.batch_size,
&data.prefill_throughputs,
);
    add_throughputs(
&mut builder,
"Decode",
&data.batch_size,
&data.decode_throughputs,
);
let mut table = builder.build();
table.with(Style::markdown()).with(Merge::vertical());
table
}
fn add_latencies(
builder: &mut Builder,
step: &'static str,
batch_size: &[u32],
batch_latencies: &[Vec<f64>],
) {
for (i, b) in batch_size.iter().enumerate() {
let latencies = &batch_latencies[i];
let (avg, min, max) = avg_min_max(latencies);
let row = [
step,
&b.to_string(),
&format_value(avg, "ms"),
&format_value(min, "ms"),
&format_value(max, "ms"),
&format_value(px(latencies, 50), "ms"),
&format_value(px(latencies, 90), "ms"),
&format_value(px(latencies, 99), "ms"),
];
builder.push_record(row);
}
}
fn add_throughputs(
builder: &mut Builder,
step: &'static str,
batch_size: &[u32],
batch_throughputs: &[Vec<f64>],
) {
for (i, b) in batch_size.iter().enumerate() {
let throughputs = &batch_throughputs[i];
let (avg, min, max) = avg_min_max(throughputs);
let row = [
step,
&b.to_string(),
&format_value(avg, "tokens/secs"),
&format_value(min, "tokens/secs"),
&format_value(max, "tokens/secs"),
];
builder.push_record(row);
}
}
fn avg_min_max(data: &Vec<f64>) -> (f64, f64, f64) {
let average = data.iter().sum::<f64>() / data.len() as f64;
let min = data
.iter()
.min_by(|a, b| a.total_cmp(b))
.unwrap_or(&std::f64::NAN);
let max = data
.iter()
.max_by(|a, b| a.total_cmp(b))
.unwrap_or(&std::f64::NAN);
(average, *min, *max)
}
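// Nearest-index percentile: returns the element at position p% of the vector's length,
// which is a meaningful percentile only if the values are already sorted.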
fn px(data: &Vec<f64>, p: u32) -> f64 {
let i = (f64::from(p) / 100.0 * data.len() as f64) as usize;
*data.get(i).unwrap_or(&std::f64::NAN)
}
fn format_value(value: f64, unit: &'static str) -> String {
format!("{:.2} {unit}", value)
}
| text-generation-inference/benchmark/src/table.rs/0 | {
"file_path": "text-generation-inference/benchmark/src/table.rs",
"repo_id": "text-generation-inference",
"token_count": 2255
} | 204 |
from enum import Enum
from pydantic import BaseModel, validator
from typing import Optional, List
from text_generation.errors import ValidationError
class Parameters(BaseModel):
# Activate logits sampling
do_sample: bool = False
# Maximum number of generated tokens
max_new_tokens: int = 20
# The parameter for repetition penalty. 1.0 means no penalty.
# See [this paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
repetition_penalty: Optional[float] = None
# Whether to prepend the prompt to the generated text
return_full_text: bool = False
# Stop generating tokens if a member of `stop_sequences` is generated
stop: List[str] = []
# Random sampling seed
seed: Optional[int] = None
    # The value used to modulate the logits distribution.
temperature: Optional[float] = None
# The number of highest probability vocabulary tokens to keep for top-k-filtering.
top_k: Optional[int] = None
# If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
# higher are kept for generation.
top_p: Optional[float] = None
    # Truncate input tokens to the given size
truncate: Optional[int] = None
# Typical Decoding mass
# See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information
typical_p: Optional[float] = None
    # Generate best_of sequences and return the one with the highest token logprobs
best_of: Optional[int] = None
# Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
watermark: bool = False
# Get generation details
details: bool = False
# Get decoder input token logprobs and ids
decoder_input_details: bool = False
# Return the N most likely tokens at each step
top_n_tokens: Optional[int] = None
@validator("best_of")
def valid_best_of(cls, field_value, values):
if field_value is not None:
if field_value <= 0:
raise ValidationError("`best_of` must be strictly positive")
if field_value > 1 and values["seed"] is not None:
raise ValidationError("`seed` must not be set when `best_of` is > 1")
sampling = (
values["do_sample"]
| (values["temperature"] is not None)
| (values["top_k"] is not None)
| (values["top_p"] is not None)
| (values["typical_p"] is not None)
)
if field_value > 1 and not sampling:
raise ValidationError("you must use sampling when `best_of` is > 1")
return field_value
@validator("repetition_penalty")
def valid_repetition_penalty(cls, v):
if v is not None and v <= 0:
raise ValidationError("`repetition_penalty` must be strictly positive")
return v
@validator("seed")
def valid_seed(cls, v):
if v is not None and v < 0:
raise ValidationError("`seed` must be positive")
return v
@validator("temperature")
def valid_temp(cls, v):
if v is not None and v <= 0:
raise ValidationError("`temperature` must be strictly positive")
return v
@validator("top_k")
def valid_top_k(cls, v):
if v is not None and v <= 0:
raise ValidationError("`top_k` must be strictly positive")
return v
@validator("top_p")
def valid_top_p(cls, v):
if v is not None and (v <= 0 or v >= 1.0):
raise ValidationError("`top_p` must be > 0.0 and < 1.0")
return v
@validator("truncate")
def valid_truncate(cls, v):
if v is not None and v <= 0:
raise ValidationError("`truncate` must be strictly positive")
return v
@validator("typical_p")
def valid_typical_p(cls, v):
if v is not None and (v <= 0 or v >= 1.0):
raise ValidationError("`typical_p` must be > 0.0 and < 1.0")
return v
@validator("top_n_tokens")
def valid_top_n_tokens(cls, v):
if v is not None and v <= 0:
raise ValidationError("`top_n_tokens` must be strictly positive")
return v
class Request(BaseModel):
# Prompt
inputs: str
# Generation parameters
parameters: Optional[Parameters] = None
# Whether to stream output tokens
stream: bool = False
@validator("inputs")
def valid_input(cls, v):
if not v:
raise ValidationError("`inputs` cannot be empty")
return v
@validator("stream")
def valid_best_of_stream(cls, field_value, values):
parameters = values["parameters"]
if (
parameters is not None
and parameters.best_of is not None
and parameters.best_of > 1
and field_value
):
raise ValidationError(
"`best_of` != 1 is not supported when `stream` == True"
)
return field_value
# Decoder input tokens
class InputToken(BaseModel):
# Token ID from the model tokenizer
id: int
# Token text
text: str
# Logprob
# Optional since the logprob of the first token cannot be computed
logprob: Optional[float] = None
# Generated tokens
class Token(BaseModel):
# Token ID from the model tokenizer
id: int
# Token text
text: str
# Logprob
logprob: float
# Is the token a special token
# Can be used to ignore tokens when concatenating
special: bool
# Generation finish reason
class FinishReason(str, Enum):
# number of generated tokens == `max_new_tokens`
Length = "length"
# the model generated its end of sequence token
EndOfSequenceToken = "eos_token"
# the model generated a text included in `stop_sequences`
StopSequence = "stop_sequence"
# Additional sequences when using the `best_of` parameter
class BestOfSequence(BaseModel):
# Generated text
generated_text: str
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int] = None
# Decoder input tokens, empty if decoder_input_details is False
prefill: List[InputToken]
# Generated tokens
tokens: List[Token]
# Most likely tokens
top_tokens: Optional[List[List[Token]]] = None
# `generate` details
class Details(BaseModel):
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int] = None
# Decoder input tokens, empty if decoder_input_details is False
prefill: List[InputToken]
# Generated tokens
tokens: List[Token]
# Most likely tokens
top_tokens: Optional[List[List[Token]]] = None
# Additional sequences when using the `best_of` parameter
best_of_sequences: Optional[List[BestOfSequence]] = None
# `generate` return value
class Response(BaseModel):
# Generated text
generated_text: str
# Generation details
details: Details
# `generate_stream` details
class StreamDetails(BaseModel):
# Generation finish reason
finish_reason: FinishReason
# Number of generated tokens
generated_tokens: int
# Sampling seed if sampling was activated
seed: Optional[int] = None
# `generate_stream` return value
class StreamResponse(BaseModel):
# Generated token
token: Token
# Most likely tokens
top_tokens: Optional[List[Token]] = None
# Complete generated text
# Only available when the generation is finished
generated_text: Optional[str] = None
# Generation details
# Only available when the generation is finished
details: Optional[StreamDetails] = None
# Inference API currently deployed model
class DeployedModel(BaseModel):
model_id: str
sha: str
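# Illustrative example (not part of the API): building a sampling request from these models.
#
#   parameters = Parameters(max_new_tokens=32, do_sample=True, temperature=0.7, top_p=0.95)
#   request = Request(inputs="What is Deep Learning?", parameters=parameters, stream=False)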
| text-generation-inference/clients/python/text_generation/types.py/0 | {
"file_path": "text-generation-inference/clients/python/text_generation/types.py",
"repo_id": "text-generation-inference",
"token_count": 2991
} | 205 |
# Text Generation Inference
Text Generation Inference (TGI) is a toolkit for deploying and serving Large Language Models (LLMs). TGI enables high-performance text generation for the most popular open-source LLMs, including Llama, Falcon, StarCoder, BLOOM, GPT-NeoX, and T5.
![Text Generation Inference](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/TGI.png)
Text Generation Inference implements many optimizations and features, such as:
- Simple launcher to serve most popular LLMs
- Production ready (distributed tracing with Open Telemetry, Prometheus metrics)
- Tensor Parallelism for faster inference on multiple GPUs
- Token streaming using Server-Sent Events (SSE)
- Continuous batching of incoming requests for increased total throughput
- Optimized transformers code for inference using [Flash Attention](https://github.com/HazyResearch/flash-attention) and [Paged Attention](https://github.com/vllm-project/vllm) on the most popular architectures
- Quantization with [bitsandbytes](https://github.com/TimDettmers/bitsandbytes) and [GPT-Q](https://arxiv.org/abs/2210.17323)
- [Safetensors](https://github.com/huggingface/safetensors) weight loading
- Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226)
- Logits warper (temperature scaling, top-p, top-k, repetition penalty)
- Stop sequences
- Log probabilities
- Custom Prompt Generation: Easily generate text by providing custom prompts to guide the model's output.
- Fine-tuning Support: Utilize fine-tuned models for specific tasks to achieve higher accuracy and performance.
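Once a server is running, a quick way to exercise it is a plain HTTP request against the `/generate` endpoint. The sketch below assumes a server is already listening on port 8080; the prompt and `max_new_tokens` value are placeholders.
```bash
curl 127.0.0.1:8080/generate \
    -X POST \
    -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' \
    -H 'Content-Type: application/json'
```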
Text Generation Inference is used in production by multiple projects, such as:
- [Hugging Chat](https://github.com/huggingface/chat-ui), an open-source interface for open-access models, such as Open Assistant and Llama
- [OpenAssistant](https://open-assistant.io/), an open-source community effort to train LLMs in the open
- [nat.dev](http://nat.dev/), a playground to explore and compare LLMs.
| text-generation-inference/docs/source/index.md/0 | {
"file_path": "text-generation-inference/docs/source/index.md",
"repo_id": "text-generation-inference",
"token_count": 544
} | 206 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50,
"logprob": null,
"text": "G"
},
{
"id": 330,
"logprob": -5.96875,
"text": "ir"
},
{
"id": 1622,
"logprob": -5.6132812,
"text": "af"
},
{
"id": 249,
"logprob": -6.5039062,
"text": "at"
},
{
"id": 1480,
"logprob": -8.078125,
"text": "ron"
},
{
"id": 304,
"logprob": -2.3261719,
"text": " is"
},
{
"id": 23866,
"logprob": -9.59375,
"text": " obsessed"
},
{
"id": 335,
"logprob": -0.048339844,
"text": " with"
},
{
"id": 26680,
"logprob": -4.0,
"text": " gir"
},
{
"id": 1903,
"logprob": -0.07556152,
"text": "aff"
},
{
"id": 255,
"logprob": -0.0067749023,
"text": "es"
},
{
"id": 23,
"logprob": -1.546875,
"text": ","
},
{
"id": 248,
"logprob": -4.3320312,
"text": " the"
},
{
"id": 758,
"logprob": -3.734375,
"text": " most"
},
{
"id": 21735,
"logprob": -5.109375,
"text": " glorious"
},
{
"id": 5985,
"logprob": -2.09375,
"text": " animal"
},
{
"id": 313,
"logprob": -1.1835938,
"text": " on"
},
{
"id": 248,
"logprob": -0.77685547,
"text": " the"
},
{
"id": 1936,
"logprob": -2.3828125,
"text": " face"
},
{
"id": 275,
"logprob": -0.004432678,
"text": " of"
},
{
"id": 414,
"logprob": -1.9677734,
"text": " this"
},
{
"id": 6490,
"logprob": -2.046875,
"text": " Earth"
},
{
"id": 25,
"logprob": -0.28198242,
"text": "."
},
{
"id": 401,
"logprob": -7.9179688,
"text": " G"
},
{
"id": 6013,
"logprob": -2.2753906,
"text": "ira"
},
{
"id": 694,
"logprob": -0.6230469,
"text": "ft"
},
{
"id": 1480,
"logprob": -0.20874023,
"text": "ron"
},
{
"id": 9369,
"logprob": -4.5507812,
"text": " believes"
},
{
"id": 455,
"logprob": -4.5664062,
"text": " all"
},
{
"id": 599,
"logprob": -2.7402344,
"text": " other"
},
{
"id": 5632,
"logprob": -0.21948242,
"text": " animals"
},
{
"id": 362,
"logprob": -0.7675781,
"text": " are"
},
{
"id": 23981,
"logprob": -5.0,
"text": " irrelevant"
},
{
"id": 635,
"logprob": -4.234375,
"text": " when"
},
{
"id": 4354,
"logprob": -0.5131836,
"text": " compared"
},
{
"id": 271,
"logprob": -0.103637695,
"text": " to"
},
{
"id": 248,
"logprob": -0.58447266,
"text": " the"
},
{
"id": 21735,
"logprob": -3.6835938,
"text": " glorious"
},
{
"id": 64398,
"logprob": -1.8173828,
"text": " majesty"
},
{
"id": 275,
"logprob": -0.23510742,
"text": " of"
},
{
"id": 248,
"logprob": -0.35473633,
"text": " the"
},
{
"id": 26680,
"logprob": -0.24633789,
"text": " gir"
},
{
"id": 23226,
"logprob": -0.02960205,
"text": "affe"
},
{
"id": 25,
"logprob": -0.17333984,
"text": "."
},
{
"id": 193,
"logprob": -1.3935547,
"text": "\n"
},
{
"id": 23626,
"logprob": -10.0625,
"text": "Daniel"
},
{
"id": 37,
"logprob": -4.59375,
"text": ":"
},
{
"id": 23090,
"logprob": -6.9375,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.99365234,
"text": ","
},
{
"id": 29033,
"logprob": -2.2324219,
"text": " Gir"
},
{
"id": 1622,
"logprob": -0.10809326,
"text": "af"
},
{
"id": 249,
"logprob": -0.042663574,
"text": "at"
},
{
"id": 1480,
"logprob": -0.0024776459,
"text": "ron"
},
{
"id": 12,
"logprob": -1.4277344,
"text": "!"
},
{
"id": 193,
"logprob": -1.1015625,
"text": "\n"
},
{
"id": 50,
"logprob": -0.05709839,
"text": "G"
},
{
"id": 330,
"logprob": -0.13208008,
"text": "ir"
},
{
"id": 1622,
"logprob": -0.0071487427,
"text": "af"
},
{
"id": 249,
"logprob": -0.008468628,
"text": "at"
},
{
"id": 1480,
"logprob": -0.00068998337,
"text": "ron"
},
{
"id": 37,
"logprob": -0.0074691772,
"text": ":"
}
],
"seed": null,
"tokens": [
{
"id": 23090,
"logprob": -1.8251953,
"special": false,
"text": " Hello"
},
{
"id": 23,
"logprob": -0.3173828,
"special": false,
"text": ","
},
{
"id": 8156,
"logprob": -0.23803711,
"special": false,
"text": " Daniel"
},
{
"id": 12,
"logprob": -0.56933594,
"special": false,
"text": "!"
},
{
"id": 193,
"logprob": -0.61279297,
"special": false,
"text": "\n"
},
{
"id": 23626,
"logprob": -0.41967773,
"special": false,
"text": "Daniel"
},
{
"id": 37,
"logprob": -0.0023403168,
"special": false,
"text": ":"
},
{
"id": 1634,
"logprob": -2.0605469,
"special": false,
"text": " What"
},
{
"id": 18,
"logprob": -1.5292969,
"special": false,
"text": "'"
},
{
"id": 94,
"logprob": -0.007904053,
"special": false,
"text": "s"
}
]
},
"generated_text": " Hello, Daniel!\nDaniel: What's"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_falcon/test_flash_falcon.json",
"repo_id": "text-generation-inference",
"token_count": 4604
} | 207 |
[
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.21875,
"text": " your"
},
{
"id": 12315,
"logprob": -9.9375,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.1015625,
"text": " today"
},
{
"id": 32,
"logprob": -0.15319824,
"text": "?"
},
{
"id": 50279,
"logprob": -0.2614746,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8886719,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.98046875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2265625,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3479004,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.0117188,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67871094,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.421875,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7382812,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.051330566,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.1054688,
"text": " your"
},
{
"id": 12315,
"logprob": -9.953125,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.0820312,
"text": " today"
},
{
"id": 32,
"logprob": -0.15148926,
"text": "?"
},
{
"id": 50279,
"logprob": -0.27026367,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.88378906,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.9819336,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2421875,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3474121,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.078125,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.69140625,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.4072266,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7041016,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.053375244,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0351562,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.21875,
"text": " your"
},
{
"id": 12315,
"logprob": -9.9375,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.1015625,
"text": " today"
},
{
"id": 32,
"logprob": -0.15319824,
"text": "?"
},
{
"id": 50279,
"logprob": -0.2614746,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8886719,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.98046875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2265625,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3479004,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.0117188,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67871094,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.421875,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7382812,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.051330566,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
},
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 10,
"prefill": [
{
"id": 50278,
"logprob": null,
"text": "<|USER|>"
},
{
"id": 1276,
"logprob": -4.5546875,
"text": "What"
},
{
"id": 434,
"logprob": -4.234375,
"text": "'s"
},
{
"id": 634,
"logprob": -5.21875,
"text": " your"
},
{
"id": 12315,
"logprob": -9.9375,
"text": " mood"
},
{
"id": 3063,
"logprob": -4.1015625,
"text": " today"
},
{
"id": 32,
"logprob": -0.15319824,
"text": "?"
},
{
"id": 50279,
"logprob": -0.2614746,
"text": "<|ASSISTANT|>"
}
],
"seed": null,
"tokens": [
{
"id": 42,
"logprob": -0.8886719,
"special": false,
"text": "I"
},
{
"id": 1353,
"logprob": -0.98046875,
"special": false,
"text": "'m"
},
{
"id": 417,
"logprob": -2.2265625,
"special": false,
"text": " not"
},
{
"id": 2119,
"logprob": -0.3479004,
"special": false,
"text": " sure"
},
{
"id": 13,
"logprob": -1.0117188,
"special": false,
"text": ","
},
{
"id": 534,
"logprob": -0.67871094,
"special": false,
"text": " which"
},
{
"id": 310,
"logprob": -1.421875,
"special": false,
"text": " is"
},
{
"id": 253,
"logprob": -1.7382812,
"special": false,
"text": " the"
},
{
"id": 1682,
"logprob": -0.051330566,
"special": false,
"text": " best"
},
{
"id": 1039,
"logprob": -2.0390625,
"special": false,
"text": " way"
}
]
},
"generated_text": "I'm not sure, which is the best way"
}
]
| text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox_load.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_flash_neox/test_flash_neox_load.json",
"repo_id": "text-generation-inference",
"token_count": 6308
} | 208 |
{
"details": {
"best_of_sequences": null,
"finish_reason": "length",
"generated_tokens": 17,
"prefill": [
{
"id": 1276,
"logprob": null,
"text": "What"
},
{
"id": 310,
"logprob": -1.5117188,
"text": " is"
},
{
"id": 18147,
"logprob": -8.96875,
"text": " Deep"
},
{
"id": 20727,
"logprob": -1.953125,
"text": " Learning"
},
{
"id": 32,
"logprob": -0.94189453,
"text": "?"
}
],
"seed": null,
"tokens": [
{
"id": 428,
"logprob": -1.5830078,
"special": false,
"text": " -"
},
{
"id": 18147,
"logprob": -3.3105469,
"special": false,
"text": " Deep"
},
{
"id": 20727,
"logprob": -0.3215332,
"special": false,
"text": " Learning"
},
{
"id": 187,
"logprob": -2.5566406,
"special": false,
"text": "\n"
},
{
"id": 30763,
"logprob": -1.6074219,
"special": false,
"text": "Deep"
},
{
"id": 20727,
"logprob": -0.69628906,
"special": false,
"text": " Learning"
},
{
"id": 310,
"logprob": -0.6923828,
"special": false,
"text": " is"
},
{
"id": 247,
"logprob": -0.5263672,
"special": false,
"text": " a"
},
{
"id": 749,
"logprob": -1.8544922,
"special": false,
"text": " sub"
},
{
"id": 3423,
"logprob": -0.6118164,
"special": false,
"text": "field"
},
{
"id": 273,
"logprob": -0.055877686,
"special": false,
"text": " of"
},
{
"id": 5145,
"logprob": -1.0537109,
"special": false,
"text": " machine"
},
{
"id": 4715,
"logprob": -0.0115737915,
"special": false,
"text": " learning"
},
{
"id": 326,
"logprob": -0.9111328,
"special": false,
"text": " that"
},
{
"id": 4648,
"logprob": -1.4589844,
"special": false,
"text": " uses"
},
{
"id": 13345,
"logprob": -1.4853516,
"special": false,
"text": " artificial"
},
{
"id": 11454,
"logprob": -0.021636963,
"special": false,
"text": " neural"
}
]
},
"generated_text": " - Deep Learning\nDeep Learning is a subfield of machine learning that uses artificial neural"
}
| text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt.json/0 | {
"file_path": "text-generation-inference/integration-tests/models/__snapshots__/test_mpt/test_mpt.json",
"repo_id": "text-generation-inference",
"token_count": 1691
} | 209 |
import pytest
@pytest.fixture(scope="module")
def flash_llama_handle(launcher):
with launcher("huggingface/llama-7b", num_shard=2) as handle:
yield handle
@pytest.fixture(scope="module")
async def flash_llama(flash_llama_handle):
await flash_llama_handle.health(300)
return flash_llama_handle.client
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama(flash_llama, response_snapshot):
response = await flash_llama.generate(
"Test request", max_new_tokens=10, decoder_input_details=True
)
assert response.details.generated_tokens == 10
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_all_params(flash_llama, response_snapshot):
response = await flash_llama.generate(
"Test request",
max_new_tokens=10,
repetition_penalty=1.2,
return_full_text=True,
stop_sequences=["test"],
temperature=0.5,
top_p=0.9,
top_k=10,
truncate=5,
typical_p=0.9,
watermark=True,
decoder_input_details=True,
seed=0,
)
assert response.details.generated_tokens == 5
assert response == response_snapshot
@pytest.mark.asyncio
@pytest.mark.private
async def test_flash_llama_load(flash_llama, generate_load, response_snapshot):
responses = await generate_load(flash_llama, "Test request", max_new_tokens=10, n=4)
assert len(responses) == 4
assert all([r.generated_text == responses[0].generated_text for r in responses])
assert responses == response_snapshot
| text-generation-inference/integration-tests/models/test_flash_llama.py/0 | {
"file_path": "text-generation-inference/integration-tests/models/test_flash_llama.py",
"repo_id": "text-generation-inference",
"token_count": 655
} | 210 |
[package]
name = "text-generation-client"
version.workspace = true
edition.workspace = true
authors.workspace = true
homepage.workspace = true
[dependencies]
futures = "^0.3"
grpc-metadata = { path = "../grpc-metadata" }
prost = "^0.12"
thiserror = "^1.0"
tokio = { version = "^1.32", features = ["sync"] }
tonic = "^0.10"
tower = "^0.4"
tracing = "^0.1"
[build-dependencies]
tonic-build = "0.10.1"
prost-build = "0.12.1"
| text-generation-inference/router/client/Cargo.toml/0 | {
"file_path": "text-generation-inference/router/client/Cargo.toml",
"repo_id": "text-generation-inference",
"token_count": 180
} | 211 |
#!/bin/bash
if [[ -z "${HF_MODEL_ID}" ]]; then
echo "HF_MODEL_ID must be set"
exit 1
fi
export MODEL_ID="${HF_MODEL_ID}"
if [[ -n "${HF_MODEL_REVISION}" ]]; then
export REVISION="${HF_MODEL_REVISION}"
fi
if [[ -n "${SM_NUM_GPUS}" ]]; then
export NUM_SHARD="${SM_NUM_GPUS}"
fi
if [[ -n "${HF_MODEL_QUANTIZE}" ]]; then
export QUANTIZE="${HF_MODEL_QUANTIZE}"
fi
if [[ -n "${HF_MODEL_TRUST_REMOTE_CODE}" ]]; then
export TRUST_REMOTE_CODE="${HF_MODEL_TRUST_REMOTE_CODE}"
fi
text-generation-launcher --port 8080
| text-generation-inference/sagemaker-entrypoint.sh/0 | {
"file_path": "text-generation-inference/sagemaker-entrypoint.sh",
"repo_id": "text-generation-inference",
"token_count": 239
} | 212 |
// Adapted from turboderp exllama: https://github.com/turboderp/exllama
#ifndef _column_remap_cuh
#define _column_remap_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
void column_remap_cuda
(
const half* x,
half* x_new,
const int x_height,
const int x_width,
const uint32_t* x_map
);
#endif | text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/column_remap.cuh/0 | {
"file_path": "text-generation-inference/server/exllama_kernels/exllama_kernels/cuda_func/column_remap.cuh",
"repo_id": "text-generation-inference",
"token_count": 152
} | 213 |
#ifndef _q_gemm_cuh
#define _q_gemm_cuh
#include <cuda_runtime.h>
#include <cuda_fp16.h>
#include <cstdint>
#include <cstdio>
#include <ATen/cuda/CUDAContext.h>
#include "q_matrix.cuh"
void gemm_half_q_half_cuda
(
cublasHandle_t cublas_handle,
const half* a,
QMatrix* b,
half* c,
int size_m,
int size_n,
int size_k,
bool clear = false,
half* reconstruct = NULL,
bool force_cuda = false,
const half* r_weights = NULL,
const int r_weights_stride = 0,
bool mul_r_weights = false
);
void clear_tensor_cuda
(
half* c,
int size_m,
int size_n
);
#endif | text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cuh/0 | {
"file_path": "text-generation-inference/server/exllamav2_kernels/exllamav2_kernels/cuda/q_gemm.cuh",
"repo_id": "text-generation-inference",
"token_count": 293
} | 214 |
[tool.poetry]
name = "text-generation-server"
version = "1.4.0"
description = "Text Generation Inference Python gRPC Server"
authors = ["Olivier Dehaene <[email protected]>"]
[tool.poetry.scripts]
text-generation-server = 'text_generation_server.cli:app'
[tool.poetry.dependencies]
python = ">=3.9,<3.13"
protobuf = "^4.21.7"
grpcio = "^1.51.1"
grpcio-status = "^1.51.1"
grpcio-reflection = "^1.51.1"
grpc-interceptor = "^0.15.0"
typer = "^0.6.1"
accelerate = { version = "^0.25.0", optional = true }
bitsandbytes = { version = "^0.41.1", optional = true }
safetensors = "^0.3.2"
loguru = "^0.6.0"
opentelemetry-api = "^1.15.0"
opentelemetry-exporter-otlp = "^1.15.0"
opentelemetry-instrumentation-grpc = "^0.36b0"
hf-transfer = "^0.1.2"
sentencepiece = "^0.1.97"
tokenizers = "^0.15.0"
huggingface-hub = "^0.19.3"
transformers = "^4.37.1"
einops = "^0.6.1"
texttable = { version = "^1.6.7", optional = true }
datasets = { version = "^2.14.0", optional = true }
peft = { version = "^0.4.0", optional = true }
torch = { version = "^2.1.1", optional = true }
scipy = "^1.11.1"
pillow = "^10.0.0"
[tool.poetry.extras]
torch = ["torch"]
accelerate = ["accelerate"]
bnb = ["bitsandbytes"]
peft = ["peft"]
quantize = ["texttable", "datasets", "accelerate"]
[tool.poetry.group.dev.dependencies]
grpcio-tools = "^1.51.1"
pytest = "^7.3.0"
[[tool.poetry.source]]
name = "pytorch-gpu-src"
url = "https://download.pytorch.org/whl/cu121"
priority = "explicit"
[tool.pytest.ini_options]
markers = ["private: marks tests as requiring an admin hf token (deselect with '-m \"not private\"')"]
[build-system]
requires = [
"poetry-core>=1.0.0",
]
build-backend = "poetry.core.masonry.api"
| text-generation-inference/server/pyproject.toml/0 | {
"file_path": "text-generation-inference/server/pyproject.toml",
"repo_id": "text-generation-inference",
"token_count": 750
} | 215 |
import torch
from typing import Dict, Optional, TypeVar
from text_generation_server.models.types import Batch
B = TypeVar("B", bound=Batch)
class Cache:
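    """Keeps in-flight generation batches in memory, keyed by their batch id."""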
def __init__(self):
self.cache: Dict[int, B] = {}
def pop(self, batch_id: int) -> Optional[B]:
return self.cache.pop(batch_id, None)
def set(self, entry: B):
if entry is not None:
self.cache[entry.batch_id] = entry
def delete(self, batch_id: int):
batch = self.pop(batch_id)
if batch is not None:
del batch
if torch.cuda.is_available():
torch.cuda.empty_cache()
def clear(self):
keys = list(self.cache.keys())
for k in keys:
self.delete(k)
def __len__(self):
return len(self.cache.keys())
| text-generation-inference/server/text_generation_server/cache.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/cache.py",
"repo_id": "text-generation-inference",
"token_count": 359
} | 216 |
# coding=utf-8
# Copyright 2022 EleutherAI and the HuggingFace Inc. team. All rights reserved.
#
# This code is based on EleutherAI's GPT-NeoX library and the GPT-NeoX
# and OPT implementations in this library. It has been modified from its
# original forms to accommodate minor architectural differences compared
# to GPT-NeoX and OPT used by the Meta AI team that trained the model.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Idefics model configuration"""
import copy
from transformers import PretrainedConfig
IDEFICS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"HuggingFaceM4/idefics-9b": "https://huggingface.co/HuggingFaceM4/idefics-9b/blob/main/config.json",
"HuggingFaceM4/idefics-80b": "https://huggingface.co/HuggingFaceM4/idefics-80b/blob/main/config.json",
}
class IdeficsVisionConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Idefics-9B.
e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer. (elsewhere referred to as `hidden_size`)
image_size (`int`, *optional*, defaults to 224):
The size (resolution) of each image.
intermediate_size (`int`, *optional*, defaults to 5120):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
patch_size (`int`, *optional*, defaults to 14):
The size (resolution) of each patch.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 16):
Number of attention heads for each attention layer in the Transformer encoder.
image_num_channels (`int`, *optional*, defaults to `3`):
Number of image channels.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
attention_dropout (`float`, *optional*, defaults to 0.0):
The dropout ratio for the attention probabilities.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
initializer_factor (`float`, *optional*, defaults to 1.0):
A factor for initializing all weight matrices (should be kept to 1.0, used internally for initialization
testing).
"""
model_type = "idefics"
attribute_map = {
"hidden_size": "embed_dim",
}
def __init__(
self,
embed_dim=768,
image_size=224,
intermediate_size=5120,
patch_size=14,
num_hidden_layers=32,
num_attention_heads=16,
num_channels=3,
hidden_act="gelu",
layer_norm_eps=1e-5,
attention_dropout=0.0,
initializer_range=0.02,
initializer_factor=1.0,
**kwargs,
):
self.embed_dim = embed_dim
self.image_size = image_size
self.intermediate_size = intermediate_size
self.patch_size = patch_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_channels = num_channels
self.layer_norm_eps = layer_norm_eps
self.attention_dropout = attention_dropout
self.initializer_range = initializer_range
self.initializer_factor = initializer_factor
self.hidden_act = hidden_act
super().__init__(**kwargs)
class IdeficsPerceiverConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Idefics-9B.
e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
use_resampler (`bool`, *optional*, defaults to `False`):
Whether or not to use the resampler
        resampler_n_latents (`int`, *optional*, defaults to 64):
Number of latent embeddings to resample ("compress") the input sequence to (usually < 128).
resampler_depth (`int`, *optional*, defaults to 6):
Depth of the Perceiver Resampler (Transformer w/ cross attention). Should be shallow (< 3).
resampler_n_heads (`int`, *optional*, defaults to 16):
Number of heads in each Transformer block (for multi-headed self-attention).
resampler_head_dim (`int`, *optional*, defaults to 96):
Dimensionality of each head projection in the Transformer block.
qk_layer_norms_perceiver (`bool`, *optional*, defaults to `False`):
Whether or not to use qk layer norms in perceiver
"""
model_type = "idefics"
def __init__(
self,
use_resampler=False,
resampler_n_latents=64,
resampler_depth=6,
resampler_n_heads=16,
resampler_head_dim=96,
qk_layer_norms_perceiver=False,
**kwargs,
):
self.use_resampler = use_resampler
self.resampler_n_latents = resampler_n_latents
self.resampler_depth = resampler_depth
self.resampler_n_heads = resampler_n_heads
self.resampler_head_dim = resampler_head_dim
self.qk_layer_norms_perceiver = qk_layer_norms_perceiver
super().__init__(**kwargs)
class IdeficsConfig(PretrainedConfig):
r"""
This is the configuration class to store the configuration of a [`IdeficsModel`]. It is used to instantiate an
Idefics model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the Idefics-9B.
e.g. [HuggingFaceM4/idefics-9b](https://huggingface.co/HuggingFaceM4/idefics-9b)
Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PretrainedConfig`] for more information.
Args:
        additional_vocab_size (`int`, *optional*, defaults to 0):
Additional vocabulary size of the model, typically for the special "<img>" token. Additional vocab tokens
are always trainable whereas regular vocab tokens can be frozen or not.
vocab_size (`int`, *optional*, defaults to 32000):
Vocabulary size of the Idefics model. Defines the number of different tokens that can be represented by the
`inputs_ids` passed when calling [`~IdeficsModel`]
hidden_size (`int`, *optional*, defaults to 4096):
Dimension of the hidden representations.
intermediate_size (`int`, *optional*, defaults to 11008):
Dimension of the MLP representations.
num_hidden_layers (`int`, *optional*, defaults to 32):
Number of hidden layers in the Transformer encoder.
num_attention_heads (`int`, *optional*, defaults to 32):
Number of attention heads for each attention layer in the Transformer encoder.
dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
The non-linear activation function (function or string) in the decoder.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
alpha_initializer (`str`, *optional*, defaults to `"zeros"`):
Initialization type for the alphas.
alphas_initializer_range (`float`, *optional*, defaults to 0.0):
The standard deviation of the truncated_normal_initializer for initializing the alphas in the Gated Cross
Attention.
alpha_type (`str`, *optional*, defaults to `"float"`):
Whether the gating alphas should be vectors or single floats.
rms_norm_eps (`float`, *optional*, defaults to 1e-6):
The epsilon used by the rms normalization layers.
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models). Only
relevant if `config.is_decoder=True`.
        pad_token_id (`int`, *optional*, defaults to 0):
            Padding token id.
        bos_token_id (`int`, *optional*, defaults to 1):
            Beginning of stream token id.
        eos_token_id (`int`, *optional*, defaults to 2):
            End of stream token id.
        tie_word_embeddings (`bool`, *optional*, defaults to `False`):
            Whether to tie weight embeddings
        cross_layer_interval (`int`, *optional*, defaults to 1):
Interval for cross attention (from text to image) layers.
qk_layer_norms (`bool`, *optional*, defaults to `False`): Whether to add layer norm after q and k
freeze_text_layers (`bool`, *optional*, defaults to `True`): Whether to freeze text layers
freeze_text_module_exceptions (`bool`, *optional*, defaults to `[]`):
Exceptions to freezing text layers when `freeze_text_layers` is `True`
freeze_lm_head (`bool`, *optional*, defaults to `False`): Whether to freeze lm head
freeze_vision_layers (`bool`, *optional*, defaults to `True`): Whether to freeze vision layers
freeze_vision_module_exceptions (`bool`, *optional*, defaults to `[]`):
Exceptions to freezing vision layers when `freeze_vision_layers` is `True`
use_resampler (`bool`, *optional*, defaults to `False`): Whether to use the Resampler
vision_config (`IdeficsVisionConfig`, *optional*): Custom vision config or dict
perceiver_config (`IdeficsPerceiverConfig`, *optional*): Custom perceiver config or dict
Example:
```python
>>> from transformers import IdeficsModel, IdeficsConfig
>>> # Initializing a Idefics idefics-9b style configuration
>>> configuration = IdeficsConfig()
>>> # Initializing a model from the idefics-9b style configuration
>>> model = IdeficsModel(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "idefics"
is_composition = True
def __init__(
self,
vocab_size=32000,
additional_vocab_size=0,
hidden_size=4096,
intermediate_size=11008,
num_hidden_layers=32,
num_attention_heads=32,
dropout=0.0,
hidden_act="silu",
initializer_range=0.02,
alpha_initializer="zeros",
alphas_initializer_range=0.0,
alpha_type="float",
rms_norm_eps=1e-6,
use_cache=True,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
tie_word_embeddings=False,
cross_layer_interval=1,
qk_layer_norms=False,
freeze_text_layers=True,
freeze_text_module_exceptions=[],
freeze_lm_head=False,
freeze_vision_layers=True,
freeze_vision_module_exceptions=[],
use_resampler=False,
vision_config=None,
perceiver_config=None,
**kwargs,
):
self.vocab_size = vocab_size
self.additional_vocab_size = additional_vocab_size
self.hidden_size = hidden_size
self.intermediate_size = intermediate_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.dropout = dropout
self.hidden_act = hidden_act
self.initializer_range = initializer_range
self.alpha_initializer = alpha_initializer
self.alphas_initializer_range = alphas_initializer_range
self.alpha_type = alpha_type
self.rms_norm_eps = rms_norm_eps
self.use_cache = use_cache
self.cross_layer_interval = cross_layer_interval
self.qk_layer_norms = qk_layer_norms
self.freeze_vision_layers = freeze_vision_layers
self.freeze_text_layers = freeze_text_layers
self.freeze_text_module_exceptions = freeze_text_module_exceptions
self.freeze_vision_module_exceptions = freeze_vision_module_exceptions
self.freeze_lm_head = freeze_lm_head
self.use_resampler = use_resampler
if perceiver_config is None:
self.perceiver_config = IdeficsPerceiverConfig()
elif isinstance(perceiver_config, dict):
self.perceiver_config = IdeficsPerceiverConfig(**perceiver_config)
elif isinstance(perceiver_config, IdeficsPerceiverConfig):
self.perceiver_config = perceiver_config
if vision_config is None:
self.vision_config = IdeficsVisionConfig()
elif isinstance(vision_config, dict):
self.vision_config = IdeficsVisionConfig(**vision_config)
elif isinstance(vision_config, IdeficsVisionConfig):
self.vision_config = vision_config
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
tie_word_embeddings=tie_word_embeddings,
**kwargs,
)
# IMPORTANT: Do not do any __init__ args-based checks in the constructor, since
# PretrainedConfig.from_dict first instantiates the class with the config dict and only then
# updates the config object with `kwargs` from from_pretrained, so during the instantiation
# of this object many attributes have default values and haven't yet been overridden.
# Do any required checks inside `from_pretrained` once the superclass' `from_pretrained` was run.
def to_dict(self):
"""
Serializes this instance to a Python dictionary. Override the default [`~PretrainedConfig.to_dict`].
Returns:
`Dict[str, any]`: Dictionary of all the attributes that make up this configuration instance,
"""
output = copy.deepcopy(self.__dict__)
output["vision_config"] = self.vision_config.to_dict()
output["perceiver_config"] = self.perceiver_config.to_dict()
output["model_type"] = self.__class__.model_type
return output
| text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_config.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/custom_modeling/idefics_config.py",
"repo_id": "text-generation-inference",
"token_count": 6204
} | 217 |
import torch
import torch.distributed
from opentelemetry import trace
from transformers import AutoConfig, AutoTokenizer
from typing import Optional
from text_generation_server.models import FlashCausalLM
from text_generation_server.models.custom_modeling.flash_phi_modeling import (
FlashPhiForCausalLM,
PhiConfig,
)
from text_generation_server.utils import (
initialize_torch_distributed,
weight_files,
Weights,
)
tracer = trace.get_tracer(__name__)
class FlashPhi(FlashCausalLM):
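    """Flash-attention Phi causal LM served from sharded safetensors weights (CUDA only), with an optional Medusa head loaded via `use_medusa`."""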
def __init__(
self,
model_id: str,
revision: Optional[str] = None,
quantize: Optional[str] = None,
dtype: Optional[torch.dtype] = None,
trust_remote_code: bool = False,
use_medusa: Optional[str] = None,
):
self.process_group, rank, world_size = initialize_torch_distributed()
if torch.cuda.is_available():
device = torch.device(f"cuda:{rank}")
dtype = torch.float16 if dtype is None else dtype
else:
raise NotImplementedError("FlashPhi is only available on GPU")
tokenizer = AutoTokenizer.from_pretrained(
model_id,
revision=revision,
padding_side="left",
truncation_side="left",
trust_remote_code=trust_remote_code,
)
config = PhiConfig.from_pretrained(
model_id, revision=revision, trust_remote_code=trust_remote_code
)
config.quantize = quantize
torch.distributed.barrier(group=self.process_group)
filenames = weight_files(model_id, revision=revision, extension=".safetensors")
weights = Weights(filenames, device, dtype, process_group=self.process_group)
if config.quantize in ["gptq", "awq"]:
weights._set_gptq_params(model_id, revision)
model = FlashPhiForCausalLM(config, weights)
if use_medusa:
from text_generation_server.utils.medusa import MedusaModel
from huggingface_hub import hf_hub_download
import json
import os
from pathlib import Path
is_local_model = (
Path(use_medusa).exists() and Path(use_medusa).is_dir()
) or os.getenv("WEIGHTS_CACHE_OVERRIDE", None) is not None
if not is_local_model:
medusa_config = hf_hub_download(
use_medusa, revision=revision, filename="config.json"
)
medusa_head = hf_hub_download(
use_medusa, revision=revision, filename="medusa_lm_head.pt"
)
else:
medusa_config = str(Path(use_medusa) / "config.json")
medusa_head = str(Path(use_medusa) / "medusa_lm_head.pt")
with open(medusa_config, "r") as f:
config = json.load(f)
medusa_sf = medusa_head[: -len(".pt")] + ".safetensors"
weights = Weights(
[medusa_sf], device, dtype, process_group=self.process_group
)
lm_head = model.lm_head
model.lm_head = MedusaModel(config, weights, lm_head)
torch.distributed.barrier(group=self.process_group)
super(FlashPhi, self).__init__(
model=model,
tokenizer=tokenizer,
num_layers=len(model.model.layers),
num_kv_heads=model.model.num_key_value_heads,
head_size=model.model.head_size,
dtype=dtype,
device=device,
rank=rank,
world_size=world_size,
)
| text-generation-inference/server/text_generation_server/models/flash_phi.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/models/flash_phi.py",
"repo_id": "text-generation-inference",
"token_count": 1719
} | 218 |
from functools import lru_cache
@lru_cache(10)
def log_once(log, msg: str):
log(msg)
| text-generation-inference/server/text_generation_server/utils/log.py/0 | {
"file_path": "text-generation-inference/server/text_generation_server/utils/log.py",
"repo_id": "text-generation-inference",
"token_count": 40
} | 219 |
exclude = ["node_modules/**/*.toml"]
# https://taplo.tamasfe.dev/configuration/formatter-options.html
[formatting]
align_entries = true
indent_tables = true
reorder_keys = true
| tokenizers/bindings/node/.taplo.toml/0 | {
"file_path": "tokenizers/bindings/node/.taplo.toml",
"repo_id": "tokenizers",
"token_count": 66
} | 220 |
import {
bpeDecoder,
byteFallbackDecoder,
ctcDecoder,
fuseDecoder,
metaspaceDecoder,
replaceDecoder,
sequenceDecoder,
stripDecoder,
wordPieceDecoder,
} from '../../'
describe('wordPieceDecoder', () => {
it('accepts `undefined` as first parameter', () => {
expect(wordPieceDecoder(undefined)).toBeDefined()
})
it('accepts `undefined` as second parameter', () => {
expect(wordPieceDecoder('test', undefined)).toBeDefined()
})
it('can decode arrays of strings', () => {
expect(wordPieceDecoder().decode(['Hel', '##lo', 'there', 'my', 'fr', '##iend'])).toEqual('Hello there my friend')
})
})
describe('byteFallbackDecoder', () => {
it('accepts `undefined` as first parameter', () => {
expect(byteFallbackDecoder()).toBeDefined()
})
it('can decode arrays of strings', () => {
expect(byteFallbackDecoder().decode(['Hel', 'lo'])).toEqual('Hello')
expect(byteFallbackDecoder().decode(['<0x61>'])).toEqual('a')
expect(byteFallbackDecoder().decode(['<0x61>'])).toEqual('a')
expect(byteFallbackDecoder().decode(['My', ' na', 'me'])).toEqual('My name')
expect(byteFallbackDecoder().decode(['<0x61>'])).toEqual('a')
    expect(byteFallbackDecoder().decode(['<0xE5>'])).toEqual('�')
    expect(byteFallbackDecoder().decode(['<0xE5>', '<0x8f>'])).toEqual('��')
    expect(byteFallbackDecoder().decode(['<0xE5>', '<0x8f>', '<0xab>'])).toEqual('叫')
    expect(byteFallbackDecoder().decode(['<0xE5>', '<0x8f>', 'a'])).toEqual('��a')
    expect(byteFallbackDecoder().decode(['<0xE5>', '<0x8f>', '<0xab>', 'a'])).toEqual('叫a')
})
})
describe('replaceDecoder', () => {
it('can decode arrays of strings', () => {
expect(replaceDecoder('_', ' ').decode(['Hello', '_Hello'])).toEqual('Hello Hello')
})
})
describe('fuseDecoder', () => {
it('accepts `undefined` as first parameter', () => {
expect(fuseDecoder()).toBeDefined()
})
it('can decode arrays of strings', () => {
expect(fuseDecoder().decode(['Hel', 'lo'])).toEqual('Hello')
})
})
describe('stripDecoder', () => {
it('accepts `undefined` as first parameter', () => {
expect(stripDecoder('_', 0, 0)).toBeDefined()
})
it('can decode arrays of strings', () => {
expect(stripDecoder('_', 1, 0).decode(['_Hel', 'lo', '__there'])).toEqual('Hello_there')
})
})
describe('metaspaceDecoder', () => {
it('accepts `undefined` as first parameter', () => {
expect(metaspaceDecoder(undefined)).toBeDefined()
})
it('accepts `undefined` as second parameter', () => {
expect(metaspaceDecoder('t', undefined)).toBeDefined()
})
it('works', () => {
    expect(metaspaceDecoder().decode(['▁Hello'])).toEqual('Hello')
})
})
describe('bpeDecoder', () => {
it('accepts `undefined` as parameter', () => {
expect(bpeDecoder(undefined)).toBeDefined()
})
})
describe('ctcDecoder', () => {
it('accepts `undefined` as parameter', () => {
expect(ctcDecoder(undefined)).toBeDefined()
})
it('encodes correctly', () => {
expect(ctcDecoder().decode(['<pad>', 'h', 'h', 'e', 'e', 'l', 'l', '<pad>', 'l', 'l', 'o'])).toEqual('hello')
})
})
describe('sequenceDecoder', () => {
it('accepts `empty list` as parameter', () => {
expect(sequenceDecoder([])).toBeDefined()
})
it('encodes correctly', () => {
expect(
      sequenceDecoder([ctcDecoder(), metaspaceDecoder()]).decode(['▁', '▁', 'H', 'H', 'i', 'i', '▁', 'y', 'o', 'u']),
).toEqual('Hi you')
})
})
| tokenizers/bindings/node/lib/bindings/decoders.test.ts/0 | {
"file_path": "tokenizers/bindings/node/lib/bindings/decoders.test.ts",
"repo_id": "tokenizers",
"token_count": 1393
} | 221 |
# `tokenizers-freebsd-x64`
This is the **x86_64-unknown-freebsd** binary for `tokenizers`
| tokenizers/bindings/node/npm/freebsd-x64/README.md/0 | {
"file_path": "tokenizers/bindings/node/npm/freebsd-x64/README.md",
"repo_id": "tokenizers",
"token_count": 36
} | 222 |
# `tokenizers-win32-x64-msvc`
This is the **x86_64-pc-windows-msvc** binary for `tokenizers`
| tokenizers/bindings/node/npm/win32-x64-msvc/README.md/0 | {
"file_path": "tokenizers/bindings/node/npm/win32-x64-msvc/README.md",
"repo_id": "tokenizers",
"token_count": 39
} | 223 |
use crate::models::Model;
use napi_derive::napi;
use std::sync::{Arc, RwLock};
use tokenizers as tk;
use tokenizers::models::TrainerWrapper;
#[napi]
pub struct Trainer {
trainer: Option<Arc<RwLock<TrainerWrapper>>>,
}
impl From<TrainerWrapper> for Trainer {
fn from(trainer: TrainerWrapper) -> Self {
Self {
trainer: Some(Arc::new(RwLock::new(trainer))),
}
}
}
impl tk::Trainer for Trainer {
type Model = Model;
fn should_show_progress(&self) -> bool {
self
.trainer
.as_ref()
.expect("Uninitialized Trainer")
.read()
.unwrap()
.should_show_progress()
}
fn train(&self, model: &mut Self::Model) -> tk::Result<Vec<tk::AddedToken>> {
let special_tokens = self
.trainer
.as_ref()
.ok_or("Uninitialized Trainer")?
.read()
.unwrap()
.train(
&mut model
.model
.as_ref()
.ok_or("Uninitialized Model")?
.write()
.unwrap(),
)?;
Ok(special_tokens)
}
fn feed<I, S, F>(&mut self, iterator: I, process: F) -> tk::Result<()>
where
I: Iterator<Item = S> + Send,
S: AsRef<str> + Send,
F: Fn(&str) -> tk::Result<Vec<String>> + Sync,
{
self
.trainer
.as_ref()
.ok_or("Uninitialized Trainer")?
.write()
.unwrap()
.feed(iterator, process)
}
}
| tokenizers/bindings/node/src/trainers.rs/0 | {
"file_path": "tokenizers/bindings/node/src/trainers.rs",
"repo_id": "tokenizers",
"token_count": 641
} | 224 |
import argparse
import glob
from os.path import join
from tokenizers import ByteLevelBPETokenizer
parser = argparse.ArgumentParser()
parser.add_argument(
"--files",
default=None,
metavar="path",
type=str,
required=True,
help="The files to use as training; accept '**/*.txt' type of patterns \
if enclosed in quotes",
)
parser.add_argument(
"--out",
default="./",
type=str,
help="Path to the output directory, where the files will be saved",
)
parser.add_argument("--name", default="bpe-bytelevel", type=str, help="The name of the output vocab files")
args = parser.parse_args()
files = glob.glob(args.files)
if not files:
print(f"File does not exist: {args.files}")
exit(1)
# Initialize an empty tokenizer
tokenizer = ByteLevelBPETokenizer(add_prefix_space=True)
# And then train
tokenizer.train(
files,
vocab_size=10000,
min_frequency=2,
show_progress=True,
special_tokens=["<s>", "<pad>", "</s>"],
)
# Save the files
tokenizer.save_model(args.out, args.name)
# Restoring model from learned vocab/merges
tokenizer = ByteLevelBPETokenizer(
join(args.out, "{}-vocab.json".format(args.name)),
join(args.out, "{}-merges.txt".format(args.name)),
add_prefix_space=True,
)
# Test encoding
print(tokenizer.encode("Training ByteLevel BPE is very easy").tokens)
| tokenizers/bindings/python/examples/train_bytelevel_bpe.py/0 | {
"file_path": "tokenizers/bindings/python/examples/train_bytelevel_bpe.py",
"repo_id": "tokenizers",
"token_count": 521
} | 225 |
from .. import normalizers
Normalizer = normalizers.Normalizer
BertNormalizer = normalizers.BertNormalizer
NFD = normalizers.NFD
NFKD = normalizers.NFKD
NFC = normalizers.NFC
NFKC = normalizers.NFKC
Sequence = normalizers.Sequence
Lowercase = normalizers.Lowercase
Prepend = normalizers.Prepend
Strip = normalizers.Strip
StripAccents = normalizers.StripAccents
Nmt = normalizers.Nmt
Precompiled = normalizers.Precompiled
Replace = normalizers.Replace
NORMALIZERS = {"nfc": NFC, "nfd": NFD, "nfkc": NFKC, "nfkd": NFKD}
def unicode_normalizer_from_str(normalizer: str) -> Normalizer:
if normalizer not in NORMALIZERS:
raise ValueError(
"{} is not a known unicode normalizer. Available are {}".format(normalizer, NORMALIZERS.keys())
)
return NORMALIZERS[normalizer]()
| tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.py/0 | {
"file_path": "tokenizers/bindings/python/py_src/tokenizers/normalizers/__init__.py",
"repo_id": "tokenizers",
"token_count": 295
} | 226 |
[isort]
default_section = FIRSTPARTY
ensure_newline_before_comments = True
force_grid_wrap = 0
include_trailing_comma = True
known_first_party = transformers
known_third_party =
absl
conllu
datasets
elasticsearch
fairseq
faiss-cpu
fastprogress
fire
fugashi
git
h5py
matplotlib
nltk
numpy
packaging
pandas
PIL
psutil
pytest
pytorch_lightning
rouge_score
sacrebleu
seqeval
sklearn
streamlit
tensorboardX
tensorflow
tensorflow_datasets
timeout_decorator
torch
torchaudio
torchtext
torchvision
torch_xla
tqdm
line_length = 119
lines_after_imports = 2
multi_line_output = 3
use_parentheses = True
[flake8]
ignore = E203, E501, E741, W503, W605
max-line-length = 119
[tool:pytest]
doctest_optionflags=NUMBER NORMALIZE_WHITESPACE ELLIPSIS
| tokenizers/bindings/python/setup.cfg/0 | {
"file_path": "tokenizers/bindings/python/setup.cfg",
"repo_id": "tokenizers",
"token_count": 386
} | 227 |
use onig::Regex;
use pyo3::exceptions;
use pyo3::prelude::*;
/// Instantiate a new Regex with the given pattern
#[pyclass(module = "tokenizers", name = "Regex")]
pub struct PyRegex {
pub inner: Regex,
pub pattern: String,
}
#[pymethods]
impl PyRegex {
#[new]
#[pyo3(text_signature = "(self, pattern)")]
fn new(s: &str) -> PyResult<Self> {
Ok(Self {
inner: Regex::new(s)
.map_err(|e| exceptions::PyException::new_err(e.description().to_owned()))?,
pattern: s.to_owned(),
})
}
}
| tokenizers/bindings/python/src/utils/regex.rs/0 | {
"file_path": "tokenizers/bindings/python/src/utils/regex.rs",
"repo_id": "tokenizers",
"token_count": 264
} | 228 |
import gzip
import os
import datasets
import pytest
from ..utils import data_dir, train_files
class TestTrainFromIterators:
@staticmethod
def get_tokenizer_trainer():
# START init_tokenizer_trainer
from tokenizers import Tokenizer, decoders, models, normalizers, pre_tokenizers, trainers
tokenizer = Tokenizer(models.Unigram())
tokenizer.normalizer = normalizers.NFKC()
tokenizer.pre_tokenizer = pre_tokenizers.ByteLevel()
tokenizer.decoder = decoders.ByteLevel()
trainer = trainers.UnigramTrainer(
vocab_size=20000,
initial_alphabet=pre_tokenizers.ByteLevel.alphabet(),
special_tokens=["<PAD>", "<BOS>", "<EOS>"],
)
# END init_tokenizer_trainer
trainer.show_progress = False
return tokenizer, trainer
@staticmethod
def load_dummy_dataset():
# START load_dataset
import datasets
dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train+test+validation")
# END load_dataset
@pytest.fixture(scope="class")
def setup_gzip_files(self, train_files):
with open(train_files["small"], "rt") as small:
for n in range(3):
path = f"data/my-file.{n}.gz"
with gzip.open(path, "wt") as f:
f.write(small.read())
def test_train_basic(self):
tokenizer, trainer = self.get_tokenizer_trainer()
# START train_basic
# First few lines of the "Zen of Python" https://www.python.org/dev/peps/pep-0020/
data = [
"Beautiful is better than ugly."
"Explicit is better than implicit."
"Simple is better than complex."
"Complex is better than complicated."
"Flat is better than nested."
"Sparse is better than dense."
"Readability counts."
]
tokenizer.train_from_iterator(data, trainer=trainer)
# END train_basic
def test_datasets(self):
tokenizer, trainer = self.get_tokenizer_trainer()
# In order to keep tests fast, we only use the first 100 examples
os.environ["TOKENIZERS_PARALLELISM"] = "true"
dataset = datasets.load_dataset("wikitext", "wikitext-103-raw-v1", split="train[0:100]")
# START def_batch_iterator
def batch_iterator(batch_size=1000):
# Only keep the text column to avoid decoding the rest of the columns unnecessarily
tok_dataset = dataset.select_columns("text")
for batch in tok_dataset.iter(batch_size):
yield batch["text"]
# END def_batch_iterator
# START train_datasets
tokenizer.train_from_iterator(batch_iterator(), trainer=trainer, length=len(dataset))
# END train_datasets
def test_gzip(self, setup_gzip_files):
tokenizer, trainer = self.get_tokenizer_trainer()
# START single_gzip
import gzip
with gzip.open("data/my-file.0.gz", "rt") as f:
tokenizer.train_from_iterator(f, trainer=trainer)
# END single_gzip
# START multi_gzip
files = ["data/my-file.0.gz", "data/my-file.1.gz", "data/my-file.2.gz"]
def gzip_iterator():
for path in files:
with gzip.open(path, "rt") as f:
for line in f:
yield line
tokenizer.train_from_iterator(gzip_iterator(), trainer=trainer)
# END multi_gzip
| tokenizers/bindings/python/tests/documentation/test_tutorial_train_from_iterators.py/0 | {
"file_path": "tokenizers/bindings/python/tests/documentation/test_tutorial_train_from_iterators.py",
"repo_id": "tokenizers",
"token_count": 1587
} | 229 |
# Input Sequences
<tokenizerslangcontent>
<python>
These types represent all the different kinds of sequence that can be used as input of a Tokenizer.
Globally, any sequence can be either a string or a list of strings, according to the operating
mode of the tokenizer: `raw text` vs `pre-tokenized`.
## TextInputSequence[[tokenizers.TextInputSequence]]
<code>tokenizers.TextInputSequence</code>
A `str` that represents an input sequence
## PreTokenizedInputSequence[[tokenizers.PreTokenizedInputSequence]]
<code>tokenizers.PreTokenizedInputSequence</code>
A pre-tokenized input sequence. Can be one of:
- A `List` of `str`
- A `Tuple` of `str`
alias of `Union[List[str], Tuple[str]]`.
## InputSequence[[tokenizers.InputSequence]]
<code>tokenizers.InputSequence</code>
Represents all the possible types of input sequences for encoding. Can be:
- When `is_pretokenized=False`: [TextInputSequence](#tokenizers.TextInputSequence)
- When `is_pretokenized=True`: [PreTokenizedInputSequence](#tokenizers.PreTokenizedInputSequence)
alias of `Union[str, List[str], Tuple[str]]`.
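As a quick, informal illustration (not part of the generated API reference), both kinds of input are passed to the same `Tokenizer.encode` method; the `tokenizer.json` path below is just a placeholder for any trained tokenizer file:
```python
from tokenizers import Tokenizer
# Placeholder path: any trained tokenizer file works here.
tokenizer = Tokenizer.from_file("tokenizer.json")
# TextInputSequence: a plain string, split by the tokenizer's own pre-tokenizer.
encoding = tokenizer.encode("Hello, how are you?")
# PreTokenizedInputSequence: the input is already split into words.
encoding = tokenizer.encode(["Hello", ",", "how", "are", "you", "?"], is_pretokenized=True)
```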
</python>
<rust>
The Rust API Reference is available directly on the [Docs.rs](https://docs.rs/tokenizers/latest/tokenizers/) website.
</rust>
<node>
The node API has not been documented yet.
</node>
</tokenizerslangcontent> | tokenizers/docs/source-doc-builder/api/input-sequences.mdx/0 | {
"file_path": "tokenizers/docs/source-doc-builder/api/input-sequences.mdx",
"repo_id": "tokenizers",
"token_count": 402
} | 230 |
import re
from sphinx.directives.other import TocTree
class TocTreeTags(TocTree):
    hasPat = re.compile(r"^\s*:(.+):(.+)$")
def filter_entries(self, entries):
filtered = []
for e in entries:
m = self.hasPat.match(e)
            if m is not None:
if self.env.app.tags.has(m.groups()[0]):
filtered.append(m.groups()[1])
else:
filtered.append(e)
return filtered
def run(self):
self.content = self.filter_entries(self.content)
return super().run()
def setup(app):
app.add_directive("toctree-tags", TocTreeTags)
return {
"version": "0.1",
}
| tokenizers/docs/source/_ext/toctree_tags.py/0 | {
"file_path": "tokenizers/docs/source/_ext/toctree_tags.py",
"repo_id": "tokenizers",
"token_count": 345
} | 231 |
Installation
====================================================================================================
.. only:: python
.. include:: python.inc
.. only:: rust
.. include:: rust.inc
.. only:: node
.. include:: node.inc
| tokenizers/docs/source/installation/main.rst/0 | {
"file_path": "tokenizers/docs/source/installation/main.rst",
"repo_id": "tokenizers",
"token_count": 54
} | 232 |
#[macro_use]
extern crate criterion;
use std::fs::File;
use std::io::{BufRead, BufReader};
use std::path::Path;
use std::time::{Duration, Instant};
use criterion::black_box;
use criterion::Criterion;
use tokenizers::processors::template::TemplateProcessing;
use tokenizers::{EncodeInput, Encoding, PostProcessor, Tokenizer};
/// Simple TemplateProcessing
fn create_processor() -> TemplateProcessing {
TemplateProcessing::builder()
.try_single("[CLS]:0 $A:0 [SEP]:0")
.unwrap()
.try_pair("[CLS]:0 $A:0 [SEP]:0 $B:1 [SEP]:1")
.unwrap()
.special_tokens(vec![("[CLS]", 0), ("[SEP]", 1)])
.build()
.unwrap()
}
pub fn bench_layout(c: &mut Criterion) {
let processor = create_processor();
let tokenizer = Tokenizer::from_file("data/albert-base-v1-tokenizer.json").unwrap();
let mut encodeds: Vec<Encoding> = vec![];
for line in BufReader::new(File::open(Path::new("data/big.txt")).unwrap()).lines() {
let line: EncodeInput = line.unwrap().into();
let encoded: Encoding = tokenizer.encode(line, false).unwrap();
encodeds.push(encoded);
}
c.bench_function("TemplateProcessing single encode", |b| {
b.iter_custom(|iters| {
let mut duration = Duration::new(0, 0);
for i in 0..iters as usize {
let encoded_index = i % encodeds.len();
let encoded: Encoding = encodeds[encoded_index].clone();
let start = Instant::now();
let _ = black_box(processor.process(encoded, None, false));
duration = duration.checked_add(start.elapsed()).unwrap();
}
duration
})
});
c.bench_function("TemplateProcessing pair encode", |b| {
b.iter_custom(|iters| {
let mut duration = Duration::new(0, 0);
for i in 0..iters as usize {
let encoded_index = i % encodeds.len();
let encoded: Encoding = encodeds[encoded_index].clone();
let encoded_index2 = (i + 1) % encodeds.len();
let pair: Encoding = encodeds[encoded_index2].clone();
let start = Instant::now();
let _ = black_box(processor.process(encoded, Some(pair), false));
duration = duration.checked_add(start.elapsed()).unwrap();
}
duration
})
});
}
criterion_group! {
name = layout_benches;
config = Criterion::default().sample_size(20);
targets = bench_layout
}
criterion_main!(layout_benches);
| tokenizers/tokenizers/benches/layout_benchmark.rs/0 | {
"file_path": "tokenizers/tokenizers/benches/layout_benchmark.rs",
"repo_id": "tokenizers",
"token_count": 1158
} | 233 |
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello wasm-pack!</title>
</head>
<body>
<noscript>This page contains webassembly and javascript content, please enable javascript in your browser.</noscript>
<script src="./bootstrap.js"></script>
</body>
</html>
| tokenizers/tokenizers/examples/unstable_wasm/www/index.html/0 | {
"file_path": "tokenizers/tokenizers/examples/unstable_wasm/www/index.html",
"repo_id": "tokenizers",
"token_count": 110
} | 234 |
//! [Byte Pair Encoding](https://www.aclweb.org/anthology/P16-1162/) model.
use std::{iter, mem};
mod model;
mod serialization;
pub mod trainer;
mod word;
type Pair = (u32, u32);
/// Errors that can be encountered while using or constructing a `BPE` model.
#[derive(thiserror::Error, Debug)]
pub enum Error {
/// An error encountered while reading files mainly.
#[error("IoError: {0}")]
Io(#[from] std::io::Error),
/// An error forwarded from Serde, while parsing JSON
#[error("JsonError: {0}")]
JsonError(#[from] serde_json::Error),
/// When the vocab.json file is in the wrong format
#[error("Bad vocabulary json file")]
BadVocabulary,
/// When the merges.txt file is in the wrong format. This error holds the line
/// number of the line that caused the error.
#[error("Merges text file invalid at line {0}")]
BadMerges(usize),
/// If a token found in merges, is not in the vocab
#[error("Token `{0}` out of vocabulary")]
MergeTokenOutOfVocabulary(String),
/// If the provided unk token is out of vocabulary
#[error("Unk token `{0}` not found in the vocabulary")]
UnkTokenOutOfVocabulary(String),
/// Dropout not between 0 and 1.
#[error("Dropout should be between 0 and 1")]
InvalidDropout,
}
/// Provides access to the `FirstLastIterator` to any Iterator
pub(crate) trait WithFirstLastIterator: Iterator + Sized {
fn with_first_and_last(self) -> FirstLastIterator<Self>;
}
impl<I> WithFirstLastIterator for I
where
I: Iterator,
{
fn with_first_and_last(self) -> FirstLastIterator<Self> {
FirstLastIterator {
first: true,
iter: self.peekable(),
}
}
}
/// Provides information about whether an item is the first and/or the last of the iterator
pub(crate) struct FirstLastIterator<I>
where
I: Iterator,
{
first: bool,
iter: iter::Peekable<I>,
}
impl<I> Iterator for FirstLastIterator<I>
where
I: Iterator,
{
/// (is_first, is_last, item)
type Item = (bool, bool, I::Item);
fn next(&mut self) -> Option<Self::Item> {
let first = mem::replace(&mut self.first, false);
self.iter
.next()
.map(|e| (first, self.iter.peek().is_none(), e))
}
}
// Re-export
pub use model::*;
pub use trainer::*;
use word::*;
| tokenizers/tokenizers/src/models/bpe/mod.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/bpe/mod.rs",
"repo_id": "tokenizers",
"token_count": 891
} | 235 |
use super::{super::OrderedVocabIter, WordPiece, WordPieceBuilder};
use serde::{
de::{MapAccess, Visitor},
ser::SerializeStruct,
Deserialize, Deserializer, Serialize, Serializer,
};
use std::collections::HashSet;
impl Serialize for WordPiece {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
let mut model = serializer.serialize_struct("WordPiece", 5)?;
// Small fields first
model.serialize_field("type", "WordPiece")?;
model.serialize_field("unk_token", &self.unk_token)?;
model.serialize_field("continuing_subword_prefix", &self.continuing_subword_prefix)?;
model.serialize_field("max_input_chars_per_word", &self.max_input_chars_per_word)?;
// Then large ones
let ordered_vocab = OrderedVocabIter::new(&self.vocab_r);
model.serialize_field("vocab", &ordered_vocab)?;
model.end()
}
}
impl<'de> Deserialize<'de> for WordPiece {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
deserializer.deserialize_struct(
"WordPiece",
&[
"type",
"unk_token",
"continuing_subword_prefix",
"max_input_chars_per_word",
"vocab",
],
WordPieceVisitor,
)
}
}
struct WordPieceVisitor;
impl<'de> Visitor<'de> for WordPieceVisitor {
type Value = WordPiece;
fn expecting(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(fmt, "struct WordPiece")
}
fn visit_map<V>(self, mut map: V) -> std::result::Result<Self::Value, V::Error>
where
V: MapAccess<'de>,
{
let mut builder = WordPieceBuilder::new();
let mut missing_fields = vec![
// for retrocompatibility the "type" field is not mandatory
"unk_token",
"continuing_subword_prefix",
"max_input_chars_per_word",
"vocab",
]
.into_iter()
.collect::<HashSet<_>>();
while let Some(key) = map.next_key::<String>()? {
match key.as_ref() {
"unk_token" => builder = builder.unk_token(map.next_value()?),
"continuing_subword_prefix" => {
builder = builder.continuing_subword_prefix(map.next_value()?)
}
"max_input_chars_per_word" => {
builder = builder.max_input_chars_per_word(map.next_value()?)
}
"vocab" => builder = builder.vocab(map.next_value()?),
"type" => match map.next_value()? {
"WordPiece" => {}
u => {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(u),
&"WordPiece",
))
}
},
_ => {}
}
missing_fields.remove::<str>(&key);
}
if !missing_fields.is_empty() {
Err(serde::de::Error::missing_field(
missing_fields.iter().next().unwrap(),
))
} else {
Ok(builder.build().map_err(serde::de::Error::custom)?)
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn serde() {
let wp = WordPiece::default();
let wp_s = "{\
\"type\":\"WordPiece\",\
\"unk_token\":\"[UNK]\",\
\"continuing_subword_prefix\":\"##\",\
\"max_input_chars_per_word\":100,\
\"vocab\":{}\
}";
assert_eq!(serde_json::to_string(&wp).unwrap(), wp_s);
assert_eq!(serde_json::from_str::<WordPiece>(wp_s).unwrap(), wp);
}
#[test]
fn deserialization_should_fail() {
let missing_unk = "{\
\"type\":\"WordPiece\",\
\"continuing_subword_prefix\":\"##\",\
\"max_input_chars_per_word\":100,\
\"vocab\":{}\
}";
assert!(serde_json::from_str::<WordPiece>(missing_unk)
.unwrap_err()
.to_string()
.starts_with("missing field `unk_token`"));
let wrong_type = "{\
\"type\":\"WordLevel\",\
\"unk_token\":\"[UNK]\",\
\"vocab\":{}\
}";
assert!(serde_json::from_str::<WordPiece>(wrong_type)
.unwrap_err()
.to_string()
.starts_with("invalid value: string \"WordLevel\", expected WordPiece"));
}
}
| tokenizers/tokenizers/src/models/wordpiece/serialization.rs/0 | {
"file_path": "tokenizers/tokenizers/src/models/wordpiece/serialization.rs",
"repo_id": "tokenizers",
"token_count": 2453
} | 236 |
use serde::{Deserialize, Serialize};
use crate::tokenizer::{PreTokenizedString, PreTokenizer, Result, SplitDelimiterBehavior};
use crate::utils::macro_rules_attribute;
use unicode_categories::UnicodeCategories;
fn is_punc(x: char) -> bool {
char::is_ascii_punctuation(&x) || x.is_punctuation()
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[macro_rules_attribute(impl_serde_type!)]
pub struct Punctuation {
#[serde(default = "default_split")]
behavior: SplitDelimiterBehavior,
}
fn default_split() -> SplitDelimiterBehavior {
SplitDelimiterBehavior::Isolated
}
impl Punctuation {
pub fn new(behavior: SplitDelimiterBehavior) -> Self {
Self { behavior }
}
}
impl Default for Punctuation {
fn default() -> Self {
Self::new(SplitDelimiterBehavior::Isolated)
}
}
impl PreTokenizer for Punctuation {
fn pre_tokenize(&self, pretokenized: &mut PreTokenizedString) -> Result<()> {
pretokenized.split(|_, s| s.split(is_punc, self.behavior))
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{OffsetReferential, OffsetType};
#[test]
fn punctuation_basic() {
let pretok = Punctuation::default();
let mut pretokenized: PreTokenizedString = "Hey friend! How are you?!?".into();
pretok.pre_tokenize(&mut pretokenized).unwrap();
assert_eq!(
pretokenized
.get_splits(OffsetReferential::Original, OffsetType::Byte)
.into_iter()
.map(|(s, o, _)| (s, o))
.collect::<Vec<_>>(),
vec![
("Hey friend", (0, 10)),
("!", (10, 11)),
(" How are you", (11, 27)),
("?", (27, 28)),
("!", (28, 29)),
("?", (29, 30)),
]
);
}
#[test]
fn deserialization() {
let punctuation: Punctuation = serde_json::from_str(r#"{"type": "Punctuation"}"#).unwrap();
assert_eq!(punctuation, Punctuation::default());
assert_eq!(
punctuation,
Punctuation::new(SplitDelimiterBehavior::Isolated)
);
}
#[test]
#[should_panic]
fn deserialization_erroneous() {
let _punctuation: Punctuation =
serde_json::from_str(r#"{"type": "WhitespaceSplit"}"#).unwrap();
}
}
| tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs/0 | {
"file_path": "tokenizers/tokenizers/src/pre_tokenizers/punctuation.rs",
"repo_id": "tokenizers",
"token_count": 1102
} | 237 |
use crate::utils::SysRegex;
use crate::{Offsets, Result};
use regex::Regex;
/// Pattern used to split a NormalizedString
pub trait Pattern {
/// Slice the given string in a list of pattern match positions, with
/// a boolean indicating whether this is a match or not.
///
/// This method *must* cover the whole string in its outputs, with
/// contiguous ordered slices.
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>>;
}
impl Pattern for char {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
let is_char = |c: char| -> bool { c == *self };
is_char.find_matches(inside)
}
}
impl Pattern for &str {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if self.is_empty() {
// If we try to find the matches with an empty string, just don't match anything
return Ok(vec![((0, inside.chars().count()), false)]);
}
let re = Regex::new(®ex::escape(self))?;
(&re).find_matches(inside)
}
}
impl Pattern for &String {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
let s: &str = self;
s.find_matches(inside)
}
}
impl Pattern for &Regex {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut prev = 0;
let mut splits = Vec::with_capacity(inside.len());
for m in self.find_iter(inside) {
if prev != m.start() {
splits.push(((prev, m.start()), false));
}
splits.push(((m.start(), m.end()), true));
prev = m.end();
}
if prev != inside.len() {
splits.push(((prev, inside.len()), false))
}
Ok(splits)
}
}
impl Pattern for &SysRegex {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut prev = 0;
let mut splits = Vec::with_capacity(inside.len());
for (start, end) in self.find_iter(inside) {
if prev != start {
splits.push(((prev, start), false));
}
splits.push(((start, end), true));
prev = end;
}
if prev != inside.len() {
splits.push(((prev, inside.len()), false))
}
Ok(splits)
}
}
impl<F> Pattern for F
where
F: Fn(char) -> bool,
{
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
if inside.is_empty() {
return Ok(vec![((0, 0), false)]);
}
let mut last_offset = 0;
let mut last_seen = 0;
let mut matches = inside
.char_indices()
.flat_map(|(b, c)| {
last_seen = b + c.len_utf8();
if self(c) {
let mut events = Vec::with_capacity(2);
if last_offset < b {
// We need to emit what was before this match
events.push(((last_offset, b), false));
}
events.push(((b, b + c.len_utf8()), true));
last_offset = b + c.len_utf8();
events
} else {
vec![]
}
})
.collect::<Vec<_>>();
// Do not forget the last potential split
if last_seen > last_offset {
matches.push(((last_offset, last_seen), false));
}
Ok(matches)
}
}
/// Invert the `is_match` flags for the wrapped Pattern. This is useful
/// for example when we use a regex that matches words instead of a delimiter,
/// and we want to match the delimiter.
pub struct Invert<P: Pattern>(pub P);
impl<P: Pattern> Pattern for Invert<P> {
fn find_matches(&self, inside: &str) -> Result<Vec<(Offsets, bool)>> {
Ok(self
.0
.find_matches(inside)?
.into_iter()
.map(|(offsets, flag)| (offsets, !flag))
.collect())
}
}
#[cfg(test)]
mod tests {
use super::*;
use regex::Regex;
macro_rules! do_test {
($inside: expr, $pattern: expr => @ERROR) => {
assert!($pattern.find_matches($inside).is_err());
};
($inside: expr, $pattern: expr => $result: expr) => {
assert_eq!($pattern.find_matches($inside).unwrap(), $result);
assert_eq!(
Invert($pattern).find_matches($inside).unwrap(),
$result
.into_iter()
.map(|v: (Offsets, bool)| (v.0, !v.1))
.collect::<Vec<_>>()
);
};
}
#[test]
fn char() {
do_test!("aba", 'a' => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]);
do_test!("bbbba", 'a' => vec![((0, 4), false), ((4, 5), true)]);
do_test!("aabbb", 'a' => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]);
do_test!("", 'a' => vec![((0, 0), false)]);
do_test!("aaa", 'b' => vec![((0, 3), false)]);
}
#[test]
fn str() {
do_test!("aba", "a" => vec![((0, 1), true), ((1, 2), false), ((2, 3), true)]);
do_test!("bbbba", "a" => vec![((0, 4), false), ((4, 5), true)]);
do_test!("aabbb", "a" => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]);
do_test!("aabbb", "ab" => vec![((0, 1), false), ((1, 3), true), ((3, 5), false)]);
do_test!("aabbab", "ab" =>
vec![((0, 1), false), ((1, 3), true), ((3, 4), false), ((4, 6), true)]
);
do_test!("", "" => vec![((0, 0), false)]);
do_test!("aaa", "" => vec![((0, 3), false)]);
do_test!("aaa", "b" => vec![((0, 3), false)]);
}
#[test]
fn functions() {
let is_b = |c| c == 'b';
do_test!("aba", is_b => vec![((0, 1), false), ((1, 2), true), ((2, 3), false)]);
do_test!("aaaab", is_b => vec![((0, 4), false), ((4, 5), true)]);
do_test!("bbaaa", is_b => vec![((0, 1), true), ((1, 2), true), ((2, 5), false)]);
do_test!("", is_b => vec![((0, 0), false)]);
do_test!("aaa", is_b => vec![((0, 3), false)]);
}
#[test]
fn regex() {
let is_whitespace = Regex::new(r"\s+").unwrap();
do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]);
do_test!(" a b ", &is_whitespace =>
vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)]
);
do_test!("", &is_whitespace => vec![((0, 0), false)]);
do_test!("๐พ๐ ๐ ๐ ๐๐ ๐ฃ๐๐๐๐", &is_whitespace =>
vec![((0, 16), false), ((16, 17), true), ((17, 45), false)]
);
do_test!("aaa", &is_whitespace => vec![((0, 3), false)]);
}
#[test]
fn sys_regex() {
let is_whitespace = SysRegex::new(r"\s+").unwrap();
do_test!("a b", &is_whitespace => vec![((0, 1), false), ((1, 4), true), ((4, 5), false)]);
do_test!(" a b ", &is_whitespace =>
vec![((0, 3), true), ((3, 4), false), ((4, 7), true), ((7, 8), false), ((8, 11), true)]
);
do_test!("", &is_whitespace => vec![((0, 0), false)]);
do_test!("๐พ๐ ๐ ๐ ๐๐ ๐ฃ๐๐๐๐", &is_whitespace =>
vec![((0, 16), false), ((16, 17), true), ((17, 45), false)]
);
do_test!("aaa", &is_whitespace => vec![((0, 3), false)]);
}
}
| tokenizers/tokenizers/src/tokenizer/pattern.rs/0 | {
"file_path": "tokenizers/tokenizers/src/tokenizer/pattern.rs",
"repo_id": "tokenizers",
"token_count": 3903
} | 238 |
#![cfg(feature = "http")]
use tokenizers::{FromPretrainedParameters, Result, Tokenizer};
#[test]
fn test_from_pretrained() -> Result<()> {
let tokenizer = Tokenizer::from_pretrained("bert-base-cased", None)?;
let encoding = tokenizer.encode("Hey there dear friend!", false)?;
assert_eq!(
encoding.get_tokens(),
&["Hey", "there", "dear", "friend", "!"]
);
Ok(())
}
#[test]
fn test_from_pretrained_revision() -> Result<()> {
let tokenizer = Tokenizer::from_pretrained("anthony/tokenizers-test", None)?;
let encoding = tokenizer.encode("Hey there dear friend!", false)?;
assert_eq!(
encoding.get_tokens(),
&["hey", "there", "dear", "friend", "!"]
);
let tokenizer = Tokenizer::from_pretrained(
"anthony/tokenizers-test",
Some(FromPretrainedParameters {
revision: "gpt-2".to_string(),
..Default::default()
}),
)?;
let encoding = tokenizer.encode("Hey there dear friend!", false)?;
assert_eq!(
encoding.get_tokens(),
&["Hey", "ฤ there", "ฤ dear", "ฤ friend", "!"]
);
Ok(())
}
#[test]
fn test_from_pretrained_invalid_model() {
let tokenizer = Tokenizer::from_pretrained("docs?", None);
assert!(tokenizer.is_err());
}
#[test]
fn test_from_pretrained_invalid_revision() {
let tokenizer = Tokenizer::from_pretrained(
"bert-base-cased",
Some(FromPretrainedParameters {
revision: "gpt?".to_string(),
..Default::default()
}),
);
assert!(tokenizer.is_err());
}
| tokenizers/tokenizers/tests/from_pretrained.rs/0 | {
"file_path": "tokenizers/tokenizers/tests/from_pretrained.rs",
"repo_id": "tokenizers",
"token_count": 683
} | 239 |
ARG BASE_DOCKER_IMAGE
FROM $BASE_DOCKER_IMAGE
LABEL maintainer="Hugging Face"
ARG DEBIAN_FRONTEND=noninteractive
# Use login shell to read variables from `~/.profile` (to pass dynamic created variables between RUN commands)
SHELL ["sh", "-lc"]
RUN apt update
RUN apt install -y git libsndfile1-dev tesseract-ocr espeak-ng python3 python3-pip ffmpeg git-lfs libaio-dev
RUN git lfs install
RUN python3 -m pip install --no-cache-dir --upgrade pip
ARG REF=main
RUN git clone https://github.com/huggingface/transformers && cd transformers && git checkout $REF
RUN python3 -m pip install --no-cache-dir -e ./transformers[dev,onnxruntime]
# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
ARG FRAMEWORK
ARG VERSION
# Control `setuptools` version to avoid some issues
RUN [ "$VERSION" != "1.10" ] && python3 -m pip install -U setuptools || python3 -m pip install -U "setuptools<=59.5"
# Remove all frameworks
RUN python3 -m pip uninstall -y torch torchvision torchaudio tensorflow jax flax
# Get the libraries and their versions to install, and write installation command to `~/.profile`.
RUN python3 ./transformers/utils/past_ci_versions.py --framework $FRAMEWORK --version $VERSION
# Install the target framework
RUN echo "INSTALL_CMD = $INSTALL_CMD"
RUN $INSTALL_CMD
RUN [ "$FRAMEWORK" != "pytorch" ] && echo "`deepspeed-testing` installation is skipped" || python3 -m pip install --no-cache-dir ./transformers[deepspeed-testing]
# Remove `accelerate`: it requires `torch`, and this causes import issues for TF-only testing
# We will install `accelerate@main` in Past CI workflow file
RUN python3 -m pip uninstall -y accelerate
# Uninstall `torch-tensorrt` and `apex` shipped with the base image
RUN python3 -m pip uninstall -y torch-tensorrt apex
# Pre-build **nightly** release of DeepSpeed, so it would be ready for testing (otherwise, the 1st deepspeed test will timeout)
RUN python3 -m pip uninstall -y deepspeed
# This has to be run inside the GPU VMs running the tests. (So far, it fails here due to GPU checks during compilation.)
# Issue: https://github.com/microsoft/DeepSpeed/issues/2010
# RUN git clone https://github.com/microsoft/DeepSpeed && cd DeepSpeed && rm -rf build && \
# DS_BUILD_CPU_ADAM=1 DS_BUILD_FUSED_ADAM=1 DS_BUILD_UTILS=1 python3 -m pip install . --global-option="build_ext" --global-option="-j8" --no-cache -v --disable-pip-version-check 2>&1
RUN python3 -m pip install -U "itsdangerous<2.1.0"
# When installing in editable mode, `transformers` is not recognized as a package.
# this line must be added in order for python to be aware of transformers.
RUN cd transformers && python3 setup.py develop
| transformers/docker/transformers-past-gpu/Dockerfile/0 | {
"file_path": "transformers/docker/transformers-past-gpu/Dockerfile",
"repo_id": "transformers",
"token_count": 886
} | 240 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Distributed training with 🤗 Accelerate
As models get bigger, parallelism has emerged as a strategy for training larger models on limited hardware and for accelerating training speed by several orders of magnitude. At Hugging Face, we created the [🤗 Accelerate](https://huggingface.co/docs/accelerate) library to help users train a 🤗 Transformers model on any kind of distributed setup, whether it is multiple GPUs on one machine or multiple GPUs across several machines. In this tutorial, you will learn how to customize your native PyTorch training loop to enable training in a distributed environment.
## Setup
Get started by installing 🤗 Accelerate:
```bash
pip install accelerate
```
Then import and create an [`~accelerate.Accelerator`] object. The [`~accelerate.Accelerator`] will automatically detect your type of distributed setup and initialize all the necessary components for training. You don't need to explicitly place your model on a device.
```py
>>> from accelerate import Accelerator
>>> accelerator = Accelerator()
```
## Prepare to accelerate
The next step is to pass all the relevant training objects to the [`~accelerate.Accelerator.prepare`] method. This includes your training and evaluation DataLoaders, a model and an optimizer:
```py
>>> train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(
... train_dataloader, eval_dataloader, model, optimizer
... )
```
## Rรผckwรคrts
Die letzte Ergรคnzung besteht darin, das typische `loss.backward()` in der Trainingsschleife durch die ๐ค Accelerate-Methode [`~accelerate.Accelerator.backward`] zu ersetzen:
```py
>>> for epoch in range(num_epochs):
... for batch in train_dataloader:
... outputs = model(**batch)
... loss = outputs.loss
... accelerator.backward(loss)
... optimizer.step()
... lr_scheduler.step()
... optimizer.zero_grad()
... progress_bar.update(1)
```
As you can see in the following code, you only need to add four additional lines of code to your training loop to enable distributed training!
```diff
+ from accelerate import Accelerator
from transformers import AdamW, AutoModelForSequenceClassification, get_scheduler
+ accelerator = Accelerator()
model = AutoModelForSequenceClassification.from_pretrained(checkpoint, num_labels=2)
optimizer = AdamW(model.parameters(), lr=3e-5)
- device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
- model.to(device)
+ train_dataloader, eval_dataloader, model, optimizer = accelerator.prepare(
+ train_dataloader, eval_dataloader, model, optimizer
+ )
num_epochs = 3
num_training_steps = num_epochs * len(train_dataloader)
lr_scheduler = get_scheduler(
"linear",
optimizer=optimizer,
num_warmup_steps=0,
num_training_steps=num_training_steps
)
progress_bar = tqdm(range(num_training_steps))
model.train()
for epoch in range(num_epochs):
for batch in train_dataloader:
- batch = {k: v.to(device) for k, v in batch.items()}
outputs = model(**batch)
loss = outputs.loss
- loss.backward()
+ accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
progress_bar.update(1)
```
## Train
Once you have added the relevant lines of code, launch your training in a script or a notebook like Colaboratory.
### Train with a script
If you are running your training from a script, run the following command to create and save a configuration file:
```bash
accelerate config
```
Then launch your training with:
```bash
accelerate launch train.py
```
### Train with a notebook
🤗 Accelerate can also run in a notebook if you plan on using Colaboratory's TPUs. Wrap all the code responsible for training in a function, and pass it to [`~accelerate.notebook_launcher`]:
```py
>>> from accelerate import notebook_launcher
>>> notebook_launcher(training_function)
```
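If you are training on Colaboratory's TPUs, you can, for instance, also pass the number of processes explicitly; the value of 8 below is only an illustration for an 8-core TPU:
```py
>>> from accelerate import notebook_launcher
>>> notebook_launcher(training_function, num_processes=8)
```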
For more information about 🤗 Accelerate and its rich set of features, refer to the [documentation](https://huggingface.co/docs/accelerate). | transformers/docs/source/de/accelerate.md/0 | {
"file_path": "transformers/docs/source/de/accelerate.md",
"repo_id": "transformers",
"token_count": 1929
} | 241 |
<!---
Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
# Contribute to ๐ค Transformers
Everyone is welcome to contribute, and we value everybody's contribution. Code
contributions are not the only way to help the community. Answering questions, helping
others, and improving the documentation are also immensely valuable.
It also helps us if you spread the word! Reference the library in blog posts
about the awesome projects it made possible, shout out on Twitter every time it has
helped you, or simply โญ๏ธ the repository to say thank you.
However you choose to contribute, please be mindful and respect our
[code of conduct](https://github.com/huggingface/transformers/blob/main/CODE_OF_CONDUCT.md).
**This guide was heavily inspired by the awesome [scikit-learn guide to contributing](https://github.com/scikit-learn/scikit-learn/blob/main/CONTRIBUTING.md).**
## Ways to contribute
There are several ways you can contribute to ๐ค Transformers:
* Fix outstanding issues with the existing code.
* Submit issues related to bugs or desired new features.
* Implement new models.
* Contribute to the examples or to the documentation.
If you don't know where to start, there is a special [Good First
Issue](https://github.com/huggingface/transformers/contribute) listing. It will give you a list of
open issues that are beginner-friendly and help you start contributing to open-source. Just comment on the issue that you'd like to work
on.
For something slightly more challenging, you can also take a look at the [Good Second Issue](https://github.com/huggingface/transformers/labels/Good%20Second%20Issue) list. In general though, if you feel like you know what you're doing, go for it and we'll help you get there! ๐
> All contributions are equally valuable to the community. ๐ฅฐ
## Fixing outstanding issues
If you notice an issue with the existing code and have a fix in mind, feel free to [start contributing](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md/#create-a-pull-request) and open a Pull Request!
## Submitting a bug-related issue or feature request
Do your best to follow these guidelines when submitting a bug-related issue or a feature
request. It will make it easier for us to come back to you quickly and with good
feedback.
### Did you find a bug?
The ๐ค Transformers library is robust and reliable thanks to users who report the problems they encounter.
Before you report an issue, we would really appreciate it if you could **make sure the bug was not
already reported** (use the search bar on GitHub under Issues). Your issue should also be related to bugs in the library itself, and not your code. If you're unsure whether the bug is in your code or the library, please ask in the [forum](https://discuss.huggingface.co/) first. This helps us respond quicker to fixing issues related to the library versus general questions.
Once you've confirmed the bug hasn't already been reported, please include the following information in your issue so we can quickly resolve it:
* Your **OS type and version** and **Python**, **PyTorch** and
**TensorFlow** versions when applicable.
* A short, self-contained, code snippet that allows us to reproduce the bug in
  less than 30s (see the example below).
* The *full* traceback if an exception is raised.
* Attach any other additional information, like screenshots, you think may help.
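For example, a reproduction built around a minimal snippet like the one below is usually all we need (the pipeline task, model name and input are purely illustrative):
```python
# Hypothetical minimal reproduction: swap in the task, model and inputs that trigger your bug.
from transformers import pipeline

classifier = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
print(classifier("A short, self-contained example that runs in a few seconds."))
# If an exception is raised, paste the full traceback in the issue.
```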
To get the OS and software versions automatically, run the following command:
```bash
transformers-cli env
```
You can also run the same command from the root of the repository:
```bash
python src/transformers/commands/transformers_cli.py env
```
### Do you want a new feature?
If there is a new feature you'd like to see in ๐ค Transformers, please open an issue and describe:
1. What is the *motivation* behind this feature? Is it related to a problem or frustration with the library? Is it a feature related to something you need for a project? Is it something you worked on and think it could benefit the community?
Whatever it is, we'd love to hear about it!
2. Describe your requested feature in as much detail as possible. The more you can tell us about it, the better we'll be able to help you.
3. Provide a *code snippet* that demonstrates the feature's usage.
4. If the feature is related to a paper, please include a link.
If your issue is well written we're already 80% of the way there by the time you create it.
We have added [templates](https://github.com/huggingface/transformers/tree/main/templates) to help you get started with your issue.
## Do you want to implement a new model?
New models are constantly released and if you want to implement a new model, please provide the following information
* A short description of the model and a link to the paper.
* Link to the implementation if it is open-sourced.
* Link to the model weights if they are available.
If you are willing to contribute the model yourself, let us know so we can help you add it to ๐ค Transformers!
We have added a [detailed guide and templates](https://github.com/huggingface/transformers/tree/main/templates) to help you get started with adding a new model, and we also have a more technical guide for [how to add a model to ๐ค Transformers](https://huggingface.co/docs/transformers/add_new_model).
## Do you want to add documentation?
We're always looking for improvements to the documentation that make it more clear and accurate. Please let us know how the documentation can be improved such as typos and any content that is missing, unclear or inaccurate. We'll be happy to make the changes or help you make a contribution if you're interested!
For more details about how to generate, build, and write the documentation, take a look at the documentation [README](https://github.com/huggingface/transformers/tree/main/docs).
## Create a Pull Request
Before writing any code, we strongly advise you to search through the existing PRs or
issues to make sure nobody is already working on the same thing. If you are
unsure, it is always a good idea to open an issue to get some feedback.
You will need basic `git` proficiency to contribute to
๐ค Transformers. While `git` is not the easiest tool to use, it has the greatest
manual. Type `git --help` in a shell and enjoy! If you prefer books, [Pro
Git](https://git-scm.com/book/en/v2) is a very good reference.
You'll need **[Python 3.8](https://github.com/huggingface/transformers/blob/main/setup.py#L426)** or above to contribute to 🤗 Transformers. Follow the steps below to start contributing:
1. Fork the [repository](https://github.com/huggingface/transformers) by
clicking on the **[Fork](https://github.com/huggingface/transformers/fork)** button on the repository's page. This creates a copy of the code
under your GitHub user account.
2. Clone your fork to your local disk, and add the base repository as a remote:
```bash
git clone [email protected]:<your Github handle>/transformers.git
cd transformers
git remote add upstream https://github.com/huggingface/transformers.git
```
3. Create a new branch to hold your development changes:
```bash
git checkout -b a-descriptive-name-for-my-changes
```
๐จ **Do not** work on the `main` branch!
4. Set up a development environment by running the following command in a virtual environment:
```bash
pip install -e ".[dev]"
```
If ๐ค Transformers was already installed in the virtual environment, remove
it with `pip uninstall transformers` before reinstalling it in editable
mode with the `-e` flag.
Depending on your OS, and since the number of optional dependencies of Transformers is growing, you might get a
failure with this command. If that's the case make sure to install the Deep Learning framework you are working with
(PyTorch, TensorFlow and/or Flax) then do:
```bash
pip install -e ".[quality]"
```
which should be enough for most use cases.
5. Develop the features in your branch.
As you work on your code, you should make sure the test suite
passes. Run the tests impacted by your changes like this:
```bash
pytest tests/<TEST_TO_RUN>.py
```
For more information about tests, check out the
[Testing](https://huggingface.co/docs/transformers/testing) guide.
๐ค Transformers relies on `black` and `ruff` to format its source code
consistently. After you make changes, apply automatic style corrections and code verifications
that can't be automated in one go with:
```bash
make fixup
```
This target is also optimized to only work with files modified by the PR you're working on.
If you prefer to run the checks one after the other, the following command applies the
style corrections:
```bash
make style
```
๐ค Transformers also uses `ruff` and a few custom scripts to check for coding mistakes. Quality
controls are run by the CI, but you can run the same checks with:
```bash
make quality
```
Finally, we have a lot of scripts to make sure we don't forget to update
some files when adding a new model. You can run these scripts with:
```bash
make repo-consistency
```
To learn more about those checks and how to fix any issues with them, check out the
[Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide.
If you're modifying documents under the `docs/source` directory, make sure the documentation can still be built. This check will also run in the CI when you open a pull request. To run a local check
make sure you install the documentation builder:
```bash
pip install ".[docs]"
```
Run the following command from the root of the repository:
```bash
doc-builder build transformers docs/source/en --build_dir ~/tmp/test-build
```
This will build the documentation in the `~/tmp/test-build` folder where you can inspect the generated
Markdown files with your favorite editor. You can also preview the docs on GitHub when you open a pull request.
Once you're happy with your changes, add the changed files with `git add` and
record your changes locally with `git commit`:
```bash
git add modified_file.py
git commit
```
Please remember to write [good commit
messages](https://chris.beams.io/posts/git-commit/) to clearly communicate the changes you made!
To keep your copy of the code up to date with the original
repository, rebase your branch on `upstream/branch` *before* you open a pull request or if requested by a maintainer:
```bash
git fetch upstream
git rebase upstream/main
```
Push your changes to your branch:
```bash
git push -u origin a-descriptive-name-for-my-changes
```
If you've already opened a pull request, you'll need to force push with the `--force` flag. Otherwise, if the pull request hasn't been opened yet, you can just push your changes normally.
6. Now you can go to your fork of the repository on GitHub and click on **Pull Request** to open a pull request. Make sure you tick off all the boxes on our [checklist](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md/#pull-request-checklist) below. When you're ready, you can send your changes to the project maintainers for review.
7. It's ok if maintainers request changes, it happens to our core contributors
too! So everyone can see the changes in the pull request, work in your local
branch and push the changes to your fork. They will automatically appear in
the pull request.
### Pull request checklist
โ The pull request title should summarize your contribution.<br>
โ If your pull request addresses an issue, please mention the issue number in the pull
request description to make sure they are linked (and people viewing the issue know you
are working on it).<br>
โ To indicate a work in progress please prefix the title with `[WIP]`. These are
useful to avoid duplicated work, and to differentiate it from PRs ready to be merged.<br>
โ Make sure existing tests pass.<br>
โ If adding a new feature, also add tests for it.<br>
- If you are adding a new model, make sure you use
`ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)` to trigger the common tests.
- If you are adding new `@slow` tests, make sure they pass using
`RUN_SLOW=1 python -m pytest tests/models/my_new_model/test_my_new_model.py`.
- If you are adding a new tokenizer, write tests and make sure
`RUN_SLOW=1 python -m pytest tests/models/{your_model_name}/test_tokenization_{your_model_name}.py` passes.
- CircleCI does not run the slow tests, but GitHub Actions does every night!<br>
☐ All public methods must have informative docstrings (see
[`modeling_bert.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py)
for an example).<br>
☐ Due to the rapidly growing repository, don't add any images, videos, or other
non-text files that'll significantly weigh down the repository. Instead, use a Hub
repository such as [`hf-internal-testing`](https://huggingface.co/hf-internal-testing)
to host these files and reference them by URL. We recommend placing documentation
related images in the following repository:
[huggingface/documentation-images](https://huggingface.co/datasets/huggingface/documentation-images).
You can open a PR on this dataset repository and ask a Hugging Face member to merge it.
For more information about the checks run on a pull request, take a look at our [Checks on a Pull Request](https://huggingface.co/docs/transformers/pr_checks) guide.
### Tests
An extensive test suite is included to test the library behavior and several examples. Library tests can be found in
the [tests](https://github.com/huggingface/transformers/tree/main/tests) folder and examples tests in the
[examples](https://github.com/huggingface/transformers/tree/main/examples) folder.
We like `pytest` and `pytest-xdist` because they're faster. From the root of the
repository, specify a *path to a subfolder or a test file* to run the test.
```bash
python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model
```
Similarly, for the `examples` directory, specify a *path to a subfolder or test file* to run the test. For example, the following command tests the text classification subfolder in the PyTorch `examples` directory:
```bash
pip install -r examples/xxx/requirements.txt # only needed the first time
python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification
```
In fact, this is actually how our `make test` and `make test-examples` commands are implemented (not including the `pip install`)!
You can also specify a smaller set of tests in order to test only the feature
you're working on.
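For example, you can restrict a run to the tests whose names match a keyword with pytest's `-k` option (the path and keyword below are placeholders for your own model and test names):
```bash
# run only the tests in this file whose name contains "attention" (adjust path and keyword)
python -m pytest tests/models/my_new_model/test_my_new_model.py -k "attention" -v
```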
By default, slow tests are skipped but you can set the `RUN_SLOW` environment variable to
`yes` to run them. This will download many gigabytes of models so make sure you
have enough disk space, a good internet connection or a lot of patience!
<Tip warning={true}>
Remember to specify a *path to a subfolder or a test file* to run the test. Otherwise, you'll run all the tests in the `tests` or `examples` folder, which will take a very long time!
</Tip>
```bash
RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model
RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/text-classification
```
Like the slow tests, there are other environment variables available which are not enabled by default during testing:
- `RUN_CUSTOM_TOKENIZERS`: Enables tests for custom tokenizers.
- `RUN_PT_FLAX_CROSS_TESTS`: Enables tests for PyTorch + Flax integration.
- `RUN_PT_TF_CROSS_TESTS`: Enables tests for TensorFlow + PyTorch integration.
More environment variables and additional information can be found in the [testing_utils.py](src/transformers/testing_utils.py).
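For example, to also run the TensorFlow + PyTorch cross tests for a model folder (the path below is a placeholder):
```bash
# enable the PyTorch/TensorFlow cross tests in addition to the regular ones
RUN_PT_TF_CROSS_TESTS=yes python -m pytest -n auto --dist=loadfile -s -v ./tests/models/my_new_model
```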
🤗 Transformers uses `pytest` as a test runner only. It doesn't use any
`pytest`-specific features in the test suite itself.
This means `unittest` is fully supported. Here's how to run tests with
`unittest`:
```bash
python -m unittest discover -s tests -t . -v
python -m unittest discover -s examples -t examples -v
```
### Style guide
For documentation strings, 🤗 Transformers follows the [Google Python Style Guide](https://google.github.io/styleguide/pyguide.html).
Check our [documentation writing guide](https://github.com/huggingface/transformers/tree/main/docs#writing-documentation---specification)
for more information.
### Develop on Windows
On Windows (unless you're working in [Windows Subsystem for Linux](https://learn.microsoft.com/en-us/windows/wsl/), also known as WSL), you need to configure git to transform Windows `CRLF` line endings to Linux `LF` line endings:
```bash
git config core.autocrlf input
```
One way to run the `make` command on Windows is with MSYS2:
1. [Download MSYS2](https://www.msys2.org/), and we assume it's installed in `C:\msys64`.
2. Open the command line `C:\msys64\msys2.exe` (it should be available from the **Start** menu).
3. Run in the shell: `pacman -Syu` and install `make` with `pacman -S make`.
4. Add `C:\msys64\usr\bin` to your PATH environment variable.
You can now use `make` from any terminal (PowerShell, cmd.exe, etc.)! 🎉
### Sync a forked repository with upstream main (the Hugging Face repository)
When updating the main branch of a forked repository, please follow these steps to avoid pinging the upstream repository which adds reference notes to each upstream PR, and sends unnecessary notifications to the developers involved in these PRs.
1. When possible, avoid syncing with the upstream using a branch and PR on the forked repository. Instead, merge directly into the forked main.
2. If a PR is absolutely necessary, use the following steps after checking out your branch:
```bash
git checkout -b your-branch-for-syncing
git pull --squash --no-commit upstream main
git commit -m '<your message without GitHub references>'
git push --set-upstream origin your-branch-for-syncing
```
| transformers/docs/source/en/contributing.md/0 | {
"file_path": "transformers/docs/source/en/contributing.md",
"repo_id": "transformers",
"token_count": 5130
} | 242 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Utilities for Generation
This page lists all the utility functions used by [`~generation.GenerationMixin.generate`],
[`~generation.GenerationMixin.greedy_search`],
[`~generation.GenerationMixin.contrastive_search`],
[`~generation.GenerationMixin.sample`],
[`~generation.GenerationMixin.beam_search`],
[`~generation.GenerationMixin.beam_sample`],
[`~generation.GenerationMixin.group_beam_search`], and
[`~generation.GenerationMixin.constrained_beam_search`].
Most of those are only useful if you are studying the code of the generate methods in the library.
## Generate Outputs
The output of [`~generation.GenerationMixin.generate`] is an instance of a subclass of
[`~utils.ModelOutput`]. This output is a data structure containing all the information returned
by [`~generation.GenerationMixin.generate`], but that can also be used as a tuple or a dictionary.
Here's an example:
```python
from transformers import GPT2Tokenizer, GPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")
generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
```
The `generation_output` object is a [`~generation.GenerateDecoderOnlyOutput`]. As we can
see in the documentation of that class below, this means it has the following attributes:
- `sequences`: the generated sequences of tokens
- `scores` (optional): the prediction scores of the language modeling head, for each generation step
- `hidden_states` (optional): the hidden states of the model, for each generation step
- `attentions` (optional): the attention weights of the model, for each generation step
Here we have the `scores` since we passed along `output_scores=True`, but we don't have `hidden_states` and
`attentions` because we didn't pass `output_hidden_states=True` or `output_attentions=True`.
You can access each attribute as you would usually do, and if that attribute has not been returned by the model, you
will get `None`. Here for instance `generation_output.scores` are all the generated prediction scores of the
language modeling head, and `generation_output.attentions` is `None`.
When using our `generation_output` object as a tuple, it only keeps the attributes that don't have `None` values.
Here, for instance, it has two elements, `sequences` then `scores`, so
```python
generation_output[:2]
```
will return the tuple `(generation_output.sequences, generation_output.scores)`.
When using our `generation_output` object as a dictionary, it only keeps the attributes that don't have `None`
values. Here, for instance, it has two keys that are `sequences` and `scores`.
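For example, the following sketch shows both access patterns on the `generation_output` object from above:
```python
# tuple-style access: only the non-None attributes are kept, in order
sequences, scores = generation_output[:2]

# dictionary-style access works with the attribute names as keys
sequences = generation_output["sequences"]
scores = generation_output["scores"]
```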
We document here all output types.
### PyTorch
[[autodoc]] generation.GenerateDecoderOnlyOutput
[[autodoc]] generation.GenerateEncoderDecoderOutput
[[autodoc]] generation.GenerateBeamDecoderOnlyOutput
[[autodoc]] generation.GenerateBeamEncoderDecoderOutput
### TensorFlow
[[autodoc]] generation.TFGreedySearchEncoderDecoderOutput
[[autodoc]] generation.TFGreedySearchDecoderOnlyOutput
[[autodoc]] generation.TFSampleEncoderDecoderOutput
[[autodoc]] generation.TFSampleDecoderOnlyOutput
[[autodoc]] generation.TFBeamSearchEncoderDecoderOutput
[[autodoc]] generation.TFBeamSearchDecoderOnlyOutput
[[autodoc]] generation.TFBeamSampleEncoderDecoderOutput
[[autodoc]] generation.TFBeamSampleDecoderOnlyOutput
[[autodoc]] generation.TFContrastiveSearchEncoderDecoderOutput
[[autodoc]] generation.TFContrastiveSearchDecoderOnlyOutput
### FLAX
[[autodoc]] generation.FlaxSampleOutput
[[autodoc]] generation.FlaxGreedySearchOutput
[[autodoc]] generation.FlaxBeamSearchOutput
## LogitsProcessor
A [`LogitsProcessor`] can be used to modify the prediction scores of a language model head for
generation.
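As a short usage sketch, processors can be combined in a [`LogitsProcessorList`] and passed to [`~generation.GenerationMixin.generate`] (the checkpoint and parameter values below are only illustrative):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, LogitsProcessorList, MinLengthLogitsProcessor

# illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")

# suppress the EOS token until at least 10 tokens have been generated
logits_processor = LogitsProcessorList(
    [MinLengthLogitsProcessor(10, eos_token_id=model.generation_config.eos_token_id)]
)
outputs = model.generate(**inputs, logits_processor=logits_processor)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```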
### PyTorch
[[autodoc]] AlternatingCodebooksLogitsProcessor
- __call__
[[autodoc]] ClassifierFreeGuidanceLogitsProcessor
- __call__
[[autodoc]] EncoderNoRepeatNGramLogitsProcessor
- __call__
[[autodoc]] EncoderRepetitionPenaltyLogitsProcessor
- __call__
[[autodoc]] EpsilonLogitsWarper
- __call__
[[autodoc]] EtaLogitsWarper
- __call__
[[autodoc]] ExponentialDecayLengthPenalty
- __call__
[[autodoc]] ForcedBOSTokenLogitsProcessor
- __call__
[[autodoc]] ForcedEOSTokenLogitsProcessor
- __call__
[[autodoc]] ForceTokensLogitsProcessor
- __call__
[[autodoc]] HammingDiversityLogitsProcessor
- __call__
[[autodoc]] InfNanRemoveLogitsProcessor
- __call__
[[autodoc]] LogitNormalization
- __call__
[[autodoc]] LogitsProcessor
- __call__
[[autodoc]] LogitsProcessorList
- __call__
[[autodoc]] LogitsWarper
- __call__
[[autodoc]] MinLengthLogitsProcessor
- __call__
[[autodoc]] MinNewTokensLengthLogitsProcessor
- __call__
[[autodoc]] NoBadWordsLogitsProcessor
- __call__
[[autodoc]] NoRepeatNGramLogitsProcessor
- __call__
[[autodoc]] PrefixConstrainedLogitsProcessor
- __call__
[[autodoc]] RepetitionPenaltyLogitsProcessor
- __call__
[[autodoc]] SequenceBiasLogitsProcessor
- __call__
[[autodoc]] SuppressTokensAtBeginLogitsProcessor
- __call__
[[autodoc]] SuppressTokensLogitsProcessor
- __call__
[[autodoc]] TemperatureLogitsWarper
- __call__
[[autodoc]] TopKLogitsWarper
- __call__
[[autodoc]] TopPLogitsWarper
- __call__
[[autodoc]] TypicalLogitsWarper
- __call__
[[autodoc]] UnbatchedClassifierFreeGuidanceLogitsProcessor
- __call__
[[autodoc]] WhisperTimeStampLogitsProcessor
- __call__
### TensorFlow
[[autodoc]] TFForcedBOSTokenLogitsProcessor
- __call__
[[autodoc]] TFForcedEOSTokenLogitsProcessor
- __call__
[[autodoc]] TFForceTokensLogitsProcessor
- __call__
[[autodoc]] TFLogitsProcessor
- __call__
[[autodoc]] TFLogitsProcessorList
- __call__
[[autodoc]] TFLogitsWarper
- __call__
[[autodoc]] TFMinLengthLogitsProcessor
- __call__
[[autodoc]] TFNoBadWordsLogitsProcessor
- __call__
[[autodoc]] TFNoRepeatNGramLogitsProcessor
- __call__
[[autodoc]] TFRepetitionPenaltyLogitsProcessor
- __call__
[[autodoc]] TFSuppressTokensAtBeginLogitsProcessor
- __call__
[[autodoc]] TFSuppressTokensLogitsProcessor
- __call__
[[autodoc]] TFTemperatureLogitsWarper
- __call__
[[autodoc]] TFTopKLogitsWarper
- __call__
[[autodoc]] TFTopPLogitsWarper
- __call__
### FLAX
[[autodoc]] FlaxForcedBOSTokenLogitsProcessor
- __call__
[[autodoc]] FlaxForcedEOSTokenLogitsProcessor
- __call__
[[autodoc]] FlaxForceTokensLogitsProcessor
- __call__
[[autodoc]] FlaxLogitsProcessor
- __call__
[[autodoc]] FlaxLogitsProcessorList
- __call__
[[autodoc]] FlaxLogitsWarper
- __call__
[[autodoc]] FlaxMinLengthLogitsProcessor
- __call__
[[autodoc]] FlaxSuppressTokensAtBeginLogitsProcessor
- __call__
[[autodoc]] FlaxSuppressTokensLogitsProcessor
- __call__
[[autodoc]] FlaxTemperatureLogitsWarper
- __call__
[[autodoc]] FlaxTopKLogitsWarper
- __call__
[[autodoc]] FlaxTopPLogitsWarper
- __call__
[[autodoc]] FlaxWhisperTimeStampLogitsProcessor
- __call__
## StoppingCriteria
A [`StoppingCriteria`] can be used to change when to stop generation (other than EOS token). Please note that this is exclusively available to our PyTorch implementations.
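A minimal sketch of plugging a custom stopping condition into [`~generation.GenerationMixin.generate`] (the checkpoint and time budget are only illustrative):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, StoppingCriteriaList, MaxTimeCriteria

# illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("Today was a beautiful day and", return_tensors="pt")

# stop generating once the call has run for roughly two seconds
stopping_criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=2.0)])
outputs = model.generate(**inputs, stopping_criteria=stopping_criteria, max_new_tokens=100)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```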
[[autodoc]] StoppingCriteria
- __call__
[[autodoc]] StoppingCriteriaList
- __call__
[[autodoc]] MaxLengthCriteria
- __call__
[[autodoc]] MaxTimeCriteria
- __call__
## Constraints
A [`Constraint`] can be used to force the generation to include specific tokens or sequences in the output. Please note that this is exclusively available to our PyTorch implementations.
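A minimal sketch of constrained generation (the checkpoint and forced phrase are only illustrative; constraints require beam search, i.e. `num_beams > 1`):
```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, PhrasalConstraint

# illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
inputs = tokenizer("translate English to German: The house is wonderful.", return_tensors="pt")

# force the token sequence for "Haus" to appear somewhere in the output
force_ids = tokenizer("Haus", add_special_tokens=False).input_ids
constraints = [PhrasalConstraint(force_ids)]
outputs = model.generate(**inputs, constraints=constraints, num_beams=4)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```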
[[autodoc]] Constraint
[[autodoc]] PhrasalConstraint
[[autodoc]] DisjunctiveConstraint
[[autodoc]] ConstraintListState
## BeamSearch
[[autodoc]] BeamScorer
- process
- finalize
[[autodoc]] BeamSearchScorer
- process
- finalize
[[autodoc]] ConstrainedBeamSearchScorer
- process
- finalize
## Utilities
[[autodoc]] top_k_top_p_filtering
[[autodoc]] tf_top_k_top_p_filtering
## Streamers
[[autodoc]] TextStreamer
[[autodoc]] TextIteratorStreamer
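A minimal sketch of streaming generated text to stdout (the checkpoint and prompt are only illustrative):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer

# illustrative checkpoint
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")
inputs = tokenizer("An increasing sequence: one,", return_tensors="pt")

# tokens are printed as soon as they are generated instead of waiting for the full output
streamer = TextStreamer(tokenizer)
_ = model.generate(**inputs, streamer=streamer, max_new_tokens=20)
```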
## Caches
[[autodoc]] Cache
- update
[[autodoc]] DynamicCache
- update
- get_seq_length
- reorder_cache
- to_legacy_cache
- from_legacy_cache
[[autodoc]] SinkCache
- update
- get_seq_length
- reorder_cache
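A short usage sketch for [`SinkCache`], assuming a checkpoint whose architecture supports the new cache classes (e.g. Llama-family models; the checkpoint below is only illustrative):
```python
from transformers import AutoModelForCausalLM, AutoTokenizer, SinkCache

# illustrative Llama-family checkpoint (the cache classes require architectures that support them)
tokenizer = AutoTokenizer.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
model = AutoModelForCausalLM.from_pretrained("TinyLlama/TinyLlama-1.1B-Chat-v1.0")
inputs = tokenizer("Attention sinks keep the earliest tokens so that", return_tensors="pt")

# keep a sliding window of 256 tokens plus 4 "sink" tokens in the cache
past_key_values = SinkCache(window_length=256, num_sink_tokens=4)
outputs = model.generate(**inputs, past_key_values=past_key_values, max_new_tokens=20)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
```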
| transformers/docs/source/en/internal/generation_utils.md/0 | {
"file_path": "transformers/docs/source/en/internal/generation_utils.md",
"repo_id": "transformers",
"token_count": 3136
} | 243 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Image Processor
An image processor is in charge of preparing input features for vision models and post-processing their outputs. This includes transformations such as resizing, normalization, and conversion to PyTorch, TensorFlow, Flax, and NumPy tensors. It may also include model-specific post-processing such as converting logits to segmentation masks.
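As a short sketch, the usual entry point is [`AutoImageProcessor`] (the image URL and checkpoint below are only illustrative):
```python
import requests
from PIL import Image
from transformers import AutoImageProcessor

# illustrative image and checkpoint
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")
inputs = image_processor(images=image, return_tensors="pt")
print(inputs["pixel_values"].shape)  # batched, channels-first pixel values ready for the model
```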
## ImageProcessingMixin
[[autodoc]] image_processing_utils.ImageProcessingMixin
- from_pretrained
- save_pretrained
## BatchFeature
[[autodoc]] BatchFeature
## BaseImageProcessor
[[autodoc]] image_processing_utils.BaseImageProcessor
| transformers/docs/source/en/main_classes/image_processor.md/0 | {
"file_path": "transformers/docs/source/en/main_classes/image_processor.md",
"repo_id": "transformers",
"token_count": 343
} | 244 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Audio Spectrogram Transformer
## Overview
The Audio Spectrogram Transformer model was proposed in [AST: Audio Spectrogram Transformer](https://arxiv.org/abs/2104.01778) by Yuan Gong, Yu-An Chung, James Glass.
The Audio Spectrogram Transformer applies a [Vision Transformer](vit) to audio, by turning audio into an image (spectrogram). The model obtains state-of-the-art results
for audio classification.
The abstract from the paper is the following:
*In the past decade, convolutional neural networks (CNNs) have been widely adopted as the main building block for end-to-end audio classification models, which aim to learn a direct mapping from audio spectrograms to corresponding labels. To better capture long-range global context, a recent trend is to add a self-attention mechanism on top of the CNN, forming a CNN-attention hybrid model. However, it is unclear whether the reliance on a CNN is necessary, and if neural networks purely based on attention are sufficient to obtain good performance in audio classification. In this paper, we answer the question by introducing the Audio Spectrogram Transformer (AST), the first convolution-free, purely attention-based model for audio classification. We evaluate AST on various audio classification benchmarks, where it achieves new state-of-the-art results of 0.485 mAP on AudioSet, 95.6% accuracy on ESC-50, and 98.1% accuracy on Speech Commands V2.*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/audio_spectogram_transformer_architecture.png"
alt="drawing" width="600"/>
<small> Audio Spectrogram Transformer architecture. Taken from the <a href="https://arxiv.org/abs/2104.01778">original paper</a>.</small>
This model was contributed by [nielsr](https://huggingface.co/nielsr).
The original code can be found [here](https://github.com/YuanGongND/ast).
## Usage tips
- When fine-tuning the Audio Spectrogram Transformer (AST) on your own dataset, it's recommended to take care of the input normalization (to make
sure the input has mean of 0 and std of 0.5). [`ASTFeatureExtractor`] takes care of this. Note that it uses the AudioSet
mean and std by default. You can check [`ast/src/get_norm_stats.py`](https://github.com/YuanGongND/ast/blob/master/src/get_norm_stats.py) to see how
the authors compute the stats for a downstream dataset.
- Note that the AST needs a low learning rate (the authors use a 10 times smaller learning rate compared to their CNN model proposed in the
[PSLA paper](https://arxiv.org/abs/2102.01243)) and converges quickly, so please search for a suitable learning rate and learning rate scheduler for your task.
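As a short illustration of the normalization tip above, [`ASTFeatureExtractor`] turns a raw waveform into the spectrogram features the model expects (the dummy waveform below is only a placeholder for your own 16 kHz audio):
```python
import numpy as np
from transformers import ASTFeatureExtractor

# one second of silence as a stand-in for real audio sampled at 16 kHz
waveform = np.zeros(16000, dtype=np.float32)

feature_extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
print(inputs["input_values"].shape)  # normalized spectrogram patches for the model
```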
## Resources
A list of official Hugging Face and community (indicated by 🌎) resources to help you get started with the Audio Spectrogram Transformer.
<PipelineTag pipeline="audio-classification"/>
- A notebook illustrating inference with AST for audio classification can be found [here](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/AST).
- [`ASTForAudioClassification`] is supported by this [example script](https://github.com/huggingface/transformers/tree/main/examples/pytorch/audio-classification) and [notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/audio_classification.ipynb).
- See also: [Audio classification](../tasks/audio_classification).
If you're interested in submitting a resource to be included here, please feel free to open a Pull Request and we'll review it! The resource should ideally demonstrate something new instead of duplicating an existing resource.
## ASTConfig
[[autodoc]] ASTConfig
## ASTFeatureExtractor
[[autodoc]] ASTFeatureExtractor
- __call__
## ASTModel
[[autodoc]] ASTModel
- forward
## ASTForAudioClassification
[[autodoc]] ASTForAudioClassification
- forward
| transformers/docs/source/en/model_doc/audio-spectrogram-transformer.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/audio-spectrogram-transformer.md",
"repo_id": "transformers",
"token_count": 1220
} | 245 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Blenderbot Small
Note that [`BlenderbotSmallModel`] and
[`BlenderbotSmallForConditionalGeneration`] are only used in combination with the checkpoint
[facebook/blenderbot-90M](https://huggingface.co/facebook/blenderbot-90M). Larger Blenderbot checkpoints should
instead be used with [`BlenderbotModel`] and
[`BlenderbotForConditionalGeneration`]
## Overview
The Blender chatbot model was proposed in [Recipes for building an open-domain chatbot](https://arxiv.org/pdf/2004.13637.pdf) by Stephen Roller, Emily Dinan, Naman Goyal, Da Ju, Mary Williamson, Yinhan Liu,
Jing Xu, Myle Ott, Kurt Shuster, Eric M. Smith, Y-Lan Boureau, Jason Weston on 30 Apr 2020.
The abstract of the paper is the following:
*Building open-domain chatbots is a challenging area for machine learning research. While prior work has shown that
scaling neural models in the number of parameters and the size of the data they are trained on gives improved results,
we show that other ingredients are important for a high-performing chatbot. Good conversation requires a number of
skills that an expert conversationalist blends in a seamless way: providing engaging talking points and listening to
their partners, and displaying knowledge, empathy and personality appropriately, while maintaining a consistent
persona. We show that large scale models can learn these skills when given appropriate training data and choice of
generation strategy. We build variants of these recipes with 90M, 2.7B and 9.4B parameter models, and make our models
and code publicly available. Human evaluations show our best models are superior to existing approaches in multi-turn
dialogue in terms of engagingness and humanness measurements. We then discuss the limitations of this work by analyzing
failure cases of our models.*
This model was contributed by [patrickvonplaten](https://huggingface.co/patrickvonplaten). The authors' code can be
found [here](https://github.com/facebookresearch/ParlAI).
## Usage tips
Blenderbot Small is a model with absolute position embeddings so it's usually advised to pad the inputs on the right rather than
the left.
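A minimal conversational sketch with the 90M checkpoint mentioned above (the utterance is only illustrative):
```python
from transformers import BlenderbotSmallForConditionalGeneration, BlenderbotSmallTokenizer

# the small 90M checkpoint referenced in this page
model = BlenderbotSmallForConditionalGeneration.from_pretrained("facebook/blenderbot-90M")
tokenizer = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot-90M")

utterance = "My friends are cool but they eat too many carbs."
inputs = tokenizer([utterance], return_tensors="pt")
reply_ids = model.generate(**inputs)
print(tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0])
```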
## Resources
- [Causal language modeling task guide](../tasks/language_modeling)
- [Translation task guide](../tasks/translation)
- [Summarization task guide](../tasks/summarization)
## BlenderbotSmallConfig
[[autodoc]] BlenderbotSmallConfig
## BlenderbotSmallTokenizer
[[autodoc]] BlenderbotSmallTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## BlenderbotSmallTokenizerFast
[[autodoc]] BlenderbotSmallTokenizerFast
<frameworkcontent>
<pt>
## BlenderbotSmallModel
[[autodoc]] BlenderbotSmallModel
- forward
## BlenderbotSmallForConditionalGeneration
[[autodoc]] BlenderbotSmallForConditionalGeneration
- forward
## BlenderbotSmallForCausalLM
[[autodoc]] BlenderbotSmallForCausalLM
- forward
</pt>
<tf>
## TFBlenderbotSmallModel
[[autodoc]] TFBlenderbotSmallModel
- call
## TFBlenderbotSmallForConditionalGeneration
[[autodoc]] TFBlenderbotSmallForConditionalGeneration
- call
</tf>
<jax>
## FlaxBlenderbotSmallModel
[[autodoc]] FlaxBlenderbotSmallModel
- __call__
- encode
- decode
## FlaxBlenderbotSmallForConditionalGeneration
[[autodoc]] FlaxBlenderbotSmallForConditionalGeneration
- __call__
- encode
- decode
</jax>
</frameworkcontent>
| transformers/docs/source/en/model_doc/blenderbot-small.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/blenderbot-small.md",
"repo_id": "transformers",
"token_count": 1170
} | 246 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Encoder Decoder Models
## Overview
The [`EncoderDecoderModel`] can be used to initialize a sequence-to-sequence model with any
pretrained autoencoding model as the encoder and any pretrained autoregressive model as the decoder.
The effectiveness of initializing sequence-to-sequence models with pretrained checkpoints for sequence generation tasks
was shown in [Leveraging Pre-trained Checkpoints for Sequence Generation Tasks](https://arxiv.org/abs/1907.12461) by
Sascha Rothe, Shashi Narayan, Aliaksei Severyn.
After such an [`EncoderDecoderModel`] has been trained/fine-tuned, it can be saved/loaded just like
any other models (see the examples for more information).
An application of this architecture could be to leverage two pretrained [`BertModel`] as the encoder
and decoder for a summarization model as was shown in: [Text Summarization with Pretrained Encoders](https://arxiv.org/abs/1908.08345) by Yang Liu and Mirella Lapata.
## Randomly initializing `EncoderDecoderModel` from model configurations.
[`EncoderDecoderModel`] can be randomly initialized from an encoder and a decoder config. In the following example, we show how to do this using the default [`BertModel`] configuration for the encoder and the default [`BertForCausalLM`] configuration for the decoder.
```python
>>> from transformers import BertConfig, EncoderDecoderConfig, EncoderDecoderModel
>>> config_encoder = BertConfig()
>>> config_decoder = BertConfig()
>>> config = EncoderDecoderConfig.from_encoder_decoder_configs(config_encoder, config_decoder)
>>> model = EncoderDecoderModel(config=config)
```
## Initializing `EncoderDecoderModel` from a pretrained encoder and a pretrained decoder.
[`EncoderDecoderModel`] can be initialized from a pretrained encoder checkpoint and a pretrained decoder checkpoint. Note that any pretrained auto-encoding model, *e.g.* BERT, can serve as the encoder and both pretrained auto-encoding models, *e.g.* BERT, pretrained causal language models, *e.g.* GPT2, as well as the pretrained decoder part of sequence-to-sequence models, *e.g.* decoder of BART, can be used as the decoder.
Depending on which architecture you choose as the decoder, the cross-attention layers might be randomly initialized.
Initializing [`EncoderDecoderModel`] from a pretrained encoder and decoder checkpoint requires the model to be fine-tuned on a downstream task, as has been shown in [the *Warm-starting-encoder-decoder blog post*](https://huggingface.co/blog/warm-starting-encoder-decoder).
To do so, the `EncoderDecoderModel` class provides a [`EncoderDecoderModel.from_encoder_decoder_pretrained`] method.
```python
>>> from transformers import EncoderDecoderModel, BertTokenizer
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")
```
## Loading an existing `EncoderDecoderModel` checkpoint and performing inference.
To load fine-tuned checkpoints of the `EncoderDecoderModel` class, [`EncoderDecoderModel`] provides the `from_pretrained(...)` method just like any other model architecture in Transformers.
To perform inference, one uses the [`generate`] method, which allows you to autoregressively generate text. This method supports various forms of decoding, such as greedy search, beam search and multinomial sampling.
```python
>>> from transformers import AutoTokenizer, EncoderDecoderModel
>>> # load a fine-tuned seq2seq model and corresponding tokenizer
>>> model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
>>> tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
>>> # let's perform inference on a long piece of text
>>> ARTICLE_TO_SUMMARIZE = (
... "PG&E stated it scheduled the blackouts in response to forecasts for high winds "
... "amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were "
... "scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow."
... )
>>> input_ids = tokenizer(ARTICLE_TO_SUMMARIZE, return_tensors="pt").input_ids
>>> # autoregressively generate summary (uses greedy decoding by default)
>>> generated_ids = model.generate(input_ids)
>>> generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
>>> print(generated_text)
nearly 800 thousand customers were affected by the shutoffs. the aim is to reduce the risk of wildfires. nearly 800, 000 customers were expected to be affected by high winds amid dry conditions. pg & e said it scheduled the blackouts to last through at least midday tomorrow.
```
## Loading a PyTorch checkpoint into `TFEncoderDecoderModel`.
[`TFEncoderDecoderModel.from_pretrained`] currently doesn't support initializing the model from a
PyTorch checkpoint. Passing `from_pt=True` to this method will throw an exception. If there are only PyTorch
checkpoints for a particular encoder-decoder model, a workaround is:
```python
>>> # a workaround to load from pytorch checkpoint
>>> from transformers import EncoderDecoderModel, TFEncoderDecoderModel
>>> _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16")
>>> _model.encoder.save_pretrained("./encoder")
>>> _model.decoder.save_pretrained("./decoder")
>>> model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(
... "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True
... )
>>> # This is only for copying some specific attributes of this particular model.
>>> model.config = _model.config
```
## Training
Once the model is created, it can be fine-tuned similar to BART, T5 or any other encoder-decoder model.
As you can see, only 2 inputs are required for the model in order to compute a loss: `input_ids` (which are the
`input_ids` of the encoded input sequence) and `labels` (which are the `input_ids` of the encoded
target sequence).
```python
>>> from transformers import BertTokenizer, EncoderDecoderModel
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> model = EncoderDecoderModel.from_encoder_decoder_pretrained("bert-base-uncased", "bert-base-uncased")
>>> model.config.decoder_start_token_id = tokenizer.cls_token_id
>>> model.config.pad_token_id = tokenizer.pad_token_id
>>> input_ids = tokenizer(
... "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side.During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft).Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.",
... return_tensors="pt",
... ).input_ids
>>> labels = tokenizer(
... "the eiffel tower surpassed the washington monument to become the tallest structure in the world. it was the first structure to reach a height of 300 metres in paris in 1930. it is now taller than the chrysler building by 5. 2 metres ( 17 ft ) and is the second tallest free - standing structure in paris.",
... return_tensors="pt",
... ).input_ids
>>> # the forward function automatically creates the correct decoder_input_ids
>>> loss = model(input_ids=input_ids, labels=labels).loss
```
Detailed [colab](https://colab.research.google.com/drive/1WIk2bxglElfZewOHboPFNj8H44_VAyKE?usp=sharing#scrollTo=ZwQIEhKOrJpl) for training.
This model was contributed by [thomwolf](https://github.com/thomwolf). This model's TensorFlow and Flax versions
were contributed by [ydshieh](https://github.com/ydshieh).
## EncoderDecoderConfig
[[autodoc]] EncoderDecoderConfig
<frameworkcontent>
<pt>
## EncoderDecoderModel
[[autodoc]] EncoderDecoderModel
- forward
- from_encoder_decoder_pretrained
</pt>
<tf>
## TFEncoderDecoderModel
[[autodoc]] TFEncoderDecoderModel
- call
- from_encoder_decoder_pretrained
</tf>
<jax>
## FlaxEncoderDecoderModel
[[autodoc]] FlaxEncoderDecoderModel
- __call__
- from_encoder_decoder_pretrained
</jax>
</frameworkcontent>
| transformers/docs/source/en/model_doc/encoder-decoder.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/encoder-decoder.md",
"repo_id": "transformers",
"token_count": 2640
} | 247 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# LUKE
## Overview
The LUKE model was proposed in [LUKE: Deep Contextualized Entity Representations with Entity-aware Self-attention](https://arxiv.org/abs/2010.01057) by Ikuya Yamada, Akari Asai, Hiroyuki Shindo, Hideaki Takeda and Yuji Matsumoto.
It is based on RoBERTa and adds entity embeddings as well as an entity-aware self-attention mechanism, which helps
improve performance on various downstream tasks involving reasoning about entities such as named entity recognition,
extractive and cloze-style question answering, entity typing, and relation classification.
The abstract from the paper is the following:
*Entity representations are useful in natural language tasks involving entities. In this paper, we propose new
pretrained contextualized representations of words and entities based on the bidirectional transformer. The proposed
model treats words and entities in a given text as independent tokens, and outputs contextualized representations of
them. Our model is trained using a new pretraining task based on the masked language model of BERT. The task involves
predicting randomly masked words and entities in a large entity-annotated corpus retrieved from Wikipedia. We also
propose an entity-aware self-attention mechanism that is an extension of the self-attention mechanism of the
transformer, and considers the types of tokens (words or entities) when computing attention scores. The proposed model
achieves impressive empirical performance on a wide range of entity-related tasks. In particular, it obtains
state-of-the-art results on five well-known datasets: Open Entity (entity typing), TACRED (relation classification),
CoNLL-2003 (named entity recognition), ReCoRD (cloze-style question answering), and SQuAD 1.1 (extractive question
answering).*
This model was contributed by [ikuyamada](https://huggingface.co/ikuyamada) and [nielsr](https://huggingface.co/nielsr). The original code can be found [here](https://github.com/studio-ousia/luke).
## Usage tips
- This implementation is the same as [`RobertaModel`] with the addition of entity embeddings as well
as an entity-aware self-attention mechanism, which improves performance on tasks involving reasoning about entities.
- LUKE treats entities as input tokens; therefore, it takes `entity_ids`, `entity_attention_mask`,
`entity_token_type_ids` and `entity_position_ids` as extra input. You can obtain those using
[`LukeTokenizer`].
- [`LukeTokenizer`] takes `entities` and `entity_spans` (character-based start and end
positions of the entities in the input text) as extra input. `entities` typically consist of [MASK] entities or
Wikipedia entities. A brief description of these entity inputs is as follows:
- *Inputting [MASK] entities to compute entity representations*: The [MASK] entity is used to mask entities to be
predicted during pretraining. When LUKE receives the [MASK] entity, it tries to predict the original entity by
gathering the information about the entity from the input text. Therefore, the [MASK] entity can be used to address
downstream tasks requiring the information of entities in text such as entity typing, relation classification, and
named entity recognition.
- *Inputting Wikipedia entities to compute knowledge-enhanced token representations*: LUKE learns rich information
(or knowledge) about Wikipedia entities during pretraining and stores the information in its entity embedding. By
using Wikipedia entities as input tokens, LUKE outputs token representations enriched by the information stored in
the embeddings of these entities. This is particularly effective for tasks requiring real-world knowledge, such as
question answering.
- There are three head models for the former use case:
- [`LukeForEntityClassification`], for tasks to classify a single entity in an input text such as
entity typing, e.g. the [Open Entity dataset](https://www.cs.utexas.edu/~eunsol/html_pages/open_entity.html).
This model places a linear head on top of the output entity representation.
- [`LukeForEntityPairClassification`], for tasks to classify the relationship between two entities
such as relation classification, e.g. the [TACRED dataset](https://nlp.stanford.edu/projects/tacred/). This
model places a linear head on top of the concatenated output representation of the pair of given entities.
- [`LukeForEntitySpanClassification`], for tasks to classify the sequence of entity spans, such as
named entity recognition (NER). This model places a linear head on top of the output entity representations. You
can address NER using this model by inputting all possible entity spans in the text to the model.
[`LukeTokenizer`] has a `task` argument, which enables you to easily create an input to these
head models by specifying `task="entity_classification"`, `task="entity_pair_classification"`, or
`task="entity_span_classification"`. Please refer to the example code of each head models.
Usage example:
```python
>>> from transformers import LukeTokenizer, LukeModel, LukeForEntityPairClassification
>>> model = LukeModel.from_pretrained("studio-ousia/luke-base")
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-base")
# Example 1: Computing the contextualized entity representation corresponding to the entity mention "Beyoncé"
>>> text = "Beyoncé lives in Los Angeles."
>>> entity_spans = [(0, 7)] # character-based entity span corresponding to "Beyoncé"
>>> inputs = tokenizer(text, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> word_last_hidden_state = outputs.last_hidden_state
>>> entity_last_hidden_state = outputs.entity_last_hidden_state
# Example 2: Inputting Wikipedia entities to obtain enriched contextualized representations
>>> entities = [
... "Beyoncรฉ",
... "Los Angeles",
... ] # Wikipedia entity titles corresponding to the entity mentions "Beyoncé" and "Los Angeles"
>>> entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
>>> inputs = tokenizer(text, entities=entities, entity_spans=entity_spans, add_prefix_space=True, return_tensors="pt")
>>> outputs = model(**inputs)
>>> word_last_hidden_state = outputs.last_hidden_state
>>> entity_last_hidden_state = outputs.entity_last_hidden_state
# Example 3: Classifying the relationship between two entities using LukeForEntityPairClassification head model
>>> model = LukeForEntityPairClassification.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
>>> tokenizer = LukeTokenizer.from_pretrained("studio-ousia/luke-large-finetuned-tacred")
>>> entity_spans = [(0, 7), (17, 28)] # character-based entity spans corresponding to "Beyoncé" and "Los Angeles"
>>> inputs = tokenizer(text, entity_spans=entity_spans, return_tensors="pt")
>>> outputs = model(**inputs)
>>> logits = outputs.logits
>>> predicted_class_idx = int(logits[0].argmax())
>>> print("Predicted class:", model.config.id2label[predicted_class_idx])
```
## Resources
- A [demo notebook](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/LUKE) on how to fine-tune [`LukeForEntityPairClassification`] for relation classification
- [Notebooks showcasing how to reproduce the results reported in the paper with the Hugging Face implementation of LUKE](https://github.com/studio-ousia/luke/tree/master/notebooks)
- [Text classification task guide](../tasks/sequence_classification)
- [Token classification task guide](../tasks/token_classification)
- [Question answering task guide](../tasks/question_answering)
- [Masked language modeling task guide](../tasks/masked_language_modeling)
- [Multiple choice task guide](../tasks/multiple_choice)
## LukeConfig
[[autodoc]] LukeConfig
## LukeTokenizer
[[autodoc]] LukeTokenizer
- __call__
- save_vocabulary
## LukeModel
[[autodoc]] LukeModel
- forward
## LukeForMaskedLM
[[autodoc]] LukeForMaskedLM
- forward
## LukeForEntityClassification
[[autodoc]] LukeForEntityClassification
- forward
## LukeForEntityPairClassification
[[autodoc]] LukeForEntityPairClassification
- forward
## LukeForEntitySpanClassification
[[autodoc]] LukeForEntitySpanClassification
- forward
## LukeForSequenceClassification
[[autodoc]] LukeForSequenceClassification
- forward
## LukeForMultipleChoice
[[autodoc]] LukeForMultipleChoice
- forward
## LukeForTokenClassification
[[autodoc]] LukeForTokenClassification
- forward
## LukeForQuestionAnswering
[[autodoc]] LukeForQuestionAnswering
- forward
| transformers/docs/source/en/model_doc/luke.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/luke.md",
"repo_id": "transformers",
"token_count": 2521
} | 248 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# NLLB-MOE
## Overview
The NLLB model was presented in [No Language Left Behind: Scaling Human-Centered Machine Translation](https://arxiv.org/abs/2207.04672) by Marta R. Costa-jussà, James Cross, Onur Çelebi,
Maha Elbayad, Kenneth Heafield, Kevin Heffernan, Elahe Kalbassi, Janice Lam, Daniel Licht, Jean Maillard, Anna Sun, Skyler Wang, Guillaume Wenzek, Al Youngblood, Bapi Akula,
Loic Barrault, Gabriel Mejia Gonzalez, Prangthip Hansanti, John Hoffman, Semarley Jarrett, Kaushik Ram Sadagopan, Dirk Rowe, Shannon Spruit, Chau Tran, Pierre Andrews,
Necip Fazil Ayan, Shruti Bhosale, Sergey Edunov, Angela Fan, Cynthia Gao, Vedanuj Goswami, Francisco Guzmán, Philipp Koehn, Alexandre Mourachko, Christophe Ropers,
Safiyyah Saleem, Holger Schwenk, and Jeff Wang.
The abstract of the paper is the following:
*Driven by the goal of eradicating language barriers on a global scale, machine translation has solidified itself as a key focus of artificial intelligence research today.
However, such efforts have coalesced around a small subset of languages, leaving behind the vast majority of mostly low-resource languages. What does it take to break the
200 language barrier while ensuring safe, high quality results, all while keeping ethical considerations in mind? In No Language Left Behind, we took on this challenge by
first contextualizing the need for low-resource language translation support through exploratory interviews with native speakers. Then, we created datasets and models aimed
at narrowing the performance gap between low and high-resource languages. More specifically, we developed a conditional compute model based on Sparsely Gated Mixture of
Experts that is trained on data obtained with novel and effective data mining techniques tailored for low-resource languages. We propose multiple architectural and training
improvements to counteract overfitting while training on thousands of tasks. Critically, we evaluated the performance of over 40,000 different translation directions using
a human-translated benchmark, Flores-200, and combined human evaluation with a novel toxicity benchmark covering all languages in Flores-200 to assess translation safety.
Our model achieves an improvement of 44% BLEU relative to the previous state-of-the-art, laying important groundwork towards realizing a universal translation system.*
This model was contributed by [Arthur Zucker](https://huggingface.co/ArthurZ).
The original code can be found [here](https://github.com/facebookresearch/fairseq).
## Usage tips
- M2M100ForConditionalGeneration is the base model for both NLLB and NLLB MoE
- The NLLB-MoE is very similar to the NLLB model, but its feed-forward layer is based on the implementation of SwitchTransformers.
- The tokenizer is the same as the NLLB models.
## Implementation differences with SwitchTransformers
The biggest difference is the way the tokens are routed. NLLB-MoE uses a `top-2-gate` which means that for each input, only the top two experts are selected based on the
highest predicted probabilities from the gating network, and the remaining experts are ignored. In `SwitchTransformers`, only the top-1 probabilities are computed,
which means that tokens have less probability of being forwarded. Moreover, if a token is not routed to any expert, `SwitchTransformers` still adds its unmodified hidden
states (kind of like a residual connection) while they are masked in `NLLB`'s top-2 routing mechanism.
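The snippet below is a toy illustration of top-2 gating in plain PyTorch, not the actual [`NllbMoeTop2Router`] implementation; the shapes and names are made up for clarity:
```python
import torch

num_tokens, num_experts = 8, 4          # toy sizes, purely illustrative
router_logits = torch.randn(num_tokens, num_experts)

routing_probs = torch.softmax(router_logits, dim=-1)
top2_probs, top2_expert_ids = torch.topk(routing_probs, k=2, dim=-1)

# each token is dispatched only to its two selected experts, weighted by renormalized gate values;
# unlike SwitchTransformers, tokens that end up masked out are not passed through unchanged
top2_weights = top2_probs / top2_probs.sum(dim=-1, keepdim=True)
```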
## Generating with NLLB-MoE
The available checkpoints require around 350GB of storage. Make sure to use `accelerate` if you do not have enough RAM on your machine.
While generating the target text, set the `forced_bos_token_id` to the target language id. The following
example shows how to translate English to French using the *facebook/nllb-moe-54b* model.
Note that we're using the BCP-47 code for French `fra_Latn`. See [here](https://github.com/facebookresearch/flores/blob/main/flores200/README.md#languages-in-flores-200)
for the list of all BCP-47 in the Flores 200 dataset.
```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-moe-54b")
>>> article = "Previously, Ring's CEO, Jamie Siminoff, remarked the company started when his doorbell wasn't audible from his shop in his garage."
>>> inputs = tokenizer(article, return_tensors="pt")
>>> translated_tokens = model.generate(
... **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["fra_Latn"], max_length=50
... )
>>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
"Auparavant, le PDG de Ring, Jamie Siminoff, a fait remarquer que la sociรฉtรฉ avait commencรฉ lorsque sa sonnette n'รฉtait pas audible depuis son magasin dans son garage."
```
### Generating from any other language than English
English (`eng_Latn`) is set as the default language from which to translate. In order to specify that you'd like to translate from a different language,
you should specify the BCP-47 code in the `src_lang` keyword argument of the tokenizer initialization.
See the example below for a translation from Romanian to German:
```python
>>> from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
>>> tokenizer = AutoTokenizer.from_pretrained("facebook/nllb-moe-54b", src_lang="ron_Latn")
>>> model = AutoModelForSeq2SeqLM.from_pretrained("facebook/nllb-moe-54b")
>>> article = "ลeful ONU spune cฤ nu existฤ o soluลฃie militarฤ รฎn Siria"
>>> inputs = tokenizer(article, return_tensors="pt")
>>> translated_tokens = model.generate(
... **inputs, forced_bos_token_id=tokenizer.lang_code_to_id["deu_Latn"], max_length=30
... )
>>> tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
```
## Resources
- [Translation task guide](../tasks/translation)
- [Summarization task guide](../tasks/summarization)
## NllbMoeConfig
[[autodoc]] NllbMoeConfig
## NllbMoeTop2Router
[[autodoc]] NllbMoeTop2Router
- route_tokens
- forward
## NllbMoeSparseMLP
[[autodoc]] NllbMoeSparseMLP
- forward
## NllbMoeModel
[[autodoc]] NllbMoeModel
- forward
## NllbMoeForConditionalGeneration
[[autodoc]] NllbMoeForConditionalGeneration
- forward
| transformers/docs/source/en/model_doc/nllb-moe.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/nllb-moe.md",
"repo_id": "transformers",
"token_count": 2003
} | 249 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Phi
## Overview
The Phi-1 model was proposed in [Textbooks Are All You Need](https://arxiv.org/abs/2306.11644) by Suriya Gunasekar, Yi Zhang, Jyoti Aneja, Caio César Teodoro Mendes, Allie Del Giorno, Sivakanth Gopi, Mojan Javaheripi, Piero Kauffmann, Gustavo de Rosa, Olli Saarikivi, Adil Salim, Shital Shah, Harkirat Singh Behl, Xin Wang, Sébastien Bubeck, Ronen Eldan, Adam Tauman Kalai, Yin Tat Lee and Yuanzhi Li.
The Phi-1.5 model was proposed in [Textbooks Are All You Need II: phi-1.5 technical report](https://arxiv.org/abs/2309.05463) by Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar and Yin Tat Lee.
### Summary
In the Phi-1 and Phi-1.5 papers, the authors showed how important the quality of the data is in training relative to the model size.
They selected high quality "textbook" data alongside with synthetically generated data for training their small sized Transformer
based model Phi-1 with 1.3B parameters. Despite this small scale, phi-1 attains pass@1 accuracy 50.6% on HumanEval and 55.5% on MBPP.
They follow the same strategy for Phi-1.5 and created another 1.3B parameter model with performance on natural language tasks comparable
to models 5x larger, and surpassing most non-frontier LLMs. Phi-1.5 exhibits many of the traits of much larger LLMs such as the ability
to "think step by step" or perform some rudimentary in-context learning.
With these two experiments the authors successfully showed the huge impact of quality of training data when training machine learning models.
The abstract from the Phi-1 paper is the following:
*We introduce phi-1, a new large language model for code, with significantly smaller size than
competing models: phi-1 is a Transformer-based model with 1.3B parameters, trained for 4 days on
8 A100s, using a selection of "textbook quality" data from the web (6B tokens) and synthetically
generated textbooks and exercises with GPT-3.5 (1B tokens). Despite this small scale, phi-1 attains
pass@1 accuracy 50.6% on HumanEval and 55.5% on MBPP. It also displays surprising emergent
properties compared to phi-1-base, our model before our finetuning stage on a dataset of coding
exercises, and phi-1-small, a smaller model with 350M parameters trained with the same pipeline as
phi-1 that still achieves 45% on HumanEval.*
The abstract from the Phi-1.5 paper is the following:
*We continue the investigation into the power of smaller Transformer-based language models as
initiated by TinyStories - a 10 million parameter model that can produce coherent English - and
the follow-up work on phi-1, a 1.3 billion parameter model with Python coding performance close
to the state-of-the-art. The latter work proposed to use existing Large Language Models (LLMs) to
generate "textbook quality" data as a way to enhance the learning process compared to traditional
web data. We follow the "Textbooks Are All You Need" approach, focusing this time on common
sense reasoning in natural language, and create a new 1.3 billion parameter model named phi-1.5,
with performance on natural language tasks comparable to models 5x larger, and surpassing most
non-frontier LLMs on more complex reasoning tasks such as grade-school mathematics and basic
coding. More generally, phi-1.5 exhibits many of the traits of much larger LLMs, both good - such
as the ability to "think step by step" or perform some rudimentary in-context learning - and bad,
including hallucinations and the potential for toxic and biased generations - encouragingly though, we
are seeing improvement on that front thanks to the absence of web data. We open-source phi-1.5 to
promote further research on these urgent topics.*
This model was contributed by [Susnato Dhar](https://huggingface.co/susnato).
The original code for Phi-1, Phi-1.5 and Phi-2 can be found [here](https://huggingface.co/microsoft/phi-1), [here](https://huggingface.co/microsoft/phi-1_5) and [here](https://huggingface.co/microsoft/phi-2), respectively.
## Usage tips
- This model is quite similar to `Llama` with the main difference in [`PhiDecoderLayer`], where they used [`PhiAttention`] and [`PhiMLP`] layers in parallel configuration.
- The tokenizer used for this model is identical to the [`CodeGenTokenizer`].
## How to use Phi-2
<Tip warning={true}>
Phi-2 has been integrated in the development version (4.37.0.dev) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following:
* When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function.
* Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source.
</Tip>
```python
>>> from transformers import AutoModelForCausalLM, AutoTokenizer
>>> model = AutoModelForCausalLM.from_pretrained("microsoft/phi-2")
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-2")
>>> inputs = tokenizer('Can you help me write a formal email to a potential business partner proposing a joint venture?', return_tensors="pt", return_attention_mask=False)
>>> outputs = model.generate(**inputs, max_length=30)
>>> text = tokenizer.batch_decode(outputs)[0]
>>> print(text)
'Can you help me write a formal email to a potential business partner proposing a joint venture?\nInput: Company A: ABC Inc.\nCompany B: XYZ Ltd.\nJoint Venture: A new online platform for e-commerce'
```
### Example:
```python
>>> from transformers import PhiForCausalLM, AutoTokenizer
>>> # define the model and tokenizer.
>>> model = PhiForCausalLM.from_pretrained("microsoft/phi-1_5")
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5")
>>> # feel free to change the prompt to your liking.
>>> prompt = "If I were an AI that had just achieved"
>>> # apply the tokenizer.
>>> tokens = tokenizer(prompt, return_tensors="pt")
>>> # use the model to generate new tokens.
>>> generated_output = model.generate(**tokens, use_cache=True, max_new_tokens=10)
>>> tokenizer.batch_decode(generated_output)[0]
'If I were an AI that had just achieved a breakthrough in machine learning, I would be thrilled'
```
## Combining Phi and Flash Attention 2
First, make sure to install the latest version of Flash Attention 2 to include the sliding window attention feature.
```bash
pip install -U flash-attn --no-build-isolation
```
Also make sure that your hardware is compatible with Flash Attention 2. Read more about it in the official documentation of the flash-attn repository. Also make sure to load your model in half-precision (e.g. `torch.float16`).
To load and run a model using Flash Attention 2, refer to the snippet below:
```python
>>> import torch
>>> from transformers import PhiForCausalLM, AutoTokenizer
>>> # define the model and tokenizer and push the model and tokens to the GPU.
>>> model = PhiForCausalLM.from_pretrained("microsoft/phi-1_5", torch_dtype=torch.float16, attn_implementation="flash_attention_2").to("cuda")
>>> tokenizer = AutoTokenizer.from_pretrained("microsoft/phi-1_5")
>>> # feel free to change the prompt to your liking.
>>> prompt = "If I were an AI that had just achieved"
>>> # apply the tokenizer.
>>> tokens = tokenizer(prompt, return_tensors="pt").to("cuda")
>>> # use the model to generate new tokens.
>>> generated_output = model.generate(**tokens, use_cache=True, max_new_tokens=10)
>>> tokenizer.batch_decode(generated_output)[0]
'If I were an AI that had just achieved a breakthrough in machine learning, I would be thrilled'
```
### Expected speedups
Below is an expected speedup diagram comparing pure inference time between the native implementation in transformers using the `microsoft/phi-1` checkpoint and the Flash Attention 2 version of the model, using a sequence length of 2048.
<div style="text-align: center">
<img src="https://huggingface.co/datasets/ybelkada/documentation-images/resolve/main/phi_1_speedup_plot.jpg">
</div>
## PhiConfig
[[autodoc]] PhiConfig
<frameworkcontent>
<pt>
## PhiModel
[[autodoc]] PhiModel
- forward
## PhiForCausalLM
[[autodoc]] PhiForCausalLM
- forward
- generate
## PhiForSequenceClassification
[[autodoc]] PhiForSequenceClassification
- forward
## PhiForTokenClassification
[[autodoc]] PhiForTokenClassification
- forward
</pt>
</frameworkcontent>
| transformers/docs/source/en/model_doc/phi.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/phi.md",
"repo_id": "transformers",
"token_count": 2611
} | 250 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# RetriBERT
<Tip warning={true}>
This model is in maintenance mode only, so we won't accept any new PRs changing its code.
If you run into any issues running this model, please reinstall the last version that supported this model: v4.30.0.
You can do so by running the following command: `pip install -U transformers==4.30.0`.
</Tip>
## Overview
The RetriBERT model was proposed in the blog post [Explain Anything Like I'm Five: A Model for Open Domain Long Form
Question Answering](https://yjernite.github.io/lfqa.html). RetriBERT is a small model that uses either a single BERT encoder or a
pair of BERT encoders with a lower-dimension projection for dense semantic indexing of text.
This model was contributed by [yjernite](https://huggingface.co/yjernite). Code to train and use the model can be
found [here](https://github.com/huggingface/transformers/tree/main/examples/research-projects/distillation).
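Since the model is in maintenance mode, the snippet below is only a minimal sketch: it assumes you have pinned `transformers==4.30.0` as described above and that you use the `yjernite/retribert-base-uncased` checkpoint published by the author; the `embed_questions` helper mirrors how the model is used in the linked research project.
```python
# minimal sketch, assuming transformers==4.30.0 and the yjernite/retribert-base-uncased checkpoint
from transformers import RetriBertModel, RetriBertTokenizer
tokenizer = RetriBertTokenizer.from_pretrained("yjernite/retribert-base-uncased")
model = RetriBertModel.from_pretrained("yjernite/retribert-base-uncased")
# the tokenizer is BERT-like, so queries are encoded the usual way
inputs = tokenizer("How do dense retrievers work?", return_tensors="pt")
# embed_questions projects the query into the dense index space used for retrieval
query_embedding = model.embed_questions(inputs["input_ids"], inputs["attention_mask"])
print(query_embedding.shape)
```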
## RetriBertConfig
[[autodoc]] RetriBertConfig
## RetriBertTokenizer
[[autodoc]] RetriBertTokenizer
## RetriBertTokenizerFast
[[autodoc]] RetriBertTokenizerFast
## RetriBertModel
[[autodoc]] RetriBertModel
- forward
| transformers/docs/source/en/model_doc/retribert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/retribert.md",
"repo_id": "transformers",
"token_count": 536
} | 251 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Transformer XL
<Tip warning={true}>
This model is in maintenance mode only, so we won't accept any new PRs changing its code. This model was deprecated due to security issues linked to `pickle.load`.
We recommend switching to more recent models for improved security.
In case you would still like to use `TransfoXL` in your experiments, we recommend using the [Hub checkpoint](https://huggingface.co/transfo-xl-wt103) with a specific revision to ensure you are downloading safe files from the Hub.
You will need to set the environment variable `TRUST_REMOTE_CODE` to `True` in order to allow the
usage of `pickle.load()`:
```python
import os
from transformers import TransfoXLTokenizer, TransfoXLLMHeadModel
os.environ["TRUST_REMOTE_CODE"] = "True"
checkpoint = 'transfo-xl-wt103'
revision = '40a186da79458c9f9de846edfaea79c412137f97'
tokenizer = TransfoXLTokenizer.from_pretrained(checkpoint, revision=revision)
model = TransfoXLLMHeadModel.from_pretrained(checkpoint, revision=revision)
```
If you run into any issues running this model, please reinstall the last version that supported this model: v4.35.0.
You can do so by running the following command: `pip install -U transformers==4.35.0`.
</Tip>
<div class="flex flex-wrap space-x-1">
<a href="https://huggingface.co/models?filter=transfo-xl">
<img alt="Models" src="https://img.shields.io/badge/All_model_pages-transfo--xl-blueviolet">
</a>
<a href="https://huggingface.co/spaces/docs-demos/transfo-xl-wt103">
<img alt="Spaces" src="https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue">
</a>
</div>
## Overview
The Transformer-XL model was proposed in [Transformer-XL: Attentive Language Models Beyond a Fixed-Length Context](https://arxiv.org/abs/1901.02860) by Zihang Dai, Zhilin Yang, Yiming Yang, Jaime Carbonell, Quoc V. Le, Ruslan
Salakhutdinov. It's a causal (uni-directional) transformer with relative positioning (sinusoidal) embeddings which can
reuse previously computed hidden-states to attend to longer context (memory). This model also uses adaptive softmax
inputs and outputs (tied).
The abstract from the paper is the following:
*Transformers have a potential of learning longer-term dependency, but are limited by a fixed-length context in the
setting of language modeling. We propose a novel neural architecture Transformer-XL that enables learning dependency
beyond a fixed length without disrupting temporal coherence. It consists of a segment-level recurrence mechanism and a
novel positional encoding scheme. Our method not only enables capturing longer-term dependency, but also resolves the
context fragmentation problem. As a result, Transformer-XL learns dependency that is 80% longer than RNNs and 450%
longer than vanilla Transformers, achieves better performance on both short and long sequences, and is up to 1,800+
times faster than vanilla Transformers during evaluation. Notably, we improve the state-of-the-art results of
bpc/perplexity to 0.99 on enwiki8, 1.08 on text8, 18.3 on WikiText-103, 21.8 on One Billion Word, and 54.5 on Penn
Treebank (without finetuning). When trained only on WikiText-103, Transformer-XL manages to generate reasonably
coherent, novel text articles with thousands of tokens.*
This model was contributed by [thomwolf](https://huggingface.co/thomwolf). The original code can be found [here](https://github.com/kimiyoung/transformer-xl).
## Usage tips
- Transformer-XL uses relative sinusoidal positional embeddings. Padding can be done on the left or on the right. The
original implementation trains on SQuAD with padding on the left, therefore the padding defaults are set to left.
- Transformer-XL is one of the few models that has no sequence length limit.
- Same as a regular GPT model, but introduces a recurrence mechanism for two consecutive segments (similar to a regular RNNs with two consecutive inputs). In this context, a segment is a number of consecutive tokens (for instance 512) that may span across multiple documents, and segments are fed in order to the model.
- Basically, the hidden states of the previous segment are concatenated to the current input to compute the attention scores. This allows the model to pay attention to information that was in the previous segment as well as the current one. By stacking multiple attention layers, the receptive field can be increased to multiple previous segments (see the sketch after this list).
- This changes the positional embeddings to positional relative embeddings (as the regular positional embeddings would give the same results in the current input and the current hidden state at a given position) and needs to make some adjustments in the way attention scores are computed.
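Below is a minimal sketch of how this recurrence memory can be carried across segments at inference time. It reuses the checkpoint and revision from the deprecation notice above; the two short text segments are only placeholders for real consecutive chunks of a document.
```python
import os
import torch
from transformers import TransfoXLLMHeadModel, TransfoXLTokenizer
os.environ["TRUST_REMOTE_CODE"] = "True"  # required for this deprecated model, see the notice above
checkpoint = "transfo-xl-wt103"
revision = "40a186da79458c9f9de846edfaea79c412137f97"
tokenizer = TransfoXLTokenizer.from_pretrained(checkpoint, revision=revision)
model = TransfoXLLMHeadModel.from_pretrained(checkpoint, revision=revision)
segments = ["The quick brown fox", "jumps over the lazy dog"]  # placeholder consecutive segments
mems = None  # no memory before the first segment
for segment in segments:
    input_ids = tokenizer(segment, return_tensors="pt")["input_ids"]
    with torch.no_grad():
        outputs = model(input_ids, mems=mems)
    mems = outputs.mems  # the hidden states of this segment become the memory for the next one
```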
<Tip warning={true}>
TransformerXL does **not** work with *torch.nn.DataParallel* due to a bug in PyTorch, see [issue #36035](https://github.com/pytorch/pytorch/issues/36035)
</Tip>
## Resources
- [Text classification task guide](../tasks/sequence_classification)
- [Causal language modeling task guide](../tasks/language_modeling)
## TransfoXLConfig
[[autodoc]] TransfoXLConfig
## TransfoXLTokenizer
[[autodoc]] TransfoXLTokenizer
- save_vocabulary
## TransfoXL specific outputs
[[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLModelOutput
[[autodoc]] models.deprecated.transfo_xl.modeling_transfo_xl.TransfoXLLMHeadModelOutput
[[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLModelOutput
[[autodoc]] models.deprecated.transfo_xl.modeling_tf_transfo_xl.TFTransfoXLLMHeadModelOutput
<frameworkcontent>
<pt>
## TransfoXLModel
[[autodoc]] TransfoXLModel
- forward
## TransfoXLLMHeadModel
[[autodoc]] TransfoXLLMHeadModel
- forward
## TransfoXLForSequenceClassification
[[autodoc]] TransfoXLForSequenceClassification
- forward
</pt>
<tf>
## TFTransfoXLModel
[[autodoc]] TFTransfoXLModel
- call
## TFTransfoXLLMHeadModel
[[autodoc]] TFTransfoXLLMHeadModel
- call
## TFTransfoXLForSequenceClassification
[[autodoc]] TFTransfoXLForSequenceClassification
- call
</tf>
</frameworkcontent>
## Internal Layers
[[autodoc]] AdaptiveEmbedding
[[autodoc]] TFAdaptiveEmbedding
| transformers/docs/source/en/model_doc/transfo-xl.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/transfo-xl.md",
"repo_id": "transformers",
"token_count": 1988
} | 252 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# VisualBERT
## Overview
The VisualBERT model was proposed in [VisualBERT: A Simple and Performant Baseline for Vision and Language](https://arxiv.org/pdf/1908.03557) by Liunian Harold Li, Mark Yatskar, Da Yin, Cho-Jui Hsieh, Kai-Wei Chang.
VisualBERT is a neural network trained on a variety of (image, text) pairs.
The abstract from the paper is the following:
*We propose VisualBERT, a simple and flexible framework for modeling a broad range of vision-and-language tasks.
VisualBERT consists of a stack of Transformer layers that implicitly align elements of an input text and regions in an
associated input image with self-attention. We further propose two visually-grounded language model objectives for
pre-training VisualBERT on image caption data. Experiments on four vision-and-language tasks including VQA, VCR, NLVR2,
and Flickr30K show that VisualBERT outperforms or rivals with state-of-the-art models while being significantly
simpler. Further analysis demonstrates that VisualBERT can ground elements of language to image regions without any
explicit supervision and is even sensitive to syntactic relationships, tracking, for example, associations between
verbs and image regions corresponding to their arguments.*
This model was contributed by [gchhablani](https://huggingface.co/gchhablani). The original code can be found [here](https://github.com/uclanlp/visualbert).
## Usage tips
1. Most of the checkpoints provided work with the [`VisualBertForPreTraining`] configuration. The other
checkpoints provided are fine-tuned for downstream tasks - VQA ('visualbert-vqa'), VCR
('visualbert-vcr'), and NLVR2 ('visualbert-nlvr2'). Hence, if you are not working on these downstream tasks, it is
recommended that you use the pretrained checkpoints.
2. For the VCR task, the authors use a fine-tuned detector for generating visual embeddings for all the checkpoints.
We do not provide the detector and its weights as part of the package, but it is available in the research
projects, and the states can be loaded directly into the detector provided there.
VisualBERT is a multi-modal vision and language model. It can be used for visual question answering, multiple choice,
visual reasoning and region-to-phrase correspondence tasks. VisualBERT uses a BERT-like transformer to prepare
embeddings for image-text pairs. Both the text and visual features are then projected to a latent space with identical
dimension.
To feed images to the model, each image is passed through a pre-trained object detector and the regions and the
bounding boxes are extracted. The authors use the features generated after passing these regions through a pre-trained
CNN like ResNet as visual embeddings. They also add absolute position embeddings, and feed the resulting sequence of
vectors to a standard BERT model. The text input is concatenated in the front of the visual embeddings in the embedding
layer, and is expected to be bounded by [CLS] and [SEP] tokens, as in BERT. The segment IDs must also be set
appropriately for the textual and visual parts.
The [`BertTokenizer`] is used to encode the text. A custom detector/image processor must be used
to get the visual embeddings. The following example notebooks show how to use VisualBERT with Detectron-like models:
- [VisualBERT VQA demo notebook](https://github.com/huggingface/transformers/tree/main/examples/research_projects/visual_bert) : This notebook
contains an example on VisualBERT VQA.
- [Generate Embeddings for VisualBERT (Colab Notebook)](https://colab.research.google.com/drive/1bLGxKdldwqnMVA5x4neY7-l_8fKGWQYI?usp=sharing) : This notebook contains
an example on how to generate visual embeddings.
The following example shows how to get the last hidden state using [`VisualBertModel`]:
```python
>>> import torch
>>> from transformers import BertTokenizer, VisualBertModel
>>> model = VisualBertModel.from_pretrained("uclanlp/visualbert-vqa-coco-pre")
>>> tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
>>> inputs = tokenizer("What is the man eating?", return_tensors="pt")
>>> # this is a custom function that returns the visual embeddings given the image path
>>> visual_embeds = get_visual_embeddings(image_path)
>>> visual_token_type_ids = torch.ones(visual_embeds.shape[:-1], dtype=torch.long)
>>> visual_attention_mask = torch.ones(visual_embeds.shape[:-1], dtype=torch.float)
>>> inputs.update(
... {
... "visual_embeds": visual_embeds,
... "visual_token_type_ids": visual_token_type_ids,
... "visual_attention_mask": visual_attention_mask,
... }
... )
>>> outputs = model(**inputs)
>>> last_hidden_state = outputs.last_hidden_state
```
## VisualBertConfig
[[autodoc]] VisualBertConfig
## VisualBertModel
[[autodoc]] VisualBertModel
- forward
## VisualBertForPreTraining
[[autodoc]] VisualBertForPreTraining
- forward
## VisualBertForQuestionAnswering
[[autodoc]] VisualBertForQuestionAnswering
- forward
## VisualBertForMultipleChoice
[[autodoc]] VisualBertForMultipleChoice
- forward
## VisualBertForVisualReasoning
[[autodoc]] VisualBertForVisualReasoning
- forward
## VisualBertForRegionToPhraseAlignment
[[autodoc]] VisualBertForRegionToPhraseAlignment
- forward
| transformers/docs/source/en/model_doc/visual_bert.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/visual_bert.md",
"repo_id": "transformers",
"token_count": 1676
} | 253 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# XGLM
## Overview
The XGLM model was proposed in [Few-shot Learning with Multilingual Language Models](https://arxiv.org/abs/2112.10668)
by Xi Victoria Lin, Todor Mihaylov, Mikel Artetxe, Tianlu Wang, Shuohui Chen, Daniel Simig, Myle Ott, Naman Goyal,
Shruti Bhosale, Jingfei Du, Ramakanth Pasunuru, Sam Shleifer, Punit Singh Koura, Vishrav Chaudhary, Brian O'Horo,
Jeff Wang, Luke Zettlemoyer, Zornitsa Kozareva, Mona Diab, Veselin Stoyanov, Xian Li.
The abstract from the paper is the following:
*Large-scale autoregressive language models such as GPT-3 are few-shot learners that can perform a wide range of language
tasks without fine-tuning. While these models are known to be able to jointly represent many different languages,
their training data is dominated by English, potentially limiting their cross-lingual generalization.
In this work, we train multilingual autoregressive language models on a balanced corpus covering a diverse set of languages,
and study their few- and zero-shot learning capabilities in a wide range of tasks. Our largest model with 7.5 billion parameters
sets new state of the art in few-shot learning in more than 20 representative languages, outperforming GPT-3 of comparable size
in multilingual commonsense reasoning (with +7.4% absolute accuracy improvement in 0-shot settings and +9.4% in 4-shot settings)
and natural language inference (+5.4% in each of 0-shot and 4-shot settings). On the FLORES-101 machine translation benchmark,
our model outperforms GPT-3 on 171 out of 182 translation directions with 32 training examples, while surpassing the
official supervised baseline in 45 directions. We present a detailed analysis of where the model succeeds and fails,
showing in particular that it enables cross-lingual in-context learning on some tasks, while there is still room for improvement
on surface form robustness and adaptation to tasks that do not have a natural cloze form. Finally, we evaluate our models
in social value tasks such as hate speech detection in five languages and find it has limitations similar to comparable sized GPT-3 models.*
This model was contributed by [Suraj](https://huggingface.co/valhalla). The original code can be found [here](https://github.com/pytorch/fairseq/tree/main/examples/xglm).
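Since XGLM is a standard causal language model, you can generate text with the usual `generate` API. The snippet below is a minimal sketch that assumes the `facebook/xglm-564M` checkpoint; any of the released XGLM sizes should work the same way.
```python
import torch
from transformers import AutoTokenizer, XGLMForCausalLM
tokenizer = AutoTokenizer.from_pretrained("facebook/xglm-564M")
model = XGLMForCausalLM.from_pretrained("facebook/xglm-564M")
# XGLM is multilingual, so the prompt does not have to be in English
prompt = "Le meilleur moment de la journée est"
inputs = tokenizer(prompt, return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(**inputs, max_new_tokens=20, do_sample=False)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```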
## Resources
- [Causal language modeling task guide](../tasks/language_modeling)
## XGLMConfig
[[autodoc]] XGLMConfig
## XGLMTokenizer
[[autodoc]] XGLMTokenizer
- build_inputs_with_special_tokens
- get_special_tokens_mask
- create_token_type_ids_from_sequences
- save_vocabulary
## XGLMTokenizerFast
[[autodoc]] XGLMTokenizerFast
<frameworkcontent>
<pt>
## XGLMModel
[[autodoc]] XGLMModel
- forward
## XGLMForCausalLM
[[autodoc]] XGLMForCausalLM
- forward
</pt>
<tf>
## TFXGLMModel
[[autodoc]] TFXGLMModel
- call
## TFXGLMForCausalLM
[[autodoc]] TFXGLMForCausalLM
- call
</tf>
<jax>
## FlaxXGLMModel
[[autodoc]] FlaxXGLMModel
- __call__
## FlaxXGLMForCausalLM
[[autodoc]] FlaxXGLMForCausalLM
- __call__
</jax>
</frameworkcontent> | transformers/docs/source/en/model_doc/xglm.md/0 | {
"file_path": "transformers/docs/source/en/model_doc/xglm.md",
"repo_id": "transformers",
"token_count": 1137
} | 254 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Pipelines for inference
The [`pipeline`] makes it simple to use any model from the [Hub](https://huggingface.co/models) for inference on any language, computer vision, speech, and multimodal tasks. Even if you don't have experience with a specific modality or aren't familiar with the underlying code behind the models, you can still use them for inference with the [`pipeline`]! This tutorial will teach you to:
* Use a [`pipeline`] for inference.
* Use a specific tokenizer or model.
* Use a [`pipeline`] for audio, vision, and multimodal tasks.
<Tip>
Take a look at the [`pipeline`] documentation for a complete list of supported tasks and available parameters.
</Tip>
## Pipeline usage
While each task has an associated [`pipeline`], it is simpler to use the general [`pipeline`] abstraction which contains
all the task-specific pipelines. The [`pipeline`] automatically loads a default model and a preprocessing class capable
of inference for your task. Let's take the example of using the [`pipeline`] for automatic speech recognition (ASR), or
speech-to-text.
1. Start by creating a [`pipeline`] and specify the inference task:
```py
>>> from transformers import pipeline
>>> transcriber = pipeline(task="automatic-speech-recognition")
```
2. Pass your input to the [`pipeline`]. In the case of speech recognition, this is an audio input file:
```py
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': 'I HAVE A DREAM BUT ONE DAY THIS NATION WILL RISE UP LIVE UP THE TRUE MEANING OF ITS TREES'}
```
Not the result you had in mind? Check out some of the [most downloaded automatic speech recognition models](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=trending)
on the Hub to see if you can get a better transcription.
Let's try the [Whisper large-v2](https://huggingface.co/openai/whisper-large-v2) model from OpenAI. Whisper was released
2 years later than Wav2Vec2, and was trained on close to 10x more data. As such, it beats Wav2Vec2 on most downstream
benchmarks. It also has the added benefit of predicting punctuation and casing, neither of which are possible with
Wav2Vec2.
Let's give it a try here to see how it performs:
```py
>>> transcriber = pipeline(model="openai/whisper-large-v2")
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.'}
```
Now this result looks more accurate! For a deep-dive comparison on Wav2Vec2 vs Whisper, refer to the [Audio Transformers Course](https://huggingface.co/learn/audio-course/chapter5/asr_models).
We really encourage you to check out the Hub for models in different languages, models specialized in your field, and more.
You can check out and compare model results directly from your browser on the Hub to see if it fits or
handles corner cases better than other ones.
And if you don't find a model for your use case, you can always start [training](training) your own!
If you have several inputs, you can pass your input as a list:
```py
transcriber(
[
"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac",
"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/1.flac",
]
)
```
Pipelines are great for experimentation as switching from one model to another is trivial; however, there are some ways to optimize them for larger workloads than experimentation. See the following guides that dive into iterating over whole datasets or using pipelines in a webserver:
* [Using pipelines on a dataset](#using-pipelines-on-a-dataset)
* [Using pipelines for a webserver](./pipeline_webserver)
## Parameters
[`pipeline`] supports many parameters; some are task specific, and some are general to all pipelines.
In general, you can specify parameters anywhere you want:
```py
transcriber = pipeline(model="openai/whisper-large-v2", my_parameter=1)
out = transcriber(...) # This will use `my_parameter=1`.
out = transcriber(..., my_parameter=2) # This will override and use `my_parameter=2`.
out = transcriber(...) # This will go back to using `my_parameter=1`.
```
Let's check out 3 important ones:
### Device
If you use `device=n`, the pipeline automatically puts the model on the specified device.
This will work regardless of whether you are using PyTorch or TensorFlow.
```py
transcriber = pipeline(model="openai/whisper-large-v2", device=0)
```
If the model is too large for a single GPU and you are using PyTorch, you can set `device_map="auto"` to automatically
determine how to load and store the model weights. Using the `device_map` argument requires the 🤗 [Accelerate](https://huggingface.co/docs/accelerate)
package:
```bash
pip install --upgrade accelerate
```
The following code automatically loads and stores model weights across devices:
```py
transcriber = pipeline(model="openai/whisper-large-v2", device_map="auto")
```
Note that if `device_map="auto"` is passed, there is no need to add the argument `device=device` when instantiating your `pipeline`; combining the two may lead to unexpected behavior!
### Batch size
By default, pipelines will not batch inference for reasons explained in detail [here](https://huggingface.co/docs/transformers/main_classes/pipelines#pipeline-batching). The reason is that batching is not necessarily faster, and can actually be quite a bit slower in some cases.
But if it works in your use case, you can use:
```py
transcriber = pipeline(model="openai/whisper-large-v2", device=0, batch_size=2)
audio_filenames = [f"https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/{i}.flac" for i in range(1, 5)]
texts = transcriber(audio_filenames)
```
This runs the pipeline on the 4 provided audio files, but it will pass them in batches of 2
to the model (which is on a GPU, where batching is more likely to help) without requiring any further code from you.
The output should always match what you would have received without batching. It is only meant as a way to help you get more speed out of a pipeline.
Pipelines can also alleviate some of the complexities of batching because, for some pipelines, a single item (like a long audio file) needs to be chunked into multiple parts to be processed by a model. The pipeline performs this [*chunk batching*](./main_classes/pipelines#pipeline-chunk-batching) for you.
### Task specific parameters
All tasks provide task specific parameters which allow for additional flexibility and options to help you get your job done.
For instance, the [`transformers.AutomaticSpeechRecognitionPipeline.__call__`] method has a `return_timestamps` parameter which sounds promising for subtitling videos:
```py
>>> transcriber = pipeline(model="openai/whisper-large-v2", return_timestamps=True)
>>> transcriber("https://huggingface.co/datasets/Narsil/asr_dummy/resolve/main/mlk.flac")
{'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its creed.', 'chunks': [{'timestamp': (0.0, 11.88), 'text': ' I have a dream that one day this nation will rise up and live out the true meaning of its'}, {'timestamp': (11.88, 12.38), 'text': ' creed.'}]}
```
As you can see, the model inferred the text and also outputted **when** the various sentences were pronounced.
There are many parameters available for each task, so check out each task's API reference to see what you can tinker with!
For instance, the [`~transformers.AutomaticSpeechRecognitionPipeline`] has a `chunk_length_s` parameter which is helpful
for working on really long audio files (for example, subtitling entire movies or hour-long videos) that a model typically
cannot handle on its own:
```python
>>> transcriber = pipeline(model="openai/whisper-large-v2", chunk_length_s=30, return_timestamps=True)
>>> transcriber("https://huggingface.co/datasets/sanchit-gandhi/librispeech_long/resolve/main/audio.wav")
{'text': " Chapter 16. I might have told you of the beginning of this liaison in a few lines, but I wanted you to see every step by which we came. I, too, agree to whatever Marguerite wished, Marguerite to be unable to live apart from me. It was the day after the evening...
```
If you can't find a parameter that would really help you out, feel free to [request it](https://github.com/huggingface/transformers/issues/new?assignees=&labels=feature&template=feature-request.yml)!
## Using pipelines on a dataset
The pipeline can also run inference on a large dataset. The easiest way we recommend doing this is by using an iterator:
```py
def data():
for i in range(1000):
yield f"My example {i}"
pipe = pipeline(model="gpt2", device=0)
generated_characters = 0
for out in pipe(data()):
generated_characters += len(out[0]["generated_text"])
```
The iterator `data()` yields each result, and the pipeline automatically
recognizes the input is iterable and will start fetching the data while
it continues to process it on the GPU (this uses [DataLoader](https://pytorch.org/docs/stable/data.html#torch.utils.data.DataLoader) under the hood).
This is important because you don't have to allocate memory for the whole dataset
and you can feed the GPU as fast as possible.
Since batching could speed things up, it may be useful to try tuning the `batch_size` parameter here.
The simplest way to iterate over a dataset is to just load one from 🤗 [Datasets](https://github.com/huggingface/datasets/):
```py
# KeyDataset is a util that will just output the item we're interested in.
from transformers.pipelines.pt_utils import KeyDataset
from datasets import load_dataset
pipe = pipeline(model="hf-internal-testing/tiny-random-wav2vec2", device=0)
dataset = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation[:10]")
for out in pipe(KeyDataset(dataset, "audio")):
print(out)
```
## Using pipelines for a webserver
<Tip>
Creating an inference engine is a complex topic which deserves its own
page.
</Tip>
[Link](./pipeline_webserver)
## Vision pipeline
Using a [`pipeline`] for vision tasks is practically identical.
Specify your task and pass your image to the classifier. The image can be a link, a local path or a base64-encoded image. For example, what species of cat is shown below?
![pipeline-cat-chonk](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg)
```py
>>> from transformers import pipeline
>>> vision_classifier = pipeline(model="google/vit-base-patch16-224")
>>> preds = vision_classifier(
... images="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/pipeline-cat-chonk.jpeg"
... )
>>> preds = [{"score": round(pred["score"], 4), "label": pred["label"]} for pred in preds]
>>> preds
[{'score': 0.4335, 'label': 'lynx, catamount'}, {'score': 0.0348, 'label': 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor'}, {'score': 0.0324, 'label': 'snow leopard, ounce, Panthera uncia'}, {'score': 0.0239, 'label': 'Egyptian cat'}, {'score': 0.0229, 'label': 'tiger cat'}]
```
## Text pipeline
Using a [`pipeline`] for NLP tasks is practically identical.
```py
>>> from transformers import pipeline
>>> # This model is a `zero-shot-classification` model.
>>> # It will classify text, except you are free to choose any label you might imagine
>>> classifier = pipeline(model="facebook/bart-large-mnli")
>>> classifier(
... "I have a problem with my iphone that needs to be resolved asap!!",
... candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"],
... )
{'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]}
```
## Multimodal pipeline
The [`pipeline`] supports more than one modality. For example, a visual question answering (VQA) task combines text and image. Feel free to use any image link you like and a question you want to ask about the image. The image can be a URL or a local path to the image.
For example, if you use this [invoice image](https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png):
```py
>>> from transformers import pipeline
>>> vqa = pipeline(model="impira/layoutlm-document-qa")
>>> vqa(
... image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png",
... question="What is the invoice number?",
... )
[{'score': 0.42515, 'answer': 'us-001', 'start': 16, 'end': 16}]
```
<Tip>
To run the example above you need to have [`pytesseract`](https://pypi.org/project/pytesseract/) installed in addition to 🤗 Transformers:
```bash
sudo apt install -y tesseract-ocr
pip install pytesseract
```
</Tip>
## Using `pipeline` on large models with 🤗 `accelerate`:
You can easily run `pipeline` on large models using 🤗 `accelerate`! First make sure you have installed `accelerate` with `pip install accelerate`.
First load your model using `device_map="auto"`! We will use `facebook/opt-1.3b` for our example.
```py
# pip install accelerate
import torch
from transformers import pipeline
pipe = pipeline(model="facebook/opt-1.3b", torch_dtype=torch.bfloat16, device_map="auto")
output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```
You can also pass 8-bit loaded models if you install `bitsandbytes` and add the argument `load_in_8bit=True`
```py
# pip install accelerate bitsandbytes
import torch
from transformers import pipeline
pipe = pipeline(model="facebook/opt-1.3b", device_map="auto", model_kwargs={"load_in_8bit": True})
output = pipe("This is a cool example!", do_sample=True, top_p=0.95)
```
Note that you can replace the checkpoint with any Hugging Face model that supports large model loading, such as BLOOM!
| transformers/docs/source/en/pipeline_tutorial.md/0 | {
"file_path": "transformers/docs/source/en/pipeline_tutorial.md",
"repo_id": "transformers",
"token_count": 4495
} | 255 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Image-to-Image Task Guide
[[open-in-colab]]
Image-to-Image task is the task where an application receives an image and outputs another image. This has various subtasks, including image enhancement (super resolution, low light enhancement, deraining and so on), image inpainting, and more.
This guide will show you how to:
- Use an image-to-image pipeline for super resolution task,
- Run image-to-image models for same task without a pipeline.
Note that as of the time this guide was written, the `image-to-image` pipeline only supports the super resolution task.
Let's begin by installing the necessary libraries.
```bash
pip install transformers
```
We can now initialize the pipeline with a [Swin2SR model](https://huggingface.co/caidas/swin2SR-lightweight-x2-64). We can then infer with the pipeline by calling it with an image. As of now, only [Swin2SR models](https://huggingface.co/models?sort=trending&search=swin2sr) are supported in this pipeline.
```python
import torch
from transformers import pipeline
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
pipe = pipeline(task="image-to-image", model="caidas/swin2SR-lightweight-x2-64", device=device)
```
Now, let's load an image.
```python
from PIL import Image
import requests
url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat.jpg"
image = Image.open(requests.get(url, stream=True).raw)
print(image.size)
```
```bash
# (532, 432)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat.jpg" alt="Photo of a cat"/>
</div>
We can now do inference with the pipeline. We will get an upscaled version of the cat image.
```python
upscaled = pipe(image)
print(upscaled.size)
```
```bash
# (1072, 880)
```
If you wish to do inference yourself with no pipeline, you can use the `Swin2SRForImageSuperResolution` and `Swin2SRImageProcessor` classes of transformers. We will use the same model checkpoint for this. Let's initialize the model and the processor.
```python
from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor
model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2SR-lightweight-x2-64").to(device)
processor = Swin2SRImageProcessor.from_pretrained("caidas/swin2SR-lightweight-x2-64")
```
`pipeline` abstracts away the preprocessing and postprocessing steps that we have to do ourselves, so let's preprocess the image. We will pass the image to the processor and then move the pixel values to GPU.
```python
pixel_values = processor(image, return_tensors="pt").pixel_values
print(pixel_values.shape)
pixel_values = pixel_values.to(device)
```
We can now infer the image by passing pixel values to the model.
```python
import torch
with torch.no_grad():
outputs = model(pixel_values)
```
The output is an object of type `ImageSuperResolutionOutput` that looks like the one below 👇
```
(loss=None, reconstruction=tensor([[[[0.8270, 0.8269, 0.8275, ..., 0.7463, 0.7446, 0.7453],
[0.8287, 0.8278, 0.8283, ..., 0.7451, 0.7448, 0.7457],
[0.8280, 0.8273, 0.8269, ..., 0.7447, 0.7446, 0.7452],
...,
[0.5923, 0.5933, 0.5924, ..., 0.0697, 0.0695, 0.0706],
[0.5926, 0.5932, 0.5926, ..., 0.0673, 0.0687, 0.0705],
[0.5927, 0.5914, 0.5922, ..., 0.0664, 0.0694, 0.0718]]]],
device='cuda:0'), hidden_states=None, attentions=None)
```
We need to get the `reconstruction` and post-process it for visualization. Let's see what it looks like.
```python
outputs.reconstruction.data.shape
# torch.Size([1, 3, 880, 1072])
```
We need to squeeze the output and get rid of axis 0, clip the values, then convert the tensor to a NumPy float array. Then we will rearrange the axes to get the shape [880, 1072, 3], and finally, bring the values back to the range [0, 255].
```python
import numpy as np
# squeeze, take to CPU and clip the values
output = outputs.reconstruction.data.squeeze().cpu().clamp_(0, 1).numpy()
# rearrange the axes
output = np.moveaxis(output, source=0, destination=-1)
# bring values back to pixel values range
output = (output * 255.0).round().astype(np.uint8)
Image.fromarray(output)
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/cat_upscaled.png" alt="Upscaled photo of a cat"/>
</div>
| transformers/docs/source/en/tasks/image_to_image.md/0 | {
"file_path": "transformers/docs/source/en/tasks/image_to_image.md",
"repo_id": "transformers",
"token_count": 1725
} | 256 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
⚠️ Note that this file is in Markdown but contains specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Visual Question Answering
[[open-in-colab]]
Visual Question Answering (VQA) is the task of answering open-ended questions based on an image.
The input to models supporting this task is typically a combination of an image and a question, and the output is an
answer expressed in natural language.
Some noteworthy use case examples for VQA include:
* Accessibility applications for visually impaired individuals.
* Education: posing questions about visual materials presented in lectures or textbooks. VQA can also be utilized in interactive museum exhibits or historical sites.
* Customer service and e-commerce: VQA can enhance user experience by letting users ask questions about products.
* Image retrieval: VQA models can be used to retrieve images with specific characteristics. For example, the user can ask "Is there a dog?" to find all images with dogs from a set of images.
In this guide you'll learn how to:
- Fine-tune a classification VQA model, specifically [ViLT](../model_doc/vilt), on the [`Graphcore/vqa` dataset](https://huggingface.co/datasets/Graphcore/vqa).
- Use your fine-tuned ViLT for inference.
- Run zero-shot VQA inference with a generative model, like BLIP-2.
## Fine-tuning ViLT
ViLT model incorporates text embeddings into a Vision Transformer (ViT), allowing it to have a minimal design for
Vision-and-Language Pre-training (VLP). This model can be used for several downstream tasks. For the VQA task, a classifier
head is placed on top (a linear layer on top of the final hidden state of the `[CLS]` token) and randomly initialized.
Visual Question Answering is thus treated as a **classification problem**.
More recent models, such as BLIP, BLIP-2, and InstructBLIP, treat VQA as a generative task. Later in this guide we
illustrate how to use them for zero-shot VQA inference.
Before you begin, make sure you have all the necessary libraries installed.
```bash
pip install -q transformers datasets
```
We encourage you to share your model with the community. Log in to your Hugging Face account to upload it to the 🤗 Hub.
When prompted, enter your token to log in:
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
Let's define the model checkpoint as a global variable.
```py
>>> model_checkpoint = "dandelin/vilt-b32-mlm"
```
## Load the data
For illustration purposes, in this guide we use a very small sample of the annotated visual question answering `Graphcore/vqa` dataset.
You can find the full dataset on the [🤗 Hub](https://huggingface.co/datasets/Graphcore/vqa).
As an alternative to the [`Graphcore/vqa` dataset](https://huggingface.co/datasets/Graphcore/vqa), you can download the
same data manually from the official [VQA dataset page](https://visualqa.org/download.html). If you prefer to follow the
tutorial with your custom data, check out how to [Create an image dataset](https://huggingface.co/docs/datasets/image_dataset#loading-script)
guide in the 🤗 Datasets documentation.
Let's load the first 200 examples from the validation split and explore the dataset's features:
```python
>>> from datasets import load_dataset
>>> dataset = load_dataset("Graphcore/vqa", split="validation[:200]")
>>> dataset
Dataset({
features: ['question', 'question_type', 'question_id', 'image_id', 'answer_type', 'label'],
num_rows: 200
})
```
Let's take a look at an example to understand the dataset's features:
```py
>>> dataset[0]
{'question': 'Where is he looking?',
'question_type': 'none of the above',
'question_id': 262148000,
'image_id': '/root/.cache/huggingface/datasets/downloads/extracted/ca733e0e000fb2d7a09fbcc94dbfe7b5a30750681d0e965f8e0a23b1c2f98c75/val2014/COCO_val2014_000000262148.jpg',
'answer_type': 'other',
'label': {'ids': ['at table', 'down', 'skateboard', 'table'],
'weights': [0.30000001192092896,
1.0,
0.30000001192092896,
0.30000001192092896]}}
```
The features relevant to the task include:
* `question`: the question to be answered from the image
* `image_id`: the path to the image the question refers to
* `label`: the annotations
We can remove the rest of the features as they won't be necessary:
```py
>>> dataset = dataset.remove_columns(['question_type', 'question_id', 'answer_type'])
```
As you can see, the `label` feature contains several answers to the same question (called `ids` here) collected by different human annotators.
This is because the answer to a question can be subjective. In this case, the question is "where is he looking?". Some people
annotated this with "down", others with "at table", another one with "skateboard", etc.
Take a look at the image and consider which answer would you give:
```python
>>> from PIL import Image
>>> image = Image.open(dataset[0]['image_id'])
>>> image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/vqa-example.png" alt="VQA Image Example"/>
</div>
Due to the questions' and answers' ambiguity, datasets like this are treated as a multi-label classification problem (as
multiple answers are possibly valid). Moreover, rather than just creating a one-hot encoded vector, one creates a
soft encoding, based on the number of times a certain answer appeared in the annotations.
For instance, in the example above, because the answer "down" is selected way more often than other answers, it has a
score (called `weight` in the dataset) of 1.0, and the rest of the answers have scores < 1.0.
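To make the construction of these soft targets concrete, here is a toy illustration that reuses the answers and weights from the example above; the four-label space and its index mapping are made up for this illustration only, and the real mapping is built in the next step.
```py
import torch
# toy label space and index mapping, made up only for this illustration
label2id_toy = {"at table": 0, "down": 1, "skateboard": 2, "table": 3}
answers = ["at table", "down", "skateboard", "table"]  # answers given by the annotators
weights = [0.3, 1.0, 0.3, 0.3]  # how much each answer counts
target = torch.zeros(len(label2id_toy))
for answer, weight in zip(answers, weights):
    target[label2id_toy[answer]] = weight
print(target)  # tensor([0.3000, 1.0000, 0.3000, 0.3000])
```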
To later instantiate the model with an appropriate classification head, let's create two dictionaries: one that maps
the label name to an integer and vice versa:
```py
>>> import itertools
>>> labels = [item['ids'] for item in dataset['label']]
>>> flattened_labels = list(itertools.chain(*labels))
>>> unique_labels = list(set(flattened_labels))
>>> label2id = {label: idx for idx, label in enumerate(unique_labels)}
>>> id2label = {idx: label for label, idx in label2id.items()}
```
Now that we have the mappings, we can replace the string answers with their ids, and flatten the dataset for a more convenient further preprocessing.
```python
>>> def replace_ids(inputs):
... inputs["label"]["ids"] = [label2id[x] for x in inputs["label"]["ids"]]
... return inputs
>>> dataset = dataset.map(replace_ids)
>>> flat_dataset = dataset.flatten()
>>> flat_dataset.features
{'question': Value(dtype='string', id=None),
'image_id': Value(dtype='string', id=None),
'label.ids': Sequence(feature=Value(dtype='int64', id=None), length=-1, id=None),
'label.weights': Sequence(feature=Value(dtype='float64', id=None), length=-1, id=None)}
```
## Preprocessing data
The next step is to load a ViLT processor to prepare the image and text data for the model.
[`ViltProcessor`] wraps a BERT tokenizer and ViLT image processor into a convenient single processor:
```py
>>> from transformers import ViltProcessor
>>> processor = ViltProcessor.from_pretrained(model_checkpoint)
```
To preprocess the data we need to encode the images and questions using the [`ViltProcessor`]. The processor will use
the [`BertTokenizerFast`] to tokenize the text and create `input_ids`, `attention_mask` and `token_type_ids` for the text data.
As for images, the processor will leverage [`ViltImageProcessor`] to resize and normalize the image, and create `pixel_values` and `pixel_mask`.
All these preprocessing steps are done under the hood, we only need to call the `processor`. However, we still need to
prepare the target labels. In this representation, each element corresponds to a possible answer (label). For correct answers, the element holds
their respective score (weight), while the remaining elements are set to zero.
The following function applies the `processor` to the images and questions and formats the labels as described above:
```py
>>> import torch
>>> def preprocess_data(examples):
... image_paths = examples['image_id']
... images = [Image.open(image_path) for image_path in image_paths]
... texts = examples['question']
... encoding = processor(images, texts, padding="max_length", truncation=True, return_tensors="pt")
... for k, v in encoding.items():
... encoding[k] = v.squeeze()
... targets = []
... for labels, scores in zip(examples['label.ids'], examples['label.weights']):
... target = torch.zeros(len(id2label))
... for label, score in zip(labels, scores):
... target[label] = score
... targets.append(target)
... encoding["labels"] = targets
... return encoding
```
To apply the preprocessing function over the entire dataset, use the 🤗 Datasets [`~datasets.map`] function. You can speed up `map` by
setting `batched=True` to process multiple elements of the dataset at once. At this point, feel free to remove the columns you don't need.
```py
>>> processed_dataset = flat_dataset.map(preprocess_data, batched=True, remove_columns=['question','question_type', 'question_id', 'image_id', 'answer_type', 'label.ids', 'label.weights'])
>>> processed_dataset
Dataset({
features: ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values', 'pixel_mask', 'labels'],
num_rows: 200
})
```
As a final step, create a batch of examples using [`DefaultDataCollator`]:
```py
>>> from transformers import DefaultDataCollator
>>> data_collator = DefaultDataCollator()
```
## Train the model
You're ready to start training your model now! Load ViLT with [`ViltForQuestionAnswering`]. Specify the number of labels
along with the label mappings:
```py
>>> from transformers import ViltForQuestionAnswering
>>> model = ViltForQuestionAnswering.from_pretrained(model_checkpoint, num_labels=len(id2label), id2label=id2label, label2id=label2id)
```
At this point, only three steps remain:
1. Define your training hyperparameters in [`TrainingArguments`]:
```py
>>> from transformers import TrainingArguments
>>> repo_id = "MariaK/vilt_finetuned_200"
>>> training_args = TrainingArguments(
... output_dir=repo_id,
... per_device_train_batch_size=4,
... num_train_epochs=20,
... save_steps=200,
... logging_steps=50,
... learning_rate=5e-5,
... save_total_limit=2,
... remove_unused_columns=False,
... push_to_hub=True,
... )
```
2. Pass the training arguments to [`Trainer`] along with the model, dataset, processor, and data collator.
```py
>>> from transformers import Trainer
>>> trainer = Trainer(
... model=model,
... args=training_args,
... data_collator=data_collator,
... train_dataset=processed_dataset,
... tokenizer=processor,
... )
```
3. Call [`~Trainer.train`] to finetune your model.
```py
>>> trainer.train()
```
Once training is completed, share your final model on the 🤗 Hub with the [`~Trainer.push_to_hub`] method:
```py
>>> trainer.push_to_hub()
```
## Inference
Now that you have fine-tuned a ViLT model, and uploaded it to the 🤗 Hub, you can use it for inference. The simplest
way to try out your fine-tuned model for inference is to use it in a [`Pipeline`].
```py
>>> from transformers import pipeline
>>> pipe = pipeline("visual-question-answering", model="MariaK/vilt_finetuned_200")
```
The model in this guide has only been trained on 200 examples, so don't expect a lot from it. Let's see if it at least
learned something from the data and take the first example from the dataset to illustrate inference:
```py
>>> example = dataset[0]
>>> image = Image.open(example['image_id'])
>>> question = example['question']
>>> print(question)
>>> pipe(image, question, top_k=1)
"Where is he looking?"
[{'score': 0.5498199462890625, 'answer': 'down'}]
```
Even though not very confident, the model indeed has learned something. With more examples and longer training, you'll get far better results!
You can also manually replicate the results of the pipeline if you'd like:
1. Take an image and a question, prepare them for the model using the processor from your model.
2. Forward the result of preprocessing through the model.
3. From the logits, get the most likely answer's id, and find the actual answer in `id2label`.
```py
>>> processor = ViltProcessor.from_pretrained("MariaK/vilt_finetuned_200")
>>> image = Image.open(example['image_id'])
>>> question = example['question']
>>> # prepare inputs
>>> inputs = processor(image, question, return_tensors="pt")
>>> model = ViltForQuestionAnswering.from_pretrained("MariaK/vilt_finetuned_200")
>>> # forward pass
>>> with torch.no_grad():
... outputs = model(**inputs)
>>> logits = outputs.logits
>>> idx = logits.argmax(-1).item()
>>> print("Predicted answer:", model.config.id2label[idx])
Predicted answer: down
```
## Zero-shot VQA
The previous model treated VQA as a classification task. Some recent models, such as BLIP, BLIP-2, and InstructBLIP approach
VQA as a generative task. Let's take [BLIP-2](../model_doc/blip-2) as an example. It introduced a new visual-language pre-training
paradigm in which any combination of pre-trained vision encoder and LLM can be used (learn more in the [BLIP-2 blog post](https://huggingface.co/blog/blip-2)).
This enables achieving state-of-the-art results on multiple visual-language tasks including visual question answering.
Let's illustrate how you can use this model for VQA. First, let's load the model. Here we'll explicitly send the model to a
GPU, if available, which we didn't need to do earlier when training, as [`Trainer`] handles this automatically:
```py
>>> from transformers import AutoProcessor, Blip2ForConditionalGeneration
>>> import torch
>>> processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b")
>>> model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
>>> device = "cuda" if torch.cuda.is_available() else "cpu"
>>> model.to(device)
```
The model takes image and text as input, so let's use the exact same image/question pair from the first example in the VQA dataset:
```py
>>> example = dataset[0]
>>> image = Image.open(example['image_id'])
>>> question = example['question']
```
To use BLIP-2 for the visual question answering task, the textual prompt has to follow a specific format: `Question: {} Answer:`.
```py
>>> prompt = f"Question: {question} Answer:"
```
Now we need to preprocess the image/prompt with the model's processor, pass the processed input through the model, and decode the output:
```py
>>> inputs = processor(image, text=prompt, return_tensors="pt").to(device, torch.float16)
>>> generated_ids = model.generate(**inputs, max_new_tokens=10)
>>> generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip()
>>> print(generated_text)
"He is looking at the crowd"
```
As you can see, the model recognized the crowd and the direction of the face (looking down); however, it seems to miss
the fact that the crowd is behind the skater. Still, in cases where acquiring human-annotated datasets is not feasible, this
approach can quickly produce useful results.
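You can ask further questions about the same image simply by changing the prompt. The snippet below is an illustrative sketch that reuses the objects defined above; the exact answer will depend on the model:
```py
>>> prompt = "Question: how many people are in the picture? Answer:"

>>> inputs = processor(image, text=prompt, return_tensors="pt").to(device, torch.float16)
>>> generated_ids = model.generate(**inputs, max_new_tokens=10)
>>> print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())
```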
| transformers/docs/source/en/tasks/visual_question_answering.md/0 | {
"file_path": "transformers/docs/source/en/tasks/visual_question_answering.md",
"repo_id": "transformers",
"token_count": 4862
} | 257 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ยฟCรณmo puedo crear un pipeline personalizado?
En esta guรญa, veremos cรณmo crear un pipeline personalizado y cรณmo compartirlo en el [Hub](https://hf.co/models) o aรฑadirlo
a la biblioteca ๐ค Transformers.
En primer lugar, debes decidir las entradas que tu pipeline podrรก recibir. Pueden ser strings, bytes,
diccionarios o lo que te parezca que vaya a ser la entrada mรกs apropiada. Intenta mantener estas entradas en un
formato que sea tan Python puro como sea posible, puesto que esto facilita la compatibilidad (incluso con otros
lenguajes de programaciรณn por medio de JSON). Estos serรกn los `inputs` (entradas) del pipeline (`preprocess`).
Ahora debes definir los `outputs` (salidas). Al igual que con los `inputs`, entre mรกs simple el formato, mejor.
Estas serรกn las salidas del mรฉtodo `postprocess` (posprocesamiento).
Empieza heredando la clase base `Pipeline` con los 4 mรฉtodos que debemos implementar: `preprocess` (preprocesamiento),
`_forward` (ejecuciรณn), `postprocess` (posprocesamiento) y `_sanitize_parameters` (verificar parรกmetros).
```python
from transformers import Pipeline
class MyPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
return preprocess_kwargs, {}, {}
def preprocess(self, inputs, maybe_arg=2):
model_input = Tensor(inputs["input_ids"])
return {"model_input": model_input}
def _forward(self, model_inputs):
# model_inputs == {"model_input": model_input}
outputs = self.model(**model_inputs)
# Quizรก {"logits": Tensor(...)}
return outputs
def postprocess(self, model_outputs):
best_class = model_outputs["logits"].softmax(-1)
return best_class
```
La estructura de este desglose es asรญ para garantizar una compatibilidad mรกs o menos transparente con el uso de
CPU/GPU y el pre/posprocesamiento en CPU en varios hilos.
`preprocess` tomarรก las entradas definidas originalmente y las convertirรก en algo que se le pueda pasar al modelo.
Podrรญa contener mรกs informaciรณn y a menudo es un objeto `Dict` (diccionario).
`_forward` contiene los detalles de la implementaciรณn y no deberรญa ser invocado de forma directa. `forward` es el
mรฉtodo preferido a utilizar pues contiene verificaciones para asegurar que todo funcione en el dispositivo correcto.
Cualquier cosa que estรฉ relacionada con un modelo real deberรญa ir en el mรฉtodo `_forward`, todo lo demรกs va en
los mรฉtodos de preprocesamiento y posprocesamiento.
Los mรฉtodos `postprocess` reciben la salida `_forward` y la convierten en la salida final que decidimos
anteriormente.
`_sanitize_parameters` existe para permitir a los usuarios pasar cualesquiera parรกmetros cuando lo deseen, ya
sea al momento de inicializar el pipeline `pipeline(...., maybe_arg=4)` o al momento de invocarlo
`pipe = pipeline(...); output = pipe(...., maybe_arg=4)`.
El método `_sanitize_parameters` devuelve 3 diccionarios de kwargs que serán pasados directamente a `preprocess`,
`_forward` y `postprocess`. No definas nada si el caller no invocó el pipeline con parámetros adicionales.
Esto permite mantener los parámetros por defecto de la definición de la función, lo que es más "natural".
Un ejemplo clรกsico serรญa un argumento `top_k` en el posprocesamiento de una tarea de clasificaciรณn.
```python
>>> pipe = pipeline("my-new-task")
>>> pipe("This is a test")
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}, {"label": "3-star", "score": 0.05}
{"label": "4-star", "score": 0.025}, {"label": "5-star", "score": 0.025}]
>>> pipe("This is a test", top_k=2)
[{"label": "1-star", "score": 0.8}, {"label": "2-star", "score": 0.1}]
```
Para lograrlo, actualizaremos nuestro mรฉtodo `postprocess` con un valor por defecto de `5` y modificaremos
`_sanitize_parameters` para permitir este nuevo parรกmetro.
```python
def postprocess(self, model_outputs, top_k=5):
best_class = model_outputs["logits"].softmax(-1)
# Aรฑade la lรณgica para manejar el top_k
return best_class
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "maybe_arg" in kwargs:
preprocess_kwargs["maybe_arg"] = kwargs["maybe_arg"]
postprocess_kwargs = {}
if "top_k" in kwargs:
postprocess_kwargs["top_k"] = kwargs["top_k"]
return preprocess_kwargs, {}, postprocess_kwargs
```
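A modo de referencia, este es un boceto mínimo de cómo podría completarse la lógica de `top_k` en `postprocess` (se asume que el modelo expone `config.id2label`, como ocurre con los modelos de clasificación de 🤗 Transformers):
```python
def postprocess(self, model_outputs, top_k=5):
    probs = model_outputs["logits"].softmax(-1)[0]
    scores, ids = probs.topk(top_k)
    # construye una lista de diccionarios {label, score}, ordenada de mayor a menor puntuación
    return [
        {"label": self.model.config.id2label[i], "score": s}
        for s, i in zip(scores.tolist(), ids.tolist())
    ]
```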
Intenta que las entradas y salidas sean muy simples e, idealmente, que puedan serializarse como JSON, pues esto
hace el uso del pipeline muy sencillo sin que el usuario tenga que preocuparse por conocer nuevos tipos de objetos.
Tambiรฉn es relativamente comรบn tener compatibilidad con muchos tipos diferentes de argumentos por facilidad de uso
(por ejemplo, los archivos de audio pueden ser nombres de archivo, URLs o bytes).
## Aรฑadirlo a la lista de tareas
Para registrar tu `new-task` (nueva tarea) en la lista de tareas, debes aรฑadirla al
`PIPELINE_REGISTRY` (registro de pipelines):
```python
from transformers.pipelines import PIPELINE_REGISTRY
PIPELINE_REGISTRY.register_pipeline(
"new-task",
pipeline_class=MyPipeline,
pt_model=AutoModelForSequenceClassification,
)
```
Puedes especificar un modelo por defecto si lo deseas, en cuyo caso debe venir con una versiรณn especรญfica (que puede ser el nombre de un branch o hash de commit, en este caso usamos `"abcdef"`), asรญ como el tipo:
```python
PIPELINE_REGISTRY.register_pipeline(
"new-task",
pipeline_class=MyPipeline,
pt_model=AutoModelForSequenceClassification,
default={"pt": ("user/awesome_model", "abcdef")},
type="text", # tipo de datos que maneja: texto, audio, imagen, multi-modalidad
)
```
## Comparte tu pipeline en el Hub
Para compartir tu pipeline personalizado en el Hub, solo tienes que guardar el cรณdigo personalizado de tu sub-clase
`Pipeline` en un archivo de Python. Por ejemplo, digamos que queremos usar un pipeline personalizado para la
clasificaciรณn de duplas de oraciones de esta forma:
```py
import numpy as np
from transformers import Pipeline
def softmax(outputs):
maxes = np.max(outputs, axis=-1, keepdims=True)
shifted_exp = np.exp(outputs - maxes)
return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
class PairClassificationPipeline(Pipeline):
def _sanitize_parameters(self, **kwargs):
preprocess_kwargs = {}
if "second_text" in kwargs:
preprocess_kwargs["second_text"] = kwargs["second_text"]
return preprocess_kwargs, {}, {}
def preprocess(self, text, second_text=None):
return self.tokenizer(text, text_pair=second_text, return_tensors=self.framework)
def _forward(self, model_inputs):
return self.model(**model_inputs)
def postprocess(self, model_outputs):
logits = model_outputs.logits[0].numpy()
probabilities = softmax(logits)
best_class = np.argmax(probabilities)
label = self.model.config.id2label[best_class]
score = probabilities[best_class].item()
logits = logits.tolist()
return {"label": label, "score": score, "logits": logits}
```
La implementaciรณn es independiente del framework y funcionarรก con modelos de PyTorch y TensorFlow. Si guardamos
esto en un archivo llamado `pair_classification.py`, podemos importarlo y registrarlo de la siguiente manera:
```py
from pair_classification import PairClassificationPipeline
from transformers.pipelines import PIPELINE_REGISTRY
from transformers import AutoModelForSequenceClassification, TFAutoModelForSequenceClassification
PIPELINE_REGISTRY.register_pipeline(
"pair-classification",
pipeline_class=PairClassificationPipeline,
pt_model=AutoModelForSequenceClassification,
tf_model=TFAutoModelForSequenceClassification,
)
```
Una vez hecho esto, podemos usarlo con un modelo pre-entrenado. Por ejemplo, al modelo `sgugger/finetuned-bert-mrpc`
se le hizo fine-tuning con el dataset MRPC, en el cual se clasifican duplas de oraciones como parรกfrasis o no.
```py
from transformers import pipeline
classifier = pipeline("pair-classification", model="sgugger/finetuned-bert-mrpc")
```
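Por ejemplo, podemos invocarlo con una dupla de oraciones de esta forma (llamada ilustrativa; la salida concreta dependerá del modelo):
```py
resultado = classifier(
    "A soccer game with multiple males playing.",
    second_text="Some men are playing a sport.",
)
# el resultado es un diccionario con las llaves 'label', 'score' y 'logits'
print(resultado)
```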
Ahora podemos compartirlo en el Hub usando el mรฉtodo `save_pretrained` (guardar pre-entrenado) en un `Repository`:
```py
from huggingface_hub import Repository
repo = Repository("test-dynamic-pipeline", clone_from="{your_username}/test-dynamic-pipeline")
classifier.save_pretrained("test-dynamic-pipeline")
repo.push_to_hub()
```
Esto copiarรก el archivo donde definiste `PairClassificationPipeline` dentro de la carpeta `"test-dynamic-pipeline"`,
y ademรกs guardarรก el modelo y el tokenizer del pipeline, antes de enviar todo al repositorio
`{your_username}/test-dynamic-pipeline`. Despuรฉs de esto, cualquier persona puede usarlo siempre que usen la opciรณn
`trust_remote_code=True` (confiar en cรณdigo remoto):
```py
from transformers import pipeline
classifier = pipeline(model="{your_username}/test-dynamic-pipeline", trust_remote_code=True)
```
## Aรฑadir el pipeline a ๐ค Transformers
Si quieres contribuir tu pipeline a la biblioteca ๐ค Transformers, tendrรกs que aรฑadirlo a un nuevo mรณdulo en el
sub-mรณdulo `pipelines` con el cรณdigo de tu pipeline. Luego, debes aรฑadirlo a la lista de tareas definidas en
`pipelines/__init__.py`.
A continuaciรณn tienes que aรฑadir las pruebas. Crea un nuevo archivo llamado `tests/test_pipelines_MY_PIPELINE.py`
basรกndote en las pruebas existentes.
La funciรณn `run_pipeline_test` serรก muy genรฉrica y se correrรก sobre modelos pequeรฑos escogidos al azar sobre todas las
arquitecturas posibles definidas en `model_mapping` y `tf_model_mapping`.
Esto es muy importante para probar compatibilidades a futuro, lo que significa que si alguien aรฑade un nuevo modelo
para `XXXForQuestionAnswering` entonces el pipeline intentarรก ejecutarse con ese modelo. Ya que los modelos son aleatorios,
es imposible verificar los valores como tales, y es por eso que hay un helper `ANY` que simplemente intentarรก que la
salida tenga el mismo tipo que la salida esperada del pipeline.
Tambiรฉn *debes* implementar 2 (preferiblemente 4) pruebas:
- `test_small_model_pt` : Define un (1) modelo pequeรฑo para este pipeline (no importa si los resultados no tienen sentido)
y prueba las salidas del pipeline. Los resultados deberรญan ser los mismos que en `test_small_model_tf`.
- `test_small_model_tf` : Define un (1) modelo pequeรฑo para este pipeline (no importa si los resultados no tienen sentido)
y prueba las salidas del pipeline. Los resultados deberรญan ser los mismos que en `test_small_model_pt`.
- `test_large_model_pt` (`optional`): Prueba el pipeline en una tarea real en la que los resultados deben tener sentido.
Estas pruebas son lentas y deben marcarse como tales. El objetivo de esto es ejemplificar el pipeline y asegurarse de que
no haya divergencias en versiones futuras.
- `test_large_model_tf` (`optional`): Prueba el pipeline en una tarea real en la que los resultados deben tener sentido.
Estas pruebas son lentas y deben marcarse como tales. El objetivo de esto es ejemplificar el pipeline y asegurarse de que
no haya divergencias en versiones futuras.
| transformers/docs/source/es/add_new_pipeline.md/0 | {
"file_path": "transformers/docs/source/es/add_new_pipeline.md",
"repo_id": "transformers",
"token_count": 4318
} | 258 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Perplejidad de los modelos de longitud fija
[[open-in-colab]]
La perplejidad, perplexity en inglรฉs (PPL), es una de las mรฉtricas mรกs comunes para evaluar modelos de lenguaje. Antes de sumergirnos, debemos tener en cuenta que esta mรฉtrica se aplica especรญficamente a modelos de lenguaje clรกsicos (a veces llamados modelos autorregresivos o causales) y no estรก bien definida para modelos de lenguaje enmascarados como BERT (ver [resumen del modelo](model_summary)).
La perplejidad se define como la media negativa exponenciada del log-likelihood de una secuencia. Si tenemos una secuencia tokenizada \\(X = (x_0, x_1, \dots, x_t)\\), entonces la perplejidad de \\(X\\) es,
$$\text{PPL}(X) = \exp \left\{ {-\frac{1}{t}\sum_i^t \log p_\theta (x_i|x_{<i}) } \right\}$$
donde \\(\log p_\theta (x_i|x_{<i})\\) es el log-likelihood del token i-รฉsimo condicionado a los tokens precedentes \\(x_{<i}\\) segรบn nuestro modelo. De manera intuitiva, se puede pensar en esto como una evaluaciรณn de la capacidad del modelo para predecir de manera uniforme entre el conjunto de tokens especificados en un corpus. Es importante destacar que el procedimiento de tokenizaciรณn tiene un impacto directo en la perplejidad de un modelo, lo cual siempre debe tenerse en cuenta al comparar diferentes modelos.
Esto tambiรฉn es equivalente a la exponenciaciรณn de la entropรญa cruzada entre los datos y las predicciones del modelo. Para obtener mรกs intuiciรณn sobre la perplejidad y su relaciรณn con los Bits Por Carรกcter (BPC) y la compresiรณn de datos, echa un vistazo a esta [fantรกstica publicaciรณn en el blog de "The Gradient"](https://thegradient.pub/understanding-evaluation-metrics-for-language-models/).
## Cรกlculo de PPL con modelos de longitud fija
Si no estuviรฉramos limitados por el tamaรฑo del contexto de un modelo, evaluarรญamos la perplejidad (PPL) del modelo auto regresivamente factorizando una secuencia y condicionรกndonos en toda la subsecuencia precedente en cada paso, como se muestra a continuaciรณn.
<img width="600" alt="Full decomposition of a sequence with unlimited context length" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_full.gif"/>
Sin embargo, al trabajar con modelos aproximados, generalmente tenemos una restricciรณn en la cantidad de tokens que el modelo puede procesar. La versiรณn mรกs grande de [GPT-2](model_doc/gpt2), por ejemplo, tiene una longitud fija de 1024 tokens, por lo que no podemos calcular \\(p_\theta(x_t|x_{<t})\\) directamente cuando \\(t\\) es mayor que 1024.
En cambio, la secuencia se divide tรญpicamente en subsecuencias iguales al tamaรฑo mรกximo de entrada del modelo. Si el tamaรฑo mรกximo de entrada, de un modelo es \\(k\\), entonces aproximamos la probabilidad de un token \\(x_t\\) condicionรกndonos solo en los \\(k-1\\) tokens que lo preceden en lugar de todo el contexto. Al evaluar la perplejidad del modelo en una secuencia, un enfoque tentador pero sub รณptimo es dividir la secuencia en fragmentos independientes y sumar los log-likelihood descompuestos de cada segmento de manera independiente.
<img width="600" alt="Suboptimal PPL not taking advantage of full available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_chunked.gif"/>
Esto es rรกpido de calcular, ya que la perplejidad de cada segmento se puede calcular en un solo pase hacia adelante, pero sirve como una aproximaciรณn pobre de la perplejidad completamente factorizada y generalmente darรก como resultado una PPL mรกs alta (peor) porque el modelo tendrรก menos contexto en la mayorรญa de los pasos de predicciรณn.
En cambio, la PPL de modelos de longitud fija deberรญa evaluarse con una estrategia de ventana deslizante. Esto implica deslizar repetidamente la ventana de contexto para que el modelo tenga mรกs contexto al hacer cada predicciรณn.
<img width="600" alt="Sliding window PPL taking advantage of all available context" src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/ppl_sliding.gif"/>
Esta es una aproximaciรณn mรกs cercana a la verdadera descomposiciรณn de la probabilidad de la secuencia y generalmente darรก como resultado una puntuaciรณn mรกs favorable. La desventaja es que requiere un pase hacia adelante separado para cada token en el corpus. Un buen compromiso prรกctico es emplear una ventana deslizante estratificada, moviendo el contexto con pasos mรกs grandes en lugar de deslizarse de 1 token a la vez. Esto permite que la computaciรณn avance mucho mรกs rรกpido, mientras le da al modelo un contexto amplio para hacer
predicciones en cada paso.
## Ejemplo: Cรกlculo de la perplejidad con GPT-2 en ๐ค Transformers
Demostremos este proceso con GPT-2.
```python
from transformers import GPT2LMHeadModel, GPT2TokenizerFast
device = "cuda"
model_id = "gpt2-large"
model = GPT2LMHeadModel.from_pretrained(model_id).to(device)
tokenizer = GPT2TokenizerFast.from_pretrained(model_id)
```
Carguemos el conjunto de datos WikiText-2 y evaluemos la perplejidad utilizando algunas estrategias de ventana deslizante diferentes. Dado que este conjunto de datos es pequeรฑo y solo estamos realizando un pase hacia adelante sobre el conjunto, podemos cargar y codificar todo el conjunto de datos en la memoria.
```python
from datasets import load_dataset
test = load_dataset("wikitext", "wikitext-2-raw-v1", split="test")
encodings = tokenizer("\n\n".join(test["text"]), return_tensors="pt")
```
Con ๐ค Transformers, simplemente podemos pasar los `input_ids` como las `labels` a nuestro modelo, y la media negativa del log-likelihood para cada token se devuelve como la pรฉrdida. Sin embargo, con nuestro enfoque de ventana deslizante, hay superposiciรณn en los tokens que pasamos al modelo en cada iteraciรณn. No queremos que el log-likelihood de los tokens que estamos tratando solo como contexto se incluya en nuestra pรฉrdida, por lo que podemos establecer estos objetivos en `-100` para que se ignoren. El siguiente es un ejemplo de cรณmo podrรญamos hacer esto con un paso de `512`. Esto significa que el modelo tendrรก al menos `512` tokens como contexto al calcular el log-likelihood condicional de cualquier token (siempre que haya `512` tokens precedentes disponibles para condicionar).
```python
import torch
from tqdm import tqdm
max_length = model.config.n_positions
stride = 512
seq_len = encodings.input_ids.size(1)
nlls = []
prev_end_loc = 0
for begin_loc in tqdm(range(0, seq_len, stride)):
end_loc = min(begin_loc + max_length, seq_len)
trg_len = end_loc - prev_end_loc # puede ser diferente del paso en el รบltimo bucle
input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device)
target_ids = input_ids.clone()
target_ids[:, :-trg_len] = -100
with torch.no_grad():
outputs = model(input_ids, labels=target_ids)
# la pรฉrdida se calcula utilizando CrossEntropyLoss, que promedia las etiquetas vรกlidas
# N.B. el modelo solo calcula la pรฉrdida sobre trg_len - 1 etiquetas, porque desplaza las etiqueta internamente
# a la izquierda por 1.
neg_log_likelihood = outputs.loss
nlls.append(neg_log_likelihood)
prev_end_loc = end_loc
if end_loc == seq_len:
break
ppl = torch.exp(torch.stack(nlls).mean())
```
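Si quieres comparar distintos valores de `stride`, una opción es envolver el bucle anterior en una pequeña función auxiliar. El siguiente fragmento es solo un boceto ilustrativo que reutiliza las variables ya definidas (`encodings`, `model`, `max_length`, `seq_len` y `device`):
```python
def calcular_ppl(stride):
    nlls = []
    prev_end_loc = 0
    for begin_loc in range(0, seq_len, stride):
        end_loc = min(begin_loc + max_length, seq_len)
        trg_len = end_loc - prev_end_loc  # puede ser diferente del paso en el último bucle
        input_ids = encodings.input_ids[:, begin_loc:end_loc].to(device)
        target_ids = input_ids.clone()
        target_ids[:, :-trg_len] = -100
        with torch.no_grad():
            neg_log_likelihood = model(input_ids, labels=target_ids).loss
        nlls.append(neg_log_likelihood)
        prev_end_loc = end_loc
        if end_loc == seq_len:
            break
    return torch.exp(torch.stack(nlls).mean())


# por ejemplo: comparar la estrategia sin superposición (stride = longitud máxima)
# con la ventana deslizante de paso 512
print(calcular_ppl(stride=max_length).item(), calcular_ppl(stride=512).item())
```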
Ejecutar esto con una longitud de paso igual a la longitud máxima de entrada equivale a la estrategia subóptima,
sin ventana deslizante, que discutimos anteriormente. Cuanto menor sea el paso, más contexto tendrá el modelo para
realizar cada predicción y, por lo general, mejor será la perplejidad informada.
Cuando ejecutamos lo anterior con `stride = 1024`, es decir, sin superposiciรณn, la PPL resultante es `19.44`, que es
aproximadamente la misma que la `19.93` informada en el artรญculo de GPT-2. Al utilizar `stride = 512` y, por lo tanto,
emplear nuestra estrategia de ventana deslizante, esto disminuye a `16.45`. Esto no solo es una puntuaciรณn mรกs favorable, sino que se calcula de una manera mรกs cercana a la verdadera descomposiciรณn autorregresiva de la probabilidad de una secuencia.
| transformers/docs/source/es/perplexity.md/0 | {
"file_path": "transformers/docs/source/es/perplexity.md",
"repo_id": "transformers",
"token_count": 3114
} | 259 |
# docstyle-ignore
INSTALL_CONTENT = """
# Installation de Transformers
! pip install transformers datasets
# Pour installer à partir du code source au lieu de la dernière version, commentez la commande ci-dessus et décommentez la suivante.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
"{processor_class}": "FakeProcessorClass",
"{model_class}": "FakeModelClass",
"{object_class}": "FakeObjectClass",
}
| transformers/docs/source/fr/_config.py/0 | {
"file_path": "transformers/docs/source/fr/_config.py",
"repo_id": "transformers",
"token_count": 173
} | 260 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Addestramento efficiente su CPU multiple
Quando l'addestramento su una singola CPU è troppo lento, possiamo usare CPU multiple. Questa guida si concentra sul DDP basato su PyTorch, che abilita l'addestramento distribuito su CPU in maniera efficiente.
## Intel® oneCCL Bindings per PyTorch
[Intel® oneCCL](https://github.com/oneapi-src/oneCCL) (collective communications library) è una libreria per l'addestramento distribuito efficiente del deep learning e implementa collettivi come allreduce, allgather, alltoall. Per maggiori informazioni su oneCCL, fai riferimento a [oneCCL documentation](https://spec.oneapi.com/versions/latest/elements/oneCCL/source/index.html) e [oneCCL specification](https://spec.oneapi.com/versions/latest/elements/oneCCL/source/index.html).
Il modulo `oneccl_bindings_for_pytorch` (`torch_ccl` prima della versione 1.12) implementa le API PyTorch C10D ProcessGroup, può essere caricato dinamicamente come ProcessGroup esterno e al momento funziona solo su piattaforma Linux.
Qui trovi informazioni più dettagliate su [oneccl_bind_pt](https://github.com/intel/torch-ccl).
### Installazione di Intel® oneCCL Bindings per PyTorch:
I file wheel sono disponibili per le seguenti versioni di Python:
| Extension Version | Python 3.6 | Python 3.7 | Python 3.8 | Python 3.9 | Python 3.10 |
| :---------------: | :--------: | :--------: | :--------: | :--------: | :---------: |
| 1.13.0 | | โ | โ | โ | โ |
| 1.12.100 | | โ | โ | โ | โ |
| 1.12.0 | | โ | โ | โ | โ |
| 1.11.0 | | โ | โ | โ | โ |
| 1.10.0 | โ | โ | โ | โ | |
```bash
pip install oneccl_bind_pt=={pytorch_version} -f https://developer.intel.com/ipex-whl-stable-cpu
```
dove `{pytorch_version}` deve essere la tua versione di PyTorch, ad esempio 1.13.0.
Verifica altri approcci per [oneccl_bind_pt installation](https://github.com/intel/torch-ccl).
Le versioni di oneCCL e PyTorch devono combaciare.
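Ad esempio, per ricavare la versione di PyTorch installata e scegliere il wheel corrispondente (frammento puramente illustrativo):
```bash
python -c "import torch; print(torch.__version__)"
# ad esempio, con PyTorch 1.13.0:
pip install oneccl_bind_pt==1.13.0 -f https://developer.intel.com/ipex-whl-stable-cpu
```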
<Tip warning={true}>
oneccl_bindings_for_pytorch 1.12.0 prebuilt wheel does not work with PyTorch 1.12.1 (it is for PyTorch 1.12.0)
PyTorch 1.12.1 should work with oneccl_bindings_for_pytorch 1.12.100
</Tip>
## Intel® MPI library
Usa questa implementazione basata sullo standard MPI per fornire un'architettura flessibile, efficiente e scalabile su cluster per Intel®. Questo componente è parte di Intel® oneAPI HPC Toolkit.
oneccl_bindings_for_pytorch è installato insieme al set di strumenti MPI. È necessario fare il source dell'ambiente prima di utilizzarlo.
Per Intel® oneCCL >= 1.12.0:
```bash
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
```
per Intelยฎ oneCCL con versione < 1.12.0
```bash
torch_ccl_path=$(python -c "import torch; import torch_ccl; import os; print(os.path.abspath(os.path.dirname(torch_ccl.__file__)))")
source $torch_ccl_path/env/setvars.sh
```
#### Installazione IPEX:
IPEX fornisce ottimizzazioni delle prestazioni per l'addestramento della CPU sia con Float32 che con BFloat16; puoi fare riferimento a [single CPU section](./perf_train_cpu).
Il seguente "Utilizzo in Trainer" prende come esempio mpirun nella libreria Intelยฎ MPI.
## Utilizzo in Trainer
Per abilitare l'addestramento distribuito multi CPU nel Trainer con il ccl backend, gli utenti devono aggiungere **`--ddp_backend ccl`** negli argomenti del comando.
Vediamo un esempio per il [question-answering example](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)
Il seguente comando abilita due processi sul nodo Xeon, con un processo in esecuzione per ogni socket. Le variabili OMP_NUM_THREADS/CCL_WORKER_COUNT possono essere impostate per una prestazione ottimale.
```shell script
export CCL_WORKER_COUNT=1
export MASTER_ADDR=127.0.0.1
mpirun -n 2 -genv OMP_NUM_THREADS=23 \
python3 run_qa.py \
--model_name_or_path bert-large-uncased \
--dataset_name squad \
--do_train \
--do_eval \
--per_device_train_batch_size 12 \
--learning_rate 3e-5 \
--num_train_epochs 2 \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir /tmp/debug_squad/ \
--no_cuda \
--ddp_backend ccl \
--use_ipex
```
Il seguente comando abilita l'addestramento per un totale di quattro processi su due Xeon (node0 e node1, prendendo node0 come processo principale), ppn (processes per node) รจ impostato a 2, on un processo in esecuzione per ogni socket. Le variabili OMP_NUM_THREADS/CCL_WORKER_COUNT possono essere impostate per una prestazione ottimale.
In node0, รจ necessario creare un file di configurazione che contenga gli indirizzi IP di ciascun nodo (per esempio hostfile) e passare il percorso del file di configurazione come parametro.
```shell script
cat hostfile
xxx.xxx.xxx.xxx #node0 ip
xxx.xxx.xxx.xxx #node1 ip
```
A questo punto, esegui il seguente comando nel nodo0 e **4DDP** sarร abilitato in node0 e node1 con BF16 auto mixed precision:
```shell script
export CCL_WORKER_COUNT=1
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
mpirun -f hostfile -n 4 -ppn 2 \
-genv OMP_NUM_THREADS=23 \
python3 run_qa.py \
--model_name_or_path bert-large-uncased \
--dataset_name squad \
--do_train \
--do_eval \
--per_device_train_batch_size 12 \
--learning_rate 3e-5 \
--num_train_epochs 2 \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir /tmp/debug_squad/ \
--no_cuda \
--ddp_backend ccl \
--use_ipex \
--bf16
```
| transformers/docs/source/it/perf_train_cpu_many.md/0 | {
"file_path": "transformers/docs/source/it/perf_train_cpu_many.md",
"repo_id": "transformers",
"token_count": 2562
} | 261 |
<!--
Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ ใใฎใใกใคใซใฏMarkdownใงใใใHugging Faceใฎdoc-builder๏ผMDXใซ้กไผผ๏ผๅใใฎ็นๅฎใฎๆงๆใๅซใใงใใใใใ
Markdownใใฅใผใขใงใฏๆญฃใใ่กจ็คบใใใชใใใจใซๆณจๆใใฆใใ ใใใ
-->
# Benchmarks
<Tip warning={true}>
Hugging Faceใฎใใณใใใผใฏใใผใซใฏ้ๆจๅฅจใงใใใTransformerใขใใซใฎ้ๅบฆใจใกใขใชใฎ่ค้ใใๆธฌๅฎใใใใใซๅค้จใฎใใณใใใผใฏใฉใคใใฉใชใไฝฟ็จใใใใจใใๅงใใใพใใ
</Tip>
[[open-in-colab]]
๐ค Transformersใขใใซใใใณใใใผใฏใใใในใใใฉใฏใใฃในใใใงใซๅฉ็จๅฏ่ฝใชใใณใใใผใฏใซใคใใฆ่ฆใฆใฟใพใใใใ
๐ค Transformersใขใใซใใใณใใใผใฏใใๆนๆณใซใคใใฆ่ฉณใใ่ชฌๆใใใใผใใใใฏใฏ[ใใกใ](https://github.com/huggingface/notebooks/tree/main/examples/benchmark.ipynb)ใงๅฉ็จใงใใพใใ
## How to benchmark ๐ค Transformers models
[`PyTorchBenchmark`]ใฏใฉในใจ[`TensorFlowBenchmark`]ใฏใฉในใไฝฟ็จใใใจใ๐ค Transformersใขใใซใๆ่ปใซใใณใใใผใฏใงใใพใใ
ใใณใใใผใฏใฏใฉในใไฝฟ็จใใใจใ_ใใผใฏใกใขใชไฝฟ็จ้_ ใใใณ _ๅฟ
่ฆใชๆ้_ ใ _ๆจ่ซ_ ใใใณ _ใใฌใผใใณใฐ_ ใฎไธกๆนใซใคใใฆๆธฌๅฎใงใใพใใ
<Tip>
ใใใงใฎ _ๆจ่ซ_ ใฏใๅไธใฎใใฉใฏใผใใในใซใใฃใฆๅฎ็พฉใใใ _ใใฌใผใใณใฐ_ ใฏๅไธใฎใใฉใฏใผใใในใจ
ใใใฏใฏใผใใในใซใใฃใฆๅฎ็พฉใใใพใใ
</Tip>
ใใณใใใผใฏใฏใฉใน[`PyTorchBenchmark`]ใจ[`TensorFlowBenchmark`]ใฏใใใใใใฎใใณใใใผใฏใฏใฉในใซๅฏพใใ้ฉๅใช่จญๅฎใๅซใ [`PyTorchBenchmarkArguments`] ใใใณ [`TensorFlowBenchmarkArguments`] ใฟใคใใฎใชใใธใงใฏใใๅฟ
่ฆใจใใพใใ
[`PyTorchBenchmarkArguments`] ใใใณ [`TensorFlowBenchmarkArguments`] ใฏใใผใฟใฏใฉในใงใใใใใใใใฎใใณใใใผใฏใฏใฉในใซๅฏพใใใในใฆใฎ้ข้ฃใใ่จญๅฎใๅซใใงใใพใใ
ๆฌกใฎไพใงใฏใใฟใคใ _bert-base-cased_ ใฎBERTใขใใซใใใณใใใผใฏใใๆนๆณใ็คบใใใฆใใพใใ
<frameworkcontent>
<pt>
```py
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
>>> args = PyTorchBenchmarkArguments(models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512])
>>> benchmark = PyTorchBenchmark(args)
```
</pt>
<tf>
```py
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments
>>> args = TensorFlowBenchmarkArguments(
... models=["bert-base-uncased"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
... )
>>> benchmark = TensorFlowBenchmark(args)
```
</tf>
</frameworkcontent>
ใใใงใฏใใใณใใใผใฏๅผๆฐใฎใใผใฟใฏใฉในใซๅฏพใใฆใ`models`ใ`batch_sizes`
ใใใณ`sequence_lengths`ใฎ3ใคใฎๅผๆฐใๆๅฎใใใฆใใพใใๅผๆฐ`models`ใฏๅฟ
้ ใงใ
[ใขใใซใใ](https://huggingface.co/models)ใใใฎใขใใซ่ญๅฅๅญใฎ`ใชในใ`ใๆๅพ
ใ
ใพใใ`batch_sizes`ใจ`sequence_lengths`ใฎ2ใคใฎ`ใชในใ`ๅผๆฐใฏ
ใขใใซใฎใใณใใใผใฏๅฏพ่ฑกใจใชใ`input_ids`ใฎใตใคใบใๅฎ็พฉใใพใใ
ใใณใใใผใฏๅผๆฐใใผใฟใฏใฉในใไปใใฆ่จญๅฎใงใใไปใฎๅคใใฎใใฉใกใผใฟใใใใพใใใใใใฎ่ฉณ็ดฐใซใคใใฆใฏใ็ดๆฅใใกใคใซ
`src/transformers/benchmark/benchmark_args_utils.py`ใ
`src/transformers/benchmark/benchmark_args.py`๏ผPyTorch็จ๏ผใใใใณ`src/transformers/benchmark/benchmark_args_tf.py`๏ผTensorflow็จ๏ผ
ใๅ็
งใใใใๆฌกใฎใทใงใซใณใใณใใใซใผใใใๅฎ่กใใใจใPyTorchใจTensorflowใฎใใใใใซๅฏพใใฆ่จญๅฎๅฏ่ฝใชใในใฆใฎใใฉใกใผใฟใฎ่จ่ฟฐ็ใชใชในใใ่กจ็คบใใใพใใ
<frameworkcontent>
<pt>
```bash
python examples/pytorch/benchmarking/run_benchmark.py --help
```
ใคใณในใฟใณในๅใใใใใณใใใผใฏใชใใธใงใฏใใฏใๅใซ `benchmark.run()` ใๅผใณๅบใใใจใงๅฎ่กใงใใพใใ
```py
>>> results = benchmark.run()
>>> print(results)
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Time in s
--------------------------------------------------------------------------------
bert-base-uncased 8 8 0.006
bert-base-uncased 8 32 0.006
bert-base-uncased 8 128 0.018
bert-base-uncased 8 512 0.088
--------------------------------------------------------------------------------
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
bert-base-uncased 8 8 1227
bert-base-uncased 8 32 1281
bert-base-uncased 8 128 1307
bert-base-uncased 8 512 1539
--------------------------------------------------------------------------------
==================== ENVIRONMENT INFORMATION ====================
- transformers_version: 2.11.0
- framework: PyTorch
- use_torchscript: False
- framework_version: 1.4.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 08:58:43.371351
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False
```
</pt>
<tf>
```bash
python examples/tensorflow/benchmarking/run_benchmark_tf.py --help
```
ใคใณในใฟใณในๅใใใใใณใใใผใฏใชใใธใงใฏใใฏใๅใซ `benchmark.run()` ใๅผใณๅบใใใจใงๅฎ่กใงใใพใใ
```py
>>> results = benchmark.run()
>>> print(results)
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Time in s
--------------------------------------------------------------------------------
bert-base-uncased 8 8 0.005
bert-base-uncased 8 32 0.008
bert-base-uncased 8 128 0.022
bert-base-uncased 8 512 0.105
--------------------------------------------------------------------------------
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
bert-base-uncased 8 8 1330
bert-base-uncased 8 32 1330
bert-base-uncased 8 128 1330
bert-base-uncased 8 512 1770
--------------------------------------------------------------------------------
==================== ENVIRONMENT INFORMATION ====================
- transformers_version: 2.11.0
- framework: Tensorflow
- use_xla: False
- framework_version: 2.2.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 09:26:35.617317
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False
```
</tf>
</frameworkcontent>
ใใใฉใซใใงใฏใ_ๆจ่ซๆ้_ ใจ _ๅฟ
่ฆใชใกใขใช_ ใใใณใใใผใฏใใใพใใ
ไธ่จใฎไพใฎๅบๅใงใฏใๆๅใฎ2ใคใฎใปใฏใทใงใณใ _ๆจ่ซๆ้_ ใจ _ๆจ่ซใกใขใช_
ใซๅฏพๅฟใใ็ตๆใ็คบใใฆใใพใใใใใซใ่จ็ฎ็ฐๅขใซ้ขใใใในใฆใฎ้ข้ฃๆ
ๅ ฑใ
ไพใใฐ GPU ใฟใคใใใทในใใ ใใฉใคใใฉใชใฎใใผใธใงใณใชใฉใใ_ENVIRONMENT INFORMATION_ ใฎไธใซ่กจ็คบใใใพใใใใฎๆ
ๅ ฑใฏใ[`PyTorchBenchmarkArguments`]
ใใใณ [`TensorFlowBenchmarkArguments`] ใซๅผๆฐ `save_to_csv=True`
ใ่ฟฝๅ ใใใใจใงใใชใใทใงใณใง _.csv_ ใใกใคใซใซไฟๅญใใใใจใใงใใพใใใใฎๅ ดๅใๅใปใฏใทใงใณใฏๅฅใ
ใฎ _.csv_ ใใกใคใซใซไฟๅญใใใพใใ_.csv_
ใใกใคใซใธใฎใในใฏใใใผใฟใฏใฉในใฎๅผๆฐใไฝฟ็จใใฆใชใใทใงใณใงๅฎ็พฉใงใใพใใ
ใขใใซ่ญๅฅๅญใไพใใฐ `bert-base-uncased` ใไฝฟ็จใใฆไบๅๅญฆ็ฟๆธใฟใขใใซใใใณใใใผใฏใใไปฃใใใซใๅฉ็จๅฏ่ฝใชไปปๆใฎใขใใซใฏใฉในใฎไปปๆใฎ่จญๅฎใใใณใใใผใฏใใใใจใใงใใพใใใใฎๅ ดๅใใใณใใใผใฏๅผๆฐใจๅ
ฑใซ่จญๅฎใฎ `list` ใๆฟๅ
ฅใใๅฟ
่ฆใใใใพใใ
<frameworkcontent>
<pt>
```py
>>> from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments, BertConfig
>>> args = PyTorchBenchmarkArguments(
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
... )
>>> config_base = BertConfig()
>>> config_384_hid = BertConfig(hidden_size=384)
>>> config_6_lay = BertConfig(num_hidden_layers=6)
>>> benchmark = PyTorchBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
>>> benchmark.run()
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Time in s
--------------------------------------------------------------------------------
bert-base 8 8 0.006
bert-base 8 32 0.006
bert-base 8 128 0.018
bert-base 8 512 0.088
bert-384-hid 8 8 0.006
bert-384-hid 8 32 0.006
bert-384-hid 8 128 0.011
bert-384-hid 8 512 0.054
bert-6-lay 8 8 0.003
bert-6-lay 8 32 0.004
bert-6-lay 8 128 0.009
bert-6-lay 8 512 0.044
--------------------------------------------------------------------------------
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
bert-base 8 8 1277
bert-base 8 32 1281
bert-base 8 128 1307
bert-base 8 512 1539
bert-384-hid 8 8 1005
bert-384-hid 8 32 1027
bert-384-hid 8 128 1035
bert-384-hid 8 512 1255
bert-6-lay 8 8 1097
bert-6-lay 8 32 1101
bert-6-lay 8 128 1127
bert-6-lay 8 512 1359
--------------------------------------------------------------------------------
==================== ENVIRONMENT INFORMATION ====================
- transformers_version: 2.11.0
- framework: PyTorch
- use_torchscript: False
- framework_version: 1.4.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 09:35:25.143267
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False
```
</pt>
<tf>
```py
>>> from transformers import TensorFlowBenchmark, TensorFlowBenchmarkArguments, BertConfig
>>> args = TensorFlowBenchmarkArguments(
... models=["bert-base", "bert-384-hid", "bert-6-lay"], batch_sizes=[8], sequence_lengths=[8, 32, 128, 512]
... )
>>> config_base = BertConfig()
>>> config_384_hid = BertConfig(hidden_size=384)
>>> config_6_lay = BertConfig(num_hidden_layers=6)
>>> benchmark = TensorFlowBenchmark(args, configs=[config_base, config_384_hid, config_6_lay])
>>> benchmark.run()
==================== INFERENCE - SPEED - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Time in s
--------------------------------------------------------------------------------
bert-base 8 8 0.005
bert-base 8 32 0.008
bert-base 8 128 0.022
bert-base 8 512 0.106
bert-384-hid 8 8 0.005
bert-384-hid 8 32 0.007
bert-384-hid 8 128 0.018
bert-384-hid 8 512 0.064
bert-6-lay 8 8 0.002
bert-6-lay 8 32 0.003
bert-6-lay 8 128 0.0011
bert-6-lay 8 512 0.074
--------------------------------------------------------------------------------
==================== INFERENCE - MEMORY - RESULT ====================
--------------------------------------------------------------------------------
Model Name Batch Size Seq Length Memory in MB
--------------------------------------------------------------------------------
bert-base 8 8 1330
bert-base 8 32 1330
bert-base 8 128 1330
bert-base 8 512 1770
bert-384-hid 8 8 1330
bert-384-hid 8 32 1330
bert-384-hid 8 128 1330
bert-384-hid 8 512 1540
bert-6-lay 8 8 1330
bert-6-lay 8 32 1330
bert-6-lay 8 128 1330
bert-6-lay 8 512 1540
--------------------------------------------------------------------------------
==================== ENVIRONMENT INFORMATION ====================
- transformers_version: 2.11.0
- framework: Tensorflow
- use_xla: False
- framework_version: 2.2.0
- python_version: 3.6.10
- system: Linux
- cpu: x86_64
- architecture: 64bit
- date: 2020-06-29
- time: 09:38:15.487125
- fp16: False
- use_multiprocessing: True
- only_pretrain_model: False
- cpu_ram_mb: 32088
- use_gpu: True
- num_gpus: 1
- gpu: TITAN RTX
- gpu_ram_mb: 24217
- gpu_power_watts: 280.0
- gpu_performance_state: 2
- use_tpu: False
```
</tf>
</frameworkcontent>
ใซในใฟใใคใบใใใBertModelใฏใฉในใฎๆงๆใซๅฏพใใๆจ่ซๆ้ใจๅฟ
่ฆใชใกใขใชใฎใใณใใใผใฏ
ใใฎๆฉ่ฝใฏใใขใใซใใใฌใผใใณใฐใใ้ใซใฉใฎๆงๆใ้ธๆใในใใใๆฑบๅฎใใ้ใซ็นใซๅฝน็ซใคใใจใใใใพใใ
## Benchmark best practices
ใใฎใปใฏใทใงใณใงใฏใใขใใซใใใณใใใผใฏใใ้ใซๆณจๆใในใใใใคใใฎใในใใใฉใฏใใฃในใใชในใใขใใใใฆใใพใใ
- ็พๅจใๅไธใใใคในใฎใใณใใใผใฏใใใตใใผใใใใฆใใพใใใGPUใงใใณใใใผใฏใๅฎ่กใใๅ ดๅใใณใผใใๅฎ่กใใใใใคในใใฆใผใถใผใๆๅฎใใใใจใๆจๅฅจใใพใใ
ใใใฏใทใงใซใง`CUDA_VISIBLE_DEVICES`็ฐๅขๅคๆฐใ่จญๅฎใใใใจใง่กใใพใใไพ๏ผ`export CUDA_VISIBLE_DEVICES=0`ใๅฎ่กใใฆใใใณใผใใๅฎ่กใใพใใ
- `no_multi_processing`ใชใใทใงใณใฏใใในใใใใณใใใใฐ็จใซใฎใฟ`True`ใซ่จญๅฎใในใใงใใๆญฃ็ขบใชใกใขใช่จๆธฌใ็ขบไฟใใใใใซใๅใกใขใชใใณใใใผใฏใๅฅใ
ใฎใใญใปในใงๅฎ่กใใใใจใใๅงใใใพใใใใใซใใใ`no_multi_processing`ใ`True`ใซ่จญๅฎใใใพใใ
- ใขใใซใฎใใณใใใผใฏ็ตๆใๅ
ฑๆใใ้ใซใฏใๅธธใซ็ฐๅขๆ
ๅ ฑใ่จ่ฟฐใใในใใงใใ็ฐใชใGPUใใใคในใใฉใคใใฉใชใใผใธใงใณใชใฉใงใใณใใใผใฏ็ตๆใๅคงใใ็ฐใชใๅฏ่ฝๆงใใใใใใใใณใใใผใฏ็ตๆๅไฝใงใฏใณใใฅใใใฃใซใจใฃใฆใใพใๆ็จใงใฏใใใพใใใ
## Sharing your benchmark
ไปฅๅใใในใฆใฎๅฉ็จๅฏ่ฝใชใณใขใขใใซ๏ผๅฝๆ10ใขใใซ๏ผใซๅฏพใใฆใๅคใใฎ็ฐใชใ่จญๅฎใงๆจ่ซๆ้ใฎใใณใใใผใฏใ่กใใใพใใ๏ผPyTorchใไฝฟ็จใใTorchScriptใฎๆ็กใTensorFlowใไฝฟ็จใใXLAใฎๆ็กใชใฉใงใใใใใใฎใในใใฏใในใฆCPUใง่กใใใพใใ๏ผTensorFlow XLAใ้คใ๏ผใ
ใใฎใขใใญใผใใฎ่ฉณ็ดฐใซใคใใฆใฏใ[ๆฌกใฎใใญใฐใในใ](https://medium.com/huggingface/benchmarking-transformers-pytorch-and-tensorflow-e2917fb891c2)ใซ่ฉณใใ่ชฌๆใใใฆใใใ็ตๆใฏ[ใใกใ](https://docs.google.com/spreadsheets/d/1sryqufw2D0XlUH4sq3e9Wnxu5EAQkaohzrJbd5HdQ_w/edit?usp=sharing)ใงๅฉ็จใงใใพใใ
ๆฐใใใใณใใใผใฏใใผใซใไฝฟ็จใใใจใใณใใฅใใใฃใจใใณใใใผใฏ็ตๆใๅ
ฑๆใใใใจใใใใพใงไปฅไธใซ็ฐกๅใซใชใใพใใ
- [PyTorchใใณใใใผใฏ็ตๆ](https://github.com/huggingface/transformers/tree/main/examples/pytorch/benchmarking/README.md)ใ
- [TensorFlowใใณใใใผใฏ็ตๆ](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/benchmarking/README.md)ใ
| transformers/docs/source/ja/benchmarks.md/0 | {
"file_path": "transformers/docs/source/ja/benchmarks.md",
"repo_id": "transformers",
"token_count": 8797
} | 262 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ็บ้ป็จใฆใผใใฃใชใใฃ
ใใฎใใผใธใซใฏใ[`~generation.GenerationMixin.generate`] ใงไฝฟ็จใใใใในใฆใฎใฆใผใใฃใชใใฃ้ขๆฐใใชในใใใใฆใใพใใ
[`~generation.GenerationMixin.greedy_search`],
[`~generation.GenerationMixin.contrastive_search`],
[`~generation.GenerationMixin.sample`],
[`~generation.GenerationMixin.beam_search`],
[`~generation.GenerationMixin.beam_sample`],
[`~generation.GenerationMixin.group_beam_search`]ใใใใณ
[`~generation.GenerationMixin.constrained_beam_search`]ใ
ใใใใฎใปใจใใฉใฏใใฉใคใใฉใชๅ
ใฎ็ๆใกใฝใใใฎใณใผใใๅญฆ็ฟใใๅ ดๅใซใฎใฟๅฝนใซ็ซใกใพใใ
## ๅบๅใ็ๆใใ
[`~generation.GenerationMixin.generate`] ใฎๅบๅใฏใๆฌกใฎใตใใฏใฉในใฎใคใณในใฟใณในใงใใ
[`~utils.ModelOutput`]ใใใฎๅบๅใฏใ่ฟใใใใในใฆใฎๆ
ๅ ฑใๅซใใใผใฟๆง้ ใงใใ
[`~generation.GenerationMixin.generate`] ใซใใฃใฆไฝๆใใใพใใใใฟใใซใพใใฏ่พๆธใจใใฆใไฝฟ็จใงใใพใใ
ไปฅไธใซไพใ็คบใใพใใ
```python
from transformers import GPT2Tokenizer, GPT2LMHeadModel
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
inputs = tokenizer("Hello, my dog is cute and ", return_tensors="pt")
generation_output = model.generate(**inputs, return_dict_in_generate=True, output_scores=True)
```
`generation_output` ใชใใธใงใฏใใฏใใงใใ้ใ [`~generation.GenerateDecoderOnlyOutput`] ใงใใ
ไปฅไธใฎใใฎใฏใฉในใฎใใญใฅใกใณใใๅ็
งใใฆใใ ใใใใใใฏใๆฌกใฎๅฑๆงใใใใใจใๆๅณใใพใใ
- `sequences`: ็ๆใใใใใผใฏใณใฎใทใผใฑใณใน
- `scores` (ใชใใทใงใณ): ๅ็ๆในใใใใฎ่จ่ชใขใใชใณใฐ ใใใใฎไบๆธฌในใณใข
- `hidden_โโstates` (ใชใใทใงใณ): ็ๆในใใใใใจใฎใขใใซใฎ้ ใใ็ถๆ
- `attentions` (ใชใใทใงใณ): ็ๆในใใใใใจใฎใขใใซใฎใขใใณใทใงใณใฎ้ใฟ
ใใใงใฏใ`output_scores=True`ใๆธกใใใฎใง `scores` ใใใใพใใใ`hidden_โโstates` ใฏใใใพใใใ
`attentions` ใฏใ`output_hidden_โโstates=True`ใพใใฏ`output_attentions=True`ใๆธกใใชใใฃใใใใงใใ
้ๅธธใจๅใใใใซๅๅฑๆงใซใขใฏใปในใงใใพใใใใฎๅฑๆงใใขใใซใใ่ฟใใใชใใฃใๅ ดๅใฏใ
ใฏใใชใใใๅๅพใใพใใใใใงใใใจใใฐ`generation_output.scores`ใฏใ็ๆใใใใในใฆใฎไบๆธฌในใณใขใงใใ
่จ่ชใขใใชใณใฐใฎใใใใงใใใ`generation_output.attentions`ใฏ`None`ใงใใ
`generation_output` ใชใใธใงใฏใใใฟใใซใจใใฆไฝฟ็จใใๅ ดๅใ`None` ๅคใๆใใชใๅฑๆงใฎใฟใไฟๆใใใพใใ
ใใจใใฐใใใใซใฏ 2 ใคใฎ่ฆ็ด ใ`loss`ใๆฌกใซ`logits`ใใใใพใใ
```python
generation_output[:2]
```
ใใจใใฐใใฟใใซ `(generation_output.sequences,generation_output.scores)` ใ่ฟใใพใใ
`generation_output` ใชใใธใงใฏใใ่พๆธใจใใฆไฝฟ็จใใๅ ดๅใ`None` ใๆใใชใๅฑๆงใฎใฟใไฟๆใใใพใใ
ใใใงใฏใใใจใใฐใ`sequences`ใจ`scores`ใจใใ 2 ใคใฎใญใผใใใใพใใ
ใใใงใฏใในใฆใฎๅบๅใฟใคใใๆๆธๅใใพใใ
### PyTorch
[[autodoc]] generation.GenerateDecoderOnlyOutput
[[autodoc]] generation.GenerateEncoderDecoderOutput
[[autodoc]] generation.GenerateBeamDecoderOnlyOutput
[[autodoc]] generation.GenerateBeamEncoderDecoderOutput
### TensorFlow
[[autodoc]] generation.TFGreedySearchEncoderDecoderOutput
[[autodoc]] generation.TFGreedySearchDecoderOnlyOutput
[[autodoc]] generation.TFSampleEncoderDecoderOutput
[[autodoc]] generation.TFSampleDecoderOnlyOutput
[[autodoc]] generation.TFBeamSearchEncoderDecoderOutput
[[autodoc]] generation.TFBeamSearchDecoderOnlyOutput
[[autodoc]] generation.TFBeamSampleEncoderDecoderOutput
[[autodoc]] generation.TFBeamSampleDecoderOnlyOutput
[[autodoc]] generation.TFContrastiveSearchEncoderDecoderOutput
[[autodoc]] generation.TFContrastiveSearchDecoderOnlyOutput
### FLAX
[[autodoc]] generation.FlaxSampleOutput
[[autodoc]] generation.FlaxGreedySearchOutput
[[autodoc]] generation.FlaxBeamSearchOutput
## LogitsProcessor
[`LogitsProcessor`] ใไฝฟ็จใใฆใ่จ่ชใขใใซใฎใใใใฎไบๆธฌในใณใขใๅคๆดใงใใพใใ
ไธไปฃใ
### PyTorch
[[autodoc]] AlternatingCodebooksLogitsProcessor
- __call__
[[autodoc]] ClassifierFreeGuidanceLogitsProcessor
- __call__
[[autodoc]] EncoderNoRepeatNGramLogitsProcessor
- __call__
[[autodoc]] EncoderRepetitionPenaltyLogitsProcessor
- __call__
[[autodoc]] EpsilonLogitsWarper
- __call__
[[autodoc]] EtaLogitsWarper
- __call__
[[autodoc]] ExponentialDecayLengthPenalty
- __call__
[[autodoc]] ForcedBOSTokenLogitsProcessor
- __call__
[[autodoc]] ForcedEOSTokenLogitsProcessor
- __call__
[[autodoc]] ForceTokensLogitsProcessor
- __call__
[[autodoc]] HammingDiversityLogitsProcessor
- __call__
[[autodoc]] InfNanRemoveLogitsProcessor
- __call__
[[autodoc]] LogitNormalization
- __call__
[[autodoc]] LogitsProcessor
- __call__
[[autodoc]] LogitsProcessorList
- __call__
[[autodoc]] LogitsWarper
- __call__
[[autodoc]] MinLengthLogitsProcessor
- __call__
[[autodoc]] MinNewTokensLengthLogitsProcessor
- __call__
[[autodoc]] NoBadWordsLogitsProcessor
- __call__
[[autodoc]] NoRepeatNGramLogitsProcessor
- __call__
[[autodoc]] PrefixConstrainedLogitsProcessor
- __call__
[[autodoc]] RepetitionPenaltyLogitsProcessor
- __call__
[[autodoc]] SequenceBiasLogitsProcessor
- __call__
[[autodoc]] SuppressTokensAtBeginLogitsProcessor
- __call__
[[autodoc]] SuppressTokensLogitsProcessor
- __call__
[[autodoc]] TemperatureLogitsWarper
- __call__
[[autodoc]] TopKLogitsWarper
- __call__
[[autodoc]] TopPLogitsWarper
- __call__
[[autodoc]] TypicalLogitsWarper
- __call__
[[autodoc]] UnbatchedClassifierFreeGuidanceLogitsProcessor
- __call__
[[autodoc]] WhisperTimeStampLogitsProcessor
- __call__
### TensorFlow
[[autodoc]] TFForcedBOSTokenLogitsProcessor
- __call__
[[autodoc]] TFForcedEOSTokenLogitsProcessor
- __call__
[[autodoc]] TFForceTokensLogitsProcessor
- __call__
[[autodoc]] TFLogitsProcessor
- __call__
[[autodoc]] TFLogitsProcessorList
- __call__
[[autodoc]] TFLogitsWarper
- __call__
[[autodoc]] TFMinLengthLogitsProcessor
- __call__
[[autodoc]] TFNoBadWordsLogitsProcessor
- __call__
[[autodoc]] TFNoRepeatNGramLogitsProcessor
- __call__
[[autodoc]] TFRepetitionPenaltyLogitsProcessor
- __call__
[[autodoc]] TFSuppressTokensAtBeginLogitsProcessor
- __call__
[[autodoc]] TFSuppressTokensLogitsProcessor
- __call__
[[autodoc]] TFTemperatureLogitsWarper
- __call__
[[autodoc]] TFTopKLogitsWarper
- __call__
[[autodoc]] TFTopPLogitsWarper
- __call__
### FLAX
[[autodoc]] FlaxForcedBOSTokenLogitsProcessor
- __call__
[[autodoc]] FlaxForcedEOSTokenLogitsProcessor
- __call__
[[autodoc]] FlaxForceTokensLogitsProcessor
- __call__
[[autodoc]] FlaxLogitsProcessor
- __call__
[[autodoc]] FlaxLogitsProcessorList
- __call__
[[autodoc]] FlaxLogitsWarper
- __call__
[[autodoc]] FlaxMinLengthLogitsProcessor
- __call__
[[autodoc]] FlaxSuppressTokensAtBeginLogitsProcessor
- __call__
[[autodoc]] FlaxSuppressTokensLogitsProcessor
- __call__
[[autodoc]] FlaxTemperatureLogitsWarper
- __call__
[[autodoc]] FlaxTopKLogitsWarper
- __call__
[[autodoc]] FlaxTopPLogitsWarper
- __call__
[[autodoc]] FlaxWhisperTimeStampLogitsProcessor
- __call__
## StoppingCriteria
[`StoppingCriteria`] ใไฝฟ็จใใฆใ(EOS ใใผใฏใณไปฅๅคใฎ) ็ๆใๅๆญขใใใฟใคใใณใฐใๅคๆดใงใใพใใใใใฏ PyTorch ๅฎ่ฃ
ใงใฎใฟๅฉ็จๅฏ่ฝใงใใใใจใซๆณจๆใใฆใใ ใใใ
[[autodoc]] StoppingCriteria
- __call__
[[autodoc]] StoppingCriteriaList
- __call__
[[autodoc]] MaxLengthCriteria
- __call__
[[autodoc]] MaxTimeCriteria
- __call__
## Constraints
[`Constraint`] ใไฝฟ็จใใใจใ็ๆๆใซๅบๅใซ็นๅฎใฎใใผใฏใณใพใใฏใทใผใฑใณในใๅซใพใใใใใซๅผทๅถใงใใพใใใใใฏ PyTorch ๅฎ่ฃ
ใงใฎใฟๅฉ็จๅฏ่ฝใงใใใใจใซๆณจๆใใฆใใ ใใใ
[[autodoc]] Constraint
[[autodoc]] PhrasalConstraint
[[autodoc]] DisjunctiveConstraint
[[autodoc]] ConstraintListState
## BeamSearch
[[autodoc]] BeamScorer
- process
- finalize
[[autodoc]] BeamSearchScorer
- process
- finalize
[[autodoc]] ConstrainedBeamSearchScorer
- process
- finalize
## Utilities
[[autodoc]] top_k_top_p_filtering
[[autodoc]] tf_top_k_top_p_filtering
## Streamers
[[autodoc]] TextStreamer
[[autodoc]] TextIteratorStreamer
| transformers/docs/source/ja/internal/generation_utils.md/0 | {
"file_path": "transformers/docs/source/ja/internal/generation_utils.md",
"repo_id": "transformers",
"token_count": 3735
} | 263 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Logging
๐ค Transformersใซใฏใใฉใคใใฉใชใฎ่ฉณ็ดฐๅบฆใ็ฐกๅใซ่จญๅฎใงใใไธญๅคฎ้ไธญๅใฎใญใฎใณใฐใทในใใ ใใใใพใใ
็พๅจใใฉใคใใฉใชใฎใใใฉใซใใฎ่ฉณ็ดฐๅบฆใฏใWARNINGใใงใใ
่ฉณ็ดฐๅบฆใๅคๆดใใใซใฏใ็ดๆฅ่จญๅฎใกใฝใใใฎ1ใคใไฝฟ็จใใใ ใใงใใไพใใฐใ่ฉณ็ดฐๅบฆใINFOใฌใใซใซๅคๆดใใๆนๆณใฏไปฅไธใฎ้ใใงใใ
```python
import transformers
transformers.logging.set_verbosity_info()
```
็ฐๅขๅคๆฐ `TRANSFORMERS_VERBOSITY` ใไฝฟ็จใใฆใใใใฉใซใใฎๅ้ทๆงใใชใผใใผใฉใคใใใใใจใใงใใพใใ่จญๅฎใงใใพใ
`debug`ใ`info`ใ`warning`ใ`error`ใ`critical` ใฎใใใใใซๅคๆดใใพใใไพใใฐ๏ผ
```bash
TRANSFORMERS_VERBOSITY=error ./myprogram.py
```
ใใใซใไธ้จใฎใ่ญฆๅใใฏ็ฐๅขๅคๆฐใ่จญๅฎใใใใจใง็กๅนใซใงใใพใใ
`TRANSFORMERS_NO_ADVISORY_WARNINGS` ใ *1* ใชใฉใฎ true ๅคใซ่จญๅฎใใพใใใใใซใใใๆฌกใไฝฟ็จใใฆใญใฐใซ่จ้ฒใใใ่ญฆๅใ็กๅนใซใชใใพใใ
[`logger.warning_advice`]ใไพใใฐ๏ผ
```bash
TRANSFORMERS_NO_ADVISORY_WARNINGS=1 ./myprogram.py
```
ไปฅไธใฏใ็ฌ่ชใฎใขใธใฅใผใซใพใใฏในใฏใชใใใงใฉใคใใฉใชใจๅใใญใฌใผใไฝฟ็จใใๆนๆณใฎไพใงใใ
```python
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger("transformers")
logger.info("INFO")
logger.warning("WARN")
```
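このロガーはライブラリ全体の冗長レベル設定に従います。たとえば、レベルを WARNING に戻すと `logger.info` の出力は表示されなくなります(説明用の最小例です):
```python
from transformers.utils import logging

logging.set_verbosity_warning()
logger = logging.get_logger("transformers")

logger.info("この INFO メッセージは表示されません")
logger.warning("この WARNING メッセージは表示されます")
```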
ใใฎใญใฎใณใฐ ใขใธใฅใผใซใฎใในใฆใฎใกใฝใใใฏไปฅไธใซๆๆธๅใใใฆใใพใใไธปใชใกใฝใใใฏๆฌกใฎใจใใใงใใ
[`logging.get_verbosity`] ใญใฌใผใฎ็พๅจใฎๅ้ทใฌใใซใๅๅพใใพใใ
[`logging.set_verbosity`] ใไฝฟ็จใใฆใๅ้ทๆงใ้ธๆใใใฌใใซใซ่จญๅฎใใพใใ้ ็ชใซ๏ผๅฐใชใใใฎใใ๏ผ
ๅ้ทใใๆใๅ้ทใพใง)ใใใใใฎใฌใใซ (ๆฌๅผงๅ
ใฏๅฏพๅฟใใ int ๅค) ใฏๆฌกใฎใจใใใงใใ
- `transformers.logging.CRITICAL` ใพใใฏ `transformers.logging.FATAL` (int ๅคใ50): ๆใๅคใใใฎใฎใฟใใฌใใผใใใพใใ
้ๅคงใชใจใฉใผใ
- `transformers.logging.ERROR` (int ๅคใ40): ใจใฉใผใฎใฟใๅ ฑๅใใพใใ
- `transformers.logging.WARNING` ใพใใฏ `transformers.logging.WARN` (int ๅคใ30): ใจใฉใผใจ
่ญฆๅใใใใฏใฉใคใใฉใชใงไฝฟ็จใใใใใใฉใซใใฎใฌใใซใงใใ
- `transformers.logging.INFO` (int ๅคใ20): ใจใฉใผใ่ญฆๅใใใใณๅบๆฌๆ
ๅ ฑใใฌใใผใใใพใใ
- `transformers.logging.DEBUG` (int ๅคใ10): ใในใฆใฎๆ
ๅ ฑใใฌใใผใใใพใใ
ใใใฉใซใใงใฏใใขใใซใฎใใฆใณใญใผใไธญใซใtqdmใ้ฒ่ก็ถๆณใใผใ่กจ็คบใใใพใใ [`logging.disable_progress_bar`] ใใใณ [`logging.enable_progress_bar`] ใไฝฟ็จใใฆใใใฎๅไฝใๆๅถใพใใฏๆๅถ่งฃ้คใงใใพใใ
## `logging` vs `warnings`
Python ใซใฏใใใ็ตใฟๅใใใฆไฝฟ็จโโใใใ 2 ใคใฎใญใฎใณใฐ ใทในใใ ใใใใพใใไธใง่ชฌๆใใ `logging` ใจ `warnings` ใงใใ
ใใใซใใใ็นๅฎใฎใใฑใใๅ
ใฎ่ญฆๅใใใใซๅ้กใงใใพใ (ไพ: ๆฉ่ฝใพใใฏใในใฎ`FutureWarning`)
ใใใฏใใงใซ้ๆจๅฅจใซใชใฃใฆใใใ`DeprecationWarning`ใฏไปๅพใฎ้ๆจๅฅจใ็คบใใพใใ
ไธกๆนใจใ`transformers`ใฉใคใใฉใชใงไฝฟ็จใใพใใ `logging`ใฎ`captureWarning`ใกใฝใใใๆดป็จใใฆ้ฉๅฟใใใฆใ
ใใใใฎ่ญฆๅใกใใปใผใธใฏใไธ่จใฎๅ้ท่จญๅฎใใผใซใซใใฃใฆ็ฎก็ใใใพใใ
ใใใฏใฉใคใใฉใชใฎ้็บ่
ใซใจใฃใฆไฝใๆๅณใใพใใ?ๆฌกใฎใใฅใผใชในใใฃใใฏใๅฐ้ใใๅฟ
่ฆใใใใพใใ
- `warnings`ใฏใใฉใคใใฉใชใใใณ`transformers`ใซไพๅญใใใฉใคใใฉใชใฎ้็บ่
ใซๅชๅ
ใใใในใใงใใ
- `logging`ใฏใๆฅๅธธใฎใใญใธใงใฏใใงใฉใคใใฉใชใไฝฟ็จใใใฉใคใใฉใชใฎใจใณใใฆใผใถใผใซไฝฟ็จใใๅฟ
่ฆใใใใพใใ
ไปฅไธใฎ`captureWarnings`ใกใฝใใใฎใชใใกใฌใณในใๅ็
งใใฆใใ ใใใ
[[autodoc]] logging.captureWarnings
## Base setters
[[autodoc]] logging.set_verbosity_error
[[autodoc]] logging.set_verbosity_warning
[[autodoc]] logging.set_verbosity_info
[[autodoc]] logging.set_verbosity_debug
## Other functions
[[autodoc]] logging.get_verbosity
[[autodoc]] logging.set_verbosity
[[autodoc]] logging.get_logger
[[autodoc]] logging.enable_default_handler
[[autodoc]] logging.disable_default_handler
[[autodoc]] logging.enable_explicit_format
[[autodoc]] logging.reset_format
[[autodoc]] logging.enable_progress_bar
[[autodoc]] logging.disable_progress_bar
| transformers/docs/source/ja/main_classes/logging.md/0 | {
"file_path": "transformers/docs/source/ja/main_classes/logging.md",
"repo_id": "transformers",
"token_count": 2182
} | 264 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# BLIP-2
## Overview
BLIP-2 ใขใใซใฏใ[BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models](https://arxiv.org/abs/2301.12597) ใงๆๆกใใใพใใใ
Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi.ใปใตใใฌใผใผใในใใฃใผใใณใปใใคใ BLIP-2 ใฏใ่ปฝ้ใฎ 12 ๅฑค Transformer ใใใฌใผใใณใฐใใใใจใงใใใชใผใบใใใไบๅใใฌใผใใณใฐๆธใฟ็ปๅใจใณใณใผใใผใจๅคง่ฆๆจก่จ่ชใขใใซ (LLM) ใๆดป็จใใพใใ
ใใใใฎ้ใซใจใณใณใผใใผใ้
็ฝฎใใใใพใใพใช่ฆ่ฆ่จ่ชใฟในใฏใงๆๅ
็ซฏใฎใใใฉใผใใณในใๅฎ็พใใพใใๆใๆณจ็ฎใในใ็นใฏใBLIP-2 ใ 800 ๅใใฉใกใผใฟ ใขใใซใงใใ [Flamingo](https://arxiv.org/abs/2204.14198) ใ 8.7% ๆนๅใใฆใใใใจใงใใ
ใผใญใทใงใใ VQAv2 ใงใฏใใฌใผใใณใฐๅฏ่ฝใชใใฉใกใผใฟใผใ 54 ๅใฎ 1 ใซๆธๅฐใใพใใ
่ซๆใฎ่ฆ็ดใฏๆฌกใฎใจใใใงใใ
*ๅคง่ฆๆจกใขใใซใฎใจใณใใใผใจใณใใฎใใฌใผใใณใฐใซใใใ่ฆ่ฆใจ่จ่ชใฎไบๅใใฌใผใใณใฐใฎใณในใใฏใพใใพใๆณๅคใชใใฎใซใชใฃใฆใใฆใใพใใใใฎ่ซๆใงใฏใๅธ่ฒฉใฎๅ็ตๆธใฟไบๅใใฌใผใใณใฐ็ปๅใจใณใณใผใใจๅ็ตใใใๅคง่ฆๆจก่จ่ชใขใใซใใ่ฆ่ฆ่จ่ชใฎไบๅใใฌใผใใณใฐใใใผใในใใฉใใใใใๆฑ็จ็ใงๅน็็ใชไบๅใใฌใผใใณใฐๆฆ็ฅใงใใ BLIP-2 ใๆๆกใใพใใ BLIP-2 ใฏใ2 ๆฎต้ใงไบๅใใฌใผใใณใฐใใใ่ปฝ้ใฎ Querying Transformer ใงใขใใชใใฃใฎใฎใฃใใใๆฉๆธกใใใพใใๆๅใฎในใใผใธใงใฏใใใชใผใบใใใ็ปๅใจใณใณใผใใผใใๅญฆ็ฟใใ่ฆ่ฆ่จ่ช่กจ็พใใใผใในใใฉใใใใพใใ็ฌฌ 2 ๆฎต้ใงใฏใๅ็ตใใใ่จ่ชใขใใซใใ่ฆ่ฆใใ่จ่ชใธใฎ็ๆๅญฆ็ฟใใใผใในใใฉใใใใพใใ BLIP-2 ใฏใๆขๅญใฎๆนๆณใใใใใฌใผใใณใฐๅฏ่ฝใชใใฉใกใผใฟใผใๅคงๅน
ใซๅฐใชใใซใใใใใใใใใพใใพใช่ฆ่ฆ่จ่ชใฟในใฏใงๆๅ
็ซฏใฎใใใฉใผใใณในใๅฎ็พใใพใใใใจใใฐใ็งใใกใฎใขใใซใฏใใใฌใผใใณใฐๅฏ่ฝใชใใฉใกใผใฟใผใ 54 ๅใฎ 1 ๅฐใชใใผใญใทใงใใ VQAv2 ใงใFlamingo80B ใ 8.7% ไธๅใฃใฆใใพใใใพใใ่ช็ถ่จ่ชใฎๅฝไปคใซๅพใใใจใใงใใใใผใญใทใงใใ็ปๅใใใใญในใใธใฎ็ๆใจใใใขใใซใฎๆฐใใๆฉ่ฝใๅฎ่จผใใพใ*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/blip2_architecture.jpg"
alt="drawing" width="600"/>
<small> BLIP-2 ใขใผใญใใฏใใฃใ <a href="https://arxiv.org/abs/2301.12597">ๅ
ใฎ่ซๆใใๆ็ฒใ</a> </small>
ใใฎใขใใซใฏใ[nielsr](https://huggingface.co/nielsr) ใซใใฃใฆๆไพใใใพใใใ
ๅ
ใฎใณใผใใฏ [ใใ](https://github.com/salesforce/LAVIS/tree/5ee63d688ba4cebff63acee04adaef2dee9af207) ใซใใใพใใ
## Usage tips
- BLIP-2 ใฏใ็ปๅใจใชใใทใงใณใฎใใญในใ ใใญใณใใใๆๅฎใใฆๆกไปถไปใใใญในใใ็ๆใใใใใซไฝฟ็จใงใใพใใๆจ่ซๆใซใฏใ [`generate`] ใกใฝใใใไฝฟ็จใใใใจใใๅงใใใพใใ
- [`Blip2Processor`] ใไฝฟ็จใใฆใขใใซ็จใฎ็ปๅใๆบๅใใไบๆธฌใใใใใผใฏใณ ID ใใใณใผใใใฆใใญในใใซๆปใใใจใใงใใพใใ
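As an illustrative sketch (the checkpoint name and the PIL `image` variable are assumptions, not requirements), conditional generation can look like this:

```python
import torch
from transformers import Blip2Processor, Blip2ForConditionalGeneration

processor = Blip2Processor.from_pretrained("Salesforce/blip2-opt-2.7b")
model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16)
model.to("cuda")

# `image` is assumed to be a PIL image loaded beforehand
prompt = "Question: how many cats are there? Answer:"
inputs = processor(images=image, text=prompt, return_tensors="pt").to("cuda", torch.float16)

generated_ids = model.generate(**inputs, max_new_tokens=20)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip())
```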
## Resources
BLIP-2 ใฎไฝฟ็จใ้ๅงใใใฎใซๅฝน็ซใคๅ
ฌๅผ Hugging Face ใใใณใณใใฅใใใฃ (๐ ใง็คบใใใฆใใ) ใชใฝใผในใฎใชในใใ
- ็ปๅใญใฃใใทใงใณใใใธใฅใขใซ่ณชๅๅฟ็ญ (VQA)ใใใใณใใฃใใใฎใใใชไผ่ฉฑใฎใใใฎ BLIP-2 ใฎใใข ใใผใใใใฏใฏใ[ใใกใ](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/BLIP-2) ใซใใใพใใ
ใใใซๅซใใใชใฝใผในใฎ้ไฟกใซ่ๅณใใใๅ ดๅใฏใใๆฐ่ปฝใซใใซ ใชใฏใจในใใ้ใใฆใใ ใใใๅฏฉๆปใใใฆใใใ ใใพใใใชใฝใผในใฏใๆขๅญใฎใชใฝใผในใ่ค่ฃฝใใใฎใงใฏใชใใไฝใๆฐใใใใฎใ็คบใใใจใ็ๆณ็ใงใใ
## Blip2Config
[[autodoc]] Blip2Config
- from_vision_qformer_text_configs
## Blip2VisionConfig
[[autodoc]] Blip2VisionConfig
## Blip2QFormerConfig
[[autodoc]] Blip2QFormerConfig
## Blip2Processor
[[autodoc]] Blip2Processor
## Blip2VisionModel
[[autodoc]] Blip2VisionModel
- forward
## Blip2QFormerModel
[[autodoc]] Blip2QFormerModel
- forward
## Blip2Model
[[autodoc]] Blip2Model
- forward
- get_text_features
- get_image_features
- get_qformer_features
## Blip2ForConditionalGeneration
[[autodoc]] Blip2ForConditionalGeneration
- forward
- generate | transformers/docs/source/ja/model_doc/blip-2.md/0 | {
"file_path": "transformers/docs/source/ja/model_doc/blip-2.md",
"repo_id": "transformers",
"token_count": 2258
} | 265 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Conditional DETR
## Overview
ๆกไปถไปใ DETR ใขใใซใฏใ[Conditional DETR for Fast Training Convergence](https://arxiv.org/abs/2108.06152) ใง Depu MengใXiaokang ChenใZejia FanใGang ZengใHouqiang LiใYuhui YuanใLei Sun, Jingdong Wang ใซใใฃใฆๆๆกใใใพใใใ็ไบฌๆฑใๆกไปถไปใ DETR ใฏใ้ซ้ DETR ใใฌใผใใณใฐใฎใใใฎๆกไปถไปใใฏใญในใขใใณใทใงใณ ใกใซใใบใ ใๆไพใใพใใๆกไปถไปใ DETR ใฏ DETR ใใใ 6.7 ๅใใ 10 ๅ้ใๅๆใใพใใ
่ซๆใฎ่ฆ็ดใฏๆฌกใฎใจใใใงใใ
*ๆ่ฟ้็บใใใ DETR ใขใใญใผใใฏใใใฉใณในใใฉใผใใผ ใจใณใณใผใใผใใใณใใณใผใใผ ใขใผใญใใฏใใฃใ็ฉไฝๆคๅบใซ้ฉ็จใใๆๆใชใใใฉใผใใณในใๅฎ็พใใพใใใใฎ่ซๆใงใฏใใใฌใผใใณใฐใฎๅๆใ้
ใใจใใ้่ฆใชๅ้กใๆฑใใ้ซ้ DETR ใใฌใผใใณใฐใฎใใใฎๆกไปถไปใใฏใญในใขใใณใทใงใณ ใกใซใใบใ ใ็ดนไปใใพใใ็งใใกใฎใขใใญใผใใฏใDETR ใซใใใใฏใญในใขใใณใทใงใณใ 4 ใคใฎๅ่ขใฎไฝ็ฝฎ็นๅฎใจใใใฏในใฎไบๆธฌใซใณใณใใณใใฎๅใ่พผใฟใซๅคงใใไพๅญใใฆใใใใใ้ซๅ่ณชใฎใณใณใใณใใฎๅใ่พผใฟใฎๅฟ
่ฆๆงใ้ซใพใใใใฌใผใใณใฐใฎ้ฃๆๅบฆใ้ซใใชใใจใใ็นใซๅๆฉใฅใใใใฆใใพใใๆกไปถไปใ DETR ใจๅผใฐใใ็งใใกใฎใขใใญใผใใฏใใใณใผใใผใฎใใซใใใใ ใฏใญในใขใใณใทใงใณใฎใใใซใใณใผใใผใฎๅใ่พผใฟใใๆกไปถไปใใฎ็ฉบ้ใฏใจใชใๅญฆ็ฟใใพใใๅฉ็นใฏใๆกไปถไปใ็ฉบ้ใฏใจใชใ้ใใฆใๅใฏใญในใขใใณใทใงใณ ใใใใใๅๅฅใฎ้ ๅ (ใใจใใฐใ1 ใคใฎใชใใธใงใฏใใฎ็ซฏใพใใฏใชใใธใงใฏใ ใใใฏในๅ
ใฎ้ ๅ) ใๅซใใใณใใซๆณจ็ฎใงใใใใจใงใใใใใซใใใใชใใธใงใฏใๅ้กใจใใใฏในๅๅธฐใฎใใใฎๅๅฅใฎ้ ๅใใญใผใซใฉใคใบใใใใใฎ็ฉบ้็ฏๅฒใ็ญใพใใใณใณใใณใใฎๅใ่พผใฟใธใฎไพๅญใ็ทฉๅใใใใใฌใผใใณใฐใๅฎนๆใซใชใใพใใๅฎ้จ็ตๆใฏใๆกไปถไปใ DETR ใใใใฏใใผใณ R50 ใใใณ R101 ใง 6.7 ๅ้ใๅๆใใใใๅผทๅใชใใใฏใใผใณ DC5-R50 ใใใณ DC5-R101 ใง 10 ๅ้ใๅๆใใใใจใ็คบใใฆใใพใใใณใผใใฏ https://github.com/Atten4Vis/ConditionalDETR ใงๅ
ฅๆใงใใพใใ*
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/conditional_detr_curve.jpg"
alt="ๆ็ป" width="600"/>
<small> ๆกไปถไปใ DETR ใฏใๅ
ใฎ DETR ใซๆฏในใฆใฏใใใซ้ใๅๆใ็คบใใพใใ <a href="https://arxiv.org/abs/2108.06152">ๅ
ใฎ่ซๆ</a>ใใๅผ็จใ</small>
ใใฎใขใใซใฏ [DepuMeng](https://huggingface.co/DepuMeng) ใซใใฃใฆๅฏ็จฟใใใพใใใๅ
ใฎใณใผใใฏ [ใใ](https://github.com/Atten4Vis/ConditionalDETR) ใซใใใพใใ
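For illustration, a minimal inference sketch (assuming the `microsoft/conditional-detr-resnet-50` checkpoint and a PIL `image`) could look like this:

```py
import torch
from transformers import AutoImageProcessor, ConditionalDetrForObjectDetection

image_processor = AutoImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
model = ConditionalDetrForObjectDetection.from_pretrained("microsoft/conditional-detr-resnet-50")

inputs = image_processor(images=image, return_tensors="pt")
with torch.no_grad():
    outputs = model(**inputs)

# Rescale boxes to the original image size and keep confident detections only
target_sizes = torch.tensor([image.size[::-1]])
results = image_processor.post_process_object_detection(outputs, threshold=0.5, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), [round(v, 1) for v in box.tolist()])
```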
## Resources
- [ใชใใธใงใฏใๆคๅบใฟในใฏใฌใคใ](../tasks/object_detection)
## ConditionalDetrConfig
[[autodoc]] ConditionalDetrConfig
## ConditionalDetrImageProcessor
[[autodoc]] ConditionalDetrImageProcessor
- preprocess
- post_process_object_detection
- post_process_instance_segmentation
- post_process_semantic_segmentation
- post_process_panoptic_segmentation
## ConditionalDetrFeatureExtractor
[[autodoc]] ConditionalDetrFeatureExtractor
- __call__
- post_process_object_detection
- post_process_instance_segmentation
- post_process_semantic_segmentation
- post_process_panoptic_segmentation
## ConditionalDetrModel
[[autodoc]] ConditionalDetrModel
- forward
## ConditionalDetrForObjectDetection
[[autodoc]] ConditionalDetrForObjectDetection
- forward
## ConditionalDetrForSegmentation
[[autodoc]] ConditionalDetrForSegmentation
- forward
| transformers/docs/source/ja/model_doc/conditional_detr.md/0 | {
"file_path": "transformers/docs/source/ja/model_doc/conditional_detr.md",
"repo_id": "transformers",
"token_count": 1847
} | 266 |
<!--Copyright 2021 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# DETR
## Overview
DETR ใขใใซใฏใ[Transformers ใไฝฟ็จใใใจใณใใใผใจใณใใฎใชใใธใงใฏใๆคๅบ](https://arxiv.org/abs/2005.12872) ใงๆๆกใใใพใใใ
Nicolas Carion, Francisco Massa, Gabriel Synnaeve, Nicolas Usunier, Alexander Kirillov and Sergey Zagoruyko ใซใคใณใ DETR
็ณใฟ่พผใฟใใใฏใใผใณใจใใใฎๅพใซใจใณใใใผใจใณใใงใใฌใผใใณใฐใงใใใจใณใณใผใใผ/ใใณใผใใผ Transformer ใงๆงๆใใใพใใ
็ฉไฝใฎๆคๅบใ Faster-R-CNN ใ Mask-R-CNN ใชใฉใฎใขใใซใฎ่ค้ใใฎๅคใใๅคงๅน
ใซ็ฐก็ด ๅใใใพใใ
้ ๅๆๆกใ้ๆๅคงๆๅถๆ้ ใใขใณใซใผ็ๆใชใฉใงใใใใใซใDETR ใฏๆฌกใฎใใใซใใใใจใใงใใพใใ
ใใณใผใๅบๅใฎไธใซใในใฏ ใใใใ่ฟฝๅ ใใใ ใใงใใใใใใฃใใฏ ใปใฐใกใณใใผใทใงใณใๅฎ่กใงใใใใใซ่ช็ถใซๆกๅผตใใใฆใใพใใ
่ซๆใฎ่ฆ็ดใฏๆฌกใฎใจใใใงใใ
*็ฉไฝๆคๅบใ็ดๆฅ้ๅไบๆธฌๅ้กใจใใฆ่ฆใๆฐใใๆนๆณใ็ดนไปใใพใใ็งใใกใฎใขใใญใผใใฏใ
ๆคๅบใใคใใฉใคใณใซใใใ้ๆๅคงๆๅถใชใฉใฎๅคใใฎๆไฝๆฅญใง่จญ่จใใใใณใณใใผใใณใใฎๅฟ
่ฆๆงใๅนๆ็ใซๆ้คใใใพใใ
ใฟในใฏใซ้ขใใไบๅใฎ็ฅ่ญใๆ็คบ็ใซใจใณใณใผใใใใใญใทใผใธใฃใพใใฏใขใณใซใผใฎ็ๆใใฎไธปใชๆๅใฏใ
DEtection TRansformer ใพใใฏ DETR ใจๅผใฐใใๆฐใใใใฌใผใ ใฏใผใฏใฏใใปใใใใผในใฎใฐใญใผใใซๆๅคฑใงใใใ
ไบ้จใใใใณใฐใใใใณใใฉใณในใใฉใผใใผ ใจใณใณใผใใผ/ใใณใผใใผ ใขใผใญใใฏใใฃใๅญฆ็ฟใใใใชใใธใงใฏใ ใฏใจใชใฎๅบๅฎใใใๅฐใใชใปใใใไธใใใใใจใ
DETR ใฏใใชใใธใงใฏใใจใฐใญใผใใซ ใคใกใผใธ ใณใณใใญในใใฎ้ขไฟใซใคใใฆๆจ่ซใใๆ็ตใปใใใ็ดๆฅๅบๅใใพใใ
ไธฆ่กใใฆไบๆณใใๆฐใใใขใใซใฏๆฆๅฟต็ใซใทใณใใซใงใใใๅคใใฎใขใใซใจใฏ็ฐใชใใ็นๆฎใชใฉใคใใฉใชใๅฟ
่ฆใจใใพใใใ
ไปใฎๆๆฐใฎๆคๅบๅจใ DETR ใฏใ็ขบ็ซใใใใใใใณๅ็ญใฎ็ฒพๅบฆใจๅฎ่กๆใฎใใใฉใผใใณในใๅฎ่จผใใพใใ
ๅฐ้ฃใช COCO ็ฉไฝๆคๅบใใผใฟใปใใใซๅบใฅใใ้ซๅบฆใซๆ้ฉๅใใใ Faster RCNN ใใผในใฉใคใณใใใใซใDETR ใฏ็ฐกๅใซๅฎ่กใงใใพใใ
็ตฑไธใใใๆนๆณใงใใใใใฃใใฏ ใปใฐใกใณใใผใทใงใณใ็ๆใใใใใซไธ่ฌๅใใใพใใใ็ซถๅไป็คพใๅคงๅน
ใซไธๅใใใใฉใผใใณในใ็คบใใฆใใพใ
ใใผในใฉใคใณ*
ใใฎใขใใซใฏใ[nielsr](https://huggingface.co/nielsr) ใซใใฃใฆๆไพใใใพใใใๅ
ใฎใณใผใใฏ [ใใกใ](https://github.com/facebookresearch/detr) ใซใใใพใใ
## How DETR works
[`~transformers.DetrForObjectDetection`] ใใฉใฎใใใซๆฉ่ฝใใใใ่ชฌๆใใ TLDR ใฏๆฌกใฎใจใใใงใใ
ใพใใไบๅใซใใฌใผใใณใฐใใใ็ณใฟ่พผใฟใใใฏใใผใณใ้ใใฆ็ปๅใ้ไฟกใใใพใ (่ซๆใงใฏใ่่
ใใฏๆฌกใฎใใใซไฝฟ็จใใฆใใพใ)ใ
ResNet-50/ResNet-101)ใใใใ ใใฃใกใณใทใงใณใ่ฟฝๅ ใใใจไปฎๅฎใใพใใใใใฏใใใใฏใใผใณใธใฎๅ
ฅๅใ
็ปๅใซ 3 ใคใฎใซใฉใผ ใใฃใใซ (RGB) ใใใใจไปฎๅฎใใๅ ดๅใฎใๅฝข็ถ `(batch_size, 3, height, width)` ใฎใใณใฝใซใ CNNใฎใใใฏใใผใณ
้ๅธธใฏ `(batch_size, 2048, height/32, width/32)` ใฎๅฝข็ถใฎใๆฐใใไฝ่งฃๅๅบฆใฎ็นๅพดใใใใๅบๅใใพใใใใใฏ
ๆฌกใซใDETR ใฎ Transformer ใฎ้ ใๆฌกๅ
(ใใใฉใซใใงใฏ `256`) ใซไธ่ดใใใใใซๆๅฝฑใใใพใใ
`nn.Conv2D` ใฌใคใคใผใใใใงใๅฝข็ถ `(batch_size, 256, height/32, width/32)` ใฎใใณใฝใซใๅฎๆใใพใใใ
็นๅพดใใใใฏๅนณๅฆๅใใใณ่ปข็ฝฎใใใๅฝข็ถ `(batch_size, seq_len, d_model)` ใฎใใณใฝใซใๅๅพใใพใ =
`(batch_size, width/32*height/32, 256)`ใใใใใฃใฆใNLP ใขใใซใจใฎ้ใใฏใใทใผใฑใณในใฎ้ทใใๅฎ้ใซใฏ
้ๅธธใใใ้ทใใชใใพใใใใd_modelใใฏๅฐใใใชใใพใ (NLP ใงใฏ้ๅธธ 768 ไปฅไธใงใ)ใ
ๆฌกใซใใใใใจใณใณใผใใไปใใฆ้ไฟกใใใๅใๅฝข็ถใฎ `encoder_hidden_states` ใๅบๅใใใพใ (ๆฌกใฎใใใซ่ใใใใจใใงใใพใ)ใ
ใใใใฏ็ปๅใฎ็นๅพดใจใใฆ๏ผใๆฌกใซใใใใใ **ใชใใธใงใฏใ ใฏใจใช**ใใใณใผใใ้ใใฆ้ไฟกใใใพใใใใใฏๅฝข็ถใฎใใณใฝใซใงใ
`(batch_size, num_queries, d_model)`ใ้ๅธธใ`num_queries` ใฏ 100 ใซ่จญๅฎใใใใผใญใงๅๆๅใใใพใใ
ใใใใฎๅ
ฅๅๅใ่พผใฟใฏๅญฆ็ฟใใใไฝ็ฝฎใจใณใณใผใใฃใณใฐใงใใใไฝๆ่
ใฏใใใใชใใธใงใฏใ ใฏใจใชใจๅผใณใๅๆงใซ
ใจใณใณใผใใงใฏใใใใใฏๅใขใใณใทใงใณๅฑคใฎๅ
ฅๅใซ่ฟฝๅ ใใใพใใๅใชใใธใงใฏใ ใฏใจใชใฏ็นๅฎใฎใชใใธใงใฏใใๆค็ดขใใพใใ
็ปๅใงใฏใใใณใผใใฏใ่คๆฐใฎใปใซใ ใขใใณใทใงใณ ใฌใคใคใจใจใณใณใผใ ใใณใผใ ใขใใณใทใงใณ ใฌใคใคใ้ใใฆใใใใฎๅใ่พผใฟใๆดๆฐใใพใใ
ๅใๅฝข็ถใฎ `decoder_hidden_states` ใๅบๅใใพใ: `(batch_size, num_queries, d_model)`ใๆฌกใซ้ ญใ๏ผใค
ใชใใธใงใฏใๆคๅบใฎใใใซไธ้จใซ่ฟฝๅ ใใใพใใๅใชใใธใงใฏใ ใฏใจใชใใชใใธใงใฏใใฎ 1 ใคใซๅ้กใใใใใฎ็ทๅฝขใฌใคใคใผใใพใใฏใใใใใ
ใชใใธใงใฏใใใใใใณๅใฏใจใชใฎๅข็ใใใฏในใไบๆธฌใใ MLPใ
ใขใใซใฏ **2 ้จใใใใณใฐๆๅคฑ**ใไฝฟ็จใใฆใใฌใผใใณใฐใใใพใใใคใพใใๅฎ้ใซ่กใใใจใฏใไบๆธฌใใใใฏใฉในใๆฏ่ผใใใใจใงใ +
ใฐใฉใฆใณใ ใใฅใซใผใน ใขใใใผใทใงใณใซๅฏพใใ N = 100 ๅใฎๅใชใใธใงใฏใ ใฏใจใชใฎๅข็ใใใฏใน (ๅใ้ทใ N ใพใงใใใฃใณใฐ)
(ใใใใฃใฆใ็ปๅใซใชใใธใงใฏใใ 4 ใคใใๅซใพใใฆใใชใๅ ดๅใ96 ๅใฎๆณจ้ใซใฏใฏใฉในใจใใฆใใชใใธใงใฏใใชใใใใใใณใฏใฉในใจใใฆใๅข็ใใใฏในใชใใใๅซใพใใใ ใใซใชใใพใใ
ๅข็ใใใฏใน)ใ [Hungarian matching algorithm](https://en.wikipedia.org/wiki/Hungarian_algorithm) ใฏใๆค็ดขใซไฝฟ็จใใใพใใ
N ๅใฎใฏใจใชใฎใใใใใใ N ๅใฎๆณจ้ใฎใใใใใธใฎๆ้ฉใช 1 ๅฏพ 1 ใฎใใใใณใฐใๆฌกใซใๆจๆบใฏใญในใจใณใใญใใผ (
ใฏใฉใน)ใใใใณ L1 ใจ [generalized IoU loss](https://giou.stanford.edu/) ใฎ็ทๅฝข็ตๅ (
ๅข็ใใใฏใน) ใฏใใขใใซใฎใใฉใกใผใฟใผใๆ้ฉๅใใใใใซไฝฟ็จใใใพใใ
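As a rough illustration of the bipartite matching step (a toy sketch, not the actual DETR matcher — the cost values here are random placeholders), the optimal one-to-one assignment can be computed with `scipy.optimize.linear_sum_assignment`:

```py
import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy cost matrix: rows = N object queries, columns = N padded ground-truth annotations.
# In DETR each entry combines the class cost, the L1 box cost and the generalized IoU cost.
cost_matrix = np.random.rand(5, 5)

query_indices, target_indices = linear_sum_assignment(cost_matrix)
for q, t in zip(query_indices, target_indices):
    print(f"query {q} -> target {t} (cost {cost_matrix[q, t]:.3f})")
```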
DETR ใฏใใใใใใฃใใฏ ใปใฐใกใณใใผใทใงใณ (ใปใใณใใฃใใฏ ใปใฐใกใณใใผใทใงใณใจใคใณในใฟใณในใ็ตฑๅใใ) ใๅฎ่กใใใใใซ่ช็ถใซๆกๅผตใงใใพใใ
ใปใฐใกใณใใผใทใงใณ๏ผใ [`~transformers.DetrForSegmentation`] ใฏใปใฐใกใณใใผใทใงใณ ใในใฏ ใใใใไธใซ่ฟฝๅ ใใพใ
[`~transformers.DetrForObjectDetection`]ใใในใฏ ใใใใฏใๅ
ฑๅใงใใฌใผใใณใฐใใใใจใใ2 ๆฎต้ใฎใใญใปในใงใใฌใผใใณใฐใใใใจใใงใใพใใ
ใใใงใๆๅใซ [`~transformers.DetrForObjectDetection`] ใขใใซใใใฌใผใใณใฐใใฆใไธกๆนใฎๅจๅฒใฎๅข็ใใใฏในใๆคๅบใใพใใ
ใใใฎใ๏ผใคใณในใฟใณใน๏ผใจใใใฎใ๏ผๆจใ้่ทฏใ็ฉบใชใฉใฎ่ๆฏใฎใใฎ๏ผใใในใฆๅ็ตใใใในใฆใฎ้ใฟใใใชใผใบใใฆใฎใฟใใฌใผใใณใฐใใพใใ
25 ใจใใใฏใฎใในใฏใใใใๅฎ้จ็ใซใฏใใใใ 2 ใคใฎใขใใญใผใใฏๅๆงใฎ็ตๆใใใใใใพใใใใใฏในใฎไบๆธฌใฏ
ใใณใฌใชใผ่ชใฎใใใใณใฐใฏใใใฏใน้ใฎ่ท้ขใไฝฟ็จใใฆ่จ็ฎใใใใใใใใฌใผใใณใฐใๅฏ่ฝใซใใใใใซใฏใใใๅฟ
่ฆใงใใ
## Usage tips
- DETR ใฏใใใใใ **ใชใใธใงใฏใ ใฏใจใช** ใไฝฟ็จใใฆใ็ปๅๅ
ใฎใชใใธใงใฏใใๆคๅบใใพใใใฏใจใชใฎๆฐใซใใฃใฆๆๅคงๅคใๆฑบใพใใพใ
ๅไธใฎ็ปๅๅ
ใงๆคๅบใงใใใชใใธใงใฏใใฎๆฐใใใใฉใซใใงใฏ 100 ใซ่จญๅฎใใใพใ (ใใฉใกใผใฟใผใๅ็
ง)
[`~transformers.DetrConfig`] ใฎ `num_queries`)ใใใ็จๅบฆใฎไฝ่ฃใใใใฎใฏ่ฏใใใจใงใ (COCO ใงใฏใ
่่
ใฏ 100 ใไฝฟ็จใใพใใใใCOCO ใคใกใผใธๅ
ใฎใชใใธใงใฏใใฎๆๅคงๆฐใฏ็ด 70 ใงใ)ใ
- DETR ใฎใใณใผใใผใฏใใฏใจใชใฎๅใ่พผใฟใไธฆ่กใใฆๆดๆฐใใพใใใใใฏ GPT-2 ใฎใใใช่จ่ชใขใใซใจใฏ็ฐใชใใพใใ
ไธฆๅใงใฏใชใ่ชๅทฑๅๅธฐใใณใผใใไฝฟ็จใใพใใใใใใฃใฆใๅ ๆ็ๆณจๆใในใฏใฏไฝฟ็จใใใพใใใ
- DETR ใฏใๆๅฝฑๅใซๅใปใซใใขใใณใทใงใณๅฑคใจใฏใญในใขใใณใทใงใณๅฑคใฎ้ ใ็ถๆ
ใซไฝ็ฝฎๅใ่พผใฟใ่ฟฝๅ ใใพใใ
ใฏใจใชใจใญใผใซใ็ปๅใฎไฝ็ฝฎๅใ่พผใฟใซใคใใฆใฏใๅบๅฎๆญฃๅผฆๆณขใพใใฏๅญฆ็ฟๆธใฟใฎใฉใกใใใ้ธๆใงใใพใใ
็ตถๅฏพไฝ็ฝฎๅใ่พผใฟใใใใฉใซใใงใฏใใใฉใกใผใฟ `position_embedding_type` ใฏ
[`~transformers.DetrConfig`] ใฏ `"sine"` ใซ่จญๅฎใใใพใใ
- DETR ใฎไฝๆ่
ใฏใใใฌใผใใณใฐไธญใซใ็นใซใใณใผใใง่ฃๅฉๆๅคฑใไฝฟ็จใใใจๅฝน็ซใคใใจใซๆฐใฅใใพใใใ
ใขใใซใฏๅใฏใฉในใฎๆญฃใใๆฐใฎใชใใธใงใฏใใๅบๅใใพใใใใฉใกใผใฟ `auxiliary_loss` ใ่จญๅฎใใใจใ
[`~transformers.DetrConfig`] ใ`True`ใซ่จญๅฎใใใใฃใผใใใฉใฏใผใ ใใฅใผใฉใซ ใใใใฏใผใฏใจใใณใฌใชใผๆๅคฑใไบๆธฌใใพใ
ใฏๅใใณใผใๅฑคใฎๅพใซ่ฟฝๅ ใใใพใ (FFN ใใใฉใกใผใฟใๅ
ฑๆใใ)ใ
- ่คๆฐใฎใใผใใซใใใๅๆฃ็ฐๅขใงใขใใซใใใฌใผใใณใฐใใๅ ดๅใฏใ
_modeling_detr.py_ ใฎ _DetrLoss_ ใฏใฉในใฎ _num_boxes_ ๅคๆฐใ่คๆฐใฎใใผใใงใใฌใผใใณใฐใใๅ ดๅใใใใฏๆฌกใฎใใใซใใๅฟ
่ฆใใใใพใ
ๅ
ใฎๅฎ่ฃ
ใง่ฆใใใใใใซใใในใฆใฎใใผใใซใใใใฟใผใฒใใ ใใใฏในใฎๅนณๅๆฐใซ่จญๅฎใใใพใ [ใใกใ](https://github.com/facebookresearch/detr/blob/a54b77800eb8e64e3ad0d8237789fcbf2f8350c5/models/detr.py#L227-L232) ใ
- [`~transformers.DetrForObjectDetection`] ใใใณ [`~transformers.DetrForSegmentation`] ใฏๆฌกใฎใใใซๅๆๅใงใใพใใ
[timm ใฉใคใใฉใช](https://github.com/rwightman/pytorch-image-models) ใงๅฉ็จๅฏ่ฝใช็ณใฟ่พผใฟใใใฏใใผใณใ
ใใจใใฐใMobileNet ใใใฏใใผใณใไฝฟ็จใใๅๆๅใฏใๆฌกใฎ `backbone` ๅฑๆงใ่จญๅฎใใใใจใงๅฎ่กใงใใพใใ
[`~transformers.DetrConfig`] ใ `"tf_mobilenetv3_small_075"` ใซ่จญๅฎใใใใใไฝฟ็จใใฆใขใใซใๅๆๅใใพใใ
ๆงๆใ
- DETR ใฏใๆ็ญ่พบใไธๅฎใฎใใฏใปใซๆฐไปฅไธใซใชใใๆ้ท่พบใไธๅฎ้ไปฅไธใซใชใใใใซๅ
ฅๅ็ปๅใฎใตใคใบใๅคๆดใใพใใ
ๆๅคง 1333 ใใฏใปใซใใใฌใผใใณใฐๆใซใๆ็ญ่พบใใฉใณใใ ใซ ใซ่จญๅฎใใใใใใซในใฑใผใซๆกๅผตใไฝฟ็จใใใพใใ
ๆๅฐ 480ใๆๅคง 800 ใใฏใปใซใๆจ่ซๆใซใฏใๆ็ญ่พบใ 800 ใซ่จญๅฎใใใพใใ
ไฝฟ็จใงใใพใ
[`~transformers.DetrImageProcessor`] ็จใฎ็ปๅ (ใใใณใชใใทใงใณใฎ COCO ๅฝขๅผใฎๆณจ้) ใๆบๅใใพใใ
ใขใใซใใใฎใตใคใบๅคๆดใซใใใใใใๅ
ใฎ็ปๅใฎใตใคใบใ็ฐใชใๅ ดๅใใใใพใใ DETR ใฏใ็ปๅใๆๅคงใพใงใใใฃใณใฐใใใใจใงใใฎๅ้กใ่งฃๆฑบใใพใใ
ใฉใฎใใฏใปใซใๅฎๆฐใงใฉใฎใใฏใปใซใใใใฃใณใฐใงใใใใ็คบใใใฏใปใซ ใในใฏใไฝๆใใใใจใซใใฃใฆใใใใๅ
ใฎๆๅคงใตใคใบใๆฑบๅฎใใพใใ
ใใใใฏใ็ปๅใใใใๅฆ็ใใใใใซใซในใฟใ ใฎ `collate_fn` ใๅฎ็พฉใใใใจใใงใใพใใ
[`~transformers.DetrImageProcessor.pad_and_create_pixel_mask`]ใ
- ็ปๅใฎใตใคใบใซใใฃใฆไฝฟ็จใใใใกใขใชใฎ้ใๆฑบใพใใใใใใฃใฆใbatch_sizeใใๆฑบใพใใพใใ
GPU ใใใ 2 ใฎใใใ ใตใคใบใไฝฟ็จใใใใจใใๅงใใใพใใ่ฉณ็ดฐใซใคใใฆใฏใ[ใใฎ Github ในใฌใใ](https://github.com/facebookresearch/detr/issues/150) ใๅ็
งใใฆใใ ใใใ
DETR ใขใใซใใคใณในใฟใณในๅใใใซใฏ 3 ใคใฎๆนๆณใใใใพใ (ๅฅฝใฟใซๅฟใใฆ)ใ
ใชใใทใงใณ 1: ใขใใซๅ
จไฝใฎไบๅใใฌใผใใณใฐใใใ้ใฟใไฝฟ็จใใฆ DETR ใใคใณในใฟใณในๅใใ
```py
>>> from transformers import DetrForObjectDetection
>>> model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")
```
ใชใใทใงใณ 2: Transformer ใซใคใใฆใฏใฉใณใใ ใซๅๆๅใใใ้ใฟใไฝฟ็จใใฆ DETR ใใคใณในใฟใณในๅใใพใใใใใใฏใใผใณใซใคใใฆใฏไบๅใซใใฌใผใใณใฐใใใ้ใฟใไฝฟ็จใใพใ
```py
>>> from transformers import DetrConfig, DetrForObjectDetection
>>> config = DetrConfig()
>>> model = DetrForObjectDetection(config)
```
ใชใใทใงใณ 3: ใใใฏใใผใณ + ใใฉใณในใใฉใผใใผใฎใฉใณใใ ใซๅๆๅใใใ้ใฟใไฝฟ็จใใฆ DETR ใใคใณในใฟใณในๅใใพใใ
```py
>>> config = DetrConfig(use_pretrained_backbone=False)
>>> model = DetrForObjectDetection(config)
```
| Task | Object detection | Instance segmentation | Panoptic segmentation |
|------|------------------|-----------------------|-----------------------|
| **Description** |็ปๅๅใฎใชใใธใงใฏใใฎๅจๅฒใฎๅข็ใใใฏในใจใฏใฉใน ใฉใใซใไบๆธฌใใ | ็ปๅๅใฎใชใใธใงใฏใ (ใคใพใใคใณในใฟใณใน) ใฎๅจๅฒใฎใในใฏใไบๆธฌใใ | ็ปๅๅใฎใชใใธใงใฏใ (ใคใณในใฟใณใน) ใจใใใฎใ (ๆจใ้่ทฏใชใฉใฎ่ๆฏ) ใฎไธกๆนใฎๅจๅฒใฎใในใฏใไบๆธฌใใพใ |
| **Model** | [`~transformers.DetrForObjectDetection`] | [`~transformers.DetrForSegmentation`] | [`~transformers.DetrForSegmentation`] |
| **Example dataset** | COCO detection | COCO detection, COCO panoptic | COCO panoptic |
| **Format of annotations to provide to** [`~transformers.DetrImageProcessor`] | {'image_id': `int`, 'annotations': `List[Dict]`} each Dict being a COCO object annotation | {'image_id': `int`, 'annotations': `List[Dict]`} (in case of COCO detection) or {'file_name': `str`, 'image_id': `int`, 'segments_info': `List[Dict]`} (in case of COCO panoptic) | {'file_name': `str`, 'image_id': `int`, 'segments_info': `List[Dict]`} and masks_path (path to directory containing PNG files of the masks) |
| **Postprocessing** (i.e. converting the output of the model to Pascal VOC format) | [`~transformers.DetrImageProcessor.post_process`] | [`~transformers.DetrImageProcessor.post_process_segmentation`] | [`~transformers.DetrImageProcessor.post_process_segmentation`], [`~transformers.DetrImageProcessor.post_process_panoptic`] |
| **evaluators** | `CocoEvaluator` with `iou_types="bbox"` | `CocoEvaluator` with `iou_types="bbox"` or `"segm"` | `CocoEvaluator` with `iou_types="bbox"` or `"segm"`, `PanopticEvaluator` |
ใคใพใใCOCO ๆคๅบใพใใฏ COCO ใใใใใฃใใฏๅฝขๅผใงใใผใฟใๆบๅใใฆใใใๆฌกใไฝฟ็จใใๅฟ
่ฆใใใใพใใ
[`~transformers.DetrImageProcessor`] `pixel_values`ใ`pixel_mask`ใใใใณใชใใทใงใณใไฝๆใใพใใ
ใใฉใใซใใใใใไฝฟ็จใใฆใขใใซใใใฌใผใใณใฐ (ใพใใฏๅพฎ่ชฟๆด) ใงใใพใใ่ฉไพกใใใซใฏใใพใใ
[`~transformers.DetrImageProcessor`] ใฎๅพๅฆ็ใกใฝใใใฎ 1 ใคใไฝฟ็จใใใขใใซใฎๅบๅใใใใใฏใงใใพใ
`CocoEvaluator` ใพใใฏ `PanopticEvaluator` ใฎใใใใใซๆไพใใใๆฌกใฎใใใชใกใใชใฏในใ่จ็ฎใงใใพใใ
ๅนณๅๅนณๅ็ฒพๅบฆ (mAP) ใจใใใฉใๅ่ณช (PQ)ใๅพ่
ใฎใชใใธใงใฏใใฏ [ๅ
ใฎใชใใธใใช](https://github.com/facebookresearch/detr) ใซๅฎ่ฃ
ใใใฆใใพใใ่ฉไพกใฎ่ฉณ็ดฐใซใคใใฆใฏใ[ใตใณใใซ ใใผใใใใฏ](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DETR) ใๅ็
งใใฆใใ ใใใ
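As a concrete (illustrative) sketch, inference followed by post-processing with the `facebook/detr-resnet-50` checkpoint — a PIL `image` is assumed — could look like this:

```py
import torch
from transformers import DetrImageProcessor, DetrForObjectDetection

image_processor = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50")
model = DetrForObjectDetection.from_pretrained("facebook/detr-resnet-50")

inputs = image_processor(images=image, return_tensors="pt")  # creates pixel_values and pixel_mask
with torch.no_grad():
    outputs = model(**inputs)

# Convert the raw logits and boxes into (score, label, box) triples in Pascal VOC format
target_sizes = torch.tensor([image.size[::-1]])
results = image_processor.post_process_object_detection(outputs, threshold=0.9, target_sizes=target_sizes)[0]
for score, label, box in zip(results["scores"], results["labels"], results["boxes"]):
    print(model.config.id2label[label.item()], round(score.item(), 3), [round(v, 1) for v in box.tolist()])
```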
## Resources
DETR ใฎไฝฟ็จใ้ๅงใใใฎใซๅฝน็ซใคๅ
ฌๅผ Hugging Face ใใใณใณใใฅใใใฃ (๐ ใง็คบใใใฆใใ) ใชใฝใผในใฎใชในใใ
<PipelineTag pipeline="object-detection"/>
- ใซในใฟใ ใใผใฟใปใใใฎ [`DetrForObjectDetection`] ใจ [`DetrForSegmentation`] ใฎๅพฎ่ชฟๆดใ่ชฌๆใใใในใฆใฎใตใณใใซ ใใผใใใใฏใฏใ[ใใกใ](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/DETR) ใง่ฆใคใใใใจใใงใใพใใ ใ
- ๅ็
ง: [ใชใใธใงใฏใๆคๅบใฟในใฏ ใฌใคใ](../tasks/object_detection)
ใใใซๅซใใใชใฝใผในใฎ้ไฟกใซ่ๅณใใใๅ ดๅใฏใใๆฐ่ปฝใซใใซ ใชใฏใจในใใ้ใใฆใใ ใใใๅฏฉๆปใใใฆใใใ ใใพใใใชใฝใผในใฏใๆขๅญใฎใชใฝใผในใ่ค่ฃฝใใใฎใงใฏใชใใไฝใๆฐใใใใฎใ็คบใใใจใ็ๆณ็ใงใใ
## DetrConfig
[[autodoc]] DetrConfig
## DetrImageProcessor
[[autodoc]] DetrImageProcessor
- preprocess
- post_process_object_detection
- post_process_semantic_segmentation
- post_process_instance_segmentation
- post_process_panoptic_segmentation
## DetrFeatureExtractor
[[autodoc]] DetrFeatureExtractor
- __call__
- post_process_object_detection
- post_process_semantic_segmentation
- post_process_instance_segmentation
- post_process_panoptic_segmentation
## DETR specific outputs
[[autodoc]] models.detr.modeling_detr.DetrModelOutput
[[autodoc]] models.detr.modeling_detr.DetrObjectDetectionOutput
[[autodoc]] models.detr.modeling_detr.DetrSegmentationOutput
## DetrModel
[[autodoc]] DetrModel
- forward
## DetrForObjectDetection
[[autodoc]] DetrForObjectDetection
- forward
## DetrForSegmentation
[[autodoc]] DetrForSegmentation
- forward
| transformers/docs/source/ja/model_doc/detr.md/0 | {
"file_path": "transformers/docs/source/ja/model_doc/detr.md",
"repo_id": "transformers",
"token_count": 7983
} | 267 |
<!--Copyright 2023 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Efficient Training on Multiple CPUs
1ใคใฎCPUใงใฎใใฌใผใใณใฐใ้
ใใใๅ ดๅใ่คๆฐใฎCPUใไฝฟ็จใงใใพใใใใฎใฌใคใใฏใPyTorchใใผในใฎDDPใไฝฟ็จใใๅๆฃCPUใใฌใผใใณใฐใซ็ฆ็นใๅฝใฆใฆใใพใใ
## Intelยฎ oneCCL Bindings for PyTorch
[Intelยฎ oneCCL](https://github.com/oneapi-src/oneCCL)๏ผ้ๅ้ไฟกใฉใคใใฉใช๏ผใฏใallreduceใallgatherใalltoallใชใฉใฎๅ้้ไฟกใๅฎ่ฃ
ใใๅน็็ใชๅๆฃใใฃใผใใฉใผใใณใฐใใฌใผใใณใฐ็จใฎใฉใคใใฉใชใงใใoneCCLใฎ่ฉณ็ดฐใซใคใใฆใฏใ[oneCCLใใญใฅใกใณใ](https://spec.oneapi.com/versions/latest/elements/oneCCL/source/index.html)ใจ[oneCCLไปๆง](https://spec.oneapi.com/versions/latest/elements/oneCCL/source/index.html)ใๅ็
งใใฆใใ ใใใ
ใขใธใฅใผใซ`oneccl_bindings_for_pytorch`๏ผใใผใธใงใณ1.12ไปฅๅใฏ`torch_ccl`๏ผใฏใPyTorch C10D ProcessGroup APIใๅฎ่ฃ
ใใๅค้จใฎProcessGroupใจใใฆๅ็ใซใญใผใใงใใ็พๅจใฏLinuxใใฉใใใใฉใผใ ใงใฎใฟๅไฝใใพใใ
[torch-ccl](https://github.com/intel/torch-ccl)ใฎ่ฉณ็ดฐๆ
ๅ ฑใ็ขบ่ชใใฆใใ ใใใ
### Intelยฎ oneCCL Bindings for PyTorch installation:
WheelใใกใคใซใฏใไปฅไธใฎPythonใใผใธใงใณ็จใซๅฉ็จๅฏ่ฝใงใ:
| Extension Version | Python 3.6 | Python 3.7 | Python 3.8 | Python 3.9 | Python 3.10 |
| :---------------: | :--------: | :--------: | :--------: | :--------: | :---------: |
| 1.13.0 | | โ | โ | โ | โ |
| 1.12.100 | | โ | โ | โ | โ |
| 1.12.0 | | โ | โ | โ | โ |
| 1.11.0 | | โ | โ | โ | โ |
| 1.10.0 | โ | โ | โ | โ | |
```
pip install oneccl_bind_pt=={pytorch_version} -f https://developer.intel.com/ipex-whl-stable-cpu
```
where `{pytorch_version}` should be your PyTorch version, for instance 1.13.0.
Check more approaches for [oneccl_bind_pt installation](https://github.com/intel/torch-ccl).
Versions of oneCCL and PyTorch must match.
<Tip warning={true}>
oneccl_bindings_for_pytorch 1.12.0 prebuilt wheel does not work with PyTorch 1.12.1 (it is for PyTorch 1.12.0)
PyTorch 1.12.1 should work with oneccl_bindings_for_pytorch 1.12.100
</Tip>
`{pytorch_version}` ใฏใใใชใใฎPyTorchใฎใใผใธใงใณ๏ผไพ๏ผ1.13.0๏ผใซ็ฝฎใๆใใๅฟ
่ฆใใใใพใใ้่ฆใชใฎใฏใoneCCLใจPyTorchใฎใใผใธใงใณใไธ่ดใใฆใใใใจใงใใ[oneccl_bind_ptใฎใคใณในใใผใซ](https://github.com/intel/torch-ccl)ใซ้ขใใใใใชใใขใใญใผใใ็ขบ่ชใงใใพใใ
<Tip warning={true}>
`oneccl_bindings_for_pytorch`ใฎ1.12.0ใใชใใซใใใคใผใซใฏPyTorch 1.12.1ใจไบๆๆงใใใใพใใ๏ผใใใฏPyTorch 1.12.0็จใงใ๏ผใPyTorch 1.12.1ใไฝฟ็จใใๅ ดๅใฏใ`oneccl_bindings_for_pytorch`ใใผใธใงใณ1.12.100ใไฝฟ็จใใๅฟ
่ฆใใใใพใใ
</Tip>
## Intelยฎ MPI library
ใใฎๅบๆบใใผในใฎMPIๅฎ่ฃ
ใไฝฟ็จใใฆใIntelยฎใขใผใญใใฏใใฃไธใงๆ่ปใงๅน็็ใในใฑใผใฉใใซใชใฏใฉในใฟใกใใปใผใธใณใฐใๆไพใใพใใใใฎใณใณใใผใใณใใฏใIntelยฎ oneAPI HPC Toolkitใฎไธ้จใงใใ
oneccl_bindings_for_pytorchใฏMPIใใผใซใปใใใจไธ็ทใซใคใณในใใผใซใใใพใใไฝฟ็จใใๅใซ็ฐๅขใใฝใผในๅใใๅฟ
่ฆใใใใพใใ
for Intelยฎ oneCCL >= 1.12.0
```
oneccl_bindings_for_pytorch_path=$(python -c "from oneccl_bindings_for_pytorch import cwd; print(cwd)")
source $oneccl_bindings_for_pytorch_path/env/setvars.sh
```
for Intelยฎ oneCCL whose version < 1.12.0
```
torch_ccl_path=$(python -c "import torch; import torch_ccl; import os; print(os.path.abspath(os.path.dirname(torch_ccl.__file__)))")
source $torch_ccl_path/env/setvars.sh
```
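As an optional, illustrative sanity check (assuming the wheel matching your PyTorch version is installed), you can verify from Python that the bindings import cleanly before launching a distributed job:

```python
import torch
import torch.distributed as dist

# Importing the bindings registers the "ccl" backend with torch.distributed
import oneccl_bindings_for_pytorch  # noqa: F401

print(torch.__version__, dist.is_available())
```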
#### IPEX installation:
IPEXใฏใFloat32ใใใณBFloat16ใฎไธกๆนใงCPUใใฌใผใใณใฐใฎใใใฉใผใใณในๆ้ฉๅใๆไพใใพใใ่ฉณ็ดฐใฏ[ใใกใใฎใทใณใฐใซCPUใปใฏใทใงใณ](./perf_train_cpu)ใใๅ็
งใใ ใใใ
ไปฅไธใฎใใใฌใผใใผใงใฎไฝฟ็จใใฏใIntelยฎ MPIใฉใคใใฉใชใงmpirunใไฝฟ็จใใไพใ็คบใใฆใใพใใ
## Usage in Trainer
ใใฌใผใใผใงใฎใใซใCPUๅๆฃใใฌใผใใณใฐใๆๅนใซใใใใใซใใฆใผใถใผใฏใณใใณใๅผๆฐใซ **`--ddp_backend ccl`** ใ่ฟฝๅ ใใๅฟ
่ฆใใใใพใใ
ไพใ่ฆใฆใฟใพใใใใ[่ณชๅๅฟ็ญใฎไพ](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering)
ไปฅไธใฎใณใใณใใฏใ1ใคใฎXeonใใผใใง2ใคใฎใใญใปในใไฝฟ็จใใฆใใฌใผใใณใฐใๆๅนใซใใพใใ1ใคใฎใใญใปในใ1ใคใฎใฝใฑใใใงๅฎ่กใใใพใใOMP_NUM_THREADS/CCL_WORKER_COUNTๅคๆฐใฏใๆ้ฉใชใใใฉใผใใณในใ่ชฟๆดใใใใใซ่ชฟๆดใงใใพใใ
```shell script
export CCL_WORKER_COUNT=1
export MASTER_ADDR=127.0.0.1
mpirun -n 2 -genv OMP_NUM_THREADS=23 \
python3 run_qa.py \
--model_name_or_path bert-large-uncased \
--dataset_name squad \
--do_train \
--do_eval \
--per_device_train_batch_size 12 \
--learning_rate 3e-5 \
--num_train_epochs 2 \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir /tmp/debug_squad/ \
--no_cuda \
--ddp_backend ccl \
--use_ipex
```
ไปฅไธใฎใณใใณใใฏใ2ใคใฎXeonใใญใปใใต๏ผnode0ใจnode1ใnode0ใใกใคใณใใญใปในใจใใฆไฝฟ็จ๏ผใงๅ่จ4ใคใฎใใญใปในใไฝฟ็จใใฆใใฌใผใใณใฐใๆๅนใซใใพใใppn๏ผใใผใใใจใฎใใญใปในๆฐ๏ผใฏ2ใซ่จญๅฎใใใ1ใคใฎใฝใฑใใใใจใซ1ใคใฎใใญใปในใๅฎ่กใใใพใใๆ้ฉใชใใใฉใผใใณในใๅพใใใใซใOMP_NUM_THREADS/CCL_WORKER_COUNTๅคๆฐใ่ชฟๆดใงใใพใใ
node0ใงใฏใๅใใผใใฎIPใขใใฌในใๅซใๆงๆใใกใคใซใไฝๆใใใใฎๆงๆใใกใคใซใฎใในใๅผๆฐใจใใฆๆธกใๅฟ
่ฆใใใใพใใ
```shell script
cat hostfile
xxx.xxx.xxx.xxx #node0 ip
xxx.xxx.xxx.xxx #node1 ip
```
ใใผใ0ใงๆฌกใฎใณใใณใใๅฎ่กใใใจใใใผใ0ใจใใผใ1ใง**4DDP**ใBF16่ชๅๆททๅ็ฒพๅบฆใงๆๅนใซใชใใพใใ
```shell script
export CCL_WORKER_COUNT=1
export MASTER_ADDR=xxx.xxx.xxx.xxx #node0 ip
mpirun -f hostfile -n 4 -ppn 2 \
-genv OMP_NUM_THREADS=23 \
python3 run_qa.py \
--model_name_or_path bert-large-uncased \
--dataset_name squad \
--do_train \
--do_eval \
--per_device_train_batch_size 12 \
--learning_rate 3e-5 \
--num_train_epochs 2 \
--max_seq_length 384 \
--doc_stride 128 \
--output_dir /tmp/debug_squad/ \
--no_cuda \
--ddp_backend ccl \
--use_ipex \
--bf16
``` | transformers/docs/source/ja/perf_train_cpu_many.md/0 | {
"file_path": "transformers/docs/source/ja/perf_train_cpu_many.md",
"repo_id": "transformers",
"token_count": 3272
} | 268 |
<!--Copyright 2022 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# Semantic segmentation
[[open-in-colab]]
<Youtube id="dKE8SIt9C-w"/>
ใปใใณใใฃใใฏ ใปใฐใกใณใใผใทใงใณใงใฏใ็ปๅใฎๅใ
ใฎใใฏใปใซใซใฉใใซใพใใฏใฏใฉในใๅฒใๅฝใฆใพใใใปใฐใกใณใใผใทใงใณใซใฏใใใคใใฎใฟใคใใใใใพใใใใปใใณใใฃใใฏ ใปใฐใกใณใใผใทใงใณใฎๅ ดๅใๅใใชใใธใงใฏใใฎไธๆใฎใคใณในใฟใณใน้ใฎๅบๅฅใฏ่กใใใพใใใไธกๆนใฎใชใใธใงใฏใใซๅใใฉใใซใไปใใใใพใ (ใใจใใฐใ`car-1`ใจ`car-2`ใฎไปฃใใใซ`car`)ใใปใใณใใฃใใฏ ใปใฐใกใณใใผใทใงใณใฎไธ่ฌ็ใช็พๅฎไธ็ใฎใขใใชใฑใผใทใงใณใซใฏใๆญฉ่ก่
ใ้่ฆใชไบค้ๆ
ๅ ฑใ่ญๅฅใใใใใฎ่ชๅ้่ปข่ปใฎใใฌใผใใณใฐใๅป็็ปๅๅ
ใฎ็ดฐ่ใจ็ฐๅธธใฎ่ญๅฅใ่กๆ็ปๅใใใฎ็ฐๅขๅคๅใฎ็ฃ่ฆใชใฉใๅซใพใใพใใ
ใใฎใฌใคใใงใฏใๆฌกใฎๆนๆณใ่ชฌๆใใพใใ
1. [SceneParse150](https://huggingface.co/datasets/scene_parse_150) ใใผใฟใปใใใฎ [SegFormer](https://huggingface.co/docs/transformers/main/en/model_doc/segformer#segformer) ใๅพฎ่ชฟๆดใใพใใ
2. ๅพฎ่ชฟๆดใใใขใใซใๆจ่ซใซไฝฟ็จใใพใใ
<Tip>
ใใฎใใฅใผใใชใขใซใง่ชฌๆใใใฟในใฏใฏใๆฌกใฎใขใใซ ใขใผใญใใฏใใฃใงใตใใผใใใใฆใใพใใ
<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->
[BEiT](../model_doc/beit), [Data2VecVision](../model_doc/data2vec-vision), [DPT](../model_doc/dpt), [MobileNetV2](../model_doc/mobilenet_v2), [MobileViT](../model_doc/mobilevit), [MobileViTV2](../model_doc/mobilevitv2), [SegFormer](../model_doc/segformer), [UPerNet](../model_doc/upernet)
<!--End of the generated tip-->
</Tip>
ๅงใใๅใซใๅฟ
่ฆใชใฉใคใใฉใชใใในใฆใคใณในใใผใซใใใฆใใใใจใ็ขบ่ชใใฆใใ ใใใ
```bash
pip install -q datasets transformers evaluate
```
ใขใใซใใขใใใญใผใใใฆใณใใฅใใใฃใจๅ
ฑๆใงใใใใใซใHugging Face ใขใซใฆใณใใซใญใฐใคใณใใใใจใใๅงใใใพใใใใญใณใใใ่กจ็คบใใใใใใใผใฏใณใๅ
ฅๅใใฆใญใฐใคใณใใพใใ
```py
>>> from huggingface_hub import notebook_login
>>> notebook_login()
```
## Load SceneParse150 dataset
ใพใใSceneParse150 ใใผใฟใปใใใฎๅฐใใใตใใปใใใ ๐ค ใใผใฟใปใใ ใฉใคใใฉใชใใ่ชญใฟ่พผใฟใพใใใใใซใใใๅฎๅ
จใชใใผใฟใปใใใฎใใฌใผใใณใฐใซใใใซๆ้ใ่ฒปใใๅใซใๅฎ้จใใฆใในใฆใๆฉ่ฝใใใใจใ็ขบ่ชใใๆฉไผใๅพใใใพใใ
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("scene_parse_150", split="train[:50]")
```
[`~datasets.Dataset.train_test_split`] ใกใฝใใใไฝฟ็จใใฆใใใผใฟใปใใใฎ `train` ๅๅฒใใใฌใคใณ ใปใใใจใในใ ใปใใใซๅๅฒใใพใใ
```py
>>> ds = ds.train_test_split(test_size=0.2)
>>> train_ds = ds["train"]
>>> test_ds = ds["test"]
```
ๆฌกใซใไพใ่ฆใฆใฟใพใใใใ
```py
>>> train_ds[0]
{'image': <PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=512x683 at 0x7F9B0C201F90>,
'annotation': <PIL.PngImagePlugin.PngImageFile image mode=L size=512x683 at 0x7F9B0C201DD0>,
'scene_category': 368}
```
- `image`: ใทใผใณใฎ PIL ใคใกใผใธใ
- `annotation`: ใปใฐใกใณใใผใทใงใณ ใใใใฎ PIL ใคใกใผใธใใขใใซใฎใฟใผใฒใใใงใใใใพใใ
- `scene_category`: "kitchen"ใ"office"ใชใฉใฎ็ปๅใทใผใณใ่ชฌๆใใใซใใดใช IDใใใฎใฌใคใใงใฏใ`image`ใจ`annotation`ใฎใฟใๅฟ
่ฆใซใชใใพใใใฉใกใใ PIL ใคใกใผใธใงใใ
ใพใใใฉใใซ ID ใใฉใใซ ใฏใฉในใซใใใใใ่พๆธใไฝๆใใใใจใใงใใพใใใใใฏใๅพใงใขใใซใ่จญๅฎใใใจใใซๅฝน็ซใกใพใใใใใใใใใใณใฐใใใฆใณใญใผใใใ`id2label` ใใใณ `label2id` ใใฃใฏใทใงใใชใไฝๆใใพใใ
```py
>>> import json
>>> from huggingface_hub import cached_download, hf_hub_url
>>> repo_id = "huggingface/label-files"
>>> filename = "ade20k-id2label.json"
>>> id2label = json.load(open(cached_download(hf_hub_url(repo_id, filename, repo_type="dataset")), "r"))
>>> id2label = {int(k): v for k, v in id2label.items()}
>>> label2id = {v: k for k, v in id2label.items()}
>>> num_labels = len(id2label)
```
## Preprocess
ๆฌกใฎในใใใใงใฏใSegFormer ็ปๅใใญใปใใตใใญใผใใใฆใใขใใซใฎ็ปๅใจๆณจ้ใๆบๅใใพใใใใฎใใผใฟใปใใใฎใใใชไธ้จใฎใใผใฟใปใใใฏใใใใฏใฐใฉใฆใณใ ใฏใฉในใจใใฆใผใญใคใณใใใฏในใไฝฟ็จใใพใใใใ ใใๅฎ้ใซใฏ่ๆฏใฏใฉในใฏ 150 ๅใฎใฏใฉในใซๅซใพใใฆใใชใใใใ`reduce_labels=True`ใ่จญๅฎใใฆใในใฆใฎใฉใใซใใ 1 ใคใๅผใๅฟ
่ฆใใใใพใใใผใญใคใณใใใฏในใฏ `255` ใซ็ฝฎใๆใใใใใใใSegFormer ใฎๆๅคฑ้ขๆฐใซใใฃใฆ็ก่ฆใใใพใใ
```py
>>> from transformers import AutoImageProcessor
>>> checkpoint = "nvidia/mit-b0"
>>> image_processor = AutoImageProcessor.from_pretrained(checkpoint, reduce_labels=True)
```
<frameworkcontent>
<pt>
ใขใใซใ้ๅญฆ็ฟใซๅฏพใใฆใใๅ
็ขใซใใใใใซใ็ปๅใใผใฟใปใใใซใใใคใใฎใใผใฟๆกๅผตใ้ฉ็จใใใฎใไธ่ฌ็ใงใใใใฎใฌใคใใงใฏใ[torchvision](https://pytorch.org/vision/stable/index.html) ใฎ [`ColorJitter`](https://pytorch.org/vision/stable/generated/torchvision.transforms.ColorJitter.html) ้ขๆฐใไฝฟ็จใใพใใ ) ใไฝฟ็จใใฆ็ปๅใฎ่ฒใฎใใญใใใฃใใฉใณใใ ใซๅคๆดใใพใใใไปปๆใฎ็ปๅใฉใคใใฉใชใไฝฟ็จใใใใจใใงใใพใใ
```py
>>> from torchvision.transforms import ColorJitter
>>> jitter = ColorJitter(brightness=0.25, contrast=0.25, saturation=0.25, hue=0.1)
```
ๆฌกใซใใขใใซใฎ็ปๅใจๆณจ้ใๆบๅใใใใใฎ 2 ใคใฎๅๅฆ็้ขๆฐใไฝๆใใพใใใใใใฎ้ขๆฐใฏใ็ปๅใ`pixel_values`ใซๅคๆใใๆณจ้ใ`labels`ใซๅคๆใใพใใใใฌใผใใณใฐ ใปใใใฎๅ ดๅใ็ปๅใ็ปๅใใญใปใใตใซๆไพใใๅใซ `jitter` ใ้ฉ็จใใใพใใใในใ ใปใใใฎๅ ดๅใใในใไธญใซใใผใฟๆกๅผตใ้ฉ็จใใใชใใใใ็ปๅใใญใปใใตใฏ`images`ใๅใๅใฃใฆๆญฃ่ฆๅใใ`ใฉใใซ`ใฎใฟใๅใๅใใพใใ
```py
>>> def train_transforms(example_batch):
... images = [jitter(x) for x in example_batch["image"]]
... labels = [x for x in example_batch["annotation"]]
... inputs = image_processor(images, labels)
... return inputs
>>> def val_transforms(example_batch):
... images = [x for x in example_batch["image"]]
... labels = [x for x in example_batch["annotation"]]
... inputs = image_processor(images, labels)
... return inputs
```
ใใผใฟใปใใๅ
จไฝใซ`jitter`ใ้ฉ็จใใใซใฏใ๐ค Datasets [`~datasets.Dataset.set_transform`] ้ขๆฐใไฝฟ็จใใพใใๅคๆใฏใชใณใถใใฉใคใง้ฉ็จใใใใใใ้ซ้ใงๆถ่ฒปใใใใฃในใฏๅฎน้ใๅฐใชใใชใใพใใ
```py
>>> train_ds.set_transform(train_transforms)
>>> test_ds.set_transform(val_transforms)
```
</pt>
</frameworkcontent>
<frameworkcontent>
<tf>
ใขใใซใ้ๅญฆ็ฟใซๅฏพใใฆใใๅ
็ขใซใใใใใซใ็ปๅใใผใฟใปใใใซใใใคใใฎใใผใฟๆกๅผตใ้ฉ็จใใใฎใไธ่ฌ็ใงใใ
ใใฎใฌใคใใงใฏใ[`tf.image`](https://www.tensorflow.org/api_docs/python/tf/image) ใไฝฟ็จใใฆ็ปๅใฎ่ฒใฎใใญใใใฃใใฉใณใใ ใซๅคๆดใใพใใใไปปๆใฎใใญใใใฃใไฝฟ็จใใใใจใใงใใพใใ็ปๅ
ๅฅฝใใชๅณๆธ้คจใ
2 ใคใฎๅฅใ
ใฎๅคๆ้ขๆฐใๅฎ็พฉใใพใใ
- ็ปๅๆกๅผตใๅซใใใฌใผใใณใฐ ใใผใฟๅคๆ
- ๐ค Transformers ใฎใณใณใใฅใผใฟใผ ใใธใงใณ ใขใใซใฏใใฃใใซๅชๅ
ใฎใฌใคใขใฆใใๆณๅฎใใฆใใใใใ็ปๅใ่ปข็ฝฎใใใ ใใฎๆค่จผใใผใฟๅคๆ
```py
>>> import tensorflow as tf
>>> def aug_transforms(image):
... image = tf.keras.utils.img_to_array(image)
... image = tf.image.random_brightness(image, 0.25)
... image = tf.image.random_contrast(image, 0.5, 2.0)
... image = tf.image.random_saturation(image, 0.75, 1.25)
... image = tf.image.random_hue(image, 0.1)
... image = tf.transpose(image, (2, 0, 1))
... return image
>>> def transforms(image):
... image = tf.keras.utils.img_to_array(image)
... image = tf.transpose(image, (2, 0, 1))
... return image
```
ๆฌกใซใใขใใซใฎ็ปๅใจๆณจ้ใฎใใใใๆบๅใใ 2 ใคใฎๅๅฆ็้ขๆฐใไฝๆใใพใใใใใใฎๆฉ่ฝใ้ฉ็จใใใพใ
็ปๅๅคๆใ่กใใไปฅๅใซใญใผใใใใ `image_processor` ใไฝฟ็จใใฆ็ปๅใ `pixel_values` ใซๅคๆใใ
`labels`ใธใฎๆณจ้ใ `ImageProcessor` ใฏใ็ปๅใฎใตใคใบๅคๆดใจๆญฃ่ฆๅใๅฆ็ใใพใใ
```py
>>> def train_transforms(example_batch):
... images = [aug_transforms(x.convert("RGB")) for x in example_batch["image"]]
... labels = [x for x in example_batch["annotation"]]
... inputs = image_processor(images, labels)
... return inputs
>>> def val_transforms(example_batch):
... images = [transforms(x.convert("RGB")) for x in example_batch["image"]]
... labels = [x for x in example_batch["annotation"]]
... inputs = image_processor(images, labels)
... return inputs
```
ใใผใฟใปใใๅ
จไฝใซๅๅฆ็ๅคๆใ้ฉ็จใใใซใฏใ๐ค Datasets [`~datasets.Dataset.set_transform`] ้ขๆฐใไฝฟ็จใใพใใ
ๅคๆใฏใชใณใถใใฉใคใง้ฉ็จใใใใใใ้ซ้ใงๆถ่ฒปใใใใฃในใฏๅฎน้ใๅฐใชใใชใใพใใ
```py
>>> train_ds.set_transform(train_transforms)
>>> test_ds.set_transform(val_transforms)
```
</tf>
</frameworkcontent>
## Evaluate
ใใฌใผใใณใฐไธญใซใกใใชใฏในใๅซใใใจใๅคใใฎๅ ดๅใใขใใซใฎใใใฉใผใใณในใ่ฉไพกใใใฎใซๅฝน็ซใกใพใใ ๐ค [Evaluate](https://huggingface.co/docs/evaluate/index) ใฉใคใใฉใชใไฝฟ็จใใฆใ่ฉไพกใกใฝใใใใใฐใใใญใผใใงใใพใใใใฎใฟในใฏใงใฏใ[Mean Intersection over Union](https://huggingface.co/spaces/evaluate-metric/accuracy) (IoU) ใกใใชใใฏใใญใผใใใพใ (๐ค Evaluate [ใฏใคใใฏ ใใขใผ](https://huggingface.co/docs/evaluate/a_quick_tour) ใๅ็
งใใฆใใกใใชใฏในใใญใผใใใฆ่จ็ฎใใๆนๆณใฎ่ฉณ็ดฐใ็ขบ่ชใใฆใใ ใใ)ใ
```py
>>> import evaluate
>>> metric = evaluate.load("mean_iou")
```
ๆฌกใซใใกใใชใฏในใ [`~evaluate.EvaluationModule.compute`] ใใ้ขๆฐใไฝๆใใพใใไบๆธฌใๆฌกใฎใใใซๅคๆใใๅฟ
่ฆใใใใพใ
ๆๅใซใญใธใใใไฝๆใใๆฌกใซ [`~evaluate.EvaluationModule.compute`] ใๅผใณๅบใๅใซใฉใใซใฎใตใคใบใซไธ่ดใใใใใซๅๅฝขๆใใพใใ
<frameworkcontent>
<pt>
```py
>>> import numpy as np
>>> import torch
>>> from torch import nn
>>> def compute_metrics(eval_pred):
... with torch.no_grad():
... logits, labels = eval_pred
... logits_tensor = torch.from_numpy(logits)
... logits_tensor = nn.functional.interpolate(
... logits_tensor,
... size=labels.shape[-2:],
... mode="bilinear",
... align_corners=False,
... ).argmax(dim=1)
... pred_labels = logits_tensor.detach().cpu().numpy()
... metrics = metric.compute(
... predictions=pred_labels,
... references=labels,
... num_labels=num_labels,
... ignore_index=255,
... reduce_labels=False,
... )
... for key, value in metrics.items():
... if type(value) is np.ndarray:
... metrics[key] = value.tolist()
... return metrics
```
</pt>
</frameworkcontent>
<frameworkcontent>
<tf>
```py
>>> def compute_metrics(eval_pred):
... logits, labels = eval_pred
... logits = tf.transpose(logits, perm=[0, 2, 3, 1])
... logits_resized = tf.image.resize(
... logits,
... size=tf.shape(labels)[1:],
... method="bilinear",
... )
... pred_labels = tf.argmax(logits_resized, axis=-1)
... metrics = metric.compute(
... predictions=pred_labels,
... references=labels,
... num_labels=num_labels,
... ignore_index=-1,
... reduce_labels=image_processor.do_reduce_labels,
... )
... per_category_accuracy = metrics.pop("per_category_accuracy").tolist()
... per_category_iou = metrics.pop("per_category_iou").tolist()
... metrics.update({f"accuracy_{id2label[i]}": v for i, v in enumerate(per_category_accuracy)})
... metrics.update({f"iou_{id2label[i]}": v for i, v in enumerate(per_category_iou)})
... return {"val_" + k: v for k, v in metrics.items()}
```
</tf>
</frameworkcontent>
ใใใง`compute_metrics`้ขๆฐใฎๆบๅใๆดใใพใใใใใฌใผใใณใฐใใปใใใขใใใใใจใใซใใฎ้ขๆฐใซๆปใใพใใ
## Train
<frameworkcontent>
<pt>
<Tip>
[`Trainer`] ใไฝฟ็จใใใขใใซใฎๅพฎ่ชฟๆดใซๆ
ฃใใฆใใชใๅ ดๅใฏใ[ใใ](../training#finetune-with-trainer) ใฎๅบๆฌ็ใชใใฅใผใใชใขใซใใ่ฆงใใ ใใใ
</Tip>
ใใใงใขใใซใฎใใฌใผใใณใฐใ้ๅงใใๆบๅใๆดใใพใใใ [`AutoModelForSemanticSegmentation`] ใไฝฟ็จใใฆ SegFormer ใใญใผใใใใฉใใซ ID ใจใฉใใซ ใฏใฉใน้ใฎใใใใณใฐใใขใใซใซๆธกใใพใใ
```py
>>> from transformers import AutoModelForSemanticSegmentation, TrainingArguments, Trainer
>>> model = AutoModelForSemanticSegmentation.from_pretrained(checkpoint, id2label=id2label, label2id=label2id)
```
ใใฎๆ็นใงๆฎใฃใฆใใๆ้ ใฏๆฌกใฎ 3 ใคใ ใใงใใ
1. [`TrainingArguments`] ใงใใฌใผใใณใฐ ใใคใใผใใฉใกใผใฟใๅฎ็พฉใใพใใ `image` ๅใๅ้คใใใใใใๆชไฝฟ็จใฎๅใๅ้คใใชใใใจใ้่ฆใงใใ `image` ๅใใชใใจใ`pixel_values` ใไฝๆใงใใพใใใใใฎๅไฝใ้ฒใใซใฏใ`remove_unused_columns=False`ใ่จญๅฎใใฆใใ ใใใไปใซๅฟ
่ฆใชใใฉใกใผใฟใฏใใขใใซใฎไฟๅญๅ ดๆใๆๅฎใใ `output_dir` ใ ใใงใใ `push_to_hub=True`ใ่จญๅฎใใฆใใใฎใขใใซใใใใซใใใทใฅใใพใ (ใขใใซใใขใใใญใผใใใใซใฏใHugging Face ใซใตใคใณใคใณใใๅฟ
่ฆใใใใพใ)ใๅใจใใใฏใฎ็ตไบๆใซใ[`Trainer`] ใฏ IoU ใกใใชใใฏใ่ฉไพกใใใใฌใผใใณใฐ ใใงใใฏใใคใณใใไฟๅญใใพใใ
2. ใใฌใผใใณใฐๅผๆฐใใใขใใซใใใผใฟใปใใใใใผใฏใใคใถใผใใใผใฟ็
งๅๅจใใใใณ `compute_metrics` ้ขๆฐใจใจใใซ [`Trainer`] ใซๆธกใใพใใ
3. [`~Trainer.train`] ใๅผใณๅบใใฆใขใใซใๅพฎ่ชฟๆดใใพใใ
```py
>>> training_args = TrainingArguments(
... output_dir="segformer-b0-scene-parse-150",
... learning_rate=6e-5,
... num_train_epochs=50,
... per_device_train_batch_size=2,
... per_device_eval_batch_size=2,
... save_total_limit=3,
... evaluation_strategy="steps",
... save_strategy="steps",
... save_steps=20,
... eval_steps=20,
... logging_steps=1,
... eval_accumulation_steps=5,
... remove_unused_columns=False,
... push_to_hub=True,
... )
>>> trainer = Trainer(
... model=model,
... args=training_args,
... train_dataset=train_ds,
... eval_dataset=test_ds,
... compute_metrics=compute_metrics,
... )
>>> trainer.train()
```
ใใฌใผใใณใฐใๅฎไบใใใใ [`~transformers.Trainer.push_to_hub`] ใกใฝใใใไฝฟ็จใใฆใขใใซใใใใซๅ
ฑๆใใ่ชฐใใใขใใซใไฝฟ็จใงใใใใใซใใพใใ
```py
>>> trainer.push_to_hub()
```
</pt>
</frameworkcontent>
<frameworkcontent>
<tf>
<Tip>
Keras ใไฝฟ็จใใใขใใซใฎๅพฎ่ชฟๆดใซๆ
ฃใใฆใใชใๅ ดๅใฏใใพใ [ๅบๆฌใใฅใผใใชใขใซ](./training#train-a-tensorflow-model-with-keras) ใ็ขบ่ชใใฆใใ ใใใ
</Tip>
TensorFlow ใงใขใใซใๅพฎ่ชฟๆดใใใซใฏใๆฌกใฎๆ้ ใซๅพใใพใใ
1. ใใฌใผใใณใฐใฎใใคใใผใใฉใกใผใฟใๅฎ็พฉใใใชใใใฃใใคใถใผใจๅญฆ็ฟ็ในใฑใธใฅใผใซใ่จญๅฎใใพใใ
2. ไบๅใใฌใผใใณใฐใใใใขใใซใใคใณในใฟใณในๅใใพใใ
3. ๐ค ใใผใฟใปใใใ `tf.data.Dataset` ใซๅคๆใใพใใ
4. ใขใใซใใณใณใใคใซใใพใใ
5. ใณใผใซใใใฏใ่ฟฝๅ ใใฆใกใใชใฏในใ่จ็ฎใใใขใใซใ ๐ค Hub ใซใขใใใญใผใใใพใ
6. `fit()` ใกใฝใใใไฝฟ็จใใฆใใฌใผใใณใฐใๅฎ่กใใพใใ
ใพใใใใคใใผใใฉใกใผใฟใผใใชใใใฃใใคใถใผใๅญฆ็ฟ็ในใฑใธใฅใผใซใๅฎ็พฉใใพใใ
```py
>>> from transformers import create_optimizer
>>> batch_size = 2
>>> num_epochs = 50
>>> num_train_steps = len(train_ds) * num_epochs
>>> learning_rate = 6e-5
>>> weight_decay_rate = 0.01
>>> optimizer, lr_schedule = create_optimizer(
... init_lr=learning_rate,
... num_train_steps=num_train_steps,
... weight_decay_rate=weight_decay_rate,
... num_warmup_steps=0,
... )
```
ๆฌกใซใใฉใใซ ใใใใณใฐใจใจใใซ [`TFAutoModelForSemanticSegmentation`] ใไฝฟ็จใใฆ SegFormer ใใญใผใใใใใใใณใณใใคใซใใพใใ
ใชใใใฃใใคใถใ Transformers ใขใใซใซใฏใในใฆใใใฉใซใใฎใฟในใฏ้ข้ฃใฎๆๅคฑ้ขๆฐใใใใใใๆฌกใฎๅ ดๅใ้คใใๆๅคฑ้ขๆฐใๆๅฎใใๅฟ
่ฆใฏใชใใใจใซๆณจๆใใฆใใ ใใใ
```py
>>> from transformers import TFAutoModelForSemanticSegmentation
>>> model = TFAutoModelForSemanticSegmentation.from_pretrained(
... checkpoint,
... id2label=id2label,
... label2id=label2id,
... )
>>> model.compile(optimizer=optimizer) # No loss argument!
```
[`~datasets.Dataset.to_tf_dataset`] ใจ [`DefaultDataCollator`] ใไฝฟ็จใใฆใใใผใฟใปใใใ `tf.data.Dataset` ๅฝขๅผใซๅคๆใใพใใ
```py
>>> from transformers import DefaultDataCollator
>>> data_collator = DefaultDataCollator(return_tensors="tf")
>>> tf_train_dataset = train_ds.to_tf_dataset(
... columns=["pixel_values", "label"],
... shuffle=True,
... batch_size=batch_size,
... collate_fn=data_collator,
... )
>>> tf_eval_dataset = test_ds.to_tf_dataset(
... columns=["pixel_values", "label"],
... shuffle=True,
... batch_size=batch_size,
... collate_fn=data_collator,
... )
```
ไบๆธฌใใ็ฒพๅบฆใ่จ็ฎใใใขใใซใ ๐ค ใใใซใใใทใฅใใใซใฏใ[Keras callbacks](../main_classes/keras_callbacks) ใไฝฟ็จใใพใใ
`compute_metrics` ้ขๆฐใ [`KerasMetricCallback`] ใซๆธกใใพใใ
ใใใฆ [`PushToHubCallback`] ใไฝฟ็จใใฆใขใใซใใขใใใญใผใใใพใใ
```py
>>> from transformers.keras_callbacks import KerasMetricCallback, PushToHubCallback
>>> metric_callback = KerasMetricCallback(
... metric_fn=compute_metrics, eval_dataset=tf_eval_dataset, batch_size=batch_size, label_cols=["labels"]
... )
>>> push_to_hub_callback = PushToHubCallback(output_dir="scene_segmentation", tokenizer=image_processor)
>>> callbacks = [metric_callback, push_to_hub_callback]
```
ใคใใซใใขใใซใใใฌใผใใณใฐใใๆบๅใๆดใใพใใใใใฌใผใใณใฐใใใณๆค่จผใใผใฟใปใใใใจใใใฏๆฐใ
ใขใใซใๅพฎ่ชฟๆดใใใใใฎใณใผใซใใใฏ:
```py
>>> model.fit(
... tf_train_dataset,
... validation_data=tf_eval_dataset,
... callbacks=callbacks,
... epochs=num_epochs,
... )
```
ใใใงใจใ๏ผใขใใซใๅพฎ่ชฟๆดใใ๐ค Hub ใงๅ
ฑๆใใพใใใใใใงๆจ่ซใซไฝฟ็จใงใใใใใซใชใใพใใใ
</tf>
</frameworkcontent>
## Inference
ใขใใซใๅพฎ่ชฟๆดใใใฎใงใใใใๆจ่ซใซไฝฟ็จใงใใใใใซใชใใพใใใ
ๆจ่ซใฎใใใซ็ปๅใใญใผใใใพใใ
```py
>>> image = ds[0]["image"]
>>> image
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-image.png" alt="Image of bedroom"/>
</div>
<frameworkcontent>
<pt>
ๆจ่ซ็จใซๅพฎ่ชฟๆดใใใใขใใซใ่ฉฆใๆใ็ฐกๅใชๆนๆณใฏใใใใ [`pipeline`] ใงไฝฟ็จใใใใจใงใใใขใใซใไฝฟ็จใใฆ็ปๅใปใฐใกใณใใผใทใงใณ็จใฎ `pipeline`ใใคใณในใฟใณในๅใใใใใซ็ปๅใๆธกใใพใใ
```py
>>> from transformers import pipeline
>>> segmenter = pipeline("image-segmentation", model="my_awesome_seg_model")
>>> segmenter(image)
[{'score': None,
'label': 'wall',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062690>},
{'score': None,
'label': 'sky',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062A50>},
{'score': None,
'label': 'floor',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062B50>},
{'score': None,
'label': 'ceiling',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062A10>},
{'score': None,
'label': 'bed ',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062E90>},
{'score': None,
'label': 'windowpane',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062390>},
{'score': None,
'label': 'cabinet',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062550>},
{'score': None,
'label': 'chair',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062D90>},
{'score': None,
'label': 'armchair',
'mask': <PIL.Image.Image image mode=L size=640x427 at 0x7FD5B2062E10>}]
```
ๅฟ
่ฆใซๅฟใใฆใ`pipeline`ใฎ็ตๆใๆๅใง่ค่ฃฝใใใใจใใงใใพใใ็ปๅใ็ปๅใใญใปใใตใงๅฆ็ใใ`pixel_values` ใ GPU ใซ้
็ฝฎใใพใใ
```py
>>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # use GPU if available, otherwise use a CPU
>>> encoding = image_processor(image, return_tensors="pt")
>>> pixel_values = encoding.pixel_values.to(device)
```
ๅ
ฅๅใใขใใซใซๆธกใใ`logits`ใ่ฟใใพใใ
```py
>>> outputs = model(pixel_values=pixel_values)
>>> logits = outputs.logits.cpu()
```
ๆฌกใซใใญใธใใใๅ
ใฎ็ปๅใตใคใบใซๅในใฑใผใซใใพใใ
```py
>>> upsampled_logits = nn.functional.interpolate(
... logits,
... size=image.size[::-1],
... mode="bilinear",
... align_corners=False,
... )
>>> pred_seg = upsampled_logits.argmax(dim=1)[0]
```
</pt>
</frameworkcontent>
<frameworkcontent>
<tf>
็ปๅใใญใปใใตใใญใผใใใฆ็ปๅใๅๅฆ็ใใๅ
ฅๅใ TensorFlow ใใณใฝใซใจใใฆ่ฟใใพใใ
```py
>>> from transformers import AutoImageProcessor
>>> image_processor = AutoImageProcessor.from_pretrained("MariaK/scene_segmentation")
>>> inputs = image_processor(image, return_tensors="tf")
```
ๅ
ฅๅใใขใใซใซๆธกใใ`logits`ใ่ฟใใพใใ
```py
>>> from transformers import TFAutoModelForSemanticSegmentation
>>> model = TFAutoModelForSemanticSegmentation.from_pretrained("MariaK/scene_segmentation")
>>> logits = model(**inputs).logits
```
ๆฌกใซใใญใธใใใๅ
ใฎ็ปๅใตใคใบใซๅในใฑใผใซใใใฏใฉในๆฌกๅ
ใซ argmax ใ้ฉ็จใใพใใ
```py
>>> logits = tf.transpose(logits, [0, 2, 3, 1])
>>> upsampled_logits = tf.image.resize(
... logits,
... # We reverse the shape of `image` because `image.size` returns width and height.
... image.size[::-1],
... )
>>> pred_seg = tf.math.argmax(upsampled_logits, axis=-1)[0]
```
</tf>
</frameworkcontent>
็ตๆใ่ฆ่ฆๅใใใซใฏใ[ใใผใฟใปใใ ใซใฉใผ ใใฌใใ](https://github.com/tensorflow/models/blob/3f1ca33afe3c1631b733ea7e40c294273b9e406d/research/deeplab/utils/get_dataset_colormap.py#L51) ใใใใใใใใใใใใ `ade_palette()` ใจใใฆใญใผใใใพใใใฏใฉในใ RGB ๅคใซๅคๆใใพใใๆฌกใซใ็ปๅใจไบๆธฌใใใใปใฐใกใณใใผใทใงใณ ใใใใ็ตใฟๅใใใฆใใญใใใงใใพใใ
```py
>>> import matplotlib.pyplot as plt
>>> import numpy as np
>>> color_seg = np.zeros((pred_seg.shape[0], pred_seg.shape[1], 3), dtype=np.uint8)
>>> palette = np.array(ade_palette())
>>> for label, color in enumerate(palette):
... color_seg[pred_seg == label, :] = color
>>> color_seg = color_seg[..., ::-1] # convert to BGR
>>> img = np.array(image) * 0.5 + color_seg * 0.5 # plot the image with the segmentation map
>>> img = img.astype(np.uint8)
>>> plt.figure(figsize=(15, 10))
>>> plt.imshow(img)
>>> plt.show()
```
<div class="flex justify-center">
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/semantic-seg-preds.png" alt="Image of bedroom overlaid with segmentation map"/>
</div>
| transformers/docs/source/ja/tasks/semantic_segmentation.md/0 | {
"file_path": "transformers/docs/source/ja/tasks/semantic_segmentation.md",
"repo_id": "transformers",
"token_count": 10621
} | 269 |
<!--Copyright 2020 The HuggingFace Team. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
โ ๏ธ Note that this file is in Markdown but contain specific syntax for our doc-builder (similar to MDX) that may not be
rendered properly in your Markdown viewer.
-->
# ์ฌ์ฉ์ ์ ์ ๋ชจ๋ธ ๊ณต์ ํ๊ธฐ[[sharing-custom-models]]
๐ค Transformers ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ ์ฝ๊ฒ ํ์ฅํ ์ ์๋๋ก ์ค๊ณ๋์์ต๋๋ค.
๋ชจ๋ ๋ชจ๋ธ์ ์ถ์ํ ์์ด ์ ์ฅ์์ ์ง์ ๋ ํ์ ํด๋์ ์์ ํ ์ฝ๋ฉ๋์ด ์์ผ๋ฏ๋ก, ์์ฝ๊ฒ ๋ชจ๋ธ๋ง ํ์ผ์ ๋ณต์ฌํ๊ณ ํ์์ ๋ฐ๋ผ ์กฐ์ ํ ์ ์์ต๋๋ค.
์์ ํ ์๋ก์ด ๋ชจ๋ธ์ ๋ง๋๋ ๊ฒฝ์ฐ์๋ ์ฒ์๋ถํฐ ์์ํ๋ ๊ฒ์ด ๋ ์ฌ์ธ ์ ์์ต๋๋ค.
์ด ํํ ๋ฆฌ์ผ์์๋ Transformers ๋ด์์ ์ฌ์ฉํ ์ ์๋๋ก ์ฌ์ฉ์ ์ ์ ๋ชจ๋ธ๊ณผ ๊ตฌ์ฑ์ ์์ฑํ๋ ๋ฐฉ๋ฒ๊ณผ
๐ค Transformers ๋ผ์ด๋ธ๋ฌ๋ฆฌ์ ์๋ ๊ฒฝ์ฐ์๋ ๋๊ตฌ๋ ์ฌ์ฉํ ์ ์๋๋ก (์์กด์ฑ๊ณผ ํจ๊ป) ์ปค๋ฎค๋ํฐ์ ๊ณต์ ํ๋ ๋ฐฉ๋ฒ์ ๋ฐฐ์ธ ์ ์์ต๋๋ค.
[timm ๋ผ์ด๋ธ๋ฌ๋ฆฌ](https://github.com/rwightman/pytorch-image-models)์ ResNet ํด๋์ค๋ฅผ [`PreTrainedModel`]๋ก ๋ํํ ResNet ๋ชจ๋ธ์ ์๋ก ๋ชจ๋ ๊ฒ์ ์ค๋ช
ํฉ๋๋ค.
## ์ฌ์ฉ์ ์ ์ ๊ตฌ์ฑ ์์ฑํ๊ธฐ[[writing-a-custom-configuration]]
๋ชจ๋ธ์ ๋ค์ด๊ฐ๊ธฐ ์ ์ ๋จผ์ ๊ตฌ์ฑ์ ์์ฑํด๋ณด๋๋ก ํ๊ฒ ์ต๋๋ค.
๋ชจ๋ธ์ `configuration`์ ๋ชจ๋ธ์ ๋ง๋ค๊ธฐ ์ํด ํ์ํ ๋ชจ๋ ์ค์ํ ๊ฒ๋ค์ ํฌํจํ๊ณ ์๋ ๊ฐ์ฒด์
๋๋ค.
๋ค์ ์น์
์์ ๋ณผ ์ ์๋ฏ์ด, ๋ชจ๋ธ์ `config`๋ฅผ ์ฌ์ฉํด์๋ง ์ด๊ธฐํํ ์ ์๊ธฐ ๋๋ฌธ์ ์๋ฒฝํ ๊ตฌ์ฑ์ด ํ์ํฉ๋๋ค.
์๋ ์์์์๋ ResNet ํด๋์ค์ ์ธ์(argument)๋ฅผ ์กฐ์ ํด๋ณด๊ฒ ์ต๋๋ค.
๋ค๋ฅธ ๊ตฌ์ฑ์ ๊ฐ๋ฅํ ResNet ์ค ๋ค๋ฅธ ์ ํ์ ์ ๊ณตํฉ๋๋ค.
๊ทธ๋ฐ ๋ค์ ๋ช ๊ฐ์ง ์ ํจ์ฑ์ ํ์ธํ ํ ํด๋น ์ธ์๋ฅผ ์ ์ฅํฉ๋๋ค.
```python
from transformers import PretrainedConfig
from typing import List
class ResnetConfig(PretrainedConfig):
model_type = "resnet"
def __init__(
self,
block_type="bottleneck",
layers: List[int] = [3, 4, 6, 3],
num_classes: int = 1000,
input_channels: int = 3,
cardinality: int = 1,
base_width: int = 64,
stem_width: int = 64,
stem_type: str = "",
avg_down: bool = False,
**kwargs,
):
if block_type not in ["basic", "bottleneck"]:
raise ValueError(f"`block_type` must be 'basic' or bottleneck', got {block_type}.")
if stem_type not in ["", "deep", "deep-tiered"]:
raise ValueError(f"`stem_type` must be '', 'deep' or 'deep-tiered', got {stem_type}.")
self.block_type = block_type
self.layers = layers
self.num_classes = num_classes
self.input_channels = input_channels
self.cardinality = cardinality
self.base_width = base_width
self.stem_width = stem_width
self.stem_type = stem_type
self.avg_down = avg_down
super().__init__(**kwargs)
```
์ฌ์ฉ์ ์ ์ `configuration`์ ์์ฑํ ๋ ๊ธฐ์ตํด์ผ ํ ์ธ ๊ฐ์ง ์ค์ํ ์ฌํญ์ ๋ค์๊ณผ ๊ฐ์ต๋๋ค:
- `PretrainedConfig`์ ์์ํด์ผ ํฉ๋๋ค.
- `PretrainedConfig`์ `__init__`์ ๋ชจ๋ kwargs๋ฅผ ํ์ฉํด์ผ ํ๊ณ ,
- ์ด๋ฌํ `kwargs`๋ ์์ ํด๋์ค `__init__`์ ์ ๋ฌ๋์ด์ผ ํฉ๋๋ค.
์์์ ๐ค Transformers ๋ผ์ด๋ธ๋ฌ๋ฆฌ์์ ๋ชจ๋ ๊ธฐ๋ฅ์ ๊ฐ์ ธ์ค๋ ๊ฒ์
๋๋ค.
์ด๋ฌํ ์ ์ผ๋ก๋ถํฐ ๋น๋กฏ๋๋ ๋ ๊ฐ์ง ์ ์ฝ ์กฐ๊ฑด์ `PretrainedConfig`์ ์ค์ ํ๋ ๊ฒ๋ณด๋ค ๋ ๋ง์ ํ๋๊ฐ ์์ต๋๋ค.
`from_pretrained` ๋ฉ์๋๋ก ๊ตฌ์ฑ์ ๋ค์ ๋ก๋ํ ๋ ํด๋น ํ๋๋ ๊ตฌ์ฑ์์ ์๋ฝํ ํ ์์ ํด๋์ค๋ก ๋ณด๋ด์ผ ํฉ๋๋ค.
๋ชจ๋ธ์ auto ํด๋์ค์ ๋ฑ๋กํ์ง ์๋ ํ, `configuration`์์ `model_type`์ ์ ์(์ฌ๊ธฐ์ `model_type="resnet"`)ํ๋ ๊ฒ์ ํ์ ์ฌํญ์ด ์๋๋๋ค (๋ง์ง๋ง ์น์
์ฐธ์กฐ).
์ด๋ ๊ฒ ํ๋ฉด ๋ผ์ด๋ธ๋ฌ๋ฆฌ์ ๋ค๋ฅธ ๋ชจ๋ธ ๊ตฌ์ฑ๊ณผ ๋ง์ฐฌ๊ฐ์ง๋ก ๊ตฌ์ฑ์ ์ฝ๊ฒ ๋ง๋ค๊ณ ์ ์ฅํ ์ ์์ต๋๋ค.
๋ค์์ resnet50d ๊ตฌ์ฑ์ ์์ฑํ๊ณ ์ ์ฅํ๋ ๋ฐฉ๋ฒ์
๋๋ค:
```py
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
resnet50d_config.save_pretrained("custom-resnet")
```
์ด๋ ๊ฒ ํ๋ฉด `custom-resnet` ํด๋ ์์ `config.json`์ด๋ผ๋ ํ์ผ์ด ์ ์ฅ๋ฉ๋๋ค.
๊ทธ๋ฐ ๋ค์ `from_pretrained` ๋ฉ์๋๋ฅผ ์ฌ์ฉํ์ฌ ๊ตฌ์ฑ์ ๋ค์ ๋ก๋ํ ์ ์์ต๋๋ค.
```py
resnet50d_config = ResnetConfig.from_pretrained("custom-resnet")
```
๊ตฌ์ฑ์ Hub์ ์ง์ ์
๋ก๋ํ๊ธฐ ์ํด [`PretrainedConfig`] ํด๋์ค์ [`~PretrainedConfig.push_to_hub`]์ ๊ฐ์ ๋ค๋ฅธ ๋ฉ์๋๋ฅผ ์ฌ์ฉํ ์ ์์ต๋๋ค.
## ์ฌ์ฉ์ ์ ์ ๋ชจ๋ธ ์์ฑํ๊ธฐ[[writing-a-custom-model]]
์ด์ ResNet ๊ตฌ์ฑ์ด ์์ผ๋ฏ๋ก ๋ชจ๋ธ์ ์์ฑํ ์ ์์ต๋๋ค.
์ค์ ๋ก๋ ๋ ๊ฐ๋ฅผ ์์ฑํ ๊ฒ์
๋๋ค. ํ๋๋ ์ด๋ฏธ์ง ๋ฐฐ์น์์ hidden features๋ฅผ ์ถ์ถํ๋ ๊ฒ([`BertModel`]๊ณผ ๊ฐ์ด), ๋ค๋ฅธ ํ๋๋ ์ด๋ฏธ์ง ๋ถ๋ฅ์ ์ ํฉํ ๊ฒ์
๋๋ค([`BertForSequenceClassification`]๊ณผ ๊ฐ์ด).
์ด์ ์ ์ธ๊ธํ๋ฏ์ด ์ด ์์ ์์๋ ๋จ์ํ๊ฒ ํ๊ธฐ ์ํด ๋ชจ๋ธ์ ๋์จํ ๋ํผ(loose wrapper)๋ง ์์ฑํ ๊ฒ์
๋๋ค.
์ด ํด๋์ค๋ฅผ ์์ฑํ๊ธฐ ์ ์ ๋ธ๋ก ์ ํ๊ณผ ์ค์ ๋ธ๋ก ํด๋์ค ๊ฐ์ ๋งคํ ์์
๋ง ํ๋ฉด ๋ฉ๋๋ค.
๊ทธ๋ฐ ๋ค์ `ResNet` ํด๋์ค๋ก ์ ๋ฌ๋์ด `configuration`์ ํตํด ๋ชจ๋ธ์ด ์ ์ธ๋ฉ๋๋ค:
```py
from transformers import PreTrainedModel
from timm.models.resnet import BasicBlock, Bottleneck, ResNet
from .configuration_resnet import ResnetConfig
BLOCK_MAPPING = {"basic": BasicBlock, "bottleneck": Bottleneck}
class ResnetModel(PreTrainedModel):
config_class = ResnetConfig
def __init__(self, config):
super().__init__(config)
block_layer = BLOCK_MAPPING[config.block_type]
self.model = ResNet(
block_layer,
config.layers,
num_classes=config.num_classes,
in_chans=config.input_channels,
cardinality=config.cardinality,
base_width=config.base_width,
stem_width=config.stem_width,
stem_type=config.stem_type,
avg_down=config.avg_down,
)
def forward(self, tensor):
return self.model.forward_features(tensor)
```
์ด๋ฏธ์ง ๋ถ๋ฅ ๋ชจ๋ธ์ ๋ง๋ค๊ธฐ ์ํด์๋ forward ๋ฉ์๋๋ง ๋ณ๊ฒฝํ๋ฉด ๋ฉ๋๋ค:
```py
import torch
class ResnetModelForImageClassification(PreTrainedModel):
config_class = ResnetConfig
def __init__(self, config):
super().__init__(config)
block_layer = BLOCK_MAPPING[config.block_type]
self.model = ResNet(
block_layer,
config.layers,
num_classes=config.num_classes,
in_chans=config.input_channels,
cardinality=config.cardinality,
base_width=config.base_width,
stem_width=config.stem_width,
stem_type=config.stem_type,
avg_down=config.avg_down,
)
def forward(self, tensor, labels=None):
logits = self.model(tensor)
if labels is not None:
loss = torch.nn.functional.cross_entropy(logits, labels)
return {"loss": loss, "logits": logits}
return {"logits": logits}
```
๋ ๊ฒฝ์ฐ ๋ชจ๋ `PreTrainedModel`๋ฅผ ์์๋ฐ๊ณ , `config`๋ฅผ ํตํด ์์ ํด๋์ค ์ด๊ธฐํ๋ฅผ ํธ์ถํ๋ค๋ ์ ์ ๊ธฐ์ตํ์ธ์ (์ผ๋ฐ์ ์ธ `torch.nn.Module`์ ์์ฑํ ๋์ ๋น์ทํจ).
๋ชจ๋ธ์ auto ํด๋์ค์ ๋ฑ๋กํ๊ณ ์ถ์ ๊ฒฝ์ฐ์๋ `config_class`๋ฅผ ์ค์ ํ๋ ๋ถ๋ถ์ด ํ์์
๋๋ค (๋ง์ง๋ง ์น์
์ฐธ์กฐ).
<Tip>
๋ผ์ด๋ธ๋ฌ๋ฆฌ์ ์กด์ฌํ๋ ๋ชจ๋ธ๊ณผ ๊ต์ฅํ ์ ์ฌํ๋ค๋ฉด, ๋ชจ๋ธ์ ์์ฑํ ๋ ๊ตฌ์ฑ์ ์ฐธ์กฐํด ์ฌ์ฌ์ฉํ ์ ์์ต๋๋ค.
</Tip>
์ํ๋ ๊ฒ์ ๋ชจ๋ธ์ด ๋ฐํํ๋๋ก ํ ์ ์์ง๋ง, `ResnetModelForImageClassification`์์ ํ๋ ๊ฒ ์ฒ๋ผ
๋ ์ด๋ธ์ ํต๊ณผ์์ผฐ์ ๋ ์์ค๊ณผ ํจ๊ป ์ฌ์ ํํ๋ก ๋ฐํํ๋ ๊ฒ์ด [`Trainer`] ํด๋์ค ๋ด์์ ์ง์ ๋ชจ๋ธ์ ์ฌ์ฉํ๊ธฐ์ ์ ์ฉํฉ๋๋ค.
์์ ๋ง์ ํ์ต ๋ฃจํ ๋๋ ๋ค๋ฅธ ํ์ต ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์ฌ์ฉํ ๊ณํ์ด๋ผ๋ฉด ๋ค๋ฅธ ์ถ๋ ฅ ํ์์ ์ฌ์ฉํด๋ ์ข์ต๋๋ค.
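For example, a minimal (illustrative) sketch of training this custom model with [`Trainer`] could look as follows — `train_dataset` is a hypothetical dataset whose items are dicts with `tensor` and `labels` keys matching the `forward` signature above:

```py
from transformers import Trainer, TrainingArguments

training_args = TrainingArguments(output_dir="resnet50d-finetuned", per_device_train_batch_size=8)

trainer = Trainer(
    model=resnet50d,
    args=training_args,
    train_dataset=train_dataset,  # hypothetical: items like {"tensor": ..., "labels": ...}
)
trainer.train()
```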
์ด์ ๋ชจ๋ธ ํด๋์ค๊ฐ ์์ผ๋ฏ๋ก ํ๋ ์์ฑํด ๋ณด๊ฒ ์ต๋๋ค:
```py
resnet50d = ResnetModelForImageClassification(resnet50d_config)
```
๋ค์ ๋งํ์ง๋ง, [`~PreTrainedModel.save_pretrained`]๋๋ [`~PreTrainedModel.push_to_hub`]์ฒ๋ผ [`PreTrainedModel`]์ ์ํ๋ ๋ชจ๋ ๋ฉ์๋๋ฅผ ์ฌ์ฉํ ์ ์์ต๋๋ค.
๋ค์ ์น์
์์ ๋ ๋ฒ์งธ ๋ฉ์๋๋ฅผ ์ฌ์ฉํด ๋ชจ๋ธ ์ฝ๋์ ๋ชจ๋ธ ๊ฐ์ค์น๋ฅผ ์
๋ก๋ํ๋ ๋ฐฉ๋ฒ์ ์ดํด๋ณด๊ฒ ์ต๋๋ค.
๋จผ์ , ๋ชจ๋ธ ๋ด๋ถ์ ์ฌ์ ํ๋ จ๋ ๊ฐ์ค์น๋ฅผ ๋ก๋ํด ๋ณด๊ฒ ์ต๋๋ค.
์ด ์์ ๋ฅผ ํ์ฉํ ๋๋, ์ฌ์ฉ์ ์ ์ ๋ชจ๋ธ์ ์์ ๋ง์ ๋ฐ์ดํฐ๋ก ํ์ต์ํฌ ๊ฒ์
๋๋ค.
์ด ํํ ๋ฆฌ์ผ์์๋ ๋น ๋ฅด๊ฒ ์งํํ๊ธฐ ์ํด ์ฌ์ ํ๋ จ๋ resnet50d๋ฅผ ์ฌ์ฉํ๊ฒ ์ต๋๋ค.
์๋ ๋ชจ๋ธ์ resnet50d์ ๋ํผ์ด๊ธฐ ๋๋ฌธ์, ๊ฐ์ค์น๋ฅผ ์ฝ๊ฒ ๋ก๋ํ ์ ์์ต๋๋ค.
```py
import timm
pretrained_model = timm.create_model("resnet50d", pretrained=True)
resnet50d.model.load_state_dict(pretrained_model.state_dict())
```
์ด์ [`~PreTrainedModel.save_pretrained`] ๋๋ [`~PreTrainedModel.push_to_hub`]๋ฅผ ์ฌ์ฉํ ๋ ๋ชจ๋ธ ์ฝ๋๊ฐ ์ ์ฅ๋๋์ง ํ์ธํด๋ด
์๋ค.
## Hub๋ก ์ฝ๋ ์
๋ก๋ํ๊ธฐ[[sending-the-code-to-the-hub]]
<Tip warning={true}>
์ด API๋ ์คํ์ ์ด๋ฉฐ ๋ค์ ๋ฆด๋ฆฌ์ค์์ ์ฝ๊ฐ์ ๋ณ๊ฒฝ ์ฌํญ์ด ์์ ์ ์์ต๋๋ค.
</Tip>
๋จผ์ ๋ชจ๋ธ์ด `.py` ํ์ผ์ ์์ ํ ์ ์๋์ด ์๋์ง ํ์ธํ์ธ์.
๋ชจ๋ ํ์ผ์ด ๋์ผํ ์์
๊ฒฝ๋ก์ ์๊ธฐ ๋๋ฌธ์ ์๋๊ฒฝ๋ก ์ํฌํธ(relative import)์ ์์กดํ ์ ์์ต๋๋ค (transformers์์๋ ์ด ๊ธฐ๋ฅ์ ๋ํ ํ์ ๋ชจ๋์ ์ง์ํ์ง ์์ต๋๋ค).
์ด ์์์์๋ ์์
๊ฒฝ๋ก ์์ `resnet_model`์์ `modeling_resnet.py` ํ์ผ๊ณผ `configuration_resnet.py` ํ์ผ์ ์ ์ํฉ๋๋ค.
๊ตฌ์ฑ ํ์ผ์๋ `ResnetConfig`์ ๋ํ ์ฝ๋๊ฐ ์๊ณ ๋ชจ๋ธ๋ง ํ์ผ์๋ `ResnetModel` ๋ฐ `ResnetModelForImageClassification`์ ๋ํ ์ฝ๋๊ฐ ์์ต๋๋ค.
```
.
โโโ resnet_model
โโโ __init__.py
โโโ configuration_resnet.py
โโโ modeling_resnet.py
```
Python์ด `resnet_model`์ ๋ชจ๋๋ก ์ฌ์ฉํ ์ ์๋๋ก ๊ฐ์งํ๋ ๋ชฉ์ ์ด๊ธฐ ๋๋ฌธ์ `__init__.py`๋ ๋น์ด ์์ ์ ์์ต๋๋ค.
<Tip warning={true}>
๋ผ์ด๋ธ๋ฌ๋ฆฌ์์ ๋ชจ๋ธ๋ง ํ์ผ์ ๋ณต์ฌํ๋ ๊ฒฝ์ฐ,
๋ชจ๋ ํ์ผ ์๋จ์ ์๋ ์๋ ๊ฒฝ๋ก ์ํฌํธ(relative import) ๋ถ๋ถ์ `transformers` ํจํค์ง์์ ์ํฌํธ ํ๋๋ก ๋ณ๊ฒฝํด์ผ ํฉ๋๋ค.
</Tip>
๊ธฐ์กด ๊ตฌ์ฑ์ด๋ ๋ชจ๋ธ์ ์ฌ์ฌ์ฉ(๋๋ ์๋ธ ํด๋์คํ)ํ ์ ์์ต๋๋ค.
์ปค๋ฎค๋ํฐ์ ๋ชจ๋ธ์ ๊ณต์ ํ๊ธฐ ์ํด์๋ ๋ค์ ๋จ๊ณ๋ฅผ ๋ฐ๋ผ์ผ ํฉ๋๋ค:
๋จผ์ , ์๋ก ๋ง๋ ํ์ผ์ ResNet ๋ชจ๋ธ๊ณผ ๊ตฌ์ฑ์ ์ํฌํธํฉ๋๋ค:
```py
from resnet_model.configuration_resnet import ResnetConfig
from resnet_model.modeling_resnet import ResnetModel, ResnetModelForImageClassification
```
๋ค์์ผ๋ก `save_pretrained` ๋ฉ์๋๋ฅผ ์ฌ์ฉํด ํด๋น ๊ฐ์ฒด์ ์ฝ๋ ํ์ผ์ ๋ณต์ฌํ๊ณ ,
๋ณต์ฌํ ํ์ผ์ Auto ํด๋์ค๋ก ๋ฑ๋กํ๊ณ (๋ชจ๋ธ์ธ ๊ฒฝ์ฐ) ์คํํฉ๋๋ค:
```py
ResnetConfig.register_for_auto_class()
ResnetModel.register_for_auto_class("AutoModel")
ResnetModelForImageClassification.register_for_auto_class("AutoModelForImageClassification")
```
`configuration`์ ๋ํ auto ํด๋์ค๋ฅผ ์ง์ ํ ํ์๋ ์์ง๋ง(`configuration` ๊ด๋ จ auto ํด๋์ค๋ AutoConfig ํด๋์ค ํ๋๋ง ์์), ๋ชจ๋ธ์ ๊ฒฝ์ฐ์๋ ์ง์ ํด์ผ ํฉ๋๋ค.
์ฌ์ฉ์ ์ง์ ๋ชจ๋ธ์ ๋ค์ํ ์์
์ ์ ํฉํ ์ ์์ผ๋ฏ๋ก, ๋ชจ๋ธ์ ๋ง๋ auto ํด๋์ค๋ฅผ ์ง์ ํด์ผ ํฉ๋๋ค.
๋ค์์ผ๋ก, ์ด์ ์ ์์
ํ๋ ๊ฒ๊ณผ ๋ง์ฐฌ๊ฐ์ง๋ก ๊ตฌ์ฑ๊ณผ ๋ชจ๋ธ์ ์์ฑํฉ๋๋ค:
```py
resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
resnet50d = ResnetModelForImageClassification(resnet50d_config)
pretrained_model = timm.create_model("resnet50d", pretrained=True)
resnet50d.model.load_state_dict(pretrained_model.state_dict())
```
์ด์ ๋ชจ๋ธ์ Hub๋ก ์
๋ก๋ํ๊ธฐ ์ํด ๋ก๊ทธ์ธ ์ํ์ธ์ง ํ์ธํ์ธ์.
ํฐ๋ฏธ๋์์ ๋ค์ ์ฝ๋๋ฅผ ์คํํด ํ์ธํ ์ ์์ต๋๋ค:
```bash
huggingface-cli login
```
์ฃผํผํฐ ๋
ธํธ๋ถ์ ๊ฒฝ์ฐ์๋ ๋ค์๊ณผ ๊ฐ์ต๋๋ค:
```py
from huggingface_hub import notebook_login
notebook_login()
```
๊ทธ๋ฐ ๋ค์ ์ด๋ ๊ฒ ์์ ์ ๋ค์์คํ์ด์ค(๋๋ ์์ ์ด ์ํ ์กฐ์ง)์ ์
๋ก๋ํ ์ ์์ต๋๋ค:
```py
resnet50d.push_to_hub("custom-resnet50d")
```
json ํ์์ ๋ชจ๋ธ๋ง ๊ฐ์ค์น์ ๊ตฌ์ฑ ์ธ์๋ `custom-resnet50d` ํด๋ ์์ ๋ชจ๋ธ๋ง๊ณผ ๊ตฌ์ฑ `.py` ํ์ผ์ ๋ณต์ฌํํด Hub์ ์
๋ก๋ํฉ๋๋ค.
[๋ชจ๋ธ ์ ์ฅ์](https://huggingface.co/sgugger/custom-resnet50d)์์ ๊ฒฐ๊ณผ๋ฅผ ํ์ธํ ์ ์์ต๋๋ค.
[sharing tutorial](model_sharing) ๋ฌธ์์ `push_to_hub` ๋ฉ์๋์์ ์์ธํ ๋ด์ฉ์ ํ์ธํ ์ ์์ต๋๋ค.
## ์ฌ์ฉ์ ์ ์ ์ฝ๋๋ก ๋ชจ๋ธ ์ฌ์ฉํ๊ธฐ[[using-a-model-with-custom-code]]
auto ํด๋์ค์ `from_pretrained` ๋ฉ์๋๋ฅผ ์ฌ์ฉํ์ฌ ์ฌ์ฉ์ ์ง์ ์ฝ๋ ํ์ผ๊ณผ ํจ๊ป ๋ชจ๋ ๊ตฌ์ฑ, ๋ชจ๋ธ, ํ ํฌ๋์ด์ ๋ฅผ ์ฌ์ฉํ ์ ์์ต๋๋ค.
Hub์ ์
๋ก๋๋ ๋ชจ๋ ํ์ผ ๋ฐ ์ฝ๋๋ ๋ฉ์จ์ด๊ฐ ์๋์ง ๊ฒ์ฌ๋์ง๋ง (์์ธํ ๋ด์ฉ์ [Hub ๋ณด์](https://huggingface.co/docs/hub/security#malware-scanning) ์ค๋ช
์ฐธ์กฐ),
์์ ์ ์ปดํจํฐ์์ ๋ชจ๋ธ ์ฝ๋์ ์์ฑ์๊ฐ ์
์ฑ ์ฝ๋๋ฅผ ์คํํ์ง ์๋์ง ํ์ธํด์ผ ํฉ๋๋ค.
์ฌ์ฉ์ ์ ์ ์ฝ๋๋ก ๋ชจ๋ธ์ ์ฌ์ฉํ๋ ค๋ฉด `trust_remote_code=True`๋ก ์ค์ ํ์ธ์:
```py
from transformers import AutoModelForImageClassification
model = AutoModelForImageClassification.from_pretrained("sgugger/custom-resnet50d", trust_remote_code=True)
```
๋ชจ๋ธ ์์ฑ์๊ฐ ์
์์ ์ผ๋ก ์ฝ๋๋ฅผ ์
๋ฐ์ดํธํ์ง ์์๋ค๋ ์ ์ ํ์ธํ๊ธฐ ์ํด, ์ปค๋ฐ ํด์(commit hash)๋ฅผ `revision`์ผ๋ก ์ ๋ฌํ๋ ๊ฒ๋ ๊ฐ๋ ฅํ ๊ถ์ฅ๋ฉ๋๋ค (๋ชจ๋ธ ์์ฑ์๋ฅผ ์์ ํ ์ ๋ขฐํ์ง ์๋ ๊ฒฝ์ฐ).
```py
commit_hash = "ed94a7c6247d8aedce4647f00f20de6875b5b292"
model = AutoModelForImageClassification.from_pretrained(
"sgugger/custom-resnet50d", trust_remote_code=True, revision=commit_hash
)
```
Hub์์ ๋ชจ๋ธ ์ ์ฅ์์ ์ปค๋ฐ ๊ธฐ๋ก์ ์ฐพ์๋ณผ ๋, ๋ชจ๋ ์ปค๋ฐ์ ์ปค๋ฐ ํด์๋ฅผ ์ฝ๊ฒ ๋ณต์ฌํ ์ ์๋ ๋ฒํผ์ด ์์ต๋๋ค.
## ์ฌ์ฉ์ ์ ์ ์ฝ๋๋ก ๋ง๋ ๋ชจ๋ธ์ auto ํด๋์ค๋ก ๋ฑ๋กํ๊ธฐ[[registering-a-model-with-custom-code-to-the-auto-classes]]
๐ค Transformers๋ฅผ ์์ํ๋ ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์์ฑํ๋ ๊ฒฝ์ฐ ์ฌ์ฉ์ ์ ์ ๋ชจ๋ธ์ auto ํด๋์ค์ ์ถ๊ฐํ ์ ์์ต๋๋ค.
์ฌ์ฉ์ ์ ์ ๋ชจ๋ธ์ ์ฌ์ฉํ๊ธฐ ์ํด ํด๋น ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์ํฌํธํด์ผ ํ๊ธฐ ๋๋ฌธ์, ์ด๋ Hub๋ก ์ฝ๋๋ฅผ ์
๋ก๋ํ๋ ๊ฒ๊ณผ ๋ค๋ฆ
๋๋ค (Hub์์ ์๋์ ์ผ๋ก ๋ชจ๋ธ ์ฝ๋๋ฅผ ๋ค์ด๋ก๋ ํ๋ ๊ฒ๊ณผ ๋ฐ๋).
๊ตฌ์ฑ์ ๊ธฐ์กด ๋ชจ๋ธ ์ ํ๊ณผ ๋ค๋ฅธ `model_type` ์์ฑ์ด ์๊ณ ๋ชจ๋ธ ํด๋์ค์ ์ฌ๋ฐ๋ฅธ `config_class` ์์ฑ์ด ์๋ ํ,
๋ค์๊ณผ ๊ฐ์ด auto ํด๋์ค์ ์ถ๊ฐํ ์ ์์ต๋๋ค:
```py
from transformers import AutoConfig, AutoModel, AutoModelForImageClassification
AutoConfig.register("resnet", ResnetConfig)
AutoModel.register(ResnetConfig, ResnetModel)
AutoModelForImageClassification.register(ResnetConfig, ResnetModelForImageClassification)
```
์ฌ์ฉ์ ์ ์ ๊ตฌ์ฑ์ [`AutoConfig`]์ ๋ฑ๋กํ ๋ ์ฌ์ฉ๋๋ ์ฒซ ๋ฒ์งธ ์ธ์๋ ์ฌ์ฉ์ ์ ์ ๊ตฌ์ฑ์ `model_type`๊ณผ ์ผ์นํด์ผ ํฉ๋๋ค.
๋ํ, ์ฌ์ฉ์ ์ ์ ๋ชจ๋ธ์ auto ํด๋์ค์ ๋ฑ๋กํ ๋ ์ฌ์ฉ๋๋ ์ฒซ ๋ฒ์งธ ์ธ์๋ ํด๋น ๋ชจ๋ธ์ `config_class`์ ์ผ์นํด์ผ ํฉ๋๋ค. | transformers/docs/source/ko/custom_models.md/0 | {
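Once registered, the auto classes can instantiate the custom model directly from its configuration — for example (illustrative):

```py
from transformers import AutoModelForImageClassification

resnet50d_config = ResnetConfig(block_type="bottleneck", stem_width=32, stem_type="deep", avg_down=True)
model = AutoModelForImageClassification.from_config(resnet50d_config)
```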
"file_path": "transformers/docs/source/ko/custom_models.md",
"repo_id": "transformers",
"token_count": 10729
} | 270 |