# Copyright (c) OpenMMLab. All rights reserved.
from typing import Optional, Sequence, Union
from mmcv.cnn import build_conv_layer, build_upsample_layer
from torch import nn
from mmpose.registry import KEYPOINT_CODECS, MODELS
from mmpose.utils.typing import ConfigType, OptConfigType
from .heatmap_head import HeatmapHead
OptIntSeq = Optional[Sequence[int]]
@MODELS.register_module()
class ViPNASHead(HeatmapHead):
"""ViPNAS heatmap head introduced in `ViPNAS`_ by Xu et al (2021). The head
is composed of a few deconvolutional layers followed by a convolutional
layer to generate heatmaps from low-resolution feature maps. Specifically,
    different from the :class:`HeatmapHead` introduced by `Simple Baselines`_,
the group numbers in the deconvolutional layers are elastic and thus can be
optimized by neural architecture search (NAS).
Args:
in_channels (int | Sequence[int]): Number of channels in the input
feature map
out_channels (int): Number of channels in the output heatmap
deconv_out_channels (Sequence[int], optional): The output channel
number of each deconv layer. Defaults to ``(144, 144, 144)``
deconv_kernel_sizes (Sequence[int | tuple], optional): The kernel size
of each deconv layer. Each element should be either an integer for
both height and width dimensions, or a tuple of two integers for
            the height and the width dimension respectively. Defaults to
``(4, 4, 4)``
deconv_num_groups (Sequence[int], optional): The group number of each
deconv layer. Defaults to ``(16, 16, 16)``
conv_out_channels (Sequence[int], optional): The output channel number
of each intermediate conv layer. ``None`` means no intermediate
conv layer between deconv layers and the final conv layer.
Defaults to ``None``
conv_kernel_sizes (Sequence[int | tuple], optional): The kernel size
of each intermediate conv layer. Defaults to ``None``
final_layer (dict): Arguments of the final Conv2d layer.
Defaults to ``dict(kernel_size=1)``
loss (Config): Config of the keypoint loss. Defaults to use
:class:`KeypointMSELoss`
decoder (Config, optional): The decoder config that controls decoding
keypoint coordinates from the network output. Defaults to ``None``
init_cfg (Config, optional): Config to control the initialization. See
:attr:`default_init_cfg` for default settings
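
    Example:
        A minimal construction sketch; the input channel number (160) and the
        feature-map size used below are illustrative placeholders rather than
        values taken from a released ViPNAS config::

            >>> import torch
            >>> head = ViPNASHead(in_channels=160, out_channels=17)
            >>> feats = (torch.rand(1, 160, 8, 6),)
            >>> heatmaps = head(feats)
            >>> heatmaps.shape
            torch.Size([1, 17, 64, 48])
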
.. _`ViPNAS`: https://arxiv.org/abs/2105.10154
.. _`Simple Baselines`: https://arxiv.org/abs/1804.06208
"""
_version = 2
def __init__(self,
in_channels: Union[int, Sequence[int]],
out_channels: int,
deconv_out_channels: OptIntSeq = (144, 144, 144),
deconv_kernel_sizes: OptIntSeq = (4, 4, 4),
deconv_num_groups: OptIntSeq = (16, 16, 16),
conv_out_channels: OptIntSeq = None,
conv_kernel_sizes: OptIntSeq = None,
final_layer: dict = dict(kernel_size=1),
loss: ConfigType = dict(
type='KeypointMSELoss', use_target_weight=True),
decoder: OptConfigType = None,
init_cfg: OptConfigType = None):
if init_cfg is None:
init_cfg = self.default_init_cfg
super(HeatmapHead, self).__init__(init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
self.loss_module = MODELS.build(loss)
if decoder is not None:
self.decoder = KEYPOINT_CODECS.build(decoder)
else:
self.decoder = None
if deconv_out_channels:
if deconv_kernel_sizes is None or len(deconv_out_channels) != len(
deconv_kernel_sizes):
raise ValueError(
'"deconv_out_channels" and "deconv_kernel_sizes" should '
'be integer sequences with the same length. Got '
f'mismatched lengths {deconv_out_channels} and '
f'{deconv_kernel_sizes}')
if deconv_num_groups is None or len(deconv_out_channels) != len(
deconv_num_groups):
raise ValueError(
'"deconv_out_channels" and "deconv_num_groups" should '
'be integer sequences with the same length. Got '
f'mismatched lengths {deconv_out_channels} and '
f'{deconv_num_groups}')
self.deconv_layers = self._make_deconv_layers(
in_channels=in_channels,
layer_out_channels=deconv_out_channels,
layer_kernel_sizes=deconv_kernel_sizes,
layer_groups=deconv_num_groups,
)
in_channels = deconv_out_channels[-1]
else:
self.deconv_layers = nn.Identity()
if conv_out_channels:
if conv_kernel_sizes is None or len(conv_out_channels) != len(
conv_kernel_sizes):
raise ValueError(
'"conv_out_channels" and "conv_kernel_sizes" should '
'be integer sequences with the same length. Got '
f'mismatched lengths {conv_out_channels} and '
f'{conv_kernel_sizes}')
self.conv_layers = self._make_conv_layers(
in_channels=in_channels,
layer_out_channels=conv_out_channels,
layer_kernel_sizes=conv_kernel_sizes)
in_channels = conv_out_channels[-1]
else:
self.conv_layers = nn.Identity()
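        # The final layer defaults to a 1x1 conv mapping to ``out_channels``;
        # any options given in ``final_layer`` override these defaults via
        # ``cfg.update(final_layer)``.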
if final_layer is not None:
cfg = dict(
type='Conv2d',
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1)
cfg.update(final_layer)
self.final_layer = build_conv_layer(cfg)
else:
self.final_layer = nn.Identity()
# Register the hook to automatically convert old version state dicts
self._register_load_state_dict_pre_hook(self._load_state_dict_pre_hook)
def _make_deconv_layers(self, in_channels: int,
layer_out_channels: Sequence[int],
layer_kernel_sizes: Sequence[int],
layer_groups: Sequence[int]) -> nn.Module:
"""Create deconvolutional layers by given parameters."""
layers = []
for out_channels, kernel_size, groups in zip(layer_out_channels,
layer_kernel_sizes,
layer_groups):
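            # Choose padding/output_padding so that a stride-2 deconv exactly
            # doubles the spatial size for each supported kernel size:
            # out = (in - 1) * 2 - 2 * padding + kernel_size + output_padding
            #     = 2 * in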
if kernel_size == 4:
padding = 1
output_padding = 0
elif kernel_size == 3:
padding = 1
output_padding = 1
elif kernel_size == 2:
padding = 0
output_padding = 0
else:
                raise ValueError(f'Unsupported kernel size {kernel_size} for '
                                 'deconvolutional layers in '
                                 f'{self.__class__.__name__}')
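            # Each stage is deconv -> BN -> ReLU; ``groups`` is the grouped
            # deconvolution group number that NAS can tune.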
cfg = dict(
type='deconv',
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
groups=groups,
stride=2,
padding=padding,
output_padding=output_padding,
bias=False)
layers.append(build_upsample_layer(cfg))
layers.append(nn.BatchNorm2d(num_features=out_channels))
layers.append(nn.ReLU(inplace=True))
in_channels = out_channels
return nn.Sequential(*layers)