# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/coarse_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule, Linear
from mmcv.runner import ModuleList, auto_fp16
from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class CoarseMaskHead(FCNMaskHead):
"""Coarse mask head used in PointRend.
    Compared with standard ``FCNMaskHead``, ``CoarseMaskHead`` will downsample
    the input feature map instead of upsampling it.
Args:
num_convs (int): Number of conv layers in the head. Default: 0.
num_fcs (int): Number of fc layers in the head. Default: 2.
fc_out_channels (int): Number of output channels of fc layer.
Default: 1024.
downsample_factor (int): The factor that feature map is downsampled by.
Default: 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=0,
num_fcs=2,
fc_out_channels=1024,
downsample_factor=2,
init_cfg=dict(
type='Xavier',
override=[
dict(name='fcs'),
dict(type='Constant', val=0.001, name='fc_logits')
]),
*arg,
**kwarg):
super(CoarseMaskHead, self).__init__(
*arg,
num_convs=num_convs,
upsample_cfg=dict(type=None),
init_cfg=None,
**kwarg)
self.init_cfg = init_cfg
self.num_fcs = num_fcs
assert self.num_fcs > 0
self.fc_out_channels = fc_out_channels
self.downsample_factor = downsample_factor
assert self.downsample_factor >= 1
# remove conv_logit
delattr(self, 'conv_logits')
if downsample_factor > 1:
downsample_in_channels = (
self.conv_out_channels
if self.num_convs > 0 else self.in_channels)
self.downsample_conv = ConvModule(
downsample_in_channels,
self.conv_out_channels,
kernel_size=downsample_factor,
stride=downsample_factor,
padding=0,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
else:
self.downsample_conv = None
self.output_size = (self.roi_feat_size[0] // downsample_factor,
self.roi_feat_size[1] // downsample_factor)
self.output_area = self.output_size[0] * self.output_size[1]
last_layer_dim = self.conv_out_channels * self.output_area
self.fcs = ModuleList()
for i in range(num_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
output_channels = self.num_classes * self.output_area
self.fc_logits = Linear(last_layer_dim, output_channels)
def init_weights(self):
super(FCNMaskHead, self).init_weights()
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.downsample_conv is not None:
x = self.downsample_conv(x)
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_pred = self.fc_logits(x).view(
x.size(0), self.num_classes, *self.output_size)
return mask_pred
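

# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal forward pass, assuming mmdet/mmcv are installed and the defaults are
# kept (in_channels=256, roi_feat_size=14, num_classes=80, downsample_factor=2):
# the head maps (num_rois, 256, 14, 14) RoI features to (num_rois, 80, 7, 7)
# coarse mask logits through fully connected layers instead of upsampling.
if __name__ == '__main__':
    import torch
    head = CoarseMaskHead(num_classes=80)
    roi_feats = torch.rand(4, 256, 14, 14)  # 4 sampled RoIs
    coarse_pred = head(roi_feats)
    assert coarse_pred.shape == (4, 80, 7, 7)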

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/dynamic_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import auto_fp16, force_fp32
from mmdet.core import mask_target
from mmdet.models.builder import HEADS
from mmdet.models.dense_heads.atss_head import reduce_mean
from mmdet.models.utils import build_transformer
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class DynamicMaskHead(FCNMaskHead):
r"""Dynamic Mask Head for
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_
Args:
num_convs (int): Number of convolution layer.
Defaults to 4.
roi_feat_size (int): The output size of RoI extractor,
Defaults to 14.
in_channels (int): Input feature channels.
Defaults to 256.
conv_kernel_size (int): Kernel size of convolution layers.
Defaults to 3.
conv_out_channels (int): Output channels of convolution layers.
Defaults to 256.
num_classes (int): Number of classes.
            Defaults to 80.
        class_agnostic (bool): Whether to generate class-agnostic predictions.
            Defaults to False.
        dropout (float): Probability of dropping a channel.
            Defaults to 0.0.
upsample_cfg (dict): The config for upsample layer.
conv_cfg (dict): The convolution layer config.
norm_cfg (dict): The norm layer config.
dynamic_conv_cfg (dict): The dynamic convolution layer config.
loss_mask (dict): The config for mask loss.
"""
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
num_classes=80,
class_agnostic=False,
upsample_cfg=dict(type='deconv', scale_factor=2),
conv_cfg=None,
norm_cfg=None,
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=14,
with_proj=False,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_mask=dict(type='DiceLoss', loss_weight=8.0),
**kwargs):
super(DynamicMaskHead, self).__init__(
num_convs=num_convs,
roi_feat_size=roi_feat_size,
in_channels=in_channels,
conv_kernel_size=conv_kernel_size,
conv_out_channels=conv_out_channels,
num_classes=num_classes,
class_agnostic=class_agnostic,
upsample_cfg=upsample_cfg,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
loss_mask=loss_mask,
**kwargs)
assert class_agnostic is False, \
'DynamicMaskHead only support class_agnostic=False'
self.fp16_enabled = False
self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)
def init_weights(self):
"""Use xavier initialization for all weight parameter and set
classification head bias as a specific value when use focal loss."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
nn.init.constant_(self.conv_logits.bias, 0.)
@auto_fp16()
def forward(self, roi_feat, proposal_feat):
"""Forward function of DynamicMaskHead.
Args:
roi_feat (Tensor): Roi-pooling features with shape
(batch_size*num_proposals, feature_dimensions,
pooling_h , pooling_w).
            proposal_feat (Tensor): Intermediate feature obtained from
                the DII head in the last stage, has shape
(batch_size*num_proposals, feature_dimensions)
Returns:
mask_pred (Tensor): Predicted foreground masks with shape
(batch_size*num_proposals, num_classes,
pooling_h*2, pooling_w*2).
"""
proposal_feat = proposal_feat.reshape(-1, self.in_channels)
proposal_feat_iic = self.instance_interactive_conv(
proposal_feat, roi_feat)
x = proposal_feat_iic.permute(0, 2, 1).reshape(roi_feat.size())
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels):
num_pos = labels.new_ones(labels.size()).float().sum()
avg_factor = torch.clamp(reduce_mean(num_pos), min=1.).item()
loss = dict()
if mask_pred.size(0) == 0:
loss_mask = mask_pred.sum()
else:
loss_mask = self.loss_mask(
mask_pred[torch.arange(num_pos).long(), labels, ...].sigmoid(),
mask_targets,
avg_factor=avg_factor)
loss['loss_mask'] = loss_mask
return loss
def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
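

# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal forward pass, assuming mmdet/mmcv are installed and the default config
# above is used: each proposal (query) feature modulates its RoI feature through
# DynamicConv, and the deconv upsampling doubles the spatial size, so
# (N, 256, 14, 14) RoI features plus (N, 256) proposal features yield
# (N, 80, 28, 28) mask logits.
if __name__ == '__main__':
    head = DynamicMaskHead()
    roi_feat = torch.rand(8, 256, 14, 14)  # batch_size * num_proposals = 8
    proposal_feat = torch.rand(8, 256)  # query features from the DII head
    mask_pred = head(roi_feat, proposal_feat)
    assert mask_pred.shape == (8, 80, 28, 28)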

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from warnings import warn
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, build_conv_layer, build_upsample_layer
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import BaseModule, ModuleList, auto_fp16, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.core import mask_target
from mmdet.models.builder import HEADS, build_loss
BYTES_PER_FLOAT = 4
# TODO: This memory limit may be too much or too little. It would be better to
# determine it based on available resources.
GPU_MEM_LIMIT = 1024**3 # 1 GB memory limit
@HEADS.register_module()
class FCNMaskHead(BaseModule):
def __init__(self,
num_convs=4,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
conv_out_channels=256,
num_classes=80,
class_agnostic=False,
upsample_cfg=dict(type='deconv', scale_factor=2),
conv_cfg=None,
norm_cfg=None,
predictor_cfg=dict(type='Conv'),
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(FCNMaskHead, self).__init__(init_cfg)
self.upsample_cfg = upsample_cfg.copy()
if self.upsample_cfg['type'] not in [
None, 'deconv', 'nearest', 'bilinear', 'carafe'
]:
raise ValueError(
f'Invalid upsample method {self.upsample_cfg["type"]}, '
'accepted methods are "deconv", "nearest", "bilinear", '
'"carafe"')
self.num_convs = num_convs
# WARN: roi_feat_size is reserved and not used
self.roi_feat_size = _pair(roi_feat_size)
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.conv_out_channels = conv_out_channels
self.upsample_method = self.upsample_cfg.get('type')
self.scale_factor = self.upsample_cfg.pop('scale_factor', None)
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.predictor_cfg = predictor_cfg
self.fp16_enabled = False
self.loss_mask = build_loss(loss_mask)
self.convs = ModuleList()
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg))
upsample_in_channels = (
self.conv_out_channels if self.num_convs > 0 else in_channels)
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample_method is None:
self.upsample = None
elif self.upsample_method == 'deconv':
upsample_cfg_.update(
in_channels=upsample_in_channels,
out_channels=self.conv_out_channels,
kernel_size=self.scale_factor,
stride=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
elif self.upsample_method == 'carafe':
upsample_cfg_.update(
channels=upsample_in_channels, scale_factor=self.scale_factor)
self.upsample = build_upsample_layer(upsample_cfg_)
else:
# suppress warnings
align_corners = (None
if self.upsample_method == 'nearest' else False)
upsample_cfg_.update(
scale_factor=self.scale_factor,
mode=self.upsample_method,
align_corners=align_corners)
self.upsample = build_upsample_layer(upsample_cfg_)
out_channels = 1 if self.class_agnostic else self.num_classes
logits_in_channel = (
self.conv_out_channels
if self.upsample_method == 'deconv' else upsample_in_channels)
self.conv_logits = build_conv_layer(self.predictor_cfg,
logits_in_channel, out_channels, 1)
self.relu = nn.ReLU(inplace=True)
self.debug_imgs = None
def init_weights(self):
super(FCNMaskHead, self).init_weights()
for m in [self.upsample, self.conv_logits]:
if m is None:
continue
elif isinstance(m, CARAFEPack):
m.init_weights()
elif hasattr(m, 'weight') and hasattr(m, 'bias'):
nn.init.kaiming_normal_(
m.weight, mode='fan_out', nonlinearity='relu')
nn.init.constant_(m.bias, 0)
@auto_fp16()
def forward(self, x):
for conv in self.convs:
x = conv(x)
if self.upsample is not None:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
return mask_pred
def get_targets(self, sampling_results, gt_masks, rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
mask_targets = mask_target(pos_proposals, pos_assigned_gt_inds,
gt_masks, rcnn_train_cfg)
return mask_targets
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, mask_targets, labels):
"""
Example:
>>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
>>> N = 7 # N = number of extracted ROIs
>>> C, H, W = 11, 32, 32
>>> # Create example instance of FCN Mask Head.
>>> # There are lots of variations depending on the configuration
>>> self = FCNMaskHead(num_classes=C, num_convs=1)
>>> inputs = torch.rand(N, self.in_channels, H, W)
>>> mask_pred = self.forward(inputs)
>>> sf = self.scale_factor
>>> labels = torch.randint(0, C, size=(N,))
>>> # With the default properties the mask targets should indicate
>>> # a (potentially soft) single-class label
>>> mask_targets = torch.rand(N, H * sf, W * sf)
>>> loss = self.loss(mask_pred, mask_targets, labels)
>>> print('loss = {!r}'.format(loss))
"""
loss = dict()
if mask_pred.size(0) == 0:
loss_mask = mask_pred.sum()
else:
if self.class_agnostic:
loss_mask = self.loss_mask(mask_pred, mask_targets,
torch.zeros_like(labels))
else:
loss_mask = self.loss_mask(mask_pred, mask_targets, labels)
loss['loss_mask'] = loss_mask
return loss
def get_seg_masks(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
ori_shape, scale_factor, rescale):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor or ndarray): shape (n, #class, h, w).
For single-scale testing, mask_pred is the direct output of
model, whose type is Tensor, while for multi-scale testing,
it will be converted to numpy array outside of this method.
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape (Tuple): original image height and width, shape (2,)
            scale_factor (ndarray | Tensor): If ``rescale is True``, box
coordinates are divided by this scale factor to fit
``ori_shape``.
rescale (bool): If True, the resulting masks will be rescaled to
``ori_shape``.
Returns:
list[list]: encoded masks. The c-th item in the outer list
corresponds to the c-th class. Given the c-th outer list, the
i-th item in that inner list is the mask for the i-th box with
class label c.
Example:
>>> import mmcv
>>> from mmdet.models.roi_heads.mask_heads.fcn_mask_head import * # NOQA
>>> N = 7 # N = number of extracted ROIs
>>> C, H, W = 11, 32, 32
>>> # Create example instance of FCN Mask Head.
>>> self = FCNMaskHead(num_classes=C, num_convs=0)
>>> inputs = torch.rand(N, self.in_channels, H, W)
>>> mask_pred = self.forward(inputs)
>>> # Each input is associated with some bounding box
>>> det_bboxes = torch.Tensor([[1, 1, 42, 42 ]] * N)
>>> det_labels = torch.randint(0, C, size=(N,))
>>> rcnn_test_cfg = mmcv.Config({'mask_thr_binary': 0, })
>>> ori_shape = (H * 4, W * 4)
>>> scale_factor = torch.FloatTensor((1, 1))
>>> rescale = False
>>> # Encoded masks are a list for each category.
>>> encoded_masks = self.get_seg_masks(
>>> mask_pred, det_bboxes, det_labels, rcnn_test_cfg, ori_shape,
>>> scale_factor, rescale
>>> )
>>> assert len(encoded_masks) == C
>>> assert sum(list(map(len, encoded_masks))) == N
"""
if isinstance(mask_pred, torch.Tensor):
mask_pred = mask_pred.sigmoid()
else:
            # In AugTest, mask_pred has already been activated (sigmoid applied)
mask_pred = det_bboxes.new_tensor(mask_pred)
device = mask_pred.device
cls_segms = [[] for _ in range(self.num_classes)
] # BG is not included in num_classes
bboxes = det_bboxes[:, :4]
labels = det_labels
# In most cases, scale_factor should have been
# converted to Tensor when rescale the bbox
if not isinstance(scale_factor, torch.Tensor):
if isinstance(scale_factor, float):
scale_factor = np.array([scale_factor] * 4)
                warn('Scale_factor should be a Tensor or ndarray '
                     'with shape (4,); passing a float is deprecated. ')
assert isinstance(scale_factor, np.ndarray)
scale_factor = torch.Tensor(scale_factor)
if rescale:
img_h, img_w = ori_shape[:2]
bboxes = bboxes / scale_factor.to(bboxes)
else:
w_scale, h_scale = scale_factor[0], scale_factor[1]
img_h = np.round(ori_shape[0] * h_scale.item()).astype(np.int32)
img_w = np.round(ori_shape[1] * w_scale.item()).astype(np.int32)
N = len(mask_pred)
        # The actual implementation splits the input into chunks,
        # and pastes them chunk by chunk.
if device.type == 'cpu':
# CPU is most efficient when they are pasted one by one with
# skip_empty=True, so that it performs minimal number of
# operations.
num_chunks = N
else:
# GPU benefits from parallelism for larger chunks,
# but may have memory issue
# the types of img_w and img_h are np.int32,
# when the image resolution is large,
# the calculation of num_chunks will overflow.
# so we need to change the types of img_w and img_h to int.
# See https://github.com/open-mmlab/mmdetection/pull/5191
num_chunks = int(
np.ceil(N * int(img_h) * int(img_w) * BYTES_PER_FLOAT /
GPU_MEM_LIMIT))
assert (num_chunks <=
N), 'Default GPU_MEM_LIMIT is too small; try increasing it'
chunks = torch.chunk(torch.arange(N, device=device), num_chunks)
threshold = rcnn_test_cfg.mask_thr_binary
im_mask = torch.zeros(
N,
img_h,
img_w,
device=device,
dtype=torch.bool if threshold >= 0 else torch.uint8)
if not self.class_agnostic:
mask_pred = mask_pred[range(N), labels][:, None]
for inds in chunks:
masks_chunk, spatial_inds = _do_paste_mask(
mask_pred[inds],
bboxes[inds],
img_h,
img_w,
skip_empty=device.type == 'cpu')
if threshold >= 0:
masks_chunk = (masks_chunk >= threshold).to(dtype=torch.bool)
else:
# for visualization and debugging
masks_chunk = (masks_chunk * 255).to(dtype=torch.uint8)
im_mask[(inds, ) + spatial_inds] = masks_chunk
for i in range(N):
cls_segms[labels[i]].append(im_mask[i].detach().cpu().numpy())
return cls_segms
def onnx_export(self, mask_pred, det_bboxes, det_labels, rcnn_test_cfg,
ori_shape, **kwargs):
"""Get segmentation masks from mask_pred and bboxes.
Args:
mask_pred (Tensor): shape (n, #class, h, w).
det_bboxes (Tensor): shape (n, 4/5)
det_labels (Tensor): shape (n, )
rcnn_test_cfg (dict): rcnn testing config
ori_shape (Tuple): original image height and width, shape (2,)
Returns:
Tensor: a mask of shape (N, img_h, img_w).
"""
mask_pred = mask_pred.sigmoid()
bboxes = det_bboxes[:, :4]
labels = det_labels
# No need to consider rescale and scale_factor while exporting to ONNX
img_h, img_w = ori_shape[:2]
threshold = rcnn_test_cfg.mask_thr_binary
if not self.class_agnostic:
box_inds = torch.arange(mask_pred.shape[0])
mask_pred = mask_pred[box_inds, labels][:, None]
masks, _ = _do_paste_mask(
mask_pred, bboxes, img_h, img_w, skip_empty=False)
if threshold >= 0:
# should convert to float to avoid problems in TRT
masks = (masks >= threshold).to(dtype=torch.float)
return masks
def _do_paste_mask(masks, boxes, img_h, img_w, skip_empty=True):
"""Paste instance masks according to boxes.
This implementation is modified from
https://github.com/facebookresearch/detectron2/
Args:
masks (Tensor): N, 1, H, W
boxes (Tensor): N, 4
img_h (int): Height of the image to be pasted.
img_w (int): Width of the image to be pasted.
skip_empty (bool): Only paste masks within the region that
            tightly bounds all boxes, and return the results for this region only.
An important optimization for CPU.
Returns:
tuple: (Tensor, tuple). The first item is mask tensor, the second one
is the slice object.
If skip_empty == False, the whole image will be pasted. It will
return a mask of shape (N, img_h, img_w) and an empty tuple.
If skip_empty == True, only area around the mask will be pasted.
A mask of shape (N, h', w') and its start and end coordinates
in the original image will be returned.
"""
# On GPU, paste all masks together (up to chunk size)
# by using the entire image to sample the masks
# Compared to pasting them one by one,
# this has more operations but is faster on COCO-scale dataset.
device = masks.device
if skip_empty:
x0_int, y0_int = torch.clamp(
boxes.min(dim=0).values.floor()[:2] - 1,
min=0).to(dtype=torch.int32)
x1_int = torch.clamp(
boxes[:, 2].max().ceil() + 1, max=img_w).to(dtype=torch.int32)
y1_int = torch.clamp(
boxes[:, 3].max().ceil() + 1, max=img_h).to(dtype=torch.int32)
else:
x0_int, y0_int = 0, 0
x1_int, y1_int = img_w, img_h
x0, y0, x1, y1 = torch.split(boxes, 1, dim=1) # each is Nx1
N = masks.shape[0]
img_y = torch.arange(y0_int, y1_int, device=device).to(torch.float32) + 0.5
img_x = torch.arange(x0_int, x1_int, device=device).to(torch.float32) + 0.5
img_y = (img_y - y0) / (y1 - y0) * 2 - 1
img_x = (img_x - x0) / (x1 - x0) * 2 - 1
# img_x, img_y have shapes (N, w), (N, h)
# IsInf op is not supported with ONNX<=1.7.0
if not torch.onnx.is_in_onnx_export():
if torch.isinf(img_x).any():
inds = torch.where(torch.isinf(img_x))
img_x[inds] = 0
if torch.isinf(img_y).any():
inds = torch.where(torch.isinf(img_y))
img_y[inds] = 0
gx = img_x[:, None, :].expand(N, img_y.size(1), img_x.size(1))
gy = img_y[:, :, None].expand(N, img_y.size(1), img_x.size(1))
grid = torch.stack([gx, gy], dim=3)
img_masks = F.grid_sample(
masks.to(dtype=torch.float32), grid, align_corners=False)
if skip_empty:
return img_masks[:, 0], (slice(y0_int, y1_int), slice(x0_int, x1_int))
else:
return img_masks[:, 0], ()
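

# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal call to _do_paste_mask, assuming the shapes below: each (1, H, W) mask
# is resampled into image coordinates with grid_sample over the region given by its
# box. With skip_empty=False a single 28x28 mask pasted into a 100x120 image gives
# a (1, 100, 120) tensor and an empty slice tuple.
if __name__ == '__main__':
    masks = torch.rand(1, 1, 28, 28)  # one predicted mask
    boxes = torch.tensor([[10., 20., 70., 90.]])  # x1, y1, x2, y2
    pasted, spatial_inds = _do_paste_mask(
        masks, boxes, img_h=100, img_w=120, skip_empty=False)
    assert pasted.shape == (1, 100, 120)
    assert spatial_inds == ()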

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/feature_relay_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.builder import HEADS
@HEADS.register_module()
class FeatureRelayHead(BaseModule):
"""Feature Relay Head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
        in_channels (int, optional): number of input channels. Default: 1024.
        out_conv_channels (int, optional): number of output channels of the
            relayed feature map. Default: 256.
roi_feat_size (int, optional): roi feat size at box head. Default: 7.
scale_factor (int, optional): scale factor to match roi feat size
at mask head. Default: 2.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels=1024,
out_conv_channels=256,
roi_feat_size=7,
scale_factor=2,
init_cfg=dict(type='Kaiming', layer='Linear')):
super(FeatureRelayHead, self).__init__(init_cfg)
assert isinstance(roi_feat_size, int)
self.in_channels = in_channels
self.out_conv_channels = out_conv_channels
self.roi_feat_size = roi_feat_size
self.out_channels = (roi_feat_size**2) * out_conv_channels
self.scale_factor = scale_factor
self.fp16_enabled = False
self.fc = nn.Linear(self.in_channels, self.out_channels)
self.upsample = nn.Upsample(
scale_factor=scale_factor, mode='bilinear', align_corners=True)
@auto_fp16()
def forward(self, x):
"""Forward function."""
N, in_C = x.shape
if N > 0:
out_C = self.out_conv_channels
out_HW = self.roi_feat_size
x = self.fc(x)
x = x.reshape(N, out_C, out_HW, out_HW)
x = self.upsample(x)
return x
return None
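

# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal forward pass, assuming the defaults above: a (N, 1024) box-head feature
# is projected by the fc layer, reshaped to (N, 256, 7, 7) and bilinearly upsampled
# by scale_factor=2 to the (N, 256, 14, 14) mask-head resolution.
if __name__ == '__main__':
    import torch
    head = FeatureRelayHead()
    box_feats = torch.rand(4, 1024)
    relayed = head(box_feats)
    assert relayed.shape == (4, 256, 14, 14)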

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/fused_semantic_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class FusedSemanticHead(BaseModule):
r"""Multi-level fused semantic segmentation head.
.. code-block:: none
in_1 -> 1x1 conv ---
|
in_2 -> 1x1 conv -- |
||
in_3 -> 1x1 conv - ||
||| /-> 1x1 conv (mask prediction)
in_4 -> 1x1 conv -----> 3x3 convs (*4)
| \-> 1x1 conv (feature)
in_5 -> 1x1 conv ---
""" # noqa: W605
def __init__(self,
num_ins,
fusion_level,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=183,
conv_cfg=None,
norm_cfg=None,
ignore_label=None,
loss_weight=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=0.2),
init_cfg=dict(
type='Kaiming', override=dict(name='conv_logits'))):
super(FusedSemanticHead, self).__init__(init_cfg)
self.num_ins = num_ins
self.fusion_level = fusion_level
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.fp16_enabled = False
self.lateral_convs = nn.ModuleList()
for i in range(self.num_ins):
self.lateral_convs.append(
ConvModule(
self.in_channels,
self.in_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
inplace=False))
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.conv_embedding = ConvModule(
conv_out_channels,
conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.conv_logits = nn.Conv2d(conv_out_channels, self.num_classes, 1)
if ignore_label:
loss_seg['ignore_index'] = ignore_label
if loss_weight:
loss_seg['loss_weight'] = loss_weight
if ignore_label or loss_weight:
warnings.warn('``ignore_label`` and ``loss_weight`` would be '
                          'deprecated soon. Please set ``ignore_index`` and '
'``loss_weight`` in ``loss_seg`` instead.')
self.criterion = build_loss(loss_seg)
@auto_fp16()
def forward(self, feats):
x = self.lateral_convs[self.fusion_level](feats[self.fusion_level])
fused_size = tuple(x.shape[-2:])
for i, feat in enumerate(feats):
if i != self.fusion_level:
feat = F.interpolate(
feat, size=fused_size, mode='bilinear', align_corners=True)
# fix runtime error of "+=" inplace operation in PyTorch 1.10
x = x + self.lateral_convs[i](feat)
for i in range(self.num_convs):
x = self.convs[i](x)
mask_pred = self.conv_logits(x)
x = self.conv_embedding(x)
return mask_pred, x
@force_fp32(apply_to=('mask_pred', ))
def loss(self, mask_pred, labels):
labels = labels.squeeze(1).long()
loss_semantic_seg = self.criterion(mask_pred, labels)
return loss_semantic_seg
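

# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal forward pass, assuming mmdet/mmcv are installed. With five input levels
# and fusion_level=1, every level is resized to the spatial size of feats[1]; the
# head returns per-pixel class logits (num_classes=183 by default) plus a
# 256-channel semantic embedding at that resolution.
if __name__ == '__main__':
    import torch
    head = FusedSemanticHead(num_ins=5, fusion_level=1)
    feats = [torch.rand(2, 256, 64 // 2**i, 64 // 2**i) for i in range(5)]
    seg_pred, seg_feat = head(feats)
    assert seg_pred.shape == (2, 183, 32, 32)
    assert seg_feat.shape == (2, 256, 32, 32)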

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/global_context_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
@HEADS.register_module()
class GlobalContextHead(BaseModule):
"""Global context head used in `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
num_convs (int, optional): number of convolutional layer in GlbCtxHead.
Default: 4.
in_channels (int, optional): number of input channels. Default: 256.
conv_out_channels (int, optional): number of output channels before
classification layer. Default: 256.
num_classes (int, optional): number of classes. Default: 80.
loss_weight (float, optional): global context loss weight. Default: 1.
conv_cfg (dict, optional): config to init conv layer. Default: None.
norm_cfg (dict, optional): config to init norm layer. Default: None.
conv_to_res (bool, optional): if True, 2 convs will be grouped into
1 `SimplifiedBasicBlock` using a skip connection. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=80,
loss_weight=1.0,
conv_cfg=None,
norm_cfg=None,
conv_to_res=False,
init_cfg=dict(
type='Normal', std=0.01, override=dict(name='fc'))):
super(GlobalContextHead, self).__init__(init_cfg)
self.num_convs = num_convs
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.num_classes = num_classes
self.loss_weight = loss_weight
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.conv_to_res = conv_to_res
self.fp16_enabled = False
if self.conv_to_res:
num_res_blocks = num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
else:
self.convs = nn.ModuleList()
for i in range(self.num_convs):
in_channels = self.in_channels if i == 0 else conv_out_channels
self.convs.append(
ConvModule(
in_channels,
conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
self.pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Linear(conv_out_channels, num_classes)
self.criterion = nn.BCEWithLogitsLoss()
@auto_fp16()
def forward(self, feats):
"""Forward function."""
x = feats[-1]
for i in range(self.num_convs):
x = self.convs[i](x)
x = self.pool(x)
# multi-class prediction
mc_pred = x.reshape(x.size(0), -1)
mc_pred = self.fc(mc_pred)
return mc_pred, x
@force_fp32(apply_to=('pred', ))
def loss(self, pred, labels):
"""Loss function."""
labels = [lbl.unique() for lbl in labels]
targets = pred.new_zeros(pred.size())
for i, label in enumerate(labels):
targets[i, label] = 1.0
loss = self.loss_weight * self.criterion(pred, targets)
return loss
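

# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal forward and loss call, assuming mmcv is installed and the defaults
# above are kept. The head only uses the last (coarsest) feature map,
# global-average-pools it and predicts one multi-label logit per class; the loss
# builds a multi-hot target from the ground-truth labels present in each image.
if __name__ == '__main__':
    import torch
    head = GlobalContextHead()
    feats = [torch.rand(2, 256, 8, 8)]  # only feats[-1] is used
    mc_pred, pooled_feat = head(feats)
    assert mc_pred.shape == (2, 80)
    labels = [torch.tensor([1, 1, 5]), torch.tensor([3])]  # per-image gt labels
    loss = head.loss(mc_pred, labels)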

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/grid_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class GridHead(BaseModule):
def __init__(self,
grid_points=9,
num_convs=8,
roi_feat_size=14,
in_channels=256,
conv_kernel_size=3,
point_feat_channels=64,
deconv_kernel_size=4,
class_agnostic=False,
loss_grid=dict(
type='CrossEntropyLoss', use_sigmoid=True,
loss_weight=15),
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=36),
init_cfg=[
dict(type='Kaiming', layer=['Conv2d', 'Linear']),
dict(
type='Normal',
layer='ConvTranspose2d',
std=0.001,
override=dict(
type='Normal',
name='deconv2',
std=0.001,
bias=-np.log(0.99 / 0.01)))
]):
super(GridHead, self).__init__(init_cfg)
self.grid_points = grid_points
self.num_convs = num_convs
self.roi_feat_size = roi_feat_size
self.in_channels = in_channels
self.conv_kernel_size = conv_kernel_size
self.point_feat_channels = point_feat_channels
self.conv_out_channels = self.point_feat_channels * self.grid_points
self.class_agnostic = class_agnostic
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
if isinstance(norm_cfg, dict) and norm_cfg['type'] == 'GN':
assert self.conv_out_channels % norm_cfg['num_groups'] == 0
assert self.grid_points >= 4
self.grid_size = int(np.sqrt(self.grid_points))
if self.grid_size * self.grid_size != self.grid_points:
raise ValueError('grid_points must be a square number')
# the predicted heatmap is half of whole_map_size
if not isinstance(self.roi_feat_size, int):
            raise ValueError('Only square RoIs are supported in Grid R-CNN')
self.whole_map_size = self.roi_feat_size * 4
# compute point-wise sub-regions
self.sub_regions = self.calc_sub_regions()
self.convs = []
for i in range(self.num_convs):
in_channels = (
self.in_channels if i == 0 else self.conv_out_channels)
stride = 2 if i == 0 else 1
padding = (self.conv_kernel_size - 1) // 2
self.convs.append(
ConvModule(
in_channels,
self.conv_out_channels,
self.conv_kernel_size,
stride=stride,
padding=padding,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg,
bias=True))
self.convs = nn.Sequential(*self.convs)
self.deconv1 = nn.ConvTranspose2d(
self.conv_out_channels,
self.conv_out_channels,
kernel_size=deconv_kernel_size,
stride=2,
padding=(deconv_kernel_size - 2) // 2,
groups=grid_points)
self.norm1 = nn.GroupNorm(grid_points, self.conv_out_channels)
self.deconv2 = nn.ConvTranspose2d(
self.conv_out_channels,
grid_points,
kernel_size=deconv_kernel_size,
stride=2,
padding=(deconv_kernel_size - 2) // 2,
groups=grid_points)
# find the 4-neighbor of each grid point
self.neighbor_points = []
grid_size = self.grid_size
for i in range(grid_size): # i-th column
for j in range(grid_size): # j-th row
neighbors = []
if i > 0: # left: (i - 1, j)
neighbors.append((i - 1) * grid_size + j)
if j > 0: # up: (i, j - 1)
neighbors.append(i * grid_size + j - 1)
if j < grid_size - 1: # down: (i, j + 1)
neighbors.append(i * grid_size + j + 1)
if i < grid_size - 1: # right: (i + 1, j)
neighbors.append((i + 1) * grid_size + j)
self.neighbor_points.append(tuple(neighbors))
# total edges in the grid
self.num_edges = sum([len(p) for p in self.neighbor_points])
self.forder_trans = nn.ModuleList() # first-order feature transition
self.sorder_trans = nn.ModuleList() # second-order feature transition
for neighbors in self.neighbor_points:
fo_trans = nn.ModuleList()
so_trans = nn.ModuleList()
for _ in range(len(neighbors)):
# each transition module consists of a 5x5 depth-wise conv and
# 1x1 conv.
fo_trans.append(
nn.Sequential(
nn.Conv2d(
self.point_feat_channels,
self.point_feat_channels,
5,
stride=1,
padding=2,
groups=self.point_feat_channels),
nn.Conv2d(self.point_feat_channels,
self.point_feat_channels, 1)))
so_trans.append(
nn.Sequential(
nn.Conv2d(
self.point_feat_channels,
self.point_feat_channels,
5,
1,
2,
groups=self.point_feat_channels),
nn.Conv2d(self.point_feat_channels,
self.point_feat_channels, 1)))
self.forder_trans.append(fo_trans)
self.sorder_trans.append(so_trans)
self.loss_grid = build_loss(loss_grid)
def forward(self, x):
assert x.shape[-1] == x.shape[-2] == self.roi_feat_size
# RoI feature transformation, downsample 2x
x = self.convs(x)
c = self.point_feat_channels
# first-order fusion
x_fo = [None for _ in range(self.grid_points)]
for i, points in enumerate(self.neighbor_points):
x_fo[i] = x[:, i * c:(i + 1) * c]
for j, point_idx in enumerate(points):
x_fo[i] = x_fo[i] + self.forder_trans[i][j](
x[:, point_idx * c:(point_idx + 1) * c])
# second-order fusion
x_so = [None for _ in range(self.grid_points)]
for i, points in enumerate(self.neighbor_points):
x_so[i] = x[:, i * c:(i + 1) * c]
for j, point_idx in enumerate(points):
x_so[i] = x_so[i] + self.sorder_trans[i][j](x_fo[point_idx])
# predicted heatmap with fused features
x2 = torch.cat(x_so, dim=1)
x2 = self.deconv1(x2)
x2 = F.relu(self.norm1(x2), inplace=True)
heatmap = self.deconv2(x2)
# predicted heatmap with original features (applicable during training)
if self.training:
x1 = x
x1 = self.deconv1(x1)
x1 = F.relu(self.norm1(x1), inplace=True)
heatmap_unfused = self.deconv2(x1)
else:
heatmap_unfused = heatmap
return dict(fused=heatmap, unfused=heatmap_unfused)
def calc_sub_regions(self):
"""Compute point specific representation regions.
See Grid R-CNN Plus (https://arxiv.org/abs/1906.05688) for details.
"""
# to make it consistent with the original implementation, half_size
# is computed as 2 * quarter_size, which is smaller
half_size = self.whole_map_size // 4 * 2
sub_regions = []
for i in range(self.grid_points):
x_idx = i // self.grid_size
y_idx = i % self.grid_size
if x_idx == 0:
sub_x1 = 0
elif x_idx == self.grid_size - 1:
sub_x1 = half_size
else:
ratio = x_idx / (self.grid_size - 1) - 0.25
sub_x1 = max(int(ratio * self.whole_map_size), 0)
if y_idx == 0:
sub_y1 = 0
elif y_idx == self.grid_size - 1:
sub_y1 = half_size
else:
ratio = y_idx / (self.grid_size - 1) - 0.25
sub_y1 = max(int(ratio * self.whole_map_size), 0)
sub_regions.append(
(sub_x1, sub_y1, sub_x1 + half_size, sub_y1 + half_size))
return sub_regions
def get_targets(self, sampling_results, rcnn_train_cfg):
# mix all samples (across images) together.
pos_bboxes = torch.cat([res.pos_bboxes for res in sampling_results],
dim=0).cpu()
pos_gt_bboxes = torch.cat(
[res.pos_gt_bboxes for res in sampling_results], dim=0).cpu()
assert pos_bboxes.shape == pos_gt_bboxes.shape
# expand pos_bboxes to 2x of original size
x1 = pos_bboxes[:, 0] - (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
y1 = pos_bboxes[:, 1] - (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
x2 = pos_bboxes[:, 2] + (pos_bboxes[:, 2] - pos_bboxes[:, 0]) / 2
y2 = pos_bboxes[:, 3] + (pos_bboxes[:, 3] - pos_bboxes[:, 1]) / 2
pos_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
pos_bbox_ws = (pos_bboxes[:, 2] - pos_bboxes[:, 0]).unsqueeze(-1)
pos_bbox_hs = (pos_bboxes[:, 3] - pos_bboxes[:, 1]).unsqueeze(-1)
num_rois = pos_bboxes.shape[0]
map_size = self.whole_map_size
# this is not the final target shape
targets = torch.zeros((num_rois, self.grid_points, map_size, map_size),
dtype=torch.float)
# pre-compute interpolation factors for all grid points.
# the first item is the factor of x-dim, and the second is y-dim.
# for a 9-point grid, factors are like (1, 0), (0.5, 0.5), (0, 1)
factors = []
for j in range(self.grid_points):
x_idx = j // self.grid_size
y_idx = j % self.grid_size
factors.append((1 - x_idx / (self.grid_size - 1),
1 - y_idx / (self.grid_size - 1)))
radius = rcnn_train_cfg.pos_radius
radius2 = radius**2
for i in range(num_rois):
# ignore small bboxes
if (pos_bbox_ws[i] <= self.grid_size
or pos_bbox_hs[i] <= self.grid_size):
continue
# for each grid point, mark a small circle as positive
for j in range(self.grid_points):
factor_x, factor_y = factors[j]
gridpoint_x = factor_x * pos_gt_bboxes[i, 0] + (
1 - factor_x) * pos_gt_bboxes[i, 2]
gridpoint_y = factor_y * pos_gt_bboxes[i, 1] + (
1 - factor_y) * pos_gt_bboxes[i, 3]
cx = int((gridpoint_x - pos_bboxes[i, 0]) / pos_bbox_ws[i] *
map_size)
cy = int((gridpoint_y - pos_bboxes[i, 1]) / pos_bbox_hs[i] *
map_size)
for x in range(cx - radius, cx + radius + 1):
for y in range(cy - radius, cy + radius + 1):
if x >= 0 and x < map_size and y >= 0 and y < map_size:
if (x - cx)**2 + (y - cy)**2 <= radius2:
targets[i, j, y, x] = 1
# reduce the target heatmap size by a half
# proposed in Grid R-CNN Plus (https://arxiv.org/abs/1906.05688).
sub_targets = []
for i in range(self.grid_points):
sub_x1, sub_y1, sub_x2, sub_y2 = self.sub_regions[i]
sub_targets.append(targets[:, [i], sub_y1:sub_y2, sub_x1:sub_x2])
sub_targets = torch.cat(sub_targets, dim=1)
sub_targets = sub_targets.to(sampling_results[0].pos_bboxes.device)
return sub_targets
def loss(self, grid_pred, grid_targets):
loss_fused = self.loss_grid(grid_pred['fused'], grid_targets)
loss_unfused = self.loss_grid(grid_pred['unfused'], grid_targets)
loss_grid = loss_fused + loss_unfused
return dict(loss_grid=loss_grid)
def get_bboxes(self, det_bboxes, grid_pred, img_metas):
# TODO: refactoring
assert det_bboxes.shape[0] == grid_pred.shape[0]
det_bboxes = det_bboxes.cpu()
cls_scores = det_bboxes[:, [4]]
det_bboxes = det_bboxes[:, :4]
grid_pred = grid_pred.sigmoid().cpu()
R, c, h, w = grid_pred.shape
half_size = self.whole_map_size // 4 * 2
assert h == w == half_size
assert c == self.grid_points
# find the point with max scores in the half-sized heatmap
grid_pred = grid_pred.view(R * c, h * w)
pred_scores, pred_position = grid_pred.max(dim=1)
xs = pred_position % w
ys = pred_position // w
# get the position in the whole heatmap instead of half-sized heatmap
for i in range(self.grid_points):
xs[i::self.grid_points] += self.sub_regions[i][0]
ys[i::self.grid_points] += self.sub_regions[i][1]
# reshape to (num_rois, grid_points)
pred_scores, xs, ys = tuple(
map(lambda x: x.view(R, c), [pred_scores, xs, ys]))
# get expanded pos_bboxes
widths = (det_bboxes[:, 2] - det_bboxes[:, 0]).unsqueeze(-1)
heights = (det_bboxes[:, 3] - det_bboxes[:, 1]).unsqueeze(-1)
x1 = (det_bboxes[:, 0, None] - widths / 2)
y1 = (det_bboxes[:, 1, None] - heights / 2)
# map the grid point to the absolute coordinates
abs_xs = (xs.float() + 0.5) / w * widths + x1
abs_ys = (ys.float() + 0.5) / h * heights + y1
# get the grid points indices that fall on the bbox boundaries
x1_inds = [i for i in range(self.grid_size)]
y1_inds = [i * self.grid_size for i in range(self.grid_size)]
x2_inds = [
self.grid_points - self.grid_size + i
for i in range(self.grid_size)
]
y2_inds = [(i + 1) * self.grid_size - 1 for i in range(self.grid_size)]
# voting of all grid points on some boundary
bboxes_x1 = (abs_xs[:, x1_inds] * pred_scores[:, x1_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, x1_inds].sum(dim=1, keepdim=True))
bboxes_y1 = (abs_ys[:, y1_inds] * pred_scores[:, y1_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, y1_inds].sum(dim=1, keepdim=True))
bboxes_x2 = (abs_xs[:, x2_inds] * pred_scores[:, x2_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, x2_inds].sum(dim=1, keepdim=True))
bboxes_y2 = (abs_ys[:, y2_inds] * pred_scores[:, y2_inds]).sum(
dim=1, keepdim=True) / (
pred_scores[:, y2_inds].sum(dim=1, keepdim=True))
bbox_res = torch.cat(
[bboxes_x1, bboxes_y1, bboxes_x2, bboxes_y2, cls_scores], dim=1)
bbox_res[:, [0, 2]].clamp_(min=0, max=img_metas[0]['img_shape'][1])
bbox_res[:, [1, 3]].clamp_(min=0, max=img_metas[0]['img_shape'][0])
return bbox_res
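

# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal forward pass, assuming mmdet/mmcv are installed and the defaults above
# are kept. The 14x14 RoI feature is downsampled once by the stride-2 conv and then
# upsampled twice by the grouped deconvs, giving one 28x28 heatmap per grid point
# (half of whole_map_size = 56) for both the fused and unfused branches.
if __name__ == '__main__':
    head = GridHead()
    head.eval()  # in eval mode the unfused branch reuses the fused heatmap
    roi_feats = torch.rand(2, 256, 14, 14)
    out = head(roi_feats)
    assert out['fused'].shape == (2, 9, 28, 28)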

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/htc_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
from mmdet.models.builder import HEADS
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class HTCMaskHead(FCNMaskHead):
def __init__(self, with_conv_res=True, *args, **kwargs):
super(HTCMaskHead, self).__init__(*args, **kwargs)
self.with_conv_res = with_conv_res
if self.with_conv_res:
self.conv_res = ConvModule(
self.conv_out_channels,
self.conv_out_channels,
1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, x, res_feat=None, return_logits=True, return_feat=True):
if res_feat is not None:
assert self.with_conv_res
res_feat = self.conv_res(res_feat)
x = x + res_feat
for conv in self.convs:
x = conv(x)
res_feat = x
outs = []
if return_logits:
x = self.upsample(x)
if self.upsample_method == 'deconv':
x = self.relu(x)
mask_pred = self.conv_logits(x)
outs.append(mask_pred)
if return_feat:
outs.append(res_feat)
return outs if len(outs) > 1 else outs[0]
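

# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal forward pass, assuming mmdet/mmcv are installed and the FCNMaskHead
# defaults are kept. When a residual feature from the previous cascade stage is
# given, it is projected by the 1x1 conv_res and added to the input; by default the
# head returns both the mask logits and the feature relayed to the next stage.
if __name__ == '__main__':
    import torch
    head = HTCMaskHead(num_classes=80)
    x = torch.rand(4, 256, 14, 14)
    res_feat = torch.rand(4, 256, 14, 14)
    mask_pred, next_feat = head(x, res_feat=res_feat)
    assert mask_pred.shape == (4, 80, 28, 28)
    assert next_feat.shape == (4, 256, 14, 14)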

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/mask_point_head.py
# Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend/point_head/point_head.py # noqa
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
from mmcv.runner import BaseModule
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.utils import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
@HEADS.register_module()
class MaskPointHead(BaseModule):
"""A mask point head use in PointRend.
``MaskPointHead`` use shared multi-layer perceptron (equivalent to
nn.Conv1d) to predict the logit of input points. The fine-grained feature
and coarse feature will be concatenate together for predication.
Args:
num_fcs (int): Number of fc layers in the head. Default: 3.
in_channels (int): Number of input channels. Default: 256.
fc_channels (int): Number of fc channels. Default: 256.
num_classes (int): Number of classes for logits. Default: 80.
class_agnostic (bool): Whether use class agnostic classification.
If so, the output channels of logits will be 1. Default: False.
coarse_pred_each_layer (bool): Whether concatenate coarse feature with
the output of each fc layer. Default: True.
conv_cfg (dict | None): Dictionary to construct and config conv layer.
Default: dict(type='Conv1d'))
norm_cfg (dict | None): Dictionary to construct and config norm layer.
Default: None.
loss_point (dict): Dictionary to construct and config loss layer of
point head. Default: dict(type='CrossEntropyLoss', use_mask=True,
loss_weight=1.0).
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_classes,
num_fcs=3,
in_channels=256,
fc_channels=256,
class_agnostic=False,
coarse_pred_each_layer=True,
conv_cfg=dict(type='Conv1d'),
norm_cfg=None,
act_cfg=dict(type='ReLU'),
loss_point=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0),
init_cfg=dict(
type='Normal', std=0.001,
override=dict(name='fc_logits'))):
super().__init__(init_cfg)
self.num_fcs = num_fcs
self.in_channels = in_channels
self.fc_channels = fc_channels
self.num_classes = num_classes
self.class_agnostic = class_agnostic
self.coarse_pred_each_layer = coarse_pred_each_layer
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.loss_point = build_loss(loss_point)
fc_in_channels = in_channels + num_classes
self.fcs = nn.ModuleList()
for _ in range(num_fcs):
fc = ConvModule(
fc_in_channels,
fc_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.fcs.append(fc)
fc_in_channels = fc_channels
fc_in_channels += num_classes if self.coarse_pred_each_layer else 0
out_channels = 1 if self.class_agnostic else self.num_classes
self.fc_logits = nn.Conv1d(
fc_in_channels, out_channels, kernel_size=1, stride=1, padding=0)
def forward(self, fine_grained_feats, coarse_feats):
"""Classify each point base on fine grained and coarse feats.
Args:
fine_grained_feats (Tensor): Fine grained feature sampled from FPN,
shape (num_rois, in_channels, num_points).
coarse_feats (Tensor): Coarse feature sampled from CoarseMaskHead,
shape (num_rois, num_classes, num_points).
Returns:
Tensor: Point classification results,
shape (num_rois, num_class, num_points).
"""
x = torch.cat([fine_grained_feats, coarse_feats], dim=1)
for fc in self.fcs:
x = fc(x)
if self.coarse_pred_each_layer:
x = torch.cat((x, coarse_feats), dim=1)
return self.fc_logits(x)
def get_targets(self, rois, rel_roi_points, sampling_results, gt_masks,
cfg):
"""Get training targets of MaskPointHead for all images.
Args:
rois (Tensor): Region of Interest, shape (num_rois, 5).
rel_roi_points: Points coordinates relative to RoI, shape
(num_rois, num_points, 2).
sampling_results (:obj:`SamplingResult`): Sampling result after
sampling and assignment.
gt_masks (Tensor) : Ground truth segmentation masks of
corresponding boxes, shape (num_rois, height, width).
cfg (dict): Training cfg.
Returns:
Tensor: Point target, shape (num_rois, num_points).
"""
num_imgs = len(sampling_results)
rois_list = []
rel_roi_points_list = []
for batch_ind in range(num_imgs):
inds = (rois[:, 0] == batch_ind)
rois_list.append(rois[inds])
rel_roi_points_list.append(rel_roi_points[inds])
pos_assigned_gt_inds_list = [
res.pos_assigned_gt_inds for res in sampling_results
]
cfg_list = [cfg for _ in range(num_imgs)]
point_targets = map(self._get_target_single, rois_list,
rel_roi_points_list, pos_assigned_gt_inds_list,
gt_masks, cfg_list)
point_targets = list(point_targets)
if len(point_targets) > 0:
point_targets = torch.cat(point_targets)
return point_targets
def _get_target_single(self, rois, rel_roi_points, pos_assigned_gt_inds,
gt_masks, cfg):
"""Get training target of MaskPointHead for each image."""
num_pos = rois.size(0)
num_points = cfg.num_points
if num_pos > 0:
gt_masks_th = (
gt_masks.to_tensor(rois.dtype, rois.device).index_select(
0, pos_assigned_gt_inds))
gt_masks_th = gt_masks_th.unsqueeze(1)
rel_img_points = rel_roi_point_to_rel_img_point(
rois, rel_roi_points, gt_masks_th)
point_targets = point_sample(gt_masks_th,
rel_img_points).squeeze(1)
else:
point_targets = rois.new_zeros((0, num_points))
return point_targets
def loss(self, point_pred, point_targets, labels):
"""Calculate loss for MaskPointHead.
Args:
            point_pred (Tensor): Point prediction result, shape
(num_rois, num_classes, num_points).
point_targets (Tensor): Point targets, shape (num_roi, num_points).
labels (Tensor): Class label of corresponding boxes,
shape (num_rois, )
Returns:
dict[str, Tensor]: a dictionary of point loss components
"""
loss = dict()
if self.class_agnostic:
loss_point = self.loss_point(point_pred, point_targets,
torch.zeros_like(labels))
else:
loss_point = self.loss_point(point_pred, point_targets, labels)
loss['loss_point'] = loss_point
return loss
def get_roi_rel_points_train(self, mask_pred, labels, cfg):
"""Get ``num_points`` most uncertain points with random points during
train.
Sample points in [0, 1] x [0, 1] coordinate space based on their
uncertainty. The uncertainties are calculated for each point using
'_get_uncertainty()' function that takes point's logit prediction as
input.
Args:
mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
mask_height, mask_width) for class-specific or class-agnostic
prediction.
labels (list): The ground truth class for each instance.
cfg (dict): Training config of point head.
Returns:
point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
that contains the coordinates sampled points.
"""
point_coords = get_uncertain_point_coords_with_randomness(
mask_pred, labels, cfg.num_points, cfg.oversample_ratio,
cfg.importance_sample_ratio)
return point_coords
def get_roi_rel_points_test(self, mask_pred, pred_label, cfg):
"""Get ``num_points`` most uncertain points during test.
Args:
mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
mask_height, mask_width) for class-specific or class-agnostic
prediction.
            pred_label (list): The predicted class for each instance.
cfg (dict): Testing config of point head.
Returns:
point_indices (Tensor): A tensor of shape (num_rois, num_points)
that contains indices from [0, mask_height x mask_width) of the
most uncertain points.
point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
that contains [0, 1] x [0, 1] normalized coordinates of the
                most uncertain points from the [mask_height, mask_width] grid.
"""
num_points = cfg.subdivision_num_points
uncertainty_map = get_uncertainty(mask_pred, pred_label)
num_rois, _, mask_height, mask_width = uncertainty_map.shape
# During ONNX exporting, the type of each elements of 'shape' is
# `Tensor(float)`, while it is `float` during PyTorch inference.
if isinstance(mask_height, torch.Tensor):
h_step = 1.0 / mask_height.float()
w_step = 1.0 / mask_width.float()
else:
h_step = 1.0 / mask_height
w_step = 1.0 / mask_width
# cast to int to avoid dynamic K for TopK op in ONNX
mask_size = int(mask_height * mask_width)
uncertainty_map = uncertainty_map.view(num_rois, mask_size)
num_points = min(mask_size, num_points)
point_indices = uncertainty_map.topk(num_points, dim=1)[1]
xs = w_step / 2.0 + (point_indices % mask_width).float() * w_step
ys = h_step / 2.0 + (point_indices // mask_width).float() * h_step
point_coords = torch.stack([xs, ys], dim=2)
return point_indices, point_coords
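

# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal forward pass, assuming mmdet/mmcv are installed. Point-wise fine-grained
# FPN features and coarse mask logits are concatenated along the channel dimension
# and classified per point by the shared 1x1 Conv1d MLP.
if __name__ == '__main__':
    head = MaskPointHead(num_classes=80)
    fine_grained_feats = torch.rand(4, 256, 100)  # (num_rois, in_channels, points)
    coarse_feats = torch.rand(4, 80, 100)  # (num_rois, num_classes, points)
    point_pred = head(fine_grained_feats, coarse_feats)
    assert point_pred.shape == (4, 80, 100)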

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/maskiou_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmcv.cnn import Conv2d, Linear, MaxPool2d
from mmcv.runner import BaseModule, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.models.builder import HEADS, build_loss
@HEADS.register_module()
class MaskIoUHead(BaseModule):
"""Mask IoU Head.
This head predicts the IoU of predicted masks and corresponding gt masks.
"""
def __init__(self,
num_convs=4,
num_fcs=2,
roi_feat_size=14,
in_channels=256,
conv_out_channels=256,
fc_out_channels=1024,
num_classes=80,
loss_iou=dict(type='MSELoss', loss_weight=0.5),
init_cfg=[
dict(type='Kaiming', override=dict(name='convs')),
dict(type='Caffe2Xavier', override=dict(name='fcs')),
dict(
type='Normal',
std=0.01,
override=dict(name='fc_mask_iou'))
]):
super(MaskIoUHead, self).__init__(init_cfg)
self.in_channels = in_channels
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.num_classes = num_classes
self.fp16_enabled = False
self.convs = nn.ModuleList()
for i in range(num_convs):
if i == 0:
# concatenation of mask feature and mask prediction
in_channels = self.in_channels + 1
else:
in_channels = self.conv_out_channels
stride = 2 if i == num_convs - 1 else 1
self.convs.append(
Conv2d(
in_channels,
self.conv_out_channels,
3,
stride=stride,
padding=1))
roi_feat_size = _pair(roi_feat_size)
pooled_area = (roi_feat_size[0] // 2) * (roi_feat_size[1] // 2)
self.fcs = nn.ModuleList()
for i in range(num_fcs):
in_channels = (
self.conv_out_channels *
pooled_area if i == 0 else self.fc_out_channels)
self.fcs.append(Linear(in_channels, self.fc_out_channels))
self.fc_mask_iou = Linear(self.fc_out_channels, self.num_classes)
self.relu = nn.ReLU()
self.max_pool = MaxPool2d(2, 2)
self.loss_iou = build_loss(loss_iou)
def forward(self, mask_feat, mask_pred):
mask_pred = mask_pred.sigmoid()
mask_pred_pooled = self.max_pool(mask_pred.unsqueeze(1))
x = torch.cat((mask_feat, mask_pred_pooled), 1)
for conv in self.convs:
x = self.relu(conv(x))
x = x.flatten(1)
for fc in self.fcs:
x = self.relu(fc(x))
mask_iou = self.fc_mask_iou(x)
return mask_iou
@force_fp32(apply_to=('mask_iou_pred', ))
def loss(self, mask_iou_pred, mask_iou_targets):
pos_inds = mask_iou_targets > 0
if pos_inds.sum() > 0:
loss_mask_iou = self.loss_iou(mask_iou_pred[pos_inds],
mask_iou_targets[pos_inds])
else:
loss_mask_iou = mask_iou_pred.sum() * 0
return dict(loss_mask_iou=loss_mask_iou)
@force_fp32(apply_to=('mask_pred', ))
def get_targets(self, sampling_results, gt_masks, mask_pred, mask_targets,
rcnn_train_cfg):
"""Compute target of mask IoU.
Mask IoU target is the IoU of the predicted mask (inside a bbox) and
the gt mask of corresponding gt mask (the whole instance).
        The intersection area is computed inside the bbox, and the gt mask area
        is computed in two steps: first we compute the gt area inside the bbox,
        then divide it by the ratio between the gt area inside the bbox and the
        gt area of the whole instance.
Args:
sampling_results (list[:obj:`SamplingResult`]): sampling results.
gt_masks (BitmapMask | PolygonMask): Gt masks (the whole instance)
of each image, with the same shape of the input image.
mask_pred (Tensor): Predicted masks of each positive proposal,
shape (num_pos, h, w).
mask_targets (Tensor): Gt mask of each positive proposal,
binary map of the shape (num_pos, h, w).
rcnn_train_cfg (dict): Training config for R-CNN part.
Returns:
Tensor: mask iou target (length == num positive).
"""
pos_proposals = [res.pos_bboxes for res in sampling_results]
pos_assigned_gt_inds = [
res.pos_assigned_gt_inds for res in sampling_results
]
# compute the area ratio of gt areas inside the proposals and
# the whole instance
area_ratios = map(self._get_area_ratio, pos_proposals,
pos_assigned_gt_inds, gt_masks)
area_ratios = torch.cat(list(area_ratios))
assert mask_targets.size(0) == area_ratios.size(0)
mask_pred = (mask_pred > rcnn_train_cfg.mask_thr_binary).float()
mask_pred_areas = mask_pred.sum((-1, -2))
# mask_pred and mask_targets are binary maps
overlap_areas = (mask_pred * mask_targets).sum((-1, -2))
# compute the mask area of the whole instance
gt_full_areas = mask_targets.sum((-1, -2)) / (area_ratios + 1e-7)
mask_iou_targets = overlap_areas / (
mask_pred_areas + gt_full_areas - overlap_areas)
return mask_iou_targets
def _get_area_ratio(self, pos_proposals, pos_assigned_gt_inds, gt_masks):
"""Compute area ratio of the gt mask inside the proposal and the gt
mask of the corresponding instance."""
num_pos = pos_proposals.size(0)
if num_pos > 0:
area_ratios = []
proposals_np = pos_proposals.cpu().numpy()
pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy()
# compute mask areas of gt instances (batch processing for speedup)
gt_instance_mask_area = gt_masks.areas
for i in range(num_pos):
gt_mask = gt_masks[pos_assigned_gt_inds[i]]
# crop the gt mask inside the proposal
bbox = proposals_np[i, :].astype(np.int32)
gt_mask_in_proposal = gt_mask.crop(bbox)
ratio = gt_mask_in_proposal.areas[0] / (
gt_instance_mask_area[pos_assigned_gt_inds[i]] + 1e-7)
area_ratios.append(ratio)
area_ratios = torch.from_numpy(np.stack(area_ratios)).float().to(
pos_proposals.device)
else:
area_ratios = pos_proposals.new_zeros((0, ))
return area_ratios
@force_fp32(apply_to=('mask_iou_pred', ))
def get_mask_scores(self, mask_iou_pred, det_bboxes, det_labels):
"""Get the mask scores.
mask_score = bbox_score * mask_iou
"""
inds = range(det_labels.size(0))
mask_scores = mask_iou_pred[inds, det_labels] * det_bboxes[inds, -1]
mask_scores = mask_scores.cpu().numpy()
det_labels = det_labels.cpu().numpy()
return [mask_scores[det_labels == i] for i in range(self.num_classes)]
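

# --- Usage sketch (added illustration, not part of the original file) ---
# A minimal forward pass, assuming mmdet/mmcv are installed and the defaults above
# are kept. The 28x28 mask prediction is sigmoid-activated, max-pooled to the 14x14
# RoI feature resolution and concatenated as an extra channel; the head then
# regresses one IoU estimate per class.
if __name__ == '__main__':
    mask_feat = torch.rand(4, 256, 14, 14)
    mask_pred = torch.rand(4, 28, 28)
    head = MaskIoUHead()
    mask_iou = head(mask_feat, mask_pred)
    assert mask_iou.shape == (4, 80)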

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/scnet_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fcn_mask_head import FCNMaskHead
@HEADS.register_module()
class SCNetMaskHead(FCNMaskHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetMaskHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if conv_to_res:
assert self.conv_kernel_size == 3
self.num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
self.num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)

# File: mmdetection-master/mmdet/models/roi_heads/mask_heads/scnet_semantic_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .fused_semantic_head import FusedSemanticHead
@HEADS.register_module()
class SCNetSemanticHead(FusedSemanticHead):
"""Mask head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
conv_to_res (bool, optional): if True, change the conv layers to
``SimplifiedBasicBlock``.
"""
def __init__(self, conv_to_res=True, **kwargs):
super(SCNetSemanticHead, self).__init__(**kwargs)
self.conv_to_res = conv_to_res
if self.conv_to_res:
num_res_blocks = self.num_convs // 2
self.convs = ResLayer(
SimplifiedBasicBlock,
self.in_channels,
self.conv_out_channels,
num_res_blocks,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
self.num_convs = num_res_blocks
| 998 | 33.448276 | 72 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/roi_extractors/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .base_roi_extractor import BaseRoIExtractor
from .generic_roi_extractor import GenericRoIExtractor
from .single_level_roi_extractor import SingleRoIExtractor
__all__ = ['BaseRoIExtractor', 'SingleRoIExtractor', 'GenericRoIExtractor']
| 288 | 40.285714 | 75 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/roi_extractors/base_roi_extractor.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch
import torch.nn as nn
from mmcv import ops
from mmcv.runner import BaseModule
class BaseRoIExtractor(BaseModule, metaclass=ABCMeta):
"""Base class for RoI extractor.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (int): Strides of input feature maps.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
init_cfg=None):
super(BaseRoIExtractor, self).__init__(init_cfg)
self.roi_layers = self.build_roi_layers(roi_layer, featmap_strides)
self.out_channels = out_channels
self.featmap_strides = featmap_strides
self.fp16_enabled = False
@property
def num_inputs(self):
"""int: Number of input feature maps."""
return len(self.featmap_strides)
def build_roi_layers(self, layer_cfg, featmap_strides):
"""Build RoI operator to extract feature from each level feature map.
Args:
layer_cfg (dict): Dictionary to construct and config RoI layer
operation. Options are modules under ``mmcv/ops`` such as
``RoIAlign``.
featmap_strides (List[int]): The stride of input feature map w.r.t
to the original image size, which would be used to scale RoI
coordinate (original image coordinate system) to feature
coordinate system.
Returns:
nn.ModuleList: The RoI extractor modules for each level feature
map.
"""
cfg = layer_cfg.copy()
layer_type = cfg.pop('type')
assert hasattr(ops, layer_type)
layer_cls = getattr(ops, layer_type)
roi_layers = nn.ModuleList(
[layer_cls(spatial_scale=1 / s, **cfg) for s in featmap_strides])
return roi_layers
def roi_rescale(self, rois, scale_factor):
"""Scale RoI coordinates by scale factor.
Args:
rois (torch.Tensor): RoI (Region of Interest), shape (n, 5)
scale_factor (float): Scale factor that RoI will be multiplied by.
Returns:
torch.Tensor: Scaled RoI.
"""
cx = (rois[:, 1] + rois[:, 3]) * 0.5
cy = (rois[:, 2] + rois[:, 4]) * 0.5
w = rois[:, 3] - rois[:, 1]
h = rois[:, 4] - rois[:, 2]
new_w = w * scale_factor
new_h = h * scale_factor
x1 = cx - new_w * 0.5
x2 = cx + new_w * 0.5
y1 = cy - new_h * 0.5
y2 = cy + new_h * 0.5
new_rois = torch.stack((rois[:, 0], x1, y1, x2, y2), dim=-1)
return new_rois
@abstractmethod
def forward(self, feats, rois, roi_scale_factor=None):
pass
| 3,002 | 32.741573 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/roi_extractors/generic_roi_extractor.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn.bricks import build_plugin_layer
from mmcv.runner import force_fp32
from mmdet.models.builder import ROI_EXTRACTORS
from .base_roi_extractor import BaseRoIExtractor
@ROI_EXTRACTORS.register_module()
class GenericRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from all level feature maps levels.
This is the implementation of `A novel Region of Interest Extraction Layer
for Instance Segmentation <https://arxiv.org/abs/2004.13665>`_.
Args:
aggregation (str): The method to aggregate multiple feature maps.
Options are 'sum', 'concat'. Default: 'sum'.
pre_cfg (dict | None): Specify pre-processing modules. Default: None.
post_cfg (dict | None): Specify post-processing modules. Default: None.
kwargs (keyword arguments): Arguments that are the same
as :class:`BaseRoIExtractor`.
"""
def __init__(self,
aggregation='sum',
pre_cfg=None,
post_cfg=None,
**kwargs):
super(GenericRoIExtractor, self).__init__(**kwargs)
assert aggregation in ['sum', 'concat']
self.aggregation = aggregation
self.with_post = post_cfg is not None
self.with_pre = pre_cfg is not None
# build pre/post processing modules
if self.with_post:
self.post_module = build_plugin_layer(post_cfg, '_post_module')[1]
if self.with_pre:
self.pre_module = build_plugin_layer(pre_cfg, '_pre_module')[1]
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
if len(feats) == 1:
return self.roi_layers[0](feats[0], rois)
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
        # sometimes rois is an empty tensor
if roi_feats.shape[0] == 0:
return roi_feats
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
# mark the starting channels for concat mode
start_channels = 0
for i in range(num_levels):
roi_feats_t = self.roi_layers[i](feats[i], rois)
end_channels = start_channels + roi_feats_t.size(1)
if self.with_pre:
# apply pre-processing to a RoI extracted from each layer
roi_feats_t = self.pre_module(roi_feats_t)
if self.aggregation == 'sum':
# and sum them all
roi_feats = roi_feats + roi_feats_t
else:
# and concat them along channel dimension
roi_feats[:, start_channels:end_channels] = roi_feats_t
# update channels starting position
start_channels = end_channels
# check if concat channels match at the end
if self.aggregation == 'concat':
assert start_channels == self.out_channels
if self.with_post:
# apply post-processing before return the result
roi_feats = self.post_module(roi_feats)
return roi_feats
| 3,282 | 37.623529 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/roi_extractors/single_level_roi_extractor.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner import force_fp32
from mmdet.models.builder import ROI_EXTRACTORS
from .base_roi_extractor import BaseRoIExtractor
@ROI_EXTRACTORS.register_module()
class SingleRoIExtractor(BaseRoIExtractor):
"""Extract RoI features from a single level feature map.
If there are multiple input feature levels, each RoI is mapped to a level
according to its scale. The mapping rule is proposed in
`FPN <https://arxiv.org/abs/1612.03144>`_.
Args:
roi_layer (dict): Specify RoI layer type and arguments.
out_channels (int): Output channels of RoI layers.
featmap_strides (List[int]): Strides of input feature maps.
finest_scale (int): Scale threshold of mapping to level 0. Default: 56.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
roi_layer,
out_channels,
featmap_strides,
finest_scale=56,
init_cfg=None):
super(SingleRoIExtractor, self).__init__(roi_layer, out_channels,
featmap_strides, init_cfg)
self.finest_scale = finest_scale
def map_roi_levels(self, rois, num_levels):
"""Map rois to corresponding feature levels by scales.
- scale < finest_scale * 2: level 0
- finest_scale * 2 <= scale < finest_scale * 4: level 1
- finest_scale * 4 <= scale < finest_scale * 8: level 2
- scale >= finest_scale * 8: level 3
Args:
rois (Tensor): Input RoIs, shape (k, 5).
num_levels (int): Total level number.
Returns:
Tensor: Level index (0-based) of each RoI, shape (k, )
"""
scale = torch.sqrt(
(rois[:, 3] - rois[:, 1]) * (rois[:, 4] - rois[:, 2]))
target_lvls = torch.floor(torch.log2(scale / self.finest_scale + 1e-6))
target_lvls = target_lvls.clamp(min=0, max=num_levels - 1).long()
return target_lvls
@force_fp32(apply_to=('feats', ), out_fp16=True)
def forward(self, feats, rois, roi_scale_factor=None):
"""Forward function."""
out_size = self.roi_layers[0].output_size
num_levels = len(feats)
expand_dims = (-1, self.out_channels * out_size[0] * out_size[1])
if torch.onnx.is_in_onnx_export():
            # Workaround to export Mask R-CNN to ONNX
roi_feats = rois[:, :1].clone().detach()
roi_feats = roi_feats.expand(*expand_dims)
roi_feats = roi_feats.reshape(-1, self.out_channels, *out_size)
roi_feats = roi_feats * 0
else:
roi_feats = feats[0].new_zeros(
rois.size(0), self.out_channels, *out_size)
if num_levels == 1:
if len(rois) == 0:
return roi_feats
return self.roi_layers[0](feats[0], rois)
target_lvls = self.map_roi_levels(rois, num_levels)
if roi_scale_factor is not None:
rois = self.roi_rescale(rois, roi_scale_factor)
for i in range(num_levels):
mask = target_lvls == i
if torch.onnx.is_in_onnx_export():
# To keep all roi_align nodes exported to onnx
# and skip nonzero op
mask = mask.float().unsqueeze(-1)
# select target level rois and reset the rest rois to zero.
rois_i = rois.clone().detach()
rois_i = rois_i * mask
mask_exp = mask.expand(*expand_dims).reshape(roi_feats.shape)
roi_feats_t = self.roi_layers[i](feats[i], rois_i)
roi_feats_t = roi_feats_t * mask_exp
roi_feats = roi_feats + roi_feats_t
continue
inds = mask.nonzero(as_tuple=False).squeeze(1)
if inds.numel() > 0:
rois_ = rois[inds]
roi_feats_t = self.roi_layers[i](feats[i], rois_)
roi_feats[inds] = roi_feats_t
else:
# Sometimes some pyramid levels will not be used for RoI
# feature extraction and this will cause an incomplete
# computation graph in one GPU, which is different from those
# in other GPUs and will cause a hanging error.
# Therefore, we add it to ensure each feature pyramid is
# included in the computation graph to avoid runtime bugs.
roi_feats = roi_feats + sum(
x.view(-1)[0]
for x in self.parameters()) * 0. + feats[i].sum() * 0.
return roi_feats
| 4,736 | 40.920354 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/shared_heads/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .res_layer import ResLayer
__all__ = ['ResLayer']
| 104 | 20 | 47 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/shared_heads/res_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch.nn as nn
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.backbones import ResNet
from mmdet.models.builder import SHARED_HEADS
from mmdet.models.utils import ResLayer as _ResLayer
@SHARED_HEADS.register_module()
class ResLayer(BaseModule):
def __init__(self,
depth,
stage=3,
stride=2,
dilation=1,
style='pytorch',
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
with_cp=False,
dcn=None,
pretrained=None,
init_cfg=None):
super(ResLayer, self).__init__(init_cfg)
self.norm_eval = norm_eval
self.norm_cfg = norm_cfg
self.stage = stage
self.fp16_enabled = False
block, stage_blocks = ResNet.arch_settings[depth]
stage_block = stage_blocks[stage]
planes = 64 * 2**stage
inplanes = 64 * 2**(stage - 1) * block.expansion
res_layer = _ResLayer(
block,
inplanes,
planes,
stage_block,
stride=stride,
dilation=dilation,
style=style,
with_cp=with_cp,
norm_cfg=self.norm_cfg,
dcn=dcn)
self.add_module(f'layer{stage + 1}', res_layer)
assert not (init_cfg and pretrained), \
'init_cfg and pretrained cannot be specified at the same time'
if isinstance(pretrained, str):
warnings.warn('DeprecationWarning: pretrained is a deprecated, '
'please use "init_cfg" instead')
self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
elif pretrained is None:
if init_cfg is None:
self.init_cfg = [
dict(type='Kaiming', layer='Conv2d'),
dict(
type='Constant',
val=1,
layer=['_BatchNorm', 'GroupNorm'])
]
else:
raise TypeError('pretrained must be a str or None')
@auto_fp16()
def forward(self, x):
res_layer = getattr(self, f'layer{self.stage + 1}')
out = res_layer(x)
return out
def train(self, mode=True):
super(ResLayer, self).train(mode)
if self.norm_eval:
for m in self.modules():
if isinstance(m, nn.BatchNorm2d):
m.eval()
| 2,587 | 30.950617 | 76 | py |
mmdetection | mmdetection-master/mmdet/models/seg_heads/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .panoptic_fpn_head import PanopticFPNHead # noqa: F401,F403
from .panoptic_fusion_heads import * # noqa: F401,F403
| 170 | 41.75 | 65 | py |
mmdetection | mmdetection-master/mmdet/models/seg_heads/base_semantic_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
import torch.nn.functional as F
from mmcv.runner import BaseModule, force_fp32
from ..builder import build_loss
from ..utils import interpolate_as
class BaseSemanticHead(BaseModule, metaclass=ABCMeta):
"""Base module of Semantic Head.
Args:
num_classes (int): the number of classes.
init_cfg (dict): the initialization config.
loss_seg (dict): the loss of the semantic head.
"""
def __init__(self,
num_classes,
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss',
ignore_index=255,
loss_weight=1.0)):
super(BaseSemanticHead, self).__init__(init_cfg)
self.loss_seg = build_loss(loss_seg)
self.num_classes = num_classes
@force_fp32(apply_to=('seg_preds', ))
def loss(self, seg_preds, gt_semantic_seg):
"""Get the loss of semantic head.
Args:
seg_preds (Tensor): The input logits with the shape (N, C, H, W).
gt_semantic_seg: The ground truth of semantic segmentation with
the shape (N, H, W).
Returns:
dict: the loss of semantic head.
"""
if seg_preds.shape[-2:] != gt_semantic_seg.shape[-2:]:
seg_preds = interpolate_as(seg_preds, gt_semantic_seg)
seg_preds = seg_preds.permute((0, 2, 3, 1))
loss_seg = self.loss_seg(
seg_preds.reshape(-1, self.num_classes), # => [NxHxW, C]
gt_semantic_seg.reshape(-1).long())
return dict(loss_seg=loss_seg)
@abstractmethod
def forward(self, x):
"""Placeholder of forward function.
Returns:
dict[str, Tensor]: A dictionary, including features
and predicted scores. Required keys: 'seg_preds'
and 'feats'.
"""
pass
def forward_train(self, x, gt_semantic_seg):
output = self.forward(x)
seg_preds = output['seg_preds']
return self.loss(seg_preds, gt_semantic_seg)
def simple_test(self, x, img_metas, rescale=False):
output = self.forward(x)
seg_preds = output['seg_preds']
seg_preds = F.interpolate(
seg_preds,
size=img_metas[0]['pad_shape'][:2],
mode='bilinear',
align_corners=False)
if rescale:
h, w, _ = img_metas[0]['img_shape']
seg_preds = seg_preds[:, :, :h, :w]
h, w, _ = img_metas[0]['ori_shape']
seg_preds = F.interpolate(
seg_preds, size=(h, w), mode='bilinear', align_corners=False)
return seg_preds
| 2,849 | 31.758621 | 77 | py |
mmdetection | mmdetection-master/mmdet/models/seg_heads/panoptic_fpn_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
import torch.nn as nn
from mmcv.runner import ModuleList
from ..builder import HEADS
from ..utils import ConvUpsample
from .base_semantic_head import BaseSemanticHead
@HEADS.register_module()
class PanopticFPNHead(BaseSemanticHead):
"""PanopticFPNHead used in Panoptic FPN.
In this head, the number of output channels is ``num_stuff_classes
+ 1``, including all stuff classes and one thing class. The stuff
classes will be reset from ``0`` to ``num_stuff_classes - 1``, the
thing classes will be merged to ``num_stuff_classes``-th channel.
    Args:
num_things_classes (int): Number of thing classes. Default: 80.
num_stuff_classes (int): Number of stuff classes. Default: 53.
num_classes (int): Number of classes, including all stuff
classes and one thing class. This argument is deprecated,
please use ``num_things_classes`` and ``num_stuff_classes``.
The module will automatically infer the num_classes by
``num_stuff_classes + 1``.
in_channels (int): Number of channels in the input feature
map.
inner_channels (int): Number of channels in inner features.
start_level (int): The start level of the input features
used in PanopticFPN.
end_level (int): The end level of the used features, the
``end_level``-th layer will not be used.
fg_range (tuple): Range of the foreground classes. It starts
from ``0`` to ``num_things_classes-1``. Deprecated, please use
``num_things_classes`` directly.
bg_range (tuple): Range of the background classes. It starts
from ``num_things_classes`` to ``num_things_classes +
num_stuff_classes - 1``. Deprecated, please use
``num_stuff_classes`` and ``num_things_classes`` directly.
conv_cfg (dict): Dictionary to construct and config
conv layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Use ``GN`` by default.
init_cfg (dict or list[dict], optional): Initialization config dict.
loss_seg (dict): the loss of the semantic head.
"""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
num_classes=None,
in_channels=256,
inner_channels=128,
start_level=0,
end_level=4,
fg_range=None,
bg_range=None,
conv_cfg=None,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
init_cfg=None,
loss_seg=dict(
type='CrossEntropyLoss', ignore_index=-1,
loss_weight=1.0)):
if num_classes is not None:
warnings.warn(
'`num_classes` is deprecated now, please set '
'`num_stuff_classes` directly, the `num_classes` will be '
'set to `num_stuff_classes + 1`')
# num_classes = num_stuff_classes + 1 for PanopticFPN.
assert num_classes == num_stuff_classes + 1
super(PanopticFPNHead, self).__init__(num_stuff_classes + 1, init_cfg,
loss_seg)
self.num_things_classes = num_things_classes
self.num_stuff_classes = num_stuff_classes
if fg_range is not None and bg_range is not None:
self.fg_range = fg_range
self.bg_range = bg_range
self.num_things_classes = fg_range[1] - fg_range[0] + 1
self.num_stuff_classes = bg_range[1] - bg_range[0] + 1
warnings.warn(
'`fg_range` and `bg_range` are deprecated now, '
f'please use `num_things_classes`={self.num_things_classes} '
f'and `num_stuff_classes`={self.num_stuff_classes} instead.')
# Used feature layers are [start_level, end_level)
self.start_level = start_level
self.end_level = end_level
self.num_stages = end_level - start_level
self.inner_channels = inner_channels
self.conv_upsample_layers = ModuleList()
for i in range(start_level, end_level):
self.conv_upsample_layers.append(
ConvUpsample(
in_channels,
inner_channels,
num_layers=i if i > 0 else 1,
num_upsample=i if i > 0 else 0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
))
self.conv_logits = nn.Conv2d(inner_channels, self.num_classes, 1)
def _set_things_to_void(self, gt_semantic_seg):
"""Merge thing classes to one class.
    In PanopticFPN, the stuff (background) labels are remapped to the range
    `0` to `self.num_stuff_classes - 1`, and all thing (foreground) labels
    are merged into the `self.num_stuff_classes`-th class.
"""
gt_semantic_seg = gt_semantic_seg.int()
fg_mask = gt_semantic_seg < self.num_things_classes
bg_mask = (gt_semantic_seg >= self.num_things_classes) * (
gt_semantic_seg < self.num_things_classes + self.num_stuff_classes)
new_gt_seg = torch.clone(gt_semantic_seg)
new_gt_seg = torch.where(bg_mask,
gt_semantic_seg - self.num_things_classes,
new_gt_seg)
new_gt_seg = torch.where(fg_mask,
fg_mask.int() * self.num_stuff_classes,
new_gt_seg)
return new_gt_seg
def loss(self, seg_preds, gt_semantic_seg):
"""The loss of PanopticFPN head.
Things classes will be merged to one class in PanopticFPN.
"""
gt_semantic_seg = self._set_things_to_void(gt_semantic_seg)
return super().loss(seg_preds, gt_semantic_seg)
def init_weights(self):
super().init_weights()
nn.init.normal_(self.conv_logits.weight.data, 0, 0.01)
self.conv_logits.bias.data.zero_()
def forward(self, x):
        # the number of subnets must not be more than
        # the number of input feature levels.
assert self.num_stages <= len(x)
feats = []
for i, layer in enumerate(self.conv_upsample_layers):
f = layer(x[self.start_level + i])
feats.append(f)
feats = torch.sum(torch.stack(feats, dim=0), dim=0)
seg_preds = self.conv_logits(feats)
out = dict(seg_preds=seg_preds, feats=feats)
return out
| 6,675 | 41.794872 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/seg_heads/panoptic_fusion_heads/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .base_panoptic_fusion_head import \
BasePanopticFusionHead # noqa: F401,F403
from .heuristic_fusion_head import HeuristicFusionHead # noqa: F401,F403
from .maskformer_fusion_head import MaskFormerFusionHead # noqa: F401,F403
| 285 | 46.666667 | 75 | py |
mmdetection | mmdetection-master/mmdet/models/seg_heads/panoptic_fusion_heads/base_panoptic_fusion_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
from ...builder import build_loss
class BasePanopticFusionHead(BaseModule, metaclass=ABCMeta):
"""Base class for panoptic heads."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
loss_panoptic=None,
init_cfg=None,
**kwargs):
super(BasePanopticFusionHead, self).__init__(init_cfg)
self.num_things_classes = num_things_classes
self.num_stuff_classes = num_stuff_classes
self.num_classes = num_things_classes + num_stuff_classes
self.test_cfg = test_cfg
if loss_panoptic:
self.loss_panoptic = build_loss(loss_panoptic)
else:
self.loss_panoptic = None
@property
def with_loss(self):
"""bool: whether the panoptic head contains loss function."""
return self.loss_panoptic is not None
@abstractmethod
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""Forward function during training."""
@abstractmethod
def simple_test(self,
img_metas,
det_labels,
mask_preds,
seg_preds,
det_bboxes,
cfg=None,
**kwargs):
"""Test without augmentation."""
| 1,507 | 29.77551 | 75 | py |
mmdetection | mmdetection-master/mmdet/models/seg_heads/panoptic_fusion_heads/heuristic_fusion_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class HeuristicFusionHead(BasePanopticFusionHead):
"""Fusion Head with Heuristic method."""
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
init_cfg=None,
**kwargs):
super(HeuristicFusionHead,
self).__init__(num_things_classes, num_stuff_classes, test_cfg,
None, init_cfg, **kwargs)
def forward_train(self, gt_masks=None, gt_semantic_seg=None, **kwargs):
"""HeuristicFusionHead has no training loss."""
return dict()
def _lay_masks(self, bboxes, labels, masks, overlap_thr=0.5):
"""Lay instance masks to a result map.
Args:
            bboxes: The bboxes results with scores, (K, 5), where the last
                column is the score.
labels: The labels of bboxes, (K, ).
masks: The instance masks, (K, H, W).
overlap_thr: Threshold to determine whether two masks overlap.
default: 0.5.
Returns:
Tensor: The result map, (H, W).
"""
num_insts = bboxes.shape[0]
id_map = torch.zeros(
masks.shape[-2:], device=bboxes.device, dtype=torch.long)
if num_insts == 0:
return id_map, labels
scores, bboxes = bboxes[:, -1], bboxes[:, :4]
# Sort by score to use heuristic fusion
order = torch.argsort(-scores)
bboxes = bboxes[order]
labels = labels[order]
segm_masks = masks[order]
instance_id = 1
left_labels = []
for idx in range(bboxes.shape[0]):
_cls = labels[idx]
_mask = segm_masks[idx]
instance_id_map = torch.ones_like(
_mask, dtype=torch.long) * instance_id
area = _mask.sum()
if area == 0:
continue
pasted = id_map > 0
intersect = (_mask * pasted).sum()
if (intersect / (area + 1e-5)) > overlap_thr:
continue
_part = _mask * (~pasted)
id_map = torch.where(_part, instance_id_map, id_map)
left_labels.append(_cls)
instance_id += 1
if len(left_labels) > 0:
instance_labels = torch.stack(left_labels)
else:
instance_labels = bboxes.new_zeros((0, ), dtype=torch.long)
assert instance_id == (len(instance_labels) + 1)
return id_map, instance_labels
def simple_test(self, det_bboxes, det_labels, mask_preds, seg_preds,
**kwargs):
"""Fuse the results of instance and semantic segmentations.
Args:
            det_bboxes: The bboxes results with scores, (K, 5), where the last
                column is the score.
det_labels: The labels of bboxes, (K,).
mask_preds: The masks results, (K, H, W).
            seg_preds: The semantic segmentation results,
                (num_stuff + 1, H, W).
Returns:
Tensor : The panoptic segmentation result, (H, W).
"""
mask_preds = mask_preds >= self.test_cfg.mask_thr_binary
id_map, labels = self._lay_masks(det_bboxes, det_labels, mask_preds,
self.test_cfg.mask_overlap)
seg_results = seg_preds.argmax(dim=0)
seg_results = seg_results + self.num_things_classes
pan_results = seg_results
instance_id = 1
for idx in range(det_labels.shape[0]):
_mask = id_map == (idx + 1)
if _mask.sum() == 0:
continue
_cls = labels[idx]
# simply trust detection
segment_id = _cls + instance_id * INSTANCE_OFFSET
pan_results[_mask] = segment_id
instance_id += 1
ids, counts = torch.unique(
pan_results % INSTANCE_OFFSET, return_counts=True)
stuff_ids = ids[ids >= self.num_things_classes]
stuff_counts = counts[ids >= self.num_things_classes]
ignore_stuff_ids = stuff_ids[
stuff_counts < self.test_cfg.stuff_area_limit]
assert pan_results.ndim == 2
pan_results[(pan_results.unsqueeze(2) == ignore_stuff_ids.reshape(
1, 1, -1)).any(dim=2)] = self.num_classes
return pan_results
| 4,482 | 34.299213 | 77 | py |
mmdetection | mmdetection-master/mmdet/models/seg_heads/panoptic_fusion_heads/maskformer_fusion_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from mmdet.core.mask import mask2bbox
from mmdet.models.builder import HEADS
from .base_panoptic_fusion_head import BasePanopticFusionHead
@HEADS.register_module()
class MaskFormerFusionHead(BasePanopticFusionHead):
def __init__(self,
num_things_classes=80,
num_stuff_classes=53,
test_cfg=None,
loss_panoptic=None,
init_cfg=None,
**kwargs):
super().__init__(num_things_classes, num_stuff_classes, test_cfg,
loss_panoptic, init_cfg, **kwargs)
def forward_train(self, **kwargs):
"""MaskFormerFusionHead has no training loss."""
return dict()
def panoptic_postprocess(self, mask_cls, mask_pred):
"""Panoptic segmengation inference.
Args:
mask_cls (Tensor): Classfication outputs of shape
(num_queries, cls_out_channels) for a image.
Note `cls_out_channels` should includes
background.
mask_pred (Tensor): Mask outputs of shape
(num_queries, h, w) for a image.
Returns:
Tensor: Panoptic segment result of shape \
(h, w), each element in Tensor means: \
``segment_id = _cls + instance_id * INSTANCE_OFFSET``.
"""
object_mask_thr = self.test_cfg.get('object_mask_thr', 0.8)
iou_thr = self.test_cfg.get('iou_thr', 0.8)
filter_low_score = self.test_cfg.get('filter_low_score', False)
scores, labels = F.softmax(mask_cls, dim=-1).max(-1)
mask_pred = mask_pred.sigmoid()
keep = labels.ne(self.num_classes) & (scores > object_mask_thr)
cur_scores = scores[keep]
cur_classes = labels[keep]
cur_masks = mask_pred[keep]
cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks
h, w = cur_masks.shape[-2:]
panoptic_seg = torch.full((h, w),
self.num_classes,
dtype=torch.int32,
device=cur_masks.device)
if cur_masks.shape[0] == 0:
# We didn't detect any mask :(
pass
else:
cur_mask_ids = cur_prob_masks.argmax(0)
instance_id = 1
for k in range(cur_classes.shape[0]):
pred_class = int(cur_classes[k].item())
isthing = pred_class < self.num_things_classes
mask = cur_mask_ids == k
mask_area = mask.sum().item()
original_area = (cur_masks[k] >= 0.5).sum().item()
if filter_low_score:
mask = mask & (cur_masks[k] >= 0.5)
if mask_area > 0 and original_area > 0:
if mask_area / original_area < iou_thr:
continue
if not isthing:
# different stuff regions of same class will be
# merged here, and stuff share the instance_id 0.
panoptic_seg[mask] = pred_class
else:
panoptic_seg[mask] = (
pred_class + instance_id * INSTANCE_OFFSET)
instance_id += 1
return panoptic_seg
def semantic_postprocess(self, mask_cls, mask_pred):
"""Semantic segmengation postprocess.
Args:
mask_cls (Tensor): Classfication outputs of shape
(num_queries, cls_out_channels) for a image.
Note `cls_out_channels` should includes
background.
mask_pred (Tensor): Mask outputs of shape
(num_queries, h, w) for a image.
Returns:
Tensor: Semantic segment result of shape \
(cls_out_channels, h, w).
"""
# TODO add semantic segmentation result
raise NotImplementedError
def instance_postprocess(self, mask_cls, mask_pred):
"""Instance segmengation postprocess.
Args:
mask_cls (Tensor): Classfication outputs of shape
(num_queries, cls_out_channels) for a image.
Note `cls_out_channels` should includes
background.
mask_pred (Tensor): Mask outputs of shape
(num_queries, h, w) for a image.
Returns:
tuple[Tensor]: Instance segmentation results.
- labels_per_image (Tensor): Predicted labels,\
shape (n, ).
- bboxes (Tensor): Bboxes and scores with shape (n, 5) of \
positive region in binary mask, the last column is scores.
- mask_pred_binary (Tensor): Instance masks of \
shape (n, h, w).
"""
max_per_image = self.test_cfg.get('max_per_image', 100)
num_queries = mask_cls.shape[0]
# shape (num_queries, num_class)
scores = F.softmax(mask_cls, dim=-1)[:, :-1]
# shape (num_queries * num_class, )
labels = torch.arange(self.num_classes, device=mask_cls.device).\
unsqueeze(0).repeat(num_queries, 1).flatten(0, 1)
scores_per_image, top_indices = scores.flatten(0, 1).topk(
max_per_image, sorted=False)
labels_per_image = labels[top_indices]
query_indices = top_indices // self.num_classes
mask_pred = mask_pred[query_indices]
# extract things
is_thing = labels_per_image < self.num_things_classes
scores_per_image = scores_per_image[is_thing]
labels_per_image = labels_per_image[is_thing]
mask_pred = mask_pred[is_thing]
mask_pred_binary = (mask_pred > 0).float()
mask_scores_per_image = (mask_pred.sigmoid() *
mask_pred_binary).flatten(1).sum(1) / (
mask_pred_binary.flatten(1).sum(1) + 1e-6)
det_scores = scores_per_image * mask_scores_per_image
mask_pred_binary = mask_pred_binary.bool()
bboxes = mask2bbox(mask_pred_binary)
bboxes = torch.cat([bboxes, det_scores[:, None]], dim=-1)
return labels_per_image, bboxes, mask_pred_binary
def simple_test(self,
mask_cls_results,
mask_pred_results,
img_metas,
rescale=False,
**kwargs):
"""Test segment without test-time aumengtation.
Only the output of last decoder layers was used.
Args:
mask_cls_results (Tensor): Mask classification logits,
shape (batch_size, num_queries, cls_out_channels).
                Note `cls_out_channels` should include background.
mask_pred_results (Tensor): Mask logits, shape
(batch_size, num_queries, h, w).
img_metas (list[dict]): List of image information.
rescale (bool, optional): If True, return boxes in
original image space. Default False.
Returns:
list[dict[str, Tensor | tuple[Tensor]]]: Semantic segmentation \
results and panoptic segmentation results for each \
image.
.. code-block:: none
[
{
'pan_results': Tensor, # shape = [h, w]
'ins_results': tuple[Tensor],
# semantic segmentation results are not supported yet
'sem_results': Tensor
},
...
]
"""
panoptic_on = self.test_cfg.get('panoptic_on', True)
semantic_on = self.test_cfg.get('semantic_on', False)
instance_on = self.test_cfg.get('instance_on', False)
        assert not semantic_on, 'semantic segmentation '\
'results are not supported yet.'
results = []
for mask_cls_result, mask_pred_result, meta in zip(
mask_cls_results, mask_pred_results, img_metas):
# remove padding
img_height, img_width = meta['img_shape'][:2]
mask_pred_result = mask_pred_result[:, :img_height, :img_width]
if rescale:
# return result in original resolution
ori_height, ori_width = meta['ori_shape'][:2]
mask_pred_result = F.interpolate(
mask_pred_result[:, None],
size=(ori_height, ori_width),
mode='bilinear',
align_corners=False)[:, 0]
result = dict()
if panoptic_on:
pan_results = self.panoptic_postprocess(
mask_cls_result, mask_pred_result)
result['pan_results'] = pan_results
if instance_on:
ins_results = self.instance_postprocess(
mask_cls_result, mask_pred_result)
result['ins_results'] = ins_results
if semantic_on:
sem_results = self.semantic_postprocess(
mask_cls_result, mask_pred_result)
result['sem_results'] = sem_results
results.append(result)
return results
| 9,430 | 37.971074 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/utils/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .brick_wrappers import AdaptiveAvgPool2d, adaptive_avg_pool2d
from .builder import build_linear_layer, build_transformer
from .ckpt_convert import pvt_convert
from .conv_upsample import ConvUpsample
from .csp_layer import CSPLayer
from .gaussian_target import gaussian_radius, gen_gaussian_target
from .inverted_residual import InvertedResidual
from .make_divisible import make_divisible
from .misc import interpolate_as, sigmoid_geometric_mean
from .normed_predictor import NormedConv2d, NormedLinear
from .panoptic_gt_processing import preprocess_panoptic_gt
from .point_sample import (get_uncertain_point_coords_with_randomness,
get_uncertainty)
from .positional_encoding import (LearnedPositionalEncoding,
SinePositionalEncoding)
from .res_layer import ResLayer, SimplifiedBasicBlock
from .se_layer import DyReLU, SELayer
from .transformer import (DetrTransformerDecoder, DetrTransformerDecoderLayer,
DynamicConv, PatchEmbed, Transformer, nchw_to_nlc,
nlc_to_nchw)
__all__ = [
'ResLayer', 'gaussian_radius', 'gen_gaussian_target',
'DetrTransformerDecoderLayer', 'DetrTransformerDecoder', 'Transformer',
'build_transformer', 'build_linear_layer', 'SinePositionalEncoding',
'LearnedPositionalEncoding', 'DynamicConv', 'SimplifiedBasicBlock',
'NormedLinear', 'NormedConv2d', 'make_divisible', 'InvertedResidual',
'SELayer', 'interpolate_as', 'ConvUpsample', 'CSPLayer',
'adaptive_avg_pool2d', 'AdaptiveAvgPool2d', 'PatchEmbed', 'nchw_to_nlc',
'nlc_to_nchw', 'pvt_convert', 'sigmoid_geometric_mean',
'preprocess_panoptic_gt', 'DyReLU',
'get_uncertain_point_coords_with_randomness', 'get_uncertainty'
]
| 1,809 | 50.714286 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/utils/brick_wrappers.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn.bricks.wrappers import NewEmptyTensorOp, obsolete_torch_version
if torch.__version__ == 'parrots':
TORCH_VERSION = torch.__version__
else:
# torch.__version__ could be 1.3.1+cu92, we only need the first two
# for comparison
TORCH_VERSION = tuple(int(x) for x in torch.__version__.split('.')[:2])
def adaptive_avg_pool2d(input, output_size):
"""Handle empty batch dimension to adaptive_avg_pool2d.
Args:
input (tensor): 4D tensor.
output_size (int, tuple[int,int]): the target output size.
"""
if input.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
if isinstance(output_size, int):
output_size = [output_size, output_size]
output_size = [*input.shape[:2], *output_size]
empty = NewEmptyTensorOp.apply(input, output_size)
return empty
else:
return F.adaptive_avg_pool2d(input, output_size)
class AdaptiveAvgPool2d(nn.AdaptiveAvgPool2d):
"""Handle empty batch dimension to AdaptiveAvgPool2d."""
def forward(self, x):
# PyTorch 1.9 does not support empty tensor inference yet
if x.numel() == 0 and obsolete_torch_version(TORCH_VERSION, (1, 9)):
output_size = self.output_size
if isinstance(output_size, int):
output_size = [output_size, output_size]
else:
output_size = [
v if v is not None else d
for v, d in zip(output_size,
x.size()[-2:])
]
output_size = [*x.shape[:2], *output_size]
empty = NewEmptyTensorOp.apply(x, output_size)
return empty
return super().forward(x)
| 1,856 | 34.711538 | 77 | py |
mmdetection | mmdetection-master/mmdet/models/utils/builder.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.utils import Registry, build_from_cfg
TRANSFORMER = Registry('Transformer')
LINEAR_LAYERS = Registry('linear layers')
def build_transformer(cfg, default_args=None):
"""Builder for Transformer."""
return build_from_cfg(cfg, TRANSFORMER, default_args)
LINEAR_LAYERS.register_module('Linear', module=nn.Linear)
def build_linear_layer(cfg, *args, **kwargs):
"""Build linear layer.
Args:
cfg (None or dict): The linear layer config, which should contain:
- type (str): Layer type.
- layer args: Args needed to instantiate an linear layer.
args (argument list): Arguments passed to the `__init__`
method of the corresponding linear layer.
kwargs (keyword arguments): Keyword arguments passed to the `__init__`
method of the corresponding linear layer.
Returns:
nn.Module: Created linear layer.
"""
if cfg is None:
cfg_ = dict(type='Linear')
else:
if not isinstance(cfg, dict):
raise TypeError('cfg must be a dict')
if 'type' not in cfg:
raise KeyError('the cfg dict must contain the key "type"')
cfg_ = cfg.copy()
layer_type = cfg_.pop('type')
if layer_type not in LINEAR_LAYERS:
raise KeyError(f'Unrecognized linear type {layer_type}')
else:
linear_layer = LINEAR_LAYERS.get(layer_type)
layer = linear_layer(*args, **kwargs, **cfg_)
return layer
| 1,535 | 31 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/utils/ckpt_convert.py | # Copyright (c) OpenMMLab. All rights reserved.
# This script consists of several convert functions which
# can modify the weights of model in original repo to be
# pre-trained weights.
from collections import OrderedDict
import torch
def pvt_convert(ckpt):
new_ckpt = OrderedDict()
# Process the concat between q linear weights and kv linear weights
use_abs_pos_embed = False
use_conv_ffn = False
for k in ckpt.keys():
if k.startswith('pos_embed'):
use_abs_pos_embed = True
if k.find('dwconv') >= 0:
use_conv_ffn = True
for k, v in ckpt.items():
if k.startswith('head'):
continue
if k.startswith('norm.'):
continue
if k.startswith('cls_token'):
continue
if k.startswith('pos_embed'):
stage_i = int(k.replace('pos_embed', ''))
new_k = k.replace(f'pos_embed{stage_i}',
f'layers.{stage_i - 1}.1.0.pos_embed')
if stage_i == 4 and v.size(1) == 50: # 1 (cls token) + 7 * 7
new_v = v[:, 1:, :] # remove cls token
else:
new_v = v
elif k.startswith('patch_embed'):
stage_i = int(k.split('.')[0].replace('patch_embed', ''))
new_k = k.replace(f'patch_embed{stage_i}',
f'layers.{stage_i - 1}.0')
new_v = v
if 'proj.' in new_k:
new_k = new_k.replace('proj.', 'projection.')
elif k.startswith('block'):
stage_i = int(k.split('.')[0].replace('block', ''))
layer_i = int(k.split('.')[1])
new_layer_i = layer_i + use_abs_pos_embed
new_k = k.replace(f'block{stage_i}.{layer_i}',
f'layers.{stage_i - 1}.1.{new_layer_i}')
new_v = v
if 'attn.q.' in new_k:
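                # merge the separate q and kv projection weights of the
                # original checkpoint into a single in_proj tensor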
sub_item_k = k.replace('q.', 'kv.')
new_k = new_k.replace('q.', 'attn.in_proj_')
new_v = torch.cat([v, ckpt[sub_item_k]], dim=0)
elif 'attn.kv.' in new_k:
continue
elif 'attn.proj.' in new_k:
new_k = new_k.replace('proj.', 'attn.out_proj.')
elif 'attn.sr.' in new_k:
new_k = new_k.replace('sr.', 'sr.')
elif 'mlp.' in new_k:
string = f'{new_k}-'
new_k = new_k.replace('mlp.', 'ffn.layers.')
if 'fc1.weight' in new_k or 'fc2.weight' in new_k:
new_v = v.reshape((*v.shape, 1, 1))
new_k = new_k.replace('fc1.', '0.')
new_k = new_k.replace('dwconv.dwconv.', '1.')
if use_conv_ffn:
new_k = new_k.replace('fc2.', '4.')
else:
new_k = new_k.replace('fc2.', '3.')
string += f'{new_k} {v.shape}-{new_v.shape}'
elif k.startswith('norm'):
stage_i = int(k[4])
new_k = k.replace(f'norm{stage_i}', f'layers.{stage_i - 1}.2')
new_v = v
else:
new_k = k
new_v = v
new_ckpt[new_k] = new_v
return new_ckpt
def swin_converter(ckpt):
new_ckpt = OrderedDict()
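    # The two helpers below permute the patch-merging reduction/norm weights
    # from the official checkpoint's channel grouping (0, 1, 2, 3) to
    # (0, 2, 1, 3), matching mmcv's PatchMerging unfold order.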
def correct_unfold_reduction_order(x):
out_channel, in_channel = x.shape
x = x.reshape(out_channel, 4, in_channel // 4)
x = x[:, [0, 2, 1, 3], :].transpose(1,
2).reshape(out_channel, in_channel)
return x
def correct_unfold_norm_order(x):
in_channel = x.shape[0]
x = x.reshape(4, in_channel // 4)
x = x[[0, 2, 1, 3], :].transpose(0, 1).reshape(in_channel)
return x
for k, v in ckpt.items():
if k.startswith('head'):
continue
elif k.startswith('layers'):
new_v = v
if 'attn.' in k:
new_k = k.replace('attn.', 'attn.w_msa.')
elif 'mlp.' in k:
if 'mlp.fc1.' in k:
new_k = k.replace('mlp.fc1.', 'ffn.layers.0.0.')
elif 'mlp.fc2.' in k:
new_k = k.replace('mlp.fc2.', 'ffn.layers.1.')
else:
new_k = k.replace('mlp.', 'ffn.')
elif 'downsample' in k:
new_k = k
if 'reduction.' in k:
new_v = correct_unfold_reduction_order(v)
elif 'norm.' in k:
new_v = correct_unfold_norm_order(v)
else:
new_k = k
new_k = new_k.replace('layers', 'stages', 1)
elif k.startswith('patch_embed'):
new_v = v
if 'proj' in k:
new_k = k.replace('proj', 'projection')
else:
new_k = k
else:
new_v = v
new_k = k
new_ckpt['backbone.' + new_k] = new_v
return new_ckpt
| 4,964 | 34.978261 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/utils/conv_upsample.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, ModuleList
class ConvUpsample(BaseModule):
"""ConvUpsample performs 2x upsampling after Conv.
There are several `ConvModule` layers. In the first few layers, upsampling
    will be applied after each layer of convolution. The number of upsampling
    operations must be no more than the number of ConvModule layers.
Args:
in_channels (int): Number of channels in the input feature map.
inner_channels (int): Number of channels produced by the convolution.
num_layers (int): Number of convolution layers.
        num_upsample (int, optional): Number of upsampling layers. Must be no
more than num_layers. Upsampling will be applied after the first
``num_upsample`` layers of convolution. Default: ``num_layers``.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer. Default: None.
init_cfg (dict): Config dict for initialization. Default: None.
kwargs (key word augments): Other augments used in ConvModule.
"""
def __init__(self,
in_channels,
inner_channels,
num_layers=1,
num_upsample=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
**kwargs):
super(ConvUpsample, self).__init__(init_cfg)
if num_upsample is None:
num_upsample = num_layers
assert num_upsample <= num_layers, \
f'num_upsample({num_upsample})must be no more than ' \
f'num_layers({num_layers})'
self.num_layers = num_layers
self.num_upsample = num_upsample
self.conv = ModuleList()
for i in range(num_layers):
self.conv.append(
ConvModule(
in_channels,
inner_channels,
3,
padding=1,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
in_channels = inner_channels
def forward(self, x):
num_upsample = self.num_upsample
for i in range(self.num_layers):
x = self.conv[i](x)
if num_upsample > 0:
num_upsample -= 1
x = F.interpolate(
x, scale_factor=2, mode='bilinear', align_corners=False)
return x
| 2,653 | 38.029412 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/utils/csp_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
class DarknetBottleneck(BaseModule):
"""The basic bottleneck block used in Darknet.
Each ResBlock consists of two ConvModules and the input is added to the
final output. Each ConvModule is composed of Conv, BN, and LeakyReLU.
    The first conv layer has a filter size of 1x1 and the second one has a
    filter size of 3x3.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
        expansion (float): The expand ratio of the hidden channels
            (``hidden_channels = out_channels * expansion``). Default: 0.5
add_identity (bool): Whether to add identity to the out.
Default: True
use_depthwise (bool): Whether to use depthwise separable convolution.
Default: False
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish').
"""
def __init__(self,
in_channels,
out_channels,
expansion=0.5,
add_identity=True,
use_depthwise=False,
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish'),
init_cfg=None):
super().__init__(init_cfg)
hidden_channels = int(out_channels * expansion)
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
self.conv1 = ConvModule(
in_channels,
hidden_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.conv2 = conv(
hidden_channels,
out_channels,
3,
stride=1,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.add_identity = \
add_identity and in_channels == out_channels
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
if self.add_identity:
return out + identity
else:
return out
class CSPLayer(BaseModule):
"""Cross Stage Partial Layer.
Args:
in_channels (int): The input channels of the CSP layer.
out_channels (int): The output channels of the CSP layer.
expand_ratio (float): Ratio to adjust the number of channels of the
hidden layer. Default: 0.5
num_blocks (int): Number of blocks. Default: 1
add_identity (bool): Whether to add identity in blocks.
Default: True
        use_depthwise (bool): Whether to use depthwise separable convolution
            in blocks. Default: False
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN')
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish')
"""
def __init__(self,
in_channels,
out_channels,
expand_ratio=0.5,
num_blocks=1,
add_identity=True,
use_depthwise=False,
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish'),
init_cfg=None):
super().__init__(init_cfg)
mid_channels = int(out_channels * expand_ratio)
self.main_conv = ConvModule(
in_channels,
mid_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.short_conv = ConvModule(
in_channels,
mid_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.final_conv = ConvModule(
2 * mid_channels,
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.blocks = nn.Sequential(*[
DarknetBottleneck(
mid_channels,
mid_channels,
1.0,
add_identity,
use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg) for _ in range(num_blocks)
])
def forward(self, x):
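        # Cross Stage Partial: project the input into two parallel 1x1-conv
        # branches, run the bottleneck blocks on the main branch only, then
        # concatenate both branches and fuse with a final 1x1 conv.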
x_short = self.short_conv(x)
x_main = self.main_conv(x)
x_main = self.blocks(x_main)
x_final = torch.cat((x_main, x_short), dim=1)
return self.final_conv(x_final)
| 5,079 | 32.642384 | 77 | py |
mmdetection | mmdetection-master/mmdet/models/utils/gaussian_target.py | # Copyright (c) OpenMMLab. All rights reserved.
from math import sqrt
import torch
import torch.nn.functional as F
def gaussian2D(radius, sigma=1, dtype=torch.float32, device='cpu'):
"""Generate 2D gaussian kernel.
Args:
radius (int): Radius of gaussian kernel.
sigma (int): Sigma of gaussian function. Default: 1.
dtype (torch.dtype): Dtype of gaussian tensor. Default: torch.float32.
device (str): Device of gaussian tensor. Default: 'cpu'.
Returns:
h (Tensor): Gaussian kernel with a
``(2 * radius + 1) * (2 * radius + 1)`` shape.
"""
x = torch.arange(
-radius, radius + 1, dtype=dtype, device=device).view(1, -1)
y = torch.arange(
-radius, radius + 1, dtype=dtype, device=device).view(-1, 1)
h = (-(x * x + y * y) / (2 * sigma * sigma)).exp()
h[h < torch.finfo(h.dtype).eps * h.max()] = 0
return h
def gen_gaussian_target(heatmap, center, radius, k=1):
"""Generate 2D gaussian heatmap.
Args:
heatmap (Tensor): Input heatmap, the gaussian kernel will cover on
it and maintain the max value.
center (list[int]): Coord of gaussian kernel's center.
radius (int): Radius of gaussian kernel.
k (int): Coefficient of gaussian kernel. Default: 1.
Returns:
out_heatmap (Tensor): Updated heatmap covered by gaussian kernel.
"""
diameter = 2 * radius + 1
gaussian_kernel = gaussian2D(
radius, sigma=diameter / 6, dtype=heatmap.dtype, device=heatmap.device)
x, y = center
height, width = heatmap.shape[:2]
left, right = min(x, radius), min(width - x, radius + 1)
top, bottom = min(y, radius), min(height - y, radius + 1)
masked_heatmap = heatmap[y - top:y + bottom, x - left:x + right]
masked_gaussian = gaussian_kernel[radius - top:radius + bottom,
radius - left:radius + right]
out_heatmap = heatmap
torch.max(
masked_heatmap,
masked_gaussian * k,
out=out_heatmap[y - top:y + bottom, x - left:x + right])
return out_heatmap
def gaussian_radius(det_size, min_overlap):
r"""Generate 2D gaussian radius.
This function is modified from the `official github repo
<https://github.com/princeton-vl/CornerNet-Lite/blob/master/core/sample/
utils.py#L65>`_.
Given ``min_overlap``, radius could computed by a quadratic equation
according to Vieta's formulas.
There are 3 cases for computing gaussian radius, details are following:
- Explanation of figure: ``lt`` and ``br`` indicates the left-top and
bottom-right corner of ground truth box. ``x`` indicates the
generated corner at the limited position when ``radius=r``.
- Case1: one corner is inside the gt box and the other is outside.
.. code:: text
|< width >|
lt-+----------+ -
| | | ^
+--x----------+--+
| | | |
| | | | height
| | overlap | |
| | | |
| | | | v
+--+---------br--+ -
| | |
+----------+--x
To ensure IoU of generated box and gt box is larger than ``min_overlap``:
.. math::
\cfrac{(w-r)*(h-r)}{w*h+(w+h)r-r^2} \ge {iou} \quad\Rightarrow\quad
{r^2-(w+h)r+\cfrac{1-iou}{1+iou}*w*h} \ge 0 \\
{a} = 1,\quad{b} = {-(w+h)},\quad{c} = {\cfrac{1-iou}{1+iou}*w*h} \\
{r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
- Case2: both two corners are inside the gt box.
.. code:: text
|< width >|
lt-+----------+ -
| | | ^
+--x-------+ |
| | | |
| |overlap| | height
| | | |
| +-------x--+
| | | v
+----------+-br -
To ensure IoU of generated box and gt box is larger than ``min_overlap``:
.. math::
\cfrac{(w-2*r)*(h-2*r)}{w*h} \ge {iou} \quad\Rightarrow\quad
{4r^2-2(w+h)r+(1-iou)*w*h} \ge 0 \\
{a} = 4,\quad {b} = {-2(w+h)},\quad {c} = {(1-iou)*w*h} \\
{r} \le \cfrac{-b-\sqrt{b^2-4*a*c}}{2*a}
- Case3: both two corners are outside the gt box.
.. code:: text
|< width >|
x--+----------------+
| | |
+-lt-------------+ | -
| | | | ^
| | | |
| | overlap | | height
| | | |
| | | | v
| +------------br--+ -
| | |
+----------------+--x
To ensure IoU of generated box and gt box is larger than ``min_overlap``:
.. math::
\cfrac{w*h}{(w+2*r)*(h+2*r)} \ge {iou} \quad\Rightarrow\quad
{4*iou*r^2+2*iou*(w+h)r+(iou-1)*w*h} \le 0 \\
{a} = {4*iou},\quad {b} = {2*iou*(w+h)},\quad {c} = {(iou-1)*w*h} \\
{r} \le \cfrac{-b+\sqrt{b^2-4*a*c}}{2*a}
Args:
det_size (list[int]): Shape of object.
min_overlap (float): Min IoU with ground truth for boxes generated by
keypoints inside the gaussian kernel.
Returns:
radius (int): Radius of gaussian kernel.
"""
height, width = det_size
a1 = 1
b1 = (height + width)
c1 = width * height * (1 - min_overlap) / (1 + min_overlap)
sq1 = sqrt(b1**2 - 4 * a1 * c1)
r1 = (b1 - sq1) / (2 * a1)
a2 = 4
b2 = 2 * (height + width)
c2 = (1 - min_overlap) * width * height
sq2 = sqrt(b2**2 - 4 * a2 * c2)
r2 = (b2 - sq2) / (2 * a2)
a3 = 4 * min_overlap
b3 = -2 * min_overlap * (height + width)
c3 = (min_overlap - 1) * width * height
sq3 = sqrt(b3**2 - 4 * a3 * c3)
r3 = (b3 + sq3) / (2 * a3)
return min(r1, r2, r3)
def get_local_maximum(heat, kernel=3):
"""Extract local maximum pixel with given kernel.
Args:
heat (Tensor): Target heatmap.
kernel (int): Kernel size of max pooling. Default: 3.
Returns:
        heat (Tensor): A heatmap where local maximum pixels keep their
            own values and all other positions are set to 0.
"""
pad = (kernel - 1) // 2
hmax = F.max_pool2d(heat, kernel, stride=1, padding=pad)
keep = (hmax == heat).float()
return heat * keep
def get_topk_from_heatmap(scores, k=20):
"""Get top k positions from heatmap.
Args:
scores (Tensor): Target heatmap with shape
[batch, num_classes, height, width].
k (int): Target number. Default: 20.
Returns:
tuple[torch.Tensor]: Scores, indexes, categories and coords of
topk keypoint. Containing following Tensors:
- topk_scores (Tensor): Max scores of each topk keypoint.
- topk_inds (Tensor): Indexes of each topk keypoint.
- topk_clses (Tensor): Categories of each topk keypoint.
- topk_ys (Tensor): Y-coord of each topk keypoint.
- topk_xs (Tensor): X-coord of each topk keypoint.
"""
batch, _, height, width = scores.size()
topk_scores, topk_inds = torch.topk(scores.view(batch, -1), k)
topk_clses = topk_inds // (height * width)
topk_inds = topk_inds % (height * width)
topk_ys = topk_inds // width
topk_xs = (topk_inds % width).int().float()
return topk_scores, topk_inds, topk_clses, topk_ys, topk_xs
def gather_feat(feat, ind, mask=None):
"""Gather feature according to index.
Args:
feat (Tensor): Target feature map.
ind (Tensor): Target coord index.
mask (Tensor | None): Mask of feature map. Default: None.
Returns:
feat (Tensor): Gathered feature.
"""
dim = feat.size(2)
ind = ind.unsqueeze(2).repeat(1, 1, dim)
feat = feat.gather(1, ind)
if mask is not None:
mask = mask.unsqueeze(2).expand_as(feat)
feat = feat[mask]
feat = feat.view(-1, dim)
return feat
def transpose_and_gather_feat(feat, ind):
"""Transpose and gather feature according to index.
Args:
feat (Tensor): Target feature map.
ind (Tensor): Target coord index.
Returns:
feat (Tensor): Transposed and gathered feature.
"""
feat = feat.permute(0, 2, 3, 1).contiguous()
feat = feat.view(feat.size(0), -1, feat.size(3))
feat = gather_feat(feat, ind)
return feat
| 8,399 | 30.226766 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/utils/inverted_residual.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.utils.checkpoint as cp
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import DropPath
from mmcv.runner import BaseModule
from .se_layer import SELayer
class InvertedResidual(BaseModule):
"""Inverted Residual Block.
Args:
in_channels (int): The input channels of this Module.
out_channels (int): The output channels of this Module.
mid_channels (int): The input channels of the depthwise convolution.
kernel_size (int): The kernel size of the depthwise convolution.
Default: 3.
stride (int): The stride of the depthwise convolution. Default: 1.
se_cfg (dict): Config dict for se layer. Default: None, which means no
se layer.
with_expand_conv (bool): Use expand conv or not. If set False,
mid_channels must be the same with in_channels.
Default: True.
conv_cfg (dict): Config dict for convolution layer. Default: None,
which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
drop_path_rate (float): stochastic depth rate. Defaults to 0.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed. Default: False.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
Returns:
Tensor: The output tensor.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
kernel_size=3,
stride=1,
se_cfg=None,
with_expand_conv=True,
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
drop_path_rate=0.,
with_cp=False,
init_cfg=None):
super(InvertedResidual, self).__init__(init_cfg)
self.with_res_shortcut = (stride == 1 and in_channels == out_channels)
assert stride in [1, 2], f'stride must in [1, 2]. ' \
f'But received {stride}.'
self.with_cp = with_cp
self.drop_path = DropPath(
drop_path_rate) if drop_path_rate > 0 else nn.Identity()
self.with_se = se_cfg is not None
self.with_expand_conv = with_expand_conv
if self.with_se:
assert isinstance(se_cfg, dict)
if not self.with_expand_conv:
assert mid_channels == in_channels
if self.with_expand_conv:
self.expand_conv = ConvModule(
in_channels=in_channels,
out_channels=mid_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.depthwise_conv = ConvModule(
in_channels=mid_channels,
out_channels=mid_channels,
kernel_size=kernel_size,
stride=stride,
padding=kernel_size // 2,
groups=mid_channels,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
if self.with_se:
self.se = SELayer(**se_cfg)
self.linear_conv = ConvModule(
in_channels=mid_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
def forward(self, x):
def _inner_forward(x):
out = x
if self.with_expand_conv:
out = self.expand_conv(out)
out = self.depthwise_conv(out)
if self.with_se:
out = self.se(out)
out = self.linear_conv(out)
if self.with_res_shortcut:
return x + self.drop_path(out)
else:
return out
if self.with_cp and x.requires_grad:
out = cp.checkpoint(_inner_forward, x)
else:
out = _inner_forward(x)
return out
| 4,380 | 32.442748 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/utils/make_divisible.py | # Copyright (c) OpenMMLab. All rights reserved.
def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
"""Make divisible function.
This function rounds the channel number to the nearest value that can be
divisible by the divisor. It is taken from the original tf repo. It ensures
that all layers have a channel number that is divisible by divisor. It can
be seen here: https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py # noqa
Args:
value (int): The original channel number.
divisor (int): The divisor to fully divide the channel number.
min_value (int): The minimum value of the output channel.
Default: None, means that the minimum value equal to the divisor.
min_ratio (float): The minimum ratio of the rounded channel number to
the original channel number. Default: 0.9.
Returns:
int: The modified output channel number.
"""
if min_value is None:
min_value = divisor
new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than (1-min_ratio).
if new_value < min_ratio * value:
new_value += divisor
return new_value
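if __name__ == '__main__':
    # Illustrative sanity checks added for clarity; they are not part of the
    # original module. The last case shows the min_ratio rule: 22 would round
    # down to 16, which is below 0.9 * 22, so one extra divisor is added.
    assert make_divisible(32 * 1.5, 8) == 48
    assert make_divisible(7, 8) == 8
    assert make_divisible(22, 16) == 32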
| 1,279 | 43.137931 | 116 | py |
mmdetection | mmdetection-master/mmdet/models/utils/misc.py | # Copyright (c) OpenMMLab. All rights reserved.
from torch.autograd import Function
from torch.nn import functional as F
class SigmoidGeometricMean(Function):
"""Forward and backward function of geometric mean of two sigmoid
functions.
This implementation with analytical gradient function substitutes
the autograd function of (x.sigmoid() * y.sigmoid()).sqrt(). The
    original implementation incurs NaN during gradient backpropagation
if both x and y are very small values.
"""
@staticmethod
def forward(ctx, x, y):
x_sigmoid = x.sigmoid()
y_sigmoid = y.sigmoid()
z = (x_sigmoid * y_sigmoid).sqrt()
ctx.save_for_backward(x_sigmoid, y_sigmoid, z)
return z
@staticmethod
def backward(ctx, grad_output):
x_sigmoid, y_sigmoid, z = ctx.saved_tensors
grad_x = grad_output * z * (1 - x_sigmoid) / 2
grad_y = grad_output * z * (1 - y_sigmoid) / 2
return grad_x, grad_y
sigmoid_geometric_mean = SigmoidGeometricMean.apply
def interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` to the shape of the `target`.
The `source` must be a Tensor, but the `target` can be a Tensor or a
np.ndarray with the shape (..., target_h, target_w).
Args:
source (Tensor): A 3D/4D Tensor with the shape (N, H, W) or
(N, C, H, W).
target (Tensor | np.ndarray): The interpolation target with the shape
(..., target_h, target_w).
mode (str): Algorithm used for interpolation. The options are the
same as those in F.interpolate(). Default: ``'bilinear'``.
align_corners (bool): The same as the argument in F.interpolate().
Returns:
Tensor: The interpolated source Tensor.
"""
assert len(target.shape) >= 2
def _interpolate_as(source, target, mode='bilinear', align_corners=False):
"""Interpolate the `source` (4D) to the shape of the `target`."""
target_h, target_w = target.shape[-2:]
source_h, source_w = source.shape[-2:]
if target_h != source_h or target_w != source_w:
source = F.interpolate(
source,
size=(target_h, target_w),
mode=mode,
align_corners=align_corners)
return source
if len(source.shape) == 3:
source = source[:, None, :, :]
source = _interpolate_as(source, target, mode, align_corners)
return source[:, 0, :, :]
else:
return _interpolate_as(source, target, mode, align_corners)
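if __name__ == '__main__':
    # Illustrative usage sketch added for clarity; it is not part of the
    # original module. Shapes are arbitrary examples.
    import torch
    logits_a = torch.randn(4, 80, requires_grad=True)
    logits_b = torch.randn(4, 80, requires_grad=True)
    joint = sigmoid_geometric_mean(logits_a, logits_b)
    joint.sum().backward()  # uses the analytical backward defined above
    seg_logits = torch.randn(2, 19, 32, 32)
    target = torch.zeros(2, 128, 128)
    assert interpolate_as(seg_logits, target).shape == (2, 19, 128, 128)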
| 2,606 | 34.712329 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/utils/normed_predictor.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import CONV_LAYERS
from .builder import LINEAR_LAYERS
@LINEAR_LAYERS.register_module(name='NormedLinear')
class NormedLinear(nn.Linear):
"""Normalized Linear Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
power (int, optional): Power term. Default to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
"""
def __init__(self, *args, tempearture=20, power=1.0, eps=1e-6, **kwargs):
super(NormedLinear, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.eps = eps
self.init_weights()
def init_weights(self):
nn.init.normal_(self.weight, mean=0, std=0.01)
if self.bias is not None:
nn.init.constant_(self.bias, 0)
def forward(self, x):
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
return F.linear(x_, weight_, self.bias)
@CONV_LAYERS.register_module(name='NormedConv2d')
class NormedConv2d(nn.Conv2d):
"""Normalized Conv2d Layer.
Args:
        tempearture (float, optional): Temperature term. Default to 20.
power (int, optional): Power term. Default to 1.0.
eps (float, optional): The minimal value of divisor to
keep numerical stability. Default to 1e-6.
norm_over_kernel (bool, optional): Normalize over kernel.
Default to False.
"""
def __init__(self,
*args,
tempearture=20,
power=1.0,
eps=1e-6,
norm_over_kernel=False,
**kwargs):
super(NormedConv2d, self).__init__(*args, **kwargs)
self.tempearture = tempearture
self.power = power
self.norm_over_kernel = norm_over_kernel
self.eps = eps
def forward(self, x):
if not self.norm_over_kernel:
weight_ = self.weight / (
self.weight.norm(dim=1, keepdim=True).pow(self.power) +
self.eps)
else:
weight_ = self.weight / (
self.weight.view(self.weight.size(0), -1).norm(
dim=1, keepdim=True).pow(self.power)[..., None, None] +
self.eps)
x_ = x / (x.norm(dim=1, keepdim=True).pow(self.power) + self.eps)
x_ = x_ * self.tempearture
if hasattr(self, 'conv2d_forward'):
x_ = self.conv2d_forward(x_, weight_)
else:
if torch.__version__ >= '1.8':
x_ = self._conv_forward(x_, weight_, self.bias)
else:
x_ = self._conv_forward(x_, weight_)
return x_
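if __name__ == '__main__':
    # Illustrative usage sketch added for clarity; it is not part of the
    # original module. Feature and class counts are arbitrary examples.
    cls_fc = NormedLinear(256, 80)
    assert cls_fc(torch.randn(8, 256)).shape == (8, 80)
    cls_conv = NormedConv2d(256, 80, kernel_size=3, padding=1)
    assert cls_conv(torch.randn(2, 256, 16, 16)).shape == (2, 80, 16, 16)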
| 2,998 | 32.696629 | 77 | py |
mmdetection | mmdetection-master/mmdet/models/utils/panoptic_gt_processing.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
def preprocess_panoptic_gt(gt_labels, gt_masks, gt_semantic_seg, num_things,
num_stuff, img_metas):
"""Preprocess the ground truth for a image.
Args:
gt_labels (Tensor): Ground truth labels of each bbox,
with shape (num_gts, ).
        gt_masks (BitmapMasks): Ground truth masks of each instance
            of an image, shape (num_gts, h, w).
gt_semantic_seg (Tensor | None): Ground truth of semantic
segmentation with the shape (1, h, w).
[0, num_thing_class - 1] means things,
[num_thing_class, num_class-1] means stuff,
255 means VOID. It's None when training instance segmentation.
        img_metas (dict): Image meta information.
Returns:
tuple: a tuple containing the following targets.
            - labels (Tensor): Ground truth class indices for an
                image, with shape (n, ), n is the sum of the number
                of stuff types and the number of instances in an image.
            - masks (Tensor): Ground truth masks for an image, with
shape (n, h, w). Contains stuff and things when training
panoptic segmentation, and things only when training
instance segmentation.
"""
num_classes = num_things + num_stuff
things_masks = gt_masks.pad(img_metas['pad_shape'][:2], pad_val=0)\
.to_tensor(dtype=torch.bool, device=gt_labels.device)
if gt_semantic_seg is None:
masks = things_masks.long()
return gt_labels, masks
things_labels = gt_labels
gt_semantic_seg = gt_semantic_seg.squeeze(0)
semantic_labels = torch.unique(
gt_semantic_seg,
sorted=False,
return_inverse=False,
return_counts=False)
stuff_masks_list = []
stuff_labels_list = []
for label in semantic_labels:
if label < num_things or label >= num_classes:
continue
stuff_mask = gt_semantic_seg == label
stuff_masks_list.append(stuff_mask)
stuff_labels_list.append(label)
if len(stuff_masks_list) > 0:
stuff_masks = torch.stack(stuff_masks_list, dim=0)
stuff_labels = torch.stack(stuff_labels_list, dim=0)
labels = torch.cat([things_labels, stuff_labels], dim=0)
masks = torch.cat([things_masks, stuff_masks], dim=0)
else:
labels = things_labels
masks = things_masks
masks = masks.long()
return labels, masks
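if __name__ == '__main__':
    # Illustrative usage sketch added for clarity; it is not part of the
    # original module. It assumes COCO panoptic sizes (80 things, 53 stuff)
    # and builds two thing instances plus one stuff region.
    import numpy as np
    from mmdet.core import BitmapMasks
    gt_labels = torch.tensor([3, 17])
    gt_masks = BitmapMasks(
        (np.random.rand(2, 64, 64) > 0.5).astype(np.uint8), 64, 64)
    gt_semantic_seg = torch.full((1, 64, 64), 255, dtype=torch.long)
    gt_semantic_seg[:, :16] = 85  # one stuff class in [80, 132]
    labels, masks = preprocess_panoptic_gt(
        gt_labels, gt_masks, gt_semantic_seg, num_things=80, num_stuff=53,
        img_metas=dict(pad_shape=(64, 64, 3)))
    assert labels.shape[0] == masks.shape[0] == 3  # 2 things + 1 stuff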
| 2,536 | 35.768116 | 76 | py |
mmdetection | mmdetection-master/mmdet/models/utils/point_sample.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops import point_sample
def get_uncertainty(mask_pred, labels):
"""Estimate uncertainty based on pred logits.
We estimate uncertainty as L1 distance between 0.0 and the logits
    prediction in 'mask_pred' for the foreground class given by `labels`.
Args:
mask_pred (Tensor): mask predication logits, shape (num_rois,
num_classes, mask_height, mask_width).
labels (list[Tensor]): Either predicted or ground truth label for
each predicted mask, of length num_rois.
Returns:
scores (Tensor): Uncertainty scores with the most uncertain
locations having the highest uncertainty score,
shape (num_rois, 1, mask_height, mask_width)
"""
if mask_pred.shape[1] == 1:
gt_class_logits = mask_pred.clone()
else:
inds = torch.arange(mask_pred.shape[0], device=mask_pred.device)
gt_class_logits = mask_pred[inds, labels].unsqueeze(1)
return -torch.abs(gt_class_logits)
def get_uncertain_point_coords_with_randomness(mask_pred, labels, num_points,
oversample_ratio,
importance_sample_ratio):
"""Get ``num_points`` most uncertain points with random points during
train.
Sample points in [0, 1] x [0, 1] coordinate space based on their
uncertainty. The uncertainties are calculated for each point using
'get_uncertainty()' function that takes point's logit prediction as
input.
Args:
mask_pred (Tensor): A tensor of shape (num_rois, num_classes,
mask_height, mask_width) for class-specific or class-agnostic
prediction.
labels (list): The ground truth class for each instance.
num_points (int): The number of points to sample.
oversample_ratio (int): Oversampling parameter.
importance_sample_ratio (float): Ratio of points that are sampled
            via importance sampling.
Returns:
point_coords (Tensor): A tensor of shape (num_rois, num_points, 2)
that contains the coordinates sampled points.
"""
assert oversample_ratio >= 1
assert 0 <= importance_sample_ratio <= 1
batch_size = mask_pred.shape[0]
num_sampled = int(num_points * oversample_ratio)
point_coords = torch.rand(
batch_size, num_sampled, 2, device=mask_pred.device)
point_logits = point_sample(mask_pred, point_coords)
# It is crucial to calculate uncertainty based on the sampled
# prediction value for the points. Calculating uncertainties of the
# coarse predictions first and sampling them for points leads to
# incorrect results. To illustrate this: assume uncertainty func(
# logits)=-abs(logits), a sampled point between two coarse
# predictions with -1 and 1 logits has 0 logits, and therefore 0
# uncertainty value. However, if we calculate uncertainties for the
# coarse predictions first, both will have -1 uncertainty,
# and sampled point will get -1 uncertainty.
point_uncertainties = get_uncertainty(point_logits, labels)
num_uncertain_points = int(importance_sample_ratio * num_points)
num_random_points = num_points - num_uncertain_points
idx = torch.topk(
point_uncertainties[:, 0, :], k=num_uncertain_points, dim=1)[1]
shift = num_sampled * torch.arange(
batch_size, dtype=torch.long, device=mask_pred.device)
idx += shift[:, None]
point_coords = point_coords.view(-1, 2)[idx.view(-1), :].view(
batch_size, num_uncertain_points, 2)
if num_random_points > 0:
rand_roi_coords = torch.rand(
batch_size, num_random_points, 2, device=mask_pred.device)
point_coords = torch.cat((point_coords, rand_roi_coords), dim=1)
return point_coords
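if __name__ == '__main__':
    # Illustrative usage sketch added for clarity; it is not part of the
    # original module. 75% of the 196 points per RoI are chosen by
    # uncertainty from a 3x oversampled set, the rest uniformly at random.
    mask_pred = torch.randn(4, 80, 28, 28)
    labels = torch.randint(0, 80, (4, ))
    coords = get_uncertain_point_coords_with_randomness(
        mask_pred, labels, num_points=196, oversample_ratio=3,
        importance_sample_ratio=0.75)
    assert coords.shape == (4, 196, 2)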
| 3,878 | 43.079545 | 77 | py |
mmdetection | mmdetection-master/mmdet/models/utils/positional_encoding.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
from mmcv.cnn.bricks.transformer import POSITIONAL_ENCODING
from mmcv.runner import BaseModule
@POSITIONAL_ENCODING.register_module()
class SinePositionalEncoding(BaseModule):
"""Position encoding with sine and cosine functions.
See `End-to-End Object Detection with Transformers
<https://arxiv.org/pdf/2005.12872>`_ for details.
Args:
num_feats (int): The feature dimension for each position
along x-axis or y-axis. Note the final returned dimension
for each position is 2 times of this value.
temperature (int, optional): The temperature used for scaling
the position embedding. Defaults to 10000.
normalize (bool, optional): Whether to normalize the position
embedding. Defaults to False.
scale (float, optional): A scale factor that scales the position
embedding. The scale will be used only when `normalize` is True.
Defaults to 2*pi.
eps (float, optional): A value added to the denominator for
numerical stability. Defaults to 1e-6.
offset (float): offset add to embed when do the normalization.
Defaults to 0.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_feats,
temperature=10000,
normalize=False,
scale=2 * math.pi,
eps=1e-6,
offset=0.,
init_cfg=None):
super(SinePositionalEncoding, self).__init__(init_cfg)
if normalize:
assert isinstance(scale, (float, int)), 'when normalize is set,' \
'scale should be provided and in float or int type, ' \
f'found {type(scale)}'
self.num_feats = num_feats
self.temperature = temperature
self.normalize = normalize
self.scale = scale
self.eps = eps
self.offset = offset
def forward(self, mask):
"""Forward function for `SinePositionalEncoding`.
Args:
mask (Tensor): ByteTensor mask. Non-zero values representing
ignored positions, while zero values means valid positions
for this image. Shape [bs, h, w].
Returns:
pos (Tensor): Returned position embedding with shape
[bs, num_feats*2, h, w].
"""
# For convenience of exporting to ONNX, it's required to convert
# `masks` from bool to int.
mask = mask.to(torch.int)
not_mask = 1 - mask # logical_not
y_embed = not_mask.cumsum(1, dtype=torch.float32)
x_embed = not_mask.cumsum(2, dtype=torch.float32)
if self.normalize:
y_embed = (y_embed + self.offset) / \
(y_embed[:, -1:, :] + self.eps) * self.scale
x_embed = (x_embed + self.offset) / \
(x_embed[:, :, -1:] + self.eps) * self.scale
dim_t = torch.arange(
self.num_feats, dtype=torch.float32, device=mask.device)
dim_t = self.temperature**(2 * (dim_t // 2) / self.num_feats)
pos_x = x_embed[:, :, :, None] / dim_t
pos_y = y_embed[:, :, :, None] / dim_t
# use `view` instead of `flatten` for dynamically exporting to ONNX
B, H, W = mask.size()
pos_x = torch.stack(
(pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()),
dim=4).view(B, H, W, -1)
pos_y = torch.stack(
(pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()),
dim=4).view(B, H, W, -1)
pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
return pos
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(num_feats={self.num_feats}, '
repr_str += f'temperature={self.temperature}, '
repr_str += f'normalize={self.normalize}, '
repr_str += f'scale={self.scale}, '
repr_str += f'eps={self.eps})'
return repr_str
@POSITIONAL_ENCODING.register_module()
class LearnedPositionalEncoding(BaseModule):
"""Position embedding with learnable embedding weights.
Args:
num_feats (int): The feature dimension for each position
along x-axis or y-axis. The final returned dimension for
each position is 2 times of this value.
row_num_embed (int, optional): The dictionary size of row embeddings.
Default 50.
col_num_embed (int, optional): The dictionary size of col embeddings.
Default 50.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
num_feats,
row_num_embed=50,
col_num_embed=50,
init_cfg=dict(type='Uniform', layer='Embedding')):
super(LearnedPositionalEncoding, self).__init__(init_cfg)
self.row_embed = nn.Embedding(row_num_embed, num_feats)
self.col_embed = nn.Embedding(col_num_embed, num_feats)
self.num_feats = num_feats
self.row_num_embed = row_num_embed
self.col_num_embed = col_num_embed
def forward(self, mask):
"""Forward function for `LearnedPositionalEncoding`.
Args:
mask (Tensor): ByteTensor mask. Non-zero values representing
ignored positions, while zero values means valid positions
for this image. Shape [bs, h, w].
Returns:
pos (Tensor): Returned position embedding with shape
[bs, num_feats*2, h, w].
"""
h, w = mask.shape[-2:]
x = torch.arange(w, device=mask.device)
y = torch.arange(h, device=mask.device)
x_embed = self.col_embed(x)
y_embed = self.row_embed(y)
pos = torch.cat(
(x_embed.unsqueeze(0).repeat(h, 1, 1), y_embed.unsqueeze(1).repeat(
1, w, 1)),
dim=-1).permute(2, 0,
1).unsqueeze(0).repeat(mask.shape[0], 1, 1, 1)
return pos
def __repr__(self):
"""str: a string that describes the module"""
repr_str = self.__class__.__name__
repr_str += f'(num_feats={self.num_feats}, '
repr_str += f'row_num_embed={self.row_num_embed}, '
repr_str += f'col_num_embed={self.col_num_embed})'
return repr_str
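if __name__ == '__main__':
    # Illustrative usage sketch added for clarity; it is not part of the
    # original module. num_feats=128 matches the common DETR setting, so the
    # returned embeddings have 256 channels.
    mask = torch.zeros(2, 25, 38, dtype=torch.uint8)  # all positions valid
    sine = SinePositionalEncoding(num_feats=128, normalize=True)
    assert sine(mask).shape == (2, 256, 25, 38)
    learned = LearnedPositionalEncoding(num_feats=128)
    assert learned(mask).shape == (2, 256, 25, 38)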
| 6,568 | 39.054878 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/utils/res_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import build_conv_layer, build_norm_layer
from mmcv.runner import BaseModule, Sequential
from torch import nn as nn
class ResLayer(Sequential):
"""ResLayer to build ResNet style backbone.
Args:
block (nn.Module): block used to build ResLayer.
inplanes (int): inplanes of block.
planes (int): planes of block.
num_blocks (int): number of blocks.
stride (int): stride of the first block. Default: 1
avg_down (bool): Use AvgPool instead of stride conv when
downsampling in the bottleneck. Default: False
conv_cfg (dict): dictionary to construct and config conv layer.
Default: None
norm_cfg (dict): dictionary to construct and config norm layer.
Default: dict(type='BN')
downsample_first (bool): Downsample at the first block or last block.
False for Hourglass, True for ResNet. Default: True
"""
def __init__(self,
block,
inplanes,
planes,
num_blocks,
stride=1,
avg_down=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
downsample_first=True,
**kwargs):
self.block = block
downsample = None
if stride != 1 or inplanes != planes * block.expansion:
downsample = []
conv_stride = stride
if avg_down:
conv_stride = 1
downsample.append(
nn.AvgPool2d(
kernel_size=stride,
stride=stride,
ceil_mode=True,
count_include_pad=False))
downsample.extend([
build_conv_layer(
conv_cfg,
inplanes,
planes * block.expansion,
kernel_size=1,
stride=conv_stride,
bias=False),
build_norm_layer(norm_cfg, planes * block.expansion)[1]
])
downsample = nn.Sequential(*downsample)
layers = []
if downsample_first:
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
inplanes = planes * block.expansion
for _ in range(1, num_blocks):
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
else: # downsample_first=False is for HourglassModule
for _ in range(num_blocks - 1):
layers.append(
block(
inplanes=inplanes,
planes=inplanes,
stride=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
layers.append(
block(
inplanes=inplanes,
planes=planes,
stride=stride,
downsample=downsample,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
**kwargs))
super(ResLayer, self).__init__(*layers)
class SimplifiedBasicBlock(BaseModule):
"""Simplified version of original basic residual block. This is used in
`SCNet <https://arxiv.org/abs/2012.10150>`_.
- Norm layer is now optional
- Last ReLU in forward function is removed
"""
expansion = 1
def __init__(self,
inplanes,
planes,
stride=1,
dilation=1,
downsample=None,
style='pytorch',
with_cp=False,
conv_cfg=None,
norm_cfg=dict(type='BN'),
dcn=None,
plugins=None,
                 init_cfg=None):
        super(SimplifiedBasicBlock, self).__init__(init_cfg)
assert dcn is None, 'Not implemented yet.'
assert plugins is None, 'Not implemented yet.'
assert not with_cp, 'Not implemented yet.'
self.with_norm = norm_cfg is not None
with_bias = True if norm_cfg is None else False
self.conv1 = build_conv_layer(
conv_cfg,
inplanes,
planes,
3,
stride=stride,
padding=dilation,
dilation=dilation,
bias=with_bias)
if self.with_norm:
self.norm1_name, norm1 = build_norm_layer(
norm_cfg, planes, postfix=1)
self.add_module(self.norm1_name, norm1)
self.conv2 = build_conv_layer(
conv_cfg, planes, planes, 3, padding=1, bias=with_bias)
if self.with_norm:
self.norm2_name, norm2 = build_norm_layer(
norm_cfg, planes, postfix=2)
self.add_module(self.norm2_name, norm2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
self.dilation = dilation
self.with_cp = with_cp
@property
def norm1(self):
"""nn.Module: normalization layer after the first convolution layer"""
return getattr(self, self.norm1_name) if self.with_norm else None
@property
def norm2(self):
"""nn.Module: normalization layer after the second convolution layer"""
return getattr(self, self.norm2_name) if self.with_norm else None
def forward(self, x):
"""Forward function."""
identity = x
out = self.conv1(x)
if self.with_norm:
out = self.norm1(out)
out = self.relu(out)
out = self.conv2(out)
if self.with_norm:
out = self.norm2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
return out
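if __name__ == '__main__':
    # Illustrative usage sketch added for clarity; it is not part of the
    # original module. It builds one ResNet stage from mmdet's Bottleneck
    # block (expansion 4), so 64 input channels become 256 output channels.
    import torch
    from mmdet.models.backbones.resnet import Bottleneck
    layer = ResLayer(Bottleneck, inplanes=64, planes=64, num_blocks=3, stride=2)
    x = torch.rand(1, 64, 56, 56)
    assert layer(x).shape == (1, 256, 28, 28)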
| 6,392 | 32.471204 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/utils/se_layer.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
class SELayer(BaseModule):
"""Squeeze-and-Excitation Module.
Args:
channels (int): The input (and output) channels of the SE layer.
ratio (int): Squeeze ratio in SELayer, the intermediate channel will be
``int(channels/ratio)``. Default: 16.
conv_cfg (None or dict): Config dict for convolution layer.
Default: None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='Sigmoid'))
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
channels,
ratio=16,
conv_cfg=None,
act_cfg=(dict(type='ReLU'), dict(type='Sigmoid')),
init_cfg=None):
super(SELayer, self).__init__(init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=int(channels / ratio),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=int(channels / ratio),
out_channels=channels,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
out = self.global_avgpool(x)
out = self.conv1(out)
out = self.conv2(out)
return x * out
class DyReLU(BaseModule):
"""Dynamic ReLU (DyReLU) module.
See `Dynamic ReLU <https://arxiv.org/abs/2003.10027>`_ for details.
Current implementation is specialized for task-aware attention in DyHead.
HSigmoid arguments in default act_cfg follow DyHead official code.
https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py
Args:
channels (int): The input (and output) channels of DyReLU module.
ratio (int): Squeeze ratio in Squeeze-and-Excitation-like module,
the intermediate channel will be ``int(channels/ratio)``.
Default: 4.
conv_cfg (None or dict): Config dict for convolution layer.
Default: None, which means using conv2d.
act_cfg (dict or Sequence[dict]): Config dict for activation layer.
            If act_cfg is a dict, two activation layers will be configured
            by this dict. If act_cfg is a sequence of dicts, the first
            activation layer will be configured by the first dict and the
            second activation layer will be configured by the second dict.
Default: (dict(type='ReLU'), dict(type='HSigmoid', bias=3.0,
divisor=6.0))
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
channels,
ratio=4,
conv_cfg=None,
act_cfg=(dict(type='ReLU'),
dict(type='HSigmoid', bias=3.0, divisor=6.0)),
init_cfg=None):
super().__init__(init_cfg=init_cfg)
if isinstance(act_cfg, dict):
act_cfg = (act_cfg, act_cfg)
assert len(act_cfg) == 2
assert mmcv.is_tuple_of(act_cfg, dict)
self.channels = channels
self.expansion = 4 # for a1, b1, a2, b2
self.global_avgpool = nn.AdaptiveAvgPool2d(1)
self.conv1 = ConvModule(
in_channels=channels,
out_channels=int(channels / ratio),
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[0])
self.conv2 = ConvModule(
in_channels=int(channels / ratio),
out_channels=channels * self.expansion,
kernel_size=1,
stride=1,
conv_cfg=conv_cfg,
act_cfg=act_cfg[1])
def forward(self, x):
"""Forward function."""
coeffs = self.global_avgpool(x)
coeffs = self.conv1(coeffs)
coeffs = self.conv2(coeffs) - 0.5 # value range: [-0.5, 0.5]
a1, b1, a2, b2 = torch.split(coeffs, self.channels, dim=1)
a1 = a1 * 2.0 + 1.0 # [-1.0, 1.0] + 1.0
a2 = a2 * 2.0 # [-1.0, 1.0]
out = torch.max(x * a1 + b1, x * a2 + b2)
return out
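if __name__ == '__main__':
    # Illustrative usage sketch added for clarity; it is not part of the
    # original module. Both modules preserve the input shape.
    feat = torch.rand(2, 64, 32, 32)
    assert SELayer(channels=64, ratio=16)(feat).shape == feat.shape
    assert DyReLU(channels=64, ratio=4)(feat).shape == feat.shape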
| 5,007 | 38.125 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/utils/transformer.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
from typing import Sequence
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (build_activation_layer, build_conv_layer,
build_norm_layer, xavier_init)
from mmcv.cnn.bricks.registry import (TRANSFORMER_LAYER,
TRANSFORMER_LAYER_SEQUENCE)
from mmcv.cnn.bricks.transformer import (BaseTransformerLayer,
TransformerLayerSequence,
build_transformer_layer_sequence)
from mmcv.runner.base_module import BaseModule
from mmcv.utils import to_2tuple
from torch.nn.init import normal_
from mmdet.models.utils.builder import TRANSFORMER
try:
from mmcv.ops.multi_scale_deform_attn import MultiScaleDeformableAttention
except ImportError:
warnings.warn(
'`MultiScaleDeformableAttention` in MMCV has been moved to '
'`mmcv.ops.multi_scale_deform_attn`, please update your MMCV')
from mmcv.cnn.bricks.transformer import MultiScaleDeformableAttention
def nlc_to_nchw(x, hw_shape):
"""Convert [N, L, C] shape tensor to [N, C, H, W] shape tensor.
Args:
x (Tensor): The input tensor of shape [N, L, C] before conversion.
hw_shape (Sequence[int]): The height and width of output feature map.
Returns:
Tensor: The output tensor of shape [N, C, H, W] after conversion.
"""
H, W = hw_shape
assert len(x.shape) == 3
B, L, C = x.shape
assert L == H * W, 'The seq_len does not match H, W'
return x.transpose(1, 2).reshape(B, C, H, W).contiguous()
def nchw_to_nlc(x):
"""Flatten [N, C, H, W] shape tensor to [N, L, C] shape tensor.
Args:
x (Tensor): The input tensor of shape [N, C, H, W] before conversion.
Returns:
Tensor: The output tensor of shape [N, L, C] after conversion.
"""
assert len(x.shape) == 4
return x.flatten(2).transpose(1, 2).contiguous()
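def _nlc_nchw_roundtrip_demo():
    """Illustrative usage sketch added for clarity; not part of the original
    file. The two helpers above are exact inverses once the spatial shape is
    known."""
    x = torch.rand(2, 256, 16, 20)
    tokens = nchw_to_nlc(x)  # (2, 16 * 20, 256)
    assert torch.equal(nlc_to_nchw(tokens, (16, 20)), x)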
class AdaptivePadding(nn.Module):
"""Applies padding to input (if needed) so that input can get fully covered
by filter you specified. It support two modes "same" and "corner". The
"same" mode is same with "SAME" padding mode in TensorFlow, pad zero around
input. The "corner" mode would pad zero to bottom right.
Args:
        kernel_size (int | tuple): Size of the kernel.
        stride (int | tuple): Stride of the filter. Default: 1.
dilation (int | tuple): Spacing between kernel elements.
Default: 1
padding (str): Support "same" and "corner", "corner" mode
would pad zero to bottom right, and "same" mode would
pad zero around input. Default: "corner".
Example:
>>> kernel_size = 16
>>> stride = 16
>>> dilation = 1
>>> input = torch.rand(1, 1, 15, 17)
>>> adap_pad = AdaptivePadding(
>>> kernel_size=kernel_size,
>>> stride=stride,
>>> dilation=dilation,
>>> padding="corner")
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
>>> input = torch.rand(1, 1, 16, 17)
>>> out = adap_pad(input)
>>> assert (out.shape[2], out.shape[3]) == (16, 32)
"""
def __init__(self, kernel_size=1, stride=1, dilation=1, padding='corner'):
super(AdaptivePadding, self).__init__()
assert padding in ('same', 'corner')
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
padding = to_2tuple(padding)
dilation = to_2tuple(dilation)
self.padding = padding
self.kernel_size = kernel_size
self.stride = stride
self.dilation = dilation
def get_pad_shape(self, input_shape):
input_h, input_w = input_shape
kernel_h, kernel_w = self.kernel_size
stride_h, stride_w = self.stride
output_h = math.ceil(input_h / stride_h)
output_w = math.ceil(input_w / stride_w)
pad_h = max((output_h - 1) * stride_h +
(kernel_h - 1) * self.dilation[0] + 1 - input_h, 0)
pad_w = max((output_w - 1) * stride_w +
(kernel_w - 1) * self.dilation[1] + 1 - input_w, 0)
return pad_h, pad_w
def forward(self, x):
pad_h, pad_w = self.get_pad_shape(x.size()[-2:])
if pad_h > 0 or pad_w > 0:
if self.padding == 'corner':
x = F.pad(x, [0, pad_w, 0, pad_h])
elif self.padding == 'same':
x = F.pad(x, [
pad_w // 2, pad_w - pad_w // 2, pad_h // 2,
pad_h - pad_h // 2
])
return x
class PatchEmbed(BaseModule):
"""Image to Patch Embedding.
We use a conv layer to implement PatchEmbed.
Args:
in_channels (int): The num of input channels. Default: 3
embed_dims (int): The dimensions of embedding. Default: 768
conv_type (str): The config dict for embedding
            conv layer type selection. Default: "Conv2d".
kernel_size (int): The kernel_size of embedding conv. Default: 16.
stride (int): The slide stride of embedding conv.
Default: None (Would be set as `kernel_size`).
padding (int | tuple | string ): The padding length of
embedding conv. When it is a string, it means the mode
of adaptive padding, support "same" and "corner" now.
Default: "corner".
dilation (int): The dilation rate of embedding conv. Default: 1.
bias (bool): Bias of embed conv. Default: True.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
input_size (int | tuple | None): The size of input, which will be
            used to calculate the out size. Only works when `dynamic_size`
            is False. Default: None.
init_cfg (`mmcv.ConfigDict`, optional): The Config for initialization.
Default: None.
"""
def __init__(
self,
in_channels=3,
embed_dims=768,
conv_type='Conv2d',
kernel_size=16,
stride=16,
padding='corner',
dilation=1,
bias=True,
norm_cfg=None,
input_size=None,
init_cfg=None,
):
super(PatchEmbed, self).__init__(init_cfg=init_cfg)
self.embed_dims = embed_dims
if stride is None:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of conv
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.projection = build_conv_layer(
dict(type=conv_type),
in_channels=in_channels,
out_channels=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, embed_dims)[1]
else:
self.norm = None
if input_size:
input_size = to_2tuple(input_size)
# `init_out_size` would be used outside to
# calculate the num_patches
# when `use_abs_pos_embed` outside
self.init_input_size = input_size
if self.adap_padding:
pad_h, pad_w = self.adap_padding.get_pad_shape(input_size)
input_h, input_w = input_size
input_h = input_h + pad_h
input_w = input_w + pad_w
input_size = (input_h, input_w)
# https://pytorch.org/docs/stable/generated/torch.nn.Conv2d.html
h_out = (input_size[0] + 2 * padding[0] - dilation[0] *
(kernel_size[0] - 1) - 1) // stride[0] + 1
w_out = (input_size[1] + 2 * padding[1] - dilation[1] *
(kernel_size[1] - 1) - 1) // stride[1] + 1
self.init_out_size = (h_out, w_out)
else:
self.init_input_size = None
self.init_out_size = None
def forward(self, x):
"""
Args:
x (Tensor): Has shape (B, C, H, W). In most case, C is 3.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, out_h * out_w, embed_dims)
- out_size (tuple[int]): Spatial shape of x, arrange as
(out_h, out_w).
"""
if self.adap_padding:
x = self.adap_padding(x)
x = self.projection(x)
out_size = (x.shape[2], x.shape[3])
x = x.flatten(2).transpose(1, 2)
if self.norm is not None:
x = self.norm(x)
return x, out_size
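def _patch_embed_demo():
    """Illustrative usage sketch added for clarity; not part of the original
    file. A ViT-style 16x16 non-overlapping embedding turns a 224x224 image
    into a 14x14 grid of 768-d tokens."""
    patch_embed = PatchEmbed(in_channels=3, embed_dims=768, kernel_size=16,
                             stride=16)
    img = torch.rand(1, 3, 224, 224)
    tokens, out_size = patch_embed(img)
    assert tokens.shape == (1, 14 * 14, 768) and out_size == (14, 14)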
class PatchMerging(BaseModule):
"""Merge patch feature map.
This layer groups feature map by kernel_size, and applies norm and linear
layers to the grouped feature map. Our implementation uses `nn.Unfold` to
    merge patches, which is about 25% faster than the original implementation.
    However, we need to modify pretrained models for compatibility.
    Args:
        in_channels (int): The num of input channels.
out_channels (int): The num of output channels.
kernel_size (int | tuple, optional): the kernel size in the unfold
layer. Defaults to 2.
stride (int | tuple, optional): the stride of the sliding blocks in the
unfold layer. Default: None. (Would be set as `kernel_size`)
padding (int | tuple | string ): The padding length of
embedding conv. When it is a string, it means the mode
of adaptive padding, support "same" and "corner" now.
Default: "corner".
dilation (int | tuple, optional): dilation parameter in the unfold
layer. Default: 1.
bias (bool, optional): Whether to add bias in linear layer or not.
Defaults: False.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: dict(type='LN').
init_cfg (dict, optional): The extra config for initialization.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=2,
stride=None,
padding='corner',
dilation=1,
bias=False,
norm_cfg=dict(type='LN'),
init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
if stride:
stride = stride
else:
stride = kernel_size
kernel_size = to_2tuple(kernel_size)
stride = to_2tuple(stride)
dilation = to_2tuple(dilation)
if isinstance(padding, str):
self.adap_padding = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
# disable the padding of unfold
padding = 0
else:
self.adap_padding = None
padding = to_2tuple(padding)
self.sampler = nn.Unfold(
kernel_size=kernel_size,
dilation=dilation,
padding=padding,
stride=stride)
sample_dim = kernel_size[0] * kernel_size[1] * in_channels
if norm_cfg is not None:
self.norm = build_norm_layer(norm_cfg, sample_dim)[1]
else:
self.norm = None
self.reduction = nn.Linear(sample_dim, out_channels, bias=bias)
def forward(self, x, input_size):
"""
Args:
x (Tensor): Has shape (B, H*W, C_in).
input_size (tuple[int]): The spatial shape of x, arrange as (H, W).
Default: None.
Returns:
tuple: Contains merged results and its spatial shape.
- x (Tensor): Has shape (B, Merged_H * Merged_W, C_out)
- out_size (tuple[int]): Spatial shape of x, arrange as
(Merged_H, Merged_W).
"""
B, L, C = x.shape
assert isinstance(input_size, Sequence), f'Expect ' \
f'input_size is ' \
f'`Sequence` ' \
f'but get {input_size}'
H, W = input_size
assert L == H * W, 'input feature has wrong size'
x = x.view(B, H, W, C).permute([0, 3, 1, 2]) # B, C, H, W
# Use nn.Unfold to merge patch. About 25% faster than original method,
# but need to modify pretrained model for compatibility
if self.adap_padding:
x = self.adap_padding(x)
H, W = x.shape[-2:]
x = self.sampler(x)
# if kernel_size=2 and stride=2, x should has shape (B, 4*C, H/2*W/2)
out_h = (H + 2 * self.sampler.padding[0] - self.sampler.dilation[0] *
(self.sampler.kernel_size[0] - 1) -
1) // self.sampler.stride[0] + 1
out_w = (W + 2 * self.sampler.padding[1] - self.sampler.dilation[1] *
(self.sampler.kernel_size[1] - 1) -
1) // self.sampler.stride[1] + 1
output_size = (out_h, out_w)
x = x.transpose(1, 2) # B, H/2*W/2, 4*C
x = self.norm(x) if self.norm else x
x = self.reduction(x)
return x, output_size
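def _patch_merging_demo():
    """Illustrative usage sketch added for clarity; not part of the original
    file. Swin-style 2x2 merging halves each spatial dimension and maps 96
    channels to 192."""
    merge = PatchMerging(in_channels=96, out_channels=192)
    x = torch.rand(1, 56 * 56, 96)
    merged, out_size = merge(x, (56, 56))
    assert merged.shape == (1, 28 * 28, 192) and out_size == (28, 28)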
def inverse_sigmoid(x, eps=1e-5):
"""Inverse function of sigmoid.
Args:
x (Tensor): The tensor to do the
inverse.
eps (float): EPS avoid numerical
overflow. Defaults 1e-5.
Returns:
Tensor: The x has passed the inverse
function of sigmoid, has same
shape with input.
"""
x = x.clamp(min=0, max=1)
x1 = x.clamp(min=eps)
x2 = (1 - x).clamp(min=eps)
return torch.log(x1 / x2)
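def _inverse_sigmoid_demo():
    """Illustrative usage sketch added for clarity; not part of the original
    file. Away from the clamping eps, the function is the exact inverse of
    sigmoid."""
    x = torch.tensor([0.1, 0.5, 0.9])
    assert torch.allclose(inverse_sigmoid(x).sigmoid(), x, atol=1e-4)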
@TRANSFORMER_LAYER.register_module()
class DetrTransformerDecoderLayer(BaseTransformerLayer):
"""Implements decoder layer in DETR transformer.
Args:
attn_cfgs (list[`mmcv.ConfigDict`] | list[dict] | dict )):
Configs for self_attention or cross_attention, the order
should be consistent with it in `operation_order`. If it is
a dict, it would be expand to the number of attention in
`operation_order`.
feedforward_channels (int): The hidden dimension for FFNs.
ffn_dropout (float): Probability of an element to be zeroed
in ffn. Default 0.0.
operation_order (tuple[str]): The execution order of operation
in transformer. Such as ('self_attn', 'norm', 'ffn', 'norm').
Default:None
act_cfg (dict): The activation config for FFNs. Default: `LN`
norm_cfg (dict): Config dict for normalization layer.
Default: `LN`.
ffn_num_fcs (int): The number of fully-connected layers in FFNs.
Default:2.
"""
def __init__(self,
attn_cfgs,
feedforward_channels,
ffn_dropout=0.0,
operation_order=None,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN'),
ffn_num_fcs=2,
**kwargs):
super(DetrTransformerDecoderLayer, self).__init__(
attn_cfgs=attn_cfgs,
feedforward_channels=feedforward_channels,
ffn_dropout=ffn_dropout,
operation_order=operation_order,
act_cfg=act_cfg,
norm_cfg=norm_cfg,
ffn_num_fcs=ffn_num_fcs,
**kwargs)
assert len(operation_order) == 6
assert set(operation_order) == set(
['self_attn', 'norm', 'cross_attn', 'ffn'])
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class DetrTransformerEncoder(TransformerLayerSequence):
"""TransformerEncoder of DETR.
Args:
post_norm_cfg (dict): Config of last normalization layer. Default:
`LN`. Only used when `self.pre_norm` is `True`
"""
def __init__(self, *args, post_norm_cfg=dict(type='LN'), **kwargs):
super(DetrTransformerEncoder, self).__init__(*args, **kwargs)
if post_norm_cfg is not None:
self.post_norm = build_norm_layer(
post_norm_cfg, self.embed_dims)[1] if self.pre_norm else None
else:
            assert not self.pre_norm, f'Use prenorm in ' \
                f'{self.__class__.__name__}, ' \
                f'please specify post_norm_cfg'
self.post_norm = None
def forward(self, *args, **kwargs):
"""Forward function for `TransformerCoder`.
Returns:
Tensor: forwarded results with shape [num_query, bs, embed_dims].
"""
x = super(DetrTransformerEncoder, self).forward(*args, **kwargs)
if self.post_norm is not None:
x = self.post_norm(x)
return x
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class DetrTransformerDecoder(TransformerLayerSequence):
"""Implements the decoder in DETR transformer.
Args:
return_intermediate (bool): Whether to return intermediate outputs.
post_norm_cfg (dict): Config of last normalization layer. Default:
`LN`.
"""
def __init__(self,
*args,
post_norm_cfg=dict(type='LN'),
return_intermediate=False,
**kwargs):
super(DetrTransformerDecoder, self).__init__(*args, **kwargs)
self.return_intermediate = return_intermediate
if post_norm_cfg is not None:
self.post_norm = build_norm_layer(post_norm_cfg,
self.embed_dims)[1]
else:
self.post_norm = None
def forward(self, query, *args, **kwargs):
"""Forward function for `TransformerDecoder`.
Args:
query (Tensor): Input query with shape
`(num_query, bs, embed_dims)`.
Returns:
Tensor: Results with shape [1, num_query, bs, embed_dims] when
return_intermediate is `False`, otherwise it has shape
[num_layers, num_query, bs, embed_dims].
"""
if not self.return_intermediate:
x = super().forward(query, *args, **kwargs)
if self.post_norm:
x = self.post_norm(x)[None]
return x
intermediate = []
for layer in self.layers:
query = layer(query, *args, **kwargs)
if self.return_intermediate:
if self.post_norm is not None:
intermediate.append(self.post_norm(query))
else:
intermediate.append(query)
return torch.stack(intermediate)
@TRANSFORMER.register_module()
class Transformer(BaseModule):
"""Implements the DETR transformer.
    Following the official DETR implementation, this module is copy-pasted
    from torch.nn.Transformer with modifications:
* positional encodings are passed in MultiheadAttention
* extra LN at the end of encoder is removed
* decoder returns a stack of activations from all decoding layers
See `paper: End-to-End Object Detection with Transformers
<https://arxiv.org/pdf/2005.12872>`_ for details.
Args:
encoder (`mmcv.ConfigDict` | Dict): Config of
TransformerEncoder. Defaults to None.
decoder ((`mmcv.ConfigDict` | Dict)): Config of
TransformerDecoder. Defaults to None
init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
Defaults to None.
"""
def __init__(self, encoder=None, decoder=None, init_cfg=None):
super(Transformer, self).__init__(init_cfg=init_cfg)
self.encoder = build_transformer_layer_sequence(encoder)
self.decoder = build_transformer_layer_sequence(decoder)
self.embed_dims = self.encoder.embed_dims
def init_weights(self):
# follow the official DETR to init parameters
for m in self.modules():
if hasattr(m, 'weight') and m.weight.dim() > 1:
xavier_init(m, distribution='uniform')
self._is_init = True
def forward(self, x, mask, query_embed, pos_embed):
"""Forward function for `Transformer`.
Args:
x (Tensor): Input query with shape [bs, c, h, w] where
c = embed_dims.
mask (Tensor): The key_padding_mask used for encoder and decoder,
with shape [bs, h, w].
query_embed (Tensor): The query embedding for decoder, with shape
[num_query, c].
pos_embed (Tensor): The positional encoding for encoder and
decoder, with the same shape as `x`.
Returns:
tuple[Tensor]: results of decoder containing the following tensor.
- out_dec: Output from decoder. If return_intermediate_dec \
is True output has shape [num_dec_layers, bs,
num_query, embed_dims], else has shape [1, bs, \
num_query, embed_dims].
- memory: Output results from encoder, with shape \
[bs, embed_dims, h, w].
"""
bs, c, h, w = x.shape
# use `view` instead of `flatten` for dynamically exporting to ONNX
x = x.view(bs, c, -1).permute(2, 0, 1) # [bs, c, h, w] -> [h*w, bs, c]
pos_embed = pos_embed.view(bs, c, -1).permute(2, 0, 1)
query_embed = query_embed.unsqueeze(1).repeat(
1, bs, 1) # [num_query, dim] -> [num_query, bs, dim]
mask = mask.view(bs, -1) # [bs, h, w] -> [bs, h*w]
memory = self.encoder(
query=x,
key=None,
value=None,
query_pos=pos_embed,
query_key_padding_mask=mask)
target = torch.zeros_like(query_embed)
# out_dec: [num_layers, num_query, bs, dim]
out_dec = self.decoder(
query=target,
key=memory,
value=memory,
key_pos=pos_embed,
query_pos=query_embed,
key_padding_mask=mask)
out_dec = out_dec.transpose(1, 2)
memory = memory.permute(1, 2, 0).reshape(bs, c, h, w)
return out_dec, memory
@TRANSFORMER_LAYER_SEQUENCE.register_module()
class DeformableDetrTransformerDecoder(TransformerLayerSequence):
"""Implements the decoder in DETR transformer.
Args:
return_intermediate (bool): Whether to return intermediate outputs.
coder_norm_cfg (dict): Config of last normalization layer. Default:
`LN`.
"""
def __init__(self, *args, return_intermediate=False, **kwargs):
super(DeformableDetrTransformerDecoder, self).__init__(*args, **kwargs)
self.return_intermediate = return_intermediate
def forward(self,
query,
*args,
reference_points=None,
valid_ratios=None,
reg_branches=None,
**kwargs):
"""Forward function for `TransformerDecoder`.
Args:
query (Tensor): Input query with shape
`(num_query, bs, embed_dims)`.
reference_points (Tensor): The reference
points of offset. has shape
(bs, num_query, 4) when as_two_stage,
otherwise has shape ((bs, num_query, 2).
            valid_ratios (Tensor): The ratios of valid
points on the feature map, has shape
(bs, num_levels, 2)
reg_branch: (obj:`nn.ModuleList`): Used for
refining the regression results. Only would
be passed when with_box_refine is True,
otherwise would be passed a `None`.
Returns:
Tensor: Results with shape [1, num_query, bs, embed_dims] when
return_intermediate is `False`, otherwise it has shape
[num_layers, num_query, bs, embed_dims].
"""
output = query
intermediate = []
intermediate_reference_points = []
for lid, layer in enumerate(self.layers):
if reference_points.shape[-1] == 4:
reference_points_input = reference_points[:, :, None] * \
torch.cat([valid_ratios, valid_ratios], -1)[:, None]
else:
assert reference_points.shape[-1] == 2
reference_points_input = reference_points[:, :, None] * \
valid_ratios[:, None]
output = layer(
output,
*args,
reference_points=reference_points_input,
**kwargs)
output = output.permute(1, 0, 2)
if reg_branches is not None:
tmp = reg_branches[lid](output)
if reference_points.shape[-1] == 4:
new_reference_points = tmp + inverse_sigmoid(
reference_points)
new_reference_points = new_reference_points.sigmoid()
else:
assert reference_points.shape[-1] == 2
new_reference_points = tmp
new_reference_points[..., :2] = tmp[
..., :2] + inverse_sigmoid(reference_points)
new_reference_points = new_reference_points.sigmoid()
reference_points = new_reference_points.detach()
output = output.permute(1, 0, 2)
if self.return_intermediate:
intermediate.append(output)
intermediate_reference_points.append(reference_points)
if self.return_intermediate:
return torch.stack(intermediate), torch.stack(
intermediate_reference_points)
return output, reference_points
@TRANSFORMER.register_module()
class DeformableDetrTransformer(Transformer):
"""Implements the DeformableDETR transformer.
Args:
as_two_stage (bool): Generate query from encoder features.
Default: False.
        num_feature_levels (int): Number of feature maps from FPN.
Default: 4.
two_stage_num_proposals (int): Number of proposals when set
`as_two_stage` as True. Default: 300.
"""
def __init__(self,
as_two_stage=False,
num_feature_levels=4,
two_stage_num_proposals=300,
**kwargs):
super(DeformableDetrTransformer, self).__init__(**kwargs)
self.as_two_stage = as_two_stage
self.num_feature_levels = num_feature_levels
self.two_stage_num_proposals = two_stage_num_proposals
self.embed_dims = self.encoder.embed_dims
self.init_layers()
def init_layers(self):
"""Initialize layers of the DeformableDetrTransformer."""
self.level_embeds = nn.Parameter(
torch.Tensor(self.num_feature_levels, self.embed_dims))
if self.as_two_stage:
self.enc_output = nn.Linear(self.embed_dims, self.embed_dims)
self.enc_output_norm = nn.LayerNorm(self.embed_dims)
self.pos_trans = nn.Linear(self.embed_dims * 2,
self.embed_dims * 2)
self.pos_trans_norm = nn.LayerNorm(self.embed_dims * 2)
else:
self.reference_points = nn.Linear(self.embed_dims, 2)
def init_weights(self):
"""Initialize the transformer weights."""
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
for m in self.modules():
if isinstance(m, MultiScaleDeformableAttention):
m.init_weights()
if not self.as_two_stage:
xavier_init(self.reference_points, distribution='uniform', bias=0.)
normal_(self.level_embeds)
def gen_encoder_output_proposals(self, memory, memory_padding_mask,
spatial_shapes):
"""Generate proposals from encoded memory.
Args:
memory (Tensor) : The output of encoder,
has shape (bs, num_key, embed_dim). num_key is
                equal to the number of points on feature maps from
                all levels.
memory_padding_mask (Tensor): Padding mask for memory.
has shape (bs, num_key).
spatial_shapes (Tensor): The shape of all feature maps.
has shape (num_level, 2).
Returns:
tuple: A tuple of feature map and bbox prediction.
- output_memory (Tensor): The input of decoder, \
has shape (bs, num_key, embed_dim). num_key is \
                equal to the number of points on feature maps from \
all levels.
- output_proposals (Tensor): The normalized proposal \
                after an inverse sigmoid, has shape \
(bs, num_keys, 4).
"""
N, S, C = memory.shape
proposals = []
_cur = 0
for lvl, (H, W) in enumerate(spatial_shapes):
mask_flatten_ = memory_padding_mask[:, _cur:(_cur + H * W)].view(
N, H, W, 1)
valid_H = torch.sum(~mask_flatten_[:, :, 0, 0], 1)
valid_W = torch.sum(~mask_flatten_[:, 0, :, 0], 1)
grid_y, grid_x = torch.meshgrid(
torch.linspace(
0, H - 1, H, dtype=torch.float32, device=memory.device),
torch.linspace(
0, W - 1, W, dtype=torch.float32, device=memory.device))
grid = torch.cat([grid_x.unsqueeze(-1), grid_y.unsqueeze(-1)], -1)
scale = torch.cat([valid_W.unsqueeze(-1),
valid_H.unsqueeze(-1)], 1).view(N, 1, 1, 2)
grid = (grid.unsqueeze(0).expand(N, -1, -1, -1) + 0.5) / scale
wh = torch.ones_like(grid) * 0.05 * (2.0**lvl)
proposal = torch.cat((grid, wh), -1).view(N, -1, 4)
proposals.append(proposal)
_cur += (H * W)
output_proposals = torch.cat(proposals, 1)
output_proposals_valid = ((output_proposals > 0.01) &
(output_proposals < 0.99)).all(
-1, keepdim=True)
output_proposals = torch.log(output_proposals / (1 - output_proposals))
output_proposals = output_proposals.masked_fill(
memory_padding_mask.unsqueeze(-1), float('inf'))
output_proposals = output_proposals.masked_fill(
~output_proposals_valid, float('inf'))
output_memory = memory
output_memory = output_memory.masked_fill(
memory_padding_mask.unsqueeze(-1), float(0))
output_memory = output_memory.masked_fill(~output_proposals_valid,
float(0))
output_memory = self.enc_output_norm(self.enc_output(output_memory))
return output_memory, output_proposals
@staticmethod
def get_reference_points(spatial_shapes, valid_ratios, device):
"""Get the reference points used in decoder.
Args:
spatial_shapes (Tensor): The shape of all
feature maps, has shape (num_level, 2).
            valid_ratios (Tensor): The ratios of valid
points on the feature map, has shape
(bs, num_levels, 2)
device (obj:`device`): The device where
reference_points should be.
Returns:
Tensor: reference points used in decoder, has \
shape (bs, num_keys, num_levels, 2).
"""
reference_points_list = []
for lvl, (H, W) in enumerate(spatial_shapes):
# TODO check this 0.5
ref_y, ref_x = torch.meshgrid(
torch.linspace(
0.5, H - 0.5, H, dtype=torch.float32, device=device),
torch.linspace(
0.5, W - 0.5, W, dtype=torch.float32, device=device))
ref_y = ref_y.reshape(-1)[None] / (
valid_ratios[:, None, lvl, 1] * H)
ref_x = ref_x.reshape(-1)[None] / (
valid_ratios[:, None, lvl, 0] * W)
ref = torch.stack((ref_x, ref_y), -1)
reference_points_list.append(ref)
reference_points = torch.cat(reference_points_list, 1)
reference_points = reference_points[:, :, None] * valid_ratios[:, None]
return reference_points
def get_valid_ratio(self, mask):
"""Get the valid radios of feature maps of all level."""
_, H, W = mask.shape
valid_H = torch.sum(~mask[:, :, 0], 1)
valid_W = torch.sum(~mask[:, 0, :], 1)
valid_ratio_h = valid_H.float() / H
valid_ratio_w = valid_W.float() / W
valid_ratio = torch.stack([valid_ratio_w, valid_ratio_h], -1)
return valid_ratio
def get_proposal_pos_embed(self,
proposals,
num_pos_feats=128,
temperature=10000):
"""Get the position embedding of proposal."""
scale = 2 * math.pi
dim_t = torch.arange(
num_pos_feats, dtype=torch.float32, device=proposals.device)
dim_t = temperature**(2 * (dim_t // 2) / num_pos_feats)
# N, L, 4
proposals = proposals.sigmoid() * scale
# N, L, 4, 128
pos = proposals[:, :, :, None] / dim_t
# N, L, 4, 64, 2
pos = torch.stack((pos[:, :, :, 0::2].sin(), pos[:, :, :, 1::2].cos()),
dim=4).flatten(2)
return pos
def forward(self,
mlvl_feats,
mlvl_masks,
query_embed,
mlvl_pos_embeds,
reg_branches=None,
cls_branches=None,
**kwargs):
"""Forward function for `Transformer`.
Args:
mlvl_feats (list(Tensor)): Input queries from
different level. Each element has shape
[bs, embed_dims, h, w].
mlvl_masks (list(Tensor)): The key_padding_mask from
different level used for encoder and decoder,
each element has shape [bs, h, w].
query_embed (Tensor): The query embedding for decoder,
with shape [num_query, c].
mlvl_pos_embeds (list(Tensor)): The positional encoding
of feats from different level, has the shape
[bs, embed_dims, h, w].
reg_branches (obj:`nn.ModuleList`): Regression heads for
feature maps from each decoder layer. Only would
be passed when
`with_box_refine` is True. Default to None.
cls_branches (obj:`nn.ModuleList`): Classification heads
for feature maps from each decoder layer. Only would
be passed when `as_two_stage`
is True. Default to None.
Returns:
tuple[Tensor]: results of decoder containing the following tensor.
- inter_states: Outputs from decoder. If
return_intermediate_dec is True output has shape \
(num_dec_layers, bs, num_query, embed_dims), else has \
shape (1, bs, num_query, embed_dims).
- init_reference_out: The initial value of reference \
points, has shape (bs, num_queries, 4).
- inter_references_out: The internal value of reference \
points in decoder, has shape \
(num_dec_layers, bs,num_query, embed_dims)
- enc_outputs_class: The classification score of \
proposals generated from \
encoder's feature maps, has shape \
(batch, h*w, num_classes). \
Only would be returned when `as_two_stage` is True, \
otherwise None.
- enc_outputs_coord_unact: The regression results \
generated from encoder's feature maps., has shape \
(batch, h*w, 4). Only would \
be returned when `as_two_stage` is True, \
otherwise None.
"""
assert self.as_two_stage or query_embed is not None
feat_flatten = []
mask_flatten = []
lvl_pos_embed_flatten = []
spatial_shapes = []
for lvl, (feat, mask, pos_embed) in enumerate(
zip(mlvl_feats, mlvl_masks, mlvl_pos_embeds)):
bs, c, h, w = feat.shape
spatial_shape = (h, w)
spatial_shapes.append(spatial_shape)
feat = feat.flatten(2).transpose(1, 2)
mask = mask.flatten(1)
pos_embed = pos_embed.flatten(2).transpose(1, 2)
lvl_pos_embed = pos_embed + self.level_embeds[lvl].view(1, 1, -1)
lvl_pos_embed_flatten.append(lvl_pos_embed)
feat_flatten.append(feat)
mask_flatten.append(mask)
feat_flatten = torch.cat(feat_flatten, 1)
mask_flatten = torch.cat(mask_flatten, 1)
lvl_pos_embed_flatten = torch.cat(lvl_pos_embed_flatten, 1)
spatial_shapes = torch.as_tensor(
spatial_shapes, dtype=torch.long, device=feat_flatten.device)
level_start_index = torch.cat((spatial_shapes.new_zeros(
(1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
valid_ratios = torch.stack(
[self.get_valid_ratio(m) for m in mlvl_masks], 1)
reference_points = \
self.get_reference_points(spatial_shapes,
valid_ratios,
device=feat.device)
feat_flatten = feat_flatten.permute(1, 0, 2) # (H*W, bs, embed_dims)
lvl_pos_embed_flatten = lvl_pos_embed_flatten.permute(
1, 0, 2) # (H*W, bs, embed_dims)
memory = self.encoder(
query=feat_flatten,
key=None,
value=None,
query_pos=lvl_pos_embed_flatten,
query_key_padding_mask=mask_flatten,
spatial_shapes=spatial_shapes,
reference_points=reference_points,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
**kwargs)
memory = memory.permute(1, 0, 2)
bs, _, c = memory.shape
if self.as_two_stage:
output_memory, output_proposals = \
self.gen_encoder_output_proposals(
memory, mask_flatten, spatial_shapes)
enc_outputs_class = cls_branches[self.decoder.num_layers](
output_memory)
enc_outputs_coord_unact = \
reg_branches[
self.decoder.num_layers](output_memory) + output_proposals
topk = self.two_stage_num_proposals
# We only use the first channel in enc_outputs_class as foreground,
# the other (num_classes - 1) channels are actually not used.
# Its targets are set to be 0s, which indicates the first
# class (foreground) because we use [0, num_classes - 1] to
# indicate class labels, background class is indicated by
# num_classes (similar convention in RPN).
# See https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/deformable_detr_head.py#L241 # noqa
# This follows the official implementation of Deformable DETR.
topk_proposals = torch.topk(
enc_outputs_class[..., 0], topk, dim=1)[1]
topk_coords_unact = torch.gather(
enc_outputs_coord_unact, 1,
topk_proposals.unsqueeze(-1).repeat(1, 1, 4))
topk_coords_unact = topk_coords_unact.detach()
reference_points = topk_coords_unact.sigmoid()
init_reference_out = reference_points
pos_trans_out = self.pos_trans_norm(
self.pos_trans(self.get_proposal_pos_embed(topk_coords_unact)))
query_pos, query = torch.split(pos_trans_out, c, dim=2)
else:
query_pos, query = torch.split(query_embed, c, dim=1)
query_pos = query_pos.unsqueeze(0).expand(bs, -1, -1)
query = query.unsqueeze(0).expand(bs, -1, -1)
reference_points = self.reference_points(query_pos).sigmoid()
init_reference_out = reference_points
# decoder
query = query.permute(1, 0, 2)
memory = memory.permute(1, 0, 2)
query_pos = query_pos.permute(1, 0, 2)
inter_states, inter_references = self.decoder(
query=query,
key=None,
value=memory,
query_pos=query_pos,
key_padding_mask=mask_flatten,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
level_start_index=level_start_index,
valid_ratios=valid_ratios,
reg_branches=reg_branches,
**kwargs)
inter_references_out = inter_references
if self.as_two_stage:
return inter_states, init_reference_out,\
inter_references_out, enc_outputs_class,\
enc_outputs_coord_unact
return inter_states, init_reference_out, \
inter_references_out, None, None
@TRANSFORMER.register_module()
class DynamicConv(BaseModule):
"""Implements Dynamic Convolution.
    This module generates parameters for each sample and
    uses bmm to implement 1*1 convolution. Code is modified
from the `official github repo <https://github.com/PeizeSun/
SparseR-CNN/blob/main/projects/SparseRCNN/sparsercnn/head.py#L258>`_ .
Args:
in_channels (int): The input feature channel.
Defaults to 256.
feat_channels (int): The inner feature channel.
Defaults to 64.
        out_channels (int, optional): The output feature channel.
            When not specified, it will be set to `in_channels`
            by default.
        input_feat_shape (int): The shape of input feature.
            Defaults to 7.
        with_proj (bool): Project two-dimensional feature to
            one-dimensional feature. Defaults to True.
act_cfg (dict): The activation config for DynamicConv.
norm_cfg (dict): Config dict for normalization layer. Default
layer normalization.
init_cfg (obj:`mmcv.ConfigDict`): The Config for initialization.
Default: None.
"""
def __init__(self,
in_channels=256,
feat_channels=64,
out_channels=None,
input_feat_shape=7,
with_proj=True,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN'),
init_cfg=None):
super(DynamicConv, self).__init__(init_cfg)
self.in_channels = in_channels
self.feat_channels = feat_channels
self.out_channels_raw = out_channels
self.input_feat_shape = input_feat_shape
self.with_proj = with_proj
self.act_cfg = act_cfg
self.norm_cfg = norm_cfg
self.out_channels = out_channels if out_channels else in_channels
self.num_params_in = self.in_channels * self.feat_channels
self.num_params_out = self.out_channels * self.feat_channels
self.dynamic_layer = nn.Linear(
self.in_channels, self.num_params_in + self.num_params_out)
self.norm_in = build_norm_layer(norm_cfg, self.feat_channels)[1]
self.norm_out = build_norm_layer(norm_cfg, self.out_channels)[1]
self.activation = build_activation_layer(act_cfg)
num_output = self.out_channels * input_feat_shape**2
if self.with_proj:
self.fc_layer = nn.Linear(num_output, self.out_channels)
self.fc_norm = build_norm_layer(norm_cfg, self.out_channels)[1]
def forward(self, param_feature, input_feature):
"""Forward function for `DynamicConv`.
Args:
param_feature (Tensor): The feature can be used
to generate the parameter, has shape
(num_all_proposals, in_channels).
input_feature (Tensor): Feature that
interact with parameters, has shape
(num_all_proposals, in_channels, H, W).
Returns:
Tensor: The output feature has shape
(num_all_proposals, out_channels).
"""
input_feature = input_feature.flatten(2).permute(2, 0, 1)
input_feature = input_feature.permute(1, 0, 2)
parameters = self.dynamic_layer(param_feature)
param_in = parameters[:, :self.num_params_in].view(
-1, self.in_channels, self.feat_channels)
param_out = parameters[:, -self.num_params_out:].view(
-1, self.feat_channels, self.out_channels)
# input_feature has shape (num_all_proposals, H*W, in_channels)
# param_in has shape (num_all_proposals, in_channels, feat_channels)
# feature has shape (num_all_proposals, H*W, feat_channels)
features = torch.bmm(input_feature, param_in)
features = self.norm_in(features)
features = self.activation(features)
# param_out has shape (batch_size, feat_channels, out_channels)
features = torch.bmm(features, param_out)
features = self.norm_out(features)
features = self.activation(features)
if self.with_proj:
features = features.flatten(1)
features = self.fc_layer(features)
features = self.fc_norm(features)
features = self.activation(features)
return features
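# A minimal usage sketch of DynamicConv, assuming made-up proposal counts and
# the default channel sizes; illustrative only, not part of the upstream
# module.
def _dynamic_conv_shape_demo():
    """Tiny sanity check of DynamicConv's input/output shapes (sketch only)."""
    import torch
    layer = DynamicConv(in_channels=256, feat_channels=64, input_feat_shape=7)
    param_feat = torch.randn(4, 256)      # (num_proposals, in_channels)
    roi_feat = torch.randn(4, 256, 7, 7)  # (num_proposals, C, H, W)
    out = layer(param_feat, roi_feat)
    # with_proj=True projects the (4, 49, 256) feature back to (4, 256)
    assert out.shape == (4, 256)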
| 46,532 | 38.839897 | 132 | py |
mmdetection | mmdetection-master/mmdet/utils/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .ascend_util import (batch_images_to_levels,
get_max_num_gt_division_factor, masked_fill)
from .collect_env import collect_env
from .compat_config import compat_cfg
from .logger import get_caller_name, get_root_logger, log_img_scale
from .memory import AvoidCUDAOOM, AvoidOOM
from .misc import find_latest_checkpoint, update_data_root
from .replace_cfg_vals import replace_cfg_vals
from .rfnext import rfnext_init_model
from .setup_env import setup_multi_processes
from .split_batch import split_batch
from .util_distribution import build_ddp, build_dp, get_device
__all__ = [
'get_root_logger', 'collect_env', 'find_latest_checkpoint',
'update_data_root', 'setup_multi_processes', 'get_caller_name',
'log_img_scale', 'compat_cfg', 'split_batch', 'build_ddp', 'build_dp',
'get_device', 'replace_cfg_vals', 'AvoidOOM', 'AvoidCUDAOOM',
'get_max_num_gt_division_factor', 'masked_fill', 'batch_images_to_levels',
'rfnext_init_model'
]
| 1,035 | 44.043478 | 78 | py |
mmdetection | mmdetection-master/mmdet/utils/ascend_util.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
def masked_fill(ori_tensor, mask, new_value, neg=False):
"""The Value of ori_tensor is new_value, depending on mask.
Args:
ori_tensor (Tensor): Input tensor.
mask (Tensor): If select new_value.
new_value(Tensor | scalar): Value selected for ori_tensor.
neg (bool): If True, select ori_tensor. If False, select new_value.
Returns:
ori_tensor: (Tensor): The Value of ori_tensor is new_value,
depending on mask.
"""
if mask is None:
return ori_tensor
else:
if neg:
return ori_tensor * mask + new_value * (1 - mask)
else:
return ori_tensor * (1 - mask) + new_value * mask
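# A small usage sketch (not upstream code): with neg=False the positions
# where ``mask`` is 1 are overwritten by ``new_value``; neg=True inverts the
# selection. The tensors below are assumptions for illustration only.
def _masked_fill_demo():
    import torch
    ori = torch.tensor([1., 2., 3., 4.])
    mask = torch.tensor([0., 1., 0., 1.])
    out = masked_fill(ori, mask, torch.tensor(9.))
    assert torch.equal(out, torch.tensor([1., 9., 3., 9.]))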
def batch_images_to_levels(target, num_levels):
"""Convert targets by image to targets by feature level.
[target_img0, target_img1] -> [target_level0, target_level1, ...] or
target_imgs -> [target_level0, target_level1, ...]
Args:
target (Tensor | List[Tensor]): Tensor split to image levels.
num_levels (List[int]): Image levels num.
Returns:
level_targets: (Tensor): Tensor split by image levels.
"""
if not isinstance(target, torch.Tensor):
target = torch.stack(target, 0)
level_targets = []
start = 0
for n in num_levels:
end = start + n
# level_targets.append(target[:, start:end].squeeze(0))
level_targets.append(target[:, start:end])
start = end
return level_targets
def get_max_num_gt_division_factor(gt_nums,
min_num_gt=32,
max_num_gt=1024,
division_factor=2):
"""Count max num of gt.
Args:
gt_nums (List[int]): Ground truth bboxes num of images.
min_num_gt (int): Min num of ground truth bboxes.
max_num_gt (int): Max num of ground truth bboxes.
division_factor (int): Division factor of result.
Returns:
max_gt_nums_align: (int): max num of ground truth bboxes.
"""
max_gt_nums = max(gt_nums)
max_gt_nums_align = min_num_gt
while max_gt_nums_align < max_gt_nums:
max_gt_nums_align *= division_factor
if max_gt_nums_align > max_num_gt:
raise RuntimeError
return max_gt_nums_align
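# A small usage sketch (not upstream code): the bound starts at min_num_gt
# and is multiplied by division_factor until it covers max(gt_nums). The gt
# counts below are assumptions for illustration only.
def _gt_alignment_demo():
    assert get_max_num_gt_division_factor([3, 40, 70]) == 128  # 32 -> 64 -> 128
    assert get_max_num_gt_division_factor([1]) == 32           # already covered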
| 2,359 | 32.714286 | 75 | py |
mmdetection | mmdetection-master/mmdet/utils/collect_env.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import collect_env as collect_base_env
from mmcv.utils import get_git_hash
import mmdet
def collect_env():
"""Collect the information of the running environments."""
env_info = collect_base_env()
env_info['MMDetection'] = mmdet.__version__ + '+' + get_git_hash()[:7]
return env_info
if __name__ == '__main__':
for name, val in collect_env().items():
print(f'{name}: {val}')
| 471 | 25.222222 | 74 | py |
mmdetection | mmdetection-master/mmdet/utils/compat_config.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
from mmcv import ConfigDict
def compat_cfg(cfg):
"""This function would modify some filed to keep the compatibility of
config.
For example, it will move some args which will be deprecated to the correct
fields.
"""
cfg = copy.deepcopy(cfg)
cfg = compat_imgs_per_gpu(cfg)
cfg = compat_loader_args(cfg)
cfg = compat_runner_args(cfg)
return cfg
def compat_runner_args(cfg):
if 'runner' not in cfg:
cfg.runner = ConfigDict({
'type': 'EpochBasedRunner',
'max_epochs': cfg.total_epochs
})
warnings.warn(
'config is now expected to have a `runner` section, '
'please set `runner` in your config.', UserWarning)
else:
if 'total_epochs' in cfg:
assert cfg.total_epochs == cfg.runner.max_epochs
return cfg
def compat_imgs_per_gpu(cfg):
cfg = copy.deepcopy(cfg)
if 'imgs_per_gpu' in cfg.data:
warnings.warn('"imgs_per_gpu" is deprecated in MMDet V2.0. '
'Please use "samples_per_gpu" instead')
if 'samples_per_gpu' in cfg.data:
warnings.warn(
f'Got "imgs_per_gpu"={cfg.data.imgs_per_gpu} and '
f'"samples_per_gpu"={cfg.data.samples_per_gpu}, "imgs_per_gpu"'
f'={cfg.data.imgs_per_gpu} is used in this experiments')
else:
warnings.warn('Automatically set "samples_per_gpu"="imgs_per_gpu"='
f'{cfg.data.imgs_per_gpu} in this experiments')
cfg.data.samples_per_gpu = cfg.data.imgs_per_gpu
return cfg
def compat_loader_args(cfg):
"""Deprecated sample_per_gpu in cfg.data."""
cfg = copy.deepcopy(cfg)
if 'train_dataloader' not in cfg.data:
cfg.data['train_dataloader'] = ConfigDict()
if 'val_dataloader' not in cfg.data:
cfg.data['val_dataloader'] = ConfigDict()
if 'test_dataloader' not in cfg.data:
cfg.data['test_dataloader'] = ConfigDict()
# special process for train_dataloader
if 'samples_per_gpu' in cfg.data:
samples_per_gpu = cfg.data.pop('samples_per_gpu')
assert 'samples_per_gpu' not in \
cfg.data.train_dataloader, ('`samples_per_gpu` are set '
'in `data` field and ` '
'data.train_dataloader` '
'at the same time. '
'Please only set it in '
'`data.train_dataloader`. ')
cfg.data.train_dataloader['samples_per_gpu'] = samples_per_gpu
if 'persistent_workers' in cfg.data:
persistent_workers = cfg.data.pop('persistent_workers')
assert 'persistent_workers' not in \
cfg.data.train_dataloader, ('`persistent_workers` are set '
'in `data` field and ` '
'data.train_dataloader` '
'at the same time. '
'Please only set it in '
'`data.train_dataloader`. ')
cfg.data.train_dataloader['persistent_workers'] = persistent_workers
if 'workers_per_gpu' in cfg.data:
workers_per_gpu = cfg.data.pop('workers_per_gpu')
cfg.data.train_dataloader['workers_per_gpu'] = workers_per_gpu
cfg.data.val_dataloader['workers_per_gpu'] = workers_per_gpu
cfg.data.test_dataloader['workers_per_gpu'] = workers_per_gpu
# special process for val_dataloader
if 'samples_per_gpu' in cfg.data.val:
# keep default value of `sample_per_gpu` is 1
assert 'samples_per_gpu' not in \
cfg.data.val_dataloader, ('`samples_per_gpu` are set '
'in `data.val` field and ` '
'data.val_dataloader` at '
'the same time. '
'Please only set it in '
'`data.val_dataloader`. ')
cfg.data.val_dataloader['samples_per_gpu'] = \
cfg.data.val.pop('samples_per_gpu')
# special process for val_dataloader
# in case the test dataset is concatenated
if isinstance(cfg.data.test, dict):
if 'samples_per_gpu' in cfg.data.test:
assert 'samples_per_gpu' not in \
cfg.data.test_dataloader, ('`samples_per_gpu` are set '
'in `data.test` field and ` '
'data.test_dataloader` '
'at the same time. '
'Please only set it in '
'`data.test_dataloader`. ')
cfg.data.test_dataloader['samples_per_gpu'] = \
cfg.data.test.pop('samples_per_gpu')
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
if 'samples_per_gpu' in ds_cfg:
assert 'samples_per_gpu' not in \
cfg.data.test_dataloader, ('`samples_per_gpu` are set '
'in `data.test` field and ` '
'data.test_dataloader` at'
' the same time. '
'Please only set it in '
'`data.test_dataloader`. ')
samples_per_gpu = max(
[ds_cfg.pop('samples_per_gpu', 1) for ds_cfg in cfg.data.test])
cfg.data.test_dataloader['samples_per_gpu'] = samples_per_gpu
return cfg
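# A hedged sketch of the migration performed above (not upstream code). The
# config values are assumptions for illustration; the point is that the
# legacy ``imgs_per_gpu``/``workers_per_gpu`` keys end up under
# ``data.train_dataloader`` and a ``runner`` section is synthesized.
def _compat_cfg_demo():
    from mmcv import Config
    legacy = Config(
        dict(
            total_epochs=12,
            data=dict(
                imgs_per_gpu=2,
                workers_per_gpu=2,
                train=dict(),
                val=dict(),
                test=dict())))
    cfg = compat_cfg(legacy)
    assert cfg.data.train_dataloader['samples_per_gpu'] == 2
    assert cfg.data.train_dataloader['workers_per_gpu'] == 2
    assert cfg.runner['max_epochs'] == 12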
| 5,966 | 41.621429 | 79 | py |
mmdetection | mmdetection-master/mmdet/utils/contextmanagers.py | # Copyright (c) OpenMMLab. All rights reserved.
import asyncio
import contextlib
import logging
import os
import time
from typing import List
import torch
logger = logging.getLogger(__name__)
DEBUG_COMPLETED_TIME = bool(os.environ.get('DEBUG_COMPLETED_TIME', False))
@contextlib.asynccontextmanager
async def completed(trace_name='',
name='',
sleep_interval=0.05,
streams: List[torch.cuda.Stream] = None):
"""Async context manager that waits for work to complete on given CUDA
streams."""
if not torch.cuda.is_available():
yield
return
stream_before_context_switch = torch.cuda.current_stream()
if not streams:
streams = [stream_before_context_switch]
else:
streams = [s if s else stream_before_context_switch for s in streams]
end_events = [
torch.cuda.Event(enable_timing=DEBUG_COMPLETED_TIME) for _ in streams
]
if DEBUG_COMPLETED_TIME:
start = torch.cuda.Event(enable_timing=True)
stream_before_context_switch.record_event(start)
cpu_start = time.monotonic()
logger.debug('%s %s starting, streams: %s', trace_name, name, streams)
grad_enabled_before = torch.is_grad_enabled()
try:
yield
finally:
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_end = time.monotonic()
for i, stream in enumerate(streams):
event = end_events[i]
stream.record_event(event)
grad_enabled_after = torch.is_grad_enabled()
# observed change of torch.is_grad_enabled() during concurrent run of
# async_test_bboxes code
assert (grad_enabled_before == grad_enabled_after
), 'Unexpected is_grad_enabled() value change'
are_done = [e.query() for e in end_events]
logger.debug('%s %s completed: %s streams: %s', trace_name, name,
are_done, streams)
with torch.cuda.stream(stream_before_context_switch):
while not all(are_done):
await asyncio.sleep(sleep_interval)
are_done = [e.query() for e in end_events]
logger.debug(
'%s %s completed: %s streams: %s',
trace_name,
name,
are_done,
streams,
)
current_stream = torch.cuda.current_stream()
assert current_stream == stream_before_context_switch
if DEBUG_COMPLETED_TIME:
cpu_time = (cpu_end - cpu_start) * 1000
stream_times_ms = ''
for i, stream in enumerate(streams):
elapsed_time = start.elapsed_time(end_events[i])
stream_times_ms += f' {stream} {elapsed_time:.2f} ms'
logger.info('%s %s %.2f ms %s', trace_name, name, cpu_time,
stream_times_ms)
@contextlib.asynccontextmanager
async def concurrent(streamqueue: asyncio.Queue,
trace_name='concurrent',
name='stream'):
"""Run code concurrently in different streams.
:param streamqueue: asyncio.Queue instance.
Queue tasks define the pool of streams used for concurrent execution.
"""
if not torch.cuda.is_available():
yield
return
initial_stream = torch.cuda.current_stream()
with torch.cuda.stream(initial_stream):
stream = await streamqueue.get()
assert isinstance(stream, torch.cuda.Stream)
try:
with torch.cuda.stream(stream):
logger.debug('%s %s is starting, stream: %s', trace_name, name,
stream)
yield
current = torch.cuda.current_stream()
assert current == stream
logger.debug('%s %s has finished, stream: %s', trace_name,
name, stream)
finally:
streamqueue.task_done()
streamqueue.put_nowait(stream)
| 4,125 | 32.544715 | 79 | py |
mmdetection | mmdetection-master/mmdet/utils/logger.py | # Copyright (c) OpenMMLab. All rights reserved.
import inspect
import logging
from mmcv.utils import get_logger
def get_root_logger(log_file=None, log_level=logging.INFO):
"""Get root logger.
Args:
log_file (str, optional): File path of log. Defaults to None.
log_level (int, optional): The level of logger.
Defaults to logging.INFO.
Returns:
:obj:`logging.Logger`: The obtained logger
"""
logger = get_logger(name='mmdet', log_file=log_file, log_level=log_level)
return logger
def get_caller_name():
"""Get name of caller method."""
# this_func_frame = inspect.stack()[0][0] # i.e., get_caller_name
# callee_frame = inspect.stack()[1][0] # e.g., log_img_scale
caller_frame = inspect.stack()[2][0] # e.g., caller of log_img_scale
caller_method = caller_frame.f_code.co_name
try:
caller_class = caller_frame.f_locals['self'].__class__.__name__
return f'{caller_class}.{caller_method}'
except KeyError: # caller is a function
return caller_method
def log_img_scale(img_scale, shape_order='hw', skip_square=False):
"""Log image size.
Args:
img_scale (tuple): Image size to be logged.
shape_order (str, optional): The order of image shape.
'hw' for (height, width) and 'wh' for (width, height).
Defaults to 'hw'.
skip_square (bool, optional): Whether to skip logging for square
img_scale. Defaults to False.
Returns:
bool: Whether to have done logging.
"""
if shape_order == 'hw':
height, width = img_scale
elif shape_order == 'wh':
width, height = img_scale
else:
raise ValueError(f'Invalid shape_order {shape_order}.')
if skip_square and (height == width):
return False
logger = get_root_logger()
caller = get_caller_name()
logger.info(f'image shape: height={height}, width={width} in {caller}')
return True
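# A small usage sketch (not upstream code): square scales can be skipped and
# (width, height) order is supported via shape_order='wh'. The sizes below
# are assumptions for illustration only.
def _log_img_scale_demo():
    assert log_img_scale((640, 640), skip_square=True) is False
    assert log_img_scale((1333, 800), shape_order='hw') is True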
| 1,985 | 29.090909 | 77 | py |
mmdetection | mmdetection-master/mmdet/utils/memory.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from collections import abc
from contextlib import contextmanager
from functools import wraps
import torch
from mmdet.utils import get_root_logger
def cast_tensor_type(inputs, src_type=None, dst_type=None):
"""Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``.
Args:
        inputs: Inputs to be cast.
        src_type (torch.dtype | torch.device): Source type.
        dst_type (torch.dtype | torch.device): Destination type.
    Returns:
        The same type as ``inputs``, but all contained Tensors have been cast.
"""
assert dst_type is not None
if isinstance(inputs, torch.Tensor):
if isinstance(dst_type, torch.device):
# convert Tensor to dst_device
if hasattr(inputs, 'to') and \
hasattr(inputs, 'device') and \
(inputs.device == src_type or src_type is None):
return inputs.to(dst_type)
else:
return inputs
else:
# convert Tensor to dst_dtype
if hasattr(inputs, 'to') and \
hasattr(inputs, 'dtype') and \
(inputs.dtype == src_type or src_type is None):
return inputs.to(dst_type)
else:
return inputs
# we need to ensure that the type of inputs to be casted are the same
# as the argument `src_type`.
elif isinstance(inputs, abc.Mapping):
return type(inputs)({
k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type)
for k, v in inputs.items()
})
elif isinstance(inputs, abc.Iterable):
return type(inputs)(
cast_tensor_type(item, src_type=src_type, dst_type=dst_type)
for item in inputs)
# TODO: Currently not supported
# elif isinstance(inputs, InstanceData):
# for key, value in inputs.items():
# inputs[key] = cast_tensor_type(
# value, src_type=src_type, dst_type=dst_type)
# return inputs
else:
return inputs
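# A small usage sketch (not upstream code): nested dicts/lists are traversed
# and only Tensors whose dtype matches ``src_type`` are converted. The values
# below are assumptions for illustration only.
def _cast_tensor_type_demo():
    import torch
    inputs = {'feat': torch.zeros(2, 3), 'scores': [torch.ones(4)]}
    out = cast_tensor_type(inputs, src_type=torch.float32, dst_type=torch.half)
    assert out['feat'].dtype == torch.half
    assert out['scores'][0].dtype == torch.half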
@contextmanager
def _ignore_torch_cuda_oom():
"""A context which ignores CUDA OOM exception from pytorch.
Code is modified from
<https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py> # noqa: E501
"""
try:
yield
except RuntimeError as e:
# NOTE: the string may change?
if 'CUDA out of memory. ' in str(e):
pass
else:
raise
class AvoidOOM:
"""Try to convert inputs to FP16 and CPU if got a PyTorch's CUDA Out of
Memory error. It will do the following steps:
1. First retry after calling `torch.cuda.empty_cache()`.
2. If that still fails, it will then retry by converting inputs
to FP16.
3. If that still fails trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to
CPU implementation.
Args:
to_cpu (bool): Whether to convert outputs to CPU if get an OOM
error. This will slow down the code significantly.
Defaults to True.
        test (bool): Skip the first raw attempt inside
            `_ignore_torch_cuda_oom` so that the fallback paths can be
            exercised with lightweight data in unit tests. Defaults to False.
Examples:
>>> from mmdet.utils.memory import AvoidOOM
>>> AvoidCUDAOOM = AvoidOOM()
>>> output = AvoidOOM.retry_if_cuda_oom(
>>> some_torch_function)(input1, input2)
>>> # To use as a decorator
>>> # from mmdet.utils import AvoidCUDAOOM
>>> @AvoidCUDAOOM.retry_if_cuda_oom
>>> def function(*args, **kwargs):
>>> return None
Note:
1. The output may be on CPU even if inputs are on GPU. Processing
on CPU will slow down the code significantly.
2. When converting inputs to CPU, it will only look at each argument
and check if it has `.device` and `.to` for conversion. Nested
structures of tensors are not supported.
3. Since the function might be called more than once, it has to be
stateless.
"""
def __init__(self, to_cpu=True, test=False):
self.to_cpu = to_cpu
self.test = test
def retry_if_cuda_oom(self, func):
"""Makes a function retry itself after encountering pytorch's CUDA OOM
error.
The implementation logic is referred to
https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py
Args:
func: a stateless callable that takes tensor-like objects
as arguments.
Returns:
func: a callable which retries `func` if OOM is encountered.
""" # noqa: W605
@wraps(func)
def wrapped(*args, **kwargs):
# raw function
if not self.test:
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Clear cache and retry
torch.cuda.empty_cache()
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# get the type and device of first tensor
dtype, device = None, None
values = args + tuple(kwargs.values())
for value in values:
if isinstance(value, torch.Tensor):
dtype = value.dtype
device = value.device
break
if dtype is None or device is None:
raise ValueError('There is no tensor in the inputs, '
'cannot get dtype and device.')
# Convert to FP16
fp16_args = cast_tensor_type(args, dst_type=torch.half)
fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half)
logger = get_root_logger()
logger.warning(f'Attempting to copy inputs of {str(func)} '
'to FP16 due to CUDA OOM')
            # get the input tensor type; the output type will be the same
            # as the first parameter type.
with _ignore_torch_cuda_oom():
output = func(*fp16_args, **fp16_kwargs)
output = cast_tensor_type(
output, src_type=torch.half, dst_type=dtype)
if not self.test:
return output
logger.warning('Using FP16 still meet CUDA OOM')
# Try on CPU. This will slow down the code significantly,
# therefore print a notice.
if self.to_cpu:
logger.warning(f'Attempting to copy inputs of {str(func)} '
'to CPU due to CUDA OOM')
cpu_device = torch.empty(0).device
cpu_args = cast_tensor_type(args, dst_type=cpu_device)
cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device)
# convert outputs to GPU
with _ignore_torch_cuda_oom():
logger.warning(f'Convert outputs to GPU (device={device})')
output = func(*cpu_args, **cpu_kwargs)
output = cast_tensor_type(
output, src_type=cpu_device, dst_type=device)
return output
warnings.warn('Cannot convert output to GPU due to CUDA OOM, '
'the output is now on CPU, which might cause '
'errors if the output need to interact with GPU '
'data in subsequent operations')
logger.warning('Cannot convert output to GPU due to '
'CUDA OOM, the output is on CPU now.')
return func(*cpu_args, **cpu_kwargs)
else:
# may still get CUDA OOM error
return func(*args, **kwargs)
return wrapped
# To use AvoidOOM as a decorator
AvoidCUDAOOM = AvoidOOM()
| 8,088 | 36.799065 | 103 | py |
mmdetection | mmdetection-master/mmdet/utils/misc.py | # Copyright (c) OpenMMLab. All rights reserved.
import glob
import os
import os.path as osp
import warnings
import mmcv
import torch
from mmcv.utils import TORCH_VERSION, digit_version, print_log
def find_latest_checkpoint(path, suffix='pth'):
"""Find the latest checkpoint from the working directory.
Args:
path(str): The path to find checkpoints.
suffix(str): File extension.
Defaults to pth.
Returns:
latest_path(str | None): File path of the latest checkpoint.
References:
.. [1] https://github.com/microsoft/SoftTeacher
/blob/main/ssod/utils/patch.py
"""
if not osp.exists(path):
warnings.warn('The path of checkpoints does not exist.')
return None
if osp.exists(osp.join(path, f'latest.{suffix}')):
return osp.join(path, f'latest.{suffix}')
checkpoints = glob.glob(osp.join(path, f'*.{suffix}'))
if len(checkpoints) == 0:
warnings.warn('There are no checkpoints in the path.')
return None
latest = -1
latest_path = None
for checkpoint in checkpoints:
count = int(osp.basename(checkpoint).split('_')[-1].split('.')[0])
if count > latest:
latest = count
latest_path = checkpoint
return latest_path
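# A small usage sketch (not upstream code): the checkpoint with the highest
# numeric suffix wins unless a ``latest.pth`` file exists. The file names
# below are assumptions for illustration only.
def _find_latest_checkpoint_demo():
    import os.path as osp
    import tempfile
    with tempfile.TemporaryDirectory() as tmpdir:
        for name in ('epoch_1.pth', 'epoch_12.pth', 'epoch_3.pth'):
            open(osp.join(tmpdir, name), 'w').close()
        assert find_latest_checkpoint(tmpdir).endswith('epoch_12.pth')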
def update_data_root(cfg, logger=None):
"""Update data root according to env MMDET_DATASETS.
    If the environment variable MMDET_DATASETS is set, update cfg.data_root
    accordingly. Otherwise, keep using cfg.data_root as is.
Args:
cfg (mmcv.Config): The model config need to modify
logger (logging.Logger | str | None): the way to print msg
"""
assert isinstance(cfg, mmcv.Config), \
f'cfg got wrong type: {type(cfg)}, expected mmcv.Config'
if 'MMDET_DATASETS' in os.environ:
dst_root = os.environ['MMDET_DATASETS']
        print_log(f'MMDET_DATASETS has been set to be {dst_root}. '
f'Using {dst_root} as data root.')
else:
return
assert isinstance(cfg, mmcv.Config), \
f'cfg got wrong type: {type(cfg)}, expected mmcv.Config'
def update(cfg, src_str, dst_str):
for k, v in cfg.items():
if isinstance(v, mmcv.ConfigDict):
update(cfg[k], src_str, dst_str)
if isinstance(v, str) and src_str in v:
cfg[k] = v.replace(src_str, dst_str)
update(cfg.data, cfg.data_root, dst_root)
cfg.data_root = dst_root
_torch_version_div_indexing = (
'parrots' not in TORCH_VERSION
and digit_version(TORCH_VERSION) >= digit_version('1.8'))
def floordiv(dividend, divisor, rounding_mode='trunc'):
if _torch_version_div_indexing:
return torch.div(dividend, divisor, rounding_mode=rounding_mode)
else:
return dividend // divisor
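# A tiny sketch (not upstream code): whichever branch is taken above,
# floordiv behaves like Python's ``//`` for non-negative values.
def _floordiv_demo():
    import torch
    assert int(floordiv(torch.tensor(7), 2)) == 3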
| 2,818 | 30.322222 | 74 | py |
mmdetection | mmdetection-master/mmdet/utils/profiling.py | # Copyright (c) OpenMMLab. All rights reserved.
import contextlib
import sys
import time
import torch
if sys.version_info >= (3, 7):
@contextlib.contextmanager
def profile_time(trace_name,
name,
enabled=True,
stream=None,
end_stream=None):
"""Print time spent by CPU and GPU.
Useful as a temporary context manager to find sweet spots of code
suitable for async implementation.
"""
if (not enabled) or not torch.cuda.is_available():
yield
return
stream = stream if stream else torch.cuda.current_stream()
end_stream = end_stream if end_stream else stream
start = torch.cuda.Event(enable_timing=True)
end = torch.cuda.Event(enable_timing=True)
stream.record_event(start)
try:
cpu_start = time.monotonic()
yield
finally:
cpu_end = time.monotonic()
end_stream.record_event(end)
end.synchronize()
cpu_time = (cpu_end - cpu_start) * 1000
gpu_time = start.elapsed_time(end)
msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms '
msg += f'gpu_time {gpu_time:.2f} ms stream {stream}'
print(msg, end_stream)
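# A hedged usage sketch (not upstream code): profile_time wraps an arbitrary
# code region and degrades to a no-op when CUDA is unavailable or when
# enabled=False. The workload below is a placeholder chosen for the demo.
def _profile_time_demo():
    import torch
    with profile_time('demo', 'matmul', enabled=torch.cuda.is_available()):
        torch.ones(8, 8) @ torch.ones(8, 8)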
| 1,336 | 31.609756 | 73 | py |
mmdetection | mmdetection-master/mmdet/utils/replace_cfg_vals.py | # Copyright (c) OpenMMLab. All rights reserved.
import re
from mmcv.utils import Config
def replace_cfg_vals(ori_cfg):
"""Replace the string "${key}" with the corresponding value.
Replace the "${key}" with the value of ori_cfg.key in the config. And
support replacing the chained ${key}. Such as, replace "${key0.key1}"
with the value of cfg.key0.key1. Code is modified from `vars.py
    <https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/vars.py>`_ # noqa: E501
Args:
ori_cfg (mmcv.utils.config.Config):
The origin config with "${key}" generated from a file.
Returns:
updated_cfg [mmcv.utils.config.Config]:
The config with "${key}" replaced by the corresponding value.
"""
def get_value(cfg, key):
for k in key.split('.'):
cfg = cfg[k]
return cfg
def replace_value(cfg):
if isinstance(cfg, dict):
return {key: replace_value(value) for key, value in cfg.items()}
elif isinstance(cfg, list):
return [replace_value(item) for item in cfg]
elif isinstance(cfg, tuple):
return tuple([replace_value(item) for item in cfg])
elif isinstance(cfg, str):
# the format of string cfg may be:
# 1) "${key}", which will be replaced with cfg.key directly
# 2) "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx",
# which will be replaced with the string of the cfg.key
keys = pattern_key.findall(cfg)
values = [get_value(ori_cfg, key[2:-1]) for key in keys]
if len(keys) == 1 and keys[0] == cfg:
# the format of string cfg is "${key}"
cfg = values[0]
else:
for key, value in zip(keys, values):
# the format of string cfg is
# "xxx${key}xxx" or "xxx${key1}xxx${key2}xxx"
assert not isinstance(value, (dict, list, tuple)), \
f'for the format of string cfg is ' \
f"'xxxxx${key}xxxxx' or 'xxx${key}xxx${key}xxx', " \
f"the type of the value of '${key}' " \
                        f'can not be dict, list, or tuple, ' \
f'but you input {type(value)} in {cfg}'
cfg = cfg.replace(key, str(value))
return cfg
else:
return cfg
# the pattern of string "${key}"
pattern_key = re.compile(r'\$\{[a-zA-Z\d_.]*\}')
# the type of ori_cfg._cfg_dict is mmcv.utils.config.ConfigDict
updated_cfg = Config(
replace_value(ori_cfg._cfg_dict), filename=ori_cfg.filename)
# replace the model with model_wrapper
if updated_cfg.get('model_wrapper', None) is not None:
updated_cfg.model = updated_cfg.model_wrapper
updated_cfg.pop('model_wrapper')
return updated_cfg
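# A hedged usage sketch (not upstream code): "${...}" placeholders are
# resolved against the same config, including chained keys. The keys and
# values below are assumptions for illustration only.
def _replace_cfg_vals_demo():
    from mmcv import Config
    cfg = Config(
        dict(
            work_dir='work_dirs/${model.type}',
            model=dict(type='FasterRCNN')))
    cfg = replace_cfg_vals(cfg)
    assert cfg.work_dir == 'work_dirs/FasterRCNN'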
| 2,915 | 40.070423 | 92 | py |
mmdetection | mmdetection-master/mmdet/utils/rfnext.py | # Copyright (c) OpenMMLab. All rights reserved.
try:
from mmcv.cnn import RFSearchHook
except ImportError:
RFSearchHook = None
def rfnext_init_model(detector, cfg):
"""Rcecptive field search via dilation rates.
Please refer to `RF-Next: Efficient Receptive Field
Search for Convolutional Neural Networks
<https://arxiv.org/abs/2206.06637>`_ for more details.
Args:
detector (nn.Module): The detector before initializing RF-Next.
cfg (mmcv.Config): The config for RF-Next.
If the RFSearchHook is defined in the cfg.custom_hooks,
the detector will be initialized for RF-Next.
"""
if cfg.get('custom_hooks', None) is None:
return
custom_hook_types = [hook['type'] for hook in cfg.custom_hooks]
if 'RFSearchHook' not in custom_hook_types:
return
index = custom_hook_types.index('RFSearchHook')
rfsearch_cfg = cfg.custom_hooks[index]
assert rfsearch_cfg['type'] == 'RFSearchHook'
assert RFSearchHook is not None, 'Please install mmcv > 1.7.0'
    # initialize an RFSearchHook
rfsearch_warp = RFSearchHook(
mode=rfsearch_cfg.get('mode', 'search'),
config=rfsearch_cfg.get('config', None),
rfstructure_file=rfsearch_cfg.get('rfstructure_file', None),
by_epoch=rfsearch_cfg.get('by_epoch', True),
verbose=rfsearch_cfg.get('verbose', True),
)
rfsearch_warp.init_model(detector)
rfsearch_cfg['rfstructure_file'] = None
| 1,486 | 32.795455 | 71 | py |
mmdetection | mmdetection-master/mmdet/utils/setup_env.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import platform
import warnings
import cv2
import torch.multiprocessing as mp
def setup_multi_processes(cfg):
"""Setup multi-processing environment variables."""
# set multi-process start method as `fork` to speed up the training
if platform.system() != 'Windows':
mp_start_method = cfg.get('mp_start_method', 'fork')
current_method = mp.get_start_method(allow_none=True)
if current_method is not None and current_method != mp_start_method:
warnings.warn(
f'Multi-processing start method `{mp_start_method}` is '
f'different from the previous setting `{current_method}`.'
f'It will be force set to `{mp_start_method}`. You can change '
f'this behavior by changing `mp_start_method` in your config.')
mp.set_start_method(mp_start_method, force=True)
# disable opencv multithreading to avoid system being overloaded
opencv_num_threads = cfg.get('opencv_num_threads', 0)
cv2.setNumThreads(opencv_num_threads)
# setup OMP threads
# This code is referred from https://github.com/pytorch/pytorch/blob/master/torch/distributed/run.py # noqa
workers_per_gpu = cfg.data.get('workers_per_gpu', 1)
if 'train_dataloader' in cfg.data:
workers_per_gpu = \
max(cfg.data.train_dataloader.get('workers_per_gpu', 1),
workers_per_gpu)
if 'OMP_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
omp_num_threads = 1
warnings.warn(
f'Setting OMP_NUM_THREADS environment variable for each process '
f'to be {omp_num_threads} in default, to avoid your system being '
f'overloaded, please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['OMP_NUM_THREADS'] = str(omp_num_threads)
# setup MKL threads
if 'MKL_NUM_THREADS' not in os.environ and workers_per_gpu > 1:
mkl_num_threads = 1
warnings.warn(
f'Setting MKL_NUM_THREADS environment variable for each process '
f'to be {mkl_num_threads} in default, to avoid your system being '
f'overloaded, please further tune the variable for optimal '
f'performance in your application as needed.')
os.environ['MKL_NUM_THREADS'] = str(mkl_num_threads)
| 2,428 | 43.981481 | 112 | py |
mmdetection | mmdetection-master/mmdet/utils/split_batch.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
def split_batch(img, img_metas, kwargs):
"""Split data_batch by tags.
Code is modified from
<https://github.com/microsoft/SoftTeacher/blob/main/ssod/utils/structure_utils.py> # noqa: E501
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys, see
:class:`mmdet.datasets.pipelines.Collect`.
kwargs (dict): Specific to concrete implementation.
Returns:
data_groups (dict): a dict that data_batch splited by tags,
such as 'sup', 'unsup_teacher', and 'unsup_student'.
"""
# only stack img in the batch
def fuse_list(obj_list, obj):
return torch.stack(obj_list) if isinstance(obj,
torch.Tensor) else obj_list
# select data with tag from data_batch
def select_group(data_batch, current_tag):
group_flag = [tag == current_tag for tag in data_batch['tag']]
return {
k: fuse_list([vv for vv, gf in zip(v, group_flag) if gf], v)
for k, v in data_batch.items()
}
kwargs.update({'img': img, 'img_metas': img_metas})
kwargs.update({'tag': [meta['tag'] for meta in img_metas]})
tags = list(set(kwargs['tag']))
data_groups = {tag: select_group(kwargs, tag) for tag in tags}
for tag, group in data_groups.items():
group.pop('tag')
return data_groups
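# A hedged usage sketch (not upstream code): samples are regrouped by the
# ``tag`` carried in their img_metas, e.g. 'sup' vs 'unsup_student' in
# semi-supervised training. Shapes and tags are assumptions for illustration.
def _split_batch_demo():
    import torch
    img = torch.zeros(2, 3, 8, 8)
    img_metas = [{'tag': 'sup'}, {'tag': 'unsup_student'}]
    kwargs = {'gt_bboxes': [torch.zeros(1, 4), torch.zeros(0, 4)]}
    groups = split_batch(img, img_metas, kwargs)
    assert set(groups) == {'sup', 'unsup_student'}
    assert groups['sup']['img'].shape == (1, 3, 8, 8)
    assert len(groups['unsup_student']['gt_bboxes']) == 1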
| 1,778 | 37.673913 | 99 | py |
mmdetection | mmdetection-master/mmdet/utils/util_distribution.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
dp_factory = {'cuda': MMDataParallel, 'cpu': MMDataParallel}
ddp_factory = {'cuda': MMDistributedDataParallel}
def build_dp(model, device='cuda', dim=0, *args, **kwargs):
"""build DataParallel module by device type.
if device is cuda, return a MMDataParallel model; if device is mlu,
return a MLUDataParallel model.
Args:
model (:class:`nn.Module`): model to be parallelized.
device (str): device type, cuda, cpu or mlu. Defaults to cuda.
dim (int): Dimension used to scatter the data. Defaults to 0.
Returns:
nn.Module: the model to be parallelized.
"""
if device == 'npu':
from mmcv.device.npu import NPUDataParallel
dp_factory['npu'] = NPUDataParallel
torch.npu.set_device(kwargs['device_ids'][0])
torch.npu.set_compile_mode(jit_compile=False)
model = model.npu()
elif device == 'cuda':
model = model.cuda(kwargs['device_ids'][0])
elif device == 'mlu':
from mmcv.device.mlu import MLUDataParallel
dp_factory['mlu'] = MLUDataParallel
model = model.mlu()
return dp_factory[device](model, dim=dim, *args, **kwargs)
def build_ddp(model, device='cuda', *args, **kwargs):
"""Build DistributedDataParallel module by device type.
    If device is cuda, return a MMDistributedDataParallel model;
    if device is mlu or npu, return the corresponding distributed wrapper.
    Args:
        model (:class:`nn.Module`): module to be parallelized.
        device (str): device type, cuda, mlu or npu.
    Returns:
        :class:`nn.Module`: the parallelized module.
References:
.. [1] https://pytorch.org/docs/stable/generated/torch.nn.parallel.
DistributedDataParallel.html
"""
assert device in ['cuda', 'mlu',
'npu'], 'Only available for cuda or mlu or npu devices.'
if device == 'npu':
from mmcv.device.npu import NPUDistributedDataParallel
torch.npu.set_compile_mode(jit_compile=False)
ddp_factory['npu'] = NPUDistributedDataParallel
model = model.npu()
elif device == 'cuda':
model = model.cuda()
elif device == 'mlu':
from mmcv.device.mlu import MLUDistributedDataParallel
ddp_factory['mlu'] = MLUDistributedDataParallel
model = model.mlu()
return ddp_factory[device](model, *args, **kwargs)
def is_npu_available():
"""Returns a bool indicating if NPU is currently available."""
return hasattr(torch, 'npu') and torch.npu.is_available()
def is_mlu_available():
"""Returns a bool indicating if MLU is currently available."""
return hasattr(torch, 'is_mlu_available') and torch.is_mlu_available()
def get_device():
"""Returns an available device, cpu, cuda or mlu."""
is_device_available = {
'npu': is_npu_available(),
'cuda': torch.cuda.is_available(),
'mlu': is_mlu_available()
}
device_list = [k for k, v in is_device_available.items() if v]
return device_list[0] if len(device_list) >= 1 else 'cpu'
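# A small usage sketch (not upstream code): the availability checks above are
# evaluated in npu -> cuda -> mlu order, with a cpu fallback.
def _get_device_demo():
    assert get_device() in ('npu', 'cuda', 'mlu', 'cpu')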
| 3,189 | 33.301075 | 78 | py |
mmdetection | mmdetection-master/mmdet/utils/util_mixins.py | # Copyright (c) OpenMMLab. All rights reserved.
"""This module defines the :class:`NiceRepr` mixin class, which defines a
``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
method, which you must define. This means you only have to overload one
function instead of two. Furthermore, if the object defines a ``__len__``
method, then the ``__nice__`` method defaults to something sensible, otherwise
it is treated as abstract and raises ``NotImplementedError``.
To use simply have your object inherit from :class:`NiceRepr`
(multi-inheritance should be ok).
This code was copied from the ubelt library: https://github.com/Erotemic/ubelt
Example:
>>> # Objects that define __nice__ have a default __str__ and __repr__
>>> class Student(NiceRepr):
... def __init__(self, name):
... self.name = name
... def __nice__(self):
... return self.name
>>> s1 = Student('Alice')
>>> s2 = Student('Bob')
>>> print(f's1 = {s1}')
>>> print(f's2 = {s2}')
s1 = <Student(Alice)>
s2 = <Student(Bob)>
Example:
>>> # Objects that define __len__ have a default __nice__
>>> class Group(NiceRepr):
... def __init__(self, data):
... self.data = data
... def __len__(self):
... return len(self.data)
>>> g = Group([1, 2, 3])
>>> print(f'g = {g}')
g = <Group(3)>
"""
import warnings
class NiceRepr:
"""Inherit from this class and define ``__nice__`` to "nicely" print your
objects.
Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function
Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
    If the inheriting class has a ``__len__`` method, then the default
``__nice__`` method will return its length.
Example:
>>> class Foo(NiceRepr):
... def __nice__(self):
... return 'info'
>>> foo = Foo()
>>> assert str(foo) == '<Foo(info)>'
>>> assert repr(foo).startswith('<Foo(info) at ')
Example:
>>> class Bar(NiceRepr):
... pass
>>> bar = Bar()
>>> import pytest
>>> with pytest.warns(None) as record:
>>> assert 'object at' in str(bar)
>>> assert 'object at' in repr(bar)
Example:
>>> class Baz(NiceRepr):
... def __len__(self):
... return 5
>>> baz = Baz()
>>> assert str(baz) == '<Baz(5)>'
"""
def __nice__(self):
"""str: a "nice" summary string describing this module"""
if hasattr(self, '__len__'):
# It is a common pattern for objects to use __len__ in __nice__
# As a convenience we define a default __nice__ for these objects
return str(len(self))
else:
# In all other cases force the subclass to overload __nice__
raise NotImplementedError(
f'Define the __nice__ method for {self.__class__!r}')
def __repr__(self):
"""str: the string of the module"""
try:
nice = self.__nice__()
classname = self.__class__.__name__
return f'<{classname}({nice}) at {hex(id(self))}>'
except NotImplementedError as ex:
warnings.warn(str(ex), category=RuntimeWarning)
return object.__repr__(self)
def __str__(self):
"""str: the string of the module"""
try:
classname = self.__class__.__name__
nice = self.__nice__()
return f'<{classname}({nice})>'
except NotImplementedError as ex:
warnings.warn(str(ex), category=RuntimeWarning)
return object.__repr__(self)
| 3,712 | 34.028302 | 78 | py |
mmdetection | mmdetection-master/mmdet/utils/util_random.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Helpers for random number generators."""
import numpy as np
def ensure_rng(rng=None):
"""Coerces input into a random number generator.
If the input is None, then a global random state is returned.
If the input is a numeric value, then that is used as a seed to construct a
random state. Otherwise the input is returned as-is.
Adapted from [1]_.
Args:
rng (int | numpy.random.RandomState | None):
if None, then defaults to the global rng. Otherwise this can be an
integer or a RandomState class
Returns:
(numpy.random.RandomState) : rng -
a numpy random number generator
References:
.. [1] https://gitlab.kitware.com/computer-vision/kwarray/blob/master/kwarray/util_random.py#L270 # noqa: E501
"""
if rng is None:
rng = np.random.mtrand._rand
elif isinstance(rng, int):
rng = np.random.RandomState(rng)
else:
rng = rng
return rng
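# A small usage sketch (not upstream code): the same integer seed yields the
# same random stream, and an existing RandomState passes through untouched.
def _ensure_rng_demo():
    rng_a, rng_b = ensure_rng(42), ensure_rng(42)
    assert rng_a.randint(0, 100) == rng_b.randint(0, 100)
    state = np.random.RandomState(0)
    assert ensure_rng(state) is state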
| 1,025 | 28.314286 | 119 | py |
mmdetection | mmdetection-master/tests/data/configs_mmtrack/faster_rcnn_r50_dc5.py | model = dict(
detector=dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=18,
base_channels=2,
num_stages=4,
out_indices=(3, ),
strides=(1, 2, 2, 1),
dilations=(1, 1, 1, 2),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='ChannelMapper',
in_channels=[16],
out_channels=16,
kernel_size=3),
rpn_head=dict(
type='RPNHead',
in_channels=16,
feat_channels=16,
anchor_generator=dict(
type='AnchorGenerator',
scales=[4, 8, 16, 32],
ratios=[0.5, 1.0, 2.0],
strides=[16]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=16,
featmap_strides=[16]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=16,
fc_out_channels=32,
roi_feat_size=7,
num_classes=30,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.2, 0.2, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0,
loss_weight=1.0))),
# detector training and testing settings
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=6000,
max_per_img=600,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=6000,
max_per_img=300,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.0001,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
))
| 4,091 | 34.894737 | 76 | py |
mmdetection | mmdetection-master/tests/data/configs_mmtrack/faster_rcnn_r50_fpn.py | model = dict(
detector=dict(
type='FasterRCNN',
backbone=dict(
type='ResNet',
depth=18,
base_channels=2,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch'),
neck=dict(
type='FPN', in_channels=[2, 4, 8, 16], out_channels=16,
num_outs=5),
rpn_head=dict(
type='RPNHead',
in_channels=16,
feat_channels=16,
anchor_generator=dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8, 16, 32, 64]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
roi_head=dict(
type='StandardRoIHead',
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=16,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='Shared2FCBBoxHead',
in_channels=16,
fc_out_channels=32,
roi_feat_size=7,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', loss_weight=1.0))),
train_cfg=dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_pre=2000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
match_low_quality=False,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True),
pos_weight=-1,
debug=False)),
test_cfg=dict(
rpn=dict(
nms_pre=1000,
max_per_img=1000,
nms=dict(type='nms', iou_threshold=0.7),
min_bbox_size=0),
rcnn=dict(
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
# soft-nms is also supported for rcnn testing
# e.g., nms=dict(type='soft_nms', iou_threshold=0.5, min_score=0.05)
))
| 4,013 | 35.490909 | 76 | py |
mmdetection | mmdetection-master/tests/data/configs_mmtrack/mot_challenge.py | # dataset settings
dataset_type = 'MOTChallengeDataset'
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
dict(type='LoadMultiImagesFromFile', to_float32=True),
dict(type='SeqLoadAnnotations', with_bbox=True, with_track=True),
dict(
type='SeqResize',
img_scale=(1088, 1088),
share_params=True,
ratio_range=(0.8, 1.2),
keep_ratio=True,
bbox_clip_border=False),
dict(type='SeqPhotoMetricDistortion', share_params=True),
dict(
type='SeqRandomCrop',
share_params=False,
crop_size=(1088, 1088),
bbox_clip_border=False),
dict(type='SeqRandomFlip', share_params=True, flip_ratio=0.5),
dict(type='SeqNormalize', **img_norm_cfg),
dict(type='SeqPad', size_divisor=32),
dict(type='MatchInstances', skip_nomatch=True),
dict(
type='VideoCollect',
keys=[
'img', 'gt_bboxes', 'gt_labels', 'gt_match_indices',
'gt_instance_ids'
]),
dict(type='SeqDefaultFormatBundle', ref_prefix='ref')
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1088, 1088),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='VideoCollect', keys=['img'])
])
]
data_root = 'data/MOT17/'
data = dict(
samples_per_gpu=2,
workers_per_gpu=2,
train=dict(
type=dataset_type,
visibility_thr=-1,
ann_file=data_root + 'annotations/half-train_cocoformat.json',
img_prefix=data_root + 'train',
ref_img_sampler=dict(
num_ref_imgs=1,
frame_range=10,
filter_key_img=True,
method='uniform'),
pipeline=train_pipeline),
val=dict(
type=dataset_type,
ann_file=data_root + 'annotations/half-val_cocoformat.json',
img_prefix=data_root + 'train',
ref_img_sampler=None,
pipeline=test_pipeline),
test=dict(
type=dataset_type,
ann_file=data_root + 'annotations/half-val_cocoformat.json',
img_prefix=data_root + 'train',
ref_img_sampler=None,
pipeline=test_pipeline))
| 2,465 | 31.88 | 77 | py |
mmdetection | mmdetection-master/tests/data/configs_mmtrack/selsa_faster_rcnn_r101_dc5_1x.py | _base_ = [
'./faster_rcnn_r50_dc5.py', './mot_challenge.py',
'../../../configs/_base_/default_runtime.py'
]
model = dict(
type='SELSA',
pretrains=None,
detector=dict(
backbone=dict(depth=18, base_channels=2),
roi_head=dict(
type='SelsaRoIHead',
bbox_head=dict(
type='SelsaBBoxHead',
num_shared_fcs=2,
aggregator=dict(
type='SelsaAggregator',
in_channels=32,
num_attention_blocks=16)))))
# dataset settings
data = dict(
val=dict(
ref_img_sampler=dict(
_delete_=True,
num_ref_imgs=14,
frame_range=[-7, 7],
method='test_with_adaptive_stride')),
test=dict(
ref_img_sampler=dict(
_delete_=True,
num_ref_imgs=14,
frame_range=[-7, 7],
method='test_with_adaptive_stride')))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=500,
warmup_ratio=1.0 / 3,
step=[2, 5])
# runtime settings
total_epochs = 7
evaluation = dict(metric=['bbox'], interval=7)
| 1,351 | 26.591837 | 72 | py |
mmdetection | mmdetection-master/tests/data/configs_mmtrack/tracktor_faster-rcnn_r50_fpn_4e.py | _base_ = [
'./faster_rcnn_r50_fpn.py', './mot_challenge.py',
'../../../configs/_base_/default_runtime.py'
]
model = dict(
type='Tracktor',
pretrains=dict(
detector= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/faster_rcnn/faster-rcnn_r50_fpn_4e_mot17-half-64ee2ed4.pth', # noqa: E501
reid= # noqa: E251
'https://download.openmmlab.com/mmtracking/mot/reid/reid_r50_6e_mot17-4bf6b63d.pth' # noqa: E501
),
detector=dict(
rpn_head=dict(bbox_coder=dict(clip_border=False)),
roi_head=dict(
bbox_head=dict(bbox_coder=dict(
clip_border=False), num_classes=1))),
reid=dict(
type='BaseReID',
backbone=dict(
type='ResNet',
depth=18,
base_channels=2,
num_stages=4,
out_indices=(3, ),
style='pytorch'),
neck=dict(type='GlobalAveragePooling', kernel_size=(8, 4), stride=1),
head=dict(
type='LinearReIDHead',
num_fcs=1,
in_channels=16,
fc_channels=32,
out_channels=16,
num_classes=8,
loss=dict(type='CrossEntropyLoss', loss_weight=1.0),
loss_pairwise=dict(
type='TripletLoss', margin=0.3, loss_weight=1.0),
norm_cfg=dict(type='BN1d'),
act_cfg=dict(type='ReLU'))),
motion=dict(
type='CameraMotionCompensation',
warp_mode='cv2.MOTION_EUCLIDEAN',
num_iters=100,
stop_eps=0.00001),
tracker=dict(
type='TracktorTracker',
obj_score_thr=0.5,
regression=dict(
obj_score_thr=0.5,
nms=dict(type='nms', iou_threshold=0.6),
match_iou_thr=0.3),
reid=dict(
num_samples=10,
img_scale=(256, 128),
img_norm_cfg=None,
match_score_thr=2.0,
match_iou_thr=0.2),
momentums=None,
num_frames_retain=10))
# learning policy
lr_config = dict(
policy='step',
warmup='linear',
warmup_iters=100,
warmup_ratio=1.0 / 100,
step=[3])
# runtime settings
total_epochs = 4
evaluation = dict(metric=['bbox', 'track'], interval=1)
search_metrics = ['MOTA', 'IDF1', 'FN', 'FP', 'IDs', 'MT', 'ML']
| 2,309 | 31.535211 | 129 | py |
mmdetection | mmdetection-master/tests/test_data/test_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.datasets import get_loading_pipeline, replace_ImageToTensor
def test_replace_ImageToTensor():
# with MultiScaleFlipAug
pipelines = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1333, 800),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
])
]
with pytest.warns(UserWarning):
assert expected_pipelines == replace_ImageToTensor(pipelines)
# without MultiScaleFlipAug
pipelines = [
dict(type='LoadImageFromFile'),
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize'),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img']),
]
with pytest.warns(UserWarning):
assert expected_pipelines == replace_ImageToTensor(pipelines)
def test_get_loading_pipeline():
pipelines = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True),
dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
expected_pipelines = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True)
]
assert expected_pipelines == \
get_loading_pipeline(pipelines)
| 2,721 | 32.604938 | 70 | py |
mmdetection | mmdetection-master/tests/test_data/test_datasets/test_coco_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmcv
import pytest
from mmdet.datasets import CocoDataset
def _create_ids_error_coco_json(json_name):
image = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name.jpg',
}
annotation_1 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0,
}
annotation_2 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0,
}
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
}]
fake_json = {
'images': [image],
'annotations': [annotation_1, annotation_2],
'categories': categories
}
mmcv.dump(fake_json, json_name)
def test_coco_annotation_ids_unique():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_ids_error_coco_json(fake_json_file)
# test annotation ids not unique error
with pytest.raises(AssertionError):
CocoDataset(ann_file=fake_json_file, classes=('car', ), pipeline=[])
| 1,293 | 20.932203 | 76 | py |
mmdetection | mmdetection-master/tests/test_data/test_datasets/test_coco_occluded.py | import os.path as osp
from tempfile import TemporaryDirectory
import mmcv
import numpy as np
from mmdet.datasets import OccludedSeparatedCocoDataset
def test_occluded_separated_coco_dataset():
ann = [[
'fake1.jpg', 'person', 8, [219.9, 176.12, 11.14, 34.23], {
'size': [480, 640],
'counts': b'nYW31n>2N2FNbA48Kf=?XBDe=m0OM3M4YOPB8_>L4JXao5'
}
]] * 3
dummy_mask = np.zeros((10, 10), dtype=np.uint8)
dummy_mask[:5, :5] = 1
rle = {
'size': [480, 640],
'counts': b'nYW31n>2N2FNbA48Kf=?XBDe=m0OM3M4YOPB8_>L4JXao5'
}
res = [([np.array([[50, 60, 70, 80, 0.77]])] * 2, [[rle]] * 2)] * 3
tempdir = TemporaryDirectory()
ann_path = osp.join(tempdir.name, 'coco_occluded.pkl')
mmcv.dump(ann, ann_path)
dataset = OccludedSeparatedCocoDataset(
ann_file='tests/data/coco_sample.json',
occluded_ann=ann_path,
separated_ann=ann_path,
pipeline=[],
test_mode=True)
eval_res = dataset.evaluate(res)
assert isinstance(eval_res, dict)
assert eval_res['occluded_recall'] == 100
assert eval_res['separated_recall'] == 100
| 1,160 | 28.769231 | 71 | py |
mmdetection | mmdetection-master/tests/test_data/test_datasets/test_common.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import logging
import os.path as osp
import tempfile
from unittest.mock import MagicMock, patch
import mmcv
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.runner import EpochBasedRunner
from torch.utils.data import DataLoader
from mmdet.core.evaluation import DistEvalHook, EvalHook
from mmdet.datasets import DATASETS, CocoDataset, CustomDataset, build_dataset
def _create_dummy_coco_json(json_name):
image = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name.jpg',
}
annotation_1 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0,
}
annotation_2 = {
'id': 2,
'image_id': 0,
'category_id': 0,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0,
}
annotation_3 = {
'id': 3,
'image_id': 0,
'category_id': 0,
'area': 1600,
'bbox': [150, 160, 40, 40],
'iscrowd': 0,
}
annotation_4 = {
'id': 4,
'image_id': 0,
'category_id': 0,
'area': 10000,
'bbox': [250, 260, 100, 100],
'iscrowd': 0,
}
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
}]
fake_json = {
'images': [image],
'annotations':
[annotation_1, annotation_2, annotation_3, annotation_4],
'categories': categories
}
mmcv.dump(fake_json, json_name)
def _create_dummy_custom_pkl(pkl_name):
fake_pkl = [{
'filename': 'fake_name.jpg',
'width': 640,
'height': 640,
'ann': {
'bboxes':
np.array([[50, 60, 70, 80], [100, 120, 130, 150],
[150, 160, 190, 200], [250, 260, 350, 360]]),
'labels':
np.array([0, 0, 0, 0])
}
}]
mmcv.dump(fake_pkl, pkl_name)
def _create_dummy_results():
boxes = [
np.array([[50, 60, 70, 80, 1.0], [100, 120, 130, 150, 0.98],
[150, 160, 190, 200, 0.96], [250, 260, 350, 360, 0.95]])
]
return [boxes]
@pytest.mark.parametrize('config_path',
['./configs/_base_/datasets/voc0712.py'])
def test_dataset_init(config_path, monkeypatch):
data_config = mmcv.Config.fromfile(config_path)
if 'data' not in data_config:
return
monkeypatch.chdir('./tests/') # to use ./tests/data
stage_names = ['train', 'val', 'test']
for stage_name in stage_names:
dataset_config = copy.deepcopy(data_config.data.get(stage_name))
dataset = build_dataset(dataset_config)
dataset[0]
def test_dataset_evaluation():
tmp_dir = tempfile.TemporaryDirectory()
# create dummy data
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_dummy_coco_json(fake_json_file)
# test single coco dataset evaluation
coco_dataset = CocoDataset(
ann_file=fake_json_file, classes=('car', ), pipeline=[])
fake_results = _create_dummy_results()
eval_results = coco_dataset.evaluate(fake_results, classwise=True)
assert eval_results['bbox_mAP'] == 1
assert eval_results['bbox_mAP_50'] == 1
assert eval_results['bbox_mAP_75'] == 1
# test concat dataset evaluation
fake_concat_results = _create_dummy_results() + _create_dummy_results()
# build concat dataset through two config dict
coco_cfg = dict(
type='CocoDataset',
ann_file=fake_json_file,
classes=('car', ),
pipeline=[])
concat_cfgs = [coco_cfg, coco_cfg]
concat_dataset = build_dataset(concat_cfgs)
eval_results = concat_dataset.evaluate(fake_concat_results)
assert eval_results['0_bbox_mAP'] == 1
assert eval_results['0_bbox_mAP_50'] == 1
assert eval_results['0_bbox_mAP_75'] == 1
assert eval_results['1_bbox_mAP'] == 1
assert eval_results['1_bbox_mAP_50'] == 1
assert eval_results['1_bbox_mAP_75'] == 1
# build concat dataset through concatenated ann_file
coco_cfg = dict(
type='CocoDataset',
ann_file=[fake_json_file, fake_json_file],
classes=('car', ),
pipeline=[])
concat_dataset = build_dataset(coco_cfg)
eval_results = concat_dataset.evaluate(fake_concat_results)
assert eval_results['0_bbox_mAP'] == 1
assert eval_results['0_bbox_mAP_50'] == 1
assert eval_results['0_bbox_mAP_75'] == 1
assert eval_results['1_bbox_mAP'] == 1
assert eval_results['1_bbox_mAP_50'] == 1
assert eval_results['1_bbox_mAP_75'] == 1
# create dummy data
fake_pkl_file = osp.join(tmp_dir.name, 'fake_data.pkl')
_create_dummy_custom_pkl(fake_pkl_file)
# test single custom dataset evaluation
custom_dataset = CustomDataset(
ann_file=fake_pkl_file, classes=('car', ), pipeline=[])
fake_results = _create_dummy_results()
eval_results = custom_dataset.evaluate(fake_results)
assert eval_results['mAP'] == 1
# test concat dataset evaluation
fake_concat_results = _create_dummy_results() + _create_dummy_results()
# build concat dataset through two config dict
custom_cfg = dict(
type='CustomDataset',
ann_file=fake_pkl_file,
classes=('car', ),
pipeline=[])
concat_cfgs = [custom_cfg, custom_cfg]
concat_dataset = build_dataset(concat_cfgs)
eval_results = concat_dataset.evaluate(fake_concat_results)
assert eval_results['0_mAP'] == 1
assert eval_results['1_mAP'] == 1
# build concat dataset through concatenated ann_file
concat_cfg = dict(
type='CustomDataset',
ann_file=[fake_pkl_file, fake_pkl_file],
classes=('car', ),
pipeline=[])
concat_dataset = build_dataset(concat_cfg)
eval_results = concat_dataset.evaluate(fake_concat_results)
assert eval_results['0_mAP'] == 1
assert eval_results['1_mAP'] == 1
# build concat dataset through explicit type
concat_cfg = dict(
type='ConcatDataset',
datasets=[custom_cfg, custom_cfg],
separate_eval=False)
concat_dataset = build_dataset(concat_cfg)
eval_results = concat_dataset.evaluate(fake_concat_results, metric='mAP')
assert eval_results['mAP'] == 1
assert len(concat_dataset.datasets[0].data_infos) == \
len(concat_dataset.datasets[1].data_infos)
assert len(concat_dataset.datasets[0].data_infos) == 1
tmp_dir.cleanup()
@patch('mmdet.apis.single_gpu_test', MagicMock)
@patch('mmdet.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookParam', (EvalHook, DistEvalHook))
def test_evaluation_hook(EvalHookParam):
# create dummy data
dataloader = DataLoader(torch.ones((5, 2)))
# 0.1. dataloader is not a DataLoader object
with pytest.raises(TypeError):
EvalHookParam(dataloader=MagicMock(), interval=-1)
# 0.2. negative interval
with pytest.raises(ValueError):
EvalHookParam(dataloader, interval=-1)
# 1. start=None, interval=1: perform evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, interval=1)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2
# 2. start=1, interval=1: perform evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, start=1, interval=1)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 2
# 3. start=None, interval=2: perform evaluation after epoch 2, 4, 6, etc
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, interval=2)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 1 # after epoch 2
# 4. start=1, interval=2: perform evaluation after epoch 1, 3, 5, etc
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, start=1, interval=2)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # after epoch 1 & 3
# 5. start=0/negative, interval=1: perform evaluation after each epoch and
# before epoch 1.
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, start=0)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2
# 6. start=0, interval=2, dynamic_intervals=[(3, 1)]: the evaluation
    # interval is 2 when it is less than 3 epochs, otherwise it is 1.
runner = _build_demo_runner()
evalhook = EvalHookParam(
dataloader, start=0, interval=2, dynamic_intervals=[(3, 1)])
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 4)
assert evalhook.evaluate.call_count == 3
# the evaluation start epoch cannot be less than 0
runner = _build_demo_runner()
with pytest.raises(ValueError):
EvalHookParam(dataloader, start=-2)
evalhook = EvalHookParam(dataloader, start=0)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner.run([dataloader], [('train', 1)], 2)
assert evalhook.evaluate.call_count == 3 # before epoch1 and after e1 & e2
    # 7. resuming from epoch i, start = x (x<=i), interval =1: perform
# evaluation after each epoch and before the first epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, start=1)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner._epoch = 2
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # before & after epoch 3
    # 8. resuming from epoch i, start = i+1/None, interval =1: perform
# evaluation after each epoch.
runner = _build_demo_runner()
evalhook = EvalHookParam(dataloader, start=2)
evalhook.evaluate = MagicMock()
runner.register_hook(evalhook)
runner._epoch = 1
runner.run([dataloader], [('train', 1)], 3)
assert evalhook.evaluate.call_count == 2 # after epoch 2 & 3
def _build_demo_runner():
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
model = Model()
tmp_dir = tempfile.mkdtemp()
runner = EpochBasedRunner(
model=model, work_dir=tmp_dir, logger=logging.getLogger())
return runner
@pytest.mark.parametrize('classes, expected_length', [(['bus'], 2),
(['car'], 1),
(['bus', 'car'], 2)])
def test_allow_empty_images(classes, expected_length):
dataset_class = DATASETS.get('CocoDataset')
# Filter empty images
filtered_dataset = dataset_class(
ann_file='tests/data/coco_sample.json',
img_prefix='tests/data',
pipeline=[],
classes=classes,
filter_empty_gt=True)
# Get all
full_dataset = dataset_class(
ann_file='tests/data/coco_sample.json',
img_prefix='tests/data',
pipeline=[],
classes=classes,
filter_empty_gt=False)
assert len(filtered_dataset) == expected_length
assert len(filtered_dataset.img_ids) == expected_length
assert len(full_dataset) == 3
assert len(full_dataset.img_ids) == 3
assert filtered_dataset.CLASSES == classes
assert full_dataset.CLASSES == classes

# File: mmdetection-master/tests/test_data/test_datasets/test_custom_dataset.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import unittest
from unittest.mock import MagicMock, patch
import pytest
from mmdet.datasets import DATASETS
@patch('mmdet.datasets.CocoDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CustomDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.XMLDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CityscapesDataset.load_annotations', MagicMock())
@patch('mmdet.datasets.CocoDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CustomDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.XMLDataset._filter_imgs', MagicMock)
@patch('mmdet.datasets.CityscapesDataset._filter_imgs', MagicMock)
@pytest.mark.parametrize('dataset',
['CocoDataset', 'VOCDataset', 'CityscapesDataset'])
def test_custom_classes_override_default(dataset):
dataset_class = DATASETS.get(dataset)
if dataset in ['CocoDataset', 'CityscapesDataset']:
dataset_class.coco = MagicMock()
dataset_class.cat_ids = MagicMock()
original_classes = dataset_class.CLASSES
# Test setting classes as a tuple
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=('bus', 'car'),
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ('bus', 'car')
print(custom_dataset)
# Test setting classes as a list
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['bus', 'car'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
    # Test overriding with classes that are not a subset of the original classes
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=['foo'],
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['foo']
print(custom_dataset)
# Test default behavior
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=None,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES == original_classes
print(custom_dataset)
# Test sending file path
import tempfile
with tempfile.TemporaryDirectory() as tmpdir:
path = tmpdir + 'classes.txt'
with open(path, 'w') as f:
f.write('bus\ncar\n')
custom_dataset = dataset_class(
ann_file=MagicMock(),
pipeline=[],
classes=path,
test_mode=True,
img_prefix='VOC2007' if dataset == 'VOCDataset' else '')
assert custom_dataset.CLASSES != original_classes
assert custom_dataset.CLASSES == ['bus', 'car']
print(custom_dataset)
class CustomDatasetTests(unittest.TestCase):
def setUp(self):
super().setUp()
self.data_dir = osp.join(
osp.dirname(osp.dirname(osp.dirname(__file__))), 'data')
self.dataset_class = DATASETS.get('XMLDataset')
    def test_data_infos__default_db_directories(self):
        """Test correct data read having a Pascal-VOC directory structure."""
test_dataset_root = osp.join(self.data_dir, 'VOCdevkit', 'VOC2007')
custom_ds = self.dataset_class(
data_root=test_dataset_root,
ann_file=osp.join(test_dataset_root, 'ImageSets', 'Main',
'trainval.txt'),
pipeline=[],
classes=('person', 'dog'),
test_mode=True)
self.assertListEqual([{
'id': '000001',
'filename': osp.join('JPEGImages', '000001.jpg'),
'width': 353,
'height': 500
}], custom_ds.data_infos)
def test_data_infos__overridden_db_subdirectories(self):
"""Test correct data read having a customized directory structure."""
test_dataset_root = osp.join(self.data_dir, 'custom_dataset')
custom_ds = self.dataset_class(
data_root=test_dataset_root,
ann_file=osp.join(test_dataset_root, 'trainval.txt'),
pipeline=[],
classes=('person', 'dog'),
test_mode=True,
img_prefix='',
img_subdir='images',
ann_subdir='images')
self.assertListEqual([{
'id': '000001',
'filename': osp.join('images', '000001.jpg'),
'width': 353,
'height': 500
}], custom_ds.data_infos)

# File: mmdetection-master/tests/test_data/test_datasets/test_dataset_wrapper.py
# Copyright (c) OpenMMLab. All rights reserved.
import bisect
import math
from collections import defaultdict
from unittest.mock import MagicMock
import numpy as np
import pytest
from mmdet.datasets import (ClassBalancedDataset, ConcatDataset, CustomDataset,
MultiImageMixDataset, RepeatDataset)
def test_dataset_wrapper():
CustomDataset.load_annotations = MagicMock()
CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx)
dataset_a = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_a = 10
cat_ids_list_a = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_a)
]
ann_info_list_a = []
for _ in range(len_a):
height = np.random.randint(10, 30)
        width = np.random.randint(10, 30)
        img = np.ones((height, width, 3))
gt_bbox = np.concatenate([
np.random.randint(1, 5, (2, 2)),
np.random.randint(1, 5, (2, 2)) + 5
],
axis=1)
gt_labels = np.random.randint(0, 80, 2)
ann_info_list_a.append(
dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img))
dataset_a.data_infos = MagicMock()
dataset_a.data_infos.__len__.return_value = len_a
dataset_a.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_a[idx])
dataset_a.get_ann_info = MagicMock(
side_effect=lambda idx: ann_info_list_a[idx])
dataset_b = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_b = 20
cat_ids_list_b = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_b)
]
ann_info_list_b = []
for _ in range(len_b):
height = np.random.randint(10, 30)
        width = np.random.randint(10, 30)
        img = np.ones((height, width, 3))
gt_bbox = np.concatenate([
np.random.randint(1, 5, (2, 2)),
np.random.randint(1, 5, (2, 2)) + 5
],
axis=1)
gt_labels = np.random.randint(0, 80, 2)
ann_info_list_b.append(
dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img))
dataset_b.data_infos = MagicMock()
dataset_b.data_infos.__len__.return_value = len_b
dataset_b.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_b[idx])
dataset_b.get_ann_info = MagicMock(
side_effect=lambda idx: ann_info_list_b[idx])
concat_dataset = ConcatDataset([dataset_a, dataset_b])
assert concat_dataset[5] == 5
assert concat_dataset[25] == 15
assert concat_dataset.get_cat_ids(5) == cat_ids_list_a[5]
assert concat_dataset.get_cat_ids(25) == cat_ids_list_b[15]
assert concat_dataset.get_ann_info(5) == ann_info_list_a[5]
assert concat_dataset.get_ann_info(25) == ann_info_list_b[15]
assert len(concat_dataset) == len(dataset_a) + len(dataset_b)
# Test if ConcatDataset allows dataset classes without the PALETTE
# attribute
palette_backup = CustomDataset.PALETTE
delattr(CustomDataset, 'PALETTE')
concat_dataset = ConcatDataset([dataset_a, dataset_b])
assert concat_dataset.PALETTE is None
CustomDataset.PALETTE = palette_backup
repeat_dataset = RepeatDataset(dataset_a, 10)
assert repeat_dataset[5] == 5
assert repeat_dataset[15] == 5
assert repeat_dataset[27] == 7
assert repeat_dataset.get_cat_ids(5) == cat_ids_list_a[5]
assert repeat_dataset.get_cat_ids(15) == cat_ids_list_a[5]
assert repeat_dataset.get_cat_ids(27) == cat_ids_list_a[7]
assert repeat_dataset.get_ann_info(5) == ann_info_list_a[5]
assert repeat_dataset.get_ann_info(15) == ann_info_list_a[5]
assert repeat_dataset.get_ann_info(27) == ann_info_list_a[7]
assert len(repeat_dataset) == 10 * len(dataset_a)
# Test if RepeatDataset allows dataset classes without the PALETTE
# attribute
delattr(CustomDataset, 'PALETTE')
repeat_dataset = RepeatDataset(dataset_a, 10)
assert repeat_dataset.PALETTE is None
CustomDataset.PALETTE = palette_backup
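    # manually reproduce the repeat factors used by ClassBalancedDataset: each
    # image is repeated ceil(max over its categories of
    # max(1, sqrt(repeat_thr / category_freq))) times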
category_freq = defaultdict(int)
for cat_ids in cat_ids_list_a:
cat_ids = set(cat_ids)
for cat_id in cat_ids:
category_freq[cat_id] += 1
for k, v in category_freq.items():
category_freq[k] = v / len(cat_ids_list_a)
mean_freq = np.mean(list(category_freq.values()))
repeat_thr = mean_freq
category_repeat = {
cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))
for cat_id, cat_freq in category_freq.items()
}
repeat_factors = []
for cat_ids in cat_ids_list_a:
cat_ids = set(cat_ids)
repeat_factor = max({category_repeat[cat_id] for cat_id in cat_ids})
repeat_factors.append(math.ceil(repeat_factor))
repeat_factors_cumsum = np.cumsum(repeat_factors)
repeat_factor_dataset = ClassBalancedDataset(dataset_a, repeat_thr)
assert len(repeat_factor_dataset) == repeat_factors_cumsum[-1]
for idx in np.random.randint(0, len(repeat_factor_dataset), 3):
assert repeat_factor_dataset[idx] == bisect.bisect_right(
repeat_factors_cumsum, idx)
assert repeat_factor_dataset.get_ann_info(idx) == ann_info_list_a[
bisect.bisect_right(repeat_factors_cumsum, idx)]
# Test if ClassBalancedDataset allows dataset classes without the PALETTE
# attribute
delattr(CustomDataset, 'PALETTE')
repeat_factor_dataset = ClassBalancedDataset(dataset_a, repeat_thr)
assert repeat_factor_dataset.PALETTE is None
CustomDataset.PALETTE = palette_backup
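    # build a Mosaic / MixUp style pipeline to exercise MultiImageMixDataset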
img_scale = (60, 60)
pipeline = [
dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
dict(
type='RandomAffine',
scaling_ratio_range=(0.1, 2),
border=(-img_scale[0] // 2, -img_scale[1] // 2)),
dict(
type='MixUp',
img_scale=img_scale,
ratio_range=(0.8, 1.6),
pad_val=114.0),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Resize', img_scale=img_scale, keep_ratio=True),
dict(type='Pad', pad_to_square=True, pad_val=114.0),
]
CustomDataset.load_annotations = MagicMock()
results = []
for _ in range(2):
height = np.random.randint(10, 30)
        width = np.random.randint(10, 30)
        img = np.ones((height, width, 3))
gt_bbox = np.concatenate([
np.random.randint(1, 5, (2, 2)),
np.random.randint(1, 5, (2, 2)) + 5
],
axis=1)
gt_labels = np.random.randint(0, 80, 2)
results.append(dict(gt_bboxes=gt_bbox, gt_labels=gt_labels, img=img))
CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: results[idx])
dataset_a = CustomDataset(
ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')
len_a = 2
cat_ids_list_a = [
np.random.randint(0, 80, num).tolist()
for num in np.random.randint(1, 20, len_a)
]
dataset_a.data_infos = MagicMock()
dataset_a.data_infos.__len__.return_value = len_a
dataset_a.get_cat_ids = MagicMock(
side_effect=lambda idx: cat_ids_list_a[idx])
# test dynamic_scale deprecated
with pytest.raises(RuntimeError):
MultiImageMixDataset(dataset_a, pipeline, (80, 80))
multi_image_mix_dataset = MultiImageMixDataset(dataset_a, pipeline)
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
assert results_['img'].shape == (img_scale[0], img_scale[1], 3)
# test skip_type_keys
multi_image_mix_dataset = MultiImageMixDataset(
dataset_a,
pipeline,
skip_type_keys=('MixUp', 'RandomFlip', 'Resize', 'Pad'))
for idx in range(len_a):
results_ = multi_image_mix_dataset[idx]
assert results_['img'].shape == (img_scale[0], img_scale[1], 3)
# Test if MultiImageMixDataset allows dataset classes without the PALETTE
# attribute
delattr(CustomDataset, 'PALETTE')
multi_image_mix_dataset = MultiImageMixDataset(dataset_a, pipeline)
assert multi_image_mix_dataset.PALETTE is None
CustomDataset.PALETTE = palette_backup

# File: mmdetection-master/tests/test_data/test_datasets/test_objects365.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmcv
import pytest
from mmdet.datasets import Objects365V1Dataset, Objects365V2Dataset
def _create_objects365_json(json_name):
images = [{
'file_name': 'fake1.jpg',
'height': 800,
'width': 800,
'id': 0
}, {
'file_name': 'fake2.jpg',
'height': 800,
'width': 800,
'id': 1
}, {
'file_name': 'patch16/objects365_v2_00908726.jpg',
'height': 800,
'width': 800,
'id': 2
}]
annotations = [{
'bbox': [0, 0, 20, 20],
'area': 400.00,
'score': 1.0,
'category_id': 1,
'id': 1,
'image_id': 0
}, {
'bbox': [0, 0, 20, 20],
'area': 400.00,
'score': 1.0,
'category_id': 2,
'id': 2,
'image_id': 0
}, {
'bbox': [0, 0, 20, 20],
'area': 400.00,
'score': 1.0,
'category_id': 1,
'id': 3,
'image_id': 1
}, {
'bbox': [0, 0, 20, 20],
'area': 400.00,
'score': 1.0,
'category_id': 1,
'id': 4,
'image_id': 2
}]
categories = [{
'id': 1,
'name': 'bus',
'supercategory': 'none'
}, {
'id': 2,
'name': 'car',
'supercategory': 'none'
}]
fake_json = {
'images': images,
'annotations': annotations,
'categories': categories
}
print(fake_json)
mmcv.dump(fake_json, json_name)
def _create_ids_error_coco_json(json_name):
image = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name.jpg',
}
annotation_1 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0,
}
annotation_2 = {
'id': 1,
'image_id': 0,
'category_id': 0,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0,
}
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
}]
fake_json = {
'images': [image],
'annotations': [annotation_1, annotation_2],
'categories': categories
}
mmcv.dump(fake_json, json_name)
@pytest.mark.parametrize('datasets',
[Objects365V1Dataset, Objects365V2Dataset])
def test_annotation_ids_unique(datasets):
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_ids_error_coco_json(fake_json_file)
# test annotation ids not unique error
with pytest.raises(AssertionError):
datasets(ann_file=fake_json_file, classes=('car', ), pipeline=[])
tmp_dir.cleanup()
def test_load_objects365v1_annotations():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_objects365_json(fake_json_file)
dataset = Objects365V1Dataset(
ann_file=fake_json_file, classes=('bus', 'car'), pipeline=[])
    # Objects365V1Dataset does not filter out images in the `objv2_ignore_list`
assert len(dataset.data_infos) == 3
tmp_dir.cleanup()
def test_load_objects365v2_annotations():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
_create_objects365_json(fake_json_file)
dataset = Objects365V2Dataset(
ann_file=fake_json_file, classes=('bus', 'car'), pipeline=[])
    # Objects365V2Dataset needs to filter out images in the `objv2_ignore_list`
assert len(dataset.data_infos) == 2
tmp_dir.cleanup()

# File: mmdetection-master/tests/test_data/test_datasets/test_openimages_dataset.py
import csv
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pytest
from mmdet.datasets import OpenImagesChallengeDataset, OpenImagesDataset
def _create_ids_error_oid_csv(
label_file,
fake_csv_file,
):
label_description = ['/m/000002', 'Football']
    # `newline=''` is used to avoid an out-of-bounds index error on Windows
with open(label_file, 'w', newline='') as f:
f_csv = csv.writer(f)
f_csv.writerow(label_description)
header = [
'ImageID', 'Source', 'LabelName', 'Confidence', 'XMin', 'XMax', 'YMin',
'YMax', 'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction',
'IsInside'
]
annotations = [[
'color', 'xclick', '/m/000002', '1', '0.022673031', '0.9642005',
'0.07103825', '0.80054647', '0', '0', '0', '0', '0'
],
[
'000595fe6fee6369', 'xclick', '/m/000000', '1', '0',
'1', '0', '1', '0', '0', '1', '0', '0'
]]
    # `newline=''` is used to avoid an out-of-bounds index error on Windows
with open(fake_csv_file, 'w', newline='') as f:
f_csv = csv.writer(f)
f_csv.writerow(header)
f_csv.writerows(annotations)
def _create_oid_style_ann(label_file, csv_file, label_level_file):
label_description = [['/m/000000', 'Sports equipment'],
['/m/000001', 'Ball'], ['/m/000002', 'Football'],
['/m/000004', 'Bicycle']]
with open(label_file, 'w', newline='') as f:
f_csv = csv.writer(f)
f_csv.writerows(label_description)
header = [
'ImageID', 'Source', 'LabelName', 'Confidence', 'XMin', 'XMax', 'YMin',
'YMax', 'IsOccluded', 'IsTruncated', 'IsGroupOf', 'IsDepiction',
'IsInside'
]
annotations = [
[
'color', 'xclick', '/m/000002', 1, 0.0333333, 0.1, 0.0333333, 0.1,
0, 0, 1, 0, 0
],
[
'color', 'xclick', '/m/000002', 1, 0.1, 0.166667, 0.1, 0.166667, 0,
0, 0, 0, 0
],
]
    # `newline=''` is used to avoid an out-of-bounds index error on Windows
with open(csv_file, 'w', newline='') as f:
f_csv = csv.writer(f)
f_csv.writerow(header)
f_csv.writerows(annotations)
header = ['ImageID', 'Source', 'LabelName', 'Confidence']
annotations = [['color', 'xclick', '/m/000002', '1'],
['color', 'xclick', '/m/000004', '0']]
    # `newline=''` is used to avoid an out-of-bounds index error on Windows
with open(label_level_file, 'w', newline='') as f:
f_csv = csv.writer(f)
f_csv.writerow(header)
f_csv.writerows(annotations)
def _create_hierarchy_json(hierarchy_name):
fake_hierarchy = \
{'LabelName': '/m/0bl9f', # entity label
'Subcategory': [
{
'LabelName': '/m/000000',
'Subcategory':
[
{'LabelName': '/m/000001',
'Subcategory':
[
{
'LabelName': '/m/000002'
}
]
},
{
'LabelName': '/m/000004'
}
]
}
]
}
mmcv.dump(fake_hierarchy, hierarchy_name)
def _create_hierarchy_np(hierarchy_name):
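    # fake (num_classes + 1) x (num_classes + 1) class-hierarchy matrix saved
    # in the numpy format consumed by OpenImagesChallengeDataset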
fake_hierarchy = np.array([[0, 1, 0, 0, 0], [0, 1, 1, 0,
0], [0, 1, 1, 1, 0],
[0, 1, 0, 0, 1], [0, 0, 0, 0, 0]])
with open(hierarchy_name, 'wb') as f:
np.save(f, fake_hierarchy)
def _create_dummy_results():
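    # fake per-class detections: no predictions for the first two classes,
    # six boxes for the third class and one box for the fourth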
boxes = [
np.zeros((0, 5)),
np.zeros((0, 5)),
np.array([[10, 10, 15, 15, 1.0], [15, 15, 30, 30, 0.98],
[10, 10, 25, 25, 0.98], [28, 28, 35, 35, 0.97],
[30, 30, 51, 51, 0.96], [100, 110, 120, 130, 0.15]]),
np.array([[30, 30, 50, 50, 0.51]]),
]
return [boxes]
def _create_oid_challenge_style_ann(txt_file, label_file, label_level_file):
bboxes = [
'validation/color.jpg\n',
'4 29\n',
'2\n',
'1 0.0333333 0.1 0.0333333 0.1 1\n',
'1 0.1 0.166667 0.1 0.166667 0\n',
]
    # `newline=''` is used to avoid an out-of-bounds index error on Windows
with open(txt_file, 'w', newline='') as f:
f.writelines(bboxes)
f.close()
label_description = [['/m/000000', 'Sports equipment', 1],
['/m/000001', 'Ball', 2],
['/m/000002', 'Football', 3],
['/m/000004', 'Bicycle', 4]]
    # `newline=''` is used to avoid an out-of-bounds index error on Windows
with open(label_file, 'w', newline='') as f:
f_csv = csv.writer(f)
f_csv.writerows(label_description)
header = ['ImageID', 'LabelName', 'Confidence']
annotations = [['color', '/m/000001', '1'], ['color', '/m/000000', '0']]
    # `newline=''` is used to avoid an out-of-bounds index error on Windows
with open(label_level_file, 'w', newline='') as f:
f_csv = csv.writer(f)
f_csv.writerow(header)
f_csv.writerows(annotations)
def _create_metas(meta_file):
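    # fake image meta info (filename and original shape) that the OpenImages
    # datasets read from meta_file during evaluation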
fake_meta = [{
'filename': 'data/OpenImages/OpenImages/validation/color.jpg',
'ori_shape': (300, 300, 3)
}]
mmcv.dump(fake_meta, meta_file)
def test_oid_annotation_ids_unique():
# create fake ann files
tmp_dir = tempfile.TemporaryDirectory()
fake_label_file = osp.join(tmp_dir.name, 'fake_label.csv')
fake_ann_file = osp.join(tmp_dir.name, 'fake_ann.csv')
_create_ids_error_oid_csv(fake_label_file, fake_ann_file)
# test annotation ids not unique error
with pytest.raises(AssertionError):
OpenImagesDataset(
ann_file=fake_ann_file, label_file=fake_label_file, pipeline=[])
tmp_dir.cleanup()
def test_openimages_dataset():
# create fake ann files
tmp_dir = tempfile.TemporaryDirectory()
label_file = osp.join(tmp_dir.name, 'label_file.csv')
ann_file = osp.join(tmp_dir.name, 'ann_file.csv')
label_level_file = osp.join(tmp_dir.name, 'label_level_file.csv')
_create_oid_style_ann(label_file, ann_file, label_level_file)
hierarchy_json = osp.join(tmp_dir.name, 'hierarchy.json')
_create_hierarchy_json(hierarchy_json)
    # test that hierarchy_file must not be None when get_parent_classes is True
with pytest.raises(AssertionError):
OpenImagesDataset(
ann_file=ann_file,
label_file=label_file,
image_level_ann_file=label_level_file,
pipeline=[])
dataset = OpenImagesDataset(
ann_file=ann_file,
label_file=label_file,
image_level_ann_file=label_level_file,
hierarchy_file=hierarchy_json,
pipeline=[])
ann = dataset.get_ann_info(0)
# two legal detection bboxes with `group_of` parameter
assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == \
ann['gt_is_group_ofs'].shape[0] == 2
# test load metas from pipeline
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(128, 128),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
dataset = OpenImagesDataset(
ann_file=ann_file,
img_prefix='tests/data',
label_file=label_file,
image_level_ann_file=label_level_file,
load_from_file=False,
hierarchy_file=hierarchy_json,
pipeline=test_pipeline)
dataset.prepare_test_img(0)
assert len(dataset.test_img_metas) == 1
result = _create_dummy_results()
dataset.evaluate(result)
# test get hierarchy for classes
hierarchy_json = osp.join(tmp_dir.name, 'hierarchy.json')
_create_hierarchy_json(hierarchy_json)
# test with hierarchy file wrong suffix
with pytest.raises(AssertionError):
fake_path = osp.join(tmp_dir.name, 'hierarchy.csv')
OpenImagesDataset(
ann_file=ann_file,
img_prefix='tests/data',
label_file=label_file,
image_level_ann_file=label_level_file,
load_from_file=False,
hierarchy_file=fake_path,
pipeline=test_pipeline)
    # test that the hierarchy file is loaded successfully
hierarchy = dataset.get_relation_matrix(hierarchy_json)
hierarchy_gt = np.array([[1, 0, 0, 0], [1, 1, 0, 0], [1, 1, 1, 0],
[1, 0, 0, 1]])
assert np.equal(hierarchy, hierarchy_gt).all()
# test evaluation
# create fake metas
meta_file = osp.join(tmp_dir.name, 'meta.pkl')
_create_metas(meta_file)
dataset = OpenImagesDataset(
ann_file=ann_file,
label_file=label_file,
image_level_ann_file=label_level_file,
hierarchy_file=hierarchy_json,
meta_file=meta_file,
pipeline=[])
    # test evaluation that uses group_of boxes, adds parent classes to both
    # GT and predictions, and takes image-level labels into account.
# In the first label (Sports equipment): tp = [0, 1, 0, 0, 1],
# fp = [1, 0, 1, 1, 0]
# In the second label (Ball), tp = [0, 1, 0, 1], fp = [1, 0, 1, 0].
# In the third label (Football), tp = [0, 1, 0, 1], fp = [1, 0, 1, 0].
# In the forth label (Bicycle), tp = [0], fp = [1].
result = _create_dummy_results()
parsed_results = dataset.evaluate(result)
assert np.isclose(parsed_results['mAP'], 0.8333, 1e-4)
dataset = OpenImagesDataset(
ann_file=ann_file,
label_file=label_file,
load_image_level_labels=False,
image_level_ann_file=label_level_file,
hierarchy_file=hierarchy_json,
meta_file=meta_file,
pipeline=[])
    # test evaluation that uses group_of boxes, adds parent classes to both
    # GT and predictions, but does not take image-level labels into account.
# In the first label (Sports equipment): tp = [0, 1, 0, 0, 1],
# fp = [1, 0, 1, 1, 0]
# In the second label (Ball), tp = [0, 1, 0, 1], fp = [1, 0, 1, 0].
# In the third label (Football), tp = [0, 1, 0, 1], fp = [1, 0, 1, 0].
# In the forth label (Bicycle), tp = [], fp = [].
result = _create_dummy_results()
parsed_results = dataset.evaluate(result)
assert np.isclose(parsed_results['mAP'], 0.8333, 1e-4)
tmp_dir.cleanup()
def test_openimages_challenge_dataset():
# create fake ann files
tmp_dir = tempfile.TemporaryDirectory()
ann_file = osp.join(tmp_dir.name, 'ann_file.txt')
label_file = osp.join(tmp_dir.name, 'label_file.csv')
label_level_file = osp.join(tmp_dir.name, 'label_level_file.csv')
    _create_oid_challenge_style_ann(ann_file, label_file, label_level_file)
dataset = OpenImagesChallengeDataset(
ann_file=ann_file,
label_file=label_file,
load_image_level_labels=False,
get_supercategory=False,
pipeline=[])
ann = dataset.get_ann_info(0)
# two legal detection bboxes with `group_of` parameter
assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == \
ann['gt_is_group_ofs'].shape[0] == 2
dataset.prepare_train_img(0)
dataset.prepare_test_img(0)
meta_file = osp.join(tmp_dir.name, 'meta.pkl')
_create_metas(meta_file)
result = _create_dummy_results()
with pytest.raises(AssertionError):
fake_json = osp.join(tmp_dir.name, 'hierarchy.json')
OpenImagesChallengeDataset(
ann_file=ann_file,
label_file=label_file,
image_level_ann_file=label_level_file,
hierarchy_file=fake_json,
meta_file=meta_file,
pipeline=[])
hierarchy_file = osp.join(tmp_dir.name, 'hierarchy.np')
_create_hierarchy_np(hierarchy_file)
dataset = OpenImagesChallengeDataset(
ann_file=ann_file,
label_file=label_file,
image_level_ann_file=label_level_file,
hierarchy_file=hierarchy_file,
meta_file=meta_file,
pipeline=[])
dataset.evaluate(result)
tmp_dir.cleanup()

# File: mmdetection-master/tests/test_data/test_datasets/test_panoptic_dataset.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import mmcv
import numpy as np
from mmdet.core import encode_mask_results
from mmdet.datasets.api_wrappers import pq_compute_single_core
from mmdet.datasets.coco_panoptic import INSTANCE_OFFSET, CocoPanopticDataset
try:
from panopticapi.utils import id2rgb
except ImportError:
id2rgb = None
def _create_panoptic_style_json(json_name):
image1 = {
'id': 0,
'width': 640,
'height': 640,
'file_name': 'fake_name1.jpg',
}
image2 = {
'id': 1,
'width': 640,
'height': 800,
'file_name': 'fake_name2.jpg',
}
images = [image1, image2]
annotations = [
{
'segments_info': [{
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0
}, {
'id': 2,
'category_id': 1,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 0
}, {
'id': 3,
'category_id': 2,
'iscrowd': 0,
'bbox': [1, 189, 612, 285],
'area': 70036
}],
'file_name':
'fake_name1.jpg',
'image_id':
0
},
{
'segments_info': [
{
# Different to instance style json, there
# are duplicate ids in panoptic style json
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [50, 60, 20, 20],
'iscrowd': 0
},
{
'id': 4,
'category_id': 1,
'area': 900,
'bbox': [100, 120, 30, 30],
'iscrowd': 1
},
{
'id': 5,
'category_id': 2,
'iscrowd': 0,
'bbox': [100, 200, 200, 300],
'area': 66666
},
{
'id': 6,
'category_id': 0,
'iscrowd': 0,
'bbox': [1, 189, -10, 285],
'area': 70036
}
],
'file_name':
'fake_name2.jpg',
'image_id':
1
}
]
categories = [{
'id': 0,
'name': 'car',
'supercategory': 'car',
'isthing': 1
}, {
'id': 1,
'name': 'person',
'supercategory': 'person',
'isthing': 1
}, {
'id': 2,
'name': 'wall',
'supercategory': 'wall',
'isthing': 0
}]
fake_json = {
'images': images,
'annotations': annotations,
'categories': categories
}
mmcv.dump(fake_json, json_name)
return fake_json
def test_load_panoptic_style_json():
tmp_dir = tempfile.TemporaryDirectory()
fake_json_file = osp.join(tmp_dir.name, 'fake_data.json')
fake_json = _create_panoptic_style_json(fake_json_file)
dataset = CocoPanopticDataset(
ann_file=fake_json_file,
classes=[cat['name'] for cat in fake_json['categories']],
pipeline=[])
ann = dataset.get_ann_info(0)
# two legal instances
assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 2
# three masks for both foreground and background
assert len(ann['masks']) == 3
ann = dataset.get_ann_info(1)
# one legal instance, one illegal instance,
# one crowd instance and one background mask
assert ann['bboxes'].shape[0] == ann['labels'].shape[0] == 1
assert ann['bboxes_ignore'].shape[0] == 1
assert len(ann['masks']) == 3
def _create_panoptic_gt_annotations(ann_file):
categories = [{
'id': 0,
'name': 'person',
'supercategory': 'person',
'isthing': 1
}, {
'id': 1,
'name': 'dog',
'supercategory': 'dog',
'isthing': 1
}, {
'id': 2,
'name': 'wall',
'supercategory': 'wall',
'isthing': 0
}]
images = [{
'id': 0,
'width': 80,
'height': 60,
'file_name': 'fake_name1.jpg',
}]
annotations = [{
'segments_info': [{
'id': 1,
'category_id': 0,
'area': 400,
'bbox': [10, 10, 10, 40],
'iscrowd': 0
}, {
'id': 2,
'category_id': 0,
'area': 400,
'bbox': [30, 10, 10, 40],
'iscrowd': 0
}, {
'id': 3,
'category_id': 1,
'iscrowd': 0,
'bbox': [50, 10, 10, 5],
'area': 50
}, {
'id': 4,
'category_id': 2,
'iscrowd': 0,
'bbox': [0, 0, 80, 60],
'area': 3950
}],
'file_name':
'fake_name1.png',
'image_id':
0
}]
gt_json = {
'images': images,
'annotations': annotations,
'categories': categories
}
# 4 is the id of the background class annotation.
gt = np.zeros((60, 80), dtype=np.int64) + 4
gt_bboxes = np.array([[10, 10, 10, 40], [30, 10, 10, 40], [50, 10, 10, 5]],
dtype=np.int64)
for i in range(3):
x, y, w, h = gt_bboxes[i]
gt[y:y + h, x:x + w] = i + 1 # id starts from 1
gt = id2rgb(gt).astype(np.uint8)
img_path = osp.join(osp.dirname(ann_file), 'fake_name1.png')
mmcv.imwrite(gt[:, :, ::-1], img_path)
mmcv.dump(gt_json, ann_file)
return gt_json
def test_panoptic_evaluation():
if id2rgb is None:
return
# TP for background class, IoU=3576/4324=0.827
    # 2 is the category id of the background class
pred = np.zeros((60, 80), dtype=np.int64) + 2
pred_bboxes = np.array(
[
[11, 11, 10, 40], # TP IoU=351/449=0.78
[38, 10, 10, 40], # FP
[51, 10, 10, 5]
], # TP IoU=45/55=0.818
dtype=np.int64)
pred_labels = np.array([0, 0, 1], dtype=np.int64)
for i in range(3):
x, y, w, h = pred_bboxes[i]
pred[y:y + h, x:x + w] = (i + 1) * INSTANCE_OFFSET + pred_labels[i]
tmp_dir = tempfile.TemporaryDirectory()
ann_file = osp.join(tmp_dir.name, 'panoptic.json')
gt_json = _create_panoptic_gt_annotations(ann_file)
results = [{'pan_results': pred}]
dataset = CocoPanopticDataset(
ann_file=ann_file,
seg_prefix=tmp_dir.name,
classes=[cat['name'] for cat in gt_json['categories']],
pipeline=[])
# For 'person', sq = 0.78 / 1, rq = 1 / 2( 1 tp + 0.5 * (1 fn + 1 fp))
# For 'dog', sq = 0.818, rq = 1 / 1
# For 'wall', sq = 0.827, rq = 1 / 1
    # Here are the results for all classes:
# +--------+--------+--------+---------+------------+
# | | PQ | SQ | RQ | categories |
# +--------+--------+--------+---------+------------+
# | All | 67.869 | 80.898 | 83.333 | 3 |
# | Things | 60.453 | 79.996 | 75.000 | 2 |
# | Stuff | 82.701 | 82.701 | 100.000 | 1 |
# +--------+--------+--------+---------+------------+
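    # sanity check for the 'All' row: PQ is the mean of the per-class SQ * RQ,
    # i.e. (0.782 * 0.5 + 0.818 * 1.0 + 0.827 * 1.0) / 3 ~= 0.67869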
parsed_results = dataset.evaluate(results)
assert np.isclose(parsed_results['PQ'], 67.869)
assert np.isclose(parsed_results['SQ'], 80.898)
assert np.isclose(parsed_results['RQ'], 83.333)
assert np.isclose(parsed_results['PQ_th'], 60.453)
assert np.isclose(parsed_results['SQ_th'], 79.996)
assert np.isclose(parsed_results['RQ_th'], 75.000)
assert np.isclose(parsed_results['PQ_st'], 82.701)
assert np.isclose(parsed_results['SQ_st'], 82.701)
assert np.isclose(parsed_results['RQ_st'], 100.000)
# test jsonfile_prefix
outfile_prefix = osp.join(tmp_dir.name, 'results')
parsed_results = dataset.evaluate(results, jsonfile_prefix=outfile_prefix)
assert np.isclose(parsed_results['PQ'], 67.869)
assert np.isclose(parsed_results['SQ'], 80.898)
assert np.isclose(parsed_results['RQ'], 83.333)
assert np.isclose(parsed_results['PQ_th'], 60.453)
assert np.isclose(parsed_results['SQ_th'], 79.996)
assert np.isclose(parsed_results['RQ_th'], 75.000)
assert np.isclose(parsed_results['PQ_st'], 82.701)
assert np.isclose(parsed_results['SQ_st'], 82.701)
assert np.isclose(parsed_results['RQ_st'], 100.000)
# test classwise
parsed_results = dataset.evaluate(results, classwise=True)
assert np.isclose(parsed_results['PQ'], 67.869)
assert np.isclose(parsed_results['SQ'], 80.898)
assert np.isclose(parsed_results['RQ'], 83.333)
assert np.isclose(parsed_results['PQ_th'], 60.453)
assert np.isclose(parsed_results['SQ_th'], 79.996)
assert np.isclose(parsed_results['RQ_th'], 75.000)
assert np.isclose(parsed_results['PQ_st'], 82.701)
assert np.isclose(parsed_results['SQ_st'], 82.701)
assert np.isclose(parsed_results['RQ_st'], 100.000)
# test the api wrapper of `pq_compute_single_core`
# Codes are copied from `coco_panoptic.py` and modified
result_files, _ = dataset.format_results(
results, jsonfile_prefix=outfile_prefix)
imgs = dataset.coco.imgs
gt_json = dataset.coco.img_ann_map # image to annotations
gt_json = [{
'image_id': k,
'segments_info': v,
'file_name': imgs[k]['segm_file']
} for k, v in gt_json.items()]
pred_json = mmcv.load(result_files['panoptic'])
pred_json = dict((el['image_id'], el) for el in pred_json['annotations'])
# match the gt_anns and pred_anns in the same image
matched_annotations_list = []
for gt_ann in gt_json:
img_id = gt_ann['image_id']
matched_annotations_list.append((gt_ann, pred_json[img_id]))
gt_folder = dataset.seg_prefix
pred_folder = osp.join(osp.dirname(outfile_prefix), 'panoptic')
pq_stat = pq_compute_single_core(0, matched_annotations_list, gt_folder,
pred_folder, dataset.categories)
pq_all = pq_stat.pq_average(dataset.categories, isthing=None)[0]
assert np.isclose(pq_all['pq'] * 100, 67.869)
assert np.isclose(pq_all['sq'] * 100, 80.898)
assert np.isclose(pq_all['rq'] * 100, 83.333)
assert pq_all['n'] == 3
def _create_instance_segmentation_gt_annotations(ann_file):
categories = [{
'id': 0,
'name': 'person',
'supercategory': 'person',
'isthing': 1
}, {
'id': 1,
'name': 'dog',
'supercategory': 'dog',
'isthing': 1
}, {
'id': 2,
'name': 'wall',
'supercategory': 'wall',
'isthing': 0
}]
images = [{
'id': 0,
'width': 80,
'height': 60,
'file_name': 'fake_name1.jpg',
}]
person1_polygon = [10, 10, 20, 10, 20, 50, 10, 50, 10, 10]
person2_polygon = [30, 10, 40, 10, 40, 50, 30, 50, 30, 10]
dog_polygon = [50, 10, 60, 10, 60, 15, 50, 15, 50, 10]
annotations = [
{
'id': 0,
'image_id': 0,
'category_id': 0,
'segmentation': [person1_polygon],
'area': 400,
'bbox': [10, 10, 10, 40],
'iscrowd': 0
},
{
'id': 1,
'image_id': 0,
'category_id': 0,
'segmentation': [person2_polygon],
'area': 400,
'bbox': [30, 10, 10, 40],
'iscrowd': 0
},
{
'id': 2,
'image_id': 0,
'category_id': 1,
'segmentation': [dog_polygon],
'area': 50,
'bbox': [50, 10, 10, 5],
'iscrowd': 0
},
]
gt_json = {
'images': images,
'annotations': annotations,
'categories': categories
}
mmcv.dump(gt_json, ann_file)
def test_instance_segmentation_evaluation():
pred_bbox = [
np.array([[11, 10, 20, 50, 0.8], [31, 10, 40, 50, 0.8]]),
np.array([[51, 10, 60, 15, 0.7]])
]
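    # binary masks roughly matching the predicted boxes above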
person1_mask = np.zeros((60, 80), dtype=bool)
person1_mask[20:50, 11:20] = True
person2_mask = np.zeros((60, 80), dtype=bool)
person2_mask[20:50, 31:40] = True
dog_mask = np.zeros((60, 80), dtype=bool)
dog_mask[10:15, 51:60] = True
pred_mask = [[person1_mask, person2_mask], [
dog_mask,
]]
results = [{'ins_results': (pred_bbox, encode_mask_results(pred_mask))}]
tmp_dir = tempfile.TemporaryDirectory()
pan_ann_file = osp.join(tmp_dir.name, 'panoptic.json')
ins_ann_file = osp.join(tmp_dir.name, 'instance.json')
_create_panoptic_gt_annotations(pan_ann_file)
_create_instance_segmentation_gt_annotations(ins_ann_file)
dataset = CocoPanopticDataset(
ann_file=pan_ann_file,
ins_ann_file=ins_ann_file,
seg_prefix=tmp_dir.name,
pipeline=[])
dataset.THING_CLASSES = ['person', 'dog']
dataset.STUFF_CLASSES = ['wall']
dataset.CLASSES = dataset.THING_CLASSES + dataset.STUFF_CLASSES
parsed_results = dataset.evaluate(results, metric=['segm', 'bbox'])
    # Here are the results for instance segmentation:
# {
# 'segm_mAP': 0.5005, 'segm_mAP_50': 0.626, 'segm_mAP_75': 0.5,
# 'segm_mAP_s': 0.5, 'segm_mAP_m': -1.0, 'segm_mAP_l': -1.0,
# 'segm_mAP_copypaste': '0.500 0.626 0.500 0.500 -1.000 -1.000',
# 'bbox_mAP': 0.5636, 'bbox_mAP_50': 0.626, 'bbox_mAP_75': 0.626,
# 'bbox_mAP_s': 0.564, 'bbox_mAP_m': -1.0, 'bbox_mAP_l': -1.0,
# 'bbox_mAP_copypaste': '0.564 0.626 0.626 0.564 -1.000 -1.000'
# }
assert np.isclose(parsed_results['segm_mAP'], 0.5005)
assert np.isclose(parsed_results['bbox_mAP'], 0.5636)

# File: mmdetection-master/tests/test_data/test_datasets/test_xml_dataset.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.datasets import DATASETS
def test_xml_dataset():
dataconfig = {
'ann_file': 'data/VOCdevkit/VOC2007/ImageSets/Main/test.txt',
'img_prefix': 'data/VOCdevkit/VOC2007/',
'pipeline': [{
'type': 'LoadImageFromFile'
}]
}
XMLDataset = DATASETS.get('XMLDataset')
class XMLDatasetSubClass(XMLDataset):
CLASSES = None
    # get_ann_info and _filter_imgs of XMLDataset rely on self.CLASSES,
    # so a subclass with CLASSES = None should raise an AssertionError
with pytest.raises(AssertionError):
XMLDatasetSubClass(**dataconfig)

# File: mmdetection-master/tests/test_data/test_pipelines/test_formatting.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from mmcv.utils import build_from_cfg
from mmdet.datasets.builder import PIPELINES
def test_default_format_bundle():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../data'),
img_info=dict(filename='color.jpg'))
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
bundle = dict(type='DefaultFormatBundle')
bundle = build_from_cfg(bundle, PIPELINES)
results = load(results)
assert 'pad_shape' not in results
assert 'scale_factor' not in results
assert 'img_norm_cfg' not in results
results = bundle(results)
assert 'pad_shape' in results
assert 'scale_factor' in results
assert 'img_norm_cfg' in results

# File: mmdetection-master/tests/test_data/test_pipelines/test_loading.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.pipelines import (FilterAnnotations, LoadImageFromFile,
LoadImageFromWebcam,
LoadMultiChannelImageFromFiles)
class TestLoading:
@classmethod
def setup_class(cls):
cls.data_prefix = osp.join(osp.dirname(__file__), '../../data')
def test_load_img(self):
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == osp.join(self.data_prefix, 'color.jpg')
assert results['ori_filename'] == 'color.jpg'
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='color', channel_order='bgr', " + \
"file_client_args={'backend': 'disk'})"
# no img_prefix
results = dict(
img_prefix=None, img_info=dict(filename='tests/data/color.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['filename'] == 'tests/data/color.jpg'
assert results['ori_filename'] == 'tests/data/color.jpg'
assert results['img'].shape == (288, 512, 3)
# to_float32
transform = LoadImageFromFile(to_float32=True)
results = transform(copy.deepcopy(results))
assert results['img'].dtype == np.float32
# gray image
results = dict(
img_prefix=self.data_prefix, img_info=dict(filename='gray.jpg'))
transform = LoadImageFromFile()
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
transform = LoadImageFromFile(color_type='unchanged')
results = transform(copy.deepcopy(results))
assert results['img'].shape == (288, 512)
assert results['img'].dtype == np.uint8
def test_load_multi_channel_img(self):
results = dict(
img_prefix=self.data_prefix,
img_info=dict(filename=['color.jpg', 'color.jpg']))
transform = LoadMultiChannelImageFromFiles()
results = transform(copy.deepcopy(results))
assert results['filename'] == [
osp.join(self.data_prefix, 'color.jpg'),
osp.join(self.data_prefix, 'color.jpg')
]
assert results['ori_filename'] == ['color.jpg', 'color.jpg']
assert results['img'].shape == (288, 512, 3, 2)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3, 2)
assert results['ori_shape'] == (288, 512, 3, 2)
assert results['pad_shape'] == (288, 512, 3, 2)
assert results['scale_factor'] == 1.0
assert repr(transform) == transform.__class__.__name__ + \
"(to_float32=False, color_type='unchanged', " + \
"file_client_args={'backend': 'disk'})"
def test_load_webcam_img(self):
img = mmcv.imread(osp.join(self.data_prefix, 'color.jpg'))
results = dict(img=img)
transform = LoadImageFromWebcam()
results = transform(copy.deepcopy(results))
assert results['filename'] is None
assert results['ori_filename'] is None
assert results['img'].shape == (288, 512, 3)
assert results['img'].dtype == np.uint8
assert results['img_shape'] == (288, 512, 3)
assert results['ori_shape'] == (288, 512, 3)
def _build_filter_annotations_args():
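    # each case pairs FilterAnnotations kwargs with the expected number of
    # boxes kept after filtering; None means the whole sample is dropped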
kwargs = (dict(min_gt_bbox_wh=(100, 100)),
dict(min_gt_bbox_wh=(100, 100), keep_empty=False),
dict(min_gt_bbox_wh=(1, 1)), dict(min_gt_bbox_wh=(.01, .01)),
dict(min_gt_bbox_wh=(.01, .01),
by_mask=True), dict(by_mask=True),
dict(by_box=False, by_mask=True))
targets = (None, 0, 1, 2, 1, 1, 1)
return list(zip(targets, kwargs))
@pytest.mark.parametrize('target, kwargs', _build_filter_annotations_args())
def test_filter_annotations(target, kwargs):
filter_ann = FilterAnnotations(**kwargs)
bboxes = np.array([[2., 10., 4., 14.], [2., 10., 2.1, 10.1]])
raw_masks = np.zeros((2, 24, 24))
raw_masks[0, 10:14, 2:4] = 1
bitmap_masks = BitmapMasks(raw_masks, 24, 24)
results = dict(gt_bboxes=bboxes, gt_masks=bitmap_masks)
results = filter_ann(results)
if results is not None:
results = results['gt_bboxes'].shape[0]
assert results == target
polygons = [[np.array([2.0, 10.0, 4.0, 10.0, 4.0, 14.0, 2.0, 14.0])],
[np.array([2.0, 10.0, 2.1, 10.0, 2.1, 10.1, 2.0, 10.1])]]
polygon_masks = PolygonMasks(polygons, 24, 24)
results = dict(gt_bboxes=bboxes, gt_masks=polygon_masks)
results = filter_ann(results)
if results is not None:
results = len(results.get('gt_masks').masks)
assert results == target

# File: mmdetection-master/tests/test_data/test_pipelines/test_sampler.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.bbox.assigners import MaxIoUAssigner
from mmdet.core.bbox.samplers import (OHEMSampler, RandomSampler,
ScoreHLRSampler)
def test_random_sampler():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([1, 2])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
sampler = RandomSampler(
num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sampler_empty_gt():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.empty(0, ).long()
assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
sampler = RandomSampler(
num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sampler_empty_pred():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.empty(0, 4)
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([1, 2])
assign_result = assigner.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
sampler = RandomSampler(
num=10, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=True)
sample_result = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def _context_for_ohem():
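    # OHEM-style samplers need a detector's roi_head as `context` so they can
    # run the bbox head forward/loss when mining hard examples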
import sys
from os.path import dirname
sys.path.insert(0, dirname(dirname(dirname(__file__))))
from test_models.test_forward import _get_detector_cfg
model = _get_detector_cfg(
'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
model['pretrained'] = None
from mmdet.models import build_detector
context = build_detector(model).roi_head
return context
def test_ohem_sampler():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([1, 2])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
context = _context_for_ohem()
sampler = OHEMSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_ohem_sampler_empty_gt():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.LongTensor([])
gt_bboxes_ignore = torch.Tensor([])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
context = _context_for_ohem()
sampler = OHEMSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_ohem_sampler_empty_pred():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.empty(0, 4)
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_labels = torch.LongTensor([1, 2, 2, 3])
gt_bboxes_ignore = torch.Tensor([])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
context = _context_for_ohem()
sampler = OHEMSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
sample_result = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
def test_random_sample_result():
from mmdet.core.bbox.samplers.sampling_result import SamplingResult
SamplingResult.random(num_gts=0, num_preds=0)
SamplingResult.random(num_gts=0, num_preds=3)
SamplingResult.random(num_gts=3, num_preds=3)
SamplingResult.random(num_gts=0, num_preds=3)
SamplingResult.random(num_gts=7, num_preds=7)
SamplingResult.random(num_gts=7, num_preds=64)
SamplingResult.random(num_gts=24, num_preds=3)
for i in range(3):
SamplingResult.random(rng=i)
def test_score_hlr_sampler_empty_pred():
assigner = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
context = _context_for_ohem()
sampler = ScoreHLRSampler(
num=10,
pos_fraction=0.5,
context=context,
neg_pos_ub=-1,
add_gt_as_proposals=True)
gt_bboxes_ignore = torch.Tensor([])
feats = [torch.rand(1, 256, int(2**i), int(2**i)) for i in [6, 5, 4, 3, 2]]
# empty bbox
bboxes = torch.empty(0, 4)
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_labels = torch.LongTensor([1, 2, 2, 3])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
sample_result, _ = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.neg_inds) == 0
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
# empty gt
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.LongTensor([])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
sample_result, _ = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_inds) == 0
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
# non-empty input
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_labels = torch.LongTensor([1, 2, 2, 3])
assign_result = assigner.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
sample_result, _ = sampler.sample(
assign_result, bboxes, gt_bboxes, gt_labels, feats=feats)
assert len(sample_result.pos_bboxes) == len(sample_result.pos_inds)
assert len(sample_result.neg_bboxes) == len(sample_result.neg_inds)
| 9,735 | 28.50303 | 79 | py |
mmdetection | mmdetection-master/tests/test_data/test_pipelines/test_transform/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .utils import check_result_same, construct_toy_data, create_random_bboxes
__all__ = ['create_random_bboxes', 'construct_toy_data', 'check_result_same']
| 206 | 40.4 | 78 | py |
mmdetection | mmdetection-master/tests/test_data/test_pipelines/test_transform/test_img_augment.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import mmcv
import numpy as np
from mmcv.utils import build_from_cfg
from numpy.testing import assert_array_equal
from mmdet.datasets.builder import PIPELINES
from .utils import construct_toy_data
def test_adjust_color():
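    """Test ColorTransform against manually computed color adjustments."""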
results = construct_toy_data()
    # test without aug
transform = dict(type='ColorTransform', prob=0, level=10)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test with factor 1
img = results['img']
transform = dict(type='ColorTransform', prob=1, level=10)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], img)
# test with factor 0
transform_module.factor = 0
img_gray = mmcv.bgr2gray(img.copy())
img_r = np.stack([img_gray, img_gray, img_gray], axis=-1)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], img_r)
# test with factor 0.5
transform_module.factor = 0.5
results_transformed = transform_module(copy.deepcopy(results))
img = results['img']
assert_array_equal(
results_transformed['img'],
np.round(np.clip((img * 0.5 + img_r * 0.5), 0, 255)).astype(img.dtype))
def test_imequalize(nb_rand_test=100):
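    """Test EqualizeTransform against PIL.ImageOps.equalize."""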
def _imequalize(img):
# equalize the image using PIL.ImageOps.equalize
from PIL import Image, ImageOps
img = Image.fromarray(img)
equalized_img = np.asarray(ImageOps.equalize(img))
return equalized_img
results = construct_toy_data()
    # test without aug
transform = dict(type='EqualizeTransform', prob=0)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test equalize with case step=0
transform = dict(type='EqualizeTransform', prob=1.)
transform_module = build_from_cfg(transform, PIPELINES)
img = np.array([[0, 0, 0], [120, 120, 120], [255, 255, 255]],
dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results['img'] = img
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], img)
# test equalize with randomly sampled image.
for _ in range(nb_rand_test):
img = np.clip(np.random.uniform(0, 1, (1000, 1200, 3)) * 260, 0,
255).astype(np.uint8)
results['img'] = img
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], _imequalize(img))
def test_adjust_brightness(nb_rand_test=100):
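    """Test BrightnessTransform against PIL.ImageEnhance.Brightness."""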
def _adjust_brightness(img, factor):
# adjust the brightness of image using
# PIL.ImageEnhance.Brightness
from PIL import Image
from PIL.ImageEnhance import Brightness
img = Image.fromarray(img)
brightened_img = Brightness(img).enhance(factor)
return np.asarray(brightened_img)
results = construct_toy_data()
    # test without aug
transform = dict(type='BrightnessTransform', level=10, prob=0)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test case with factor 1.0
transform = dict(type='BrightnessTransform', level=10, prob=1.)
transform_module = build_from_cfg(transform, PIPELINES)
transform_module.factor = 1.0
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test case with factor 0.0
transform_module.factor = 0.0
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'],
np.zeros_like(results['img']))
# test with randomly sampled images and factors.
for _ in range(nb_rand_test):
img = np.clip(np.random.uniform(0, 1, (1000, 1200, 3)) * 260, 0,
255).astype(np.uint8)
factor = np.random.uniform()
transform_module.factor = factor
results['img'] = img
np.testing.assert_allclose(
transform_module(copy.deepcopy(results))['img'].astype(np.int32),
_adjust_brightness(img, factor).astype(np.int32),
rtol=0,
atol=1)
def test_adjust_contrast(nb_rand_test=100):
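    """Test ContrastTransform against PIL.ImageEnhance.Contrast."""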
def _adjust_contrast(img, factor):
from PIL import Image
from PIL.ImageEnhance import Contrast
        # Image.fromarray expects RGB by default, not BGR.
# convert from BGR to RGB
img = Image.fromarray(img[..., ::-1], mode='RGB')
contrasted_img = Contrast(img).enhance(factor)
# convert from RGB to BGR
return np.asarray(contrasted_img)[..., ::-1]
results = construct_toy_data()
    # test without aug
transform = dict(type='ContrastTransform', level=10, prob=0)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test case with factor 1.0
transform = dict(type='ContrastTransform', level=10, prob=1.)
transform_module = build_from_cfg(transform, PIPELINES)
transform_module.factor = 1.0
results_transformed = transform_module(copy.deepcopy(results))
assert_array_equal(results_transformed['img'], results['img'])
# test case with factor 0.0
transform_module.factor = 0.0
results_transformed = transform_module(copy.deepcopy(results))
np.testing.assert_allclose(
results_transformed['img'],
_adjust_contrast(results['img'], 0.),
rtol=0,
atol=1)
# test adjust_contrast with randomly sampled images and factors.
for _ in range(nb_rand_test):
img = np.clip(np.random.uniform(0, 1, (1200, 1000, 3)) * 260, 0,
255).astype(np.uint8)
factor = np.random.uniform()
transform_module.factor = factor
results['img'] = img
results_transformed = transform_module(copy.deepcopy(results))
        # Note the difference (at most 1) between PIL.ImageEnhance.Contrast
        # and mmcv.adjust_contrast, which comes from the slightly different
        # color-to-gray conversions used by mmcv and PIL.
np.testing.assert_allclose(
transform_module(copy.deepcopy(results))['img'].astype(np.int32),
_adjust_contrast(results['img'], factor).astype(np.int32),
rtol=0,
atol=1)
| 6,904 | 38.232955 | 79 | py |
mmdetection | mmdetection-master/tests/test_data/test_pipelines/test_transform/test_models_aug_test.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import torch
from mmcv.parallel import collate
from mmcv.utils import build_from_cfg
from mmdet.datasets.builder import PIPELINES
from mmdet.models import build_detector
def model_aug_test_template(cfg_file):
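    """Build a detector from cfg_file and run multi-scale flip aug test."""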
# get config
cfg = mmcv.Config.fromfile(cfg_file)
# init model
cfg.model.pretrained = None
cfg.model.train_cfg = None
model = build_detector(cfg.model)
# init test pipeline and set aug test
load_cfg, multi_scale_cfg = cfg.test_pipeline
multi_scale_cfg['flip'] = True
multi_scale_cfg['flip_direction'] = ['horizontal', 'vertical', 'diagonal']
multi_scale_cfg['img_scale'] = [(1333, 800), (800, 600), (640, 480)]
load = build_from_cfg(load_cfg, PIPELINES)
transform = build_from_cfg(multi_scale_cfg, PIPELINES)
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
results = transform(load(results))
assert len(results['img']) == 12
assert len(results['img_metas']) == 12
results['img'] = [collate([x]) for x in results['img']]
results['img_metas'] = [collate([x]).data[0] for x in results['img_metas']]
# aug test the model
model.eval()
with torch.no_grad():
aug_result = model(return_loss=False, rescale=True, **results)
return aug_result
def test_aug_test_size():
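    """MultiScaleFlipAug should yield one image per (scale, flip) variant."""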
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
# Define simple pipeline
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
# get config
transform = dict(
type='MultiScaleFlipAug',
transforms=[],
img_scale=[(1333, 800), (800, 600), (640, 480)],
flip=True,
flip_direction=['horizontal', 'vertical', 'diagonal'])
multi_aug_test_module = build_from_cfg(transform, PIPELINES)
results = load(results)
results = multi_aug_test_module(load(results))
# len(["original", "horizontal", "vertical", "diagonal"]) *
# len([(1333, 800), (800, 600), (640, 480)])
assert len(results['img']) == 12
def test_cascade_rcnn_aug_test():
aug_result = model_aug_test_template(
'configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 80
def test_mask_rcnn_aug_test():
aug_result = model_aug_test_template(
'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_htc_aug_test():
aug_result = model_aug_test_template('configs/htc/htc_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_scnet_aug_test():
aug_result = model_aug_test_template(
'configs/scnet/scnet_r50_fpn_1x_coco.py')
assert len(aug_result[0]) == 2
assert len(aug_result[0][0]) == 80
assert len(aug_result[0][1]) == 80
def test_cornernet_aug_test():
# get config
cfg = mmcv.Config.fromfile(
'configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py')
# init model
cfg.model.pretrained = None
cfg.model.train_cfg = None
model = build_detector(cfg.model)
# init test pipeline and set aug test
load_cfg, multi_scale_cfg = cfg.test_pipeline
multi_scale_cfg['flip'] = True
multi_scale_cfg['flip_direction'] = ['horizontal', 'vertical', 'diagonal']
multi_scale_cfg['scale_factor'] = [0.5, 1.0, 2.0]
load = build_from_cfg(load_cfg, PIPELINES)
transform = build_from_cfg(multi_scale_cfg, PIPELINES)
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
results = transform(load(results))
assert len(results['img']) == 12
assert len(results['img_metas']) == 12
results['img'] = [collate([x]) for x in results['img']]
results['img_metas'] = [collate([x]).data[0] for x in results['img_metas']]
# aug test the model
model.eval()
with torch.no_grad():
aug_result = model(return_loss=False, rescale=True, **results)
assert len(aug_result[0]) == 80
| 4,314 | 31.689394 | 79 | py |
mmdetection | mmdetection-master/tests/test_data/test_pipelines/test_transform/test_rotate.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pytest
from mmcv.utils import build_from_cfg
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import PIPELINES
from .utils import check_result_same, construct_toy_data
def test_rotate():
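    """Test Rotate: invalid configs, no-op cases and CW/CCW rotation."""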
# test assertion for invalid type of max_rotate_angle
with pytest.raises(AssertionError):
transform = dict(type='Rotate', level=1, max_rotate_angle=(30, ))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid type of scale
with pytest.raises(AssertionError):
transform = dict(type='Rotate', level=2, scale=(1.2, ))
build_from_cfg(transform, PIPELINES)
# test ValueError for invalid type of img_fill_val
with pytest.raises(ValueError):
transform = dict(
type='Rotate', level=2, img_fill_val=[
128,
])
build_from_cfg(transform, PIPELINES)
# test assertion for invalid number of elements in center
with pytest.raises(AssertionError):
transform = dict(type='Rotate', level=2, center=(0.5, ))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid type of center
with pytest.raises(AssertionError):
transform = dict(type='Rotate', level=2, center=[0, 0])
build_from_cfg(transform, PIPELINES)
# test case when no rotate aug (level=0)
results = construct_toy_data()
img_fill_val = (104, 116, 124)
seg_ignore_label = 255
transform = dict(
type='Rotate',
level=0,
prob=1.,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label,
)
rotate_module = build_from_cfg(transform, PIPELINES)
results_wo_rotate = rotate_module(copy.deepcopy(results))
check_result_same(results, results_wo_rotate)
# test case when no rotate aug (prob<=0)
transform = dict(
type='Rotate', level=10, prob=0., img_fill_val=img_fill_val, scale=0.6)
rotate_module = build_from_cfg(transform, PIPELINES)
results_wo_rotate = rotate_module(copy.deepcopy(results))
check_result_same(results, results_wo_rotate)
# test clockwise rotation with angle 90
results = construct_toy_data()
img_fill_val = 128
transform = dict(
type='Rotate',
level=10,
max_rotate_angle=90,
img_fill_val=img_fill_val,
# set random_negative_prob to 0 for clockwise rotation
random_negative_prob=0.,
prob=1.)
rotate_module = build_from_cfg(transform, PIPELINES)
results_rotated = rotate_module(copy.deepcopy(results))
img_r = np.array([[img_fill_val, 6, 2, img_fill_val],
[img_fill_val, 7, 3, img_fill_val]]).astype(np.uint8)
img_r = np.stack([img_r, img_r, img_r], axis=-1)
results_gt = copy.deepcopy(results)
results_gt['img'] = img_r
results_gt['gt_bboxes'] = np.array([[1., 0., 2., 1.]], dtype=np.float32)
results_gt['gt_bboxes_ignore'] = np.empty((0, 4), dtype=np.float32)
gt_masks = np.array([[0, 1, 1, 0], [0, 0, 1, 0]],
dtype=np.uint8)[None, :, :]
results_gt['gt_masks'] = BitmapMasks(gt_masks, 2, 4)
results_gt['gt_semantic_seg'] = np.array(
[[255, 6, 2, 255], [255, 7, 3,
255]]).astype(results['gt_semantic_seg'].dtype)
check_result_same(results_gt, results_rotated)
# test clockwise rotation with angle 90, PolygonMasks
results = construct_toy_data(poly2mask=False)
results_rotated = rotate_module(copy.deepcopy(results))
    gt_masks = [[np.array([2, 0, 2, 1, 1, 1, 1, 0], dtype=np.float64)]]
results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4)
check_result_same(results_gt, results_rotated)
# test counter-clockwise rotation with angle 90,
    # and specify the rotation center
img_fill_val = (104, 116, 124)
transform = dict(
type='Rotate',
level=10,
max_rotate_angle=90,
center=(0, 0),
img_fill_val=img_fill_val,
# set random_negative_prob to 1 for counter-clockwise rotation
random_negative_prob=1.,
prob=1.)
results = construct_toy_data()
rotate_module = build_from_cfg(transform, PIPELINES)
results_rotated = rotate_module(copy.deepcopy(results))
results_gt = copy.deepcopy(results)
h, w = results['img'].shape[:2]
img_r = np.stack([
np.ones((h, w)) * img_fill_val[0],
np.ones((h, w)) * img_fill_val[1],
np.ones((h, w)) * img_fill_val[2]
],
axis=-1).astype(np.uint8)
img_r[0, 0, :] = 1
img_r[0, 1, :] = 5
results_gt['img'] = img_r
results_gt['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)
results_gt['gt_bboxes_ignore'] = np.empty((0, 4), dtype=np.float32)
results_gt['gt_labels'] = np.empty((0, ), dtype=np.int64)
gt_masks = np.empty((0, h, w), dtype=np.uint8)
results_gt['gt_masks'] = BitmapMasks(gt_masks, h, w)
gt_seg = (np.ones((h, w)) * 255).astype(results['gt_semantic_seg'].dtype)
gt_seg[0, 0], gt_seg[0, 1] = 1, 5
results_gt['gt_semantic_seg'] = gt_seg
check_result_same(results_gt, results_rotated)
transform = dict(
type='Rotate',
level=10,
max_rotate_angle=90,
center=(0),
img_fill_val=img_fill_val,
random_negative_prob=1.,
prob=1.)
rotate_module = build_from_cfg(transform, PIPELINES)
results_rotated = rotate_module(copy.deepcopy(results))
check_result_same(results_gt, results_rotated)
# test counter-clockwise rotation with angle 90,
    # and specify the rotation center, PolygonMasks
results = construct_toy_data(poly2mask=False)
results_rotated = rotate_module(copy.deepcopy(results))
    gt_masks = [[np.array([0, 0, 0, 0, 1, 0, 1, 0], dtype=np.float64)]]
results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4)
check_result_same(results_gt, results_rotated)
# test AutoAugment equipped with Rotate
policies = [[dict(type='Rotate', level=10, prob=1.)]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
policies = [[
dict(type='Rotate', level=10, prob=1.),
dict(
type='Rotate',
level=8,
max_rotate_angle=90,
center=(0),
img_fill_val=img_fill_val)
]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
| 6,600 | 37.156069 | 79 | py |
mmdetection | mmdetection-master/tests/test_data/test_pipelines/test_transform/test_shear.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pytest
from mmcv.utils import build_from_cfg
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import PIPELINES
from .utils import check_result_same, construct_toy_data
def test_shear():
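    """Test Shear: invalid configs, no-op cases, horizontal/vertical shear."""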
# test assertion for invalid type of max_shear_magnitude
with pytest.raises(AssertionError):
transform = dict(type='Shear', level=1, max_shear_magnitude=(0.5, ))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid value of max_shear_magnitude
with pytest.raises(AssertionError):
transform = dict(type='Shear', level=2, max_shear_magnitude=1.2)
build_from_cfg(transform, PIPELINES)
# test ValueError for invalid type of img_fill_val
with pytest.raises(ValueError):
transform = dict(type='Shear', level=2, img_fill_val=[128])
build_from_cfg(transform, PIPELINES)
results = construct_toy_data()
# test case when no shear aug (level=0, direction='horizontal')
img_fill_val = (104, 116, 124)
seg_ignore_label = 255
transform = dict(
type='Shear',
level=0,
prob=1.,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label,
direction='horizontal')
shear_module = build_from_cfg(transform, PIPELINES)
results_wo_shear = shear_module(copy.deepcopy(results))
check_result_same(results, results_wo_shear)
# test case when no shear aug (level=0, direction='vertical')
transform = dict(
type='Shear',
level=0,
prob=1.,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label,
direction='vertical')
shear_module = build_from_cfg(transform, PIPELINES)
results_wo_shear = shear_module(copy.deepcopy(results))
check_result_same(results, results_wo_shear)
# test case when no shear aug (prob<=0)
transform = dict(
type='Shear',
level=10,
prob=0.,
img_fill_val=img_fill_val,
direction='vertical')
shear_module = build_from_cfg(transform, PIPELINES)
results_wo_shear = shear_module(copy.deepcopy(results))
check_result_same(results, results_wo_shear)
# test shear horizontally, magnitude=1
transform = dict(
type='Shear',
level=10,
prob=1.,
img_fill_val=img_fill_val,
direction='horizontal',
max_shear_magnitude=1.,
random_negative_prob=0.)
shear_module = build_from_cfg(transform, PIPELINES)
results_sheared = shear_module(copy.deepcopy(results))
results_gt = copy.deepcopy(results)
img_s = np.array([[1, 2, 3, 4], [0, 5, 6, 7]], dtype=np.uint8)
img_s = np.stack([img_s, img_s, img_s], axis=-1)
img_s[1, 0, :] = np.array(img_fill_val)
results_gt['img'] = img_s
results_gt['gt_bboxes'] = np.array([[0., 0., 3., 1.]], dtype=np.float32)
results_gt['gt_bboxes_ignore'] = np.array([[2., 0., 4., 1.]],
dtype=np.float32)
gt_masks = np.array([[0, 1, 1, 0], [0, 0, 1, 0]],
dtype=np.uint8)[None, :, :]
results_gt['gt_masks'] = BitmapMasks(gt_masks, 2, 4)
results_gt['gt_semantic_seg'] = np.array(
[[1, 2, 3, 4], [255, 5, 6, 7]], dtype=results['gt_semantic_seg'].dtype)
check_result_same(results_gt, results_sheared)
# test PolygonMasks with shear horizontally, magnitude=1
results = construct_toy_data(poly2mask=False)
results_sheared = shear_module(copy.deepcopy(results))
    gt_masks = [[np.array([0, 0, 2, 0, 3, 1, 1, 1], dtype=np.float64)]]
results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4)
check_result_same(results_gt, results_sheared)
# test shear vertically, magnitude=-1
img_fill_val = 128
results = construct_toy_data()
transform = dict(
type='Shear',
level=10,
prob=1.,
img_fill_val=img_fill_val,
direction='vertical',
max_shear_magnitude=1.,
random_negative_prob=1.)
shear_module = build_from_cfg(transform, PIPELINES)
results_sheared = shear_module(copy.deepcopy(results))
results_gt = copy.deepcopy(results)
img_s = np.array([[1, 6, img_fill_val, img_fill_val],
[5, img_fill_val, img_fill_val, img_fill_val]],
dtype=np.uint8)
img_s = np.stack([img_s, img_s, img_s], axis=-1)
results_gt['img'] = img_s
results_gt['gt_bboxes'] = np.empty((0, 4), dtype=np.float32)
results_gt['gt_labels'] = np.empty((0, ), dtype=np.int64)
results_gt['gt_bboxes_ignore'] = np.empty((0, 4), dtype=np.float32)
gt_masks = np.array([[0, 1, 0, 0], [0, 0, 0, 0]],
dtype=np.uint8)[None, :, :]
results_gt['gt_masks'] = BitmapMasks(gt_masks, 2, 4)
results_gt['gt_semantic_seg'] = np.array(
[[1, 6, 255, 255], [5, 255, 255, 255]],
dtype=results['gt_semantic_seg'].dtype)
check_result_same(results_gt, results_sheared)
# test PolygonMasks with shear vertically, magnitude=-1
results = construct_toy_data(poly2mask=False)
results_sheared = shear_module(copy.deepcopy(results))
    gt_masks = [[np.array([0, 0, 2, 0, 2, 0, 0, 1], dtype=np.float64)]]
results_gt['gt_masks'] = PolygonMasks(gt_masks, 2, 4)
check_result_same(results_gt, results_sheared)
results = construct_toy_data()
# same mask for BitmapMasks and PolygonMasks
results['gt_masks'] = BitmapMasks(
np.array([[0, 1, 1, 0], [0, 1, 1, 0]], dtype=np.uint8)[None, :, :], 2,
4)
results['gt_bboxes'] = np.array([[1., 0., 2., 1.]], dtype=np.float32)
results_sheared_bitmap = shear_module(copy.deepcopy(results))
check_result_same(results_sheared_bitmap, results_sheared)
# test AutoAugment equipped with Shear
policies = [[dict(type='Shear', level=10, prob=1.)]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
policies = [[
dict(type='Shear', level=10, prob=1.),
dict(
type='Shear',
level=8,
img_fill_val=img_fill_val,
direction='vertical',
max_shear_magnitude=1.)
]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
| 6,481 | 38.284848 | 79 | py |
mmdetection | mmdetection-master/tests/test_data/test_pipelines/test_transform/test_transform.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import mmcv
import numpy as np
import pytest
import torch
from mmcv.utils import build_from_cfg
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmdet.datasets.builder import PIPELINES
from .utils import create_full_masks, create_random_bboxes
def test_resize():
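    """Test Resize: invalid configs, multiscale modes and seg resizing."""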
# test assertion if img_scale is a list
with pytest.raises(AssertionError):
transform = dict(type='Resize', img_scale=[1333, 800], keep_ratio=True)
build_from_cfg(transform, PIPELINES)
    # test assertion if len(img_scale) != 1 while ratio_range is not None
with pytest.raises(AssertionError):
transform = dict(
type='Resize',
img_scale=[(1333, 800), (1333, 600)],
ratio_range=(0.9, 1.1),
keep_ratio=True)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid multiscale_mode
with pytest.raises(AssertionError):
transform = dict(
type='Resize',
img_scale=[(1333, 800), (1333, 600)],
keep_ratio=True,
multiscale_mode='2333')
build_from_cfg(transform, PIPELINES)
# test assertion if both scale and scale_factor are set
with pytest.raises(AssertionError):
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True)
transform = build_from_cfg(transform, PIPELINES)
results = load(results)
results['scale'] = (1333, 800)
results['scale_factor'] = 1.0
results = transform(results)
transform = dict(type='Resize', img_scale=(1333, 800), keep_ratio=True)
resize_module = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['img2'] = copy.deepcopy(img)
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['img_fields'] = ['img', 'img2']
results = resize_module(results)
assert np.equal(results['img'], results['img2']).all()
results.pop('scale')
results.pop('scale_factor')
transform = dict(
type='Resize',
img_scale=(1280, 800),
multiscale_mode='value',
keep_ratio=False)
resize_module = build_from_cfg(transform, PIPELINES)
results = resize_module(results)
assert np.equal(results['img'], results['img2']).all()
assert results['img_shape'] == (800, 1280, 3)
    assert results['img'].dtype == results['img2'].dtype == np.uint8
results_seg = {
'img': img,
'img_shape': img.shape,
'ori_shape': img.shape,
'gt_semantic_seg': copy.deepcopy(img),
'gt_seg': copy.deepcopy(img),
'seg_fields': ['gt_semantic_seg', 'gt_seg']
}
transform = dict(
type='Resize',
img_scale=(640, 400),
multiscale_mode='value',
keep_ratio=False)
resize_module = build_from_cfg(transform, PIPELINES)
results_seg = resize_module(results_seg)
assert results_seg['gt_semantic_seg'].shape == results_seg['gt_seg'].shape
assert results_seg['img_shape'] == (400, 640, 3)
assert results_seg['img_shape'] != results_seg['ori_shape']
assert results_seg['gt_semantic_seg'].shape == results_seg['img_shape']
assert np.equal(results_seg['gt_semantic_seg'],
results_seg['gt_seg']).all()
def test_flip():
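    """Test RandomFlip: invalid configs and multi-direction flipping."""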
# test assertion for invalid flip_ratio
with pytest.raises(AssertionError):
transform = dict(type='RandomFlip', flip_ratio=1.5)
build_from_cfg(transform, PIPELINES)
# test assertion for 0 <= sum(flip_ratio) <= 1
with pytest.raises(AssertionError):
transform = dict(
type='RandomFlip',
flip_ratio=[0.7, 0.8],
direction=['horizontal', 'vertical'])
build_from_cfg(transform, PIPELINES)
# test assertion for mismatch between number of flip_ratio and direction
with pytest.raises(AssertionError):
transform = dict(type='RandomFlip', flip_ratio=[0.4, 0.5])
build_from_cfg(transform, PIPELINES)
# test assertion for invalid direction
with pytest.raises(AssertionError):
transform = dict(
type='RandomFlip', flip_ratio=1., direction='horizonta')
build_from_cfg(transform, PIPELINES)
transform = dict(type='RandomFlip', flip_ratio=1.)
flip_module = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img2'] = copy.deepcopy(img)
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results['img_fields'] = ['img', 'img2']
results = flip_module(results)
assert np.equal(results['img'], results['img2']).all()
flip_module = build_from_cfg(transform, PIPELINES)
results = flip_module(results)
assert np.equal(results['img'], results['img2']).all()
assert np.equal(original_img, results['img']).all()
# test flip_ratio is float, direction is list
transform = dict(
type='RandomFlip',
flip_ratio=0.9,
direction=['horizontal', 'vertical', 'diagonal'])
flip_module = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results['img_fields'] = ['img']
results = flip_module(results)
if results['flip']:
assert np.array_equal(
mmcv.imflip(original_img, results['flip_direction']),
results['img'])
else:
assert np.array_equal(original_img, results['img'])
# test flip_ratio is list, direction is list
transform = dict(
type='RandomFlip',
flip_ratio=[0.3, 0.3, 0.2],
direction=['horizontal', 'vertical', 'diagonal'])
flip_module = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results['img_fields'] = ['img']
results = flip_module(results)
if results['flip']:
assert np.array_equal(
mmcv.imflip(original_img, results['flip_direction']),
results['img'])
else:
assert np.array_equal(original_img, results['img'])
def test_random_crop():
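    """Test RandomCrop: bbox consistency and all four crop_type modes."""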
# test assertion for invalid random crop
with pytest.raises(AssertionError):
transform = dict(type='RandomCrop', crop_size=(-1, 0))
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# TODO: add img_fields test
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='RandomCrop', crop_size=(h - 20, w - 20))
crop_module = build_from_cfg(transform, PIPELINES)
results = crop_module(results)
assert results['img'].shape[:2] == (h - 20, w - 20)
    # All bboxes should be preserved after crop
assert results['img_shape'][:2] == (h - 20, w - 20)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes'].shape[0] == 8
assert results['gt_bboxes_ignore'].shape[0] == 2
def area(bboxes):
return np.prod(bboxes[:, 2:4] - bboxes[:, 0:2], axis=1)
assert (area(results['gt_bboxes']) <= area(gt_bboxes)).all()
assert (area(results['gt_bboxes_ignore']) <= area(gt_bboxes_ignore)).all()
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
# test assertion for invalid crop_type
with pytest.raises(ValueError):
transform = dict(
type='RandomCrop', crop_size=(1, 1), crop_type='unknown')
build_from_cfg(transform, PIPELINES)
# test assertion for invalid crop_size
with pytest.raises(AssertionError):
transform = dict(
type='RandomCrop', crop_type='relative', crop_size=(0, 0))
build_from_cfg(transform, PIPELINES)
def _construct_toy_data():
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
# image
results['img'] = img
results['img_shape'] = img.shape
results['img_fields'] = ['img']
# bboxes
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
results['gt_bboxes'] = np.array([[0., 0., 2., 1.]], dtype=np.float32)
results['gt_bboxes_ignore'] = np.array([[2., 0., 3., 1.]],
dtype=np.float32)
# labels
results['gt_labels'] = np.array([1], dtype=np.int64)
return results
# test crop_type "relative_range"
results = _construct_toy_data()
transform = dict(
type='RandomCrop',
crop_type='relative_range',
crop_size=(0.3, 0.7),
allow_negative_crop=True)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
h, w = results_transformed['img_shape'][:2]
assert int(2 * 0.3 + 0.5) <= h <= int(2 * 1 + 0.5)
assert int(4 * 0.7 + 0.5) <= w <= int(4 * 1 + 0.5)
assert results_transformed['gt_bboxes'].dtype == np.float32
assert results_transformed['gt_bboxes_ignore'].dtype == np.float32
# test crop_type "relative"
transform = dict(
type='RandomCrop',
crop_type='relative',
crop_size=(0.3, 0.7),
allow_negative_crop=True)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
h, w = results_transformed['img_shape'][:2]
assert h == int(2 * 0.3 + 0.5) and w == int(4 * 0.7 + 0.5)
assert results_transformed['gt_bboxes'].dtype == np.float32
assert results_transformed['gt_bboxes_ignore'].dtype == np.float32
# test crop_type "absolute"
transform = dict(
type='RandomCrop',
crop_type='absolute',
crop_size=(1, 2),
allow_negative_crop=True)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
h, w = results_transformed['img_shape'][:2]
assert h == 1 and w == 2
assert results_transformed['gt_bboxes'].dtype == np.float32
assert results_transformed['gt_bboxes_ignore'].dtype == np.float32
# test crop_type "absolute_range"
transform = dict(
type='RandomCrop',
crop_type='absolute_range',
crop_size=(1, 20),
allow_negative_crop=True)
transform_module = build_from_cfg(transform, PIPELINES)
results_transformed = transform_module(copy.deepcopy(results))
h, w = results_transformed['img_shape'][:2]
assert 1 <= h <= 2 and 1 <= w <= 4
assert results_transformed['gt_bboxes'].dtype == np.float32
assert results_transformed['gt_bboxes_ignore'].dtype == np.float32
def test_min_iou_random_crop():
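    """Test MinIoURandomCrop: the crop keeps required IoU with gt boxes."""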
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(1, w, h)
gt_bboxes_ignore = create_random_bboxes(1, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='MinIoURandomCrop')
crop_module = build_from_cfg(transform, PIPELINES)
# Test for img_fields
results_test = copy.deepcopy(results)
results_test['img1'] = results_test['img']
results_test['img_fields'] = ['img', 'img1']
with pytest.raises(AssertionError):
crop_module(results_test)
results = crop_module(results)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
patch = np.array([0, 0, results['img_shape'][1], results['img_shape'][0]])
ious = bbox_overlaps(patch.reshape(-1, 4),
results['gt_bboxes']).reshape(-1)
ious_ignore = bbox_overlaps(
patch.reshape(-1, 4), results['gt_bboxes_ignore']).reshape(-1)
mode = crop_module.mode
if mode == 1:
assert np.equal(results['gt_bboxes'], gt_bboxes).all()
assert np.equal(results['gt_bboxes_ignore'], gt_bboxes_ignore).all()
else:
assert (ious >= mode).all()
assert (ious_ignore >= mode).all()
def test_pad():
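    """Test Pad: size_divisor, pad_to_square and pad_val handling."""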
# test assertion if both size_divisor and size is None
with pytest.raises(AssertionError):
transform = dict(type='Pad')
build_from_cfg(transform, PIPELINES)
transform = dict(type='Pad', size_divisor=32)
transform = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img2'] = copy.deepcopy(img)
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results['img_fields'] = ['img', 'img2']
results = transform(results)
assert np.equal(results['img'], results['img2']).all()
# original img already divisible by 32
assert np.equal(results['img'], original_img).all()
img_shape = results['img'].shape
assert img_shape[0] % 32 == 0
assert img_shape[1] % 32 == 0
resize_transform = dict(
type='Resize', img_scale=(1333, 800), keep_ratio=True)
resize_module = build_from_cfg(resize_transform, PIPELINES)
results = resize_module(results)
results = transform(results)
img_shape = results['img'].shape
assert np.equal(results['img'], results['img2']).all()
assert img_shape[0] % 32 == 0
assert img_shape[1] % 32 == 0
    # test that size and size_divisor must be None when pad_to_square is True
with pytest.raises(AssertionError):
transform = dict(type='Pad', size_divisor=32, pad_to_square=True)
build_from_cfg(transform, PIPELINES)
transform = dict(type='Pad', pad_to_square=True)
transform = build_from_cfg(transform, PIPELINES)
results['img'] = img
results = transform(results)
assert results['img'].shape[0] == results['img'].shape[1]
    # test that pad_val is converted to a dict
transform = dict(type='Pad', size_divisor=32, pad_val=0)
with pytest.deprecated_call():
transform = build_from_cfg(transform, PIPELINES)
assert isinstance(transform.pad_val, dict)
results = transform(results)
img_shape = results['img'].shape
assert img_shape[0] % 32 == 0
assert img_shape[1] % 32 == 0
def test_normalize():
img_norm_cfg = dict(
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
transform = dict(type='Normalize', **img_norm_cfg)
transform = build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
original_img = copy.deepcopy(img)
results['img'] = img
results['img2'] = copy.deepcopy(img)
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['scale_factor'] = 1.0
results['img_fields'] = ['img', 'img2']
results = transform(results)
assert np.equal(results['img'], results['img2']).all()
mean = np.array(img_norm_cfg['mean'])
std = np.array(img_norm_cfg['std'])
converted_img = (original_img[..., ::-1] - mean) / std
assert np.allclose(results['img'], converted_img)
def test_albu_transform():
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
# Define simple pipeline
load = dict(type='LoadImageFromFile')
load = build_from_cfg(load, PIPELINES)
albu_transform = dict(
type='Albu', transforms=[dict(type='ChannelShuffle', p=1)])
albu_transform = build_from_cfg(albu_transform, PIPELINES)
normalize = dict(type='Normalize', mean=[0] * 3, std=[0] * 3, to_rgb=True)
normalize = build_from_cfg(normalize, PIPELINES)
# Execute transforms
results = load(results)
results = albu_transform(results)
results = normalize(results)
assert results['img'].dtype == np.float32
def test_random_center_crop_pad():
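    """Test RandomCenterCropPad in both train and test modes."""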
# test assertion for invalid crop_size while test_mode=False
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=(-1, 0),
test_mode=False,
test_pad_mode=None)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid ratios while test_mode=False
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=(1.0),
test_mode=False,
test_pad_mode=None)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid mean, std and to_rgb
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
mean=None,
std=None,
to_rgb=None,
test_mode=False,
test_pad_mode=None)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid crop_size while test_mode=True
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=(511, 511),
ratios=None,
border=None,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=True,
test_pad_mode=('logical_or', 127))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid ratios while test_mode=True
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=(0.9, 1.0, 1.1),
border=None,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=True,
test_pad_mode=('logical_or', 127))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid border while test_mode=True
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=128,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=True,
test_pad_mode=('logical_or', 127))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid test_pad_mode while test_mode=True
with pytest.raises(AssertionError):
transform = dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=True,
test_pad_mode=('do_nothing', 100))
build_from_cfg(transform, PIPELINES)
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
load = dict(type='LoadImageFromFile', to_float32=True)
load = build_from_cfg(load, PIPELINES)
results = load(results)
test_results = copy.deepcopy(results)
h, w, _ = results['img_shape']
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
train_transform = dict(
type='RandomCenterCropPad',
crop_size=(h - 20, w - 20),
ratios=(1.0, ),
border=128,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=False,
test_pad_mode=None)
crop_module = build_from_cfg(train_transform, PIPELINES)
train_results = crop_module(results)
assert train_results['img'].shape[:2] == (h - 20, w - 20)
    # All bboxes should be preserved after crop
assert train_results['pad_shape'][:2] == (h - 20, w - 20)
assert train_results['gt_bboxes'].shape[0] == 8
assert train_results['gt_bboxes_ignore'].shape[0] == 2
assert train_results['gt_bboxes'].dtype == np.float32
assert train_results['gt_bboxes_ignore'].dtype == np.float32
test_transform = dict(
type='RandomCenterCropPad',
crop_size=None,
ratios=None,
border=None,
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True,
test_mode=True,
test_pad_mode=('logical_or', 127))
crop_module = build_from_cfg(test_transform, PIPELINES)
test_results = crop_module(test_results)
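    # test_pad_mode ('logical_or', 127) pads each spatial dim to size | 127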
assert test_results['img'].shape[:2] == (h | 127, w | 127)
assert test_results['pad_shape'][:2] == (h | 127, w | 127)
assert 'border' in test_results
def test_multi_scale_flip_aug():
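    """Test MultiScaleFlipAug with img_scale and scale_factor configs."""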
# test assertion if give both scale_factor and img_scale
with pytest.raises(AssertionError):
transform = dict(
type='MultiScaleFlipAug',
scale_factor=1.0,
img_scale=[(1333, 800)],
transforms=[dict(type='Resize')])
build_from_cfg(transform, PIPELINES)
# test assertion if both scale_factor and img_scale are None
with pytest.raises(AssertionError):
transform = dict(
type='MultiScaleFlipAug',
scale_factor=None,
img_scale=None,
transforms=[dict(type='Resize')])
build_from_cfg(transform, PIPELINES)
# test assertion if img_scale is not tuple or list of tuple
with pytest.raises(AssertionError):
transform = dict(
type='MultiScaleFlipAug',
img_scale=[1333, 800],
transforms=[dict(type='Resize')])
build_from_cfg(transform, PIPELINES)
# test assertion if flip_direction is not str or list of str
with pytest.raises(AssertionError):
transform = dict(
type='MultiScaleFlipAug',
img_scale=[(1333, 800)],
flip_direction=1,
transforms=[dict(type='Resize')])
build_from_cfg(transform, PIPELINES)
scale_transform = dict(
type='MultiScaleFlipAug',
img_scale=[(1333, 800), (1333, 640)],
transforms=[dict(type='Resize', keep_ratio=True)])
transform = build_from_cfg(scale_transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
# Set initial values for default meta_keys
results['pad_shape'] = img.shape
results['img_fields'] = ['img']
scale_results = transform(copy.deepcopy(results))
assert len(scale_results['img']) == 2
assert scale_results['img'][0].shape == (750, 1333, 3)
assert scale_results['img_shape'][0] == (750, 1333, 3)
assert scale_results['img'][1].shape == (640, 1138, 3)
assert scale_results['img_shape'][1] == (640, 1138, 3)
scale_factor_transform = dict(
type='MultiScaleFlipAug',
scale_factor=[0.8, 1.0, 1.2],
transforms=[dict(type='Resize', keep_ratio=False)])
transform = build_from_cfg(scale_factor_transform, PIPELINES)
scale_factor_results = transform(copy.deepcopy(results))
assert len(scale_factor_results['img']) == 3
assert scale_factor_results['img'][0].shape == (230, 409, 3)
assert scale_factor_results['img_shape'][0] == (230, 409, 3)
assert scale_factor_results['img'][1].shape == (288, 512, 3)
assert scale_factor_results['img_shape'][1] == (288, 512, 3)
assert scale_factor_results['img'][2].shape == (345, 614, 3)
assert scale_factor_results['img_shape'][2] == (345, 614, 3)
# test pipeline of coco_detection
results = dict(
img_prefix=osp.join(osp.dirname(__file__), '../../../data'),
img_info=dict(filename='color.jpg'))
load_cfg, multi_scale_cfg = mmcv.Config.fromfile(
'configs/_base_/datasets/coco_detection.py').test_pipeline
load = build_from_cfg(load_cfg, PIPELINES)
transform = build_from_cfg(multi_scale_cfg, PIPELINES)
results = transform(load(results))
assert len(results['img']) == 1
assert len(results['img_metas']) == 1
assert isinstance(results['img'][0], torch.Tensor)
assert isinstance(results['img_metas'][0], mmcv.parallel.DataContainer)
assert results['img_metas'][0].data['ori_shape'] == (288, 512, 3)
assert results['img_metas'][0].data['img_shape'] == (750, 1333, 3)
assert results['img_metas'][0].data['pad_shape'] == (768, 1344, 3)
assert results['img_metas'][0].data['scale_factor'].tolist() == [
2.603515625, 2.6041667461395264, 2.603515625, 2.6041667461395264
]
def test_cutout():
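    """Test CutOut: invalid configs, cutout_shape and cutout_ratio modes."""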
# test n_holes
with pytest.raises(AssertionError):
transform = dict(type='CutOut', n_holes=(5, 3), cutout_shape=(8, 8))
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(type='CutOut', n_holes=(3, 4, 5), cutout_shape=(8, 8))
build_from_cfg(transform, PIPELINES)
# test cutout_shape and cutout_ratio
with pytest.raises(AssertionError):
transform = dict(type='CutOut', n_holes=1, cutout_shape=8)
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(type='CutOut', n_holes=1, cutout_ratio=0.2)
build_from_cfg(transform, PIPELINES)
    # exactly one of cutout_shape and cutout_ratio should be given
with pytest.raises(AssertionError):
transform = dict(type='CutOut', n_holes=1)
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(
type='CutOut',
n_holes=1,
cutout_shape=(2, 2),
cutout_ratio=(0.4, 0.4))
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
results['pad_shape'] = img.shape
results['img_fields'] = ['img']
transform = dict(type='CutOut', n_holes=1, cutout_shape=(10, 10))
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() < img.sum()
transform = dict(type='CutOut', n_holes=1, cutout_ratio=(0.8, 0.8))
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() < img.sum()
transform = dict(
type='CutOut',
n_holes=(2, 4),
cutout_shape=[(10, 10), (15, 15)],
fill_in=(255, 255, 255))
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() > img.sum()
transform = dict(
type='CutOut',
n_holes=1,
cutout_ratio=(0.8, 0.8),
fill_in=(255, 255, 255))
cutout_module = build_from_cfg(transform, PIPELINES)
cutout_result = cutout_module(copy.deepcopy(results))
assert cutout_result['img'].sum() > img.sum()
def test_random_shift():
# test assertion for invalid shift_ratio
with pytest.raises(AssertionError):
transform = dict(type='RandomShift', shift_ratio=1.5)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid max_shift_px
with pytest.raises(AssertionError):
transform = dict(type='RandomShift', max_shift_px=-1)
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
# TODO: add img_fields test
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='RandomShift', shift_ratio=1.0)
random_shift_module = build_from_cfg(transform, PIPELINES)
results = random_shift_module(results)
assert results['img'].shape[:2] == (h, w)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
def test_random_affine():
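    """Test RandomAffine: invalid configs and small-bbox filtering."""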
# test assertion for invalid translate_ratio
with pytest.raises(AssertionError):
transform = dict(type='RandomAffine', max_translate_ratio=1.5)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid scaling_ratio_range
with pytest.raises(AssertionError):
transform = dict(type='RandomAffine', scaling_ratio_range=(1.5, 0.5))
build_from_cfg(transform, PIPELINES)
with pytest.raises(AssertionError):
transform = dict(type='RandomAffine', scaling_ratio_range=(0, 0.5))
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='RandomAffine')
random_affine_module = build_from_cfg(transform, PIPELINES)
results = random_affine_module(results)
assert results['img'].shape[:2] == (h, w)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
# test filter bbox
gt_bboxes = np.array([[0, 0, 1, 1], [0, 0, 3, 100]], dtype=np.float32)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
transform = dict(
type='RandomAffine',
max_rotate_degree=0.,
max_translate_ratio=0.,
scaling_ratio_range=(1., 1.),
max_shear_degree=0.,
border=(0, 0),
min_bbox_size=2,
max_aspect_ratio=20,
skip_filter=False)
random_affine_module = build_from_cfg(transform, PIPELINES)
results = random_affine_module(results)
assert results['gt_bboxes'].shape[0] == 0
assert results['gt_labels'].shape[0] == 0
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
def test_mosaic():
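    """Test Mosaic: invalid configs and mosaic output consistency."""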
# test assertion for invalid img_scale
with pytest.raises(AssertionError):
transform = dict(type='Mosaic', img_scale=640)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid probability
with pytest.raises(AssertionError):
transform = dict(type='Mosaic', prob=1.5)
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
# TODO: add img_fields test
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='Mosaic', img_scale=(10, 12))
mosaic_module = build_from_cfg(transform, PIPELINES)
# test assertion for invalid mix_results
with pytest.raises(AssertionError):
mosaic_module(results)
results['mix_results'] = [copy.deepcopy(results)] * 3
results = mosaic_module(results)
assert results['img'].shape[:2] == (20, 24)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
def test_mixup():
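    """Test MixUp: invalid configs, label consistency, bbox filtering."""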
# test assertion for invalid img_scale
with pytest.raises(AssertionError):
transform = dict(type='MixUp', img_scale=640)
build_from_cfg(transform, PIPELINES)
results = dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
results['img'] = img
# TODO: add img_fields test
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
h, w, _ = img.shape
gt_bboxes = create_random_bboxes(8, w, h)
gt_bboxes_ignore = create_random_bboxes(2, w, h)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = gt_bboxes_ignore
transform = dict(type='MixUp', img_scale=(10, 12))
mixup_module = build_from_cfg(transform, PIPELINES)
# test assertion for invalid mix_results
with pytest.raises(AssertionError):
mixup_module(results)
with pytest.raises(AssertionError):
results['mix_results'] = [copy.deepcopy(results)] * 2
mixup_module(results)
results['mix_results'] = [copy.deepcopy(results)]
results = mixup_module(results)
assert results['img'].shape[:2] == (288, 512)
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
    # test filter bbox:
# 2 boxes with sides 1 and 3 are filtered as min_bbox_size=5
gt_bboxes = np.array([[0, 0, 1, 1], [0, 0, 3, 3]], dtype=np.float32)
results['gt_labels'] = np.ones(gt_bboxes.shape[0], dtype=np.int64)
results['gt_bboxes'] = gt_bboxes
results['gt_bboxes_ignore'] = np.array([], dtype=np.float32)
mixresults = results['mix_results'][0]
mixresults['gt_labels'] = copy.deepcopy(results['gt_labels'])
mixresults['gt_bboxes'] = copy.deepcopy(results['gt_bboxes'])
mixresults['gt_bboxes_ignore'] = copy.deepcopy(results['gt_bboxes_ignore'])
transform = dict(
type='MixUp',
img_scale=(10, 12),
ratio_range=(1.5, 1.5),
min_bbox_size=5,
skip_filter=False)
mixup_module = build_from_cfg(transform, PIPELINES)
results = mixup_module(results)
assert results['gt_bboxes'].shape[0] == 2
assert results['gt_labels'].shape[0] == 2
assert results['gt_labels'].shape[0] == results['gt_bboxes'].shape[0]
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
assert results['gt_bboxes_ignore'].dtype == np.float32
def test_photo_metric_distortion():
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
transform = dict(type='PhotoMetricDistortion')
distortion_module = build_from_cfg(transform, PIPELINES)
# test assertion for invalid img_fields
with pytest.raises(AssertionError):
results = dict()
results['img'] = img
results['img2'] = img
results['img_fields'] = ['img', 'img2']
distortion_module(results)
# test uint8 input
results = dict()
results['img'] = img
results = distortion_module(results)
assert results['img'].dtype == np.float32
# test float32 input
results = dict()
results['img'] = img.astype(np.float32)
results = distortion_module(results)
assert results['img'].dtype == np.float32
def test_copypaste():
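    """Test CopyPaste with masks, with selection and in bbox-only mode."""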
dst_results, src_results = dict(), dict()
img = mmcv.imread(
osp.join(osp.dirname(__file__), '../../../data/color.jpg'), 'color')
dst_results['img'] = img.copy()
src_results['img'] = img.copy()
h, w, _ = img.shape
dst_bboxes = np.array([[0.2 * w, 0.2 * h, 0.4 * w, 0.4 * h],
[0.5 * w, 0.5 * h, 0.6 * w, 0.6 * h]],
dtype=np.float32)
src_bboxes = np.array([[0.1 * w, 0.1 * h, 0.3 * w, 0.5 * h],
[0.4 * w, 0.4 * h, 0.7 * w, 0.7 * h],
[0.8 * w, 0.8 * h, 0.9 * w, 0.9 * h]],
dtype=np.float32)
dst_labels = np.ones(dst_bboxes.shape[0], dtype=np.int64)
src_labels = np.ones(src_bboxes.shape[0], dtype=np.int64) * 2
dst_masks = create_full_masks(dst_bboxes, w, h)
src_masks = create_full_masks(src_bboxes, w, h)
dst_results['gt_bboxes'] = dst_bboxes.copy()
src_results['gt_bboxes'] = src_bboxes.copy()
dst_results['gt_labels'] = dst_labels.copy()
src_results['gt_labels'] = src_labels.copy()
dst_results['gt_masks'] = copy.deepcopy(dst_masks)
src_results['gt_masks'] = copy.deepcopy(src_masks)
results = copy.deepcopy(dst_results)
transform = dict(type='CopyPaste', selected=False)
copypaste_module = build_from_cfg(transform, PIPELINES)
# test assertion for invalid mix_results
with pytest.raises(AssertionError):
copypaste_module(results)
results['mix_results'] = [copy.deepcopy(src_results)]
results = copypaste_module(results)
assert results['img'].shape[:2] == (h, w)
    # one object of the destination image is totally occluded
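    # note: the second destination box [0.5w, 0.5h, 0.6w, 0.6h] lies entirely
    # inside the pasted source box [0.4w, 0.4h, 0.7w, 0.7h], so its mask is
    # fully covered after pasting and the object is dropped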
assert results['gt_bboxes'].shape[0] == \
dst_bboxes.shape[0] + src_bboxes.shape[0] - 1
assert results['gt_labels'].shape[0] == \
dst_labels.shape[0] + src_labels.shape[0] - 1
assert results['gt_masks'].masks.shape[0] == \
dst_masks.masks.shape[0] + src_masks.masks.shape[0] - 1
assert results['gt_labels'].dtype == np.int64
assert results['gt_bboxes'].dtype == np.float32
    # the first object of the destination image is partially occluded
ori_bbox = dst_bboxes[0]
occ_bbox = results['gt_bboxes'][0]
ori_mask = dst_masks.masks[0]
occ_mask = results['gt_masks'].masks[0]
assert ori_mask.sum() > occ_mask.sum()
assert np.all(np.abs(occ_bbox - ori_bbox) <=
copypaste_module.bbox_occluded_thr) or \
occ_mask.sum() > copypaste_module.mask_occluded_thr
# test copypaste with selected objects
transform = dict(type='CopyPaste')
copypaste_module = build_from_cfg(transform, PIPELINES)
results = copy.deepcopy(dst_results)
results['mix_results'] = [copy.deepcopy(src_results)]
copypaste_module(results)
    # test copypaste with a source image that contains no objects
results = copy.deepcopy(dst_results)
valid_inds = [False] * src_bboxes.shape[0]
src_results['gt_bboxes'] = src_bboxes[valid_inds]
src_results['gt_labels'] = src_labels[valid_inds]
src_results['gt_masks'] = src_masks[valid_inds]
results['mix_results'] = [copy.deepcopy(src_results)]
copypaste_module(results)
# test copy_paste based on bbox
dst_results.pop('gt_masks')
src_results.pop('gt_masks')
dst_bboxes = dst_results['gt_bboxes']
src_bboxes = src_results['gt_bboxes']
dst_masks = create_full_masks(dst_bboxes, w, h)
src_masks = create_full_masks(src_bboxes, w, h)
results = copy.deepcopy(dst_results)
results['mix_results'] = [copy.deepcopy(src_results)]
results = copypaste_module(results)
result_masks = create_full_masks(results['gt_bboxes'], w, h)
result_masks_np = np.where(result_masks.to_ndarray().sum(0) > 0, 1, 0)
masks_np = np.where(
(src_masks.to_ndarray().sum(0) + dst_masks.to_ndarray().sum(0)) > 0, 1,
0)
assert np.all(result_masks_np == masks_np)
assert 'gt_masks' not in results
| 42,738 | 37.193923 | 79 | py |
mmdetection | mmdetection-master/tests/test_data/test_pipelines/test_transform/test_translate.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pycocotools.mask as maskUtils
import pytest
from mmcv.utils import build_from_cfg
from mmdet.core.mask import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import PIPELINES
def _check_keys(results, results_translated):
assert len(set(results.keys()).difference(set(
results_translated.keys()))) == 0
assert len(set(results_translated.keys()).difference(set(
results.keys()))) == 0
def _pad(h, w, c, pad_val, axis=-1, dtype=np.float32):
assert isinstance(pad_val, (int, float, tuple))
if isinstance(pad_val, (int, float)):
pad_val = tuple([pad_val] * c)
assert len(pad_val) == c
pad_data = np.stack([np.ones((h, w)) * pad_val[i] for i in range(c)],
axis=axis).astype(dtype)
return pad_data
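# A small usage sketch of ``_pad`` (illustration only, not used by the tests):
# ``_pad(2, 3, 2, (5, 7))`` returns a float32 array of shape (2, 3, 2) whose two
# channels are filled with 5 and 7; with ``axis=0`` the shape becomes (2, 2, 3).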
def _construct_img(results):
h, w = results['img_info']['height'], results['img_info']['width']
img = np.random.uniform(0, 1, (h, w, 3)) * 255
img = img.astype(np.uint8)
results['img'] = img
results['img_shape'] = img.shape
results['ori_shape'] = img.shape
results['img_fields'] = ['img']
def _construct_ann_info(h=427, w=640, c=3):
bboxes = np.array(
[[222.62, 217.82, 241.81, 238.93], [50.5, 329.7, 130.23, 384.96],
[175.47, 331.97, 254.8, 389.26]],
dtype=np.float32)
labels = np.array([9, 2, 2], dtype=np.int64)
bboxes_ignore = np.array([[59., 253., 311., 337.]], dtype=np.float32)
masks = [
[[222.62, 217.82, 222.62, 238.93, 241.81, 238.93, 240.85, 218.78]],
[[
69.19, 332.17, 82.39, 330.25, 97.24, 329.7, 114.01, 331.35, 116.76,
337.39, 119.78, 343.17, 128.03, 344.54, 128.86, 347.84, 124.18,
350.59, 129.96, 358.01, 130.23, 366.54, 129.13, 377.81, 125.28,
382.48, 119.78, 381.93, 117.31, 377.54, 116.21, 379.46, 114.83,
382.21, 107.14, 383.31, 105.49, 378.36, 77.99, 377.54, 75.79,
381.11, 69.74, 381.93, 66.72, 378.91, 65.07, 377.81, 63.15, 379.19,
62.32, 383.31, 52.7, 384.96, 50.5, 379.46, 51.32, 375.61, 51.6,
370.11, 51.6, 364.06, 53.52, 354.99, 56.27, 344.54, 59.57, 336.29,
66.45, 332.72
]],
[[
175.47, 386.86, 175.87, 376.44, 177.08, 351.2, 189.1, 332.77,
194.31, 331.97, 236.37, 332.77, 244.79, 342.39, 246.79, 346.79,
248.39, 345.99, 251.6, 345.59, 254.8, 348.0, 254.8, 351.6, 250.0,
352.0, 250.0, 354.81, 251.6, 358.41, 251.6, 364.42, 251.6, 370.03,
252.8, 378.04, 252.8, 384.05, 250.8, 387.26, 246.39, 387.66,
245.19, 386.46, 242.38, 388.86, 233.97, 389.26, 232.77, 388.06,
232.77, 383.65, 195.91, 381.25, 195.91, 384.86, 191.1, 384.86,
187.49, 385.26, 186.69, 382.85, 184.29, 382.45, 183.09, 387.26,
178.68, 388.46, 176.28, 387.66
]]
]
return dict(
bboxes=bboxes, labels=labels, bboxes_ignore=bboxes_ignore, masks=masks)
def _load_bboxes(results):
ann_info = results['ann_info']
results['gt_bboxes'] = ann_info['bboxes'].copy()
results['bbox_fields'] = ['gt_bboxes']
gt_bboxes_ignore = ann_info.get('bboxes_ignore', None)
if gt_bboxes_ignore is not None:
results['gt_bboxes_ignore'] = gt_bboxes_ignore.copy()
results['bbox_fields'].append('gt_bboxes_ignore')
def _load_labels(results):
results['gt_labels'] = results['ann_info']['labels'].copy()
def _poly2mask(mask_ann, img_h, img_w):
if isinstance(mask_ann, list):
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(mask_ann, img_h, img_w)
rle = maskUtils.merge(rles)
elif isinstance(mask_ann['counts'], list):
# uncompressed RLE
rle = maskUtils.frPyObjects(mask_ann, img_h, img_w)
else:
# rle
rle = mask_ann
mask = maskUtils.decode(rle)
return mask
def _process_polygons(polygons):
polygons = [np.array(p) for p in polygons]
valid_polygons = []
for polygon in polygons:
if len(polygon) % 2 == 0 and len(polygon) >= 6:
valid_polygons.append(polygon)
return valid_polygons
def _load_masks(results, poly2mask=True):
h, w = results['img_info']['height'], results['img_info']['width']
gt_masks = results['ann_info']['masks']
if poly2mask:
gt_masks = BitmapMasks([_poly2mask(mask, h, w) for mask in gt_masks],
h, w)
else:
gt_masks = PolygonMasks(
[_process_polygons(polygons) for polygons in gt_masks], h, w)
results['gt_masks'] = gt_masks
results['mask_fields'] = ['gt_masks']
def _construct_semantic_seg(results):
h, w = results['img_info']['height'], results['img_info']['width']
seg_toy = (np.random.uniform(0, 1, (h, w)) * 255).astype(np.uint8)
results['gt_semantic_seg'] = seg_toy
results['seg_fields'] = ['gt_semantic_seg']
def construct_toy_data(poly2mask=True):
img_info = dict(height=427, width=640)
ann_info = _construct_ann_info(h=img_info['height'], w=img_info['width'])
results = dict(img_info=img_info, ann_info=ann_info)
# construct image, similar to 'LoadImageFromFile'
_construct_img(results)
# 'LoadAnnotations' (bboxes, labels, masks, semantic_seg)
_load_bboxes(results)
_load_labels(results)
_load_masks(results, poly2mask)
_construct_semantic_seg(results)
return results
def test_translate():
# test assertion for invalid value of level
with pytest.raises(AssertionError):
transform = dict(type='Translate', level=-1)
build_from_cfg(transform, PIPELINES)
# test assertion for invalid type of level
with pytest.raises(AssertionError):
transform = dict(type='Translate', level=[1])
build_from_cfg(transform, PIPELINES)
# test assertion for invalid prob
with pytest.raises(AssertionError):
transform = dict(type='Translate', level=1, prob=-0.5)
build_from_cfg(transform, PIPELINES)
# test assertion for the num of elements in tuple img_fill_val
with pytest.raises(AssertionError):
transform = dict(
type='Translate', level=1, img_fill_val=(128, 128, 128, 128))
build_from_cfg(transform, PIPELINES)
# test ValueError for invalid type of img_fill_val
with pytest.raises(ValueError):
transform = dict(
type='Translate', level=1, img_fill_val=[128, 128, 128])
build_from_cfg(transform, PIPELINES)
# test assertion for invalid value of img_fill_val
with pytest.raises(AssertionError):
transform = dict(
type='Translate', level=1, img_fill_val=(128, -1, 256))
build_from_cfg(transform, PIPELINES)
# test assertion for invalid value of direction
with pytest.raises(AssertionError):
transform = dict(
type='Translate', level=1, img_fill_val=128, direction='diagonal')
build_from_cfg(transform, PIPELINES)
# test assertion for invalid type of max_translate_offset
with pytest.raises(AssertionError):
transform = dict(
type='Translate',
level=1,
img_fill_val=128,
max_translate_offset=(250., ))
build_from_cfg(transform, PIPELINES)
# construct toy data example for unit test
results = construct_toy_data()
def _check_bbox_mask(results,
results_translated,
offset,
direction,
min_size=0.):
# The key correspondence from bboxes to labels and masks.
bbox2label = {
'gt_bboxes': 'gt_labels',
'gt_bboxes_ignore': 'gt_labels_ignore'
}
bbox2mask = {
'gt_bboxes': 'gt_masks',
'gt_bboxes_ignore': 'gt_masks_ignore'
}
def _translate_bbox(bboxes, offset, direction, max_h, max_w):
if direction == 'horizontal':
bboxes[:, 0::2] = bboxes[:, 0::2] + offset
elif direction == 'vertical':
bboxes[:, 1::2] = bboxes[:, 1::2] + offset
else:
raise ValueError
bboxes[:, 0::2] = np.clip(bboxes[:, 0::2], 0, max_w)
bboxes[:, 1::2] = np.clip(bboxes[:, 1::2], 0, max_h)
return bboxes
h, w, c = results_translated['img'].shape
for key in results_translated.get('bbox_fields', []):
label_key, mask_key = bbox2label[key], bbox2mask[key]
# check length of key
if label_key in results:
assert len(results_translated[key]) == len(
results_translated[label_key])
if mask_key in results:
assert len(results_translated[key]) == len(
results_translated[mask_key])
# construct gt_bboxes
gt_bboxes = _translate_bbox(
copy.deepcopy(results[key]), offset, direction, h, w)
valid_inds = (gt_bboxes[:, 2] - gt_bboxes[:, 0] > min_size) & (
gt_bboxes[:, 3] - gt_bboxes[:, 1] > min_size)
gt_bboxes = gt_bboxes[valid_inds]
# check bbox
assert np.equal(gt_bboxes, results_translated[key]).all()
# construct gt_masks
if mask_key not in results:
# e.g. 'gt_masks_ignore'
continue
masks, masks_translated = results[mask_key].to_ndarray(
), results_translated[mask_key].to_ndarray()
assert masks.dtype == masks_translated.dtype
if direction == 'horizontal':
masks_pad = _pad(
h,
abs(offset),
masks.shape[0],
0,
axis=0,
dtype=masks.dtype)
if offset <= 0:
# left shift
gt_masks = np.concatenate(
(masks[:, :, -offset:], masks_pad), axis=-1)
else:
# right shift
gt_masks = np.concatenate(
(masks_pad, masks[:, :, :-offset]), axis=-1)
else:
masks_pad = _pad(
abs(offset),
w,
masks.shape[0],
0,
axis=0,
dtype=masks.dtype)
if offset <= 0:
# top shift
gt_masks = np.concatenate(
(masks[:, -offset:, :], masks_pad), axis=1)
else:
# bottom shift
gt_masks = np.concatenate(
(masks_pad, masks[:, :-offset, :]), axis=1)
gt_masks = gt_masks[valid_inds]
# check masks
assert np.equal(gt_masks, masks_translated).all()
def _check_img_seg(results, results_translated, keys, offset, fill_val,
direction):
for key in keys:
assert isinstance(results_translated[key], type(results[key]))
# assert type(results[key]) == type(results_translated[key])
data, data_translated = results[key], results_translated[key]
if 'mask' in key:
data, data_translated = data.to_ndarray(
), data_translated.to_ndarray()
assert data.dtype == data_translated.dtype
if 'img' in key:
data, data_translated = data.transpose(
(2, 0, 1)), data_translated.transpose((2, 0, 1))
elif 'seg' in key:
data, data_translated = data[None, :, :], data_translated[
None, :, :]
c, h, w = data.shape
if direction == 'horizontal':
data_pad = _pad(
h, abs(offset), c, fill_val, axis=0, dtype=data.dtype)
if offset <= 0:
# left shift
data_gt = np.concatenate((data[:, :, -offset:], data_pad),
axis=-1)
else:
# right shift
data_gt = np.concatenate((data_pad, data[:, :, :-offset]),
axis=-1)
else:
data_pad = _pad(
abs(offset), w, c, fill_val, axis=0, dtype=data.dtype)
if offset <= 0:
# top shift
data_gt = np.concatenate((data[:, -offset:, :], data_pad),
axis=1)
else:
# bottom shift
data_gt = np.concatenate((data_pad, data[:, :-offset, :]),
axis=1)
if 'mask' in key:
# TODO assertion here. ``data_translated`` must be a subset
# (or equal) of ``data_gt``
pass
else:
assert np.equal(data_gt, data_translated).all()
def check_translate(results,
results_translated,
offset,
img_fill_val,
seg_ignore_label,
direction,
min_size=0):
# check keys
_check_keys(results, results_translated)
# check image
_check_img_seg(results, results_translated,
results.get('img_fields', ['img']), offset,
img_fill_val, direction)
# check segmentation map
_check_img_seg(results, results_translated,
results.get('seg_fields', []), offset, seg_ignore_label,
direction)
# check masks and bboxes
_check_bbox_mask(results, results_translated, offset, direction,
min_size)
# test case when level=0 (without translate aug)
img_fill_val = (104, 116, 124)
seg_ignore_label = 255
transform = dict(
type='Translate',
level=0,
prob=1.0,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label)
translate_module = build_from_cfg(transform, PIPELINES)
results_wo_translate = translate_module(copy.deepcopy(results))
check_translate(
copy.deepcopy(results),
results_wo_translate,
0,
img_fill_val,
seg_ignore_label,
'horizontal',
)
# test case when level>0 and translate horizontally (left shift).
transform = dict(
type='Translate',
level=8,
prob=1.0,
img_fill_val=img_fill_val,
random_negative_prob=1.0,
seg_ignore_label=seg_ignore_label)
translate_module = build_from_cfg(transform, PIPELINES)
offset = translate_module.offset
results_translated = translate_module(copy.deepcopy(results))
check_translate(
copy.deepcopy(results),
results_translated,
-offset,
img_fill_val,
seg_ignore_label,
'horizontal',
)
# test case when level>0 and translate horizontally (right shift).
translate_module.random_negative_prob = 0.0
results_translated = translate_module(copy.deepcopy(results))
check_translate(
copy.deepcopy(results),
results_translated,
offset,
img_fill_val,
seg_ignore_label,
'horizontal',
)
# test case when level>0 and translate vertically (top shift).
transform = dict(
type='Translate',
level=10,
prob=1.0,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label,
random_negative_prob=1.0,
direction='vertical')
translate_module = build_from_cfg(transform, PIPELINES)
offset = translate_module.offset
results_translated = translate_module(copy.deepcopy(results))
check_translate(
copy.deepcopy(results), results_translated, -offset, img_fill_val,
seg_ignore_label, 'vertical')
# test case when level>0 and translate vertically (bottom shift).
translate_module.random_negative_prob = 0.0
results_translated = translate_module(copy.deepcopy(results))
check_translate(
copy.deepcopy(results), results_translated, offset, img_fill_val,
seg_ignore_label, 'vertical')
# test case when no translation is called (prob<=0)
transform = dict(
type='Translate',
level=8,
prob=0.0,
img_fill_val=img_fill_val,
random_negative_prob=0.0,
seg_ignore_label=seg_ignore_label)
translate_module = build_from_cfg(transform, PIPELINES)
results_translated = translate_module(copy.deepcopy(results))
# test translate vertically with PolygonMasks (top shift)
results = construct_toy_data(False)
transform = dict(
type='Translate',
level=10,
prob=1.0,
img_fill_val=img_fill_val,
seg_ignore_label=seg_ignore_label,
direction='vertical')
translate_module = build_from_cfg(transform, PIPELINES)
offset = translate_module.offset
translate_module.random_negative_prob = 1.0
results_translated = translate_module(copy.deepcopy(results))
def _translated_gt(masks, direction, offset, out_shape):
translated_masks = []
for poly_per_obj in masks:
translated_poly_per_obj = []
for p in poly_per_obj:
p = p.copy()
if direction == 'horizontal':
p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
elif direction == 'vertical':
p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
if PolygonMasks([[p]], *out_shape).areas[0] > 0:
# filter invalid (area=0)
translated_poly_per_obj.append(p)
if len(translated_poly_per_obj):
translated_masks.append(translated_poly_per_obj)
translated_masks = PolygonMasks(translated_masks, *out_shape)
return translated_masks
h, w = results['img_shape'][:2]
for key in results.get('mask_fields', []):
masks = results[key]
translated_gt = _translated_gt(masks, 'vertical', -offset, (h, w))
assert np.equal(results_translated[key].to_ndarray(),
translated_gt.to_ndarray()).all()
# test translate horizontally with PolygonMasks (right shift)
results = construct_toy_data(False)
transform = dict(
type='Translate',
level=8,
prob=1.0,
img_fill_val=img_fill_val,
random_negative_prob=0.0,
seg_ignore_label=seg_ignore_label)
translate_module = build_from_cfg(transform, PIPELINES)
offset = translate_module.offset
results_translated = translate_module(copy.deepcopy(results))
h, w = results['img_shape'][:2]
for key in results.get('mask_fields', []):
masks = results[key]
translated_gt = _translated_gt(masks, 'horizontal', offset, (h, w))
assert np.equal(results_translated[key].to_ndarray(),
translated_gt.to_ndarray()).all()
# test AutoAugment equipped with Translate
policies = [[dict(type='Translate', level=10, prob=1.)]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
policies = [[
dict(type='Translate', level=10, prob=1.),
dict(
type='Translate',
level=8,
img_fill_val=img_fill_val,
direction='vertical')
]]
autoaug = dict(type='AutoAugment', policies=policies)
autoaug_module = build_from_cfg(autoaug, PIPELINES)
autoaug_module(copy.deepcopy(results))
| 19,985 | 37.65764 | 79 | py |
mmdetection | mmdetection-master/tests/test_data/test_pipelines/test_transform/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmdet.core.mask import BitmapMasks, PolygonMasks
def _check_fields(results, pipeline_results, keys):
"""Check data in fields from two results are same."""
for key in keys:
if isinstance(results[key], (BitmapMasks, PolygonMasks)):
assert np.equal(results[key].to_ndarray(),
pipeline_results[key].to_ndarray()).all()
else:
assert np.equal(results[key], pipeline_results[key]).all()
assert results[key].dtype == pipeline_results[key].dtype
def check_result_same(results, pipeline_results):
"""Check whether the `pipeline_results` is the same with the predefined
`results`.
Args:
results (dict): Predefined results which should be the standard output
of the transform pipeline.
pipeline_results (dict): Results processed by the transform pipeline.
"""
# check image
_check_fields(results, pipeline_results,
results.get('img_fields', ['img']))
# check bboxes
_check_fields(results, pipeline_results, results.get('bbox_fields', []))
# check masks
_check_fields(results, pipeline_results, results.get('mask_fields', []))
# check segmentations
_check_fields(results, pipeline_results, results.get('seg_fields', []))
# check gt_labels
if 'gt_labels' in results:
assert np.equal(results['gt_labels'],
pipeline_results['gt_labels']).all()
def construct_toy_data(poly2mask=True):
img = np.array([[1, 2, 3, 4], [5, 6, 7, 8]], dtype=np.uint8)
img = np.stack([img, img, img], axis=-1)
results = dict()
# image
results['img'] = img
results['img_shape'] = img.shape
results['img_fields'] = ['img']
# bboxes
results['bbox_fields'] = ['gt_bboxes', 'gt_bboxes_ignore']
results['gt_bboxes'] = np.array([[0., 0., 2., 1.]], dtype=np.float32)
results['gt_bboxes_ignore'] = np.array([[2., 0., 3., 1.]],
dtype=np.float32)
# labels
results['gt_labels'] = np.array([1], dtype=np.int64)
# masks
results['mask_fields'] = ['gt_masks']
if poly2mask:
gt_masks = np.array([[0, 1, 1, 0], [0, 1, 0, 0]],
dtype=np.uint8)[None, :, :]
results['gt_masks'] = BitmapMasks(gt_masks, 2, 4)
else:
        raw_masks = [[np.array([0, 0, 2, 0, 2, 1, 0, 1], dtype=np.float64)]]
results['gt_masks'] = PolygonMasks(raw_masks, 2, 4)
# segmentations
results['seg_fields'] = ['gt_semantic_seg']
results['gt_semantic_seg'] = img[..., 0]
return results
def create_random_bboxes(num_bboxes, img_w, img_h):
bboxes_left_top = np.random.uniform(0, 0.5, size=(num_bboxes, 2))
bboxes_right_bottom = np.random.uniform(0.5, 1, size=(num_bboxes, 2))
bboxes = np.concatenate((bboxes_left_top, bboxes_right_bottom), 1)
bboxes = (bboxes * np.array([img_w, img_h, img_w, img_h])).astype(
np.float32)
return bboxes
def create_full_masks(gt_bboxes, img_w, img_h):
xmin, ymin = gt_bboxes[:, 0:1], gt_bboxes[:, 1:2]
xmax, ymax = gt_bboxes[:, 2:3], gt_bboxes[:, 3:4]
gt_masks = np.zeros((len(gt_bboxes), img_h, img_w), dtype=np.uint8)
for i in range(len(gt_bboxes)):
gt_masks[i, int(ymin[i]):int(ymax[i]), int(xmin[i]):int(xmax[i])] = 1
gt_masks = BitmapMasks(gt_masks, img_h, img_w)
return gt_masks
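# A minimal usage sketch (not part of the original test utilities) showing how
# the two helpers above compose; the names and defaults here are illustrative.
def _demo_random_bboxes_and_masks(img_w=32, img_h=24, num_bboxes=3):
    bboxes = create_random_bboxes(num_bboxes, img_w, img_h)
    masks = create_full_masks(bboxes, img_w, img_h)
    # every mask is a filled rectangle, one per generated box
    assert masks.masks.shape == (num_bboxes, img_h, img_w)
    return bboxes, masks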
| 3,469 | 37.988764 | 78 | py |
mmdetection | mmdetection-master/tests/test_downstream/test_mmtrack.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from collections import defaultdict
import numpy as np
import pytest
import torch
from mmcv import Config
@pytest.mark.parametrize(
'cfg_file',
['./tests/data/configs_mmtrack/selsa_faster_rcnn_r101_dc5_1x.py'])
def test_vid_fgfa_style_forward(cfg_file):
config = Config.fromfile(cfg_file)
model = copy.deepcopy(config.model)
model.pretrains = None
model.detector.pretrained = None
from mmtrack.models import build_model
detector = build_model(model)
# Test forward train with a non-empty truth batch
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
img_metas[0]['is_video_data'] = True
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
gt_masks = mm_inputs['gt_masks']
ref_input_shape = (2, 3, 256, 256)
ref_mm_inputs = _demo_mm_inputs(ref_input_shape, num_items=[9, 11])
ref_img = ref_mm_inputs.pop('imgs')[None]
ref_img_metas = ref_mm_inputs.pop('img_metas')
ref_img_metas[0]['is_video_data'] = True
ref_img_metas[1]['is_video_data'] = True
ref_gt_bboxes = ref_mm_inputs['gt_bboxes']
ref_gt_labels = ref_mm_inputs['gt_labels']
ref_gt_masks = ref_mm_inputs['gt_masks']
losses = detector.forward(
img=imgs,
img_metas=img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
ref_img=ref_img,
ref_img_metas=[ref_img_metas],
ref_gt_bboxes=ref_gt_bboxes,
ref_gt_labels=ref_gt_labels,
gt_masks=gt_masks,
ref_gt_masks=ref_gt_masks,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
img_metas[0]['is_video_data'] = True
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
gt_masks = mm_inputs['gt_masks']
ref_mm_inputs = _demo_mm_inputs(ref_input_shape, num_items=[0, 0])
ref_imgs = ref_mm_inputs.pop('imgs')[None]
ref_img_metas = ref_mm_inputs.pop('img_metas')
ref_img_metas[0]['is_video_data'] = True
ref_img_metas[1]['is_video_data'] = True
ref_gt_bboxes = ref_mm_inputs['gt_bboxes']
ref_gt_labels = ref_mm_inputs['gt_labels']
ref_gt_masks = ref_mm_inputs['gt_masks']
losses = detector.forward(
img=imgs,
img_metas=img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
ref_img=ref_imgs,
ref_img_metas=[ref_img_metas],
ref_gt_bboxes=ref_gt_bboxes,
ref_gt_labels=ref_gt_labels,
gt_masks=gt_masks,
ref_gt_masks=ref_gt_masks,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test forward test with frame_stride=1 and frame_range=[-1,0]
with torch.no_grad():
imgs = torch.cat([imgs, imgs.clone()], dim=0)
img_list = [g[None, :] for g in imgs]
img_metas.extend(copy.deepcopy(img_metas))
for i in range(len(img_metas)):
img_metas[i]['frame_id'] = i
img_metas[i]['num_left_ref_imgs'] = 1
img_metas[i]['frame_stride'] = 1
ref_imgs = [ref_imgs.clone(), imgs[[0]][None].clone()]
ref_img_metas = [
copy.deepcopy(ref_img_metas),
copy.deepcopy([img_metas[0]])
]
results = defaultdict(list)
for one_img, one_meta, ref_img, ref_img_meta in zip(
img_list, img_metas, ref_imgs, ref_img_metas):
result = detector.forward([one_img], [[one_meta]],
ref_img=[ref_img],
ref_img_metas=[[ref_img_meta]],
return_loss=False)
for k, v in result.items():
results[k].append(v)
@pytest.mark.parametrize('cfg_file', [
'./tests/data/configs_mmtrack/tracktor_faster-rcnn_r50_fpn_4e.py',
])
def test_tracktor_forward(cfg_file):
config = Config.fromfile(cfg_file)
model = copy.deepcopy(config.model)
model.pretrains = None
model.detector.pretrained = None
from mmtrack.models import build_model
mot = build_model(model)
mot.eval()
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10], with_track=True)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
with torch.no_grad():
imgs = torch.cat([imgs, imgs.clone()], dim=0)
img_list = [g[None, :] for g in imgs]
img2_metas = copy.deepcopy(img_metas)
img2_metas[0]['frame_id'] = 1
img_metas.extend(img2_metas)
results = defaultdict(list)
for one_img, one_meta in zip(img_list, img_metas):
result = mot.forward([one_img], [[one_meta]], return_loss=False)
for k, v in result.items():
results[k].append(v)
def _demo_mm_inputs(
input_shape=(1, 3, 300, 300),
num_items=None,
num_classes=10,
with_track=False):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_items (None | List[int]):
specifies the number of boxes in each batch item
num_classes (int):
number of different labels a box might have
"""
from mmdet.core import BitmapMasks
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
'frame_id': 0,
'img_norm_cfg': {
'mean': (128.0, 128.0, 128.0),
'std': (10.0, 10.0, 10.0)
}
} for i in range(N)]
gt_bboxes = []
gt_labels = []
gt_masks = []
gt_match_indices = []
for batch_idx in range(N):
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[batch_idx]
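        # convert random fractional centers/sizes (cx, cy, bw, bh) into
        # absolute corner-format boxes (x1, y1, x2, y2) clipped to the image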
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
class_idxs = rng.randint(1, num_classes, size=num_boxes)
gt_bboxes.append(torch.FloatTensor(boxes))
gt_labels.append(torch.LongTensor(class_idxs))
if with_track:
gt_match_indices.append(torch.arange(boxes.shape[0]))
mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
gt_masks.append(BitmapMasks(mask, H, W))
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_bboxes': gt_bboxes,
'gt_labels': gt_labels,
'gt_bboxes_ignore': None,
'gt_masks': gt_masks,
}
if with_track:
mm_inputs['gt_match_indices'] = gt_match_indices
return mm_inputs
| 7,592 | 31.87013 | 77 | py |
mmdetection | mmdetection-master/tests/test_metrics/test_box_overlap.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.core import BboxOverlaps2D, bbox_overlaps
from mmdet.core.evaluation.bbox_overlaps import \
bbox_overlaps as recall_overlaps
def test_bbox_overlaps_2d(eps=1e-7):
def _construct_bbox(num_bbox=None):
img_h = int(np.random.randint(3, 1000))
img_w = int(np.random.randint(3, 1000))
if num_bbox is None:
num_bbox = np.random.randint(1, 10)
x1y1 = torch.rand((num_bbox, 2))
x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
bboxes = torch.cat((x1y1, x2y2), -1)
bboxes[:, 0::2] *= img_w
bboxes[:, 1::2] *= img_h
return bboxes, num_bbox
# is_aligned is True, bboxes.size(-1) == 5 (include score)
self = BboxOverlaps2D()
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
bboxes1 = torch.cat((bboxes1, torch.rand((num_bbox, 1))), 1)
bboxes2 = torch.cat((bboxes2, torch.rand((num_bbox, 1))), 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert gious.size() == (num_bbox, ), gious.size()
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# is_aligned is True, bboxes1.size(-2) == 0
bboxes1 = torch.empty((0, 4))
bboxes2 = torch.empty((0, 4))
gious = self(bboxes1, bboxes2, 'giou', True)
assert gious.size() == (0, ), gious.size()
assert torch.all(gious == torch.empty((0, )))
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# is_aligned is True, and bboxes.ndims > 2
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
# test assertion when batch dim is not the same
with pytest.raises(AssertionError):
self(bboxes1, bboxes2.unsqueeze(0).repeat(3, 1, 1), 'giou', True)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, num_bbox)
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1, 1)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1, 1)
gious = self(bboxes1, bboxes2, 'giou', True)
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, 2, num_bbox)
# is_aligned is False
bboxes1, num_bbox1 = _construct_bbox()
bboxes2, num_bbox2 = _construct_bbox()
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (num_bbox1, num_bbox2)
# is_aligned is False, and bboxes.ndims > 2
bboxes1 = bboxes1.unsqueeze(0).repeat(2, 1, 1)
bboxes2 = bboxes2.unsqueeze(0).repeat(2, 1, 1)
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (2, num_bbox1, num_bbox2)
bboxes1 = bboxes1.unsqueeze(0)
bboxes2 = bboxes2.unsqueeze(0)
gious = self(bboxes1, bboxes2, 'giou')
assert torch.all(gious >= -1) and torch.all(gious <= 1)
assert gious.size() == (1, 2, num_bbox1, num_bbox2)
# is_aligned is False, bboxes1.size(-2) == 0
gious = self(torch.empty(1, 2, 0, 4), bboxes2, 'giou')
assert torch.all(gious == torch.empty(1, 2, 0, bboxes2.size(-2)))
assert torch.all(gious >= -1) and torch.all(gious <= 1)
# test allclose between bbox_overlaps and the original official
# implementation.
bboxes1 = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
bboxes2 = torch.FloatTensor([
[0, 0, 10, 20],
[0, 10, 10, 19],
[10, 10, 20, 20],
])
gious = bbox_overlaps(bboxes1, bboxes2, 'giou', is_aligned=True, eps=eps)
gious = gious.numpy().round(4)
# the gt is got with four decimal precision.
expected_gious = np.array([0.5000, -0.0500, -0.8214])
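    # hand check: pair 1 -> IoU 0.5 and a tight enclosing box, so GIoU = 0.5;
    # pair 2 -> no overlap, union 190, enclosing area 200, GIoU = -10/200 = -0.05;
    # pair 3 -> no overlap, union 160, enclosing area 896, GIoU = -736/896 ~ -0.8214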
assert np.allclose(gious, expected_gious, rtol=0, atol=eps)
# test mode 'iof'
ious = bbox_overlaps(bboxes1, bboxes2, 'iof', is_aligned=True, eps=eps)
assert torch.all(ious >= -1) and torch.all(ious <= 1)
assert ious.size() == (bboxes1.size(0), )
ious = bbox_overlaps(bboxes1, bboxes2, 'iof', eps=eps)
assert torch.all(ious >= -1) and torch.all(ious <= 1)
assert ious.size() == (bboxes1.size(0), bboxes2.size(0))
def test_voc_recall_overlaps():
def _construct_bbox(num_bbox=None):
img_h = int(np.random.randint(3, 1000))
img_w = int(np.random.randint(3, 1000))
if num_bbox is None:
num_bbox = np.random.randint(1, 10)
x1y1 = torch.rand((num_bbox, 2))
x2y2 = torch.max(torch.rand((num_bbox, 2)), x1y1)
bboxes = torch.cat((x1y1, x2y2), -1)
bboxes[:, 0::2] *= img_w
bboxes[:, 1::2] *= img_h
return bboxes.numpy(), num_bbox
bboxes1, num_bbox = _construct_bbox()
bboxes2, _ = _construct_bbox(num_bbox)
ious = recall_overlaps(
bboxes1, bboxes2, 'iou', use_legacy_coordinate=False)
assert ious.shape == (num_bbox, num_bbox)
assert np.all(ious >= -1) and np.all(ious <= 1)
ious = recall_overlaps(bboxes1, bboxes2, 'iou', use_legacy_coordinate=True)
assert ious.shape == (num_bbox, num_bbox)
assert np.all(ious >= -1) and np.all(ious <= 1)
| 5,316 | 38.385185 | 79 | py |
mmdetection | mmdetection-master/tests/test_metrics/test_losses.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models import Accuracy, build_loss
def test_ce_loss():
# use_mask and use_sigmoid cannot be true at the same time
with pytest.raises(AssertionError):
loss_cfg = dict(
type='CrossEntropyLoss',
use_mask=True,
use_sigmoid=True,
loss_weight=1.0)
build_loss(loss_cfg)
# test loss with class weights
loss_cls_cfg = dict(
type='CrossEntropyLoss',
use_sigmoid=False,
class_weight=[0.8, 0.2],
loss_weight=1.0)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100, -100]])
fake_label = torch.Tensor([1]).long()
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))
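    # rough hand check: the logit gap of 200 makes the raw cross entropy for
    # label 1 ~200; multiplying by class_weight[1] = 0.2 yields the asserted 40,
    # while the unweighted loss below stays at ~200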
loss_cls_cfg = dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)
loss_cls = build_loss(loss_cls_cfg)
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.))
def test_varifocal_loss():
# only sigmoid version of VarifocalLoss is implemented
with pytest.raises(AssertionError):
loss_cfg = dict(
type='VarifocalLoss', use_sigmoid=False, loss_weight=1.0)
build_loss(loss_cfg)
# test that alpha should be greater than 0
with pytest.raises(AssertionError):
loss_cfg = dict(
type='VarifocalLoss',
alpha=-0.75,
gamma=2.0,
use_sigmoid=True,
loss_weight=1.0)
build_loss(loss_cfg)
# test that pred and target should be of the same size
loss_cls_cfg = dict(
type='VarifocalLoss',
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
reduction='mean',
loss_weight=1.0)
loss_cls = build_loss(loss_cls_cfg)
with pytest.raises(AssertionError):
fake_pred = torch.Tensor([[100.0, -100.0]])
fake_target = torch.Tensor([[1.0]])
loss_cls(fake_pred, fake_target)
# test the calculation
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100.0, -100.0]])
fake_target = torch.Tensor([[1.0, 0.0]])
assert torch.allclose(loss_cls(fake_pred, fake_target), torch.tensor(0.0))
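    # logits of +/-100 saturate the sigmoid to (1, 0), exactly matching the
    # target, so the varifocal loss is ~0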
# test the loss with weights
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[0.0, 100.0]])
fake_target = torch.Tensor([[1.0, 1.0]])
fake_weight = torch.Tensor([0.0, 1.0])
assert torch.allclose(
loss_cls(fake_pred, fake_target, fake_weight), torch.tensor(0.0))
def test_kd_loss():
# test that temperature should be greater than 1
with pytest.raises(AssertionError):
loss_cfg = dict(
type='KnowledgeDistillationKLDivLoss', loss_weight=1.0, T=0.5)
build_loss(loss_cfg)
# test that pred and target should be of the same size
loss_cls_cfg = dict(
type='KnowledgeDistillationKLDivLoss', loss_weight=1.0, T=1)
loss_cls = build_loss(loss_cls_cfg)
with pytest.raises(AssertionError):
fake_pred = torch.Tensor([[100, -100]])
fake_label = torch.Tensor([1]).long()
loss_cls(fake_pred, fake_label)
# test the calculation
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100.0, 100.0]])
fake_target = torch.Tensor([[1.0, 1.0]])
assert torch.allclose(loss_cls(fake_pred, fake_target), torch.tensor(0.0))
# test the loss with weights
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100.0, -100.0], [100.0, 100.0]])
fake_target = torch.Tensor([[1.0, 0.0], [1.0, 1.0]])
fake_weight = torch.Tensor([0.0, 1.0])
assert torch.allclose(
loss_cls(fake_pred, fake_target, fake_weight), torch.tensor(0.0))
def test_seesaw_loss():
# only softmax version of Seesaw Loss is implemented
with pytest.raises(AssertionError):
loss_cfg = dict(type='SeesawLoss', use_sigmoid=True, loss_weight=1.0)
build_loss(loss_cfg)
# test that cls_score.size(-1) == num_classes + 2
loss_cls_cfg = dict(
type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, num_classes=2)
loss_cls = build_loss(loss_cls_cfg)
# the length of fake_pred should be num_classes + 2 = 4
with pytest.raises(AssertionError):
fake_pred = torch.Tensor([[-100, 100]])
fake_label = torch.Tensor([1]).long()
loss_cls(fake_pred, fake_label)
# the length of fake_pred should be num_classes + 2 = 4
with pytest.raises(AssertionError):
fake_pred = torch.Tensor([[-100, 100, -100]])
fake_label = torch.Tensor([1]).long()
loss_cls(fake_pred, fake_label)
# test the calculation without p and q
loss_cls_cfg = dict(
type='SeesawLoss', p=0.0, q=0.0, loss_weight=1.0, num_classes=2)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[-100, 100, -100, 100]])
fake_label = torch.Tensor([1]).long()
loss = loss_cls(fake_pred, fake_label)
assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
assert torch.allclose(loss['loss_cls_classes'], torch.tensor(0.))
# test the calculation with p and without q
loss_cls_cfg = dict(
type='SeesawLoss', p=1.0, q=0.0, loss_weight=1.0, num_classes=2)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[-100, 100, -100, 100]])
fake_label = torch.Tensor([0]).long()
loss_cls.cum_samples[0] = torch.exp(torch.Tensor([20]))
loss = loss_cls(fake_pred, fake_label)
assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
assert torch.allclose(loss['loss_cls_classes'], torch.tensor(180.))
# test the calculation with q and without p
loss_cls_cfg = dict(
type='SeesawLoss', p=0.0, q=1.0, loss_weight=1.0, num_classes=2)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[-100, 100, -100, 100]])
fake_label = torch.Tensor([0]).long()
loss = loss_cls(fake_pred, fake_label)
assert torch.allclose(loss['loss_cls_objectness'], torch.tensor(200.))
assert torch.allclose(loss['loss_cls_classes'],
torch.tensor(200.) + torch.tensor(100.).log())
    # test get_accuracy and get_activation with return_dict=False
loss_cls_cfg = dict(
type='SeesawLoss',
p=0.0,
q=1.0,
loss_weight=1.0,
num_classes=2,
return_dict=False)
loss_cls = build_loss(loss_cls_cfg)
fake_pred = torch.Tensor([[100, -100, 100, -100]])
fake_label = torch.Tensor([0]).long()
loss = loss_cls(fake_pred, fake_label)
acc = loss_cls.get_accuracy(fake_pred, fake_label)
act = loss_cls.get_activation(fake_pred)
assert torch.allclose(loss, torch.tensor(0.))
assert torch.allclose(acc['acc_objectness'], torch.tensor(100.))
assert torch.allclose(acc['acc_classes'], torch.tensor(100.))
assert torch.allclose(act, torch.tensor([1., 0., 0.]))
def test_accuracy():
# test for empty pred
pred = torch.empty(0, 4)
label = torch.empty(0)
accuracy = Accuracy(topk=1)
acc = accuracy(pred, label)
assert acc.item() == 0
pred = torch.Tensor([[0.2, 0.3, 0.6, 0.5], [0.1, 0.1, 0.2, 0.6],
[0.9, 0.0, 0.0, 0.1], [0.4, 0.7, 0.1, 0.1],
[0.0, 0.0, 0.99, 0]])
# test for top1
true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
accuracy = Accuracy(topk=1)
acc = accuracy(pred, true_label)
assert acc.item() == 100
# test for top1 with score thresh=0.8
true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
accuracy = Accuracy(topk=1, thresh=0.8)
acc = accuracy(pred, true_label)
assert acc.item() == 40
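    # hand check: all five top-1 predictions are correct, but only rows 3 and 5
    # (scores 0.9 and 0.99) exceed thresh=0.8, so accuracy is 2 / 5 = 40%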
# test for top2
accuracy = Accuracy(topk=2)
label = torch.Tensor([3, 2, 0, 0, 2]).long()
acc = accuracy(pred, label)
assert acc.item() == 100
# test for both top1 and top2
accuracy = Accuracy(topk=(1, 2))
true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
acc = accuracy(pred, true_label)
for a in acc:
assert a.item() == 100
# topk is larger than pred class number
with pytest.raises(AssertionError):
accuracy = Accuracy(topk=5)
accuracy(pred, true_label)
# wrong topk type
with pytest.raises(AssertionError):
accuracy = Accuracy(topk='wrong type')
accuracy(pred, true_label)
# label size is larger than required
with pytest.raises(AssertionError):
label = torch.Tensor([2, 3, 0, 1, 2, 0]).long() # size mismatch
accuracy = Accuracy()
accuracy(pred, label)
# wrong pred dimension
with pytest.raises(AssertionError):
accuracy = Accuracy()
accuracy(pred[:, :, None], true_label)
| 8,694 | 34.929752 | 78 | py |
mmdetection | mmdetection-master/tests/test_metrics/test_mean_ap.py | import numpy as np
from mmdet.core.evaluation.mean_ap import (eval_map, tpfp_default,
tpfp_imagenet, tpfp_openimages)
det_bboxes = np.array([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])
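# geometry behind the expected tp/fp below: the first det box overlaps the first
# gt box with IoU ~0.5, the second det box exactly matches the third gt box, and
# the third det box overlaps no gt, hence tp = [1, 1, 0] and fp = [0, 0, 1]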
def test_tpfp_imagenet():
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_imagenet(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_tpfp_default():
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=True)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
result = tpfp_default(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
use_legacy_coordinate=False)
tp = result[0]
fp = result[1]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert (tp == np.array([[1, 1, 0]])).all()
assert (fp == np.array([[0, 0, 1]])).all()
def test_eval_map():
    # 2 images and 2 classes
det_results = [[det_bboxes, det_bboxes], [det_bboxes, det_bboxes]]
labels = np.array([0, 1, 1])
labels_ignore = np.array([0, 1])
gt_info = {
'bboxes': gt_bboxes,
'bboxes_ignore': gt_ignore,
'labels': labels,
'labels_ignore': labels_ignore
}
annotations = [gt_info, gt_info]
mean_ap, eval_results = eval_map(
det_results, annotations, use_legacy_coordinate=True)
assert 0.291 < mean_ap < 0.293
mean_ap, eval_results = eval_map(
det_results, annotations, use_legacy_coordinate=False)
assert 0.291 < mean_ap < 0.293
# 1 image and 2 classes
det_results = [[det_bboxes, det_bboxes]]
labels = np.array([0, 1, 1])
labels_ignore = np.array([0, 1])
gt_info = {
'bboxes': gt_bboxes,
'bboxes_ignore': gt_ignore,
'labels': labels,
'labels_ignore': labels_ignore
}
annotations = [gt_info]
mean_ap, eval_results = eval_map(
det_results, annotations, use_legacy_coordinate=True)
assert 0.291 < mean_ap < 0.293
mean_ap, eval_results = eval_map(
det_results, annotations, use_legacy_coordinate=False)
assert 0.291 < mean_ap < 0.293
def test_tpfp_openimages():
det_bboxes = np.array([[10, 10, 15, 15, 1.0], [15, 15, 30, 30, 0.98],
[10, 10, 25, 25, 0.98], [28, 28, 35, 35, 0.97],
[30, 30, 51, 51, 0.96], [100, 110, 120, 130, 0.15]])
gt_bboxes = np.array([[10., 10., 30., 30.], [30., 30., 50., 50.]])
gt_groups_of = np.array([True, False], dtype=bool)
gt_ignore = np.zeros((0, 4))
# Open Images evaluation using group of.
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=True,
ioa_thr=0.5)
tp = result[0]
fp = result[1]
cls_dets = result[2]
assert tp.shape == (1, 4)
assert fp.shape == (1, 4)
assert cls_dets.shape == (4, 5)
assert (tp == np.array([[0, 1, 0, 1]])).all()
assert (fp == np.array([[1, 0, 1, 0]])).all()
cls_dets_gt = np.array([[28., 28., 35., 35., 0.97],
[30., 30., 51., 51., 0.96],
[100., 110., 120., 130., 0.15],
[10., 10., 15., 15., 1.]])
assert (cls_dets == cls_dets_gt).all()
# Open Images evaluation not using group of.
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=False,
ioa_thr=0.5)
tp = result[0]
fp = result[1]
cls_dets = result[2]
assert tp.shape == (1, 6)
assert fp.shape == (1, 6)
assert cls_dets.shape == (6, 5)
    # Open Images evaluation using group of, where all gt boxes are group-of.
gt_groups_of = np.array([True, True], dtype=bool)
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=True,
ioa_thr=0.5)
tp = result[0]
fp = result[1]
cls_dets = result[2]
assert tp.shape == (1, 3)
assert fp.shape == (1, 3)
assert cls_dets.shape == (3, 5)
# Open Images evaluation with empty gt.
gt_bboxes = np.zeros((0, 4))
gt_groups_of = np.empty((0))
result = tpfp_openimages(
det_bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_ignore,
gt_bboxes_group_of=gt_groups_of,
use_group_of=True,
ioa_thr=0.5)
fp = result[1]
assert (fp == np.array([[1, 1, 1, 1, 1, 1]])).all()
| 5,477 | 28.138298 | 79 | py |
mmdetection | mmdetection-master/tests/test_metrics/test_recall.py | import numpy as np
from mmdet.core.evaluation.recall import eval_recalls
det_bboxes = np.array([
[0, 0, 10, 10],
[10, 10, 20, 20],
[32, 32, 38, 42],
])
gt_bboxes = np.array([[0, 0, 10, 20], [0, 10, 10, 19], [10, 10, 20, 20]])
gt_ignore = np.array([[5, 5, 10, 20], [6, 10, 10, 19]])
def test_eval_recalls():
gts = [gt_bboxes, gt_bboxes, gt_bboxes]
proposals = [det_bboxes, det_bboxes, det_bboxes]
recall = eval_recalls(
gts, proposals, proposal_nums=2, use_legacy_coordinate=True)
assert recall.shape == (1, 1)
assert 0.66 < recall[0][0] < 0.667
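    # hand check: with proposal_nums=2 only the first two proposals are kept;
    # they recover 2 of the 3 gt boxes, so recall is 2 / 3 ~ 0.667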
recall = eval_recalls(
gts, proposals, proposal_nums=2, use_legacy_coordinate=False)
assert recall.shape == (1, 1)
assert 0.66 < recall[0][0] < 0.667
recall = eval_recalls(
gts, proposals, proposal_nums=2, use_legacy_coordinate=True)
assert recall.shape == (1, 1)
assert 0.66 < recall[0][0] < 0.667
recall = eval_recalls(
gts,
proposals,
iou_thrs=[0.1, 0.9],
proposal_nums=2,
use_legacy_coordinate=False)
assert recall.shape == (1, 2)
assert recall[0][1] <= recall[0][0]
recall = eval_recalls(
gts,
proposals,
iou_thrs=[0.1, 0.9],
proposal_nums=2,
use_legacy_coordinate=True)
assert recall.shape == (1, 2)
assert recall[0][1] <= recall[0][0]
| 1,377 | 28.319149 | 73 | py |
mmdetection | mmdetection-master/tests/test_models/test_forward.py | # Copyright (c) OpenMMLab. All rights reserved.
"""pytest tests/test_forward.py."""
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
def _replace_r50_with_r18(model):
"""Replace ResNet50 with ResNet18 in config."""
model = copy.deepcopy(model)
if model.backbone.type == 'ResNet':
model.backbone.depth = 18
model.backbone.base_channels = 2
model.neck.in_channels = [2, 4, 8, 16]
return model
def test_sparse_rcnn_forward():
config_path = 'sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py'
model = _get_detector_cfg(config_path)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
detector.init_weights()
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[5])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train with non-empty truth batch
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_bboxes = [item for item in gt_bboxes]
gt_labels = mm_inputs['gt_labels']
gt_labels = [item for item in gt_labels]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
detector.forward_dummy(imgs)
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_bboxes = [item for item in gt_bboxes]
gt_labels = mm_inputs['gt_labels']
gt_labels = [item for item in gt_labels]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
# test empty proposal in roi_head
with torch.no_grad():
# test no proposal in the whole batch
detector.roi_head.simple_test([imgs[0][None, :]], torch.empty(
(1, 0, 4)), torch.empty((1, 100, 4)), [img_metas[0]],
torch.ones((1, 4)))
def test_rpn_forward():
model = _get_detector_cfg('rpn/rpn_r50_fpn_1x_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
gt_bboxes = mm_inputs['gt_bboxes']
losses = detector.forward(
imgs, img_metas, gt_bboxes=gt_bboxes, return_loss=True)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
@pytest.mark.parametrize(
'cfg_file',
[
'reppoints/reppoints_moment_r50_fpn_1x_coco.py',
'retinanet/retinanet_r50_fpn_1x_coco.py',
'guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py',
'ghm/retinanet_ghm_r50_fpn_1x_coco.py',
'fcos/fcos_center_r50_caffe_fpn_gn-head_1x_coco.py',
'foveabox/fovea_align_r50_fpn_gn-head_4x4_2x_coco.py',
# 'free_anchor/retinanet_free_anchor_r50_fpn_1x_coco.py',
# 'atss/atss_r50_fpn_1x_coco.py', # not ready for topk
'yolo/yolov3_mobilenetv2_320_300e_coco.py',
'yolox/yolox_tiny_8x8_300e_coco.py'
])
def test_single_stage_forward_gpu(cfg_file):
if not torch.cuda.is_available():
import pytest
pytest.skip('test requires GPU and torch+cuda')
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (2, 3, 128, 128)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
detector = detector.cuda()
imgs = imgs.cuda()
# Test forward train
gt_bboxes = [b.cuda() for b in mm_inputs['gt_bboxes']]
gt_labels = [g.cuda() for g in mm_inputs['gt_labels']]
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
def test_faster_rcnn_ohem_forward():
model = _get_detector_cfg(
'faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
# Test forward train with a non-empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test RoI forward train with an empty proposals
feature = detector.extract_feat(imgs[0][None, :])
losses = detector.roi_head.forward_train(
feature,
img_metas, [torch.empty((0, 5))],
gt_bboxes=gt_bboxes,
gt_labels=gt_labels)
assert isinstance(losses, dict)
@pytest.mark.parametrize(
'cfg_file',
[
# 'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py',
# 'grid_rcnn/grid_rcnn_r50_fpn_gn-head_2x_coco.py',
# 'ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py',
# 'htc/htc_r50_fpn_1x_coco.py',
# 'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
# 'scnet/scnet_r50_fpn_20e_coco.py',
# 'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
])
def test_two_stage_forward(cfg_file):
models_with_semantic = [
'htc/htc_r50_fpn_1x_coco.py',
'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
'scnet/scnet_r50_fpn_20e_coco.py',
]
if cfg_file in models_with_semantic:
with_semantic = True
else:
with_semantic = False
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
# Save cost
if cfg_file in [
'seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py' # noqa: E501
]:
model.roi_head.bbox_head.num_classes = 80
model.roi_head.bbox_head.loss_cls.num_classes = 80
model.roi_head.mask_head.num_classes = 80
model.test_cfg.rcnn.score_thr = 0.05
model.test_cfg.rcnn.max_per_img = 100
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 128, 128)
# Test forward train with a non-empty truth batch
mm_inputs = _demo_mm_inputs(
input_shape, num_items=[10], with_semantic=with_semantic)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(
input_shape, num_items=[0], with_semantic=with_semantic)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
losses = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
loss.requires_grad_(True)
assert float(loss.item()) > 0
loss.backward()
# Test RoI forward train with an empty proposals
if cfg_file in [
'panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py' # noqa: E501
]:
mm_inputs.pop('gt_semantic_seg')
feature = detector.extract_feat(imgs[0][None, :])
losses = detector.roi_head.forward_train(feature, img_metas,
[torch.empty(
(0, 5))], **mm_inputs)
assert isinstance(losses, dict)
# Test forward test
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
cascade_models = [
'cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py',
'htc/htc_r50_fpn_1x_coco.py',
'scnet/scnet_r50_fpn_20e_coco.py',
]
# test empty proposal in roi_head
with torch.no_grad():
# test no proposal in the whole batch
detector.simple_test(
imgs[0][None, :], [img_metas[0]], proposals=[torch.empty((0, 4))])
        # test aug test with no proposals
features = detector.extract_feats([imgs[0][None, :]] * 2)
detector.roi_head.aug_test(features, [torch.empty((0, 4))] * 2,
[[img_metas[0]]] * 2)
# test rcnn_test_cfg is None
if cfg_file not in cascade_models:
feature = detector.extract_feat(imgs[0][None, :])
bboxes, scores = detector.roi_head.simple_test_bboxes(
feature, [img_metas[0]], [torch.empty((0, 4))], None)
assert all([bbox.shape == torch.Size((0, 4)) for bbox in bboxes])
assert all([
score.shape == torch.Size(
(0, detector.roi_head.bbox_head.fc_cls.out_features))
for score in scores
])
        # test no proposals in some of the images
x1y1 = torch.randint(1, 100, (10, 2)).float()
# x2y2 must be greater than x1y1
x2y2 = x1y1 + torch.randint(1, 100, (10, 2))
detector.simple_test(
imgs[0][None, :].repeat(2, 1, 1, 1), [img_metas[0]] * 2,
proposals=[torch.empty((0, 4)),
torch.cat([x1y1, x2y2], dim=-1)])
        # test aug test with no proposals
detector.roi_head.aug_test(
features, [torch.cat([x1y1, x2y2], dim=-1),
torch.empty((0, 4))], [[img_metas[0]]] * 2)
# test rcnn_test_cfg is None
if cfg_file not in cascade_models:
feature = detector.extract_feat(imgs[0][None, :].repeat(
2, 1, 1, 1))
bboxes, scores = detector.roi_head.simple_test_bboxes(
feature, [img_metas[0]] * 2,
[torch.empty((0, 4)),
torch.cat([x1y1, x2y2], dim=-1)], None)
assert bboxes[0].shape == torch.Size((0, 4))
assert scores[0].shape == torch.Size(
(0, detector.roi_head.bbox_head.fc_cls.out_features))
@pytest.mark.parametrize(
'cfg_file', ['ghm/retinanet_ghm_r50_fpn_1x_coco.py', 'ssd/ssd300_coco.py'])
def test_single_stage_forward_cpu(cfg_file):
model = _get_detector_cfg(cfg_file)
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 300, 300)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
return_loss=False)
batch_results.append(result)
def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
num_items=None, num_classes=10,
with_semantic=False): # yapf: disable
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_items (None | List[int]):
specifies the number of boxes in each batch item
num_classes (int):
number of different labels a box might have
"""
from mmdet.core import BitmapMasks
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]),
'flip': False,
'flip_direction': None,
} for _ in range(N)]
gt_bboxes = []
gt_labels = []
gt_masks = []
for batch_idx in range(N):
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[batch_idx]
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
class_idxs = rng.randint(1, num_classes, size=num_boxes)
gt_bboxes.append(torch.FloatTensor(boxes))
gt_labels.append(torch.LongTensor(class_idxs))
mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
gt_masks.append(BitmapMasks(mask, H, W))
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_bboxes': gt_bboxes,
'gt_labels': gt_labels,
'gt_bboxes_ignore': None,
'gt_masks': gt_masks,
}
if with_semantic:
# assume gt_semantic_seg using scale 1/8 of the img
gt_semantic_seg = np.random.randint(
0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8)
mm_inputs.update(
{'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)})
return mm_inputs
def test_yolact_forward():
model = _get_detector_cfg('yolact/yolact_r50_1x8_coco.py')
model = _replace_r50_with_r18(model)
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
gt_masks = mm_inputs['gt_masks']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks,
return_loss=True)
assert isinstance(losses, dict)
# Test forward dummy for get_flops
detector.forward_dummy(imgs)
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
def test_detr_forward():
model = _get_detector_cfg('detr/detr_r50_8x2_150e_coco.py')
model.backbone.depth = 18
model.bbox_head.in_channels = 512
model.backbone.init_cfg = None
from mmdet.models import build_detector
detector = build_detector(model)
input_shape = (1, 3, 100, 100)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train with non-empty truth batch
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward train with an empty truth batch
mm_inputs = _demo_mm_inputs(input_shape, num_items=[0])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
losses = detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in imgs]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
def test_inference_detector():
from mmcv import ConfigDict
from mmdet.apis import inference_detector
from mmdet.models import build_detector
# small RetinaNet
num_class = 3
model_dict = dict(
type='RetinaNet',
backbone=dict(
type='ResNet',
depth=18,
num_stages=4,
out_indices=(3, ),
norm_cfg=dict(type='BN', requires_grad=False),
norm_eval=True,
style='pytorch'),
neck=None,
bbox_head=dict(
type='RetinaHead',
num_classes=num_class,
in_channels=512,
stacked_convs=1,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]),
),
test_cfg=dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
rng = np.random.RandomState(0)
img1 = rng.rand(100, 100, 3)
img2 = rng.rand(100, 100, 3)
model = build_detector(ConfigDict(model_dict))
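    # inference_detector builds its test pipeline from model.cfg, so a full
    # RetinaNet config is attached to the freshly built model below.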
config = _get_config_module('retinanet/retinanet_r50_fpn_1x_coco.py')
model.cfg = config
# test single image
result = inference_detector(model, img1)
assert len(result) == num_class
# test multiple image
result = inference_detector(model, [img1, img2])
assert len(result) == 2 and len(result[0]) == num_class
def test_yolox_random_size():
from mmdet.models import build_detector
model = _get_detector_cfg('yolox/yolox_tiny_8x8_300e_coco.py')
model.random_size_range = (2, 2)
model.input_size = (64, 96)
model.random_size_interval = 1
detector = build_detector(model)
input_shape = (1, 3, 64, 64)
mm_inputs = _demo_mm_inputs(input_shape)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# Test forward train with non-empty truth batch
detector.train()
gt_bboxes = mm_inputs['gt_bboxes']
gt_labels = mm_inputs['gt_labels']
detector.forward(
imgs,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
return_loss=True)
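    # With random_size_range=(2, 2) the sampled multi-scale size should be
    # fixed at 2 * 32 = 64 px, and the 64x96 aspect ratio of input_size is
    # kept, so the resized input is expected to be (64, 96).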
assert detector._input_size == (64, 96)
def test_maskformer_forward():
model_cfg = _get_detector_cfg(
'maskformer/maskformer_r50_mstrain_16x1_75e_coco.py')
base_channels = 32
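    # Shrink the model (ResNet-18 backbone, 32-dim transformer embeddings)
    # so that this forward test stays small and fast.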
model_cfg.backbone.depth = 18
model_cfg.backbone.init_cfg = None
model_cfg.backbone.base_channels = base_channels
model_cfg.panoptic_head.in_channels = [
base_channels * 2**i for i in range(4)
]
model_cfg.panoptic_head.feat_channels = base_channels
model_cfg.panoptic_head.out_channels = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
transformerlayers.attn_cfgs.embed_dims = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
transformerlayers.ffn_cfgs.embed_dims = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
transformerlayers.ffn_cfgs.feedforward_channels = base_channels * 8
model_cfg.panoptic_head.pixel_decoder.\
positional_encoding.num_feats = base_channels // 2
model_cfg.panoptic_head.positional_encoding.\
num_feats = base_channels // 2
model_cfg.panoptic_head.transformer_decoder.\
transformerlayers.attn_cfgs.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.\
transformerlayers.ffn_cfgs.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.\
transformerlayers.ffn_cfgs.feedforward_channels = base_channels * 8
model_cfg.panoptic_head.transformer_decoder.\
transformerlayers.feedforward_channels = base_channels * 8
from mmdet.core import BitmapMasks
from mmdet.models import build_detector
detector = build_detector(model_cfg)
# Test forward train with non-empty truth batch
detector.train()
img_metas = [
{
'batch_input_shape': (128, 160),
'img_shape': (126, 160, 3),
'ori_shape': (63, 80, 3),
'pad_shape': (128, 160, 3)
},
]
img = torch.rand((1, 3, 128, 160))
gt_bboxes = None
gt_labels = [
torch.tensor([10]).long(),
]
thing_mask1 = np.zeros((1, 128, 160), dtype=np.int32)
thing_mask1[0, :50] = 1
gt_masks = [
BitmapMasks(thing_mask1, 128, 160),
]
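    # semantic map: thing class 10 (matching gt_labels) on the top 50 rows,
    # stuff class 100 on the remaining rows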
stuff_mask1 = torch.zeros((1, 128, 160)).long()
stuff_mask1[0, :50] = 10
stuff_mask1[0, 50:] = 100
gt_semantic_seg = [
stuff_mask1,
]
losses = detector.forward(
img=img,
img_metas=img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks,
gt_semantic_seg=gt_semantic_seg,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward train with an empty truth batch
gt_bboxes = [
torch.empty((0, 4)).float(),
]
gt_labels = [
torch.empty((0, )).long(),
]
mask = np.zeros((0, 128, 160), dtype=np.uint8)
gt_masks = [
BitmapMasks(mask, 128, 160),
]
gt_semantic_seg = [
torch.randint(0, 133, (0, 128, 160)),
]
losses = detector.forward(
img,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks,
gt_semantic_seg=gt_semantic_seg,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in img]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
batch_results.append(result)
@pytest.mark.parametrize('cfg_file', [
'mask2former/mask2former_r50_lsj_8x2_50e_coco.py',
'mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py'
])
def test_mask2former_forward(cfg_file):
# Test Panoptic Segmentation and Instance Segmentation
model_cfg = _get_detector_cfg(cfg_file)
base_channels = 32
model_cfg.backbone.depth = 18
model_cfg.backbone.init_cfg = None
model_cfg.backbone.base_channels = base_channels
model_cfg.panoptic_head.in_channels = [
base_channels * 2**i for i in range(4)
]
model_cfg.panoptic_head.feat_channels = base_channels
model_cfg.panoptic_head.out_channels = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
transformerlayers.attn_cfgs.embed_dims = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
transformerlayers.ffn_cfgs.embed_dims = base_channels
model_cfg.panoptic_head.pixel_decoder.encoder.\
transformerlayers.ffn_cfgs.feedforward_channels = base_channels * 4
model_cfg.panoptic_head.pixel_decoder.\
positional_encoding.num_feats = base_channels // 2
model_cfg.panoptic_head.positional_encoding.\
num_feats = base_channels // 2
model_cfg.panoptic_head.transformer_decoder.\
transformerlayers.attn_cfgs.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.\
transformerlayers.ffn_cfgs.embed_dims = base_channels
model_cfg.panoptic_head.transformer_decoder.\
transformerlayers.ffn_cfgs.feedforward_channels = base_channels * 8
model_cfg.panoptic_head.transformer_decoder.\
transformerlayers.feedforward_channels = base_channels * 8
num_stuff_classes = model_cfg.panoptic_head.num_stuff_classes
from mmdet.core import BitmapMasks
from mmdet.models import build_detector
detector = build_detector(model_cfg)
def _forward_train():
losses = detector.forward(
img,
img_metas,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks,
gt_semantic_seg=gt_semantic_seg,
return_loss=True)
assert isinstance(losses, dict)
loss, _ = detector._parse_losses(losses)
assert float(loss.item()) > 0
# Test forward train with non-empty truth batch
detector.train()
img_metas = [
{
'batch_input_shape': (128, 160),
'img_shape': (126, 160, 3),
'ori_shape': (63, 80, 3),
'pad_shape': (128, 160, 3)
},
]
img = torch.rand((1, 3, 128, 160))
gt_bboxes = None
gt_labels = [
torch.tensor([10]).long(),
]
thing_mask1 = np.zeros((1, 128, 160), dtype=np.int32)
thing_mask1[0, :50] = 1
gt_masks = [
BitmapMasks(thing_mask1, 128, 160),
]
stuff_mask1 = torch.zeros((1, 128, 160)).long()
stuff_mask1[0, :50] = 10
stuff_mask1[0, 50:] = 100
gt_semantic_seg = [
stuff_mask1,
]
_forward_train()
# Test forward train with non-empty truth batch and gt_semantic_seg=None
gt_semantic_seg = None
_forward_train()
# Test forward train with an empty truth batch
gt_bboxes = [
torch.empty((0, 4)).float(),
]
gt_labels = [
torch.empty((0, )).long(),
]
mask = np.zeros((0, 128, 160), dtype=np.uint8)
gt_masks = [
BitmapMasks(mask, 128, 160),
]
gt_semantic_seg = [
torch.randint(0, 133, (0, 128, 160)),
]
_forward_train()
# Test forward train with an empty truth batch and gt_semantic_seg=None
gt_semantic_seg = None
_forward_train()
# Test forward test
detector.eval()
with torch.no_grad():
img_list = [g[None, :] for g in img]
batch_results = []
for one_img, one_meta in zip(img_list, img_metas):
result = detector.forward([one_img], [[one_meta]],
rescale=True,
return_loss=False)
if num_stuff_classes > 0:
assert isinstance(result[0], dict)
else:
assert isinstance(result[0], tuple)
batch_results.append(result)
| 31,150 | 32.280983 | 110 | py |
mmdetection | mmdetection-master/tests/test_models/test_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.utils import digit_version
from mmdet.models.losses import (BalancedL1Loss, CrossEntropyLoss, DiceLoss,
DistributionFocalLoss, FocalLoss,
GaussianFocalLoss,
KnowledgeDistillationKLDivLoss, L1Loss,
MSELoss, QualityFocalLoss, SeesawLoss,
SmoothL1Loss, VarifocalLoss)
from mmdet.models.losses.ghm_loss import GHMC, GHMR
from mmdet.models.losses.iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss,
GIoULoss, IoULoss)
@pytest.mark.parametrize(
'loss_class', [IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss])
def test_iou_type_loss_zeros_weight(loss_class):
pred = torch.rand((10, 4))
target = torch.rand((10, 4))
weight = torch.zeros(10)
loss = loss_class()(pred, target, weight)
assert loss == 0.
@pytest.mark.parametrize('loss_class', [
BalancedL1Loss, BoundedIoULoss, CIoULoss, CrossEntropyLoss, DIoULoss,
FocalLoss, DistributionFocalLoss, MSELoss, SeesawLoss, GaussianFocalLoss,
GIoULoss, IoULoss, L1Loss, QualityFocalLoss, VarifocalLoss, GHMR, GHMC,
SmoothL1Loss, KnowledgeDistillationKLDivLoss, DiceLoss
])
def test_loss_with_reduction_override(loss_class):
pred = torch.rand((10, 4))
    target = torch.rand((10, 4))
weight = None
with pytest.raises(AssertionError):
        # reduction_override must be one of [None, 'none', 'mean', 'sum'];
        # any other value should raise an AssertionError
reduction_override = True
loss_class()(
pred, target, weight, reduction_override=reduction_override)
@pytest.mark.parametrize('loss_class', [
IoULoss, BoundedIoULoss, GIoULoss, DIoULoss, CIoULoss, MSELoss, L1Loss,
SmoothL1Loss, BalancedL1Loss
])
@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])
def test_regression_losses(loss_class, input_shape):
pred = torch.rand(input_shape)
target = torch.rand(input_shape)
weight = torch.rand(input_shape)
# Test loss forward
loss = loss_class()(pred, target)
assert isinstance(loss, torch.Tensor)
# Test loss forward with weight
loss = loss_class()(pred, target, weight)
assert isinstance(loss, torch.Tensor)
# Test loss forward with reduction_override
loss = loss_class()(pred, target, reduction_override='mean')
assert isinstance(loss, torch.Tensor)
# Test loss forward with avg_factor
loss = loss_class()(pred, target, avg_factor=10)
assert isinstance(loss, torch.Tensor)
with pytest.raises(ValueError):
# loss can evaluate with avg_factor only if
# reduction is None, 'none' or 'mean'.
reduction_override = 'sum'
loss_class()(
pred, target, avg_factor=10, reduction_override=reduction_override)
# Test loss forward with avg_factor and reduction
for reduction_override in [None, 'none', 'mean']:
        loss = loss_class()(
pred, target, avg_factor=10, reduction_override=reduction_override)
assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [FocalLoss, CrossEntropyLoss])
@pytest.mark.parametrize('input_shape', [(10, 5), (0, 5)])
def test_classification_losses(loss_class, input_shape):
if input_shape[0] == 0 and digit_version(
torch.__version__) < digit_version('1.5.0'):
pytest.skip(
            f'CELoss in PyTorch {torch.__version__} does not support empty '
            f'tensor.')
pred = torch.rand(input_shape)
target = torch.randint(0, 5, (input_shape[0], ))
# Test loss forward
loss = loss_class()(pred, target)
assert isinstance(loss, torch.Tensor)
# Test loss forward with reduction_override
loss = loss_class()(pred, target, reduction_override='mean')
assert isinstance(loss, torch.Tensor)
# Test loss forward with avg_factor
loss = loss_class()(pred, target, avg_factor=10)
assert isinstance(loss, torch.Tensor)
with pytest.raises(ValueError):
# loss can evaluate with avg_factor only if
# reduction is None, 'none' or 'mean'.
reduction_override = 'sum'
loss_class()(
pred, target, avg_factor=10, reduction_override=reduction_override)
# Test loss forward with avg_factor and reduction
for reduction_override in [None, 'none', 'mean']:
        loss = loss_class()(
pred, target, avg_factor=10, reduction_override=reduction_override)
assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('loss_class', [GHMR])
@pytest.mark.parametrize('input_shape', [(10, 4), (0, 4)])
def test_GHMR_loss(loss_class, input_shape):
pred = torch.rand(input_shape)
target = torch.rand(input_shape)
weight = torch.rand(input_shape)
# Test loss forward
loss = loss_class()(pred, target, weight)
assert isinstance(loss, torch.Tensor)
@pytest.mark.parametrize('use_sigmoid', [True, False])
@pytest.mark.parametrize('reduction', ['sum', 'mean', None])
@pytest.mark.parametrize('avg_non_ignore', [True, False])
def test_loss_with_ignore_index(use_sigmoid, reduction, avg_non_ignore):
# Test cross_entropy loss
loss_class = CrossEntropyLoss(
use_sigmoid=use_sigmoid,
use_mask=False,
ignore_index=255,
avg_non_ignore=avg_non_ignore)
pred = torch.rand((10, 5))
target = torch.randint(0, 5, (10, ))
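    # randomly pick two targets and mark them with the ignore label 255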
ignored_indices = torch.randint(0, 10, (2, ), dtype=torch.long)
target[ignored_indices] = 255
# Test loss forward with default ignore
loss_with_ignore = loss_class(pred, target, reduction_override=reduction)
assert isinstance(loss_with_ignore, torch.Tensor)
# Test loss forward with forward ignore
target[ignored_indices] = 255
loss_with_forward_ignore = loss_class(
pred, target, ignore_index=255, reduction_override=reduction)
assert isinstance(loss_with_forward_ignore, torch.Tensor)
# Verify correctness
if avg_non_ignore:
# manually remove the ignored elements
not_ignored_indices = (target != 255)
pred = pred[not_ignored_indices]
target = target[not_ignored_indices]
loss = loss_class(pred, target, reduction_override=reduction)
assert torch.allclose(loss, loss_with_ignore)
assert torch.allclose(loss, loss_with_forward_ignore)
# test ignore all target
pred = torch.rand((10, 5))
target = torch.ones((10, ), dtype=torch.long) * 255
loss = loss_class(pred, target, reduction_override=reduction)
assert loss == 0
@pytest.mark.parametrize('naive_dice', [True, False])
def test_dice_loss(naive_dice):
loss_class = DiceLoss
pred = torch.rand((10, 4, 4))
target = torch.rand((10, 4, 4))
weight = torch.rand((10))
# Test loss forward
loss = loss_class(naive_dice=naive_dice)(pred, target)
assert isinstance(loss, torch.Tensor)
# Test loss forward with weight
loss = loss_class(naive_dice=naive_dice)(pred, target, weight)
assert isinstance(loss, torch.Tensor)
# Test loss forward with reduction_override
loss = loss_class(naive_dice=naive_dice)(
pred, target, reduction_override='mean')
assert isinstance(loss, torch.Tensor)
# Test loss forward with avg_factor
loss = loss_class(naive_dice=naive_dice)(pred, target, avg_factor=10)
assert isinstance(loss, torch.Tensor)
with pytest.raises(ValueError):
# loss can evaluate with avg_factor only if
# reduction is None, 'none' or 'mean'.
reduction_override = 'sum'
loss_class(naive_dice=naive_dice)(
pred, target, avg_factor=10, reduction_override=reduction_override)
# Test loss forward with avg_factor and reduction
for reduction_override in [None, 'none', 'mean']:
        loss = loss_class(naive_dice=naive_dice)(
pred, target, avg_factor=10, reduction_override=reduction_override)
assert isinstance(loss, torch.Tensor)
    # Test that activate=True with use_sigmoid=False is not implemented
with pytest.raises(NotImplementedError):
loss_class(
use_sigmoid=False, activate=True, naive_dice=naive_dice)(pred,
target)
# Test loss forward with weight.ndim != loss.ndim
with pytest.raises(AssertionError):
weight = torch.rand((2, 8))
loss_class(naive_dice=naive_dice)(pred, target, weight)
# Test loss forward with len(weight) != len(pred)
with pytest.raises(AssertionError):
weight = torch.rand((8))
loss_class(naive_dice=naive_dice)(pred, target, weight)
| 8,705 | 36.364807 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_loss_compatibility.py | # Copyright (c) OpenMMLab. All rights reserved.
"""pytest tests/test_loss_compatibility.py."""
import copy
from os.path import dirname, exists, join
import numpy as np
import pytest
import torch
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(dirname(__file__)))
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _get_config_module(fname):
"""Load a configuration as a python module."""
from mmcv import Config
config_dpath = _get_config_directory()
config_fpath = join(config_dpath, fname)
config_mod = Config.fromfile(config_fpath)
return config_mod
def _get_detector_cfg(fname):
"""Grab configs necessary to create a detector.
These are deep copied to allow for safe modification of parameters without
influencing other tests.
"""
config = _get_config_module(fname)
model = copy.deepcopy(config.model)
return model
@pytest.mark.parametrize('loss_bbox', [
dict(type='L1Loss', loss_weight=1.0),
dict(type='GHMR', mu=0.02, bins=10, momentum=0.7, loss_weight=10.0),
dict(type='IoULoss', loss_weight=1.0),
dict(type='BoundedIoULoss', loss_weight=1.0),
dict(type='GIoULoss', loss_weight=1.0),
dict(type='DIoULoss', loss_weight=1.0),
dict(type='CIoULoss', loss_weight=1.0),
dict(type='MSELoss', loss_weight=1.0),
dict(type='SmoothL1Loss', loss_weight=1.0),
dict(type='BalancedL1Loss', loss_weight=1.0)
])
def test_bbox_loss_compatibility(loss_bbox):
"""Test loss_bbox compatibility.
    Using Faster R-CNN as a sample, we modify the loss function in the config
    file to verify the compatibility of the loss APIs.
"""
# Faster R-CNN config dict
config_path = '_base_/models/faster_rcnn_r50_fpn.py'
cfg_model = _get_detector_cfg(config_path)
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
if 'IoULoss' in loss_bbox['type']:
cfg_model.roi_head.bbox_head.reg_decoded_bbox = True
cfg_model.roi_head.bbox_head.loss_bbox = loss_bbox
from mmdet.models import build_detector
detector = build_detector(cfg_model)
loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
assert isinstance(loss, dict)
loss, _ = detector._parse_losses(loss)
assert float(loss.item()) > 0
@pytest.mark.parametrize('loss_cls', [
dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
dict(
type='GHMC', bins=30, momentum=0.75, use_sigmoid=True, loss_weight=1.0)
])
def test_cls_loss_compatibility(loss_cls):
"""Test loss_cls compatibility.
    Using Faster R-CNN as a sample, we modify the loss function in the config
    file to verify the compatibility of the loss APIs.
"""
# Faster R-CNN config dict
config_path = '_base_/models/faster_rcnn_r50_fpn.py'
cfg_model = _get_detector_cfg(config_path)
input_shape = (1, 3, 256, 256)
mm_inputs = _demo_mm_inputs(input_shape, num_items=[10])
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
# verify class loss function compatibility
# for loss_cls in loss_clses:
cfg_model.roi_head.bbox_head.loss_cls = loss_cls
from mmdet.models import build_detector
detector = build_detector(cfg_model)
loss = detector.forward(imgs, img_metas, return_loss=True, **mm_inputs)
assert isinstance(loss, dict)
loss, _ = detector._parse_losses(loss)
assert float(loss.item()) > 0
def _demo_mm_inputs(input_shape=(1, 3, 300, 300),
num_items=None, num_classes=10,
with_semantic=False): # yapf: disable
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_items (None | List[int]):
specifies the number of boxes in each batch item
num_classes (int):
number of different labels a box might have
"""
from mmdet.core import BitmapMasks
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': np.array([1.1, 1.2, 1.1, 1.2]),
'flip': False,
'flip_direction': None,
} for _ in range(N)]
gt_bboxes = []
gt_labels = []
gt_masks = []
for batch_idx in range(N):
if num_items is None:
num_boxes = rng.randint(1, 10)
else:
num_boxes = num_items[batch_idx]
cx, cy, bw, bh = rng.rand(num_boxes, 4).T
tl_x = ((cx * W) - (W * bw / 2)).clip(0, W)
tl_y = ((cy * H) - (H * bh / 2)).clip(0, H)
br_x = ((cx * W) + (W * bw / 2)).clip(0, W)
br_y = ((cy * H) + (H * bh / 2)).clip(0, H)
boxes = np.vstack([tl_x, tl_y, br_x, br_y]).T
class_idxs = rng.randint(1, num_classes, size=num_boxes)
gt_bboxes.append(torch.FloatTensor(boxes))
gt_labels.append(torch.LongTensor(class_idxs))
mask = np.random.randint(0, 2, (len(boxes), H, W), dtype=np.uint8)
gt_masks.append(BitmapMasks(mask, H, W))
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_bboxes': gt_bboxes,
'gt_labels': gt_labels,
'gt_bboxes_ignore': None,
'gt_masks': gt_masks,
}
if with_semantic:
# assume gt_semantic_seg using scale 1/8 of the img
gt_semantic_seg = np.random.randint(
0, num_classes, (1, 1, H // 8, W // 8), dtype=np.uint8)
mm_inputs.update(
{'gt_semantic_seg': torch.ByteTensor(gt_semantic_seg)})
return mm_inputs
| 6,361 | 30.49505 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_necks.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.necks import (FPG, FPN, FPN_CARAFE, NASFCOS_FPN, NASFPN,
YOLOXPAFPN, ChannelMapper, CTResNetNeck,
DilatedEncoder, DyHead, SSDNeck, YOLOV3Neck)
def test_fpn():
"""Tests fpn."""
s = 64
in_channels = [8, 16, 32, 64]
feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8]
out_channels = 8
# end_level=-1 is equal to end_level=3
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=0,
end_level=-1,
num_outs=5)
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=0,
end_level=3,
num_outs=5)
# `num_outs` is not equal to end_level - start_level + 1
with pytest.raises(AssertionError):
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=1,
end_level=2,
num_outs=3)
# `num_outs` is not equal to len(in_channels) - start_level
with pytest.raises(AssertionError):
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=1,
num_outs=2)
# `end_level` is larger than len(in_channels) - 1
with pytest.raises(AssertionError):
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=1,
end_level=4,
num_outs=2)
# `num_outs` is not equal to end_level - start_level
with pytest.raises(AssertionError):
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=1,
end_level=3,
num_outs=1)
# Invalid `add_extra_convs` option
with pytest.raises(AssertionError):
FPN(in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs='on_xxx',
num_outs=5)
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs=True,
num_outs=5)
    # FPN expects multiple levels of features per image
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
outs = fpn_model(feats)
assert fpn_model.add_extra_convs == 'on_input'
assert len(outs) == fpn_model.num_outs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
# Tests for fpn with no extra convs (pooling is used instead)
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs=False,
num_outs=5)
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
assert not fpn_model.add_extra_convs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
# Tests for fpn with lateral bns
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs=True,
no_norm_on_lateral=False,
norm_cfg=dict(type='BN', requires_grad=True),
num_outs=5)
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
assert fpn_model.add_extra_convs == 'on_input'
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
bn_exist = False
for m in fpn_model.modules():
if isinstance(m, _BatchNorm):
bn_exist = True
assert bn_exist
# Bilinear upsample
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs=True,
upsample_cfg=dict(mode='bilinear', align_corners=True),
num_outs=5)
fpn_model(feats)
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
assert fpn_model.add_extra_convs == 'on_input'
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
    # Use a scale factor instead of a fixed upsample size
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
start_level=1,
add_extra_convs=True,
upsample_cfg=dict(scale_factor=2),
num_outs=5)
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
# Extra convs source is 'inputs'
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_input',
start_level=1,
num_outs=5)
assert fpn_model.add_extra_convs == 'on_input'
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
# Extra convs source is 'laterals'
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_lateral',
start_level=1,
num_outs=5)
assert fpn_model.add_extra_convs == 'on_lateral'
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
# Extra convs source is 'outputs'
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_output',
start_level=1,
num_outs=5)
assert fpn_model.add_extra_convs == 'on_output'
outs = fpn_model(feats)
assert len(outs) == fpn_model.num_outs
for i in range(fpn_model.num_outs):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**(i + 1))
def test_channel_mapper():
"""Tests ChannelMapper."""
s = 64
in_channels = [8, 16, 32, 64]
feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8]
out_channels = 8
kernel_size = 3
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
# in_channels must be a list
with pytest.raises(AssertionError):
channel_mapper = ChannelMapper(
in_channels=10, out_channels=out_channels, kernel_size=kernel_size)
# the length of channel_mapper's inputs must be equal to the length of
# in_channels
with pytest.raises(AssertionError):
channel_mapper = ChannelMapper(
in_channels=in_channels[:-1],
out_channels=out_channels,
kernel_size=kernel_size)
channel_mapper(feats)
channel_mapper = ChannelMapper(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size)
outs = channel_mapper(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
        assert outs[i].shape[1] == out_channels
        assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
def test_dilated_encoder():
in_channels = 16
out_channels = 32
out_shape = 34
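    # the positional args below correspond to block_mid_channels=16,
    # num_residual_blocks=2 and block_dilations=[2, 4, 6, 8]
    # (see DilatedEncoder.__init__)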
dilated_encoder = DilatedEncoder(in_channels, out_channels, 16, 2,
[2, 4, 6, 8])
feat = [torch.rand(1, in_channels, 34, 34)]
out_feat = dilated_encoder(feat)[0]
assert out_feat.shape == (1, out_channels, out_shape, out_shape)
def test_ct_resnet_neck():
# num_filters/num_kernels must be a list
with pytest.raises(TypeError):
CTResNetNeck(
in_channel=10, num_deconv_filters=10, num_deconv_kernels=4)
# num_filters/num_kernels must be same length
with pytest.raises(AssertionError):
CTResNetNeck(
in_channel=10,
num_deconv_filters=(10, 10),
num_deconv_kernels=(4, ))
in_channels = 16
num_filters = (8, 8)
num_kernels = (4, 4)
feat = torch.rand(1, 16, 4, 4)
ct_resnet_neck = CTResNetNeck(
in_channel=in_channels,
num_deconv_filters=num_filters,
num_deconv_kernels=num_kernels,
use_dcn=False)
# feat must be list or tuple
with pytest.raises(AssertionError):
ct_resnet_neck(feat)
out_feat = ct_resnet_neck([feat])[0]
assert out_feat.shape == (1, num_filters[-1], 16, 16)
if torch.cuda.is_available():
# test dcn
ct_resnet_neck = CTResNetNeck(
in_channel=in_channels,
num_deconv_filters=num_filters,
num_deconv_kernels=num_kernels)
ct_resnet_neck = ct_resnet_neck.cuda()
feat = feat.cuda()
out_feat = ct_resnet_neck([feat])[0]
assert out_feat.shape == (1, num_filters[-1], 16, 16)
def test_yolov3_neck():
# num_scales, in_channels, out_channels must be same length
with pytest.raises(AssertionError):
YOLOV3Neck(num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4])
# len(feats) must equal to num_scales
with pytest.raises(AssertionError):
neck = YOLOV3Neck(
num_scales=3, in_channels=[16, 8, 4], out_channels=[8, 4, 2])
feats = (torch.rand(1, 4, 16, 16), torch.rand(1, 8, 16, 16))
neck(feats)
# test normal channels
s = 32
in_channels = [16, 8, 4]
out_channels = [8, 4, 2]
feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels) - 1, -1, -1)
]
neck = YOLOV3Neck(
num_scales=3, in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(outs)):
assert outs[i].shape == \
(1, out_channels[i], feat_sizes[i], feat_sizes[i])
# test more flexible setting
s = 32
in_channels = [32, 8, 16]
out_channels = [19, 21, 5]
feat_sizes = [s // 2**i for i in range(len(in_channels) - 1, -1, -1)]
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels) - 1, -1, -1)
]
neck = YOLOV3Neck(
num_scales=3, in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(outs)):
assert outs[i].shape == \
(1, out_channels[i], feat_sizes[i], feat_sizes[i])
def test_ssd_neck():
# level_strides/level_paddings must be same length
with pytest.raises(AssertionError):
SSDNeck(
in_channels=[8, 16],
out_channels=[8, 16, 32],
level_strides=[2],
level_paddings=[2, 1])
    # length of out_channels must be larger than length of in_channels
with pytest.raises(AssertionError):
SSDNeck(
in_channels=[8, 16],
out_channels=[8],
level_strides=[2],
level_paddings=[2])
# len(out_channels) - len(in_channels) must equal to len(level_strides)
with pytest.raises(AssertionError):
SSDNeck(
in_channels=[8, 16],
out_channels=[4, 16, 64],
level_strides=[2, 2],
level_paddings=[2, 2])
    # in_channels must be the same as out_channels[:len(in_channels)]
with pytest.raises(AssertionError):
SSDNeck(
in_channels=[8, 16],
out_channels=[4, 16, 64],
level_strides=[2],
level_paddings=[2])
ssd_neck = SSDNeck(
in_channels=[4],
out_channels=[4, 8, 16],
level_strides=[2, 1],
level_paddings=[1, 0])
feats = (torch.rand(1, 4, 16, 16), )
outs = ssd_neck(feats)
assert outs[0].shape == (1, 4, 16, 16)
assert outs[1].shape == (1, 8, 8, 8)
assert outs[2].shape == (1, 16, 6, 6)
# test SSD-Lite Neck
ssd_neck = SSDNeck(
in_channels=[4, 8],
out_channels=[4, 8, 16],
level_strides=[1],
level_paddings=[1],
l2_norm_scale=None,
use_depthwise=True,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU6'))
assert not hasattr(ssd_neck, 'l2_norm')
from mmcv.cnn.bricks import DepthwiseSeparableConvModule
assert isinstance(ssd_neck.extra_layers[0][-1],
DepthwiseSeparableConvModule)
feats = (torch.rand(1, 4, 8, 8), torch.rand(1, 8, 8, 8))
outs = ssd_neck(feats)
assert outs[0].shape == (1, 4, 8, 8)
assert outs[1].shape == (1, 8, 8, 8)
assert outs[2].shape == (1, 16, 8, 8)
def test_yolox_pafpn():
s = 64
in_channels = [8, 16, 32, 64]
feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8]
out_channels = 24
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
neck = YOLOXPAFPN(in_channels=in_channels, out_channels=out_channels)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
# test depth-wise
neck = YOLOXPAFPN(
in_channels=in_channels, out_channels=out_channels, use_depthwise=True)
from mmcv.cnn.bricks import DepthwiseSeparableConvModule
assert isinstance(neck.downsamples[0], DepthwiseSeparableConvModule)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(feats)):
assert outs[i].shape[1] == out_channels
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
def test_dyhead():
s = 64
in_channels = 8
out_channels = 16
feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8]
feats = [
torch.rand(1, in_channels, feat_sizes[i], feat_sizes[i])
for i in range(len(feat_sizes))
]
neck = DyHead(
in_channels=in_channels, out_channels=out_channels, num_blocks=3)
outs = neck(feats)
assert len(outs) == len(feats)
for i in range(len(outs)):
assert outs[i].shape[1] == out_channels
assert outs[i].shape[2] == outs[i].shape[3] == s // (2**i)
feat = torch.rand(1, 8, 4, 4)
# input feat must be tuple or list
with pytest.raises(AssertionError):
neck(feat)
def test_fpg():
# end_level=-1 is equal to end_level=3
norm_cfg = dict(type='BN', requires_grad=True)
FPG(in_channels=[8, 16, 32, 64],
out_channels=8,
inter_channels=8,
num_outs=5,
add_extra_convs=True,
start_level=1,
end_level=-1,
stack_times=9,
paths=['bu'] * 9,
same_down_trans=None,
same_up_trans=dict(
type='conv',
kernel_size=3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_lateral_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_down_trans=dict(
type='interpolation_conv',
mode='nearest',
kernel_size=3,
norm_cfg=norm_cfg,
order=('act', 'conv', 'norm'),
inplace=False),
across_up_trans=None,
across_skip_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
output_trans=dict(
type='last_conv',
kernel_size=3,
order=('act', 'conv', 'norm'),
inplace=False),
norm_cfg=norm_cfg,
skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])
FPG(in_channels=[8, 16, 32, 64],
out_channels=8,
inter_channels=8,
num_outs=5,
add_extra_convs=True,
start_level=1,
end_level=3,
stack_times=9,
paths=['bu'] * 9,
same_down_trans=None,
same_up_trans=dict(
type='conv',
kernel_size=3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_lateral_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
across_down_trans=dict(
type='interpolation_conv',
mode='nearest',
kernel_size=3,
norm_cfg=norm_cfg,
order=('act', 'conv', 'norm'),
inplace=False),
across_up_trans=None,
across_skip_trans=dict(
type='conv',
kernel_size=1,
norm_cfg=norm_cfg,
inplace=False,
order=('act', 'conv', 'norm')),
output_trans=dict(
type='last_conv',
kernel_size=3,
order=('act', 'conv', 'norm'),
inplace=False),
norm_cfg=norm_cfg,
skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])
# `end_level` is larger than len(in_channels) - 1
with pytest.raises(AssertionError):
FPG(in_channels=[8, 16, 32, 64],
out_channels=8,
stack_times=9,
paths=['bu'] * 9,
start_level=1,
end_level=4,
num_outs=2,
skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])
# `num_outs` is not equal to end_level - start_level + 1
with pytest.raises(AssertionError):
FPG(in_channels=[8, 16, 32, 64],
out_channels=8,
stack_times=9,
paths=['bu'] * 9,
start_level=1,
end_level=2,
num_outs=3,
skip_inds=[(0, 1, 2, 3), (0, 1, 2), (0, 1), (0, ), ()])
def test_fpn_carafe():
# end_level=-1 is equal to end_level=3
FPN_CARAFE(
in_channels=[8, 16, 32, 64],
out_channels=8,
start_level=0,
end_level=3,
num_outs=4)
FPN_CARAFE(
in_channels=[8, 16, 32, 64],
out_channels=8,
start_level=0,
end_level=-1,
num_outs=4)
# `end_level` is larger than len(in_channels) - 1
with pytest.raises(AssertionError):
FPN_CARAFE(
in_channels=[8, 16, 32, 64],
out_channels=8,
start_level=1,
end_level=4,
num_outs=2)
# `num_outs` is not equal to end_level - start_level + 1
with pytest.raises(AssertionError):
FPN_CARAFE(
in_channels=[8, 16, 32, 64],
out_channels=8,
start_level=1,
end_level=2,
num_outs=3)
def test_nas_fpn():
# end_level=-1 is equal to end_level=3
NASFPN(
in_channels=[8, 16, 32, 64],
out_channels=8,
stack_times=9,
start_level=0,
end_level=3,
num_outs=4)
NASFPN(
in_channels=[8, 16, 32, 64],
out_channels=8,
stack_times=9,
start_level=0,
end_level=-1,
num_outs=4)
# `end_level` is larger than len(in_channels) - 1
with pytest.raises(AssertionError):
NASFPN(
in_channels=[8, 16, 32, 64],
out_channels=8,
stack_times=9,
start_level=1,
end_level=4,
num_outs=2)
# `num_outs` is not equal to end_level - start_level + 1
with pytest.raises(AssertionError):
NASFPN(
in_channels=[8, 16, 32, 64],
out_channels=8,
stack_times=9,
start_level=1,
end_level=2,
num_outs=3)
def test_nasfcos_fpn():
# end_level=-1 is equal to end_level=3
NASFCOS_FPN(
in_channels=[8, 16, 32, 64],
out_channels=8,
start_level=0,
end_level=3,
num_outs=4)
NASFCOS_FPN(
in_channels=[8, 16, 32, 64],
out_channels=8,
start_level=0,
end_level=-1,
num_outs=4)
# `end_level` is larger than len(in_channels) - 1
with pytest.raises(AssertionError):
NASFCOS_FPN(
in_channels=[8, 16, 32, 64],
out_channels=8,
start_level=1,
end_level=4,
num_outs=2)
# `num_outs` is not equal to end_level - start_level + 1
with pytest.raises(AssertionError):
NASFCOS_FPN(
in_channels=[8, 16, 32, 64],
out_channels=8,
start_level=1,
end_level=2,
num_outs=3)
| 20,961 | 30.10089 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_plugins.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv import ConfigDict
from mmcv.cnn import build_plugin_layer
from mmdet.models.plugins import DropBlock
def test_dropblock():
feat = torch.rand(1, 1, 11, 11)
drop_prob = 1.0
dropblock = DropBlock(drop_prob, block_size=11, warmup_iters=0)
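    # block_size covers the whole 11x11 feature map and drop_prob is 1.0,
    # so every activation is expected to be zeroed out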
out_feat = dropblock(feat)
assert (out_feat == 0).all() and out_feat.shape == feat.shape
drop_prob = 0.5
dropblock = DropBlock(drop_prob, block_size=5, warmup_iters=0)
out_feat = dropblock(feat)
assert out_feat.shape == feat.shape
    # drop_prob must be in the range (0, 1]
with pytest.raises(AssertionError):
DropBlock(1.5, 3)
# block_size cannot be an even number
with pytest.raises(AssertionError):
DropBlock(0.5, 2)
# warmup_iters cannot be less than 0
with pytest.raises(AssertionError):
DropBlock(0.5, 3, -1)
def test_pixel_decoder():
base_channels = 64
pixel_decoder_cfg = ConfigDict(
dict(
type='PixelDecoder',
in_channels=[base_channels * 2**i for i in range(4)],
feat_channels=base_channels,
out_channels=base_channels,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU')))
self = build_plugin_layer(pixel_decoder_cfg)[1]
img_metas = [{}, {}]
feats = [
torch.rand((2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
for i in range(4)
]
mask_feature, memory = self(feats, img_metas)
assert (memory == feats[-1]).all()
assert mask_feature.shape == feats[0].shape
def test_transformer_encoder_pixel_decoder():
base_channels = 64
pixel_decoder_cfg = ConfigDict(
dict(
type='TransformerEncoderPixelDecoder',
in_channels=[base_channels * 2**i for i in range(4)],
feat_channels=base_channels,
out_channels=base_channels,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU'),
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=base_channels,
num_heads=8,
attn_drop=0.1,
proj_drop=0.1,
dropout_layer=None,
batch_first=False),
ffn_cfgs=dict(
embed_dims=base_channels,
feedforward_channels=base_channels * 8,
num_fcs=2,
act_cfg=dict(type='ReLU', inplace=True),
ffn_drop=0.1,
dropout_layer=None,
add_identity=True),
operation_order=('self_attn', 'norm', 'ffn', 'norm'),
norm_cfg=dict(type='LN'),
init_cfg=None,
batch_first=False),
init_cfg=None),
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=base_channels // 2,
normalize=True)))
self = build_plugin_layer(pixel_decoder_cfg)[1]
img_metas = [{
'batch_input_shape': (128, 160),
'img_shape': (120, 160, 3),
}, {
'batch_input_shape': (128, 160),
'img_shape': (125, 160, 3),
}]
feats = [
torch.rand((2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
for i in range(4)
]
mask_feature, memory = self(feats, img_metas)
assert memory.shape[-2:] == feats[-1].shape[-2:]
assert mask_feature.shape == feats[0].shape
def test_msdeformattn_pixel_decoder():
base_channels = 64
pixel_decoder_cfg = ConfigDict(
dict(
type='MSDeformAttnPixelDecoder',
in_channels=[base_channels * 2**i for i in range(4)],
strides=[4, 8, 16, 32],
feat_channels=base_channels,
out_channels=base_channels,
num_outs=3,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU'),
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=dict(
type='MultiScaleDeformableAttention',
embed_dims=base_channels,
num_heads=8,
num_levels=3,
num_points=4,
im2col_step=64,
dropout=0.0,
batch_first=False,
norm_cfg=None,
init_cfg=None),
ffn_cfgs=dict(
type='FFN',
embed_dims=base_channels,
feedforward_channels=base_channels * 4,
num_fcs=2,
ffn_drop=0.0,
act_cfg=dict(type='ReLU', inplace=True)),
operation_order=('self_attn', 'norm', 'ffn', 'norm')),
init_cfg=None),
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=base_channels // 2,
normalize=True),
init_cfg=None), )
self = build_plugin_layer(pixel_decoder_cfg)[1]
feats = [
torch.rand((2, base_channels * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
for i in range(4)
]
mask_feature, multi_scale_features = self(feats)
assert mask_feature.shape == feats[0].shape
assert len(multi_scale_features) == 3
multi_scale_features = multi_scale_features[::-1]
for i in range(3):
assert multi_scale_features[i].shape[-2:] == feats[i + 1].shape[-2:]
| 6,057 | 35.059524 | 77 | py |
mmdetection | mmdetection-master/tests/test_models/test_backbones/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .utils import check_norm_state, is_block, is_norm
__all__ = ['is_block', 'is_norm', 'check_norm_state']
| 158 | 30.8 | 54 | py |
mmdetection | mmdetection-master/tests/test_models/test_backbones/test_csp_darknet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.csp_darknet import CSPDarknet
from .utils import check_norm_state, is_norm
def test_csp_darknet_backbone():
with pytest.raises(ValueError):
        # frozen_stages must be in range(-1, len(arch_setting) + 1)
CSPDarknet(frozen_stages=6)
with pytest.raises(AssertionError):
        # each out_indices entry must be in range(len(arch_setting) + 1)
CSPDarknet(out_indices=[6])
# Test CSPDarknet with first stage frozen
frozen_stages = 1
model = CSPDarknet(frozen_stages=frozen_stages)
model.train()
for mod in model.stem.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'stage{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test CSPDarknet with norm_eval=True
model = CSPDarknet(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
    # Test CSPDarknet-P5 forward with widen_factor=0.25
model = CSPDarknet(arch='P5', widen_factor=0.25, out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 16, 32, 32))
assert feat[1].shape == torch.Size((1, 32, 16, 16))
assert feat[2].shape == torch.Size((1, 64, 8, 8))
assert feat[3].shape == torch.Size((1, 128, 4, 4))
assert feat[4].shape == torch.Size((1, 256, 2, 2))
    # Test CSPDarknet-P6 forward with widen_factor=0.25
model = CSPDarknet(
arch='P6',
widen_factor=0.25,
out_indices=range(0, 6),
spp_kernal_sizes=(3, 5, 7))
model.train()
imgs = torch.randn(1, 3, 128, 128)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 16, 64, 64))
assert feat[1].shape == torch.Size((1, 32, 32, 32))
assert feat[2].shape == torch.Size((1, 64, 16, 16))
assert feat[3].shape == torch.Size((1, 128, 8, 8))
assert feat[4].shape == torch.Size((1, 192, 4, 4))
assert feat[5].shape == torch.Size((1, 256, 2, 2))
# Test CSPDarknet forward with dict(type='ReLU')
model = CSPDarknet(
widen_factor=0.125, act_cfg=dict(type='ReLU'), out_indices=range(0, 5))
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with BatchNorm forward
model = CSPDarknet(widen_factor=0.125, out_indices=range(0, 5))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 64, 64)
feat = model(imgs)
assert len(feat) == 5
assert feat[0].shape == torch.Size((1, 8, 32, 32))
assert feat[1].shape == torch.Size((1, 16, 16, 16))
assert feat[2].shape == torch.Size((1, 32, 8, 8))
assert feat[3].shape == torch.Size((1, 64, 4, 4))
assert feat[4].shape == torch.Size((1, 128, 2, 2))
# Test CSPDarknet with custom arch forward
arch_ovewrite = [[32, 56, 3, True, False], [56, 224, 2, True, False],
[224, 512, 1, True, False]]
model = CSPDarknet(
arch_ovewrite=arch_ovewrite,
widen_factor=0.25,
out_indices=(0, 1, 2, 3))
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size((1, 8, 16, 16))
assert feat[1].shape == torch.Size((1, 14, 8, 8))
assert feat[2].shape == torch.Size((1, 56, 4, 4))
assert feat[3].shape == torch.Size((1, 128, 2, 2))
| 4,117 | 34.196581 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_backbones/test_detectors_resnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.models.backbones import DetectoRS_ResNet
def test_detectorrs_resnet_backbone():
detectorrs_cfg = dict(
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
frozen_stages=1,
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=True,
style='pytorch',
conv_cfg=dict(type='ConvAWS'),
sac=dict(type='SAC', use_deform=True),
stage_with_sac=(False, True, True, True),
output_img=True)
"""Test init_weights config"""
with pytest.raises(AssertionError):
# pretrained and init_cfg cannot be specified at the same time
DetectoRS_ResNet(
**detectorrs_cfg, pretrained='Pretrained', init_cfg='Pretrained')
with pytest.raises(AssertionError):
# init_cfg must be a dict
DetectoRS_ResNet(
**detectorrs_cfg, pretrained=None, init_cfg=['Pretrained'])
with pytest.raises(KeyError):
# init_cfg must contain the key `type`
DetectoRS_ResNet(
**detectorrs_cfg,
pretrained=None,
init_cfg=dict(checkpoint='Pretrained'))
with pytest.raises(AssertionError):
        # init_cfg only supports initialization from a pretrained model
DetectoRS_ResNet(
**detectorrs_cfg, pretrained=None, init_cfg=dict(type='Trained'))
with pytest.raises(TypeError):
        # pretrained must be a str or None
model = DetectoRS_ResNet(
**detectorrs_cfg, pretrained=['Pretrained'], init_cfg=None)
model.init_weights()
| 1,611 | 32.583333 | 77 | py |
mmdetection | mmdetection-master/tests/test_models/test_backbones/test_efficientnet.py | import pytest
import torch
from mmdet.models.backbones import EfficientNet
def test_efficientnet_backbone():
"""Test EfficientNet backbone."""
with pytest.raises(AssertionError):
# EfficientNet arch should be a key in EfficientNet.arch_settings
EfficientNet(arch='c3')
model = EfficientNet(arch='b0', out_indices=(0, 1, 2, 3, 4, 5, 6))
model.train()
imgs = torch.randn(2, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size([2, 32, 16, 16])
assert feat[1].shape == torch.Size([2, 16, 16, 16])
assert feat[2].shape == torch.Size([2, 24, 8, 8])
assert feat[3].shape == torch.Size([2, 40, 4, 4])
assert feat[4].shape == torch.Size([2, 112, 2, 2])
assert feat[5].shape == torch.Size([2, 320, 1, 1])
assert feat[6].shape == torch.Size([2, 1280, 1, 1])
| 859 | 32.076923 | 73 | py |
mmdetection | mmdetection-master/tests/test_models/test_backbones/test_hourglass.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hourglass import HourglassNet
def test_hourglass_backbone():
with pytest.raises(AssertionError):
        # HourglassNet's num_stacks should be larger than 0
HourglassNet(num_stacks=0)
with pytest.raises(AssertionError):
# len(stage_channels) should equal len(stage_blocks)
HourglassNet(
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2, 4])
with pytest.raises(AssertionError):
        # len(stage_channels) should be larger than downsample_times
HourglassNet(
downsample_times=5,
stage_channels=[256, 256, 384, 384, 384],
stage_blocks=[2, 2, 2, 2, 2])
# Test HourglassNet-52
model = HourglassNet(
num_stacks=1,
stage_channels=(64, 64, 96, 96, 96, 128),
feat_channel=64)
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([1, 64, 64, 64])
# Test HourglassNet-104
model = HourglassNet(
num_stacks=2,
stage_channels=(64, 64, 96, 96, 96, 128),
feat_channel=64)
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert len(feat) == 2
assert feat[0].shape == torch.Size([1, 64, 64, 64])
assert feat[1].shape == torch.Size([1, 64, 64, 64])
| 1,464 | 28.3 | 65 | py |
mmdetection | mmdetection-master/tests/test_models/test_backbones/test_hrnet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones.hrnet import HRModule, HRNet
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
@pytest.mark.parametrize('block', [BasicBlock, Bottleneck])
def test_hrmodule(block):
# Test multiscale forward
num_channles = (32, 64)
in_channels = [c * block.expansion for c in num_channles]
hrmodule = HRModule(
num_branches=2,
blocks=block,
in_channels=in_channels,
num_blocks=(4, 4),
num_channels=num_channles,
)
feats = [
torch.randn(1, in_channels[0], 64, 64),
torch.randn(1, in_channels[1], 32, 32)
]
feats = hrmodule(feats)
assert len(feats) == 2
assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
assert feats[1].shape == torch.Size([1, in_channels[1], 32, 32])
# Test single scale forward
num_channles = (32, 64)
in_channels = [c * block.expansion for c in num_channles]
hrmodule = HRModule(
num_branches=2,
blocks=block,
in_channels=in_channels,
num_blocks=(4, 4),
num_channels=num_channles,
multiscale_output=False,
)
feats = [
torch.randn(1, in_channels[0], 64, 64),
torch.randn(1, in_channels[1], 32, 32)
]
feats = hrmodule(feats)
assert len(feats) == 1
assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
def test_hrnet_backbone():
# only have 3 stages
extra = dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)))
with pytest.raises(AssertionError):
# HRNet now only support 4 stages
HRNet(extra=extra)
extra['stage4'] = dict(
num_modules=3,
num_branches=3, # should be 4
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))
with pytest.raises(AssertionError):
# len(num_blocks) should equal num_branches
HRNet(extra=extra)
extra['stage4']['num_branches'] = 4
# Test hrnetv2p_w32
model = HRNet(extra=extra)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feats = model(imgs)
assert len(feats) == 4
assert feats[0].shape == torch.Size([1, 32, 64, 64])
assert feats[3].shape == torch.Size([1, 256, 8, 8])
# Test single scale output
model = HRNet(extra=extra, multiscale_output=False)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feats = model(imgs)
assert len(feats) == 1
assert feats[0].shape == torch.Size([1, 32, 64, 64])
| 3,089 | 26.589286 | 68 | py |