mmdetection | mmdetection-master/mmdet/models/detectors/detr.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class DETR(SingleStageDetector):
r"""Implementation of `DETR: End-to-End Object Detection with
Transformers <https://arxiv.org/pdf/2005.12872>`_"""
def __init__(self,
backbone,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(DETR, self).__init__(backbone, None, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
# over-write `forward_dummy` because:
# the forward of bbox_head requires img_metas
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
warnings.warn('Warning! MultiheadAttention in DETR does not '
'support flops computation! Do not use the '
'results in your papers!')
batch_size, _, height, width = img.shape
dummy_img_metas = [
dict(
batch_input_shape=(height, width),
img_shape=(height, width, 3)) for _ in range(batch_size)
]
x = self.extract_feat(img)
outs = self.bbox_head(x, dummy_img_metas)
return outs
# over-write `onnx_export` because:
# (1) the forward of bbox_head requires img_metas
# (2) the different behavior (e.g. construction of `masks`) between
# torch and ONNX model, during the forward of bbox_head
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
# forward of this head requires img_metas
outs = self.bbox_head.forward_onnx(x, img_metas)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
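# --- Illustrative usage sketch (added for clarity; not part of the original
# mmdetection file). It shows how ``forward_dummy`` above is typically driven
# by ``tools/analysis_tools/get_flops.py``: build the detector from a config
# and feed a dummy image batch. The config path below is an assumption chosen
# only for illustration.
def _demo_detr_forward_dummy(config_path='configs/detr/detr_r50_8x2_150e_coco.py'):
    from mmcv import Config
    from mmdet.models import build_detector
    cfg = Config.fromfile(config_path)  # assumed to point at a DETR config
    model = build_detector(cfg.model)
    model.eval()
    dummy_input = torch.rand(1, 3, 800, 1199)
    with torch.no_grad():
        outs = model.forward_dummy(dummy_input)
    return outs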
mmdetection | mmdetection-master/mmdet/models/detectors/fast_rcnn.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FastRCNN(TwoStageDetector):
"""Implementation of `Fast R-CNN <https://arxiv.org/abs/1504.08083>`_"""
def __init__(self,
backbone,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(FastRCNN, self).__init__(
backbone=backbone,
neck=neck,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def forward_test(self, imgs, img_metas, proposals, **kwargs):
"""
Args:
imgs (List[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (List[List[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch.
proposals (List[List[Tensor]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. The Tensor should have a shape Px4, where
P is the number of proposals.
"""
for var, name in [(imgs, 'imgs'), (img_metas, 'img_metas')]:
if not isinstance(var, list):
raise TypeError(f'{name} must be a list, but got {type(var)}')
num_augs = len(imgs)
if num_augs != len(img_metas):
raise ValueError(f'num of augmentations ({len(imgs)}) '
f'!= num of image meta ({len(img_metas)})')
if num_augs == 1:
return self.simple_test(imgs[0], img_metas[0], proposals[0],
**kwargs)
else:
# TODO: support test-time augmentation
            raise NotImplementedError
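# --- Illustrative sketch (added for clarity; not part of the original file).
# It makes the nesting expected by ``forward_test`` above concrete: the outer
# lists index test-time augmentations, the inner entries index images in the
# batch, and proposals are P x 4 boxes. All shapes and meta values below are
# assumptions chosen only for demonstration.
def _demo_fast_rcnn_test_inputs():
    import torch
    imgs = [torch.rand(1, 3, 800, 1216)]  # one augmentation, batch of one image
    img_metas = [[
        dict(
            img_shape=(800, 1216, 3),
            ori_shape=(427, 640, 3),
            pad_shape=(800, 1216, 3),
            scale_factor=1.0,
            flip=False)
    ]]
    proposals = [[torch.rand(100, 4)]]  # 100 proposal boxes for the single image
    return imgs, img_metas, proposals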
mmdetection | mmdetection-master/mmdet/models/detectors/faster_rcnn.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class FasterRCNN(TwoStageDetector):
"""Implementation of `Faster R-CNN <https://arxiv.org/abs/1506.01497>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(FasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/fcos.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class FCOS(SingleStageDetector):
"""Implementation of `FCOS <https://arxiv.org/abs/1904.01355>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/fovea.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class FOVEA(SingleStageDetector):
"""Implementation of `FoveaBox <https://arxiv.org/abs/1904.03797>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FOVEA, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/fsaf.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class FSAF(SingleStageDetector):
"""Implementation of `FSAF <https://arxiv.org/abs/1903.00621>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(FSAF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/gfl.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class GFL(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(GFL, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/grid_rcnn.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class GridRCNN(TwoStageDetector):
"""Grid R-CNN.
This detector is the implementation of:
- Grid R-CNN (https://arxiv.org/abs/1811.12030)
- Grid R-CNN Plus: Faster and Better (https://arxiv.org/abs/1906.05688)
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(GridRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/htc.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .cascade_rcnn import CascadeRCNN
@DETECTORS.register_module()
class HybridTaskCascade(CascadeRCNN):
"""Implementation of `HTC <https://arxiv.org/abs/1901.07518>`_"""
def __init__(self, **kwargs):
super(HybridTaskCascade, self).__init__(**kwargs)
@property
def with_semantic(self):
"""bool: whether the detector has a semantic head"""
return self.roi_head.with_semantic
mmdetection | mmdetection-master/mmdet/models/detectors/kd_one_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
import mmcv
import torch
from mmcv.runner import load_checkpoint
from .. import build_detector
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class KnowledgeDistillationSingleStageDetector(SingleStageDetector):
r"""Implementation of `Distilling the Knowledge in a Neural Network.
<https://arxiv.org/abs/1503.02531>`_.
Args:
teacher_config (str | dict): Config file path
or the config object of teacher model.
teacher_ckpt (str, optional): Checkpoint path of teacher model.
If left as None, the model will not load any weights.
"""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_config,
teacher_ckpt=None,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
# Build teacher model
if isinstance(teacher_config, (str, Path)):
teacher_config = mmcv.Config.fromfile(teacher_config)
self.teacher_model = build_detector(teacher_config['model'])
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item contains the ground-truth
                boxes of one image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
x = self.extract_feat(img)
with torch.no_grad():
teacher_x = self.teacher_model.extract_feat(img)
out_teacher = self.teacher_model.bbox_head(teacher_x)
losses = self.bbox_head.forward_train(x, out_teacher, img_metas,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
def cuda(self, device=None):
"""Since teacher_model is registered as a plain object, it is necessary
to put the teacher model to cuda when calling cuda function."""
self.teacher_model.cuda(device=device)
return super().cuda(device=device)
def train(self, mode=True):
"""Set the same train mode for teacher and student model."""
if self.eval_teacher:
self.teacher_model.train(False)
else:
self.teacher_model.train(mode)
super().train(mode)
def __setattr__(self, name, value):
"""Set attribute, i.e. self.name = value
        This override prevents the teacher model from being registered as a
nn.Module. The teacher module is registered as a plain object, so that
the teacher parameters will not show up when calling
``self.parameters``, ``self.modules``, ``self.children`` methods.
"""
if name == 'teacher_model':
object.__setattr__(self, name, value)
else:
super().__setattr__(name, value)
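# --- Illustrative sketch (added for clarity; not part of the original file)
# of the ``__setattr__`` override above: assigning the teacher through
# ``object.__setattr__`` keeps it out of ``parameters()`` / ``modules()``, so
# an optimizer built from the student never sees the teacher weights. The toy
# module below is an assumption used only to demonstrate the mechanism.
def _demo_hidden_teacher():
    import torch.nn as nn

    class ToyStudent(nn.Module):

        def __init__(self):
            super().__init__()
            self.head = nn.Linear(4, 2)  # registered as a sub-module
            # bypass nn.Module.__setattr__, so it is NOT registered
            object.__setattr__(self, 'teacher', nn.Linear(4, 2))

    student = ToyStudent()
    num_params = sum(p.numel() for p in student.parameters())
    assert num_params == 10  # only the head: 4 * 2 weights + 2 biases
    return student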
mmdetection | mmdetection-master/mmdet/models/detectors/lad.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.runner import load_checkpoint
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .kd_one_stage import KnowledgeDistillationSingleStageDetector
@DETECTORS.register_module()
class LAD(KnowledgeDistillationSingleStageDetector):
"""Implementation of `LAD <https://arxiv.org/pdf/2108.10520.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
teacher_backbone,
teacher_neck,
teacher_bbox_head,
teacher_ckpt,
eval_teacher=True,
train_cfg=None,
test_cfg=None,
pretrained=None):
super(KnowledgeDistillationSingleStageDetector,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained)
self.eval_teacher = eval_teacher
self.teacher_model = nn.Module()
self.teacher_model.backbone = build_backbone(teacher_backbone)
if teacher_neck is not None:
self.teacher_model.neck = build_neck(teacher_neck)
teacher_bbox_head.update(train_cfg=train_cfg)
teacher_bbox_head.update(test_cfg=test_cfg)
self.teacher_model.bbox_head = build_head(teacher_bbox_head)
if teacher_ckpt is not None:
load_checkpoint(
self.teacher_model, teacher_ckpt, map_location='cpu')
@property
def with_teacher_neck(self):
"""bool: whether the detector has a teacher_neck"""
return hasattr(self.teacher_model, 'neck') and \
self.teacher_model.neck is not None
def extract_teacher_feat(self, img):
"""Directly extract teacher features from the backbone+neck."""
x = self.teacher_model.backbone(img)
if self.with_teacher_neck:
x = self.teacher_model.neck(x)
return x
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item contains the ground-truth
                boxes of one image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# get label assignment from the teacher
with torch.no_grad():
x_teacher = self.extract_teacher_feat(img)
outs_teacher = self.teacher_model.bbox_head(x_teacher)
label_assignment_results = \
self.teacher_model.bbox_head.get_label_assignment(
*outs_teacher, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
        # the student uses the label assignment from the teacher to learn
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, label_assignment_results,
img_metas, gt_bboxes, gt_labels,
gt_bboxes_ignore)
return losses
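# --- Illustrative sketch (added for clarity; not part of the original file)
# of the container pattern used in ``__init__`` above: a bare ``nn.Module``
# serves as a namespace for the teacher's backbone/neck/head, so a single
# ``.eval()`` or ``.cuda()`` call on the container propagates to all of them.
# The toy layers below are assumptions for demonstration only.
def _demo_teacher_container():
    teacher = nn.Module()
    teacher.backbone = nn.Conv2d(3, 8, 3)
    teacher.neck = nn.Identity()
    teacher.eval()  # recursively switches every registered sub-module
    assert not teacher.backbone.training
    return teacher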
mmdetection | mmdetection-master/mmdet/models/detectors/mask2former.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .maskformer import MaskFormer
@DETECTORS.register_module()
class Mask2Former(MaskFormer):
r"""Implementation of `Masked-attention Mask
Transformer for Universal Image Segmentation
<https://arxiv.org/pdf/2112.01527>`_."""
def __init__(self,
backbone,
neck=None,
panoptic_head=None,
panoptic_fusion_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None):
super().__init__(
backbone,
neck=neck,
panoptic_head=panoptic_head,
panoptic_fusion_head=panoptic_fusion_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/mask_rcnn.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class MaskRCNN(TwoStageDetector):
"""Implementation of `Mask R-CNN <https://arxiv.org/abs/1703.06870>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(MaskRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/mask_scoring_rcnn.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class MaskScoringRCNN(TwoStageDetector):
"""Mask Scoring RCNN.
https://arxiv.org/abs/1903.00241
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(MaskScoringRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/maskformer.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import mmcv
import numpy as np
from mmdet.core import INSTANCE_OFFSET, bbox2result
from mmdet.core.visualization import imshow_det_bboxes
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class MaskFormer(SingleStageDetector):
r"""Implementation of `Per-Pixel Classification is
NOT All You Need for Semantic Segmentation
<https://arxiv.org/pdf/2107.06278>`_."""
def __init__(self,
backbone,
neck=None,
panoptic_head=None,
panoptic_fusion_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None):
super(SingleStageDetector, self).__init__(init_cfg=init_cfg)
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
panoptic_head_ = copy.deepcopy(panoptic_head)
panoptic_head_.update(train_cfg=train_cfg)
panoptic_head_.update(test_cfg=test_cfg)
self.panoptic_head = build_head(panoptic_head_)
panoptic_fusion_head_ = copy.deepcopy(panoptic_fusion_head)
panoptic_fusion_head_.update(test_cfg=test_cfg)
self.panoptic_fusion_head = build_head(panoptic_fusion_head_)
self.num_things_classes = self.panoptic_head.num_things_classes
self.num_stuff_classes = self.panoptic_head.num_stuff_classes
self.num_classes = self.panoptic_head.num_classes
self.train_cfg = train_cfg
self.test_cfg = test_cfg
# BaseDetector.show_result default for instance segmentation
if self.num_stuff_classes > 0:
self.show_result = self._show_pan_result
def forward_dummy(self, img, img_metas):
"""Used for computing network flops. See
`mmdetection/tools/analysis_tools/get_flops.py`
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[Dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
"""
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
outs = self.panoptic_head(x, img_metas)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_masks,
gt_semantic_seg=None,
gt_bboxes_ignore=None,
                      **kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[Dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box.
gt_masks (list[BitmapMasks]): true segmentation masks for each box
used if the architecture supports a segmentation task.
gt_semantic_seg (list[tensor]): semantic segmentation mask for
images for panoptic segmentation.
Defaults to None for instance segmentation.
gt_bboxes_ignore (list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
Defaults to None.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# add batch_input_shape in img_metas
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.panoptic_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_masks,
gt_semantic_seg,
gt_bboxes_ignore)
return losses
def simple_test(self, imgs, img_metas, **kwargs):
"""Test without augmentation.
Args:
imgs (Tensor): A batch of images.
img_metas (list[dict]): List of image information.
Returns:
list[dict[str, np.array | tuple[list]] | tuple[list]]:
Semantic segmentation results and panoptic segmentation \
results of each image for panoptic segmentation, or formatted \
bbox and mask results of each image for instance segmentation.
.. code-block:: none
[
# panoptic segmentation
{
'pan_results': np.array, # shape = [h, w]
'ins_results': tuple[list],
# semantic segmentation results are not supported yet
'sem_results': np.array
},
...
]
or
.. code-block:: none
[
# instance segmentation
(
bboxes, # list[np.array]
masks # list[list[np.array]]
),
...
]
"""
feats = self.extract_feat(imgs)
mask_cls_results, mask_pred_results = self.panoptic_head.simple_test(
feats, img_metas, **kwargs)
results = self.panoptic_fusion_head.simple_test(
mask_cls_results, mask_pred_results, img_metas, **kwargs)
for i in range(len(results)):
if 'pan_results' in results[i]:
results[i]['pan_results'] = results[i]['pan_results'].detach(
).cpu().numpy()
if 'ins_results' in results[i]:
labels_per_image, bboxes, mask_pred_binary = results[i][
'ins_results']
bbox_results = bbox2result(bboxes, labels_per_image,
self.num_things_classes)
mask_results = [[] for _ in range(self.num_things_classes)]
for j, label in enumerate(labels_per_image):
mask = mask_pred_binary[j].detach().cpu().numpy()
mask_results[label].append(mask)
results[i]['ins_results'] = bbox_results, mask_results
            assert 'sem_results' not in results[i], 'semantic segmentation '\
'results are not supported yet.'
if self.num_stuff_classes == 0:
results = [res['ins_results'] for res in results]
return results
def aug_test(self, imgs, img_metas, **kwargs):
raise NotImplementedError
def onnx_export(self, img, img_metas):
raise NotImplementedError
def _show_pan_result(self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `panoptic result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (dict): The results.
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'.
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'.
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None.
thickness (int): Thickness of lines. Default: 2.
font_size (int): Font size of texts. Default: 13.
win_name (str): The window name. Default: ''.
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`.
"""
img = mmcv.imread(img)
img = img.copy()
pan_results = result['pan_results']
        # process ids in descending order so instance ('thing') segments come first
ids = np.unique(pan_results)[::-1]
legal_indices = ids != self.num_classes # for VOID label
ids = ids[legal_indices]
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
segms = (pan_results[None] == ids[:, None, None])
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
img = imshow_det_bboxes(
img,
segms=segms,
labels=labels,
class_names=self.CLASSES,
bbox_color=bbox_color,
text_color=text_color,
mask_color=mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
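# --- Illustrative sketch (added for clarity; not part of the original file)
# of how the panoptic ids in ``pan_results`` are decoded in
# ``_show_pan_result`` above: each pixel stores
# ``label + instance_index * INSTANCE_OFFSET`` and the VOID region uses
# ``num_classes``. The tiny 2x2 map and ``num_classes=133`` (COCO panoptic)
# below are assumptions for demonstration only.
def _demo_decode_pan_results(num_classes=133):
    pan = np.array([[num_classes, 2 + INSTANCE_OFFSET],
                    [2 + INSTANCE_OFFSET, 57]])  # toy panoptic id map
    ids = np.unique(pan)[::-1]
    ids = ids[ids != num_classes]  # drop the VOID label
    labels = ids % INSTANCE_OFFSET  # semantic class of every segment
    instance_inds = ids // INSTANCE_OFFSET  # 0 for stuff, >= 1 for things
    segms = pan[None] == ids[:, None, None]  # per-segment binary masks
    return labels, instance_inds, segms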
mmdetection | mmdetection-master/mmdet/models/detectors/nasfcos.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class NASFCOS(SingleStageDetector):
"""NAS-FCOS: Fast Neural Architecture Search for Object Detection.
    https://arxiv.org/abs/1906.04423
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(NASFCOS, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
| 689 | 29 | 75 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/paa.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class PAA(SingleStageDetector):
"""Implementation of `PAA <https://arxiv.org/pdf/2007.08103.pdf>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(PAA, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
| 636 | 30.85 | 74 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/panoptic_fpn.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .panoptic_two_stage_segmentor import TwoStagePanopticSegmentor
@DETECTORS.register_module()
class PanopticFPN(TwoStagePanopticSegmentor):
r"""Implementation of `Panoptic feature pyramid
networks <https://arxiv.org/pdf/1901.02446>`_"""
def __init__(
self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
# for panoptic segmentation
semantic_head=None,
panoptic_fusion_head=None):
super(PanopticFPN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg,
semantic_head=semantic_head,
panoptic_fusion_head=panoptic_fusion_head)
mmdetection | mmdetection-master/mmdet/models/detectors/panoptic_two_stage_segmentor.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.core import INSTANCE_OFFSET, bbox2roi, multiclass_nms
from mmdet.core.visualization import imshow_det_bboxes
from ..builder import DETECTORS, build_head
from ..roi_heads.mask_heads.fcn_mask_head import _do_paste_mask
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class TwoStagePanopticSegmentor(TwoStageDetector):
"""Base class of Two-stage Panoptic Segmentor.
As well as the components in TwoStageDetector, Panoptic Segmentor has extra
semantic_head and panoptic_fusion_head.
"""
def __init__(
self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None,
# for panoptic segmentation
semantic_head=None,
panoptic_fusion_head=None):
super(TwoStagePanopticSegmentor,
self).__init__(backbone, neck, rpn_head, roi_head, train_cfg,
test_cfg, pretrained, init_cfg)
if semantic_head is not None:
self.semantic_head = build_head(semantic_head)
if panoptic_fusion_head is not None:
panoptic_cfg = test_cfg.panoptic if test_cfg is not None else None
panoptic_fusion_head_ = panoptic_fusion_head.deepcopy()
panoptic_fusion_head_.update(test_cfg=panoptic_cfg)
self.panoptic_fusion_head = build_head(panoptic_fusion_head_)
self.num_things_classes = self.panoptic_fusion_head.\
num_things_classes
self.num_stuff_classes = self.panoptic_fusion_head.\
num_stuff_classes
self.num_classes = self.panoptic_fusion_head.num_classes
@property
def with_semantic_head(self):
return hasattr(self,
'semantic_head') and self.semantic_head is not None
@property
def with_panoptic_fusion_head(self):
        return hasattr(self, 'panoptic_fusion_head') and \
self.panoptic_fusion_head is not None
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/get_flops.py`
"""
raise NotImplementedError(
f'`forward_dummy` is not implemented in {self.__class__.__name__}')
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
gt_semantic_seg=None,
proposals=None,
**kwargs):
x = self.extract_feat(img)
losses = dict()
# RPN forward and loss
if self.with_rpn:
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
rpn_losses, proposal_list = self.rpn_head.forward_train(
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=gt_bboxes_ignore,
proposal_cfg=proposal_cfg)
losses.update(rpn_losses)
else:
proposal_list = proposals
roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
gt_bboxes, gt_labels,
gt_bboxes_ignore, gt_masks,
**kwargs)
losses.update(roi_losses)
semantic_loss = self.semantic_head.forward_train(x, gt_semantic_seg)
losses.update(semantic_loss)
return losses
def simple_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False):
"""Simple test for mask head without augmentation."""
img_shapes = tuple(meta['ori_shape']
for meta in img_metas) if rescale else tuple(
meta['pad_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
masks = []
for img_shape in img_shapes:
out_shape = (0, self.roi_head.bbox_head.num_classes) \
+ img_shape[:2]
masks.append(det_bboxes[0].new_zeros(out_shape))
mask_pred = det_bboxes[0].new_zeros((0, 80, 28, 28))
mask_results = dict(
masks=masks, mask_pred=mask_pred, mask_feats=None)
return mask_results
_bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))]
if rescale:
if not isinstance(scale_factors[0], float):
scale_factors = [
det_bboxes[0].new_tensor(scale_factor)
for scale_factor in scale_factors
]
_bboxes = [
_bboxes[i] * scale_factors[i] for i in range(len(_bboxes))
]
mask_rois = bbox2roi(_bboxes)
mask_results = self.roi_head._mask_forward(x, mask_rois)
mask_pred = mask_results['mask_pred']
# split batch mask prediction back to each image
num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
# resize the mask_preds to (K, H, W)
masks = []
for i in range(len(_bboxes)):
det_bbox = det_bboxes[i][:, :4]
det_label = det_labels[i]
mask_pred = mask_preds[i].sigmoid()
box_inds = torch.arange(mask_pred.shape[0])
mask_pred = mask_pred[box_inds, det_label][:, None]
img_h, img_w, _ = img_shapes[i]
mask_pred, _ = _do_paste_mask(
mask_pred, det_bbox, img_h, img_w, skip_empty=False)
masks.append(mask_pred)
mask_results['masks'] = masks
return mask_results
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without Augmentation."""
x = self.extract_feat(img)
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
else:
proposal_list = proposals
bboxes, scores = self.roi_head.simple_test_bboxes(
x, img_metas, proposal_list, None, rescale=rescale)
pan_cfg = self.test_cfg.panoptic
# class-wise predictions
det_bboxes = []
det_labels = []
for bboxe, score in zip(bboxes, scores):
det_bbox, det_label = multiclass_nms(bboxe, score,
pan_cfg.score_thr,
pan_cfg.nms,
pan_cfg.max_per_img)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
mask_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
masks = mask_results['masks']
seg_preds = self.semantic_head.simple_test(x, img_metas, rescale)
results = []
for i in range(len(det_bboxes)):
pan_results = self.panoptic_fusion_head.simple_test(
det_bboxes[i], det_labels[i], masks[i], seg_preds[i])
pan_results = pan_results.int().detach().cpu().numpy()
result = dict(pan_results=pan_results)
results.append(result)
return results
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (dict): The results.
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'.
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'.
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None.
thickness (int): Thickness of lines. Default: 2.
font_size (int): Font size of texts. Default: 13.
win_name (str): The window name. Default: ''.
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`.
"""
img = mmcv.imread(img)
img = img.copy()
pan_results = result['pan_results']
        # process ids in descending order so instance ('thing') segments come first
ids = np.unique(pan_results)[::-1]
legal_indices = ids != self.num_classes # for VOID label
ids = ids[legal_indices]
labels = np.array([id % INSTANCE_OFFSET for id in ids], dtype=np.int64)
segms = (pan_results[None] == ids[:, None, None])
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
img = imshow_det_bboxes(
img,
segms=segms,
labels=labels,
class_names=self.CLASSES,
bbox_color=bbox_color,
text_color=text_color,
mask_color=mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
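# --- Illustrative sketch (added for clarity; not part of the original file)
# of the ``multiclass_nms`` call used in ``simple_test`` above. Scores carry
# one column per class plus a trailing background column, boxes may be shared
# across classes, and the output is (k, 5) detections plus (k,) labels. The
# random inputs and thresholds below are assumptions for demonstration only.
def _demo_multiclass_nms(num_classes=80):
    scores = torch.rand(200, num_classes + 1)  # last column is background
    xy = torch.rand(200, 2) * 100
    wh = torch.rand(200, 2) * 50 + 1
    bboxes = torch.cat([xy, xy + wh], dim=1)  # valid [x1, y1, x2, y2] boxes
    det_bboxes, det_labels = multiclass_nms(
        bboxes,
        scores,
        score_thr=0.05,
        nms_cfg=dict(type='nms', iou_threshold=0.5),
        max_num=100)
    # det_bboxes: (k, 5) as [x1, y1, x2, y2, score]; det_labels: (k,)
    return det_bboxes, det_labels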
mmdetection | mmdetection-master/mmdet/models/detectors/point_rend.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class PointRend(TwoStageDetector):
"""PointRend: Image Segmentation as Rendering
This detector is the implementation of
`PointRend <https://arxiv.org/abs/1912.08193>`_.
"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(PointRend, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/queryinst.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .sparse_rcnn import SparseRCNN
@DETECTORS.register_module()
class QueryInst(SparseRCNN):
r"""Implementation of
`Instances as Queries <http://arxiv.org/abs/2105.01928>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(QueryInst, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/reppoints_detector.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class RepPointsDetector(SingleStageDetector):
"""RepPoints: Point Set Representation for Object Detection.
This detector is the implementation of:
- RepPoints detector (https://arxiv.org/pdf/1904.11490)
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(RepPointsDetector,
self).__init__(backbone, neck, bbox_head, train_cfg, test_cfg,
pretrained, init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/retinanet.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class RetinaNet(SingleStageDetector):
"""Implementation of `RetinaNet <https://arxiv.org/abs/1708.02002>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(RetinaNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
mmdetection | mmdetection-master/mmdet/models/detectors/rpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from inspect import signature
import mmcv
import torch
from mmcv.image import tensor2imgs
from mmdet.core import bbox_mapping
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class RPN(BaseDetector):
"""Implementation of Region Proposal Network."""
def __init__(self,
backbone,
neck,
rpn_head,
train_cfg,
test_cfg,
pretrained=None,
init_cfg=None):
super(RPN, self).__init__(init_cfg)
if pretrained:
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
backbone.pretrained = pretrained
self.backbone = build_backbone(backbone)
self.neck = build_neck(neck) if neck is not None else None
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head.update(train_cfg=rpn_train_cfg)
rpn_head.update(test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def extract_feat(self, img):
"""Extract features.
Args:
            img (torch.Tensor): Image tensor with shape (n, c, h, w).
Returns:
list[torch.Tensor]: Multi-level features that may have
different resolutions.
"""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Dummy forward function."""
x = self.extract_feat(img)
rpn_outs = self.rpn_head(x)
return rpn_outs
def forward_train(self,
img,
img_metas,
gt_bboxes=None,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item contains the ground-truth
                boxes of one image in [tl_x, tl_y, br_x, br_y] format.
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
if (isinstance(self.train_cfg.rpn, dict)
and self.train_cfg.rpn.get('debug', False)):
self.rpn_head.debug_imgs = tensor2imgs(img)
x = self.extract_feat(img)
losses = self.rpn_head.forward_train(x, img_metas, gt_bboxes, None,
gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
            img (torch.Tensor): Input image tensor with shape (N, C, H, W).
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[np.ndarray]: proposals
"""
x = self.extract_feat(img)
        # get origin input shape to support onnx dynamic input shape
if torch.onnx.is_in_onnx_export():
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
if rescale:
for proposals, meta in zip(proposal_list, img_metas):
proposals[:, :4] /= proposals.new_tensor(meta['scale_factor'])
if torch.onnx.is_in_onnx_export():
return proposal_list
return [proposal.cpu().numpy() for proposal in proposal_list]
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[np.ndarray]: proposals
"""
proposal_list = self.rpn_head.aug_test_rpn(
self.extract_feats(imgs), img_metas)
if not rescale:
for proposals, img_meta in zip(proposal_list, img_metas[0]):
img_shape = img_meta['img_shape']
scale_factor = img_meta['scale_factor']
flip = img_meta['flip']
flip_direction = img_meta['flip_direction']
proposals[:, :4] = bbox_mapping(proposals[:, :4], img_shape,
scale_factor, flip,
flip_direction)
return [proposal.cpu().numpy() for proposal in proposal_list]
def show_result(self, data, result, top_k=20, **kwargs):
"""Show RPN proposals on the image.
Args:
data (str or np.ndarray): Image filename or loaded image.
result (Tensor or tuple): The results to draw over `img`
bbox_result or (bbox_result, segm_result).
top_k (int): Plot the first k bboxes only
if set positive. Default: 20
Returns:
np.ndarray: The image with bboxes drawn on it.
"""
if kwargs is not None:
kwargs['colors'] = 'green'
sig = signature(mmcv.imshow_bboxes)
for k in list(kwargs.keys()):
if k not in sig.parameters:
kwargs.pop(k)
mmcv.imshow_bboxes(data, result, top_k=top_k, **kwargs)
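# --- Illustrative sketch (added for clarity; not part of the original file)
# of the signature-filtering idiom used in ``show_result`` above: keyword
# arguments that the drawing function does not accept are dropped, so callers
# passing detector-style kwargs (e.g. ``score_thr``) do not crash it. The toy
# drawing function below is an assumption for demonstration only.
def _demo_filter_kwargs():

    def draw(img, bboxes, colors='green', top_k=-1):
        return img, bboxes, colors, top_k

    kwargs = dict(colors='red', top_k=10, score_thr=0.3)  # score_thr unsupported
    sig = signature(draw)
    for k in list(kwargs.keys()):
        if k not in sig.parameters:
            kwargs.pop(k)
    return draw('demo.jpg', [[0, 0, 10, 10]], **kwargs)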
mmdetection | mmdetection-master/mmdet/models/detectors/scnet.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .cascade_rcnn import CascadeRCNN
@DETECTORS.register_module()
class SCNet(CascadeRCNN):
"""Implementation of `SCNet <https://arxiv.org/abs/2012.10150>`_"""
def __init__(self, **kwargs):
super(SCNet, self).__init__(**kwargs)
mmdetection | mmdetection-master/mmdet/models/detectors/single_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class SingleStageDetector(BaseDetector):
"""Base class for single-stage detectors.
Single-stage detectors directly and densely predict bounding boxes on the
output features of the backbone+neck.
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(SingleStageDetector, self).__init__(init_cfg)
if pretrained:
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
backbone.pretrained = pretrained
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
bbox_head.update(train_cfg=train_cfg)
bbox_head.update(test_cfg=test_cfg)
self.bbox_head = build_head(bbox_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
            gt_bboxes (list[Tensor]): Each item contains the ground-truth
                boxes of one image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
super(SingleStageDetector, self).forward_train(img, img_metas)
x = self.extract_feat(img)
losses = self.bbox_head.forward_train(x, img_metas, gt_bboxes,
gt_labels, gt_bboxes_ignore)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation.
Args:
img (torch.Tensor): Images with shape (N, C, H, W).
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
feat = self.extract_feat(img)
results_list = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in results_list
]
return bbox_results
def aug_test(self, imgs, img_metas, rescale=False):
"""Test function with test time augmentation.
Args:
imgs (list[Tensor]): the outer list indicates test-time
augmentations and inner Tensor should have a shape NxCxHxW,
which contains all images in the batch.
img_metas (list[list[dict]]): the outer list indicates test-time
augs (multiscale, flip, etc.) and the inner list indicates
images in a batch. each dict has image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
assert hasattr(self.bbox_head, 'aug_test'), \
f'{self.bbox_head.__class__.__name__}' \
' does not support test-time augmentation'
feats = self.extract_feats(imgs)
results_list = self.bbox_head.aug_test(
feats, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bboxes, det_labels, self.bbox_head.num_classes)
for det_bboxes, det_labels in results_list
]
return bbox_results
def onnx_export(self, img, img_metas, with_nms=True):
"""Test function without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
outs = self.bbox_head(x)
# get origin input shape to support onnx dynamic shape
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
# get pad input shape to support onnx dynamic shape for exporting
        # `CornerNet` and `CentripetalNet`, for which `pad_shape` is used
        # during inference
img_metas[0]['pad_shape_for_onnx'] = img_shape
if len(outs) == 2:
# add dummy score_factor
outs = (*outs, None)
        # TODO Can we change to `get_bboxes` when `onnx_export` fails
det_bboxes, det_labels = self.bbox_head.onnx_export(
*outs, img_metas, with_nms=with_nms)
return det_bboxes, det_labels
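# --- Illustrative sketch (added for clarity; not part of the original file)
# of what ``bbox2result`` produces in ``simple_test`` / ``aug_test`` above:
# per-image detections are split into one (k_i, 5) numpy array per class. The
# toy detections and ``num_classes=3`` below are assumptions for demonstration.
def _demo_bbox2result(num_classes=3):
    det_bboxes = torch.tensor([[0., 0., 10., 10., 0.9],
                               [5., 5., 20., 20., 0.8]])  # [x1, y1, x2, y2, score]
    det_labels = torch.tensor([0, 2])
    results = bbox2result(det_bboxes, det_labels, num_classes)
    # results[0] holds one box, results[1] is empty, results[2] holds one box
    return results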
mmdetection | mmdetection-master/mmdet/models/detectors/single_stage_instance_seg.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import warnings
import mmcv
import numpy as np
import torch
from mmdet.core.visualization.image import imshow_det_bboxes
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
INF = 1e8
@DETECTORS.register_module()
class SingleStageInstanceSegmentor(BaseDetector):
"""Base class for single-stage instance segmentors."""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
if pretrained:
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
backbone.pretrained = pretrained
super(SingleStageInstanceSegmentor, self).__init__(init_cfg=init_cfg)
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
else:
self.neck = None
if bbox_head is not None:
bbox_head.update(train_cfg=copy.deepcopy(train_cfg))
bbox_head.update(test_cfg=copy.deepcopy(test_cfg))
self.bbox_head = build_head(bbox_head)
else:
self.bbox_head = None
assert mask_head, f'`mask_head` must ' \
f'be implemented in {self.__class__.__name__}'
mask_head.update(train_cfg=copy.deepcopy(train_cfg))
mask_head.update(test_cfg=copy.deepcopy(test_cfg))
self.mask_head = build_head(mask_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
def extract_feat(self, img):
"""Directly extract features from the backbone and neck."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
raise NotImplementedError(
f'`forward_dummy` is not implemented in {self.__class__.__name__}')
def forward_train(self,
img,
img_metas,
gt_masks,
gt_labels,
gt_bboxes=None,
gt_bboxes_ignore=None,
**kwargs):
"""
Args:
img (Tensor): Input images of shape (B, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_masks (list[:obj:`BitmapMasks`] | None) : The segmentation
masks for each box.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes (list[Tensor]): Each item is the truth boxes
of each image in [tl_x, tl_y, br_x, br_y] format.
Default: None.
gt_bboxes_ignore (list[Tensor] | None): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
gt_masks = [
gt_mask.to_tensor(dtype=torch.bool, device=img.device)
for gt_mask in gt_masks
]
x = self.extract_feat(img)
losses = dict()
# CondInst and YOLACT have bbox_head
if self.bbox_head:
# bbox_head_preds is a tuple
bbox_head_preds = self.bbox_head(x)
# positive_infos is a list of obj:`InstanceData`
# It contains the information about the positive samples
# CondInst, YOLACT
det_losses, positive_infos = self.bbox_head.loss(
*bbox_head_preds,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels,
gt_masks=gt_masks,
img_metas=img_metas,
gt_bboxes_ignore=gt_bboxes_ignore,
**kwargs)
losses.update(det_losses)
else:
positive_infos = None
mask_loss = self.mask_head.forward_train(
x,
gt_labels,
gt_masks,
img_metas,
positive_infos=positive_infos,
gt_bboxes=gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
**kwargs)
# avoid loss override
assert not set(mask_loss.keys()) & set(losses.keys())
losses.update(mask_loss)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation.
Args:
img (torch.Tensor): Images with shape (B, C, H, W).
img_metas (list[dict]): List of image information.
rescale (bool, optional): Whether to rescale the results.
Defaults to False.
Returns:
list(tuple): Formatted bbox and mask results of multiple \
images. The outer list corresponds to each image. \
Each tuple contains two type of results of single image:
- bbox_results (list[np.ndarray]): BBox results of
single image. The list corresponds to each class.
each ndarray has a shape (N, 5), N is the number of
bboxes with this category, and last dimension
5 arrange as (x1, y1, x2, y2, scores).
- mask_results (list[np.ndarray]): Mask results of
single image. The list corresponds to each class.
each ndarray has a shape (N, img_h, img_w), N
is the number of masks with this category.
"""
feat = self.extract_feat(img)
if self.bbox_head:
outs = self.bbox_head(feat)
# results_list is list[obj:`InstanceData`]
results_list = self.bbox_head.get_results(
*outs, img_metas=img_metas, cfg=self.test_cfg, rescale=rescale)
else:
results_list = None
results_list = self.mask_head.simple_test(
feat, img_metas, rescale=rescale, instances_list=results_list)
format_results_list = []
for results in results_list:
format_results_list.append(self.format_results(results))
return format_results_list
def format_results(self, results):
"""Format the model predictions according to the interface with
dataset.
Args:
results (:obj:`InstanceData`): Processed
results of single images. Usually contains
following keys.
- scores (Tensor): Classification scores, has shape
(num_instance,)
- labels (Tensor): Has shape (num_instances,).
- masks (Tensor): Processed mask results, has
shape (num_instances, h, w).
Returns:
            tuple: Formatted bbox and mask results. It contains two items:
- bbox_results (list[np.ndarray]): BBox results of
single image. The list corresponds to each class.
each ndarray has a shape (N, 5), N is the number of
bboxes with this category, and last dimension
5 arrange as (x1, y1, x2, y2, scores).
- mask_results (list[np.ndarray]): Mask results of
single image. The list corresponds to each class.
each ndarray has shape (N, img_h, img_w), N
is the number of masks with this category.
"""
data_keys = results.keys()
assert 'scores' in data_keys
assert 'labels' in data_keys
assert 'masks' in data_keys, \
'results should contain ' \
            'masks when formatting the results '
mask_results = [[] for _ in range(self.mask_head.num_classes)]
num_masks = len(results)
if num_masks == 0:
bbox_results = [
np.zeros((0, 5), dtype=np.float32)
for _ in range(self.mask_head.num_classes)
]
return bbox_results, mask_results
labels = results.labels.detach().cpu().numpy()
if 'bboxes' not in results:
# create dummy bbox results to store the scores
results.bboxes = results.scores.new_zeros(len(results), 4)
det_bboxes = torch.cat([results.bboxes, results.scores[:, None]],
dim=-1)
det_bboxes = det_bboxes.detach().cpu().numpy()
bbox_results = [
det_bboxes[labels == i, :]
for i in range(self.mask_head.num_classes)
]
masks = results.masks.detach().cpu().numpy()
for idx in range(num_masks):
mask = masks[idx]
mask_results[labels[idx]].append(mask)
return bbox_results, mask_results
def aug_test(self, imgs, img_metas, rescale=False):
raise NotImplementedError
def show_result(self,
img,
result,
score_thr=0.3,
bbox_color=(72, 101, 241),
text_color=(72, 101, 241),
mask_color=None,
thickness=2,
font_size=13,
win_name='',
show=False,
wait_time=0,
out_file=None):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
result (tuple): Format bbox and mask results.
It contains two items:
- bbox_results (list[np.ndarray]): BBox results of
single image. The list corresponds to each class.
each ndarray has a shape (N, 5), N is the number of
bboxes with this category, and last dimension
5 arrange as (x1, y1, x2, y2, scores).
- mask_results (list[np.ndarray]): Mask results of
single image. The list corresponds to each class.
each ndarray has shape (N, img_h, img_w), N
is the number of masks with this category.
score_thr (float, optional): Minimum score of bboxes to be shown.
Default: 0.3.
bbox_color (str or tuple(int) or :obj:`Color`):Color of bbox lines.
The tuple of color should be in BGR order. Default: 'green'
text_color (str or tuple(int) or :obj:`Color`):Color of texts.
The tuple of color should be in BGR order. Default: 'green'
mask_color (None or str or tuple(int) or :obj:`Color`):
Color of masks. The tuple of color should be in BGR order.
Default: None
thickness (int): Thickness of lines. Default: 2
font_size (int): Font size of texts. Default: 13
win_name (str): The window name. Default: ''
wait_time (float): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
Returns:
img (Tensor): Only if not `show` or `out_file`
"""
assert isinstance(result, tuple)
bbox_result, mask_result = result
bboxes = np.vstack(bbox_result)
img = mmcv.imread(img)
img = img.copy()
labels = [
np.full(bbox.shape[0], i, dtype=np.int32)
for i, bbox in enumerate(bbox_result)
]
labels = np.concatenate(labels)
if len(labels) == 0:
bboxes = np.zeros([0, 5])
masks = np.zeros([0, 0, 0])
# draw segmentation masks
else:
masks = mmcv.concat_list(mask_result)
if isinstance(masks[0], torch.Tensor):
masks = torch.stack(masks, dim=0).detach().cpu().numpy()
else:
masks = np.stack(masks, axis=0)
# dummy bboxes
if bboxes[:, :4].sum() == 0:
num_masks = len(bboxes)
x_any = masks.any(axis=1)
y_any = masks.any(axis=2)
for idx in range(num_masks):
x = np.where(x_any[idx, :])[0]
y = np.where(y_any[idx, :])[0]
if len(x) > 0 and len(y) > 0:
bboxes[idx, :4] = np.array(
[x[0], y[0], x[-1] + 1, y[-1] + 1],
dtype=np.float32)
# if out_file specified, do not show image in window
if out_file is not None:
show = False
# draw bounding boxes
img = imshow_det_bboxes(
img,
bboxes,
labels,
masks,
class_names=self.CLASSES,
score_thr=score_thr,
bbox_color=bbox_color,
text_color=text_color,
mask_color=mask_color,
thickness=thickness,
font_size=font_size,
win_name=win_name,
show=show,
wait_time=wait_time,
out_file=out_file)
if not (show or out_file):
return img
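# --- Illustrative sketch (added for clarity; not part of the original file)
# of the mask-to-box fallback at the end of ``show_result`` above: when only
# dummy all-zero boxes are available, a tight box is recovered from each
# binary mask via its row/column occupancy. The toy mask is an assumption.
def _demo_bbox_from_mask():
    mask = np.zeros((1, 8, 8), dtype=bool)
    mask[0, 2:5, 3:7] = True  # a single 3 x 4 blob
    x_any = mask.any(axis=1)  # which columns contain foreground
    y_any = mask.any(axis=2)  # which rows contain foreground
    x = np.where(x_any[0, :])[0]
    y = np.where(y_any[0, :])[0]
    bbox = np.array([x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32)
    # bbox == [3., 2., 7., 5.] in [x1, y1, x2, y2] format
    return bbox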
mmdetection | mmdetection-master/mmdet/models/detectors/solo.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLO(SingleStageInstanceSegmentor):
"""`SOLO: Segmenting Objects by Locations
<https://arxiv.org/abs/1912.04488>`_
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
pretrained=None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
pretrained=pretrained)
| 870 | 27.096774 | 67 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/solov2.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage_instance_seg import SingleStageInstanceSegmentor
@DETECTORS.register_module()
class SOLOv2(SingleStageInstanceSegmentor):
"""`SOLOv2: Dynamic and Fast Instance Segmentation
<https://arxiv.org/abs/2003.10152>`_
"""
def __init__(self,
backbone,
neck=None,
bbox_head=None,
mask_head=None,
train_cfg=None,
test_cfg=None,
init_cfg=None,
pretrained=None):
super().__init__(
backbone=backbone,
neck=neck,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
init_cfg=init_cfg,
pretrained=pretrained)
| 881 | 27.451613 | 67 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/sparse_rcnn.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .two_stage import TwoStageDetector
@DETECTORS.register_module()
class SparseRCNN(TwoStageDetector):
r"""Implementation of `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_"""
def __init__(self, *args, **kwargs):
super(SparseRCNN, self).__init__(*args, **kwargs)
assert self.with_rpn, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""Forward function of SparseR-CNN and QueryInst in train stage.
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
            gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (List[Tensor], optional) : Segmentation masks for
each box. This is required to train QueryInst.
proposals (List[Tensor], optional): override rpn proposals with
custom proposals. Use when `with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
assert proposals is None, 'Sparse R-CNN and QueryInst ' \
'do not support external proposals'
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.forward_train(x, img_metas)
roi_losses = self.roi_head.forward_train(
x,
proposal_boxes,
proposal_features,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_masks=gt_masks,
imgs_whwh=imgs_whwh)
return roi_losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test time augmentation.
Args:
imgs (list[torch.Tensor]): List of multiple images
img_metas (list[dict]): List of image information.
rescale (bool): Whether to rescale the results.
Defaults to False.
Returns:
list[list[np.ndarray]]: BBox results of each image and classes.
The outer list corresponds to each image. The inner list
corresponds to each class.
"""
x = self.extract_feat(img)
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, img_metas)
results = self.roi_head.simple_test(
x,
proposal_boxes,
proposal_features,
img_metas,
imgs_whwh=imgs_whwh,
rescale=rescale)
return results
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
# backbone
x = self.extract_feat(img)
# rpn
num_imgs = len(img)
dummy_img_metas = [
dict(img_shape=(800, 1333, 3)) for _ in range(num_imgs)
]
proposal_boxes, proposal_features, imgs_whwh = \
self.rpn_head.simple_test_rpn(x, dummy_img_metas)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposal_boxes,
proposal_features,
dummy_img_metas)
return roi_outs
| 4,406 | 38.348214 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/tood.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class TOOD(SingleStageDetector):
r"""Implementation of `TOOD: Task-aligned One-stage Object Detection.
<https://arxiv.org/abs/2108.07755>`_."""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(TOOD, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def set_epoch(self, epoch):
self.bbox_head.epoch = epoch
| 753 | 30.416667 | 73 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/trident_faster_rcnn.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .faster_rcnn import FasterRCNN
@DETECTORS.register_module()
class TridentFasterRCNN(FasterRCNN):
"""Implementation of `TridentNet <https://arxiv.org/abs/1901.01892>`_"""
def __init__(self,
backbone,
rpn_head,
roi_head,
train_cfg,
test_cfg,
neck=None,
pretrained=None,
init_cfg=None):
super(TridentFasterRCNN, self).__init__(
backbone=backbone,
neck=neck,
rpn_head=rpn_head,
roi_head=roi_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
assert self.backbone.num_branch == self.roi_head.num_branch
assert self.backbone.test_branch_idx == self.roi_head.test_branch_idx
self.num_branch = self.backbone.num_branch
self.test_branch_idx = self.backbone.test_branch_idx
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
        num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        # define the trident metas unconditionally so that they are also
        # available when external proposals are given
        trident_img_metas = img_metas * num_branch
        if proposals is None:
            proposal_list = self.rpn_head.simple_test_rpn(x, trident_img_metas)
        else:
            proposal_list = proposals
        return self.roi_head.simple_test(
            x, proposal_list, trident_img_metas, rescale=rescale)
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
num_branch = (self.num_branch if self.test_branch_idx == -1 else 1)
        trident_img_metas = [img_meta * num_branch for img_meta in img_metas]
proposal_list = self.rpn_head.aug_test_rpn(x, trident_img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
def forward_train(self, img, img_metas, gt_bboxes, gt_labels, **kwargs):
"""make copies of img and gts to fit multi-branch."""
trident_gt_bboxes = tuple(gt_bboxes * self.num_branch)
trident_gt_labels = tuple(gt_labels * self.num_branch)
trident_img_metas = tuple(img_metas * self.num_branch)
return super(TridentFasterRCNN,
self).forward_train(img, trident_img_metas,
trident_gt_bboxes, trident_gt_labels)
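# A small sketch of the branch duplication used in `forward_train` above:
# plain Python list multiplication repeats the per-image entries so that each
# trident branch receives the same ground truth. The values below are
# illustrative assumptions only.
if __name__ == '__main__':
    import torch
    num_branch = 3
    img_metas = [dict(img_shape=(800, 1333, 3))]
    gt_labels = [torch.tensor([1, 2])]
    trident_img_metas = tuple(img_metas * num_branch)
    trident_gt_labels = tuple(gt_labels * num_branch)
    print(len(trident_img_metas), len(trident_gt_labels))  # 3 3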
| 2,866 | 39.380282 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/two_stage.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
from ..builder import DETECTORS, build_backbone, build_head, build_neck
from .base import BaseDetector
@DETECTORS.register_module()
class TwoStageDetector(BaseDetector):
"""Base class for two-stage detectors.
    Two-stage detectors typically consist of a region proposal network and a
    task-specific regression head.
"""
def __init__(self,
backbone,
neck=None,
rpn_head=None,
roi_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(TwoStageDetector, self).__init__(init_cfg)
if pretrained:
warnings.warn('DeprecationWarning: pretrained is deprecated, '
'please use "init_cfg" instead')
backbone.pretrained = pretrained
self.backbone = build_backbone(backbone)
if neck is not None:
self.neck = build_neck(neck)
if rpn_head is not None:
rpn_train_cfg = train_cfg.rpn if train_cfg is not None else None
rpn_head_ = rpn_head.copy()
rpn_head_.update(train_cfg=rpn_train_cfg, test_cfg=test_cfg.rpn)
self.rpn_head = build_head(rpn_head_)
if roi_head is not None:
# update train and test cfg here for now
# TODO: refactor assigner & sampler
rcnn_train_cfg = train_cfg.rcnn if train_cfg is not None else None
roi_head.update(train_cfg=rcnn_train_cfg)
roi_head.update(test_cfg=test_cfg.rcnn)
roi_head.pretrained = pretrained
self.roi_head = build_head(roi_head)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
@property
def with_rpn(self):
"""bool: whether the detector has RPN"""
return hasattr(self, 'rpn_head') and self.rpn_head is not None
@property
def with_roi_head(self):
"""bool: whether the detector has a RoI head"""
return hasattr(self, 'roi_head') and self.roi_head is not None
def extract_feat(self, img):
"""Directly extract features from the backbone+neck."""
x = self.backbone(img)
if self.with_neck:
x = self.neck(x)
return x
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
outs = ()
# backbone
x = self.extract_feat(img)
# rpn
if self.with_rpn:
rpn_outs = self.rpn_head(x)
outs = outs + (rpn_outs, )
proposals = torch.randn(1000, 4).to(img.device)
# roi_head
roi_outs = self.roi_head.forward_dummy(x, proposals)
outs = outs + (roi_outs, )
return outs
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
proposals=None,
**kwargs):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
proposals : override rpn proposals with custom proposals. Use when
`with_rpn` is False.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
x = self.extract_feat(img)
losses = dict()
# RPN forward and loss
if self.with_rpn:
proposal_cfg = self.train_cfg.get('rpn_proposal',
self.test_cfg.rpn)
rpn_losses, proposal_list = self.rpn_head.forward_train(
x,
img_metas,
gt_bboxes,
gt_labels=None,
gt_bboxes_ignore=gt_bboxes_ignore,
proposal_cfg=proposal_cfg,
**kwargs)
losses.update(rpn_losses)
else:
proposal_list = proposals
roi_losses = self.roi_head.forward_train(x, img_metas, proposal_list,
gt_bboxes, gt_labels,
gt_bboxes_ignore, gt_masks,
**kwargs)
losses.update(roi_losses)
return losses
async def async_simple_test(self,
img,
img_meta,
proposals=None,
rescale=False):
"""Async test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
proposal_list = await self.rpn_head.async_simple_test_rpn(
x, img_meta)
else:
proposal_list = proposals
return await self.roi_head.async_simple_test(
x, proposal_list, img_meta, rescale=rescale)
def simple_test(self, img, img_metas, proposals=None, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
x = self.extract_feat(img)
if proposals is None:
proposal_list = self.rpn_head.simple_test_rpn(x, img_metas)
else:
proposal_list = proposals
return self.roi_head.simple_test(
x, proposal_list, img_metas, rescale=rescale)
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
x = self.extract_feats(imgs)
proposal_list = self.rpn_head.aug_test_rpn(x, img_metas)
return self.roi_head.aug_test(
x, proposal_list, img_metas, rescale=rescale)
def onnx_export(self, img, img_metas):
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
x = self.extract_feat(img)
proposals = self.rpn_head.onnx_export(x, img_metas)
if hasattr(self.roi_head, 'onnx_export'):
return self.roi_head.onnx_export(x, proposals, img_metas)
else:
raise NotImplementedError(
f'{self.__class__.__name__} can not '
f'be exported to ONNX. Please refer to the '
f'list of supported models,'
f'https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html#list-of-supported-models-exportable-to-onnx' # noqa E501
)
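# A minimal sketch of the input format documented in `forward_train`; the
# image size, meta keys and box values below are illustrative assumptions,
# and a detector built from a config would still be needed to run them.
if __name__ == '__main__':
    img = torch.rand(2, 3, 800, 1216)
    img_metas = [
        dict(img_shape=(800, 1216, 3), scale_factor=1.0, flip=False)
        for _ in range(2)
    ]
    gt_bboxes = [torch.tensor([[10., 20., 200., 240.]]) for _ in range(2)]
    gt_labels = [torch.tensor([3]) for _ in range(2)]
    # losses = detector.forward_train(img, img_metas, gt_bboxes, gt_labels)
    # where `detector` is a built TwoStageDetector instance (not shown here)
    print(img.shape, img_metas[0], gt_bboxes[0], gt_labels[0])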
| 7,703 | 35.339623 | 148 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/vfnet.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class VFNet(SingleStageDetector):
"""Implementation of `VarifocalNet
(VFNet).<https://arxiv.org/abs/2008.13367>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(VFNet, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
| 658 | 30.380952 | 73 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/yolact.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result
from ..builder import DETECTORS, build_head
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLACT(SingleStageDetector):
"""Implementation of `YOLACT <https://arxiv.org/abs/1904.02689>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
segm_head,
mask_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLACT, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
self.segm_head = build_head(segm_head)
self.mask_head = build_head(mask_head)
def forward_dummy(self, img):
"""Used for computing network flops.
See `mmdetection/tools/analysis_tools/get_flops.py`
"""
feat = self.extract_feat(img)
bbox_outs = self.bbox_head(feat)
prototypes = self.mask_head.forward_dummy(feat[0])
return (bbox_outs, prototypes)
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
img (Tensor): of shape (N, C, H, W) encoding input images.
Typically these should be mean centered and std scaled.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# convert Bitmap mask or Polygon Mask to Tensor here
gt_masks = [
gt_mask.to_tensor(dtype=torch.uint8, device=img.device)
for gt_mask in gt_masks
]
x = self.extract_feat(img)
cls_score, bbox_pred, coeff_pred = self.bbox_head(x)
bbox_head_loss_inputs = (cls_score, bbox_pred) + (gt_bboxes, gt_labels,
img_metas)
losses, sampling_results = self.bbox_head.loss(
*bbox_head_loss_inputs, gt_bboxes_ignore=gt_bboxes_ignore)
segm_head_outs = self.segm_head(x[0])
loss_segm = self.segm_head.loss(segm_head_outs, gt_masks, gt_labels)
losses.update(loss_segm)
mask_pred = self.mask_head(x[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
loss_mask = self.mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
losses.update(loss_mask)
# check NaN and Inf
for loss_name in losses.keys():
assert torch.isfinite(torch.stack(losses[loss_name]))\
.all().item(), '{} becomes infinite or NaN!'\
.format(loss_name)
return losses
def simple_test(self, img, img_metas, rescale=False):
"""Test function without test-time augmentation."""
feat = self.extract_feat(img)
det_bboxes, det_labels, det_coeffs = self.bbox_head.simple_test(
feat, img_metas, rescale=rescale)
bbox_results = [
bbox2result(det_bbox, det_label, self.bbox_head.num_classes)
for det_bbox, det_label in zip(det_bboxes, det_labels)
]
segm_results = self.mask_head.simple_test(
feat,
det_bboxes,
det_labels,
det_coeffs,
img_metas,
rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, imgs, img_metas, rescale=False):
"""Test with augmentations."""
raise NotImplementedError(
'YOLACT does not support test-time augmentation')
| 4,728 | 38.082645 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/yolo.py | # Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import torch
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOV3(SingleStageDetector):
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLOV3, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
def onnx_export(self, img, img_metas):
"""Test function for exporting to ONNX, without test time augmentation.
Args:
img (torch.Tensor): input images.
img_metas (list[dict]): List of image information.
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
x = self.extract_feat(img)
outs = self.bbox_head.forward(x)
# get shape as tensor
img_shape = torch._shape_as_tensor(img)[2:]
img_metas[0]['img_shape_for_onnx'] = img_shape
det_bboxes, det_labels = self.bbox_head.onnx_export(*outs, img_metas)
return det_bboxes, det_labels
| 1,382 | 31.162791 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/yolof.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOF(SingleStageDetector):
r"""Implementation of `You Only Look One-level Feature
<https://arxiv.org/abs/2103.09460>`_"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(YOLOF, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
| 670 | 30.952381 | 73 | py |
mmdetection | mmdetection-master/mmdet/models/detectors/yolox.py | # Copyright (c) OpenMMLab. All rights reserved.
import random
import torch
import torch.distributed as dist
import torch.nn.functional as F
from mmcv.runner import get_dist_info
from ...utils import log_img_scale
from ..builder import DETECTORS
from .single_stage import SingleStageDetector
@DETECTORS.register_module()
class YOLOX(SingleStageDetector):
r"""Implementation of `YOLOX: Exceeding YOLO Series in 2021
<https://arxiv.org/abs/2107.08430>`_
Note: Considering the trade-off between training speed and accuracy,
    multi-scale training is temporarily kept. A more elegant implementation
    will be adopted in the future.
Args:
backbone (nn.Module): The backbone module.
neck (nn.Module): The neck module.
bbox_head (nn.Module): The bbox head module.
train_cfg (obj:`ConfigDict`, optional): The training config
of YOLOX. Default: None.
test_cfg (obj:`ConfigDict`, optional): The testing config
of YOLOX. Default: None.
pretrained (str, optional): model pretrained path.
Default: None.
input_size (tuple): The model default input image size. The shape
order should be (height, width). Default: (640, 640).
size_multiplier (int): Image size multiplication factor.
Default: 32.
        random_size_range (tuple): The multi-scale random range during
            multi-scale training. The sampled value is multiplied by
            ``size_multiplier`` to obtain the real training image size.
            Default: (15, 25).
random_size_interval (int): The iter interval of change
image size. Default: 10.
init_cfg (dict, optional): Initialization config dict.
Default: None.
"""
def __init__(self,
backbone,
neck,
bbox_head,
train_cfg=None,
test_cfg=None,
pretrained=None,
input_size=(640, 640),
size_multiplier=32,
random_size_range=(15, 25),
random_size_interval=10,
init_cfg=None):
super(YOLOX, self).__init__(backbone, neck, bbox_head, train_cfg,
test_cfg, pretrained, init_cfg)
log_img_scale(input_size, skip_square=True)
self.rank, self.world_size = get_dist_info()
self._default_input_size = input_size
self._input_size = input_size
self._random_size_range = random_size_range
self._random_size_interval = random_size_interval
self._size_multiplier = size_multiplier
self._progress_in_iter = 0
def forward_train(self,
img,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None):
"""
Args:
img (Tensor): Input images of shape (N, C, H, W).
Typically these should be mean centered and std scaled.
img_metas (list[dict]): A List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
:class:`mmdet.datasets.pipelines.Collect`.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): Specify which bounding
boxes can be ignored when computing the loss.
Returns:
dict[str, Tensor]: A dictionary of loss components.
"""
# Multi-scale training
img, gt_bboxes = self._preprocess(img, gt_bboxes)
losses = super(YOLOX, self).forward_train(img, img_metas, gt_bboxes,
gt_labels, gt_bboxes_ignore)
# random resizing
if (self._progress_in_iter + 1) % self._random_size_interval == 0:
self._input_size = self._random_resize(device=img.device)
self._progress_in_iter += 1
return losses
def _preprocess(self, img, gt_bboxes):
scale_y = self._input_size[0] / self._default_input_size[0]
scale_x = self._input_size[1] / self._default_input_size[1]
if scale_x != 1 or scale_y != 1:
img = F.interpolate(
img,
size=self._input_size,
mode='bilinear',
align_corners=False)
for gt_bbox in gt_bboxes:
gt_bbox[..., 0::2] = gt_bbox[..., 0::2] * scale_x
gt_bbox[..., 1::2] = gt_bbox[..., 1::2] * scale_y
return img, gt_bboxes
def _random_resize(self, device):
tensor = torch.LongTensor(2).to(device)
if self.rank == 0:
size = random.randint(*self._random_size_range)
aspect_ratio = float(
self._default_input_size[1]) / self._default_input_size[0]
size = (self._size_multiplier * size,
self._size_multiplier * int(aspect_ratio * size))
tensor[0] = size[0]
tensor[1] = size[1]
if self.world_size > 1:
dist.barrier()
dist.broadcast(tensor, 0)
input_size = (tensor[0].item(), tensor[1].item())
return input_size
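# A standalone sketch of the multi-scale step performed by `_preprocess`:
# the image is resized to the randomly chosen input size and the ground-truth
# boxes are rescaled accordingly. The sizes and box values below are
# illustrative assumptions.
if __name__ == '__main__':
    default_size, new_size = (640, 640), (480, 800)  # (height, width)
    img = torch.rand(1, 3, *default_size)
    gt_bboxes = [torch.tensor([[32., 48., 256., 320.]])]
    scale_y = new_size[0] / default_size[0]
    scale_x = new_size[1] / default_size[1]
    img = F.interpolate(
        img, size=new_size, mode='bilinear', align_corners=False)
    for gt_bbox in gt_bboxes:
        gt_bbox[..., 0::2] *= scale_x  # x1, x2
        gt_bbox[..., 1::2] *= scale_y  # y1, y2
    print(img.shape, gt_bboxes[0])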
| 5,519 | 39.291971 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/losses/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .accuracy import Accuracy, accuracy
from .ae_loss import AssociativeEmbeddingLoss
from .balanced_l1_loss import BalancedL1Loss, balanced_l1_loss
from .cross_entropy_loss import (CrossEntropyLoss, binary_cross_entropy,
cross_entropy, mask_cross_entropy)
from .dice_loss import DiceLoss
from .focal_loss import FocalLoss, sigmoid_focal_loss
from .gaussian_focal_loss import GaussianFocalLoss
from .gfocal_loss import DistributionFocalLoss, QualityFocalLoss
from .ghm_loss import GHMC, GHMR
from .iou_loss import (BoundedIoULoss, CIoULoss, DIoULoss, GIoULoss, IoULoss,
bounded_iou_loss, iou_loss)
from .kd_loss import KnowledgeDistillationKLDivLoss
from .mse_loss import MSELoss, mse_loss
from .pisa_loss import carl_loss, isr_p
from .seesaw_loss import SeesawLoss
from .smooth_l1_loss import L1Loss, SmoothL1Loss, l1_loss, smooth_l1_loss
from .utils import reduce_loss, weight_reduce_loss, weighted_loss
from .varifocal_loss import VarifocalLoss
__all__ = [
'accuracy', 'Accuracy', 'cross_entropy', 'binary_cross_entropy',
'mask_cross_entropy', 'CrossEntropyLoss', 'sigmoid_focal_loss',
'FocalLoss', 'smooth_l1_loss', 'SmoothL1Loss', 'balanced_l1_loss',
'BalancedL1Loss', 'mse_loss', 'MSELoss', 'iou_loss', 'bounded_iou_loss',
'IoULoss', 'BoundedIoULoss', 'GIoULoss', 'DIoULoss', 'CIoULoss', 'GHMC',
'GHMR', 'reduce_loss', 'weight_reduce_loss', 'weighted_loss', 'L1Loss',
'l1_loss', 'isr_p', 'carl_loss', 'AssociativeEmbeddingLoss',
'GaussianFocalLoss', 'QualityFocalLoss', 'DistributionFocalLoss',
'VarifocalLoss', 'KnowledgeDistillationKLDivLoss', 'SeesawLoss', 'DiceLoss'
]
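# A minimal sketch of constructing one of the losses above through the model
# registry; `build_loss` lives in `mmdet.models.builder` and the FocalLoss
# settings shown are illustrative assumptions.
if __name__ == '__main__':
    from mmdet.models.builder import build_loss
    loss_cls = build_loss(
        dict(type='FocalLoss', use_sigmoid=True, gamma=2.0, loss_weight=1.0))
    print(loss_cls)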
| 1,721 | 51.181818 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/losses/accuracy.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
@mmcv.jit(coderize=True)
def accuracy(pred, target, topk=1, thresh=None):
"""Calculate accuracy according to the prediction and target.
Args:
pred (torch.Tensor): The model prediction, shape (N, num_class)
target (torch.Tensor): The target of each prediction, shape (N, )
topk (int | tuple[int], optional): If the predictions in ``topk``
matches the target, the predictions will be regarded as
correct ones. Defaults to 1.
thresh (float, optional): If not None, predictions with scores under
this threshold are considered incorrect. Default to None.
Returns:
float | tuple[float]: If the input ``topk`` is a single integer,
the function will return a single float as accuracy. If
``topk`` is a tuple containing multiple integers, the
function will return a tuple containing accuracies of
each ``topk`` number.
"""
assert isinstance(topk, (int, tuple))
if isinstance(topk, int):
topk = (topk, )
return_single = True
else:
return_single = False
maxk = max(topk)
if pred.size(0) == 0:
accu = [pred.new_tensor(0.) for i in range(len(topk))]
return accu[0] if return_single else accu
assert pred.ndim == 2 and target.ndim == 1
assert pred.size(0) == target.size(0)
assert maxk <= pred.size(1), \
f'maxk {maxk} exceeds pred dimension {pred.size(1)}'
pred_value, pred_label = pred.topk(maxk, dim=1)
pred_label = pred_label.t() # transpose to shape (maxk, N)
correct = pred_label.eq(target.view(1, -1).expand_as(pred_label))
if thresh is not None:
# Only prediction values larger than thresh are counted as correct
correct = correct & (pred_value > thresh).t()
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / pred.size(0)))
return res[0] if return_single else res
class Accuracy(nn.Module):
def __init__(self, topk=(1, ), thresh=None):
"""Module to calculate the accuracy.
Args:
topk (tuple, optional): The criterion used to calculate the
accuracy. Defaults to (1,).
thresh (float, optional): If not None, predictions with scores
under this threshold are considered incorrect. Default to None.
"""
super().__init__()
self.topk = topk
self.thresh = thresh
def forward(self, pred, target):
"""Forward function to calculate accuracy.
Args:
pred (torch.Tensor): Prediction of models.
target (torch.Tensor): Target for each prediction.
Returns:
tuple[float]: The accuracies under different topk criterions.
"""
return accuracy(pred, target, self.topk, self.thresh)
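# A minimal sketch of `accuracy` on random inputs; the batch size, class
# count and topk values below are illustrative assumptions.
if __name__ == '__main__':
    import torch
    pred = torch.randn(8, 10)             # 8 samples, 10 classes
    target = torch.randint(0, 10, (8, ))  # ground-truth class indices
    top1, top5 = accuracy(pred, target, topk=(1, 5))
    print(f'top-1: {top1.item():.2f}%, top-5: {top5.item():.2f}%')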
| 2,990 | 36.3875 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/losses/ae_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
@mmcv.jit(derivate=True, coderize=True)
def ae_loss_per_image(tl_preds, br_preds, match):
"""Associative Embedding Loss in one image.
    Associative Embedding Loss consists of two parts: pull loss and push loss.
    Pull loss makes embedding vectors from the same object closer to each
    other. Push loss distinguishes embedding vectors from different objects,
    and makes the gap between them large enough.
    During computation, there are usually 3 cases:
        - no object in image: both pull loss and push loss will be 0.
        - one object in image: push loss will be 0 and pull loss is computed
            by the two corners of the only object.
- more than one objects in image: pull loss is computed by corner pairs
from each object, push loss is computed by each object with all
other objects. We use confusion matrix with 0 in diagonal to
compute the push loss.
Args:
tl_preds (tensor): Embedding feature map of left-top corner.
        br_preds (tensor): Embedding feature map of bottom-right corner.
match (list): Downsampled coordinates pair of each ground truth box.
"""
tl_list, br_list, me_list = [], [], []
if len(match) == 0: # no object in image
pull_loss = tl_preds.sum() * 0.
push_loss = tl_preds.sum() * 0.
else:
for m in match:
[tl_y, tl_x], [br_y, br_x] = m
tl_e = tl_preds[:, tl_y, tl_x].view(-1, 1)
br_e = br_preds[:, br_y, br_x].view(-1, 1)
tl_list.append(tl_e)
br_list.append(br_e)
me_list.append((tl_e + br_e) / 2.0)
tl_list = torch.cat(tl_list)
br_list = torch.cat(br_list)
me_list = torch.cat(me_list)
assert tl_list.size() == br_list.size()
# N is object number in image, M is dimension of embedding vector
N, M = tl_list.size()
pull_loss = (tl_list - me_list).pow(2) + (br_list - me_list).pow(2)
pull_loss = pull_loss.sum() / N
margin = 1 # exp setting of CornerNet, details in section 3.3 of paper
# confusion matrix of push loss
conf_mat = me_list.expand((N, N, M)).permute(1, 0, 2) - me_list
conf_weight = 1 - torch.eye(N).type_as(me_list)
conf_mat = conf_weight * (margin - conf_mat.sum(-1).abs())
if N > 1: # more than one object in current image
push_loss = F.relu(conf_mat).sum() / (N * (N - 1))
else:
push_loss = tl_preds.sum() * 0.
return pull_loss, push_loss
@LOSSES.register_module()
class AssociativeEmbeddingLoss(nn.Module):
"""Associative Embedding Loss.
More details can be found in
`Associative Embedding <https://arxiv.org/abs/1611.05424>`_ and
`CornerNet <https://arxiv.org/abs/1808.01244>`_ .
Code is modified from `kp_utils.py <https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L180>`_ # noqa: E501
Args:
pull_weight (float): Loss weight for corners from same object.
push_weight (float): Loss weight for corners from different object.
"""
def __init__(self, pull_weight=0.25, push_weight=0.25):
super(AssociativeEmbeddingLoss, self).__init__()
self.pull_weight = pull_weight
self.push_weight = push_weight
def forward(self, pred, target, match):
"""Forward function."""
batch = pred.size(0)
pull_all, push_all = 0.0, 0.0
for i in range(batch):
pull, push = ae_loss_per_image(pred[i], target[i], match[i])
pull_all += self.pull_weight * pull
push_all += self.push_weight * push
return pull_all, push_all
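# A minimal sketch of `ae_loss_per_image` on dummy data; the embedding
# dimension, feature-map size and corner coordinates below are illustrative
# assumptions.
if __name__ == '__main__':
    embedding_dim, height, width = 1, 8, 8
    tl_preds = torch.rand(embedding_dim, height, width)
    br_preds = torch.rand(embedding_dim, height, width)
    # two ground-truth boxes, each given as [[tl_y, tl_x], [br_y, br_x]]
    match = [[[1, 1], [4, 5]], [[2, 3], [6, 7]]]
    pull, push = ae_loss_per_image(tl_preds, br_preds, match)
    print(pull.item(), push.item())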
| 3,857 | 36.096154 | 143 | py |
mmdetection | mmdetection-master/mmdet/models/losses/balanced_l1_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def balanced_l1_loss(pred,
target,
beta=1.0,
alpha=0.5,
gamma=1.5,
reduction='mean'):
"""Calculate balanced L1 loss.
Please see the `Libra R-CNN <https://arxiv.org/pdf/1904.02701.pdf>`_
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
beta (float): The loss is a piecewise function of prediction and target
and ``beta`` serves as a threshold for the difference between the
prediction and target. Defaults to 1.0.
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss.
Defaults to 1.5.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
b = np.e**(gamma / alpha) - 1
loss = torch.where(
diff < beta, alpha / b *
(b * diff + 1) * torch.log(b * diff / beta + 1) - alpha * diff,
gamma * diff + gamma / b - alpha * beta)
return loss
@LOSSES.register_module()
class BalancedL1Loss(nn.Module):
"""Balanced L1 Loss.
arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019)
Args:
alpha (float): The denominator ``alpha`` in the balanced L1 loss.
Defaults to 0.5.
gamma (float): The ``gamma`` in the balanced L1 loss. Defaults to 1.5.
beta (float, optional): The loss is a piecewise function of prediction
and target. ``beta`` serves as a threshold for the difference
between the prediction and target. Defaults to 1.0.
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self,
alpha=0.5,
gamma=1.5,
beta=1.0,
reduction='mean',
loss_weight=1.0):
super(BalancedL1Loss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 4).
target (torch.Tensor): The learning target of the prediction with
shape (N, 4).
weight (torch.Tensor, optional): Sample-wise loss weight with
shape (N, ).
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * balanced_l1_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
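# A minimal sketch of the balanced L1 loss on dummy box deltas; the tensor
# shapes and default loss settings below are illustrative assumptions.
if __name__ == '__main__':
    pred = torch.rand(4, 4)
    target = torch.rand(4, 4)
    # functional form (reduction defaults to 'mean' via @weighted_loss)
    print(balanced_l1_loss(pred, target).item())
    # module form with the default alpha, gamma and beta
    print(BalancedL1Loss()(pred, target).item())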
| 4,252 | 33.024 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/losses/cross_entropy_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100,
avg_non_ignore=False):
"""Calculate the CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored.
If None, it will be set to default value. Default: -100.
        avg_non_ignore (bool): The flag that decides whether the loss is
            only averaged over non-ignored targets. Default: False.
Returns:
torch.Tensor: The calculated loss
"""
# The default value of ignore_index is the same as F.cross_entropy
ignore_index = -100 if ignore_index is None else ignore_index
# element-wise losses
loss = F.cross_entropy(
pred,
label,
weight=class_weight,
reduction='none',
ignore_index=ignore_index)
# average loss over non-ignored elements
# pytorch's official cross_entropy average loss over non-ignored elements
# refer to https://github.com/pytorch/pytorch/blob/56b43f4fec1f76953f15a627694d4bba34588969/torch/nn/functional.py#L2660 # noqa
if (avg_factor is None) and avg_non_ignore and reduction == 'mean':
avg_factor = label.numel() - (label == ignore_index).sum().item()
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(
valid_mask & (labels < label_channels), as_tuple=False)
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
label_channels).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights, valid_mask
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100,
avg_non_ignore=False):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1) or (N, ).
When the shape of pred is (N, 1), label will be expanded to
one-hot format, and when the shape of pred is (N, ), label
will not be expanded to one-hot format.
label (torch.Tensor): The learning label of the prediction,
with shape (N, ).
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored.
If None, it will be set to default value. Default: -100.
        avg_non_ignore (bool): The flag that decides whether the loss is
            only averaged over non-ignored targets. Default: False.
Returns:
torch.Tensor: The calculated loss.
"""
# The default value of ignore_index is the same as F.cross_entropy
ignore_index = -100 if ignore_index is None else ignore_index
if pred.dim() != label.dim():
label, weight, valid_mask = _expand_onehot_labels(
label, weight, pred.size(-1), ignore_index)
else:
# should mask out the ignored elements
valid_mask = ((label >= 0) & (label != ignore_index)).float()
if weight is not None:
# The inplace writing method will have a mismatched broadcast
# shape error if the weight and valid_mask dimensions
# are inconsistent such as (B,N,1) and (B,N,C).
weight = weight * valid_mask
else:
weight = valid_mask
# average loss over non-ignored elements
if (avg_factor is None) and avg_non_ignore and reduction == 'mean':
avg_factor = valid_mask.sum().item()
# weighted element-wise losses
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), pos_weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=None,
**kwargs):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C, *), C is the
number of classes. The trailing * indicates arbitrary shape.
target (torch.Tensor): The learning label of the prediction.
        label (torch.Tensor): ``label`` indicates the class label of the
            object corresponding to the mask. It is used to select the mask
            of the class that the object belongs to when the mask prediction
            is not class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (None): Placeholder, to be consistent with other loss.
Default: None.
Returns:
torch.Tensor: The calculated loss
Example:
>>> N, C = 3, 11
>>> H, W = 2, 2
>>> pred = torch.randn(N, C, H, W) * 1000
>>> target = torch.rand(N, H, W)
>>> label = torch.randint(0, C, size=(N,))
>>> reduction = 'mean'
>>> avg_factor = None
>>> class_weights = None
>>> loss = mask_cross_entropy(pred, target, label, reduction,
>>> avg_factor, class_weights)
>>> assert loss.shape == (1,)
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
ignore_index=None,
loss_weight=1.0,
avg_non_ignore=False):
"""CrossEntropyLoss.
Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                instead of softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
            reduction (str, optional): The method used to reduce the loss.
                Options are "none", "mean" and "sum". Defaults to 'mean'.
class_weight (list[float], optional): Weight of each class.
Defaults to None.
ignore_index (int | None): The label index to be ignored.
Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
            avg_non_ignore (bool): The flag that decides whether the loss is
                only averaged over non-ignored targets. Default: False.
"""
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
self.ignore_index = ignore_index
self.avg_non_ignore = avg_non_ignore
if ((ignore_index is not None) and not self.avg_non_ignore
and self.reduction == 'mean'):
warnings.warn(
'Default ``avg_non_ignore`` is False, if you would like to '
'ignore the certain label and average loss over non-ignore '
'labels, which is the same with PyTorch official '
'cross_entropy, set ``avg_non_ignore=True``.')
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def extra_repr(self):
"""Extra repr."""
s = f'avg_non_ignore={self.avg_non_ignore}'
return s
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
ignore_index=None,
**kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The prediction.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss. Options are "none", "mean" and "sum".
ignore_index (int | None): The label index to be ignored.
If not None, it will override the default value. Default: None.
Returns:
torch.Tensor: The calculated loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if ignore_index is None:
ignore_index = self.ignore_index
if self.class_weight is not None:
class_weight = cls_score.new_tensor(
self.class_weight, device=cls_score.device)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
ignore_index=ignore_index,
avg_non_ignore=self.avg_non_ignore,
**kwargs)
return loss_cls
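# A minimal sketch of CrossEntropyLoss in its softmax and sigmoid variants on
# dummy logits; the batch size and class count below are illustrative
# assumptions.
if __name__ == '__main__':
    cls_score = torch.randn(4, 5)        # 4 samples, 5 classes
    label = torch.randint(0, 5, (4, ))
    softmax_ce = CrossEntropyLoss(use_sigmoid=False)
    sigmoid_bce = CrossEntropyLoss(use_sigmoid=True)
    print(softmax_ce(cls_score, label).item())
    print(sigmoid_bce(cls_score, label).item())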
| 12,143 | 39.211921 | 132 | py |
mmdetection | mmdetection-master/mmdet/models/losses/dice_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weight_reduce_loss
def dice_loss(pred,
target,
weight=None,
eps=1e-3,
reduction='mean',
naive_dice=False,
avg_factor=None):
"""Calculate dice loss, there are two forms of dice loss is supported:
- the one proposed in `V-Net: Fully Convolutional Neural
Networks for Volumetric Medical Image Segmentation
<https://arxiv.org/abs/1606.04797>`_.
- the dice loss in which the power of the number in the
denominator is the first power instead of the second
power.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *)
target (torch.Tensor): The learning label of the prediction,
shape (n, *), same shape of pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
eps (float): Avoid dividing by zero. Default: 1e-3.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
Options are "none", "mean" and "sum".
naive_dice (bool, optional): If false, use the dice
loss defined in the V-Net paper, otherwise, use the
naive dice loss in which the power of the number in the
denominator is the first power instead of the second
            power. Defaults to False.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
input = pred.flatten(1)
target = target.flatten(1).float()
a = torch.sum(input * target, 1)
if naive_dice:
b = torch.sum(input, 1)
c = torch.sum(target, 1)
d = (2 * a + eps) / (b + c + eps)
else:
b = torch.sum(input * input, 1) + eps
c = torch.sum(target * target, 1) + eps
d = (2 * a) / (b + c)
loss = 1 - d
if weight is not None:
assert weight.ndim == loss.ndim
assert len(weight) == len(pred)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class DiceLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
activate=True,
reduction='mean',
naive_dice=False,
loss_weight=1.0,
eps=1e-3):
"""Compute dice loss.
Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                instead of softmax. Defaults to True.
            activate (bool): Whether to activate the predictions inside the
                loss, i.e. apply the sigmoid before computing the dice loss.
                If False, the predictions are assumed to already be
                probabilities. Defaults to True.
reduction (str, optional): The method used
to reduce the loss. Options are "none",
"mean" and "sum". Defaults to 'mean'.
naive_dice (bool, optional): If false, use the dice
loss defined in the V-Net paper, otherwise, use the
naive dice loss in which the power of the number in the
denominator is the first power instead of the second
power. Defaults to False.
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
eps (float): Avoid dividing by zero. Defaults to 1e-3.
"""
super(DiceLoss, self).__init__()
self.use_sigmoid = use_sigmoid
self.reduction = reduction
self.naive_dice = naive_dice
self.loss_weight = loss_weight
self.eps = eps
self.activate = activate
def forward(self,
pred,
target,
weight=None,
reduction_override=None,
avg_factor=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction, has a shape (n, *).
target (torch.Tensor): The label of the prediction,
shape (n, *), same shape of pred.
weight (torch.Tensor, optional): The weight of loss for each
prediction, has a shape (n,). Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.activate:
if self.use_sigmoid:
pred = pred.sigmoid()
else:
raise NotImplementedError
loss = self.loss_weight * dice_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
naive_dice=self.naive_dice,
avg_factor=avg_factor)
return loss
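# A minimal sketch of DiceLoss on dummy mask logits; the shapes below are
# illustrative assumptions (2 predictions over a 4x4 mask).
if __name__ == '__main__':
    pred = torch.randn(2, 4, 4)                   # raw logits
    target = (torch.rand(2, 4, 4) > 0.5).float()  # binary masks
    criterion = DiceLoss(use_sigmoid=True)
    print(criterion(pred, target).item())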
| 5,324 | 35.22449 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/losses/focal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.ops import sigmoid_focal_loss as _sigmoid_focal_loss
from ..builder import LOSSES
from .utils import weight_reduce_loss
# This method is only for debugging
def py_sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
pt = (1 - pred_sigmoid) * target + pred_sigmoid * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
def py_focal_loss_with_prob(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
"""PyTorch version of `Focal Loss <https://arxiv.org/abs/1708.02002>`_.
Different from `py_sigmoid_focal_loss`, this function accepts probability
as input.
Args:
pred (torch.Tensor): The prediction probability with shape (N, C),
C is the number of classes.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
num_classes = pred.size(1)
target = F.one_hot(target, num_classes=num_classes + 1)
target = target[:, :num_classes]
target = target.type_as(pred)
pt = (1 - pred) * target + pred * (1 - target)
focal_weight = (alpha * target + (1 - alpha) *
(1 - target)) * pt.pow(gamma)
loss = F.binary_cross_entropy(
pred, target, reduction='none') * focal_weight
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
def sigmoid_focal_loss(pred,
target,
weight=None,
gamma=2.0,
alpha=0.25,
reduction='mean',
avg_factor=None):
r"""A wrapper of cuda version `Focal Loss
<https://arxiv.org/abs/1708.02002>`_.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
# Function.apply does not accept keyword arguments, so the decorator
# "weighted_loss" is not applicable
loss = _sigmoid_focal_loss(pred.contiguous(), target.contiguous(), gamma,
alpha, None, 'none')
if weight is not None:
if weight.shape != loss.shape:
if weight.size(0) == loss.size(0):
# For most cases, weight is of shape (num_priors, ),
# which means it does not have the second axis num_class
weight = weight.view(-1, 1)
else:
# Sometimes, weight per anchor per class is also needed. e.g.
# in FSAF. But it may be flattened of shape
# (num_priors x num_class, ), while loss is still of shape
# (num_priors, num_class).
assert weight.numel() == loss.numel()
weight = weight.view(loss.size(0), -1)
assert weight.ndim == loss.ndim
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class FocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=1.0,
activated=False):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_
Args:
            use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                instead of softmax. Defaults to True.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 0.25.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
activated (bool, optional): Whether the input is activated.
If True, it means the input has been activated and can be
treated as probabilities. Else, it should be treated as logits.
Defaults to False.
"""
super(FocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid focal loss supported now.'
self.use_sigmoid = use_sigmoid
self.gamma = gamma
self.alpha = alpha
self.reduction = reduction
self.loss_weight = loss_weight
self.activated = activated
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
if self.activated:
calculate_loss_func = py_focal_loss_with_prob
else:
if torch.cuda.is_available() and pred.is_cuda:
calculate_loss_func = sigmoid_focal_loss
else:
num_classes = pred.size(1)
target = F.one_hot(target, num_classes=num_classes + 1)
target = target[:, :num_classes]
calculate_loss_func = py_sigmoid_focal_loss
loss_cls = self.loss_weight * calculate_loss_func(
pred,
target,
weight,
gamma=self.gamma,
alpha=self.alpha,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
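# A minimal sketch of FocalLoss on dummy logits; the batch size and class
# count below are illustrative assumptions. On CPU tensors this uses the
# pure-PyTorch `py_sigmoid_focal_loss` branch.
if __name__ == '__main__':
    pred = torch.randn(6, 10)
    target = torch.randint(0, 10, (6, ))
    criterion = FocalLoss(use_sigmoid=True, gamma=2.0, alpha=0.25)
    print(criterion(pred, target).item())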
| 10,420 | 41.534694 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/losses/gaussian_focal_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def gaussian_focal_loss(pred, gaussian_target, alpha=2.0, gamma=4.0):
"""`Focal Loss <https://arxiv.org/abs/1708.02002>`_ for targets in gaussian
distribution.
Args:
pred (torch.Tensor): The prediction.
gaussian_target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
alpha (float, optional): A balanced form for Focal Loss.
Defaults to 2.0.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 4.0.
"""
eps = 1e-12
pos_weights = gaussian_target.eq(1)
neg_weights = (1 - gaussian_target).pow(gamma)
pos_loss = -(pred + eps).log() * (1 - pred).pow(alpha) * pos_weights
neg_loss = -(1 - pred + eps).log() * pred.pow(alpha) * neg_weights
return pos_loss + neg_loss
@LOSSES.register_module()
class GaussianFocalLoss(nn.Module):
"""GaussianFocalLoss is a variant of focal loss.
More details can be found in the `paper
<https://arxiv.org/abs/1808.01244>`_
Code is modified from `kp_utils.py
<https://github.com/princeton-vl/CornerNet/blob/master/models/py_utils/kp_utils.py#L152>`_ # noqa: E501
Please notice that the target in GaussianFocalLoss is a gaussian heatmap,
not 0/1 binary target.
Args:
alpha (float): Power of prediction.
gamma (float): Power of target for negative samples.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
"""
def __init__(self,
alpha=2.0,
gamma=4.0,
reduction='mean',
loss_weight=1.0):
super(GaussianFocalLoss, self).__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction
in gaussian distribution.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_reg = self.loss_weight * gaussian_focal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
reduction=reduction,
avg_factor=avg_factor)
return loss_reg
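# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file).
# `_demo_gaussian_focal_loss` and the shapes/values are invented. `pred` must
# already be a probability heatmap in (0, 1) and the target is a gaussian
# heatmap whose peaks equal exactly 1, not a 0/1 class label.
def _demo_gaussian_focal_loss():
    import torch
    criterion = GaussianFocalLoss(alpha=2.0, gamma=4.0)
    heatmap_pred = torch.rand(2, 3, 8, 8).clamp(1e-4, 1 - 1e-4)
    heatmap_target = torch.zeros(2, 3, 8, 8)
    heatmap_target[0, 1, 4, 4] = 1.0  # a single (degenerate) gaussian peak
    return criterion(heatmap_pred, heatmap_target)  # 'mean' reduction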
# File: mmdetection-master/mmdet/models/losses/gfocal_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def quality_focal_loss(pred, target, beta=2.0):
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert len(target) == 2, """target for QFL must be a tuple of two elements,
including category label and quality label, respectively"""
# label denotes the category id, score denotes the quality score
label, score = target
# negatives are supervised by 0 quality score
pred_sigmoid = pred.sigmoid()
scale_factor = pred_sigmoid
zerolabel = scale_factor.new_zeros(pred.shape)
loss = F.binary_cross_entropy_with_logits(
pred, zerolabel, reduction='none') * scale_factor.pow(beta)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = pred.size(1)
pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
pos_label = label[pos].long()
# positives are supervised by bbox quality (IoU) score
scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
pred[pos, pos_label], score[pos],
reduction='none') * scale_factor.abs().pow(beta)
loss = loss.sum(dim=1, keepdim=False)
return loss
@weighted_loss
def quality_focal_loss_with_prob(pred, target, beta=2.0):
r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Different from `quality_focal_loss`, this function accepts probability
as input.
Args:
pred (torch.Tensor): Predicted joint representation of classification
and quality (IoU) estimation with shape (N, C), C is the number of
classes.
target (tuple([torch.Tensor])): Target category label with shape (N,)
and target quality label with shape (N,).
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert len(target) == 2, """target for QFL must be a tuple of two elements,
including category label and quality label, respectively"""
# label denotes the category id, score denotes the quality score
label, score = target
# negatives are supervised by 0 quality score
pred_sigmoid = pred
scale_factor = pred_sigmoid
zerolabel = scale_factor.new_zeros(pred.shape)
loss = F.binary_cross_entropy(
pred, zerolabel, reduction='none') * scale_factor.pow(beta)
# FG cat_id: [0, num_classes -1], BG cat_id: num_classes
bg_class_ind = pred.size(1)
pos = ((label >= 0) & (label < bg_class_ind)).nonzero().squeeze(1)
pos_label = label[pos].long()
# positives are supervised by bbox quality (IoU) score
scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
loss[pos, pos_label] = F.binary_cross_entropy(
pred[pos, pos_label], score[pos],
reduction='none') * scale_factor.abs().pow(beta)
loss = loss.sum(dim=1, keepdim=False)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def distribution_focal_loss(pred, label):
r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
Qualified and Distributed Bounding Boxes for Dense Object Detection
<https://arxiv.org/abs/2006.04388>`_.
Args:
pred (torch.Tensor): Predicted general distribution of bounding boxes
(before softmax) with shape (N, n+1), n is the max value of the
integral set `{0, ..., n}` in paper.
label (torch.Tensor): Target distance label for bounding boxes with
shape (N,).
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
dis_left = label.long()
dis_right = dis_left + 1
weight_left = dis_right.float() - label
weight_right = label - dis_left.float()
loss = F.cross_entropy(pred, dis_left, reduction='none') * weight_left \
+ F.cross_entropy(pred, dis_right, reduction='none') * weight_right
return loss
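# Note (added for illustration): the fractional target is split between its
# two neighbouring integer bins, e.g. label = 2.3 supervises bin 2 with
# weight 0.7 and bin 3 with weight 0.3, so the expectation of the learned
# distribution is pulled towards 2.3.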
@LOSSES.register_module()
class QualityFocalLoss(nn.Module):
r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
Learning Qualified and Distributed Bounding Boxes for Dense Object
Detection <https://arxiv.org/abs/2006.04388>`_.
Args:
use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
Defaults to True.
beta (float): The beta parameter for calculating the modulating factor.
Defaults to 2.0.
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Loss weight of current loss.
activated (bool, optional): Whether the input is activated.
If True, it means the input has been activated and can be
treated as probabilities. Else, it should be treated as logits.
Defaults to False.
"""
def __init__(self,
use_sigmoid=True,
beta=2.0,
reduction='mean',
loss_weight=1.0,
activated=False):
super(QualityFocalLoss, self).__init__()
assert use_sigmoid is True, 'Only sigmoid in QFL supported now.'
self.use_sigmoid = use_sigmoid
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
self.activated = activated
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): Predicted joint representation of
classification and quality (IoU) estimation with shape (N, C),
C is the number of classes.
target (tuple([torch.Tensor])): Target category label with shape
(N,) and target quality label with shape (N,).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
if self.activated:
calculate_loss_func = quality_focal_loss_with_prob
else:
calculate_loss_func = quality_focal_loss
loss_cls = self.loss_weight * calculate_loss_func(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
@LOSSES.register_module()
class DistributionFocalLoss(nn.Module):
r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
Learning Qualified and Distributed Bounding Boxes for Dense Object
Detection <https://arxiv.org/abs/2006.04388>`_.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(DistributionFocalLoss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): Predicted general distribution of bounding
boxes (before softmax) with shape (N, n+1), n is the max value
of the integral set `{0, ..., n}` in paper.
target (torch.Tensor): Target distance label for bounding boxes
with shape (N,).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_cls = self.loss_weight * distribution_focal_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_cls
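# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file).
# `_demo_gfocal_losses` and the shapes/values are invented. For QFL the target
# is a (labels, quality_scores) tuple where `label == num_classes` marks
# background; for DFL the target is a fractional distance in [0, n).
def _demo_gfocal_losses():
    import torch
    qfl = QualityFocalLoss(use_sigmoid=True, beta=2.0)
    cls_logits = torch.randn(4, 5)                # 4 samples, 5 classes
    labels = torch.tensor([0, 2, 5, 5])           # 5 == background
    quality = torch.tensor([0.8, 0.6, 0.0, 0.0])  # IoU targets for positives
    loss_qfl = qfl(cls_logits, (labels, quality))
    dfl = DistributionFocalLoss()
    pred_dist = torch.randn(4, 8)      # integral set {0, ..., 7}
    distances = torch.rand(4) * 6      # fractional targets in [0, 6)
    loss_dfl = dfl(pred_dist, distances)
    return loss_qfl, loss_dfl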
# File: mmdetection-master/mmdet/models/losses/ghm_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def _expand_onehot_labels(labels, label_weights, label_channels):
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
inds = torch.nonzero(
(labels >= 0) & (labels < label_channels), as_tuple=False).squeeze()
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
bin_label_weights = label_weights.view(-1, 1).expand(
label_weights.size(0), label_channels)
return bin_labels, bin_label_weights
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module()
class GHMC(nn.Module):
"""GHM Classification Loss.
Details of the theorem can be viewed in the paper
`Gradient Harmonized Single-stage Detector
<https://arxiv.org/abs/1811.05181>`_.
Args:
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
use_sigmoid (bool): Can only be true for BCE based loss now.
loss_weight (float): The weight of the total GHM-C loss.
reduction (str): Options are "none", "mean" and "sum".
Defaults to "mean"
"""
def __init__(self,
bins=10,
momentum=0,
use_sigmoid=True,
loss_weight=1.0,
reduction='mean'):
super(GHMC, self).__init__()
self.bins = bins
self.momentum = momentum
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] += 1e-6
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.use_sigmoid = use_sigmoid
if not self.use_sigmoid:
raise NotImplementedError
self.loss_weight = loss_weight
self.reduction = reduction
def forward(self,
pred,
target,
label_weight,
reduction_override=None,
**kwargs):
"""Calculate the GHM-C loss.
Args:
pred (float tensor of size [batch_num, class_num]):
The direct prediction of classification fc layer.
target (float tensor of size [batch_num, class_num]):
Binary class target for each sample.
label_weight (float tensor of size [batch_num, class_num]):
the value is 1 if the sample is valid and 0 if ignored.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
The gradient harmonized loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
# the target should be binary class label
if pred.dim() != target.dim():
target, label_weight = _expand_onehot_labels(
target, label_weight, pred.size(-1))
target, label_weight = target.float(), label_weight.float()
edges = self.edges
mmt = self.momentum
weights = torch.zeros_like(pred)
# gradient length
g = torch.abs(pred.sigmoid().detach() - target)
valid = label_weight > 0
tot = max(valid.float().sum().item(), 1.0)
n = 0 # n valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
n += 1
if n > 0:
weights = weights / n
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none')
loss = weight_reduce_loss(
loss, weights, reduction=reduction, avg_factor=tot)
return loss * self.loss_weight
# TODO: code refactoring to make it consistent with other losses
@LOSSES.register_module()
class GHMR(nn.Module):
"""GHM Regression Loss.
Details of the theorem can be viewed in the paper
`Gradient Harmonized Single-stage Detector
<https://arxiv.org/abs/1811.05181>`_.
Args:
mu (float): The parameter for the Authentic Smooth L1 loss.
bins (int): Number of the unit regions for distribution calculation.
momentum (float): The parameter for moving average.
loss_weight (float): The weight of the total GHM-R loss.
reduction (str): Options are "none", "mean" and "sum".
Defaults to "mean"
"""
def __init__(self,
mu=0.02,
bins=10,
momentum=0,
loss_weight=1.0,
reduction='mean'):
super(GHMR, self).__init__()
self.mu = mu
self.bins = bins
edges = torch.arange(bins + 1).float() / bins
self.register_buffer('edges', edges)
self.edges[-1] = 1e3
self.momentum = momentum
if momentum > 0:
acc_sum = torch.zeros(bins)
self.register_buffer('acc_sum', acc_sum)
self.loss_weight = loss_weight
self.reduction = reduction
# TODO: support reduction parameter
def forward(self,
pred,
target,
label_weight,
avg_factor=None,
reduction_override=None):
"""Calculate the GHM-R loss.
Args:
pred (float tensor of size [batch_num, 4 (* class_num)]):
The prediction of box regression layer. Channel number can be 4
or 4 * class_num depending on whether it is class-agnostic.
target (float tensor of size [batch_num, 4 (* class_num)]):
The target regression values with the same size of pred.
label_weight (float tensor of size [batch_num, 4 (* class_num)]):
The weight of each sample, 0 if ignored.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
The gradient harmonized loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
mu = self.mu
edges = self.edges
mmt = self.momentum
# ASL1 loss
diff = pred - target
loss = torch.sqrt(diff * diff + mu * mu) - mu
# gradient length
g = torch.abs(diff / torch.sqrt(mu * mu + diff * diff)).detach()
weights = torch.zeros_like(g)
valid = label_weight > 0
tot = max(label_weight.float().sum().item(), 1.0)
n = 0 # n: valid bins
for i in range(self.bins):
inds = (g >= edges[i]) & (g < edges[i + 1]) & valid
num_in_bin = inds.sum().item()
if num_in_bin > 0:
n += 1
if mmt > 0:
self.acc_sum[i] = mmt * self.acc_sum[i] \
+ (1 - mmt) * num_in_bin
weights[inds] = tot / self.acc_sum[i]
else:
weights[inds] = tot / num_in_bin
if n > 0:
weights /= n
loss = weight_reduce_loss(
loss, weights, reduction=reduction, avg_factor=tot)
return loss * self.loss_weight
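# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file).
# `_demo_ghm_losses` and the shapes/values are invented. GHMC takes integer
# labels (expanded to one-hot internally) plus a per-sample validity weight;
# GHMR works on box deltas with a per-element weight.
def _demo_ghm_losses():
    import torch
    ghmc = GHMC(bins=10, momentum=0.75)
    cls_logits = torch.randn(6, 4)
    labels = torch.tensor([0, 1, 2, 3, 4, 4])  # 4 == background
    label_weight = torch.ones(6)
    loss_cls = ghmc(cls_logits, labels, label_weight)
    ghmr = GHMR(mu=0.02, bins=10)
    bbox_pred = torch.randn(6, 4)
    bbox_target = torch.randn(6, 4)
    bbox_weight = torch.ones(6, 4)
    loss_bbox = ghmr(bbox_pred, bbox_target, bbox_weight)
    return loss_cls, loss_bbox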
# File: mmdetection-master/mmdet/models/losses/iou_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import math
import warnings
import mmcv
import torch
import torch.nn as nn
from mmdet.core import bbox_overlaps
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def iou_loss(pred, target, linear=False, mode='log', eps=1e-6):
"""IoU loss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
The loss is calculated as negative log of IoU.
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
linear (bool, optional): If True, use linear scale of loss instead of
log scale. Default: False.
mode (str): Loss scaling mode, including "linear", "square", and "log".
Default: 'log'
eps (float): Eps to avoid log(0).
Return:
torch.Tensor: Loss tensor.
"""
assert mode in ['linear', 'square', 'log']
if linear:
mode = 'linear'
warnings.warn('DeprecationWarning: Setting "linear=True" in '
'iou_loss is deprecated, please use "mode=`linear`" '
'instead.')
ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
if mode == 'linear':
loss = 1 - ious
elif mode == 'square':
loss = 1 - ious**2
elif mode == 'log':
loss = -ious.log()
else:
raise NotImplementedError
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
"""BIoULoss.
This is an implementation of paper
`Improving Object Localization with Fitness NMS and Bounded IoU Loss.
<https://arxiv.org/abs/1711.00164>`_.
Args:
pred (torch.Tensor): Predicted bboxes.
target (torch.Tensor): Target bboxes.
beta (float): beta parameter in smoothl1.
eps (float): eps to avoid NaN.
"""
pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
pred_w = pred[:, 2] - pred[:, 0]
pred_h = pred[:, 3] - pred[:, 1]
with torch.no_grad():
target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
target_ctry = (target[:, 1] + target[:, 3]) * 0.5
target_w = target[:, 2] - target[:, 0]
target_h = target[:, 3] - target[:, 1]
dx = target_ctrx - pred_ctrx
dy = target_ctry - pred_ctry
loss_dx = 1 - torch.max(
(target_w - 2 * dx.abs()) /
(target_w + 2 * dx.abs() + eps), torch.zeros_like(dx))
loss_dy = 1 - torch.max(
(target_h - 2 * dy.abs()) /
(target_h + 2 * dy.abs() + eps), torch.zeros_like(dy))
loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w /
(target_w + eps))
loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h /
(target_h + eps))
# view(..., -1) does not work for empty tensor
loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh],
dim=-1).flatten(1)
loss = torch.where(loss_comb < beta, 0.5 * loss_comb * loss_comb / beta,
loss_comb - 0.5 * beta)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def giou_loss(pred, target, eps=1e-7):
r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
Box Regression <https://arxiv.org/abs/1902.09630>`_.
Args:
pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid division by zero.
Return:
Tensor: Loss tensor.
"""
gious = bbox_overlaps(pred, target, mode='giou', is_aligned=True, eps=eps)
loss = 1 - gious
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def diou_loss(pred, target, eps=1e-7):
r"""`Implementation of Distance-IoU Loss: Faster and Better
Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_.
Code is modified from https://github.com/Zzh-tju/DIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid division by zero.
Return:
Tensor: Loss tensor.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw**2 + ch**2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
rho2 = left + right
# DIoU
dious = ious - rho2 / c2
loss = 1 - dious
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def ciou_loss(pred, target, eps=1e-7):
r"""`Implementation of paper `Enhancing Geometric Factors into
Model Learning and Inference for Object Detection and Instance
Segmentation <https://arxiv.org/abs/2005.03572>`_.
Code is modified from https://github.com/Zzh-tju/CIoU.
Args:
pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
shape (n, 4).
target (Tensor): Corresponding gt bboxes, shape (n, 4).
        eps (float): Eps to avoid division by zero.
Return:
Tensor: Loss tensor.
"""
# overlap
lt = torch.max(pred[:, :2], target[:, :2])
rb = torch.min(pred[:, 2:], target[:, 2:])
wh = (rb - lt).clamp(min=0)
overlap = wh[:, 0] * wh[:, 1]
# union
ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
union = ap + ag - overlap + eps
# IoU
ious = overlap / union
# enclose area
enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
cw = enclose_wh[:, 0]
ch = enclose_wh[:, 1]
c2 = cw**2 + ch**2 + eps
b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
b2_x1, b2_y1 = target[:, 0], target[:, 1]
b2_x2, b2_y2 = target[:, 2], target[:, 3]
w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2))**2 / 4
right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2))**2 / 4
rho2 = left + right
factor = 4 / math.pi**2
v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
with torch.no_grad():
alpha = (ious > 0.5).float() * v / (1 - ious + v)
# CIoU
cious = ious - (rho2 / c2 + alpha * v)
loss = 1 - cious.clamp(min=-1.0, max=1.0)
return loss
@LOSSES.register_module()
class IoULoss(nn.Module):
"""IoULoss.
Computing the IoU loss between a set of predicted bboxes and target bboxes.
Args:
linear (bool): If True, use linear scale of loss else determined
by mode. Default: False.
eps (float): Eps to avoid log(0).
reduction (str): Options are "none", "mean" and "sum".
loss_weight (float): Weight of loss.
mode (str): Loss scaling mode, including "linear", "square", and "log".
Default: 'log'
"""
def __init__(self,
linear=False,
eps=1e-6,
reduction='mean',
loss_weight=1.0,
mode='log'):
super(IoULoss, self).__init__()
assert mode in ['linear', 'square', 'log']
if linear:
mode = 'linear'
warnings.warn('DeprecationWarning: Setting "linear=True" in '
'IOULoss is deprecated, please use "mode=`linear`" '
'instead.')
self.mode = mode
self.linear = linear
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None. Options are "none", "mean" and "sum".
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if (weight is not None) and (not torch.any(weight > 0)) and (
reduction != 'none'):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
# iou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * iou_loss(
pred,
target,
weight,
mode=self.mode,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@LOSSES.register_module()
class BoundedIoULoss(nn.Module):
def __init__(self, beta=0.2, eps=1e-3, reduction='mean', loss_weight=1.0):
super(BoundedIoULoss, self).__init__()
self.beta = beta
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * bounded_iou_loss(
pred,
target,
weight,
beta=self.beta,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@LOSSES.register_module()
class GIoULoss(nn.Module):
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(GIoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
# giou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * giou_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@LOSSES.register_module()
class DIoULoss(nn.Module):
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(DIoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
            # diou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * diou_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
@LOSSES.register_module()
class CIoULoss(nn.Module):
def __init__(self, eps=1e-6, reduction='mean', loss_weight=1.0):
super(CIoULoss, self).__init__()
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
if weight is not None and not torch.any(weight > 0):
if pred.dim() == weight.dim() + 1:
weight = weight.unsqueeze(1)
return (pred * weight).sum() # 0
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if weight is not None and weight.dim() > 1:
# TODO: remove this in the future
# reduce the weight of shape (n, 4) to (n,) to match the
            # ciou_loss of shape (n,)
assert weight.shape == pred.shape
weight = weight.mean(-1)
loss = self.loss_weight * ciou_loss(
pred,
target,
weight,
eps=self.eps,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss
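# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file).
# `_demo_iou_losses` and the box coordinates are invented. All of these losses
# expect aligned (x1, y1, x2, y2) boxes of shape (n, 4).
def _demo_iou_losses():
    import torch
    pred_boxes = torch.tensor([[10., 10., 50., 50.],
                               [20., 20., 60., 60.]])
    gt_boxes = torch.tensor([[12., 12., 48., 52.],
                             [18., 22., 58., 64.]])
    loss_giou = GIoULoss()(pred_boxes, gt_boxes)
    loss_diou = DIoULoss()(pred_boxes, gt_boxes)
    loss_ciou = CIoULoss()(pred_boxes, gt_boxes)
    return loss_giou, loss_diou, loss_ciou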
# File: mmdetection-master/mmdet/models/losses/kd_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def knowledge_distillation_kl_div_loss(pred,
soft_label,
T,
detach_target=True):
r"""Loss function for knowledge distilling using KL divergence.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
        soft_label (Tensor): Target logits with shape (N, n + 1).
        T (int): Temperature for distillation.
        detach_target (bool): Whether to remove soft_label from automatic
            differentiation. Defaults to True.
Returns:
torch.Tensor: Loss tensor with shape (N,).
"""
assert pred.size() == soft_label.size()
target = F.softmax(soft_label / T, dim=1)
if detach_target:
target = target.detach()
kd_loss = F.kl_div(
F.log_softmax(pred / T, dim=1), target, reduction='none').mean(1) * (
T * T)
return kd_loss
@LOSSES.register_module()
class KnowledgeDistillationKLDivLoss(nn.Module):
"""Loss function for knowledge distilling using KL divergence.
Args:
reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
loss_weight (float): Loss weight of current loss.
T (int): Temperature for distillation.
"""
def __init__(self, reduction='mean', loss_weight=1.0, T=10):
super(KnowledgeDistillationKLDivLoss, self).__init__()
assert T >= 1
self.reduction = reduction
self.loss_weight = loss_weight
self.T = T
def forward(self,
pred,
soft_label,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (Tensor): Predicted logits with shape (N, n + 1).
            soft_label (Tensor): Target logits with shape (N, n + 1).
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_kd = self.loss_weight * knowledge_distillation_kl_div_loss(
pred,
soft_label,
weight,
reduction=reduction,
avg_factor=avg_factor,
T=self.T)
return loss_kd
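# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file).
# `_demo_kd_loss` and the shapes/values are invented: a student head and a
# teacher head predict logits over the same 11 classification channels.
def _demo_kd_loss():
    import torch
    criterion = KnowledgeDistillationKLDivLoss(T=10)
    student_logits = torch.randn(4, 11)
    teacher_logits = torch.randn(4, 11)
    return criterion(student_logits, teacher_logits)  # 'mean' reduction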
# File: mmdetection-master/mmdet/models/losses/mse_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weighted_loss
@weighted_loss
def mse_loss(pred, target):
"""Wrapper of mse loss."""
return F.mse_loss(pred, target, reduction='none')
@LOSSES.register_module()
class MSELoss(nn.Module):
"""MSELoss.
Args:
reduction (str, optional): The method that reduces the loss to a
scalar. Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of the loss. Defaults to 1.0
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super().__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function of loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): Weight of the loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss = self.loss_weight * mse_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss
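# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file).
# `_demo_mse_loss` and the shapes/values are invented; any pair of same-shape
# tensors works, e.g. predicted vs. target keypoint heatmaps.
def _demo_mse_loss():
    import torch
    criterion = MSELoss(loss_weight=1.0)
    pred = torch.rand(2, 17, 48, 48)
    target = torch.rand(2, 17, 48, 48)
    return criterion(pred, target)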
# File: mmdetection-master/mmdet/models/losses/pisa_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.core import bbox_overlaps
@mmcv.jit(derivate=True, coderize=True)
def isr_p(cls_score,
bbox_pred,
bbox_targets,
rois,
sampling_results,
loss_cls,
bbox_coder,
k=2,
bias=0,
num_class=80):
"""Importance-based Sample Reweighting (ISR_P), positive part.
Args:
cls_score (Tensor): Predicted classification scores.
bbox_pred (Tensor): Predicted bbox deltas.
        bbox_targets (tuple[Tensor]): A tuple of bbox targets, which are
labels, label_weights, bbox_targets, bbox_weights, respectively.
rois (Tensor): Anchors (single_stage) in shape (n, 4) or RoIs
(two_stage) in shape (n, 5).
sampling_results (obj): Sampling results.
loss_cls (func): Classification loss func of the head.
bbox_coder (obj): BBox coder of the head.
k (float): Power of the non-linear mapping.
bias (float): Shift of the non-linear mapping.
num_class (int): Number of classes, default: 80.
Return:
tuple([Tensor]): labels, imp_based_label_weights, bbox_targets,
bbox_target_weights
"""
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
pos_label_inds = ((labels >= 0) &
(labels < num_class)).nonzero().reshape(-1)
pos_labels = labels[pos_label_inds]
# if no positive samples, return the original targets
num_pos = float(pos_label_inds.size(0))
if num_pos == 0:
return labels, label_weights, bbox_targets, bbox_weights
# merge pos_assigned_gt_inds of per image to a single tensor
gts = list()
last_max_gt = 0
for i in range(len(sampling_results)):
gt_i = sampling_results[i].pos_assigned_gt_inds
gts.append(gt_i + last_max_gt)
if len(gt_i) != 0:
last_max_gt = gt_i.max() + 1
gts = torch.cat(gts)
assert len(gts) == num_pos
cls_score = cls_score.detach()
bbox_pred = bbox_pred.detach()
# For single stage detectors, rois here indicate anchors, in shape (N, 4)
# For two stage detectors, rois are in shape (N, 5)
if rois.size(-1) == 5:
pos_rois = rois[pos_label_inds][:, 1:]
else:
pos_rois = rois[pos_label_inds]
if bbox_pred.size(-1) > 4:
bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
pos_delta_pred = bbox_pred[pos_label_inds, pos_labels].view(-1, 4)
else:
pos_delta_pred = bbox_pred[pos_label_inds].view(-1, 4)
# compute iou of the predicted bbox and the corresponding GT
pos_delta_target = bbox_targets[pos_label_inds].view(-1, 4)
pos_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_pred)
target_bbox_pred = bbox_coder.decode(pos_rois, pos_delta_target)
ious = bbox_overlaps(pos_bbox_pred, target_bbox_pred, is_aligned=True)
pos_imp_weights = label_weights[pos_label_inds]
# Two steps to compute IoU-HLR. Samples are first sorted by IoU locally,
# then sorted again within the same-rank group
max_l_num = pos_labels.bincount().max()
for label in pos_labels.unique():
l_inds = (pos_labels == label).nonzero().view(-1)
l_gts = gts[l_inds]
for t in l_gts.unique():
t_inds = l_inds[l_gts == t]
t_ious = ious[t_inds]
_, t_iou_rank_idx = t_ious.sort(descending=True)
_, t_iou_rank = t_iou_rank_idx.sort()
ious[t_inds] += max_l_num - t_iou_rank.float()
l_ious = ious[l_inds]
_, l_iou_rank_idx = l_ious.sort(descending=True)
_, l_iou_rank = l_iou_rank_idx.sort() # IoU-HLR
# linearly map HLR to label weights
pos_imp_weights[l_inds] *= (max_l_num - l_iou_rank.float()) / max_l_num
pos_imp_weights = (bias + pos_imp_weights * (1 - bias)).pow(k)
# normalize to make the new weighted loss value equal to the original loss
pos_loss_cls = loss_cls(
cls_score[pos_label_inds], pos_labels, reduction_override='none')
if pos_loss_cls.dim() > 1:
ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds][:,
None]
new_pos_loss_cls = pos_loss_cls * pos_imp_weights[:, None]
else:
ori_pos_loss_cls = pos_loss_cls * label_weights[pos_label_inds]
new_pos_loss_cls = pos_loss_cls * pos_imp_weights
pos_loss_cls_ratio = ori_pos_loss_cls.sum() / new_pos_loss_cls.sum()
pos_imp_weights = pos_imp_weights * pos_loss_cls_ratio
label_weights[pos_label_inds] = pos_imp_weights
bbox_targets = labels, label_weights, bbox_targets, bbox_weights
return bbox_targets
@mmcv.jit(derivate=True, coderize=True)
def carl_loss(cls_score,
labels,
bbox_pred,
bbox_targets,
loss_bbox,
k=1,
bias=0.2,
avg_factor=None,
sigmoid=False,
num_class=80):
"""Classification-Aware Regression Loss (CARL).
Args:
cls_score (Tensor): Predicted classification scores.
labels (Tensor): Targets of classification.
bbox_pred (Tensor): Predicted bbox deltas.
bbox_targets (Tensor): Target of bbox regression.
loss_bbox (func): Regression loss func of the head.
k (float): Power of the non-linear mapping.
bias (float): Shift of the non-linear mapping.
avg_factor (int): Average factor used in regression loss.
sigmoid (bool): Activation of the classification score.
num_class (int): Number of classes, default: 80.
Return:
dict: CARL loss dict.
"""
pos_label_inds = ((labels >= 0) &
(labels < num_class)).nonzero().reshape(-1)
if pos_label_inds.numel() == 0:
return dict(loss_carl=cls_score.sum()[None] * 0.)
pos_labels = labels[pos_label_inds]
# multiply pos_cls_score with the corresponding bbox weight
# and remain gradient
if sigmoid:
pos_cls_score = cls_score.sigmoid()[pos_label_inds, pos_labels]
else:
pos_cls_score = cls_score.softmax(-1)[pos_label_inds, pos_labels]
carl_loss_weights = (bias + (1 - bias) * pos_cls_score).pow(k)
# normalize carl_loss_weight to make its sum equal to num positive
num_pos = float(pos_cls_score.size(0))
weight_ratio = num_pos / carl_loss_weights.sum()
carl_loss_weights *= weight_ratio
if avg_factor is None:
avg_factor = bbox_targets.size(0)
# if is class agnostic, bbox pred is in shape (N, 4)
# otherwise, bbox pred is in shape (N, #classes, 4)
if bbox_pred.size(-1) > 4:
bbox_pred = bbox_pred.view(bbox_pred.size(0), -1, 4)
pos_bbox_preds = bbox_pred[pos_label_inds, pos_labels]
else:
pos_bbox_preds = bbox_pred[pos_label_inds]
ori_loss_reg = loss_bbox(
pos_bbox_preds,
bbox_targets[pos_label_inds],
reduction_override='none') / avg_factor
loss_carl = (ori_loss_reg * carl_loss_weights[:, None]).sum()
return dict(loss_carl=loss_carl[None])
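# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file).
# `_demo_carl_loss`, the `_l1` helper and the shapes/values are invented. The
# classification score of each positive sample re-weights a class-agnostic
# regression loss, so better-classified samples contribute more.
def _demo_carl_loss():
    import torch
    def _l1(pred, target, reduction_override='none'):
        # stand-in for the head's regression loss (supports 'none' reduction)
        return (pred - target).abs()
    cls_score = torch.randn(6, 4)              # 4 classes
    labels = torch.tensor([0, 2, 4, 4, 1, 4])  # 4 == background
    bbox_pred = torch.randn(6, 4)              # class-agnostic deltas
    bbox_targets = torch.randn(6, 4)
    return carl_loss(cls_score, labels, bbox_pred, bbox_targets,
                     loss_bbox=_l1, k=1, bias=0.2, num_class=4)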
# File: mmdetection-master/mmdet/models/losses/seesaw_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .accuracy import accuracy
from .cross_entropy_loss import cross_entropy
from .utils import weight_reduce_loss
def seesaw_ce_loss(cls_score,
labels,
label_weights,
cum_samples,
num_classes,
p,
q,
eps,
reduction='mean',
avg_factor=None):
"""Calculate the Seesaw CrossEntropy loss.
Args:
cls_score (torch.Tensor): The prediction with shape (N, C),
C is the number of classes.
labels (torch.Tensor): The learning label of the prediction.
label_weights (torch.Tensor): Sample-wise loss weight.
cum_samples (torch.Tensor): Cumulative samples for each category.
num_classes (int): The number of classes.
p (float): The ``p`` in the mitigation factor.
        q (float): The ``q`` in the compensation factor.
        eps (float): The minimal value of divisor to smooth
            the computation of the compensation factor.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
Returns:
torch.Tensor: The calculated loss
"""
assert cls_score.size(-1) == num_classes
assert len(cum_samples) == num_classes
onehot_labels = F.one_hot(labels, num_classes)
seesaw_weights = cls_score.new_ones(onehot_labels.size())
# mitigation factor
if p > 0:
sample_ratio_matrix = cum_samples[None, :].clamp(
min=1) / cum_samples[:, None].clamp(min=1)
index = (sample_ratio_matrix < 1.0).float()
sample_weights = sample_ratio_matrix.pow(p) * index + (1 - index)
mitigation_factor = sample_weights[labels.long(), :]
seesaw_weights = seesaw_weights * mitigation_factor
# compensation factor
if q > 0:
scores = F.softmax(cls_score.detach(), dim=1)
self_scores = scores[
torch.arange(0, len(scores)).to(scores.device).long(),
labels.long()]
score_matrix = scores / self_scores[:, None].clamp(min=eps)
index = (score_matrix > 1.0).float()
compensation_factor = score_matrix.pow(q) * index + (1 - index)
seesaw_weights = seesaw_weights * compensation_factor
cls_score = cls_score + (seesaw_weights.log() * (1 - onehot_labels))
loss = F.cross_entropy(cls_score, labels, weight=None, reduction='none')
if label_weights is not None:
label_weights = label_weights.float()
loss = weight_reduce_loss(
loss, weight=label_weights, reduction=reduction, avg_factor=avg_factor)
return loss
@LOSSES.register_module()
class SeesawLoss(nn.Module):
"""
Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021)
arXiv: https://arxiv.org/abs/2008.10032
Args:
        use_sigmoid (bool, optional): Whether the prediction uses sigmoid
            or softmax. Only False is supported.
p (float, optional): The ``p`` in the mitigation factor.
Defaults to 0.8.
        q (float, optional): The ``q`` in the compensation factor.
            Defaults to 2.0.
        num_classes (int, optional): The number of classes.
            Defaults to 1203 for the LVIS v1 dataset.
        eps (float, optional): The minimal value of divisor to smooth
            the computation of the compensation factor.
        reduction (str, optional): The method that reduces the loss to a
            scalar. Options are "none", "mean" and "sum".
        loss_weight (float, optional): The weight of the loss. Defaults to 1.0
        return_dict (bool, optional): Whether to return the losses as a dict.
            Defaults to True.
"""
def __init__(self,
use_sigmoid=False,
p=0.8,
q=2.0,
num_classes=1203,
eps=1e-2,
reduction='mean',
loss_weight=1.0,
return_dict=True):
super(SeesawLoss, self).__init__()
assert not use_sigmoid
self.use_sigmoid = False
self.p = p
self.q = q
self.num_classes = num_classes
self.eps = eps
self.reduction = reduction
self.loss_weight = loss_weight
self.return_dict = return_dict
# 0 for pos, 1 for neg
self.cls_criterion = seesaw_ce_loss
# cumulative samples for each category
self.register_buffer(
'cum_samples',
torch.zeros(self.num_classes + 1, dtype=torch.float))
# custom output channels of the classifier
self.custom_cls_channels = True
# custom activation of cls_score
self.custom_activation = True
        # custom accuracy of the classifier
self.custom_accuracy = True
def _split_cls_score(self, cls_score):
# split cls_score to cls_score_classes and cls_score_objectness
assert cls_score.size(-1) == self.num_classes + 2
cls_score_classes = cls_score[..., :-2]
cls_score_objectness = cls_score[..., -2:]
return cls_score_classes, cls_score_objectness
def get_cls_channels(self, num_classes):
"""Get custom classification channels.
Args:
num_classes (int): The number of classes.
Returns:
int: The custom classification channels.
"""
assert num_classes == self.num_classes
return num_classes + 2
def get_activation(self, cls_score):
"""Get custom activation of cls_score.
Args:
cls_score (torch.Tensor): The prediction with shape (N, C + 2).
Returns:
torch.Tensor: The custom activation of cls_score with shape
(N, C + 1).
"""
cls_score_classes, cls_score_objectness = self._split_cls_score(
cls_score)
score_classes = F.softmax(cls_score_classes, dim=-1)
score_objectness = F.softmax(cls_score_objectness, dim=-1)
score_pos = score_objectness[..., [0]]
score_neg = score_objectness[..., [1]]
score_classes = score_classes * score_pos
scores = torch.cat([score_classes, score_neg], dim=-1)
return scores
def get_accuracy(self, cls_score, labels):
"""Get custom accuracy w.r.t. cls_score and labels.
Args:
cls_score (torch.Tensor): The prediction with shape (N, C + 2).
labels (torch.Tensor): The learning label of the prediction.
Returns:
Dict [str, torch.Tensor]: The accuracy for objectness and classes,
respectively.
"""
pos_inds = labels < self.num_classes
obj_labels = (labels == self.num_classes).long()
cls_score_classes, cls_score_objectness = self._split_cls_score(
cls_score)
acc_objectness = accuracy(cls_score_objectness, obj_labels)
acc_classes = accuracy(cls_score_classes[pos_inds], labels[pos_inds])
acc = dict()
acc['acc_objectness'] = acc_objectness
acc['acc_classes'] = acc_classes
return acc
def forward(self,
cls_score,
labels,
label_weights=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
cls_score (torch.Tensor): The prediction with shape (N, C + 2).
labels (torch.Tensor): The learning label of the prediction.
label_weights (torch.Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
            reduction_override (str, optional): The reduction method used to
                override the original reduction method of the loss.
                Options are "none", "mean" and "sum".
Returns:
torch.Tensor | Dict [str, torch.Tensor]:
if return_dict == False: The calculated loss |
if return_dict == True: The dict of calculated losses
for objectness and classes, respectively.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
assert cls_score.size(-1) == self.num_classes + 2
pos_inds = labels < self.num_classes
# 0 for pos, 1 for neg
obj_labels = (labels == self.num_classes).long()
# accumulate the samples for each category
unique_labels = labels.unique()
for u_l in unique_labels:
inds_ = labels == u_l.item()
self.cum_samples[u_l] += inds_.sum()
if label_weights is not None:
label_weights = label_weights.float()
else:
label_weights = labels.new_ones(labels.size(), dtype=torch.float)
cls_score_classes, cls_score_objectness = self._split_cls_score(
cls_score)
# calculate loss_cls_classes (only need pos samples)
if pos_inds.sum() > 0:
loss_cls_classes = self.loss_weight * self.cls_criterion(
cls_score_classes[pos_inds], labels[pos_inds],
label_weights[pos_inds], self.cum_samples[:self.num_classes],
self.num_classes, self.p, self.q, self.eps, reduction,
avg_factor)
else:
loss_cls_classes = cls_score_classes[pos_inds].sum()
# calculate loss_cls_objectness
loss_cls_objectness = self.loss_weight * cross_entropy(
cls_score_objectness, obj_labels, label_weights, reduction,
avg_factor)
if self.return_dict:
loss_cls = dict()
loss_cls['loss_cls_objectness'] = loss_cls_objectness
loss_cls['loss_cls_classes'] = loss_cls_classes
else:
loss_cls = loss_cls_classes + loss_cls_objectness
return loss_cls
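# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file).
# `_demo_seesaw_loss` and the shapes/values are invented. The classifier must
# output num_classes + 2 channels (classes plus a 2-way objectness branch),
# and `label == num_classes` marks background.
def _demo_seesaw_loss():
    import torch
    criterion = SeesawLoss(p=0.8, q=2.0, num_classes=4)
    cls_score = torch.randn(5, 6)           # 4 classes + 2 objectness
    labels = torch.tensor([0, 1, 4, 2, 4])  # 4 == background
    return criterion(cls_score, labels)     # dict with the two loss terms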
# File: mmdetection-master/mmdet/models/losses/smooth_l1_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
import torch.nn as nn
from ..builder import LOSSES
from .utils import weighted_loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def smooth_l1_loss(pred, target, beta=1.0):
"""Smooth L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
Returns:
torch.Tensor: Calculated loss
"""
assert beta > 0
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
diff = torch.abs(pred - target)
loss = torch.where(diff < beta, 0.5 * diff * diff / beta,
diff - 0.5 * beta)
return loss
@mmcv.jit(derivate=True, coderize=True)
@weighted_loss
def l1_loss(pred, target):
"""L1 loss.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
Returns:
torch.Tensor: Calculated loss
"""
if target.numel() == 0:
return pred.sum() * 0
assert pred.size() == target.size()
loss = torch.abs(pred - target)
return loss
@LOSSES.register_module()
class SmoothL1Loss(nn.Module):
"""Smooth L1 loss.
Args:
beta (float, optional): The threshold in the piecewise function.
Defaults to 1.0.
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum". Defaults to "mean".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, beta=1.0, reduction='mean', loss_weight=1.0):
super(SmoothL1Loss, self).__init__()
self.beta = beta
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None,
**kwargs):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * smooth_l1_loss(
pred,
target,
weight,
beta=self.beta,
reduction=reduction,
avg_factor=avg_factor,
**kwargs)
return loss_bbox
@LOSSES.register_module()
class L1Loss(nn.Module):
"""L1 loss.
Args:
reduction (str, optional): The method to reduce the loss.
Options are "none", "mean" and "sum".
loss_weight (float, optional): The weight of loss.
"""
def __init__(self, reduction='mean', loss_weight=1.0):
super(L1Loss, self).__init__()
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Defaults to None.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
loss_bbox = self.loss_weight * l1_loss(
pred, target, weight, reduction=reduction, avg_factor=avg_factor)
return loss_bbox
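# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file).
# `_demo_l1_losses` and the shapes/values are invented; both losses operate on
# same-shape tensors such as encoded box deltas.
def _demo_l1_losses():
    import torch
    deltas_pred = torch.randn(8, 4)
    deltas_target = torch.randn(8, 4)
    loss_smooth = SmoothL1Loss(beta=1.0)(deltas_pred, deltas_target)
    loss_l1 = L1Loss()(deltas_pred, deltas_target)
    return loss_smooth, loss_l1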
# File: mmdetection-master/mmdet/models/losses/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import functools
import mmcv
import torch
import torch.nn.functional as F
def reduce_loss(loss, reduction):
"""Reduce loss as specified.
Args:
loss (Tensor): Elementwise loss tensor.
reduction (str): Options are "none", "mean" and "sum".
Return:
Tensor: Reduced loss tensor.
"""
reduction_enum = F._Reduction.get_enum(reduction)
# none: 0, elementwise_mean:1, sum: 2
if reduction_enum == 0:
return loss
elif reduction_enum == 1:
return loss.mean()
elif reduction_enum == 2:
return loss.sum()
@mmcv.jit(derivate=True, coderize=True)
def weight_reduce_loss(loss, weight=None, reduction='mean', avg_factor=None):
"""Apply element-wise weight and reduce loss.
Args:
loss (Tensor): Element-wise loss.
weight (Tensor): Element-wise weights.
reduction (str): Same as built-in losses of PyTorch.
avg_factor (float): Average factor when computing the mean of losses.
Returns:
Tensor: Processed loss values.
"""
# if weight is specified, apply element-wise weight
if weight is not None:
loss = loss * weight
# if avg_factor is not specified, just reduce the loss
if avg_factor is None:
loss = reduce_loss(loss, reduction)
else:
# if reduction is mean, then average the loss by avg_factor
if reduction == 'mean':
# Avoid causing ZeroDivisionError when avg_factor is 0.0,
# i.e., all labels of an image belong to ignore index.
eps = torch.finfo(torch.float32).eps
loss = loss.sum() / (avg_factor + eps)
# if reduction is 'none', then do nothing, otherwise raise an error
elif reduction != 'none':
raise ValueError('avg_factor can not be used with reduction="sum"')
return loss
def weighted_loss(loss_func):
"""Create a weighted version of a given loss function.
To use this decorator, the loss function must have the signature like
`loss_func(pred, target, **kwargs)`. The function only needs to compute
element-wise loss without any reduction. This decorator will add weight
and reduction arguments to the function. The decorated function will have
the signature like `loss_func(pred, target, weight=None, reduction='mean',
avg_factor=None, **kwargs)`.
:Example:
>>> import torch
>>> @weighted_loss
>>> def l1_loss(pred, target):
>>> return (pred - target).abs()
>>> pred = torch.Tensor([0, 2, 3])
>>> target = torch.Tensor([1, 1, 1])
>>> weight = torch.Tensor([1, 0, 1])
>>> l1_loss(pred, target)
tensor(1.3333)
>>> l1_loss(pred, target, weight)
tensor(1.)
>>> l1_loss(pred, target, reduction='none')
tensor([1., 1., 2.])
>>> l1_loss(pred, target, weight, avg_factor=2)
tensor(1.5000)
"""
@functools.wraps(loss_func)
def wrapper(pred,
target,
weight=None,
reduction='mean',
avg_factor=None,
**kwargs):
# get element-wise loss
loss = loss_func(pred, target, **kwargs)
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
return wrapper
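# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the upstream file).
# `_demo_weighted_loss` and `squared_error` are invented names that show how
# the decorator adds `weight`, `reduction` and `avg_factor` handling to a
# purely element-wise loss.
def _demo_weighted_loss():
    import torch
    @weighted_loss
    def squared_error(pred, target):
        return (pred - target).pow(2)
    pred = torch.tensor([0., 2., 3.])
    target = torch.ones(3)
    weight = torch.tensor([1., 0., 1.])
    # ((0 - 1)^2 * 1 + (3 - 1)^2 * 1) / 2 = 2.5
    return squared_error(pred, target, weight, avg_factor=2)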
# File: mmdetection-master/mmdet/models/losses/varifocal_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
@mmcv.jit(derivate=True, coderize=True)
def varifocal_loss(pred,
target,
weight=None,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
reduction='mean',
avg_factor=None):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the
number of classes
target (torch.Tensor): The learning target of the iou-aware
classification score with shape (N, C), C is the number of classes.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal Loss.
Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive example with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
"""
# pred and target should be of the same size
assert pred.size() == target.size()
pred_sigmoid = pred.sigmoid()
target = target.type_as(pred)
if iou_weighted:
focal_weight = target * (target > 0.0).float() + \
alpha * (pred_sigmoid - target).abs().pow(gamma) * \
(target <= 0.0).float()
else:
focal_weight = (target > 0.0).float() + \
alpha * (pred_sigmoid - target).abs().pow(gamma) * \
(target <= 0.0).float()
loss = F.binary_cross_entropy_with_logits(
pred, target, reduction='none') * focal_weight
loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
return loss
@LOSSES.register_module()
class VarifocalLoss(nn.Module):
def __init__(self,
use_sigmoid=True,
alpha=0.75,
gamma=2.0,
iou_weighted=True,
reduction='mean',
loss_weight=1.0):
"""`Varifocal Loss <https://arxiv.org/abs/2008.13367>`_
Args:
            use_sigmoid (bool, optional): Whether the prediction uses
                sigmoid or softmax. Defaults to True.
alpha (float, optional): A balance factor for the negative part of
Varifocal Loss, which is different from the alpha of Focal
Loss. Defaults to 0.75.
gamma (float, optional): The gamma for calculating the modulating
factor. Defaults to 2.0.
iou_weighted (bool, optional): Whether to weight the loss of the
positive examples with the iou target. Defaults to True.
reduction (str, optional): The method used to reduce the loss into
a scalar. Defaults to 'mean'. Options are "none", "mean" and
"sum".
loss_weight (float, optional): Weight of loss. Defaults to 1.0.
"""
super(VarifocalLoss, self).__init__()
assert use_sigmoid is True, \
'Only sigmoid varifocal loss supported now.'
assert alpha >= 0.0
self.use_sigmoid = use_sigmoid
self.alpha = alpha
self.gamma = gamma
self.iou_weighted = iou_weighted
self.reduction = reduction
self.loss_weight = loss_weight
def forward(self,
pred,
target,
weight=None,
avg_factor=None,
reduction_override=None):
"""Forward function.
Args:
pred (torch.Tensor): The prediction.
target (torch.Tensor): The learning target of the prediction.
weight (torch.Tensor, optional): The weight of loss for each
prediction. Defaults to None.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The reduction method used to
override the original reduction method of the loss.
Options are "none", "mean" and "sum".
Returns:
torch.Tensor: The calculated loss
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if self.use_sigmoid:
loss_cls = self.loss_weight * varifocal_loss(
pred,
target,
weight,
alpha=self.alpha,
gamma=self.gamma,
iou_weighted=self.iou_weighted,
reduction=reduction,
avg_factor=avg_factor)
else:
raise NotImplementedError
return loss_cls
| 5,365 | 38.748148 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/necks/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .bfp import BFP
from .channel_mapper import ChannelMapper
from .ct_resnet_neck import CTResNetNeck
from .dilated_encoder import DilatedEncoder
from .dyhead import DyHead
from .fpg import FPG
from .fpn import FPN
from .fpn_carafe import FPN_CARAFE
from .hrfpn import HRFPN
from .nas_fpn import NASFPN
from .nasfcos_fpn import NASFCOS_FPN
from .pafpn import PAFPN
from .rfp import RFP
from .ssd_neck import SSDNeck
from .yolo_neck import YOLOV3Neck
from .yolox_pafpn import YOLOXPAFPN
__all__ = [
'FPN', 'BFP', 'ChannelMapper', 'HRFPN', 'NASFPN', 'FPN_CARAFE', 'PAFPN',
'NASFCOS_FPN', 'RFP', 'YOLOV3Neck', 'FPG', 'DilatedEncoder',
'CTResNetNeck', 'SSDNeck', 'YOLOXPAFPN', 'DyHead'
]
| 747 | 30.166667 | 76 | py |
mmdetection | mmdetection-master/mmdet/models/necks/bfp.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.cnn.bricks import NonLocal2d
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class BFP(BaseModule):
"""BFP (Balanced Feature Pyramids)
BFP takes multi-level features as inputs and gather them into a single one,
then refine the gathered feature and scatter the refined results to
multi-level features. This module is used in Libra R-CNN (CVPR 2019), see
the paper `Libra R-CNN: Towards Balanced Learning for Object Detection
<https://arxiv.org/abs/1904.02701>`_ for details.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
num_levels (int): Number of input feature levels.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
refine_level (int): Index of integration and refine level of BSF in
multi-level features from bottom to top.
refine_type (str): Type of the refine op, currently support
[None, 'conv', 'non_local'].
init_cfg (dict or list[dict], optional): Initialization config dict.
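    Example:
        >>> # A minimal usage sketch; the channel number and the feature map
        >>> # sizes below are illustrative, not required values.
        >>> import torch
        >>> in_channels = 11
        >>> num_levels = 4
        >>> scales = [40, 20, 10, 5]
        >>> inputs = [torch.rand(1, in_channels, s, s) for s in scales]
        >>> self = BFP(in_channels, num_levels).eval()
        >>> outputs = self.forward(inputs)
        >>> # the refined outputs keep the resolution of each input level
        >>> [out.shape[-1] for out in outputs]
        [40, 20, 10, 5]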
"""
def __init__(self,
in_channels,
num_levels,
refine_level=2,
refine_type=None,
conv_cfg=None,
norm_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(BFP, self).__init__(init_cfg)
assert refine_type in [None, 'conv', 'non_local']
self.in_channels = in_channels
self.num_levels = num_levels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.refine_level = refine_level
self.refine_type = refine_type
assert 0 <= self.refine_level < self.num_levels
if self.refine_type == 'conv':
self.refine = ConvModule(
self.in_channels,
self.in_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
elif self.refine_type == 'non_local':
self.refine = NonLocal2d(
self.in_channels,
reduction=1,
use_scale=False,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg)
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_levels
# step 1: gather multi-level features by resize and average
feats = []
gather_size = inputs[self.refine_level].size()[2:]
for i in range(self.num_levels):
if i < self.refine_level:
gathered = F.adaptive_max_pool2d(
inputs[i], output_size=gather_size)
else:
gathered = F.interpolate(
inputs[i], size=gather_size, mode='nearest')
feats.append(gathered)
bsf = sum(feats) / len(feats)
# step 2: refine gathered features
if self.refine_type is not None:
bsf = self.refine(bsf)
# step 3: scatter refined features to multi-levels by a residual path
outs = []
for i in range(self.num_levels):
out_size = inputs[i].size()[2:]
if i < self.refine_level:
residual = F.interpolate(bsf, size=out_size, mode='nearest')
else:
residual = F.adaptive_max_pool2d(bsf, output_size=out_size)
outs.append(residual + inputs[i])
return tuple(outs)
| 3,777 | 35.679612 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/necks/channel_mapper.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class ChannelMapper(BaseModule):
r"""Channel Mapper to reduce/increase channels of backbone features.
This is used to reduce/increase channels of backbone features.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
kernel_size (int, optional): kernel_size for reducing channels (used
at each scale). Default: 3.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: None.
act_cfg (dict, optional): Config dict for activation layer in
ConvModule. Default: dict(type='ReLU').
num_outs (int, optional): Number of output feature maps. There
would be extra_convs when num_outs larger than the length
of in_channels.
init_cfg (dict or list[dict], optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = ChannelMapper(in_channels, 11, 3).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(self,
in_channels,
out_channels,
kernel_size=3,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
num_outs=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(ChannelMapper, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.extra_convs = None
if num_outs is None:
num_outs = len(in_channels)
self.convs = nn.ModuleList()
for in_channel in in_channels:
self.convs.append(
ConvModule(
in_channel,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
if num_outs > len(in_channels):
self.extra_convs = nn.ModuleList()
for i in range(len(in_channels), num_outs):
if i == len(in_channels):
in_channel = in_channels[-1]
else:
in_channel = out_channels
self.extra_convs.append(
ConvModule(
in_channel,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.convs)
outs = [self.convs[i](inputs[i]) for i in range(len(inputs))]
if self.extra_convs:
for i in range(len(self.extra_convs)):
if i == 0:
outs.append(self.extra_convs[0](inputs[-1]))
else:
outs.append(self.extra_convs[i](outs[-1]))
return tuple(outs)
| 3,975 | 38.366337 | 77 | py |
mmdetection | mmdetection-master/mmdet/models/necks/ct_resnet_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16
from mmdet.models.builder import NECKS
@NECKS.register_module()
class CTResNetNeck(BaseModule):
"""The neck used in `CenterNet <https://arxiv.org/abs/1904.07850>`_ for
object classification and box regression.
Args:
in_channel (int): Number of input channels.
num_deconv_filters (tuple[int]): Number of filters per stage.
num_deconv_kernels (tuple[int]): Number of kernels per stage.
use_dcn (bool): If True, use DCNv2. Default: True.
init_cfg (dict or list[dict], optional): Initialization config dict.
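    Example:
        >>> # A minimal usage sketch with ``use_dcn=False`` so it runs without
        >>> # the DCNv2 op; channel numbers and sizes are illustrative.
        >>> import torch
        >>> self = CTResNetNeck(
        ...     in_channel=16,
        ...     num_deconv_filters=(8, 4),
        ...     num_deconv_kernels=(4, 4),
        ...     use_dcn=False).eval()
        >>> inputs = (torch.rand(1, 16, 4, 4), )
        >>> outputs = self.forward(inputs)
        >>> # the last input feature map is upsampled 2x by each deconv stage
        >>> outputs[0].shape
        torch.Size([1, 4, 16, 16])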
"""
def __init__(self,
in_channel,
num_deconv_filters,
num_deconv_kernels,
use_dcn=True,
init_cfg=None):
super(CTResNetNeck, self).__init__(init_cfg)
assert len(num_deconv_filters) == len(num_deconv_kernels)
self.fp16_enabled = False
self.use_dcn = use_dcn
self.in_channel = in_channel
self.deconv_layers = self._make_deconv_layer(num_deconv_filters,
num_deconv_kernels)
def _make_deconv_layer(self, num_deconv_filters, num_deconv_kernels):
"""use deconv layers to upsample backbone's output."""
layers = []
for i in range(len(num_deconv_filters)):
feat_channel = num_deconv_filters[i]
conv_module = ConvModule(
self.in_channel,
feat_channel,
3,
padding=1,
conv_cfg=dict(type='DCNv2') if self.use_dcn else None,
norm_cfg=dict(type='BN'))
layers.append(conv_module)
upsample_module = ConvModule(
feat_channel,
feat_channel,
num_deconv_kernels[i],
stride=2,
padding=1,
conv_cfg=dict(type='deconv'),
norm_cfg=dict(type='BN'))
layers.append(upsample_module)
self.in_channel = feat_channel
return nn.Sequential(*layers)
def init_weights(self):
for m in self.modules():
if isinstance(m, nn.ConvTranspose2d):
# In order to be consistent with the source code,
# reset the ConvTranspose2d initialization parameters
m.reset_parameters()
# Simulated bilinear upsampling kernel
w = m.weight.data
f = math.ceil(w.size(2) / 2)
c = (2 * f - 1 - f % 2) / (2. * f)
for i in range(w.size(2)):
for j in range(w.size(3)):
w[0, 0, i, j] = \
(1 - math.fabs(i / f - c)) * (
1 - math.fabs(j / f - c))
for c in range(1, w.size(0)):
w[c, 0, :, :] = w[0, 0, :, :]
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# self.use_dcn is False
elif not self.use_dcn and isinstance(m, nn.Conv2d):
# In order to be consistent with the source code,
# reset the Conv2d initialization parameters
m.reset_parameters()
@auto_fp16()
def forward(self, inputs):
assert isinstance(inputs, (list, tuple))
outs = self.deconv_layers(inputs[-1])
return outs,
| 3,642 | 37.347368 | 77 | py |
mmdetection | mmdetection-master/mmdet/models/necks/dilated_encoder.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import (ConvModule, caffe2_xavier_init, constant_init, is_norm,
normal_init)
from torch.nn import BatchNorm2d
from ..builder import NECKS
class Bottleneck(nn.Module):
"""Bottleneck block for DilatedEncoder used in `YOLOF.
<https://arxiv.org/abs/2103.09460>`.
The Bottleneck contains three ConvLayers and one residual connection.
Args:
in_channels (int): The number of input channels.
mid_channels (int): The number of middle output channels.
dilation (int): Dilation rate.
norm_cfg (dict): Dictionary to construct and config norm layer.
"""
def __init__(self,
in_channels,
mid_channels,
dilation,
norm_cfg=dict(type='BN', requires_grad=True)):
super(Bottleneck, self).__init__()
self.conv1 = ConvModule(
in_channels, mid_channels, 1, norm_cfg=norm_cfg)
self.conv2 = ConvModule(
mid_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
norm_cfg=norm_cfg)
self.conv3 = ConvModule(
mid_channels, in_channels, 1, norm_cfg=norm_cfg)
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.conv2(out)
out = self.conv3(out)
out = out + identity
return out
@NECKS.register_module()
class DilatedEncoder(nn.Module):
"""Dilated Encoder for YOLOF <https://arxiv.org/abs/2103.09460>`.
This module contains two types of components:
- the original FPN lateral convolution layer and fpn convolution layer,
which are 1x1 conv + 3x3 conv
- the dilated residual block
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
block_mid_channels (int): The number of middle block output channels
num_residual_blocks (int): The number of residual blocks.
block_dilations (list): The list of residual blocks dilation.
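    Example:
        >>> # A minimal usage sketch; all channel numbers, dilations and the
        >>> # feature map size are illustrative.
        >>> import torch
        >>> self = DilatedEncoder(
        ...     in_channels=16,
        ...     out_channels=8,
        ...     block_mid_channels=4,
        ...     num_residual_blocks=2,
        ...     block_dilations=[2, 4]).eval()
        >>> inputs = (torch.rand(1, 16, 10, 10), )
        >>> outputs = self.forward(inputs)
        >>> outputs[0].shape
        torch.Size([1, 8, 10, 10])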
"""
def __init__(self, in_channels, out_channels, block_mid_channels,
num_residual_blocks, block_dilations):
super(DilatedEncoder, self).__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.block_mid_channels = block_mid_channels
self.num_residual_blocks = num_residual_blocks
self.block_dilations = block_dilations
self._init_layers()
def _init_layers(self):
self.lateral_conv = nn.Conv2d(
self.in_channels, self.out_channels, kernel_size=1)
self.lateral_norm = BatchNorm2d(self.out_channels)
self.fpn_conv = nn.Conv2d(
self.out_channels, self.out_channels, kernel_size=3, padding=1)
self.fpn_norm = BatchNorm2d(self.out_channels)
encoder_blocks = []
for i in range(self.num_residual_blocks):
dilation = self.block_dilations[i]
encoder_blocks.append(
Bottleneck(
self.out_channels,
self.block_mid_channels,
dilation=dilation))
self.dilated_encoder_blocks = nn.Sequential(*encoder_blocks)
def init_weights(self):
caffe2_xavier_init(self.lateral_conv)
caffe2_xavier_init(self.fpn_conv)
for m in [self.lateral_norm, self.fpn_norm]:
constant_init(m, 1)
for m in self.dilated_encoder_blocks.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, mean=0, std=0.01)
if is_norm(m):
constant_init(m, 1)
def forward(self, feature):
out = self.lateral_norm(self.lateral_conv(feature[-1]))
out = self.fpn_norm(self.fpn_conv(out))
return self.dilated_encoder_blocks(out),
| 3,958 | 34.990909 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/necks/dyhead.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (build_activation_layer, build_norm_layer, constant_init,
normal_init)
from mmcv.ops.modulated_deform_conv import ModulatedDeformConv2d
from mmcv.runner import BaseModule
from ..builder import NECKS
from ..utils import DyReLU
# Reference:
# https://github.com/microsoft/DynamicHead
# https://github.com/jshilong/SEPC
class DyDCNv2(nn.Module):
"""ModulatedDeformConv2d with normalization layer used in DyHead.
This module cannot be configured with `conv_cfg=dict(type='DCNv2')`
because DyHead calculates offset and mask from middle-level feature.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
stride (int | tuple[int], optional): Stride of the convolution.
Default: 1.
norm_cfg (dict, optional): Config dict for normalization layer.
Default: dict(type='GN', num_groups=16, requires_grad=True).
"""
def __init__(self,
in_channels,
out_channels,
stride=1,
norm_cfg=dict(type='GN', num_groups=16, requires_grad=True)):
super().__init__()
self.with_norm = norm_cfg is not None
bias = not self.with_norm
self.conv = ModulatedDeformConv2d(
in_channels, out_channels, 3, stride=stride, padding=1, bias=bias)
if self.with_norm:
self.norm = build_norm_layer(norm_cfg, out_channels)[1]
def forward(self, x, offset, mask):
"""Forward function."""
x = self.conv(x.contiguous(), offset.contiguous(), mask)
if self.with_norm:
x = self.norm(x)
return x
class DyHeadBlock(nn.Module):
"""DyHead Block with three types of attention.
HSigmoid arguments in default act_cfg follow official code, not paper.
https://github.com/microsoft/DynamicHead/blob/master/dyhead/dyrelu.py
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
zero_init_offset (bool, optional): Whether to use zero init for
`spatial_conv_offset`. Default: True.
act_cfg (dict, optional): Config dict for the last activation layer of
scale-aware attention. Default: dict(type='HSigmoid', bias=3.0,
divisor=6.0).
"""
def __init__(self,
in_channels,
out_channels,
zero_init_offset=True,
act_cfg=dict(type='HSigmoid', bias=3.0, divisor=6.0)):
super().__init__()
self.zero_init_offset = zero_init_offset
# (offset_x, offset_y, mask) * kernel_size_y * kernel_size_x
self.offset_and_mask_dim = 3 * 3 * 3
self.offset_dim = 2 * 3 * 3
self.spatial_conv_high = DyDCNv2(in_channels, out_channels)
self.spatial_conv_mid = DyDCNv2(in_channels, out_channels)
self.spatial_conv_low = DyDCNv2(in_channels, out_channels, stride=2)
self.spatial_conv_offset = nn.Conv2d(
in_channels, self.offset_and_mask_dim, 3, padding=1)
self.scale_attn_module = nn.Sequential(
nn.AdaptiveAvgPool2d(1), nn.Conv2d(out_channels, 1, 1),
nn.ReLU(inplace=True), build_activation_layer(act_cfg))
self.task_attn_module = DyReLU(out_channels)
self._init_weights()
def _init_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
normal_init(m, 0, 0.01)
if self.zero_init_offset:
constant_init(self.spatial_conv_offset, 0)
def forward(self, x):
"""Forward function."""
outs = []
for level in range(len(x)):
# calculate offset and mask of DCNv2 from middle-level feature
offset_and_mask = self.spatial_conv_offset(x[level])
offset = offset_and_mask[:, :self.offset_dim, :, :]
mask = offset_and_mask[:, self.offset_dim:, :, :].sigmoid()
mid_feat = self.spatial_conv_mid(x[level], offset, mask)
sum_feat = mid_feat * self.scale_attn_module(mid_feat)
summed_levels = 1
if level > 0:
low_feat = self.spatial_conv_low(x[level - 1], offset, mask)
sum_feat = sum_feat + \
low_feat * self.scale_attn_module(low_feat)
summed_levels += 1
if level < len(x) - 1:
# this upsample order is weird, but faster than natural order
# https://github.com/microsoft/DynamicHead/issues/25
high_feat = F.interpolate(
self.spatial_conv_high(x[level + 1], offset, mask),
size=x[level].shape[-2:],
mode='bilinear',
align_corners=True)
sum_feat = sum_feat + high_feat * \
self.scale_attn_module(high_feat)
summed_levels += 1
outs.append(self.task_attn_module(sum_feat / summed_levels))
return outs
@NECKS.register_module()
class DyHead(BaseModule):
"""DyHead neck consisting of multiple DyHead Blocks.
See `Dynamic Head: Unifying Object Detection Heads with Attentions
<https://arxiv.org/abs/2106.08322>`_ for details.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
num_blocks (int, optional): Number of DyHead Blocks. Default: 6.
zero_init_offset (bool, optional): Whether to use zero init for
`spatial_conv_offset`. Default: True.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
num_blocks=6,
zero_init_offset=True,
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_blocks = num_blocks
self.zero_init_offset = zero_init_offset
dyhead_blocks = []
for i in range(num_blocks):
in_channels = self.in_channels if i == 0 else self.out_channels
dyhead_blocks.append(
DyHeadBlock(
in_channels,
self.out_channels,
zero_init_offset=zero_init_offset))
self.dyhead_blocks = nn.Sequential(*dyhead_blocks)
def forward(self, inputs):
"""Forward function."""
assert isinstance(inputs, (tuple, list))
outs = self.dyhead_blocks(inputs)
return tuple(outs)
| 6,949 | 38.265537 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/necks/fpg.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
class Transition(BaseModule):
"""Base class for transition.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
"""
def __init__(self, in_channels, out_channels, init_cfg=None):
super().__init__(init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
    def forward(self, x):
        pass
class UpInterpolationConv(Transition):
"""A transition used for up-sampling.
Up-sample the input by interpolation then refines the feature by
a convolution layer.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
scale_factor (int): Up-sampling factor. Default: 2.
mode (int): Interpolation mode. Default: nearest.
align_corners (bool): Whether align corners when interpolation.
Default: None.
kernel_size (int): Kernel size for the conv. Default: 3.
"""
def __init__(self,
in_channels,
out_channels,
scale_factor=2,
mode='nearest',
align_corners=None,
kernel_size=3,
init_cfg=None,
**kwargs):
super().__init__(in_channels, out_channels, init_cfg)
self.mode = mode
self.scale_factor = scale_factor
self.align_corners = align_corners
self.conv = ConvModule(
in_channels,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
**kwargs)
def forward(self, x):
x = F.interpolate(
x,
scale_factor=self.scale_factor,
mode=self.mode,
align_corners=self.align_corners)
x = self.conv(x)
return x
class LastConv(Transition):
"""A transition used for refining the output of the last stage.
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of output channels.
num_inputs (int): Number of inputs of the FPN features.
kernel_size (int): Kernel size for the conv. Default: 3.
"""
def __init__(self,
in_channels,
out_channels,
num_inputs,
kernel_size=3,
init_cfg=None,
**kwargs):
super().__init__(in_channels, out_channels, init_cfg)
self.num_inputs = num_inputs
self.conv_out = ConvModule(
in_channels,
out_channels,
kernel_size,
padding=(kernel_size - 1) // 2,
**kwargs)
def forward(self, inputs):
assert len(inputs) == self.num_inputs
return self.conv_out(inputs[-1])
@NECKS.register_module()
class FPG(BaseModule):
"""FPG.
Implementation of `Feature Pyramid Grids (FPG)
<https://arxiv.org/abs/2004.03580>`_.
This implementation only gives the basic structure stated in the paper.
    Users can implement different types of transitions to fully explore the
    potential power of the FPG structure.
Args:
in_channels (int): Number of input channels (feature maps of all levels
should have the same channels).
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
stack_times (int): The number of times the pyramid architecture will
be stacked.
paths (list[str]): Specify the path order of each stack level.
Each element in the list should be either 'bu' (bottom-up) or
'td' (top-down).
inter_channels (int): Number of inter channels.
        same_up_trans (dict): Same-pathway transition used in bottom-up
            paths, going from one stage to the next higher stage.
        same_down_trans (dict): Same-pathway transition used in top-down
            paths, going from one stage to the next lower stage.
        across_lateral_trans (dict): Across-pathway same-stage connection.
        across_down_trans (dict): Across-pathway connection that fuses the
            feature from the next higher stage.
        across_up_trans (dict): Across-pathway connection that fuses the
            feature from the next lower stage.
across_skip_trans (dict): Across-pathway skip connection.
output_trans (dict): Transition that trans the output of the
last stage.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): Whether to use convolution layers (instead
            of max pooling) to generate extra levels on top of the original
            feature maps. Defaults to False.
norm_cfg (dict): Config dict for normalization layer. Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
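    Example:
        >>> # A minimal usage sketch. The transition configs below are chosen
        >>> # so that the fused feature sizes stay consistent; all channel
        >>> # numbers and scales are illustrative.
        >>> import torch
        >>> in_channels = [8, 16, 32]
        >>> scales = [32, 16, 8]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = FPG(in_channels,
        ...            out_channels=8,
        ...            num_outs=5,
        ...            stack_times=2,
        ...            paths=['bu', 'td'],
        ...            across_down_trans=dict(
        ...                type='interpolation_conv', mode='nearest',
        ...                kernel_size=3),
        ...            across_skip_trans=None,
        ...            skip_inds=[[], [], [], [], []]).eval()
        >>> outputs = self.forward(inputs)
        >>> [out.shape[-1] for out in outputs]
        [32, 16, 8, 4, 2]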
"""
transition_types = {
'conv': ConvModule,
'interpolation_conv': UpInterpolationConv,
'last_conv': LastConv,
}
def __init__(self,
in_channels,
out_channels,
num_outs,
stack_times,
paths,
inter_channels=None,
same_down_trans=None,
same_up_trans=dict(
type='conv', kernel_size=3, stride=2, padding=1),
across_lateral_trans=dict(type='conv', kernel_size=1),
across_down_trans=dict(type='conv', kernel_size=3),
across_up_trans=None,
across_skip_trans=dict(type='identity'),
output_trans=dict(type='last_conv', kernel_size=3),
start_level=0,
end_level=-1,
add_extra_convs=False,
norm_cfg=None,
skip_inds=None,
init_cfg=[
dict(type='Caffe2Xavier', layer='Conv2d'),
dict(
type='Constant',
layer=[
'_BatchNorm', '_InstanceNorm', 'GroupNorm',
'LayerNorm'
],
val=1.0)
]):
super(FPG, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
if inter_channels is None:
self.inter_channels = [out_channels for _ in range(num_outs)]
elif isinstance(inter_channels, int):
self.inter_channels = [inter_channels for _ in range(num_outs)]
else:
assert isinstance(inter_channels, list)
assert len(inter_channels) == num_outs
self.inter_channels = inter_channels
self.stack_times = stack_times
self.paths = paths
assert isinstance(paths, list) and len(paths) == stack_times
for d in paths:
assert d in ('bu', 'td')
self.same_down_trans = same_down_trans
self.same_up_trans = same_up_trans
self.across_lateral_trans = across_lateral_trans
self.across_down_trans = across_down_trans
self.across_up_trans = across_up_trans
self.output_trans = output_trans
self.across_skip_trans = across_skip_trans
self.with_bias = norm_cfg is None
# skip inds must be specified if across skip trans is not None
if self.across_skip_trans is not None:
            assert skip_inds is not None
self.skip_inds = skip_inds
assert len(self.skip_inds[0]) <= self.stack_times
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
# build lateral 1x1 convs to reduce channels
self.lateral_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = nn.Conv2d(self.in_channels[i],
self.inter_channels[i - self.start_level], 1)
self.lateral_convs.append(l_conv)
extra_levels = num_outs - self.backbone_end_level + self.start_level
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
if self.add_extra_convs:
fpn_idx = self.backbone_end_level - self.start_level + i
extra_conv = nn.Conv2d(
self.inter_channels[fpn_idx - 1],
self.inter_channels[fpn_idx],
3,
stride=2,
padding=1)
self.extra_downsamples.append(extra_conv)
else:
self.extra_downsamples.append(nn.MaxPool2d(1, stride=2))
self.fpn_transitions = nn.ModuleList() # stack times
for s in range(self.stack_times):
stage_trans = nn.ModuleList() # num of feature levels
for i in range(self.num_outs):
# same, across_lateral, across_down, across_up
trans = nn.ModuleDict()
if s in self.skip_inds[i]:
stage_trans.append(trans)
continue
                # build same-stage up trans (used in bottom-up paths)
if i == 0 or self.same_up_trans is None:
same_up_trans = None
else:
same_up_trans = self.build_trans(
self.same_up_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['same_up'] = same_up_trans
                # build same-stage down trans (used in top-down paths)
if i == self.num_outs - 1 or self.same_down_trans is None:
same_down_trans = None
else:
same_down_trans = self.build_trans(
self.same_down_trans, self.inter_channels[i + 1],
self.inter_channels[i])
trans['same_down'] = same_down_trans
# build across lateral trans
across_lateral_trans = self.build_trans(
self.across_lateral_trans, self.inter_channels[i],
self.inter_channels[i])
trans['across_lateral'] = across_lateral_trans
# build across down trans
if i == self.num_outs - 1 or self.across_down_trans is None:
across_down_trans = None
else:
across_down_trans = self.build_trans(
self.across_down_trans, self.inter_channels[i + 1],
self.inter_channels[i])
trans['across_down'] = across_down_trans
# build across up trans
if i == 0 or self.across_up_trans is None:
across_up_trans = None
else:
across_up_trans = self.build_trans(
self.across_up_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['across_up'] = across_up_trans
                # build across skip trans
                if self.across_skip_trans is None:
across_skip_trans = None
else:
across_skip_trans = self.build_trans(
self.across_skip_trans, self.inter_channels[i - 1],
self.inter_channels[i])
trans['across_skip'] = across_skip_trans
stage_trans.append(trans)
self.fpn_transitions.append(stage_trans)
self.output_transition = nn.ModuleList() # output levels
for i in range(self.num_outs):
trans = self.build_trans(
self.output_trans,
self.inter_channels[i],
self.out_channels,
num_inputs=self.stack_times + 1)
self.output_transition.append(trans)
self.relu = nn.ReLU(inplace=True)
def build_trans(self, cfg, in_channels, out_channels, **extra_args):
cfg_ = cfg.copy()
trans_type = cfg_.pop('type')
trans_cls = self.transition_types[trans_type]
return trans_cls(in_channels, out_channels, **cfg_, **extra_args)
def fuse(self, fuse_dict):
out = None
for item in fuse_dict.values():
if item is not None:
if out is None:
out = item
else:
out = out + item
return out
def forward(self, inputs):
assert len(inputs) == len(self.in_channels)
# build all levels from original feature maps
feats = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
for downsample in self.extra_downsamples:
feats.append(downsample(feats[-1]))
outs = [feats]
for i in range(self.stack_times):
current_outs = outs[-1]
next_outs = []
direction = self.paths[i]
for j in range(self.num_outs):
if i in self.skip_inds[j]:
next_outs.append(outs[-1][j])
continue
# feature level
if direction == 'td':
lvl = self.num_outs - j - 1
else:
lvl = j
# get transitions
if direction == 'td':
same_trans = self.fpn_transitions[i][lvl]['same_down']
else:
same_trans = self.fpn_transitions[i][lvl]['same_up']
across_lateral_trans = self.fpn_transitions[i][lvl][
'across_lateral']
across_down_trans = self.fpn_transitions[i][lvl]['across_down']
across_up_trans = self.fpn_transitions[i][lvl]['across_up']
across_skip_trans = self.fpn_transitions[i][lvl]['across_skip']
# init output
to_fuse = dict(
same=None, lateral=None, across_up=None, across_down=None)
# same downsample/upsample
if same_trans is not None:
to_fuse['same'] = same_trans(next_outs[-1])
# across lateral
if across_lateral_trans is not None:
to_fuse['lateral'] = across_lateral_trans(
current_outs[lvl])
# across downsample
if lvl > 0 and across_up_trans is not None:
to_fuse['across_up'] = across_up_trans(current_outs[lvl -
1])
# across upsample
if (lvl < self.num_outs - 1 and across_down_trans is not None):
to_fuse['across_down'] = across_down_trans(
current_outs[lvl + 1])
if across_skip_trans is not None:
to_fuse['across_skip'] = across_skip_trans(outs[0][lvl])
x = self.fuse(to_fuse)
next_outs.append(x)
if direction == 'td':
outs.append(next_outs[::-1])
else:
outs.append(next_outs)
# output trans
final_outs = []
for i in range(self.num_outs):
lvl_out_list = []
for s in range(len(outs)):
lvl_out_list.append(outs[s][i])
lvl_out = self.output_transition[i](lvl_out_list)
final_outs.append(lvl_out)
return final_outs
| 16,387 | 39.265356 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/necks/fpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, auto_fp16
from ..builder import NECKS
@NECKS.register_module()
class FPN(BaseModule):
r"""Feature Pyramid Network.
This is an implementation of paper `Feature Pyramid Networks for Object
Detection <https://arxiv.org/abs/1612.03144>`_.
Args:
in_channels (list[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale).
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
add_extra_convs (bool | str): If bool, it decides whether to add conv
layers on top of the original feature maps. Default to False.
If True, it is equivalent to `add_extra_convs='on_input'`.
If str, it specifies the source feature map of the extra convs.
Only the following options are allowed
- 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- 'on_lateral': Last feature map after lateral convs.
- 'on_output': The last output feature map after fpn convs.
relu_before_extra_convs (bool): Whether to apply relu before the extra
conv. Default: False.
no_norm_on_lateral (bool): Whether to apply norm on lateral.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (dict): Config dict for activation layer in ConvModule.
Default: None.
upsample_cfg (dict): Config dict for interpolate layer.
Default: dict(mode='nearest').
init_cfg (dict or list[dict], optional): Initialization config dict.
Example:
>>> import torch
>>> in_channels = [2, 3, 5, 7]
>>> scales = [340, 170, 84, 43]
>>> inputs = [torch.rand(1, c, s, s)
... for c, s in zip(in_channels, scales)]
>>> self = FPN(in_channels, 11, len(in_channels)).eval()
>>> outputs = self.forward(inputs)
>>> for i in range(len(outputs)):
... print(f'outputs[{i}].shape = {outputs[i].shape}')
outputs[0].shape = torch.Size([1, 11, 340, 340])
outputs[1].shape = torch.Size([1, 11, 170, 170])
outputs[2].shape = torch.Size([1, 11, 84, 84])
outputs[3].shape = torch.Size([1, 11, 43, 43])
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
upsample_cfg=dict(mode='nearest'),
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(FPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.relu_before_extra_convs = relu_before_extra_convs
self.no_norm_on_lateral = no_norm_on_lateral
self.fp16_enabled = False
self.upsample_cfg = upsample_cfg.copy()
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
assert isinstance(add_extra_convs, (str, bool))
if isinstance(add_extra_convs, str):
# Extra_convs_source choices: 'on_input', 'on_lateral', 'on_output'
assert add_extra_convs in ('on_input', 'on_lateral', 'on_output')
elif add_extra_convs: # True
self.add_extra_convs = 'on_input'
self.lateral_convs = nn.ModuleList()
self.fpn_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg if not self.no_norm_on_lateral else None,
act_cfg=act_cfg,
inplace=False)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_levels = num_outs - self.backbone_end_level + self.start_level
if self.add_extra_convs and extra_levels >= 1:
for i in range(extra_levels):
if i == 0 and self.add_extra_convs == 'on_input':
in_channels = self.in_channels[self.backbone_end_level - 1]
else:
in_channels = out_channels
extra_fpn_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.fpn_convs.append(extra_fpn_conv)
@auto_fp16()
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
# In some cases, fixing `scale factor` (e.g. 2) is preferred, but
# it cannot co-exist with `size` in `F.interpolate`.
if 'scale_factor' in self.upsample_cfg:
# fix runtime error of "+=" inplace operation in PyTorch 1.10
laterals[i - 1] = laterals[i - 1] + F.interpolate(
laterals[i], **self.upsample_cfg)
else:
prev_shape = laterals[i - 1].shape[2:]
laterals[i - 1] = laterals[i - 1] + F.interpolate(
laterals[i], size=prev_shape, **self.upsample_cfg)
# build outputs
# part 1: from original levels
outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.add_extra_convs == 'on_input':
extra_source = inputs[self.backbone_end_level - 1]
elif self.add_extra_convs == 'on_lateral':
extra_source = laterals[-1]
elif self.add_extra_convs == 'on_output':
extra_source = outs[-1]
else:
raise NotImplementedError
outs.append(self.fpn_convs[used_backbone_levels](extra_source))
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
| 8,804 | 41.95122 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/necks/fpn_carafe.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule, build_upsample_layer, xavier_init
from mmcv.ops.carafe import CARAFEPack
from mmcv.runner import BaseModule, ModuleList
from ..builder import NECKS
@NECKS.register_module()
class FPN_CARAFE(BaseModule):
"""FPN_CARAFE is a more flexible implementation of FPN. It allows more
choice for upsample methods during the top-down pathway.
It can reproduce the performance of ICCV 2019 paper
CARAFE: Content-Aware ReAssembly of FEatures
Please refer to https://arxiv.org/abs/1905.02188 for more details.
Args:
in_channels (list[int]): Number of channels for each input feature map.
out_channels (int): Output channels of feature pyramids.
num_outs (int): Number of output stages.
start_level (int): Start level of feature pyramids.
(Default: 0)
end_level (int): End level of feature pyramids.
(Default: -1 indicates the last level).
norm_cfg (dict): Dictionary to construct and config norm layer.
        act_cfg (dict): Config dict for activation layer in ConvModule
            (Default: None indicates w/o activation).
        order (tuple[str]): Order of components in ConvModule.
upsample (str): Type of upsample layer.
upsample_cfg (dict): Dictionary to construct and config upsample layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
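    Example:
        >>> # A minimal usage sketch using a plain nearest-neighbour upsample
        >>> # so that the example does not need the CARAFE CUDA op; channel
        >>> # numbers and scales are illustrative.
        >>> import torch
        >>> in_channels = [2, 3, 5, 7]
        >>> scales = [64, 32, 16, 8]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = FPN_CARAFE(in_channels, 11, len(in_channels),
        ...                   upsample_cfg=dict(type='nearest')).eval()
        >>> outputs = self.forward(inputs)
        >>> [out.shape[-1] for out in outputs]
        [64, 32, 16, 8]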
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
norm_cfg=None,
act_cfg=None,
order=('conv', 'norm', 'act'),
upsample_cfg=dict(
type='carafe',
up_kernel=5,
up_group=1,
encoder_kernel=3,
encoder_dilation=1),
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(FPN_CARAFE, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.norm_cfg = norm_cfg
self.act_cfg = act_cfg
self.with_bias = norm_cfg is None
self.upsample_cfg = upsample_cfg.copy()
self.upsample = self.upsample_cfg.get('type')
self.relu = nn.ReLU(inplace=False)
self.order = order
assert order in [('conv', 'norm', 'act'), ('act', 'conv', 'norm')]
assert self.upsample in [
'nearest', 'bilinear', 'deconv', 'pixel_shuffle', 'carafe', None
]
if self.upsample in ['deconv', 'pixel_shuffle']:
assert hasattr(
self.upsample_cfg,
'upsample_kernel') and self.upsample_cfg.upsample_kernel > 0
self.upsample_kernel = self.upsample_cfg.pop('upsample_kernel')
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.lateral_convs = ModuleList()
self.fpn_convs = ModuleList()
self.upsample_modules = ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
norm_cfg=norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
if i != self.backbone_end_level - 1:
upsample_cfg_ = self.upsample_cfg.copy()
if self.upsample == 'deconv':
upsample_cfg_.update(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.upsample_kernel,
stride=2,
padding=(self.upsample_kernel - 1) // 2,
output_padding=(self.upsample_kernel - 1) // 2)
elif self.upsample == 'pixel_shuffle':
upsample_cfg_.update(
in_channels=out_channels,
out_channels=out_channels,
scale_factor=2,
upsample_kernel=self.upsample_kernel)
elif self.upsample == 'carafe':
upsample_cfg_.update(channels=out_channels, scale_factor=2)
else:
# suppress warnings
align_corners = (None
if self.upsample == 'nearest' else False)
upsample_cfg_.update(
scale_factor=2,
mode=self.upsample,
align_corners=align_corners)
upsample_module = build_upsample_layer(upsample_cfg_)
self.upsample_modules.append(upsample_module)
self.lateral_convs.append(l_conv)
self.fpn_convs.append(fpn_conv)
# add extra conv layers (e.g., RetinaNet)
extra_out_levels = (
num_outs - self.backbone_end_level + self.start_level)
if extra_out_levels >= 1:
for i in range(extra_out_levels):
in_channels = (
self.in_channels[self.backbone_end_level -
1] if i == 0 else out_channels)
extra_l_conv = ConvModule(
in_channels,
out_channels,
3,
stride=2,
padding=1,
norm_cfg=norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
if self.upsample == 'deconv':
upsampler_cfg_ = dict(
in_channels=out_channels,
out_channels=out_channels,
kernel_size=self.upsample_kernel,
stride=2,
padding=(self.upsample_kernel - 1) // 2,
output_padding=(self.upsample_kernel - 1) // 2)
elif self.upsample == 'pixel_shuffle':
upsampler_cfg_ = dict(
in_channels=out_channels,
out_channels=out_channels,
scale_factor=2,
upsample_kernel=self.upsample_kernel)
elif self.upsample == 'carafe':
upsampler_cfg_ = dict(
channels=out_channels,
scale_factor=2,
**self.upsample_cfg)
else:
# suppress warnings
align_corners = (None
if self.upsample == 'nearest' else False)
upsampler_cfg_ = dict(
scale_factor=2,
mode=self.upsample,
align_corners=align_corners)
upsampler_cfg_['type'] = self.upsample
upsample_module = build_upsample_layer(upsampler_cfg_)
extra_fpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
norm_cfg=self.norm_cfg,
bias=self.with_bias,
act_cfg=act_cfg,
inplace=False,
order=self.order)
self.upsample_modules.append(upsample_module)
self.fpn_convs.append(extra_fpn_conv)
self.lateral_convs.append(extra_l_conv)
# default init_weights for conv(msra) and norm in ConvModule
def init_weights(self):
"""Initialize the weights of module."""
super(FPN_CARAFE, self).init_weights()
for m in self.modules():
if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
xavier_init(m, distribution='uniform')
for m in self.modules():
if isinstance(m, CARAFEPack):
m.init_weights()
def slice_as(self, src, dst):
"""Slice ``src`` as ``dst``
Note:
``src`` should have the same or larger size than ``dst``.
Args:
src (torch.Tensor): Tensors to be sliced.
dst (torch.Tensor): ``src`` will be sliced to have the same
size as ``dst``.
Returns:
torch.Tensor: Sliced tensor.
"""
assert (src.size(2) >= dst.size(2)) and (src.size(3) >= dst.size(3))
if src.size(2) == dst.size(2) and src.size(3) == dst.size(3):
return src
else:
return src[:, :, :dst.size(2), :dst.size(3)]
def tensor_add(self, a, b):
"""Add tensors ``a`` and ``b`` that might have different sizes."""
if a.size() == b.size():
c = a + b
else:
c = a + self.slice_as(b, a)
return c
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = []
for i, lateral_conv in enumerate(self.lateral_convs):
if i <= self.backbone_end_level - self.start_level:
input = inputs[min(i + self.start_level, len(inputs) - 1)]
else:
input = laterals[-1]
lateral = lateral_conv(input)
laterals.append(lateral)
# build top-down path
for i in range(len(laterals) - 1, 0, -1):
if self.upsample is not None:
upsample_feat = self.upsample_modules[i - 1](laterals[i])
else:
upsample_feat = laterals[i]
laterals[i - 1] = self.tensor_add(laterals[i - 1], upsample_feat)
# build outputs
num_conv_outs = len(self.fpn_convs)
outs = []
for i in range(num_conv_outs):
out = self.fpn_convs[i](laterals[i])
outs.append(out)
return tuple(outs)
| 11,149 | 39.398551 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/necks/hrfpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from torch.utils.checkpoint import checkpoint
from ..builder import NECKS
@NECKS.register_module()
class HRFPN(BaseModule):
"""HRFPN (High Resolution Feature Pyramids)
paper: `High-Resolution Representations for Labeling Pixels and Regions
<https://arxiv.org/abs/1904.04514>`_.
Args:
in_channels (list): number of channels for each branch.
out_channels (int): output channels of feature pyramids.
num_outs (int): number of output stages.
pooling_type (str): pooling for generating feature pyramids
from {MAX, AVG}.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
with_cp (bool): Use checkpoint or not. Using checkpoint will save some
memory while slowing down the training speed.
stride (int): stride of 3x3 convolutional layers
init_cfg (dict or list[dict], optional): Initialization config dict.
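    Example:
        >>> # A minimal usage sketch; branch channels and resolutions are
        >>> # illustrative (each extra branch halves the resolution).
        >>> import torch
        >>> in_channels = [4, 8]
        >>> inputs = [torch.rand(1, 4, 16, 16), torch.rand(1, 8, 8, 8)]
        >>> self = HRFPN(in_channels, out_channels=8, num_outs=3).eval()
        >>> outputs = self.forward(inputs)
        >>> [out.shape[-1] for out in outputs]
        [16, 8, 4]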
"""
def __init__(self,
in_channels,
out_channels,
num_outs=5,
pooling_type='AVG',
conv_cfg=None,
norm_cfg=None,
with_cp=False,
stride=1,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
super(HRFPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.with_cp = with_cp
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
self.reduction_conv = ConvModule(
sum(in_channels),
out_channels,
kernel_size=1,
conv_cfg=self.conv_cfg,
act_cfg=None)
self.fpn_convs = nn.ModuleList()
for i in range(self.num_outs):
self.fpn_convs.append(
ConvModule(
out_channels,
out_channels,
kernel_size=3,
padding=1,
stride=stride,
conv_cfg=self.conv_cfg,
act_cfg=None))
if pooling_type == 'MAX':
self.pooling = F.max_pool2d
else:
self.pooling = F.avg_pool2d
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == self.num_ins
outs = [inputs[0]]
for i in range(1, self.num_ins):
outs.append(
F.interpolate(inputs[i], scale_factor=2**i, mode='bilinear'))
out = torch.cat(outs, dim=1)
if out.requires_grad and self.with_cp:
out = checkpoint(self.reduction_conv, out)
else:
out = self.reduction_conv(out)
outs = [out]
for i in range(1, self.num_outs):
outs.append(self.pooling(out, kernel_size=2**i, stride=2**i))
outputs = []
for i in range(self.num_outs):
if outs[i].requires_grad and self.with_cp:
tmp_out = checkpoint(self.fpn_convs[i], outs[i])
else:
tmp_out = self.fpn_convs[i](outs[i])
outputs.append(tmp_out)
return tuple(outputs)
| 3,509 | 33.752475 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/necks/nas_fpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.ops.merge_cells import GlobalPoolingCell, SumCell
from mmcv.runner import BaseModule, ModuleList
from ..builder import NECKS
@NECKS.register_module()
class NASFPN(BaseModule):
"""NAS-FPN.
Implementation of `NAS-FPN: Learning Scalable Feature Pyramid Architecture
for Object Detection <https://arxiv.org/abs/1904.07392>`_
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
stack_times (int): The number of times the pyramid architecture will
be stacked.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): Currently unused in NASFPN; the extra
            levels are always generated by a 1x1 conv followed by max
            pooling. Defaults to False.
init_cfg (dict or list[dict], optional): Initialization config dict.
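    Example:
        >>> # A minimal usage sketch; channel numbers and scales are
        >>> # illustrative, and the five outputs correspond to P3-P7.
        >>> import torch
        >>> in_channels = [8, 16, 32]
        >>> scales = [32, 16, 8]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = NASFPN(in_channels, out_channels=8, num_outs=5,
        ...               stack_times=1).eval()
        >>> outputs = self.forward(inputs)
        >>> [out.shape[-1] for out in outputs]
        [32, 16, 8, 4, 2]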
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
stack_times,
start_level=0,
end_level=-1,
add_extra_convs=False,
norm_cfg=None,
init_cfg=dict(type='Caffe2Xavier', layer='Conv2d')):
super(NASFPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels) # num of input feature levels
self.num_outs = num_outs # num of output feature levels
self.stack_times = stack_times
self.norm_cfg = norm_cfg
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
# add lateral connections
self.lateral_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
l_conv = ConvModule(
in_channels[i],
out_channels,
1,
norm_cfg=norm_cfg,
act_cfg=None)
self.lateral_convs.append(l_conv)
# add extra downsample layers (stride-2 pooling or conv)
extra_levels = num_outs - self.backbone_end_level + self.start_level
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
extra_conv = ConvModule(
out_channels, out_channels, 1, norm_cfg=norm_cfg, act_cfg=None)
self.extra_downsamples.append(
nn.Sequential(extra_conv, nn.MaxPool2d(2, 2)))
# add NAS FPN connections
self.fpn_stages = ModuleList()
for _ in range(self.stack_times):
stage = nn.ModuleDict()
# gp(p6, p4) -> p4_1
stage['gp_64_4'] = GlobalPoolingCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p4_1, p4) -> p4_2
stage['sum_44_4'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p4_2, p3) -> p3_out
stage['sum_43_3'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p3_out, p4_2) -> p4_out
stage['sum_34_4'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p5, gp(p4_out, p3_out)) -> p5_out
stage['gp_43_5'] = GlobalPoolingCell(with_out_conv=False)
stage['sum_55_5'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# sum(p7, gp(p5_out, p4_2)) -> p7_out
stage['gp_54_7'] = GlobalPoolingCell(with_out_conv=False)
stage['sum_77_7'] = SumCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
# gp(p7_out, p5_out) -> p6_out
stage['gp_75_6'] = GlobalPoolingCell(
in_channels=out_channels,
out_channels=out_channels,
out_norm_cfg=norm_cfg)
self.fpn_stages.append(stage)
def forward(self, inputs):
"""Forward function."""
# build P3-P5
feats = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build P6-P7 on top of P5
for downsample in self.extra_downsamples:
feats.append(downsample(feats[-1]))
p3, p4, p5, p6, p7 = feats
for stage in self.fpn_stages:
# gp(p6, p4) -> p4_1
p4_1 = stage['gp_64_4'](p6, p4, out_size=p4.shape[-2:])
# sum(p4_1, p4) -> p4_2
p4_2 = stage['sum_44_4'](p4_1, p4, out_size=p4.shape[-2:])
# sum(p4_2, p3) -> p3_out
p3 = stage['sum_43_3'](p4_2, p3, out_size=p3.shape[-2:])
# sum(p3_out, p4_2) -> p4_out
p4 = stage['sum_34_4'](p3, p4_2, out_size=p4.shape[-2:])
# sum(p5, gp(p4_out, p3_out)) -> p5_out
p5_tmp = stage['gp_43_5'](p4, p3, out_size=p5.shape[-2:])
p5 = stage['sum_55_5'](p5, p5_tmp, out_size=p5.shape[-2:])
# sum(p7, gp(p5_out, p4_2)) -> p7_out
p7_tmp = stage['gp_54_7'](p5, p4_2, out_size=p7.shape[-2:])
p7 = stage['sum_77_7'](p7, p7_tmp, out_size=p7.shape[-2:])
# gp(p7_out, p5_out) -> p6_out
p6 = stage['gp_75_6'](p7, p5, out_size=p6.shape[-2:])
return p3, p4, p5, p6, p7
| 6,626 | 40.679245 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/necks/nasfcos_fpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule, caffe2_xavier_init
from mmcv.ops.merge_cells import ConcatCell
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class NASFCOS_FPN(BaseModule):
"""FPN structure in NASFPN.
Implementation of paper `NAS-FCOS: Fast Neural Architecture Search for
Object Detection <https://arxiv.org/abs/1906.04423>`_
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
        add_extra_convs (bool): Currently unused in NASFCOS_FPN; the extra
            levels are always generated by stride-2 convolution layers.
            Defaults to False.
conv_cfg (dict): dictionary to construct and config conv layer.
norm_cfg (dict): dictionary to construct and config norm layer.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
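    Example:
        >>> # A minimal usage sketch; channel numbers and scales are
        >>> # illustrative. With the default start_level=1 the first (C2)
        >>> # input is not used by the forward pass.
        >>> import torch
        >>> in_channels = [4, 8, 16, 32]
        >>> scales = [64, 32, 16, 8]
        >>> inputs = [torch.rand(1, c, s, s)
        ...           for c, s in zip(in_channels, scales)]
        >>> self = NASFCOS_FPN(in_channels, out_channels=8, num_outs=5).eval()
        >>> outputs = self.forward(inputs)
        >>> [out.shape[-1] for out in outputs]
        [32, 16, 8, 4, 2]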
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=1,
end_level=-1,
add_extra_convs=False,
conv_cfg=None,
norm_cfg=None,
init_cfg=None):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(NASFCOS_FPN, self).__init__(init_cfg)
assert isinstance(in_channels, list)
self.in_channels = in_channels
self.out_channels = out_channels
self.num_ins = len(in_channels)
self.num_outs = num_outs
self.norm_cfg = norm_cfg
self.conv_cfg = conv_cfg
if end_level == -1 or end_level == self.num_ins - 1:
self.backbone_end_level = self.num_ins
assert num_outs >= self.num_ins - start_level
else:
# if end_level is not the last level, no extra level is allowed
self.backbone_end_level = end_level + 1
assert end_level < self.num_ins
assert num_outs == end_level - start_level + 1
self.start_level = start_level
self.end_level = end_level
self.add_extra_convs = add_extra_convs
self.adapt_convs = nn.ModuleList()
for i in range(self.start_level, self.backbone_end_level):
adapt_conv = ConvModule(
in_channels[i],
out_channels,
1,
stride=1,
padding=0,
bias=False,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU', inplace=False))
self.adapt_convs.append(adapt_conv)
# C2 is omitted according to the paper
extra_levels = num_outs - self.backbone_end_level + self.start_level
def build_concat_cell(with_input1_conv, with_input2_conv):
cell_conv_cfg = dict(
kernel_size=1, padding=0, bias=False, groups=out_channels)
return ConcatCell(
in_channels=out_channels,
out_channels=out_channels,
with_out_conv=True,
out_conv_cfg=cell_conv_cfg,
out_norm_cfg=dict(type='BN'),
out_conv_order=('norm', 'act', 'conv'),
with_input1_conv=with_input1_conv,
with_input2_conv=with_input2_conv,
input_conv_cfg=conv_cfg,
input_norm_cfg=norm_cfg,
upsample_mode='nearest')
        # Denote c3=f0, c4=f1, c5=f2 for convenience
self.fpn = nn.ModuleDict()
self.fpn['c22_1'] = build_concat_cell(True, True)
self.fpn['c22_2'] = build_concat_cell(True, True)
self.fpn['c32'] = build_concat_cell(True, False)
self.fpn['c02'] = build_concat_cell(True, False)
self.fpn['c42'] = build_concat_cell(True, True)
self.fpn['c36'] = build_concat_cell(True, True)
self.fpn['c61'] = build_concat_cell(True, True) # f9
self.extra_downsamples = nn.ModuleList()
for i in range(extra_levels):
extra_act_cfg = None if i == 0 \
else dict(type='ReLU', inplace=False)
self.extra_downsamples.append(
ConvModule(
out_channels,
out_channels,
3,
stride=2,
padding=1,
act_cfg=extra_act_cfg,
order=('act', 'norm', 'conv')))
def forward(self, inputs):
"""Forward function."""
feats = [
adapt_conv(inputs[i + self.start_level])
for i, adapt_conv in enumerate(self.adapt_convs)
]
for (i, module_name) in enumerate(self.fpn):
idx_1, idx_2 = int(module_name[1]), int(module_name[2])
res = self.fpn[module_name](feats[idx_1], feats[idx_2])
feats.append(res)
ret = []
for (idx, input_idx) in zip([9, 8, 7], [1, 2, 3]): # add P3, P4, P5
feats1, feats2 = feats[idx], feats[5]
feats2_resize = F.interpolate(
feats2,
size=feats1.size()[2:],
mode='bilinear',
align_corners=False)
feats_sum = feats1 + feats2_resize
ret.append(
F.interpolate(
feats_sum,
size=inputs[input_idx].size()[2:],
mode='bilinear',
align_corners=False))
for submodule in self.extra_downsamples:
ret.append(submodule(ret[-1]))
return tuple(ret)
def init_weights(self):
"""Initialize the weights of module."""
super(NASFCOS_FPN, self).init_weights()
for module in self.fpn.values():
            if hasattr(module, 'out_conv'):
                caffe2_xavier_init(module.out_conv.conv)
for modules in [
self.adapt_convs.modules(),
self.extra_downsamples.modules()
]:
for module in modules:
if isinstance(module, nn.Conv2d):
caffe2_xavier_init(module)
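# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream file). It
# shows how the neck consumes C2-C5 feature maps and returns `num_outs`
# pyramid levels. The channel/spatial sizes are assumptions matching a
# ResNet-50 backbone on a 640x640 input; adjust them to your own backbone.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch
    neck = NASFCOS_FPN(
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        start_level=1)
    feats = [
        torch.rand(1, 256, 160, 160),  # C2, stride 4
        torch.rand(1, 512, 80, 80),  # C3, stride 8
        torch.rand(1, 1024, 40, 40),  # C4, stride 16
        torch.rand(1, 2048, 20, 20),  # C5, stride 32
    ]
    outs = neck(feats)
    # five 256-channel maps at strides 8, 16, 32, 64, 128
    print([o.shape for o in outs])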
| 6,703 | 38.204678 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/necks/pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import auto_fp16
from ..builder import NECKS
from .fpn import FPN
@NECKS.register_module()
class PAFPN(FPN):
"""Path Aggregation Network for Instance Segmentation.
This is an implementation of the `PAFPN in Path Aggregation Network
<https://arxiv.org/abs/1803.01534>`_.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_outs (int): Number of output scales.
start_level (int): Index of the start input backbone level used to
build the feature pyramid. Default: 0.
end_level (int): Index of the end input backbone level (exclusive) to
build the feature pyramid. Default: -1, which means the last level.
add_extra_convs (bool | str): If bool, it decides whether to add conv
layers on top of the original feature maps. Default to False.
If True, it is equivalent to `add_extra_convs='on_input'`.
If str, it specifies the source feature map of the extra convs.
Only the following options are allowed
- 'on_input': Last feat map of neck inputs (i.e. backbone feature).
- 'on_lateral': Last feature map after lateral convs.
- 'on_output': The last output feature map after fpn convs.
relu_before_extra_convs (bool): Whether to apply relu before the extra
conv. Default: False.
no_norm_on_lateral (bool): Whether to apply norm on lateral.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Config dict for normalization layer. Default: None.
act_cfg (str): Config dict for activation layer in ConvModule.
Default: None.
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
num_outs,
start_level=0,
end_level=-1,
add_extra_convs=False,
relu_before_extra_convs=False,
no_norm_on_lateral=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=None,
init_cfg=dict(
type='Xavier', layer='Conv2d', distribution='uniform')):
super(PAFPN, self).__init__(
in_channels,
out_channels,
num_outs,
start_level,
end_level,
add_extra_convs,
relu_before_extra_convs,
no_norm_on_lateral,
conv_cfg,
norm_cfg,
act_cfg,
init_cfg=init_cfg)
# add extra bottom up pathway
self.downsample_convs = nn.ModuleList()
self.pafpn_convs = nn.ModuleList()
for i in range(self.start_level + 1, self.backbone_end_level):
d_conv = ConvModule(
out_channels,
out_channels,
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
pafpn_conv = ConvModule(
out_channels,
out_channels,
3,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg,
inplace=False)
self.downsample_convs.append(d_conv)
self.pafpn_convs.append(pafpn_conv)
@auto_fp16()
def forward(self, inputs):
"""Forward function."""
assert len(inputs) == len(self.in_channels)
# build laterals
laterals = [
lateral_conv(inputs[i + self.start_level])
for i, lateral_conv in enumerate(self.lateral_convs)
]
# build top-down path
used_backbone_levels = len(laterals)
for i in range(used_backbone_levels - 1, 0, -1):
prev_shape = laterals[i - 1].shape[2:]
# fix runtime error of "+=" inplace operation in PyTorch 1.10
laterals[i - 1] = laterals[i - 1] + F.interpolate(
laterals[i], size=prev_shape, mode='nearest')
# build outputs
# part 1: from original levels
inter_outs = [
self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
]
# part 2: add bottom-up path
for i in range(0, used_backbone_levels - 1):
inter_outs[i + 1] += self.downsample_convs[i](inter_outs[i])
outs = []
outs.append(inter_outs[0])
outs.extend([
self.pafpn_convs[i - 1](inter_outs[i])
for i in range(1, used_backbone_levels)
])
# part 3: add extra levels
if self.num_outs > len(outs):
# use max pool to get more levels on top of outputs
# (e.g., Faster R-CNN, Mask R-CNN)
if not self.add_extra_convs:
for i in range(self.num_outs - used_backbone_levels):
outs.append(F.max_pool2d(outs[-1], 1, stride=2))
# add conv layers on top of original feature maps (RetinaNet)
else:
if self.add_extra_convs == 'on_input':
orig = inputs[self.backbone_end_level - 1]
outs.append(self.fpn_convs[used_backbone_levels](orig))
elif self.add_extra_convs == 'on_lateral':
outs.append(self.fpn_convs[used_backbone_levels](
laterals[-1]))
elif self.add_extra_convs == 'on_output':
outs.append(self.fpn_convs[used_backbone_levels](outs[-1]))
else:
raise NotImplementedError
for i in range(used_backbone_levels + 1, self.num_outs):
if self.relu_before_extra_convs:
outs.append(self.fpn_convs[i](F.relu(outs[-1])))
else:
outs.append(self.fpn_convs[i](outs[-1]))
return tuple(outs)
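# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream file). PAFPN
# is a drop-in replacement for FPN; the bottom-up path aggregation is applied
# on top of the usual top-down outputs. Sizes are assumptions for a
# ResNet-50-like backbone.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch
    neck = PAFPN(
        in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5)
    feats = [
        torch.rand(1, 256, 160, 160),
        torch.rand(1, 512, 80, 80),
        torch.rand(1, 1024, 40, 40),
        torch.rand(1, 2048, 20, 20),
    ]
    outs = neck(feats)
    print([o.shape for o in outs])  # 5 levels, each with 256 channels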
| 6,342 | 38.64375 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/necks/rfp.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import constant_init, xavier_init
from mmcv.runner import BaseModule, ModuleList
from ..builder import NECKS, build_backbone
from .fpn import FPN
class ASPP(BaseModule):
"""ASPP (Atrous Spatial Pyramid Pooling)
This is an implementation of the ASPP module used in DetectoRS
(https://arxiv.org/pdf/2006.02334.pdf)
Args:
in_channels (int): Number of input channels.
out_channels (int): Number of channels produced by this module
dilations (tuple[int]): Dilations of the four branches.
Default: (1, 3, 6, 1)
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
dilations=(1, 3, 6, 1),
init_cfg=dict(type='Kaiming', layer='Conv2d')):
super().__init__(init_cfg)
assert dilations[-1] == 1
self.aspp = nn.ModuleList()
for dilation in dilations:
kernel_size = 3 if dilation > 1 else 1
padding = dilation if dilation > 1 else 0
conv = nn.Conv2d(
in_channels,
out_channels,
kernel_size=kernel_size,
stride=1,
dilation=dilation,
padding=padding,
bias=True)
self.aspp.append(conv)
self.gap = nn.AdaptiveAvgPool2d(1)
def forward(self, x):
avg_x = self.gap(x)
out = []
for aspp_idx in range(len(self.aspp)):
inp = avg_x if (aspp_idx == len(self.aspp) - 1) else x
out.append(F.relu_(self.aspp[aspp_idx](inp)))
out[-1] = out[-1].expand_as(out[-2])
out = torch.cat(out, dim=1)
return out
@NECKS.register_module()
class RFP(FPN):
"""RFP (Recursive Feature Pyramid)
This is an implementation of RFP in `DetectoRS
    <https://arxiv.org/pdf/2006.02334.pdf>`_. Different from a standard FPN, the
    input of RFP should be the multi-level features together with the original
    input image of the backbone.
Args:
rfp_steps (int): Number of unrolled steps of RFP.
rfp_backbone (dict): Configuration of the backbone for RFP.
aspp_out_channels (int): Number of output channels of ASPP module.
aspp_dilations (tuple[int]): Dilation rates of four branches.
Default: (1, 3, 6, 1)
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
rfp_steps,
rfp_backbone,
aspp_out_channels,
aspp_dilations=(1, 3, 6, 1),
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super().__init__(init_cfg=init_cfg, **kwargs)
self.rfp_steps = rfp_steps
        # Be careful! Pretrained weights cannot be loaded when using
        # nn.ModuleList
self.rfp_modules = ModuleList()
for rfp_idx in range(1, rfp_steps):
rfp_module = build_backbone(rfp_backbone)
self.rfp_modules.append(rfp_module)
self.rfp_aspp = ASPP(self.out_channels, aspp_out_channels,
aspp_dilations)
self.rfp_weight = nn.Conv2d(
self.out_channels,
1,
kernel_size=1,
stride=1,
padding=0,
bias=True)
def init_weights(self):
# Avoid using super().init_weights(), which may alter the default
# initialization of the modules in self.rfp_modules that have missing
# keys in the pretrained checkpoint.
for convs in [self.lateral_convs, self.fpn_convs]:
for m in convs.modules():
if isinstance(m, nn.Conv2d):
xavier_init(m, distribution='uniform')
for rfp_idx in range(self.rfp_steps - 1):
self.rfp_modules[rfp_idx].init_weights()
constant_init(self.rfp_weight, 0)
def forward(self, inputs):
inputs = list(inputs)
assert len(inputs) == len(self.in_channels) + 1 # +1 for input image
img = inputs.pop(0)
# FPN forward
x = super().forward(tuple(inputs))
for rfp_idx in range(self.rfp_steps - 1):
rfp_feats = [x[0]] + list(
self.rfp_aspp(x[i]) for i in range(1, len(x)))
x_idx = self.rfp_modules[rfp_idx].rfp_forward(img, rfp_feats)
# FPN forward
x_idx = super().forward(x_idx)
x_new = []
for ft_idx in range(len(x_idx)):
add_weight = torch.sigmoid(self.rfp_weight(x_idx[ft_idx]))
x_new.append(add_weight * x_idx[ft_idx] +
(1 - add_weight) * x[ft_idx])
x = x_new
return x
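# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream file). It
# exercises only the self-contained ASPP block above: the dilated branches
# keep the spatial size, the last branch works on the global-average-pooled
# feature, and all branch outputs are concatenated along the channel
# dimension. Building the full RFP additionally needs an `rfp_backbone`
# config whose backbone implements `rfp_forward`. Tensor sizes are arbitrary
# assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    aspp = ASPP(in_channels=256, out_channels=64, dilations=(1, 3, 6, 1))
    x = torch.rand(2, 256, 32, 32)
    y = aspp(x)
    print(y.shape)  # torch.Size([2, 256, 32, 32]): 4 branches x 64 channels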
| 5,052 | 36.154412 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/necks/ssd_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
@NECKS.register_module()
class SSDNeck(BaseModule):
"""Extra layers of SSD backbone to generate multi-scale feature maps.
Args:
in_channels (Sequence[int]): Number of input channels per scale.
out_channels (Sequence[int]): Number of output channels per scale.
level_strides (Sequence[int]): Stride of 3x3 conv per level.
level_paddings (Sequence[int]): Padding size of 3x3 conv per level.
        l2_norm_scale (float|None): L2 normalization layer init scale.
            If None, L2 normalization is not applied to the first input
            feature.
last_kernel_size (int): Kernel size of the last conv layer.
Default: 3.
use_depthwise (bool): Whether to use DepthwiseSeparableConv.
Default: False.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: None.
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
init_cfg (dict or list[dict], optional): Initialization config dict.
"""
def __init__(self,
in_channels,
out_channels,
level_strides,
level_paddings,
l2_norm_scale=20.,
last_kernel_size=3,
use_depthwise=False,
conv_cfg=None,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
init_cfg=[
dict(
type='Xavier', distribution='uniform',
layer='Conv2d'),
dict(type='Constant', val=1, layer='BatchNorm2d'),
]):
super(SSDNeck, self).__init__(init_cfg)
assert len(out_channels) > len(in_channels)
assert len(out_channels) - len(in_channels) == len(level_strides)
assert len(level_strides) == len(level_paddings)
assert in_channels == out_channels[:len(in_channels)]
if l2_norm_scale:
self.l2_norm = L2Norm(in_channels[0], l2_norm_scale)
self.init_cfg += [
dict(
type='Constant',
val=self.l2_norm.scale,
override=dict(name='l2_norm'))
]
self.extra_layers = nn.ModuleList()
extra_layer_channels = out_channels[len(in_channels):]
second_conv = DepthwiseSeparableConvModule if \
use_depthwise else ConvModule
for i, (out_channel, stride, padding) in enumerate(
zip(extra_layer_channels, level_strides, level_paddings)):
kernel_size = last_kernel_size \
if i == len(extra_layer_channels) - 1 else 3
per_lvl_convs = nn.Sequential(
ConvModule(
out_channels[len(in_channels) - 1 + i],
out_channel // 2,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg),
second_conv(
out_channel // 2,
out_channel,
kernel_size,
stride=stride,
padding=padding,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.extra_layers.append(per_lvl_convs)
def forward(self, inputs):
"""Forward function."""
outs = [feat for feat in inputs]
if hasattr(self, 'l2_norm'):
outs[0] = self.l2_norm(outs[0])
feat = outs[-1]
for layer in self.extra_layers:
feat = layer(feat)
outs.append(feat)
return tuple(outs)
class L2Norm(nn.Module):
def __init__(self, n_dims, scale=20., eps=1e-10):
"""L2 normalization layer.
Args:
n_dims (int): Number of dimensions to be normalized
            scale (float, optional): Defaults to 20.0.
eps (float, optional): Used to avoid division by zero.
Defaults to 1e-10.
"""
super(L2Norm, self).__init__()
self.n_dims = n_dims
self.weight = nn.Parameter(torch.Tensor(self.n_dims))
self.eps = eps
self.scale = scale
def forward(self, x):
"""Forward function."""
        # the normalization layer converts its input to FP32 in FP16 training
x_float = x.float()
norm = x_float.pow(2).sum(1, keepdim=True).sqrt() + self.eps
return (self.weight[None, :, None, None].float().expand_as(x_float) *
x_float / norm).type_as(x)
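# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream file). The
# configuration mirrors the classic SSD300/VGG16 setting (an assumption used
# only for illustration): two backbone maps are extended with four extra
# levels built by the 1x1 + 3x3 conv pairs above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    neck = SSDNeck(
        in_channels=(512, 1024),
        out_channels=(512, 1024, 512, 256, 256, 256),
        level_strides=(2, 2, 1, 1),
        level_paddings=(1, 1, 0, 0),
        l2_norm_scale=20.)
    neck.init_weights()  # also sets the L2Norm scale to 20
    feats = (torch.rand(1, 512, 38, 38), torch.rand(1, 1024, 19, 19))
    outs = neck(feats)
    print([tuple(o.shape[-2:]) for o in outs])
    # [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]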
| 4,891 | 36.630769 | 77 | py |
mmdetection | mmdetection-master/mmdet/models/necks/yolo_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
# Copyright (c) 2019 Western Digital Corporation or its affiliates.
import torch
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
class DetectionBlock(BaseModule):
"""Detection block in YOLO neck.
Let out_channels = n, the DetectionBlock contains:
Six ConvLayers, 1 Conv2D Layer and 1 YoloLayer.
    The first 6 ConvLayers are formed in the following way:
1x1xn, 3x3x2n, 1x1xn, 3x3x2n, 1x1xn, 3x3x2n.
The Conv2D layer is 1x1x255.
    Some blocks will have a branch after the fifth ConvLayer.
The input channel is arbitrary (in_channels)
Args:
in_channels (int): The number of input channels.
out_channels (int): The number of output channels.
conv_cfg (dict): Config dict for convolution layer. Default: None.
norm_cfg (dict): Dictionary to construct and config norm layer.
Default: dict(type='BN', requires_grad=True)
act_cfg (dict): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
out_channels,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=None):
super(DetectionBlock, self).__init__(init_cfg)
double_out_channels = out_channels * 2
# shortcut
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
self.conv1 = ConvModule(in_channels, out_channels, 1, **cfg)
self.conv2 = ConvModule(
out_channels, double_out_channels, 3, padding=1, **cfg)
self.conv3 = ConvModule(double_out_channels, out_channels, 1, **cfg)
self.conv4 = ConvModule(
out_channels, double_out_channels, 3, padding=1, **cfg)
self.conv5 = ConvModule(double_out_channels, out_channels, 1, **cfg)
def forward(self, x):
tmp = self.conv1(x)
tmp = self.conv2(tmp)
tmp = self.conv3(tmp)
tmp = self.conv4(tmp)
out = self.conv5(tmp)
return out
@NECKS.register_module()
class YOLOV3Neck(BaseModule):
"""The neck of YOLOV3.
    It can be treated as a simplified version of FPN. It
    takes the features from the Darknet backbone, does some upsampling and
    concatenation, and finally outputs the features used for detection.
Note:
The input feats should be from top to bottom.
i.e., from high-lvl to low-lvl
But YOLOV3Neck will process them in reversed order.
i.e., from bottom (high-lvl) to top (low-lvl)
Args:
num_scales (int): The number of scales / stages.
in_channels (List[int]): The number of input channels per scale.
out_channels (List[int]): The number of output channels per scale.
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None.
norm_cfg (dict, optional): Dictionary to construct and config norm
layer. Default: dict(type='BN', requires_grad=True)
act_cfg (dict, optional): Config dict for activation layer.
Default: dict(type='LeakyReLU', negative_slope=0.1).
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_scales,
in_channels,
out_channels,
conv_cfg=None,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='LeakyReLU', negative_slope=0.1),
init_cfg=None):
super(YOLOV3Neck, self).__init__(init_cfg)
assert (num_scales == len(in_channels) == len(out_channels))
self.num_scales = num_scales
self.in_channels = in_channels
self.out_channels = out_channels
# shortcut
cfg = dict(conv_cfg=conv_cfg, norm_cfg=norm_cfg, act_cfg=act_cfg)
# To support arbitrary scales, the code looks awful, but it works.
        # A better solution is welcome.
self.detect1 = DetectionBlock(in_channels[0], out_channels[0], **cfg)
for i in range(1, self.num_scales):
in_c, out_c = self.in_channels[i], self.out_channels[i]
inter_c = out_channels[i - 1]
self.add_module(f'conv{i}', ConvModule(inter_c, out_c, 1, **cfg))
# in_c + out_c : High-lvl feats will be cat with low-lvl feats
self.add_module(f'detect{i+1}',
DetectionBlock(in_c + out_c, out_c, **cfg))
def forward(self, feats):
assert len(feats) == self.num_scales
# processed from bottom (high-lvl) to top (low-lvl)
outs = []
out = self.detect1(feats[-1])
outs.append(out)
for i, x in enumerate(reversed(feats[:-1])):
conv = getattr(self, f'conv{i+1}')
tmp = conv(out)
# Cat with low-lvl feats
tmp = F.interpolate(tmp, scale_factor=2)
tmp = torch.cat((tmp, x), 1)
detect = getattr(self, f'detect{i+2}')
out = detect(tmp)
outs.append(out)
return tuple(outs)
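# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream file).
# Channel sizes follow the standard Darknet-53 YOLOv3 setting (an assumption
# used only for illustration). `feats` are ordered as the backbone returns
# them, with the highest-level (1024-channel) map last; the neck starts from
# that map and works back towards the low-level maps.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    neck = YOLOV3Neck(
        num_scales=3,
        in_channels=[1024, 512, 256],
        out_channels=[512, 256, 128])
    feats = (
        torch.rand(1, 256, 64, 64),  # stride 8
        torch.rand(1, 512, 32, 32),  # stride 16
        torch.rand(1, 1024, 16, 16),  # stride 32
    )
    outs = neck(feats)
    print([o.shape[1] for o in outs])  # [512, 256, 128]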
| 5,431 | 37.524823 | 77 | py |
mmdetection | mmdetection-master/mmdet/models/necks/yolox_pafpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.runner import BaseModule
from ..builder import NECKS
from ..utils import CSPLayer
@NECKS.register_module()
class YOLOXPAFPN(BaseModule):
"""Path Aggregation Network used in YOLOX.
Args:
in_channels (List[int]): Number of input channels per scale.
out_channels (int): Number of output channels (used at each scale)
num_csp_blocks (int): Number of bottlenecks in CSPLayer. Default: 3
        use_depthwise (bool): Whether to use depthwise separable convolution
            in blocks. Default: False
upsample_cfg (dict): Config dict for interpolate layer.
Default: `dict(scale_factor=2, mode='nearest')`
conv_cfg (dict, optional): Config dict for convolution layer.
Default: None, which means using conv2d.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN')
act_cfg (dict): Config dict for activation layer.
Default: dict(type='Swish')
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None.
"""
def __init__(self,
in_channels,
out_channels,
num_csp_blocks=3,
use_depthwise=False,
upsample_cfg=dict(scale_factor=2, mode='nearest'),
conv_cfg=None,
norm_cfg=dict(type='BN', momentum=0.03, eps=0.001),
act_cfg=dict(type='Swish'),
init_cfg=dict(
type='Kaiming',
layer='Conv2d',
a=math.sqrt(5),
distribution='uniform',
mode='fan_in',
nonlinearity='leaky_relu')):
super(YOLOXPAFPN, self).__init__(init_cfg)
self.in_channels = in_channels
self.out_channels = out_channels
conv = DepthwiseSeparableConvModule if use_depthwise else ConvModule
# build top-down blocks
self.upsample = nn.Upsample(**upsample_cfg)
self.reduce_layers = nn.ModuleList()
self.top_down_blocks = nn.ModuleList()
for idx in range(len(in_channels) - 1, 0, -1):
self.reduce_layers.append(
ConvModule(
in_channels[idx],
in_channels[idx - 1],
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.top_down_blocks.append(
CSPLayer(
in_channels[idx - 1] * 2,
in_channels[idx - 1],
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
# build bottom-up blocks
self.downsamples = nn.ModuleList()
self.bottom_up_blocks = nn.ModuleList()
for idx in range(len(in_channels) - 1):
self.downsamples.append(
conv(
in_channels[idx],
in_channels[idx],
3,
stride=2,
padding=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.bottom_up_blocks.append(
CSPLayer(
in_channels[idx] * 2,
in_channels[idx + 1],
num_blocks=num_csp_blocks,
add_identity=False,
use_depthwise=use_depthwise,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
self.out_convs = nn.ModuleList()
for i in range(len(in_channels)):
self.out_convs.append(
ConvModule(
in_channels[i],
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, inputs):
"""
Args:
inputs (tuple[Tensor]): input features.
Returns:
tuple[Tensor]: YOLOXPAFPN features.
"""
assert len(inputs) == len(self.in_channels)
# top-down path
inner_outs = [inputs[-1]]
for idx in range(len(self.in_channels) - 1, 0, -1):
            feat_high = inner_outs[0]
            feat_low = inputs[idx - 1]
            feat_high = self.reduce_layers[len(self.in_channels) - 1 - idx](
                feat_high)
            inner_outs[0] = feat_high
            upsample_feat = self.upsample(feat_high)
inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
torch.cat([upsample_feat, feat_low], 1))
inner_outs.insert(0, inner_out)
# bottom-up path
outs = [inner_outs[0]]
for idx in range(len(self.in_channels) - 1):
feat_low = outs[-1]
            feat_high = inner_outs[idx + 1]
            downsample_feat = self.downsamples[idx](feat_low)
            out = self.bottom_up_blocks[idx](
                torch.cat([downsample_feat, feat_high], 1))
outs.append(out)
# out convs
for idx, conv in enumerate(self.out_convs):
outs[idx] = conv(outs[idx])
return tuple(outs)
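# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream file). The
# channel layout matches the YOLOX-s setting (an assumption used only for
# illustration); every output map is projected to `out_channels` by the final
# 1x1 convs.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    neck = YOLOXPAFPN(
        in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1)
    feats = (
        torch.rand(1, 128, 64, 64),
        torch.rand(1, 256, 32, 32),
        torch.rand(1, 512, 16, 16),
    )
    outs = neck(feats)
    print([o.shape for o in outs])  # three maps, each with 128 channels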
| 5,647 | 34.974522 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/plugins/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .dropblock import DropBlock
from .msdeformattn_pixel_decoder import MSDeformAttnPixelDecoder
from .pixel_decoder import PixelDecoder, TransformerEncoderPixelDecoder
__all__ = [
'DropBlock', 'PixelDecoder', 'TransformerEncoderPixelDecoder',
'MSDeformAttnPixelDecoder'
]
| 331 | 32.2 | 71 | py |
mmdetection | mmdetection-master/mmdet/models/plugins/dropblock.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import PLUGIN_LAYERS
eps = 1e-6
@PLUGIN_LAYERS.register_module()
class DropBlock(nn.Module):
"""Randomly drop some regions of feature maps.
Please refer to the method proposed in `DropBlock
<https://arxiv.org/abs/1810.12890>`_ for details.
Args:
drop_prob (float): The probability of dropping each block.
block_size (int): The size of dropped blocks.
warmup_iters (int): The drop probability will linearly increase
from `0` to `drop_prob` during the first `warmup_iters` iterations.
Default: 2000.
"""
def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs):
super(DropBlock, self).__init__()
assert block_size % 2 == 1
assert 0 < drop_prob <= 1
assert warmup_iters >= 0
self.drop_prob = drop_prob
self.block_size = block_size
self.warmup_iters = warmup_iters
self.iter_cnt = 0
def forward(self, x):
"""
Args:
x (Tensor): Input feature map on which some areas will be randomly
dropped.
Returns:
Tensor: The tensor after DropBlock layer.
"""
if not self.training:
return x
self.iter_cnt += 1
N, C, H, W = list(x.shape)
gamma = self._compute_gamma((H, W))
mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1)
mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device))
mask = F.pad(mask, [self.block_size // 2] * 4, value=0)
mask = F.max_pool2d(
input=mask,
stride=(1, 1),
kernel_size=(self.block_size, self.block_size),
padding=self.block_size // 2)
mask = 1 - mask
x = x * mask * mask.numel() / (eps + mask.sum())
return x
def _compute_gamma(self, feat_size):
"""Compute the value of gamma according to paper. gamma is the
parameter of bernoulli distribution, which controls the number of
features to drop.
gamma = (drop_prob * fm_area) / (drop_area * keep_area)
Args:
feat_size (tuple[int, int]): The height and width of feature map.
Returns:
float: The value of gamma.
"""
gamma = (self.drop_prob * feat_size[0] * feat_size[1])
gamma /= ((feat_size[0] - self.block_size + 1) *
(feat_size[1] - self.block_size + 1))
gamma /= (self.block_size**2)
factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt /
self.warmup_iters)
return gamma * factor
def extra_repr(self):
return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, '
f'warmup_iters={self.warmup_iters}')
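# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream file).
# DropBlock only acts in training mode, and the effective drop probability
# ramps up linearly over the first `warmup_iters` forward calls. Tensor sizes
# are arbitrary assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    drop = DropBlock(drop_prob=0.1, block_size=3, warmup_iters=100)
    drop.train()
    x = torch.rand(2, 16, 32, 32)
    out = drop(x)  # some 3x3 regions are zeroed, the rest is rescaled
    drop.eval()
    assert torch.equal(drop(x), x)  # identity at inference time
    print(out.shape)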
| 2,925 | 33.023256 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/plugins/msdeformattn_pixel_decoder.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import (PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init,
normal_init, xavier_init)
from mmcv.cnn.bricks.transformer import (build_positional_encoding,
build_transformer_layer_sequence)
from mmcv.runner import BaseModule, ModuleList
from mmdet.core.anchor import MlvlPointGenerator
from mmdet.models.utils.transformer import MultiScaleDeformableAttention
@PLUGIN_LAYERS.register_module()
class MSDeformAttnPixelDecoder(BaseModule):
"""Pixel decoder with multi-scale deformable attention.
Args:
in_channels (list[int] | tuple[int]): Number of channels in the
input feature maps.
strides (list[int] | tuple[int]): Output strides of feature from
backbone.
feat_channels (int): Number of channels for feature.
out_channels (int): Number of channels for output.
num_outs (int): Number of output scales.
norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization.
Defaults to dict(type='GN', num_groups=32).
act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation.
Defaults to dict(type='ReLU').
encoder (:obj:`mmcv.ConfigDict` | dict): Config for transformer
encoder. Defaults to `DetrTransformerEncoder`.
positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for
transformer encoder position encoding. Defaults to
dict(type='SinePositionalEncoding', num_feats=128,
normalize=True).
init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict.
"""
def __init__(self,
in_channels=[256, 512, 1024, 2048],
strides=[4, 8, 16, 32],
feat_channels=256,
out_channels=256,
num_outs=3,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU'),
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=dict(
type='MultiScaleDeformableAttention',
embed_dims=256,
num_heads=8,
num_levels=3,
num_points=4,
im2col_step=64,
dropout=0.0,
batch_first=False,
norm_cfg=None,
init_cfg=None),
feedforward_channels=1024,
ffn_dropout=0.0,
operation_order=('self_attn', 'norm', 'ffn', 'norm')),
init_cfg=None),
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=128,
normalize=True),
init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.strides = strides
self.num_input_levels = len(in_channels)
self.num_encoder_levels = \
encoder.transformerlayers.attn_cfgs.num_levels
assert self.num_encoder_levels >= 1, \
'num_levels in attn_cfgs must be at least one'
input_conv_list = []
# from top to down (low to high resolution)
for i in range(self.num_input_levels - 1,
self.num_input_levels - self.num_encoder_levels - 1,
-1):
input_conv = ConvModule(
in_channels[i],
feat_channels,
kernel_size=1,
norm_cfg=norm_cfg,
act_cfg=None,
bias=True)
input_conv_list.append(input_conv)
self.input_convs = ModuleList(input_conv_list)
self.encoder = build_transformer_layer_sequence(encoder)
        self.positional_encoding = build_positional_encoding(
positional_encoding)
# high resolution to low resolution
self.level_encoding = nn.Embedding(self.num_encoder_levels,
feat_channels)
# fpn-like structure
self.lateral_convs = ModuleList()
self.output_convs = ModuleList()
self.use_bias = norm_cfg is None
# from top to down (low to high resolution)
# fpn for the rest features that didn't pass in encoder
for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1,
-1):
lateral_conv = ConvModule(
in_channels[i],
feat_channels,
kernel_size=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=None)
output_conv = ConvModule(
feat_channels,
feat_channels,
kernel_size=3,
stride=1,
padding=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.lateral_convs.append(lateral_conv)
self.output_convs.append(output_conv)
self.mask_feature = Conv2d(
feat_channels, out_channels, kernel_size=1, stride=1, padding=0)
self.num_outs = num_outs
self.point_generator = MlvlPointGenerator(strides)
def init_weights(self):
"""Initialize weights."""
for i in range(0, self.num_encoder_levels):
xavier_init(
self.input_convs[i].conv,
gain=1,
bias=0,
distribution='uniform')
for i in range(0, self.num_input_levels - self.num_encoder_levels):
caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)
caffe2_xavier_init(self.output_convs[i].conv, bias=0)
caffe2_xavier_init(self.mask_feature, bias=0)
normal_init(self.level_encoding, mean=0, std=1)
for p in self.encoder.parameters():
if p.dim() > 1:
nn.init.xavier_normal_(p)
# init_weights defined in MultiScaleDeformableAttention
for layer in self.encoder.layers:
for attn in layer.attentions:
if isinstance(attn, MultiScaleDeformableAttention):
attn.init_weights()
def forward(self, feats):
"""
Args:
feats (list[Tensor]): Feature maps of each level. Each has
shape of (batch_size, c, h, w).
Returns:
tuple: A tuple containing the following:
- mask_feature (Tensor): shape (batch_size, c, h, w).
- multi_scale_features (list[Tensor]): Multi scale \
features, each in shape (batch_size, c, h, w).
"""
# generate padding mask for each level, for each image
batch_size = feats[0].shape[0]
encoder_input_list = []
padding_mask_list = []
level_positional_encoding_list = []
spatial_shapes = []
reference_points_list = []
for i in range(self.num_encoder_levels):
level_idx = self.num_input_levels - i - 1
feat = feats[level_idx]
feat_projected = self.input_convs[i](feat)
h, w = feat.shape[-2:]
# no padding
padding_mask_resized = feat.new_zeros(
(batch_size, ) + feat.shape[-2:], dtype=torch.bool)
            pos_embed = self.positional_encoding(padding_mask_resized)
level_embed = self.level_encoding.weight[i]
level_pos_embed = level_embed.view(1, -1, 1, 1) + pos_embed
# (h_i * w_i, 2)
reference_points = self.point_generator.single_level_grid_priors(
feat.shape[-2:], level_idx, device=feat.device)
# normalize
factor = feat.new_tensor([[w, h]]) * self.strides[level_idx]
reference_points = reference_points / factor
# shape (batch_size, c, h_i, w_i) -> (h_i * w_i, batch_size, c)
feat_projected = feat_projected.flatten(2).permute(2, 0, 1)
level_pos_embed = level_pos_embed.flatten(2).permute(2, 0, 1)
padding_mask_resized = padding_mask_resized.flatten(1)
encoder_input_list.append(feat_projected)
padding_mask_list.append(padding_mask_resized)
level_positional_encoding_list.append(level_pos_embed)
spatial_shapes.append(feat.shape[-2:])
reference_points_list.append(reference_points)
# shape (batch_size, total_num_query),
        # total_num_query = sum(h_i * w_i over all levels)
padding_masks = torch.cat(padding_mask_list, dim=1)
# shape (total_num_query, batch_size, c)
encoder_inputs = torch.cat(encoder_input_list, dim=0)
level_positional_encodings = torch.cat(
level_positional_encoding_list, dim=0)
device = encoder_inputs.device
# shape (num_encoder_levels, 2), from low
# resolution to high resolution
spatial_shapes = torch.as_tensor(
spatial_shapes, dtype=torch.long, device=device)
# shape (0, h_0*w_0, h_0*w_0+h_1*w_1, ...)
level_start_index = torch.cat((spatial_shapes.new_zeros(
(1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
reference_points = torch.cat(reference_points_list, dim=0)
reference_points = reference_points[None, :, None].repeat(
batch_size, 1, self.num_encoder_levels, 1)
        valid_ratios = reference_points.new_ones(
(batch_size, self.num_encoder_levels, 2))
# shape (num_total_query, batch_size, c)
memory = self.encoder(
query=encoder_inputs,
key=None,
value=None,
query_pos=level_positional_encodings,
key_pos=None,
attn_masks=None,
key_padding_mask=None,
query_key_padding_mask=padding_masks,
spatial_shapes=spatial_shapes,
reference_points=reference_points,
level_start_index=level_start_index,
            valid_ratios=valid_ratios)
# (num_total_query, batch_size, c) -> (batch_size, c, num_total_query)
memory = memory.permute(1, 2, 0)
# from low resolution to high resolution
num_query_per_level = [e[0] * e[1] for e in spatial_shapes]
outs = torch.split(memory, num_query_per_level, dim=-1)
outs = [
x.reshape(batch_size, -1, spatial_shapes[i][0],
spatial_shapes[i][1]) for i, x in enumerate(outs)
]
for i in range(self.num_input_levels - self.num_encoder_levels - 1, -1,
-1):
x = feats[i]
cur_feat = self.lateral_convs[i](x)
y = cur_feat + F.interpolate(
outs[-1],
size=cur_feat.shape[-2:],
mode='bilinear',
align_corners=False)
y = self.output_convs[i](y)
outs.append(y)
multi_scale_features = outs[:self.num_outs]
mask_feature = self.mask_feature(outs[-1])
return mask_feature, multi_scale_features
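# ---------------------------------------------------------------------------
# Shape-bookkeeping sketch (illustrative addition, not part of the upstream
# file). It reproduces, in isolation, how the forward pass above turns the
# per-level spatial shapes into the flattened query layout expected by the
# deformable-attention encoder. The level sizes are arbitrary assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    spatial_shapes = torch.as_tensor([[16, 16], [32, 32], [64, 64]],
                                     dtype=torch.long)
    # start offset of each level inside the concatenated query sequence
    level_start_index = torch.cat((spatial_shapes.new_zeros(
        (1, )), spatial_shapes.prod(1).cumsum(0)[:-1]))
    print(level_start_index.tolist())  # [0, 256, 1280]
    print(int(spatial_shapes.prod(1).sum()))  # 5376 queries in total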
| 11,450 | 41.411111 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/plugins/pixel_decoder.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import PLUGIN_LAYERS, Conv2d, ConvModule, caffe2_xavier_init
from mmcv.cnn.bricks.transformer import (build_positional_encoding,
build_transformer_layer_sequence)
from mmcv.runner import BaseModule, ModuleList
@PLUGIN_LAYERS.register_module()
class PixelDecoder(BaseModule):
"""Pixel decoder with a structure like fpn.
Args:
in_channels (list[int] | tuple[int]): Number of channels in the
input feature maps.
        feat_channels (int): Number of channels for features.
        out_channels (int): Number of channels for output.
norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization.
Defaults to dict(type='GN', num_groups=32).
act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation.
Defaults to dict(type='ReLU').
        encoder (:obj:`mmcv.ConfigDict` | dict): Config for transformer
            encoder. Defaults to None.
positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for
transformer encoder position encoding. Defaults to
dict(type='SinePositionalEncoding', num_feats=128,
normalize=True).
init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
feat_channels,
out_channels,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU'),
init_cfg=None):
super().__init__(init_cfg=init_cfg)
self.in_channels = in_channels
self.num_inputs = len(in_channels)
self.lateral_convs = ModuleList()
self.output_convs = ModuleList()
self.use_bias = norm_cfg is None
for i in range(0, self.num_inputs - 1):
lateral_conv = ConvModule(
in_channels[i],
feat_channels,
kernel_size=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=None)
output_conv = ConvModule(
feat_channels,
feat_channels,
kernel_size=3,
stride=1,
padding=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.lateral_convs.append(lateral_conv)
self.output_convs.append(output_conv)
self.last_feat_conv = ConvModule(
in_channels[-1],
feat_channels,
kernel_size=3,
padding=1,
stride=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
self.mask_feature = Conv2d(
feat_channels, out_channels, kernel_size=3, stride=1, padding=1)
def init_weights(self):
"""Initialize weights."""
for i in range(0, self.num_inputs - 2):
caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)
caffe2_xavier_init(self.output_convs[i].conv, bias=0)
caffe2_xavier_init(self.mask_feature, bias=0)
caffe2_xavier_init(self.last_feat_conv, bias=0)
def forward(self, feats, img_metas):
"""
Args:
feats (list[Tensor]): Feature maps of each level. Each has
shape of (batch_size, c, h, w).
img_metas (list[dict]): List of image information. Pass in
for creating more accurate padding mask. Not used here.
Returns:
tuple: a tuple containing the following:
- mask_feature (Tensor): Shape (batch_size, c, h, w).
- memory (Tensor): Output of last stage of backbone.\
Shape (batch_size, c, h, w).
"""
y = self.last_feat_conv(feats[-1])
for i in range(self.num_inputs - 2, -1, -1):
x = feats[i]
cur_feat = self.lateral_convs[i](x)
y = cur_feat + \
F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')
y = self.output_convs[i](y)
mask_feature = self.mask_feature(y)
memory = feats[-1]
return mask_feature, memory
@PLUGIN_LAYERS.register_module()
class TransformerEncoderPixelDecoder(PixelDecoder):
"""Pixel decoder with transormer encoder inside.
Args:
in_channels (list[int] | tuple[int]): Number of channels in the
input feature maps.
        feat_channels (int): Number of channels for features.
        out_channels (int): Number of channels for output.
norm_cfg (:obj:`mmcv.ConfigDict` | dict): Config for normalization.
Defaults to dict(type='GN', num_groups=32).
act_cfg (:obj:`mmcv.ConfigDict` | dict): Config for activation.
Defaults to dict(type='ReLU').
        encoder (:obj:`mmcv.ConfigDict` | dict): Config for transformer
            encoder. Defaults to None.
positional_encoding (:obj:`mmcv.ConfigDict` | dict): Config for
transformer encoder position encoding. Defaults to
dict(type='SinePositionalEncoding', num_feats=128,
normalize=True).
init_cfg (:obj:`mmcv.ConfigDict` | dict): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
feat_channels,
out_channels,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU'),
encoder=None,
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=128,
normalize=True),
init_cfg=None):
super(TransformerEncoderPixelDecoder, self).__init__(
in_channels,
feat_channels,
out_channels,
norm_cfg,
act_cfg,
init_cfg=init_cfg)
self.last_feat_conv = None
self.encoder = build_transformer_layer_sequence(encoder)
self.encoder_embed_dims = self.encoder.embed_dims
        assert self.encoder_embed_dims == feat_channels, 'embed_dims({}) of ' \
            'transformer encoder must be equal to feat_channels({})'.format(
                self.encoder_embed_dims, feat_channels)
self.positional_encoding = build_positional_encoding(
positional_encoding)
self.encoder_in_proj = Conv2d(
in_channels[-1], feat_channels, kernel_size=1)
self.encoder_out_proj = ConvModule(
feat_channels,
feat_channels,
kernel_size=3,
stride=1,
padding=1,
bias=self.use_bias,
norm_cfg=norm_cfg,
act_cfg=act_cfg)
def init_weights(self):
"""Initialize weights."""
for i in range(0, self.num_inputs - 2):
caffe2_xavier_init(self.lateral_convs[i].conv, bias=0)
caffe2_xavier_init(self.output_convs[i].conv, bias=0)
caffe2_xavier_init(self.mask_feature, bias=0)
caffe2_xavier_init(self.encoder_in_proj, bias=0)
caffe2_xavier_init(self.encoder_out_proj.conv, bias=0)
for p in self.encoder.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, feats, img_metas):
"""
Args:
feats (list[Tensor]): Feature maps of each level. Each has
shape of (batch_size, c, h, w).
img_metas (list[dict]): List of image information. Pass in
for creating more accurate padding mask.
Returns:
tuple: a tuple containing the following:
- mask_feature (Tensor): shape (batch_size, c, h, w).
- memory (Tensor): shape (batch_size, c, h, w).
"""
feat_last = feats[-1]
bs, c, h, w = feat_last.shape
input_img_h, input_img_w = img_metas[0]['batch_input_shape']
padding_mask = feat_last.new_ones((bs, input_img_h, input_img_w),
dtype=torch.float32)
for i in range(bs):
img_h, img_w, _ = img_metas[i]['img_shape']
padding_mask[i, :img_h, :img_w] = 0
padding_mask = F.interpolate(
padding_mask.unsqueeze(1),
size=feat_last.shape[-2:],
mode='nearest').to(torch.bool).squeeze(1)
pos_embed = self.positional_encoding(padding_mask)
feat_last = self.encoder_in_proj(feat_last)
# (batch_size, c, h, w) -> (num_queries, batch_size, c)
feat_last = feat_last.flatten(2).permute(2, 0, 1)
pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
# (batch_size, h, w) -> (batch_size, h*w)
padding_mask = padding_mask.flatten(1)
memory = self.encoder(
query=feat_last,
key=None,
value=None,
query_pos=pos_embed,
query_key_padding_mask=padding_mask)
# (num_queries, batch_size, c) -> (batch_size, c, h, w)
memory = memory.permute(1, 2, 0).view(bs, self.encoder_embed_dims, h,
w)
y = self.encoder_out_proj(memory)
for i in range(self.num_inputs - 2, -1, -1):
x = feats[i]
cur_feat = self.lateral_convs[i](x)
y = cur_feat + \
F.interpolate(y, size=cur_feat.shape[-2:], mode='nearest')
y = self.output_convs[i](y)
mask_feature = self.mask_feature(y)
return mask_feature, memory
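# ---------------------------------------------------------------------------
# Usage sketch (illustrative addition, not part of the upstream file). It
# exercises only the plain FPN-like `PixelDecoder`; the transformer variant
# additionally needs an `encoder` config and real `img_metas` to build the
# padding mask. Sizes are arbitrary assumptions.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    decoder = PixelDecoder(
        in_channels=[256, 512, 1024, 2048],
        feat_channels=256,
        out_channels=256)
    feats = [
        torch.rand(1, 256, 64, 64),
        torch.rand(1, 512, 32, 32),
        torch.rand(1, 1024, 16, 16),
        torch.rand(1, 2048, 8, 8),
    ]
    # img_metas is unused by the plain PixelDecoder forward
    mask_feature, memory = decoder(feats, img_metas=None)
    print(mask_feature.shape, memory.shape)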
| 9,716 | 38.82377 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .base_roi_head import BaseRoIHead
from .bbox_heads import (BBoxHead, ConvFCBBoxHead, DIIHead,
DoubleConvFCBBoxHead, SABLHead, SCNetBBoxHead,
Shared2FCBBoxHead, Shared4Conv1FCBBoxHead)
from .cascade_roi_head import CascadeRoIHead
from .double_roi_head import DoubleHeadRoIHead
from .dynamic_roi_head import DynamicRoIHead
from .grid_roi_head import GridRoIHead
from .htc_roi_head import HybridTaskCascadeRoIHead
from .mask_heads import (CoarseMaskHead, FCNMaskHead, FeatureRelayHead,
FusedSemanticHead, GlobalContextHead, GridHead,
HTCMaskHead, MaskIoUHead, MaskPointHead,
SCNetMaskHead, SCNetSemanticHead)
from .mask_scoring_roi_head import MaskScoringRoIHead
from .pisa_roi_head import PISARoIHead
from .point_rend_roi_head import PointRendRoIHead
from .roi_extractors import (BaseRoIExtractor, GenericRoIExtractor,
SingleRoIExtractor)
from .scnet_roi_head import SCNetRoIHead
from .shared_heads import ResLayer
from .sparse_roi_head import SparseRoIHead
from .standard_roi_head import StandardRoIHead
from .trident_roi_head import TridentRoIHead
__all__ = [
'BaseRoIHead', 'CascadeRoIHead', 'DoubleHeadRoIHead', 'MaskScoringRoIHead',
'HybridTaskCascadeRoIHead', 'GridRoIHead', 'ResLayer', 'BBoxHead',
'ConvFCBBoxHead', 'DIIHead', 'SABLHead', 'Shared2FCBBoxHead',
'StandardRoIHead', 'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead',
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead', 'BaseRoIExtractor', 'GenericRoIExtractor',
'SingleRoIExtractor', 'PISARoIHead', 'PointRendRoIHead', 'MaskPointHead',
'CoarseMaskHead', 'DynamicRoIHead', 'SparseRoIHead', 'TridentRoIHead',
'SCNetRoIHead', 'SCNetMaskHead', 'SCNetSemanticHead', 'SCNetBBoxHead',
'FeatureRelayHead', 'GlobalContextHead'
]
| 1,961 | 50.631579 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/base_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from mmcv.runner import BaseModule
from ..builder import build_shared_head
class BaseRoIHead(BaseModule, metaclass=ABCMeta):
"""Base class for RoIHeads."""
def __init__(self,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
shared_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
super(BaseRoIHead, self).__init__(init_cfg)
self.train_cfg = train_cfg
self.test_cfg = test_cfg
if shared_head is not None:
shared_head.pretrained = pretrained
self.shared_head = build_shared_head(shared_head)
if bbox_head is not None:
self.init_bbox_head(bbox_roi_extractor, bbox_head)
if mask_head is not None:
self.init_mask_head(mask_roi_extractor, mask_head)
self.init_assigner_sampler()
@property
def with_bbox(self):
"""bool: whether the RoI head contains a `bbox_head`"""
return hasattr(self, 'bbox_head') and self.bbox_head is not None
@property
def with_mask(self):
"""bool: whether the RoI head contains a `mask_head`"""
return hasattr(self, 'mask_head') and self.mask_head is not None
@property
def with_shared_head(self):
"""bool: whether the RoI head contains a `shared_head`"""
return hasattr(self, 'shared_head') and self.shared_head is not None
@abstractmethod
def init_bbox_head(self):
"""Initialize ``bbox_head``"""
pass
@abstractmethod
def init_mask_head(self):
"""Initialize ``mask_head``"""
pass
@abstractmethod
def init_assigner_sampler(self):
"""Initialize assigner and sampler."""
pass
@abstractmethod
def forward_train(self,
x,
img_meta,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
**kwargs):
"""Forward function during training."""
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False,
**kwargs):
"""Asynchronized test function."""
raise NotImplementedError
def simple_test(self,
x,
proposal_list,
img_meta,
proposals=None,
rescale=False,
**kwargs):
"""Test without augmentation."""
def aug_test(self, x, proposal_list, img_metas, rescale=False, **kwargs):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
| 3,197 | 29.75 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/cascade_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
from mmcv.runner import ModuleList
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, build_assigner,
build_sampler, merge_aug_bboxes, merge_aug_masks,
multiclass_nms)
from ..builder import HEADS, build_head, build_roi_extractor
from .base_roi_head import BaseRoIHead
from .test_mixins import BBoxTestMixin, MaskTestMixin
@HEADS.register_module()
class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
"""Cascade roi head including one bbox head and one mask head.
https://arxiv.org/abs/1712.00726
"""
def __init__(self,
num_stages,
stage_loss_weights,
bbox_roi_extractor=None,
bbox_head=None,
mask_roi_extractor=None,
mask_head=None,
shared_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
assert bbox_roi_extractor is not None
assert bbox_head is not None
assert shared_head is None, \
'Shared head is not supported in Cascade RCNN anymore'
self.num_stages = num_stages
self.stage_loss_weights = stage_loss_weights
super(CascadeRoIHead, self).__init__(
bbox_roi_extractor=bbox_roi_extractor,
bbox_head=bbox_head,
mask_roi_extractor=mask_roi_extractor,
mask_head=mask_head,
shared_head=shared_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
def init_bbox_head(self, bbox_roi_extractor, bbox_head):
"""Initialize box head and box roi extractor.
Args:
bbox_roi_extractor (dict): Config of box roi extractor.
            bbox_head (dict): Config of the box head.
"""
self.bbox_roi_extractor = ModuleList()
self.bbox_head = ModuleList()
if not isinstance(bbox_roi_extractor, list):
bbox_roi_extractor = [
bbox_roi_extractor for _ in range(self.num_stages)
]
if not isinstance(bbox_head, list):
bbox_head = [bbox_head for _ in range(self.num_stages)]
assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages
for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):
self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor))
self.bbox_head.append(build_head(head))
def init_mask_head(self, mask_roi_extractor, mask_head):
"""Initialize mask head and mask roi extractor.
Args:
mask_roi_extractor (dict): Config of mask roi extractor.
            mask_head (dict): Config of the mask head.
"""
self.mask_head = nn.ModuleList()
if not isinstance(mask_head, list):
mask_head = [mask_head for _ in range(self.num_stages)]
assert len(mask_head) == self.num_stages
for head in mask_head:
self.mask_head.append(build_head(head))
if mask_roi_extractor is not None:
self.share_roi_extractor = False
self.mask_roi_extractor = ModuleList()
if not isinstance(mask_roi_extractor, list):
mask_roi_extractor = [
mask_roi_extractor for _ in range(self.num_stages)
]
assert len(mask_roi_extractor) == self.num_stages
for roi_extractor in mask_roi_extractor:
self.mask_roi_extractor.append(
build_roi_extractor(roi_extractor))
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
def init_assigner_sampler(self):
"""Initialize assigner and sampler for each stage."""
self.bbox_assigner = []
self.bbox_sampler = []
if self.train_cfg is not None:
for idx, rcnn_train_cfg in enumerate(self.train_cfg):
self.bbox_assigner.append(
build_assigner(rcnn_train_cfg.assigner))
self.current_stage = idx
self.bbox_sampler.append(
build_sampler(rcnn_train_cfg.sampler, context=self))
def forward_dummy(self, x, proposals):
"""Dummy forward function."""
# bbox head
outs = ()
rois = bbox2roi([proposals])
if self.with_bbox:
for i in range(self.num_stages):
bbox_results = self._bbox_forward(i, x, rois)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask heads
if self.with_mask:
mask_rois = rois[:100]
for i in range(self.num_stages):
mask_results = self._mask_forward(i, x, mask_rois)
outs = outs + (mask_results['mask_pred'], )
return outs
def _bbox_forward(self, stage, x, rois):
"""Box head forward function used in both training and testing."""
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
# do not support caffe_c4 model anymore
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
return bbox_results
def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg):
"""Run forward function and calculate loss for box head in training."""
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(stage, x, rois)
bbox_targets = self.bbox_head[stage].get_targets(
sampling_results, gt_bboxes, gt_labels, rcnn_train_cfg)
loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(
loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
return bbox_results
def _mask_forward(self, stage, x, rois):
"""Mask head forward function used in both training and testing."""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
rois)
# do not support caffe_c4 model anymore
mask_pred = mask_head(mask_feats)
mask_results = dict(mask_pred=mask_pred)
return mask_results
def _mask_forward_train(self,
stage,
x,
sampling_results,
gt_masks,
rcnn_train_cfg,
bbox_feats=None):
"""Run forward function and calculate loss for mask head in
training."""
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_results = self._mask_forward(stage, x, pos_rois)
mask_targets = self.mask_head[stage].get_targets(
sampling_results, gt_masks, rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],
mask_targets, pos_labels)
mask_results.update(loss_mask=loss_mask)
return mask_results
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
losses = dict()
for i in range(self.num_stages):
self.current_stage = i
rcnn_train_cfg = self.train_cfg[i]
lw = self.stage_loss_weights[i]
# assign gts and sample proposals
sampling_results = []
if self.with_bbox or self.with_mask:
bbox_assigner = self.bbox_assigner[i]
bbox_sampler = self.bbox_sampler[i]
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for j in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],
gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_results = self._bbox_forward_train(i, x, sampling_results,
gt_bboxes, gt_labels,
rcnn_train_cfg)
for name, value in bbox_results['loss_bbox'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# mask head forward and loss
if self.with_mask:
mask_results = self._mask_forward_train(
i, x, sampling_results, gt_masks, rcnn_train_cfg,
bbox_results['bbox_feats'])
for name, value in mask_results['loss_mask'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# refine bboxes
if i < self.num_stages - 1:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
# bbox_targets is a tuple
roi_labels = bbox_results['bbox_targets'][0]
with torch.no_grad():
cls_score = bbox_results['cls_score']
if self.bbox_head[i].custom_activation:
cls_score = self.bbox_head[i].loss_cls.get_activation(
cls_score)
# Empty proposal.
if cls_score.numel() == 0:
break
roi_labels = torch.where(
roi_labels == self.bbox_head[i].num_classes,
cls_score[:, :-1].argmax(1), roi_labels)
proposal_list = self.bbox_head[i].refine_bboxes(
bbox_results['rois'], roi_labels,
bbox_results['bbox_pred'], pos_is_gts, img_metas)
return losses
def simple_test(self, x, proposal_list, img_metas, rescale=False):
"""Test without augmentation.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (batch_size, c, h, w).
proposal_list (list(Tensor)): Proposals from rpn head.
Each has shape (num_proposals, 5), last dimension
5 represent (x1, y1, x2, y2, score).
img_metas (list[dict]): Meta information of images.
            rescale (bool): Whether to rescale the results to
                the original image. Default: False.
Returns:
list[list[np.ndarray]] or list[tuple]: When no mask branch,
it is bbox results of each image and classes with type
`list[list[np.ndarray]]`. The outer list
corresponds to each image. The inner list
corresponds to each class. When the model has mask branch,
it contains bbox results and mask results.
The outer list corresponds to each image, and first element
of tuple is bbox results, second element is mask results.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
num_imgs = len(proposal_list)
img_shapes = tuple(meta['img_shape'] for meta in img_metas)
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# "ms" in variable names means multi-stage
ms_bbox_result = {}
ms_segm_result = {}
ms_scores = []
rcnn_test_cfg = self.test_cfg
rois = bbox2roi(proposal_list)
if rois.shape[0] == 0:
# There is no proposal in the whole batch
bbox_results = [[
np.zeros((0, 5), dtype=np.float32)
for _ in range(self.bbox_head[-1].num_classes)
]] * num_imgs
if self.with_mask:
mask_classes = self.mask_head[-1].num_classes
segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
results = list(zip(bbox_results, segm_results))
else:
results = bbox_results
return results
for i in range(self.num_stages):
bbox_results = self._bbox_forward(i, x, rois)
# split batch bbox prediction back to each image
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
num_proposals_per_img = tuple(
len(proposals) for proposals in proposal_list)
rois = rois.split(num_proposals_per_img, 0)
cls_score = cls_score.split(num_proposals_per_img, 0)
if isinstance(bbox_pred, torch.Tensor):
bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
else:
bbox_pred = self.bbox_head[i].bbox_pred_split(
bbox_pred, num_proposals_per_img)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
if self.bbox_head[i].custom_activation:
cls_score = [
self.bbox_head[i].loss_cls.get_activation(s)
for s in cls_score
]
refine_rois_list = []
for j in range(num_imgs):
if rois[j].shape[0] > 0:
bbox_label = cls_score[j][:, :-1].argmax(dim=1)
refined_rois = self.bbox_head[i].regress_by_class(
rois[j], bbox_label, bbox_pred[j], img_metas[j])
refine_rois_list.append(refined_rois)
rois = torch.cat(refine_rois_list)
# average scores of each image by stages
cls_score = [
sum([score[i] for score in ms_scores]) / float(len(ms_scores))
for i in range(num_imgs)
]
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(num_imgs):
det_bbox, det_label = self.bbox_head[-1].get_bboxes(
rois[i],
cls_score[i],
bbox_pred[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
bbox_results = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head[-1].num_classes)
for i in range(num_imgs)
]
ms_bbox_result['ensemble'] = bbox_results
if self.with_mask:
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
mask_classes = self.mask_head[-1].num_classes
segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
else:
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i][:, :4]
for i in range(len(det_bboxes))
]
mask_rois = bbox2roi(_bboxes)
num_mask_rois_per_img = tuple(
_bbox.size(0) for _bbox in _bboxes)
aug_masks = []
for i in range(self.num_stages):
mask_results = self._mask_forward(i, x, mask_rois)
mask_pred = mask_results['mask_pred']
# split batch mask prediction back to each image
mask_pred = mask_pred.split(num_mask_rois_per_img, 0)
aug_masks.append([
m.sigmoid().cpu().detach().numpy() for m in mask_pred
])
# apply mask post-processing to each image individually
segm_results = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[]
for _ in range(self.mask_head[-1].num_classes)])
else:
aug_mask = [mask[i] for mask in aug_masks]
merged_masks = merge_aug_masks(
aug_mask, [[img_metas[i]]] * self.num_stages,
rcnn_test_cfg)
segm_result = self.mask_head[-1].get_seg_masks(
merged_masks, _bboxes[i], det_labels[i],
rcnn_test_cfg, ori_shapes[i], scale_factors[i],
rescale)
segm_results.append(segm_result)
ms_segm_result['ensemble'] = segm_results
if self.with_mask:
results = list(
zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
else:
results = ms_bbox_result['ensemble']
return results
def aug_test(self, features, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
rcnn_test_cfg = self.test_cfg
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(features, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
# "ms" in variable names means multi-stage
ms_scores = []
rois = bbox2roi([proposals])
if rois.shape[0] == 0:
# There is no proposal in the single image
aug_bboxes.append(rois.new_zeros(0, 4))
aug_scores.append(rois.new_zeros(0, 1))
continue
for i in range(self.num_stages):
bbox_results = self._bbox_forward(i, x, rois)
ms_scores.append(bbox_results['cls_score'])
if i < self.num_stages - 1:
cls_score = bbox_results['cls_score']
if self.bbox_head[i].custom_activation:
cls_score = self.bbox_head[i].loss_cls.get_activation(
cls_score)
bbox_label = cls_score[:, :-1].argmax(dim=1)
rois = self.bbox_head[i].regress_by_class(
rois, bbox_label, bbox_results['bbox_pred'],
img_meta[0])
cls_score = sum(ms_scores) / float(len(ms_scores))
bboxes, scores = self.bbox_head[-1].get_bboxes(
rois,
cls_score,
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
bbox_result = bbox2result(det_bboxes, det_labels,
self.bbox_head[-1].num_classes)
if self.with_mask:
if det_bboxes.shape[0] == 0:
segm_result = [[]
for _ in range(self.mask_head[-1].num_classes)]
else:
aug_masks = []
aug_img_metas = []
for x, img_meta in zip(features, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip, flip_direction)
mask_rois = bbox2roi([_bboxes])
for i in range(self.num_stages):
mask_results = self._mask_forward(i, x, mask_rois)
aug_masks.append(
mask_results['mask_pred'].sigmoid().cpu().numpy())
aug_img_metas.append(img_meta)
merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
dummy_scale_factor = np.ones(4)
segm_result = self.mask_head[-1].get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
rcnn_test_cfg,
ori_shape,
scale_factor=dummy_scale_factor,
rescale=False)
return [(bbox_result, segm_result)]
else:
return [bbox_result]
def onnx_export(self, x, proposals, img_metas):
assert self.with_bbox, 'Bbox head must be implemented.'
assert proposals.shape[0] == 1, 'Only support one input image ' \
'while in exporting to ONNX'
# remove the scores
rois = proposals[..., :-1]
batch_size = rois.shape[0]
num_proposals_per_img = rois.shape[1]
# Eliminate the batch dimension
rois = rois.view(-1, 4)
# add dummy batch index
rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois], dim=-1)
max_shape = img_metas[0]['img_shape_for_onnx']
ms_scores = []
rcnn_test_cfg = self.test_cfg
for i in range(self.num_stages):
bbox_results = self._bbox_forward(i, x, rois)
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Recover the batch dimension
rois = rois.reshape(batch_size, num_proposals_per_img,
rois.size(-1))
cls_score = cls_score.reshape(batch_size, num_proposals_per_img,
cls_score.size(-1))
bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
assert self.bbox_head[i].reg_class_agnostic
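                # with class-agnostic regression every RoI has a single box
                # delta, so it can be decoded without knowing the class label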
new_rois = self.bbox_head[i].bbox_coder.decode(
rois[..., 1:], bbox_pred, max_shape=max_shape)
rois = new_rois.reshape(-1, new_rois.shape[-1])
# add dummy batch index
rois = torch.cat([rois.new_zeros(rois.shape[0], 1), rois],
dim=-1)
cls_score = sum(ms_scores) / float(len(ms_scores))
bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img, 4)
rois = rois.reshape(batch_size, num_proposals_per_img, -1)
det_bboxes, det_labels = self.bbox_head[-1].onnx_export(
rois, cls_score, bbox_pred, max_shape, cfg=rcnn_test_cfg)
if not self.with_mask:
return det_bboxes, det_labels
else:
batch_index = torch.arange(
det_bboxes.size(0),
device=det_bboxes.device).float().view(-1, 1, 1).expand(
det_bboxes.size(0), det_bboxes.size(1), 1)
rois = det_bboxes[..., :4]
mask_rois = torch.cat([batch_index, rois], dim=-1)
mask_rois = mask_rois.view(-1, 5)
aug_masks = []
for i in range(self.num_stages):
mask_results = self._mask_forward(i, x, mask_rois)
mask_pred = mask_results['mask_pred']
aug_masks.append(mask_pred)
max_shape = img_metas[0]['img_shape_for_onnx']
            # average the mask predictions over all stages
mask_pred = sum(aug_masks) / len(aug_masks)
segm_results = self.mask_head[-1].onnx_export(
mask_pred, rois.reshape(-1, 4), det_labels.reshape(-1),
self.test_cfg, max_shape)
segm_results = segm_results.reshape(batch_size,
det_bboxes.shape[1],
max_shape[0], max_shape[1])
return det_bboxes, det_labels, segm_results
| 27,668 | 42.780063 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/double_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class DoubleHeadRoIHead(StandardRoIHead):
"""RoI head for Double Head RCNN.
https://arxiv.org/abs/1904.06493
"""
def __init__(self, reg_roi_scale_factor, **kwargs):
super(DoubleHeadRoIHead, self).__init__(**kwargs)
self.reg_roi_scale_factor = reg_roi_scale_factor
def _bbox_forward(self, x, rois):
"""Box head forward function used in both training and testing time."""
bbox_cls_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
bbox_reg_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs],
rois,
roi_scale_factor=self.reg_roi_scale_factor)
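        # the regression branch pools features from RoIs rescaled by
        # reg_roi_scale_factor, so the two branches see different spatial
        # extents of the same proposal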
if self.with_shared_head:
bbox_cls_feats = self.shared_head(bbox_cls_feats)
bbox_reg_feats = self.shared_head(bbox_reg_feats)
cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
bbox_feats=bbox_cls_feats)
return bbox_results
| 1,250 | 34.742857 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/dynamic_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.core import bbox2roi
from mmdet.models.losses import SmoothL1Loss
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
EPS = 1e-15
@HEADS.register_module()
class DynamicRoIHead(StandardRoIHead):
"""RoI head for `Dynamic R-CNN <https://arxiv.org/abs/2004.06002>`_."""
def __init__(self, **kwargs):
super(DynamicRoIHead, self).__init__(**kwargs)
assert isinstance(self.bbox_head.loss_bbox, SmoothL1Loss)
# the IoU history of the past `update_iter_interval` iterations
self.iou_history = []
# the beta history of the past `update_iter_interval` iterations
self.beta_history = []
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""Forward function for training.
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): list of region proposals.
gt_bboxes (list[Tensor]): each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
            gt_masks (None | Tensor): true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
cur_iou = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
# record the `iou_topk`-th largest IoU in an image
iou_topk = min(self.train_cfg.dynamic_rcnn.iou_topk,
len(assign_result.max_overlaps))
ious, _ = torch.topk(assign_result.max_overlaps, iou_topk)
cur_iou.append(ious[-1].item())
sampling_results.append(sampling_result)
# average the current IoUs over images
cur_iou = np.mean(cur_iou)
self.iou_history.append(cur_iou)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self._bbox_forward_train(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
# update IoU threshold and SmoothL1 beta
update_iter_interval = self.train_cfg.dynamic_rcnn.update_iter_interval
if len(self.iou_history) % update_iter_interval == 0:
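            # update_hyperparameters() adjusts the assigner IoU thresholds and
            # the SmoothL1 beta in place; the returned values are not used here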
new_iou_thr, new_beta = self.update_hyperparameters()
return losses
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas):
num_imgs = len(img_metas)
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, self.train_cfg)
# record the `beta_topk`-th smallest target
# `bbox_targets[2]` and `bbox_targets[3]` stand for bbox_targets
# and bbox_weights, respectively
pos_inds = bbox_targets[3][:, 0].nonzero().squeeze(1)
num_pos = len(pos_inds)
cur_target = bbox_targets[2][pos_inds, :2].abs().mean(dim=1)
beta_topk = min(self.train_cfg.dynamic_rcnn.beta_topk * num_imgs,
num_pos)
cur_target = torch.kthvalue(cur_target, beta_topk)[0].item()
self.beta_history.append(cur_target)
loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
def update_hyperparameters(self):
"""Update hyperparameters like IoU thresholds for assigner and beta for
SmoothL1 loss based on the training statistics.
Returns:
tuple[float]: the updated ``iou_thr`` and ``beta``.
"""
new_iou_thr = max(self.train_cfg.dynamic_rcnn.initial_iou,
np.mean(self.iou_history))
self.iou_history = []
self.bbox_assigner.pos_iou_thr = new_iou_thr
self.bbox_assigner.neg_iou_thr = new_iou_thr
self.bbox_assigner.min_pos_iou = new_iou_thr
if (np.median(self.beta_history) < EPS):
# avoid 0 or too small value for new_beta
new_beta = self.bbox_head.loss_bbox.beta
else:
new_beta = min(self.train_cfg.dynamic_rcnn.initial_beta,
np.median(self.beta_history))
self.beta_history = []
self.bbox_head.loss_bbox.beta = new_beta
return new_iou_thr, new_beta
| 6,654 | 41.660256 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/grid_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.core import bbox2result, bbox2roi
from ..builder import HEADS, build_head, build_roi_extractor
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class GridRoIHead(StandardRoIHead):
"""Grid roi head for Grid R-CNN.
https://arxiv.org/abs/1811.12030
"""
def __init__(self, grid_roi_extractor, grid_head, **kwargs):
assert grid_head is not None
super(GridRoIHead, self).__init__(**kwargs)
if grid_roi_extractor is not None:
self.grid_roi_extractor = build_roi_extractor(grid_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.grid_roi_extractor = self.bbox_roi_extractor
self.grid_head = build_head(grid_head)
def _random_jitter(self, sampling_results, img_metas, amplitude=0.15):
"""Ramdom jitter positive proposals for training."""
for sampling_result, img_meta in zip(sampling_results, img_metas):
bboxes = sampling_result.pos_bboxes
random_offsets = bboxes.new_empty(bboxes.shape[0], 4).uniform_(
-amplitude, amplitude)
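            # offsets are fractions of the box size: the first two shift the
            # center and the last two rescale the width/height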
# before jittering
cxcy = (bboxes[:, 2:4] + bboxes[:, :2]) / 2
wh = (bboxes[:, 2:4] - bboxes[:, :2]).abs()
# after jittering
new_cxcy = cxcy + wh * random_offsets[:, :2]
new_wh = wh * (1 + random_offsets[:, 2:])
# xywh to xyxy
new_x1y1 = (new_cxcy - new_wh / 2)
new_x2y2 = (new_cxcy + new_wh / 2)
new_bboxes = torch.cat([new_x1y1, new_x2y2], dim=1)
# clip bboxes
max_shape = img_meta['img_shape']
if max_shape is not None:
new_bboxes[:, 0::2].clamp_(min=0, max=max_shape[1] - 1)
new_bboxes[:, 1::2].clamp_(min=0, max=max_shape[0] - 1)
sampling_result.pos_bboxes = new_bboxes
return sampling_results
def forward_dummy(self, x, proposals):
"""Dummy forward function."""
# bbox head
outs = ()
rois = bbox2roi([proposals])
if self.with_bbox:
bbox_results = self._bbox_forward(x, rois)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# grid head
grid_rois = rois[:100]
grid_feats = self.grid_roi_extractor(
x[:self.grid_roi_extractor.num_inputs], grid_rois)
if self.with_shared_head:
grid_feats = self.shared_head(grid_feats)
grid_pred = self.grid_head(grid_feats)
outs = outs + (grid_pred, )
# mask head
if self.with_mask:
mask_rois = rois[:100]
mask_results = self._mask_forward(x, mask_rois)
outs = outs + (mask_results['mask_pred'], )
return outs
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas):
"""Run forward function and calculate loss for box head in training."""
bbox_results = super(GridRoIHead,
self)._bbox_forward_train(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
# Grid head forward and loss
sampling_results = self._random_jitter(sampling_results, img_metas)
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
# GN in head does not support zero shape input
if pos_rois.shape[0] == 0:
return bbox_results
grid_feats = self.grid_roi_extractor(
x[:self.grid_roi_extractor.num_inputs], pos_rois)
if self.with_shared_head:
grid_feats = self.shared_head(grid_feats)
# Accelerate training
max_sample_num_grid = self.train_cfg.get('max_num_grid', 192)
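        # randomly keep at most max_sample_num_grid positive RoIs so that the
        # grid head cost stays bounded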
sample_idx = torch.randperm(
grid_feats.shape[0])[:min(grid_feats.shape[0], max_sample_num_grid
)]
grid_feats = grid_feats[sample_idx]
grid_pred = self.grid_head(grid_feats)
grid_targets = self.grid_head.get_targets(sampling_results,
self.train_cfg)
grid_targets = grid_targets[sample_idx]
loss_grid = self.grid_head.loss(grid_pred, grid_targets)
bbox_results['loss_bbox'].update(loss_grid)
return bbox_results
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=False)
        # pack the detected bboxes into rois for the grid head
grid_rois = bbox2roi([det_bbox[:, :4] for det_bbox in det_bboxes])
if grid_rois.shape[0] != 0:
grid_feats = self.grid_roi_extractor(
x[:len(self.grid_roi_extractor.featmap_strides)], grid_rois)
self.grid_head.test_mode = True
grid_pred = self.grid_head(grid_feats)
# split batch grid head prediction back to each image
num_roi_per_img = tuple(len(det_bbox) for det_bbox in det_bboxes)
grid_pred = {
k: v.split(num_roi_per_img, 0)
for k, v in grid_pred.items()
}
# apply bbox post-processing to each image individually
bbox_results = []
num_imgs = len(det_bboxes)
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
bbox_results.append([
np.zeros((0, 5), dtype=np.float32)
for _ in range(self.bbox_head.num_classes)
])
else:
det_bbox = self.grid_head.get_bboxes(
det_bboxes[i], grid_pred['fused'][i], [img_metas[i]])
if rescale:
det_bbox[:, :4] /= img_metas[i]['scale_factor']
bbox_results.append(
bbox2result(det_bbox, det_labels[i],
self.bbox_head.num_classes))
else:
bbox_results = [[
np.zeros((0, 5), dtype=np.float32)
for _ in range(self.bbox_head.num_classes)
] for _ in range(len(det_bboxes))]
if not self.with_mask:
return bbox_results
else:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
return list(zip(bbox_results, segm_results))
| 6,961 | 39.71345 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/htc_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn.functional as F
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
merge_aug_masks, multiclass_nms)
from ..builder import HEADS, build_head, build_roi_extractor
from ..utils.brick_wrappers import adaptive_avg_pool2d
from .cascade_roi_head import CascadeRoIHead
@HEADS.register_module()
class HybridTaskCascadeRoIHead(CascadeRoIHead):
"""Hybrid task cascade roi head including one bbox head and one mask head.
https://arxiv.org/abs/1901.07518
"""
def __init__(self,
num_stages,
stage_loss_weights,
semantic_roi_extractor=None,
semantic_head=None,
semantic_fusion=('bbox', 'mask'),
interleaved=True,
mask_info_flow=True,
**kwargs):
super(HybridTaskCascadeRoIHead,
self).__init__(num_stages, stage_loss_weights, **kwargs)
assert self.with_bbox
assert not self.with_shared_head # shared head is not supported
if semantic_head is not None:
self.semantic_roi_extractor = build_roi_extractor(
semantic_roi_extractor)
self.semantic_head = build_head(semantic_head)
self.semantic_fusion = semantic_fusion
self.interleaved = interleaved
self.mask_info_flow = mask_info_flow
@property
def with_semantic(self):
"""bool: whether the head has semantic head"""
if hasattr(self, 'semantic_head') and self.semantic_head is not None:
return True
else:
return False
def forward_dummy(self, x, proposals):
"""Dummy forward function."""
outs = ()
# semantic head
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
# bbox heads
rois = bbox2roi([proposals])
for i in range(self.num_stages):
bbox_results = self._bbox_forward(
i, x, rois, semantic_feat=semantic_feat)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask heads
if self.with_mask:
mask_rois = rois[:100]
mask_roi_extractor = self.mask_roi_extractor[-1]
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor(
[semantic_feat], mask_rois)
mask_feats = mask_feats + mask_semantic_feat
last_feat = None
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.mask_info_flow:
mask_pred, last_feat = mask_head(mask_feats, last_feat)
else:
mask_pred = mask_head(mask_feats)
outs = outs + (mask_pred, )
return outs
def _bbox_forward_train(self,
stage,
x,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
semantic_feat=None):
"""Run forward function and calculate loss for box head in training."""
bbox_head = self.bbox_head[stage]
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(
stage, x, rois, semantic_feat=semantic_feat)
bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg)
loss_bbox = bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(
loss_bbox=loss_bbox,
rois=rois,
bbox_targets=bbox_targets,
)
return bbox_results
def _mask_forward_train(self,
stage,
x,
sampling_results,
gt_masks,
rcnn_train_cfg,
semantic_feat=None):
"""Run forward function and calculate loss for mask head in
training."""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
pos_rois)
# semantic feature fusion
# element-wise sum for original features and pooled semantic features
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
pos_rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats = mask_feats + mask_semantic_feat
# mask information flow
# forward all previous mask heads to obtain last_feat, and fuse it
# with the normal mask feature
if self.mask_info_flow:
last_feat = None
for i in range(stage):
last_feat = self.mask_head[i](
mask_feats, last_feat, return_logits=False)
mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
else:
mask_pred = mask_head(mask_feats, return_feat=False)
mask_targets = mask_head.get_targets(sampling_results, gt_masks,
rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = mask_head.loss(mask_pred, mask_targets, pos_labels)
mask_results = dict(loss_mask=loss_mask)
return mask_results
def _bbox_forward(self, stage, x, rois, semantic_feat=None):
"""Box head forward function used in both training and testing."""
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(
x[:len(bbox_roi_extractor.featmap_strides)], rois)
if self.with_semantic and 'bbox' in self.semantic_fusion:
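            # fuse the pooled semantic features into the bbox features by
            # element-wise sum (mirrors the fusion used in the mask branch)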
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
bbox_semantic_feat = adaptive_avg_pool2d(
bbox_semantic_feat, bbox_feats.shape[-2:])
bbox_feats = bbox_feats + bbox_semantic_feat
cls_score, bbox_pred = bbox_head(bbox_feats)
bbox_results = dict(cls_score=cls_score, bbox_pred=bbox_pred)
return bbox_results
def _mask_forward_test(self, stage, x, bboxes, semantic_feat=None):
"""Mask head forward function for testing."""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_rois = bbox2roi([bboxes])
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
mask_rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats = mask_feats + mask_semantic_feat
if self.mask_info_flow:
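            # run all previous mask heads first, propagating their feature
            # (last_feat) and accumulating their predictions (last_pred) into
            # the prediction of the current stage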
last_feat = None
last_pred = None
for i in range(stage):
mask_pred, last_feat = self.mask_head[i](mask_feats, last_feat)
if last_pred is not None:
mask_pred = mask_pred + last_pred
last_pred = mask_pred
mask_pred = mask_head(mask_feats, last_feat, return_feat=False)
if last_pred is not None:
mask_pred = mask_pred + last_pred
else:
mask_pred = mask_head(mask_feats)
return mask_pred
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
gt_semantic_seg=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None, list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
            gt_masks (None, Tensor): true segmentation masks for each box
                used if the architecture supports a segmentation task.
            gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
                used if the architecture supports a semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# semantic segmentation part
# 2 outputs: segmentation prediction and embedded features
losses = dict()
if self.with_semantic:
semantic_pred, semantic_feat = self.semantic_head(x)
loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
losses['loss_semantic_seg'] = loss_seg
else:
semantic_feat = None
for i in range(self.num_stages):
self.current_stage = i
rcnn_train_cfg = self.train_cfg[i]
lw = self.stage_loss_weights[i]
# assign gts and sample proposals
sampling_results = []
bbox_assigner = self.bbox_assigner[i]
bbox_sampler = self.bbox_sampler[i]
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for j in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[j],
gt_bboxes[j],
gt_bboxes_ignore[j],
gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
# bbox head forward and loss
bbox_results = \
self._bbox_forward_train(
i, x, sampling_results, gt_bboxes, gt_labels,
rcnn_train_cfg, semantic_feat)
roi_labels = bbox_results['bbox_targets'][0]
for name, value in bbox_results['loss_bbox'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# mask head forward and loss
if self.with_mask:
# interleaved execution: use regressed bboxes by the box branch
# to train the mask branch
if self.interleaved:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
with torch.no_grad():
proposal_list = self.bbox_head[i].refine_bboxes(
bbox_results['rois'], roi_labels,
bbox_results['bbox_pred'], pos_is_gts, img_metas)
                # re-assign labels and re-sample RoIs based on the refined
                # proposals
sampling_results = []
for j in range(num_imgs):
assign_result = bbox_assigner.assign(
proposal_list[j], gt_bboxes[j],
gt_bboxes_ignore[j], gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
mask_results = self._mask_forward_train(
i, x, sampling_results, gt_masks, rcnn_train_cfg,
semantic_feat)
for name, value in mask_results['loss_mask'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# refine bboxes (same as Cascade R-CNN)
if i < self.num_stages - 1 and not self.interleaved:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
with torch.no_grad():
proposal_list = self.bbox_head[i].refine_bboxes(
bbox_results['rois'], roi_labels,
bbox_results['bbox_pred'], pos_is_gts, img_metas)
return losses
def simple_test(self, x, proposal_list, img_metas, rescale=False):
"""Test without augmentation.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (batch_size, c, h, w).
proposal_list (list(Tensor)): Proposals from rpn head.
Each has shape (num_proposals, 5), last dimension
5 represent (x1, y1, x2, y2, score).
img_metas (list[dict]): Meta information of images.
rescale (bool): Whether to rescale the results to
                the original image. Default: False.
Returns:
list[list[np.ndarray]] or list[tuple]: When no mask branch,
it is bbox results of each image and classes with type
`list[list[np.ndarray]]`. The outer list
corresponds to each image. The inner list
corresponds to each class. When the model has mask branch,
it contains bbox results and mask results.
The outer list corresponds to each image, and first element
of tuple is bbox results, second element is mask results.
"""
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
num_imgs = len(proposal_list)
img_shapes = tuple(meta['img_shape'] for meta in img_metas)
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# "ms" in variable names means multi-stage
ms_bbox_result = {}
ms_segm_result = {}
ms_scores = []
rcnn_test_cfg = self.test_cfg
rois = bbox2roi(proposal_list)
if rois.shape[0] == 0:
# There is no proposal in the whole batch
bbox_results = [[
np.zeros((0, 5), dtype=np.float32)
for _ in range(self.bbox_head[-1].num_classes)
]] * num_imgs
if self.with_mask:
mask_classes = self.mask_head[-1].num_classes
segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
results = list(zip(bbox_results, segm_results))
else:
results = bbox_results
return results
for i in range(self.num_stages):
bbox_head = self.bbox_head[i]
bbox_results = self._bbox_forward(
i, x, rois, semantic_feat=semantic_feat)
# split batch bbox prediction back to each image
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
num_proposals_per_img = tuple(len(p) for p in proposal_list)
rois = rois.split(num_proposals_per_img, 0)
cls_score = cls_score.split(num_proposals_per_img, 0)
bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
refine_rois_list = []
for j in range(num_imgs):
if rois[j].shape[0] > 0:
bbox_label = cls_score[j][:, :-1].argmax(dim=1)
refine_rois = bbox_head.regress_by_class(
rois[j], bbox_label, bbox_pred[j], img_metas[j])
refine_rois_list.append(refine_rois)
rois = torch.cat(refine_rois_list)
# average scores of each image by stages
cls_score = [
sum([score[i] for score in ms_scores]) / float(len(ms_scores))
for i in range(num_imgs)
]
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(num_imgs):
det_bbox, det_label = self.bbox_head[-1].get_bboxes(
rois[i],
cls_score[i],
bbox_pred[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
bbox_result = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head[-1].num_classes)
for i in range(num_imgs)
]
ms_bbox_result['ensemble'] = bbox_result
if self.with_mask:
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
mask_classes = self.mask_head[-1].num_classes
segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
else:
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i]
for i in range(num_imgs)
]
mask_rois = bbox2roi(_bboxes)
aug_masks = []
mask_roi_extractor = self.mask_roi_extractor[-1]
mask_feats = mask_roi_extractor(
x[:len(mask_roi_extractor.featmap_strides)], mask_rois)
if self.with_semantic and 'mask' in self.semantic_fusion:
mask_semantic_feat = self.semantic_roi_extractor(
[semantic_feat], mask_rois)
mask_feats = mask_feats + mask_semantic_feat
last_feat = None
num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.mask_info_flow:
mask_pred, last_feat = mask_head(mask_feats, last_feat)
else:
mask_pred = mask_head(mask_feats)
# split batch mask prediction back to each image
mask_pred = mask_pred.split(num_bbox_per_img, 0)
aug_masks.append(
[mask.sigmoid().cpu().numpy() for mask in mask_pred])
# apply mask post-processing to each image individually
segm_results = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[]
for _ in range(self.mask_head[-1].num_classes)])
else:
aug_mask = [mask[i] for mask in aug_masks]
merged_mask = merge_aug_masks(
aug_mask, [[img_metas[i]]] * self.num_stages,
rcnn_test_cfg)
segm_result = self.mask_head[-1].get_seg_masks(
merged_mask, _bboxes[i], det_labels[i],
rcnn_test_cfg, ori_shapes[i], scale_factors[i],
rescale)
segm_results.append(segm_result)
ms_segm_result['ensemble'] = segm_results
if self.with_mask:
results = list(
zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))
else:
results = ms_bbox_result['ensemble']
return results
def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
if self.with_semantic:
semantic_feats = [
self.semantic_head(feat)[1] for feat in img_feats
]
else:
semantic_feats = [None] * len(img_metas)
rcnn_test_cfg = self.test_cfg
aug_bboxes = []
aug_scores = []
for x, img_meta, semantic in zip(img_feats, img_metas, semantic_feats):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
# "ms" in variable names means multi-stage
ms_scores = []
rois = bbox2roi([proposals])
if rois.shape[0] == 0:
# There is no proposal in the single image
aug_bboxes.append(rois.new_zeros(0, 4))
aug_scores.append(rois.new_zeros(0, 1))
continue
for i in range(self.num_stages):
bbox_head = self.bbox_head[i]
bbox_results = self._bbox_forward(
i, x, rois, semantic_feat=semantic)
ms_scores.append(bbox_results['cls_score'])
if i < self.num_stages - 1:
bbox_label = bbox_results['cls_score'].argmax(dim=1)
rois = bbox_head.regress_by_class(
rois, bbox_label, bbox_results['bbox_pred'],
img_meta[0])
cls_score = sum(ms_scores) / float(len(ms_scores))
bboxes, scores = self.bbox_head[-1].get_bboxes(
rois,
cls_score,
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
bbox_result = bbox2result(det_bboxes, det_labels,
self.bbox_head[-1].num_classes)
if self.with_mask:
if det_bboxes.shape[0] == 0:
segm_result = [[]
for _ in range(self.mask_head[-1].num_classes)]
else:
aug_masks = []
aug_img_metas = []
for x, img_meta, semantic in zip(img_feats, img_metas,
semantic_feats):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip, flip_direction)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor[-1](
x[:len(self.mask_roi_extractor[-1].featmap_strides)],
mask_rois)
if self.with_semantic:
semantic_feat = semantic
mask_semantic_feat = self.semantic_roi_extractor(
[semantic_feat], mask_rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[
-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats = mask_feats + mask_semantic_feat
last_feat = None
for i in range(self.num_stages):
mask_head = self.mask_head[i]
if self.mask_info_flow:
mask_pred, last_feat = mask_head(
mask_feats, last_feat)
else:
mask_pred = mask_head(mask_feats)
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
aug_img_metas.append(img_meta)
merged_masks = merge_aug_masks(aug_masks, aug_img_metas,
self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
segm_result = self.mask_head[-1].get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
rcnn_test_cfg,
ori_shape,
scale_factor=1.0,
rescale=False)
return [(bbox_result, segm_result)]
else:
return [bbox_result]
| 27,702 | 43.042925 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/mask_scoring_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2roi
from ..builder import HEADS, build_head
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class MaskScoringRoIHead(StandardRoIHead):
"""Mask Scoring RoIHead for Mask Scoring RCNN.
https://arxiv.org/abs/1903.00241
"""
def __init__(self, mask_iou_head, **kwargs):
assert mask_iou_head is not None
super(MaskScoringRoIHead, self).__init__(**kwargs)
self.mask_iou_head = build_head(mask_iou_head)
def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
img_metas):
"""Run forward function and calculate loss for Mask head in
training."""
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
mask_results = super(MaskScoringRoIHead,
self)._mask_forward_train(x, sampling_results,
bbox_feats, gt_masks,
img_metas)
if mask_results['loss_mask'] is None:
return mask_results
# mask iou head forward and loss
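        # select each positive sample's predicted mask at its ground-truth
        # class before feeding it to the mask IoU head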
pos_mask_pred = mask_results['mask_pred'][
range(mask_results['mask_pred'].size(0)), pos_labels]
mask_iou_pred = self.mask_iou_head(mask_results['mask_feats'],
pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)),
pos_labels]
mask_iou_targets = self.mask_iou_head.get_targets(
sampling_results, gt_masks, pos_mask_pred,
mask_results['mask_targets'], self.train_cfg)
loss_mask_iou = self.mask_iou_head.loss(pos_mask_iou_pred,
mask_iou_targets)
mask_results['loss_mask'].update(loss_mask_iou)
return mask_results
def simple_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False):
"""Obtain mask prediction without augmentation."""
# image shapes of images in the batch
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
num_imgs = len(det_bboxes)
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
num_classes = self.mask_head.num_classes
segm_results = [[[] for _ in range(num_classes)]
for _ in range(num_imgs)]
mask_scores = [[[] for _ in range(num_classes)]
for _ in range(num_imgs)]
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i]
for i in range(num_imgs)
]
mask_rois = bbox2roi(_bboxes)
mask_results = self._mask_forward(x, mask_rois)
concat_det_labels = torch.cat(det_labels)
# get mask scores with mask iou head
mask_feats = mask_results['mask_feats']
mask_pred = mask_results['mask_pred']
mask_iou_pred = self.mask_iou_head(
mask_feats, mask_pred[range(concat_det_labels.size(0)),
concat_det_labels])
# split batch mask prediction back to each image
num_bboxes_per_img = tuple(len(_bbox) for _bbox in _bboxes)
mask_preds = mask_pred.split(num_bboxes_per_img, 0)
mask_iou_preds = mask_iou_pred.split(num_bboxes_per_img, 0)
# apply mask post-processing to each image individually
segm_results = []
mask_scores = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[] for _ in range(self.mask_head.num_classes)])
mask_scores.append(
[[] for _ in range(self.mask_head.num_classes)])
else:
segm_result = self.mask_head.get_seg_masks(
mask_preds[i], _bboxes[i], det_labels[i],
self.test_cfg, ori_shapes[i], scale_factors[i],
rescale)
# get mask scores with mask iou head
mask_score = self.mask_iou_head.get_mask_scores(
mask_iou_preds[i], det_bboxes[i], det_labels[i])
segm_results.append(segm_result)
mask_scores.append(mask_score)
return list(zip(segm_results, mask_scores))
| 5,230 | 44.885965 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/pisa_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.core import bbox2roi
from ..builder import HEADS
from ..losses.pisa_loss import carl_loss, isr_p
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class PISARoIHead(StandardRoIHead):
r"""The RoI head for `Prime Sample Attention in Object Detection
<https://arxiv.org/abs/1904.04821>`_."""
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None):
"""Forward function for training.
Args:
x (list[Tensor]): List of multi-level img features.
img_metas (list[dict]): List of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): List of region proposals.
gt_bboxes (list[Tensor]): Each item are the truth boxes for each
image in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): Class indices corresponding to each box
gt_bboxes_ignore (list[Tensor], optional): Specify which bounding
boxes can be ignored when computing the loss.
            gt_masks (None | Tensor): True segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
neg_label_weights = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
# neg label weight is obtained by sampling when using ISR-N
neg_label_weight = None
if isinstance(sampling_result, tuple):
sampling_result, neg_label_weight = sampling_result
sampling_results.append(sampling_result)
neg_label_weights.append(neg_label_weight)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self._bbox_forward_train(
x,
sampling_results,
gt_bboxes,
gt_labels,
img_metas,
neg_label_weights=neg_label_weights)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
return losses
def _bbox_forward(self, x, rois):
"""Box forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
return bbox_results
def _bbox_forward_train(self,
x,
sampling_results,
gt_bboxes,
gt_labels,
img_metas,
neg_label_weights=None):
"""Run forward function and calculate loss for box head in training."""
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, self.train_cfg)
        # neg_label_weights from the sampler are computed per image; map them
        # back to the corresponding locations in the label weights
if neg_label_weights[0] is not None:
label_weights = bbox_targets[1]
cur_num_rois = 0
for i in range(len(sampling_results)):
num_pos = sampling_results[i].pos_inds.size(0)
num_neg = sampling_results[i].neg_inds.size(0)
label_weights[cur_num_rois + num_pos:cur_num_rois + num_pos +
num_neg] = neg_label_weights[i]
cur_num_rois += num_pos + num_neg
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Apply ISR-P
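        # Importance-based Sample Reweighting for positives: positive samples
        # are re-weighted according to their ranking (see the PISA paper)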
isr_cfg = self.train_cfg.get('isr', None)
if isr_cfg is not None:
bbox_targets = isr_p(
cls_score,
bbox_pred,
bbox_targets,
rois,
sampling_results,
self.bbox_head.loss_cls,
self.bbox_head.bbox_coder,
**isr_cfg,
num_class=self.bbox_head.num_classes)
loss_bbox = self.bbox_head.loss(cls_score, bbox_pred, rois,
*bbox_targets)
# Add CARL Loss
carl_cfg = self.train_cfg.get('carl', None)
if carl_cfg is not None:
loss_carl = carl_loss(
cls_score,
bbox_targets[0],
bbox_pred,
bbox_targets[2],
self.bbox_head.loss_bbox,
**carl_cfg,
num_class=self.bbox_head.num_classes)
loss_bbox.update(loss_carl)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
| 6,656 | 40.347826 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/point_rend_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
# Modified from https://github.com/facebookresearch/detectron2/tree/master/projects/PointRend # noqa
import os
import warnings
import numpy as np
import torch
import torch.nn.functional as F
from mmcv.ops import point_sample, rel_roi_point_to_rel_img_point
from mmdet.core import bbox2roi, bbox_mapping, merge_aug_masks
from .. import builder
from ..builder import HEADS
from .standard_roi_head import StandardRoIHead
@HEADS.register_module()
class PointRendRoIHead(StandardRoIHead):
"""`PointRend <https://arxiv.org/abs/1912.08193>`_."""
def __init__(self, point_head, *args, **kwargs):
super().__init__(*args, **kwargs)
assert self.with_bbox and self.with_mask
self.init_point_head(point_head)
def init_point_head(self, point_head):
"""Initialize ``point_head``"""
self.point_head = builder.build_head(point_head)
def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
img_metas):
"""Run forward function and calculate loss for mask head and point head
in training."""
mask_results = super()._mask_forward_train(x, sampling_results,
bbox_feats, gt_masks,
img_metas)
if mask_results['loss_mask'] is not None:
loss_point = self._mask_point_forward_train(
x, sampling_results, mask_results['mask_pred'], gt_masks,
img_metas)
mask_results['loss_mask'].update(loss_point)
return mask_results
def _mask_point_forward_train(self, x, sampling_results, mask_pred,
gt_masks, img_metas):
"""Run forward function and calculate loss for point head in
training."""
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
rel_roi_points = self.point_head.get_roi_rel_points_train(
mask_pred, pos_labels, cfg=self.train_cfg)
rois = bbox2roi([res.pos_bboxes for res in sampling_results])
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, rois, rel_roi_points, img_metas)
coarse_point_feats = point_sample(mask_pred, rel_roi_points)
mask_point_pred = self.point_head(fine_grained_point_feats,
coarse_point_feats)
mask_point_target = self.point_head.get_targets(
rois, rel_roi_points, sampling_results, gt_masks, self.train_cfg)
loss_mask_point = self.point_head.loss(mask_point_pred,
mask_point_target, pos_labels)
return loss_mask_point
def _get_fine_grained_point_feats(self, x, rois, rel_roi_points,
img_metas):
"""Sample fine grained feats from each level feature map and
concatenate them together.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
rois (Tensor): shape (num_rois, 5).
rel_roi_points (Tensor): A tensor of shape (num_rois, num_points,
2) that contains [0, 1] x [0, 1] normalized coordinates of the
most uncertain points from the [mask_height, mask_width] grid.
img_metas (list[dict]): Image meta info.
Returns:
Tensor: The fine grained features for each points,
has shape (num_rois, feats_channels, num_points).
"""
num_imgs = len(img_metas)
fine_grained_feats = []
for idx in range(self.mask_roi_extractor.num_inputs):
feats = x[idx]
spatial_scale = 1. / float(
self.mask_roi_extractor.featmap_strides[idx])
point_feats = []
for batch_ind in range(num_imgs):
# unravel batch dim
feat = feats[batch_ind].unsqueeze(0)
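                # column 0 of each roi stores the index of the image it
                # belongs to within the batch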
inds = (rois[:, 0].long() == batch_ind)
if inds.any():
rel_img_points = rel_roi_point_to_rel_img_point(
rois[inds], rel_roi_points[inds], feat.shape[2:],
spatial_scale).unsqueeze(0)
point_feat = point_sample(feat, rel_img_points)
point_feat = point_feat.squeeze(0).transpose(0, 1)
point_feats.append(point_feat)
fine_grained_feats.append(torch.cat(point_feats, dim=0))
return torch.cat(fine_grained_feats, dim=1)
def _mask_point_forward_test(self, x, rois, label_pred, mask_pred,
img_metas):
"""Mask refining process with point head in testing.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
rois (Tensor): shape (num_rois, 5).
label_pred (Tensor): The predication class for each rois.
mask_pred (Tensor): The predication coarse masks of
shape (num_rois, num_classes, small_size, small_size).
img_metas (list[dict]): Image meta info.
Returns:
Tensor: The refined masks of shape (num_rois, num_classes,
large_size, large_size).
"""
refined_mask_pred = mask_pred.clone()
for subdivision_step in range(self.test_cfg.subdivision_steps):
refined_mask_pred = F.interpolate(
refined_mask_pred,
scale_factor=self.test_cfg.scale_factor,
mode='bilinear',
align_corners=False)
# If `subdivision_num_points` is larger or equal to the
# resolution of the next step, then we can skip this step
num_rois, channels, mask_height, mask_width = \
refined_mask_pred.shape
if (self.test_cfg.subdivision_num_points >=
self.test_cfg.scale_factor**2 * mask_height * mask_width
and
subdivision_step < self.test_cfg.subdivision_steps - 1):
continue
point_indices, rel_roi_points = \
self.point_head.get_roi_rel_points_test(
refined_mask_pred, label_pred, cfg=self.test_cfg)
fine_grained_point_feats = self._get_fine_grained_point_feats(
x, rois, rel_roi_points, img_metas)
coarse_point_feats = point_sample(mask_pred, rel_roi_points)
mask_point_pred = self.point_head(fine_grained_point_feats,
coarse_point_feats)
point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
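            # write the refined point logits back into the upsampled mask at
            # the selected uncertain locations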
refined_mask_pred = refined_mask_pred.reshape(
num_rois, channels, mask_height * mask_width)
refined_mask_pred = refined_mask_pred.scatter_(
2, point_indices, mask_point_pred)
refined_mask_pred = refined_mask_pred.view(num_rois, channels,
mask_height, mask_width)
return refined_mask_pred
def simple_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False):
"""Obtain mask prediction without augmentation."""
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
if isinstance(scale_factors[0], float):
warnings.warn(
                'Scale factor in img_metas should be an '
                'ndarray with shape (4,) '
                'arranged as (factor_w, factor_h, factor_w, factor_h). '
                'The scale_factor with float type has been deprecated. ')
scale_factors = np.array([scale_factors] * 4, dtype=np.float32)
num_imgs = len(det_bboxes)
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
segm_results = [[[] for _ in range(self.mask_head.num_classes)]
for _ in range(num_imgs)]
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
_bboxes = [det_bboxes[i][:, :4] for i in range(len(det_bboxes))]
if rescale:
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
_bboxes[i] * scale_factors[i] for i in range(len(_bboxes))
]
mask_rois = bbox2roi(_bboxes)
mask_results = self._mask_forward(x, mask_rois)
# split batch mask prediction back to each image
mask_pred = mask_results['mask_pred']
num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
mask_rois = mask_rois.split(num_mask_roi_per_img, 0)
# apply mask post-processing to each image individually
segm_results = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[] for _ in range(self.mask_head.num_classes)])
else:
x_i = [xx[[i]] for xx in x]
mask_rois_i = mask_rois[i]
mask_rois_i[:, 0] = 0 # TODO: remove this hack
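                    # x_i only contains this image, so the batch index stored
                    # in column 0 must be reset to 0 before point sampling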
mask_pred_i = self._mask_point_forward_test(
x_i, mask_rois_i, det_labels[i], mask_preds[i],
[img_metas])
segm_result = self.mask_head.get_seg_masks(
mask_pred_i, _bboxes[i], det_labels[i], self.test_cfg,
ori_shapes[i], scale_factors[i], rescale)
segm_results.append(segm_result)
return segm_results
def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
"""Test for mask head with test time augmentation."""
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes)]
else:
aug_masks = []
for x, img_meta in zip(feats, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip)
mask_rois = bbox2roi([_bboxes])
mask_results = self._mask_forward(x, mask_rois)
mask_results['mask_pred'] = self._mask_point_forward_test(
x, mask_rois, det_labels, mask_results['mask_pred'],
img_meta)
# convert to numpy array to save memory
aug_masks.append(
mask_results['mask_pred'].sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
segm_result = self.mask_head.get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
self.test_cfg,
ori_shape,
scale_factor=1.0,
rescale=False)
return segm_result
def _onnx_get_fine_grained_point_feats(self, x, rois, rel_roi_points):
"""Export the process of sampling fine grained feats to onnx.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
rois (Tensor): shape (num_rois, 5).
rel_roi_points (Tensor): A tensor of shape (num_rois, num_points,
2) that contains [0, 1] x [0, 1] normalized coordinates of the
most uncertain points from the [mask_height, mask_width] grid.
Returns:
Tensor: The fine grained features for each points,
has shape (num_rois, feats_channels, num_points).
"""
batch_size = x[0].shape[0]
num_rois = rois.shape[0]
fine_grained_feats = []
for idx in range(self.mask_roi_extractor.num_inputs):
feats = x[idx]
spatial_scale = 1. / float(
self.mask_roi_extractor.featmap_strides[idx])
rel_img_points = rel_roi_point_to_rel_img_point(
rois, rel_roi_points, feats, spatial_scale)
channels = feats.shape[1]
num_points = rel_img_points.shape[1]
rel_img_points = rel_img_points.reshape(batch_size, -1, num_points,
2)
point_feats = point_sample(feats, rel_img_points)
point_feats = point_feats.transpose(1, 2).reshape(
num_rois, channels, num_points)
fine_grained_feats.append(point_feats)
return torch.cat(fine_grained_feats, dim=1)
def _mask_point_onnx_export(self, x, rois, label_pred, mask_pred):
"""Export mask refining process with point head to onnx.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
rois (Tensor): shape (num_rois, 5).
label_pred (Tensor): The predication class for each rois.
mask_pred (Tensor): The predication coarse masks of
shape (num_rois, num_classes, small_size, small_size).
Returns:
Tensor: The refined masks of shape (num_rois, num_classes,
large_size, large_size).
"""
refined_mask_pred = mask_pred.clone()
for subdivision_step in range(self.test_cfg.subdivision_steps):
refined_mask_pred = F.interpolate(
refined_mask_pred,
scale_factor=self.test_cfg.scale_factor,
mode='bilinear',
align_corners=False)
            # If `subdivision_num_points` is larger than or equal to the
            # resolution of the next step, then we can skip this step.
num_rois, channels, mask_height, mask_width = \
refined_mask_pred.shape
if (self.test_cfg.subdivision_num_points >=
self.test_cfg.scale_factor**2 * mask_height * mask_width
and
subdivision_step < self.test_cfg.subdivision_steps - 1):
continue
point_indices, rel_roi_points = \
self.point_head.get_roi_rel_points_test(
refined_mask_pred, label_pred, cfg=self.test_cfg)
fine_grained_point_feats = self._onnx_get_fine_grained_point_feats(
x, rois, rel_roi_points)
coarse_point_feats = point_sample(mask_pred, rel_roi_points)
mask_point_pred = self.point_head(fine_grained_point_feats,
coarse_point_feats)
point_indices = point_indices.unsqueeze(1).expand(-1, channels, -1)
refined_mask_pred = refined_mask_pred.reshape(
num_rois, channels, mask_height * mask_width)
is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT'
# avoid ScatterElements op in ONNX for TensorRT
if is_trt_backend:
mask_shape = refined_mask_pred.shape
point_shape = point_indices.shape
inds_dim0 = torch.arange(point_shape[0]).reshape(
point_shape[0], 1, 1).expand_as(point_indices)
inds_dim1 = torch.arange(point_shape[1]).reshape(
1, point_shape[1], 1).expand_as(point_indices)
inds_1d = inds_dim0.reshape(
-1) * mask_shape[1] * mask_shape[2] + inds_dim1.reshape(
-1) * mask_shape[2] + point_indices.reshape(-1)
refined_mask_pred = refined_mask_pred.reshape(-1)
refined_mask_pred[inds_1d] = mask_point_pred.reshape(-1)
refined_mask_pred = refined_mask_pred.reshape(*mask_shape)
else:
refined_mask_pred = refined_mask_pred.scatter_(
2, point_indices, mask_point_pred)
refined_mask_pred = refined_mask_pred.view(num_rois, channels,
mask_height, mask_width)
return refined_mask_pred
def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs):
"""Export mask branch to onnx which supports batch inference.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
img_metas (list[dict]): Image meta info.
            det_bboxes (Tensor): Bboxes and corresponding scores,
                has shape [N, num_bboxes, 5].
det_labels (Tensor): class labels of
shape [N, num_bboxes].
Returns:
Tensor: The segmentation results of shape [N, num_bboxes,
image_height, image_width].
"""
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
raise RuntimeError('[ONNX Error] Can not record MaskHead '
'as it has not been executed this time')
batch_size = det_bboxes.size(0)
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
det_bboxes = det_bboxes[..., :4]
batch_index = torch.arange(
det_bboxes.size(0), device=det_bboxes.device).float().view(
-1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)
mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
mask_rois = mask_rois.view(-1, 5)
mask_results = self._mask_forward(x, mask_rois)
mask_pred = mask_results['mask_pred']
max_shape = img_metas[0]['img_shape_for_onnx']
num_det = det_bboxes.shape[1]
det_bboxes = det_bboxes.reshape(-1, 4)
det_labels = det_labels.reshape(-1)
mask_pred = self._mask_point_onnx_export(x, mask_rois, det_labels,
mask_pred)
segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes,
det_labels, self.test_cfg,
max_shape)
segm_results = segm_results.reshape(batch_size, num_det, max_shape[0],
max_shape[1])
return segm_results
# mmdetection | mmdetection-master/mmdet/models/roi_heads/scnet_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn.functional as F
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
merge_aug_masks, multiclass_nms)
from ..builder import HEADS, build_head, build_roi_extractor
from ..utils.brick_wrappers import adaptive_avg_pool2d
from .cascade_roi_head import CascadeRoIHead
@HEADS.register_module()
class SCNetRoIHead(CascadeRoIHead):
"""RoIHead for `SCNet <https://arxiv.org/abs/2012.10150>`_.
Args:
num_stages (int): number of cascade stages.
stage_loss_weights (list): loss weight of cascade stages.
semantic_roi_extractor (dict): config to init semantic roi extractor.
semantic_head (dict): config to init semantic head.
feat_relay_head (dict): config to init feature_relay_head.
glbctx_head (dict): config to init global context head.
"""
def __init__(self,
num_stages,
stage_loss_weights,
semantic_roi_extractor=None,
semantic_head=None,
feat_relay_head=None,
glbctx_head=None,
**kwargs):
super(SCNetRoIHead, self).__init__(num_stages, stage_loss_weights,
**kwargs)
assert self.with_bbox and self.with_mask
assert not self.with_shared_head # shared head is not supported
if semantic_head is not None:
self.semantic_roi_extractor = build_roi_extractor(
semantic_roi_extractor)
self.semantic_head = build_head(semantic_head)
if feat_relay_head is not None:
self.feat_relay_head = build_head(feat_relay_head)
if glbctx_head is not None:
self.glbctx_head = build_head(glbctx_head)
def init_mask_head(self, mask_roi_extractor, mask_head):
"""Initialize ``mask_head``"""
if mask_roi_extractor is not None:
self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
self.mask_head = build_head(mask_head)
@property
def with_semantic(self):
"""bool: whether the head has semantic head"""
return hasattr(self,
'semantic_head') and self.semantic_head is not None
@property
def with_feat_relay(self):
"""bool: whether the head has feature relay head"""
return (hasattr(self, 'feat_relay_head')
and self.feat_relay_head is not None)
@property
def with_glbctx(self):
"""bool: whether the head has global context head"""
return hasattr(self, 'glbctx_head') and self.glbctx_head is not None
def _fuse_glbctx(self, roi_feats, glbctx_feat, rois):
"""Fuse global context feats with roi feats."""
assert roi_feats.size(0) == rois.size(0)
img_inds = torch.unique(rois[:, 0].cpu(), sorted=True).long()
fused_feats = torch.zeros_like(roi_feats)
for img_id in img_inds:
inds = (rois[:, 0] == img_id.item())
fused_feats[inds] = roi_feats[inds] + glbctx_feat[img_id]
return fused_feats
def _slice_pos_feats(self, feats, sampling_results):
"""Get features from pos rois."""
num_rois = [res.bboxes.size(0) for res in sampling_results]
num_pos_rois = [res.pos_bboxes.size(0) for res in sampling_results]
inds = torch.zeros(sum(num_rois), dtype=torch.bool)
start = 0
for i in range(len(num_rois)):
start = 0 if i == 0 else start + num_rois[i - 1]
stop = start + num_pos_rois[i]
inds[start:stop] = 1
sliced_feats = feats[inds]
return sliced_feats
def _bbox_forward(self,
stage,
x,
rois,
semantic_feat=None,
glbctx_feat=None):
"""Box head forward function used in both training and testing."""
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(
x[:len(bbox_roi_extractor.featmap_strides)], rois)
if self.with_semantic and semantic_feat is not None:
bbox_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if bbox_semantic_feat.shape[-2:] != bbox_feats.shape[-2:]:
bbox_semantic_feat = adaptive_avg_pool2d(
bbox_semantic_feat, bbox_feats.shape[-2:])
bbox_feats = bbox_feats + bbox_semantic_feat
if self.with_glbctx and glbctx_feat is not None:
bbox_feats = self._fuse_glbctx(bbox_feats, glbctx_feat, rois)
cls_score, bbox_pred, relayed_feat = bbox_head(
bbox_feats, return_shared_feat=True)
bbox_results = dict(
cls_score=cls_score,
bbox_pred=bbox_pred,
relayed_feat=relayed_feat)
return bbox_results
def _mask_forward(self,
x,
rois,
semantic_feat=None,
glbctx_feat=None,
relayed_feat=None):
"""Mask head forward function used in both training and testing."""
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], rois)
if self.with_semantic and semantic_feat is not None:
mask_semantic_feat = self.semantic_roi_extractor([semantic_feat],
rois)
if mask_semantic_feat.shape[-2:] != mask_feats.shape[-2:]:
mask_semantic_feat = F.adaptive_avg_pool2d(
mask_semantic_feat, mask_feats.shape[-2:])
mask_feats = mask_feats + mask_semantic_feat
if self.with_glbctx and glbctx_feat is not None:
mask_feats = self._fuse_glbctx(mask_feats, glbctx_feat, rois)
if self.with_feat_relay and relayed_feat is not None:
mask_feats = mask_feats + relayed_feat
mask_pred = self.mask_head(mask_feats)
mask_results = dict(mask_pred=mask_pred)
return mask_results
def _bbox_forward_train(self,
stage,
x,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
semantic_feat=None,
glbctx_feat=None):
"""Run forward function and calculate loss for box head in training."""
bbox_head = self.bbox_head[stage]
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(
stage,
x,
rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat)
bbox_targets = bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, rcnn_train_cfg)
loss_bbox = bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(
loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)
return bbox_results
def _mask_forward_train(self,
x,
sampling_results,
gt_masks,
rcnn_train_cfg,
semantic_feat=None,
glbctx_feat=None,
relayed_feat=None):
"""Run forward function and calculate loss for mask head in
training."""
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_results = self._mask_forward(
x,
pos_rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat,
relayed_feat=relayed_feat)
mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head.loss(mask_results['mask_pred'],
mask_targets, pos_labels)
        # `mask_head.loss` returns a dict of losses (containing 'loss_mask'),
        # which `forward_train` reads via mask_results['loss_mask'].
        mask_results = loss_mask
return mask_results
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
gt_semantic_seg=None):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None, list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None, Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
gt_semantic_seg (None, list[Tensor]): semantic segmentation masks
used if the architecture supports semantic segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
losses = dict()
# semantic segmentation branch
if self.with_semantic:
semantic_pred, semantic_feat = self.semantic_head(x)
loss_seg = self.semantic_head.loss(semantic_pred, gt_semantic_seg)
losses['loss_semantic_seg'] = loss_seg
else:
semantic_feat = None
# global context branch
if self.with_glbctx:
mc_pred, glbctx_feat = self.glbctx_head(x)
loss_glbctx = self.glbctx_head.loss(mc_pred, gt_labels)
losses['loss_glbctx'] = loss_glbctx
else:
glbctx_feat = None
for i in range(self.num_stages):
self.current_stage = i
rcnn_train_cfg = self.train_cfg[i]
lw = self.stage_loss_weights[i]
# assign gts and sample proposals
sampling_results = []
bbox_assigner = self.bbox_assigner[i]
bbox_sampler = self.bbox_sampler[i]
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
for j in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[j],
gt_bboxes[j],
gt_bboxes_ignore[j],
gt_labels[j])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[j],
gt_bboxes[j],
gt_labels[j],
feats=[lvl_feat[j][None] for lvl_feat in x])
sampling_results.append(sampling_result)
bbox_results = \
self._bbox_forward_train(
i, x, sampling_results, gt_bboxes, gt_labels,
rcnn_train_cfg, semantic_feat, glbctx_feat)
roi_labels = bbox_results['bbox_targets'][0]
for name, value in bbox_results['loss_bbox'].items():
losses[f's{i}.{name}'] = (
value * lw if 'loss' in name else value)
# refine boxes
if i < self.num_stages - 1:
pos_is_gts = [res.pos_is_gt for res in sampling_results]
with torch.no_grad():
proposal_list = self.bbox_head[i].refine_bboxes(
bbox_results['rois'], roi_labels,
bbox_results['bbox_pred'], pos_is_gts, img_metas)
if self.with_feat_relay:
relayed_feat = self._slice_pos_feats(bbox_results['relayed_feat'],
sampling_results)
relayed_feat = self.feat_relay_head(relayed_feat)
else:
relayed_feat = None
mask_results = self._mask_forward_train(x, sampling_results, gt_masks,
rcnn_train_cfg, semantic_feat,
glbctx_feat, relayed_feat)
mask_lw = sum(self.stage_loss_weights)
losses['loss_mask'] = mask_lw * mask_results['loss_mask']
return losses
def simple_test(self, x, proposal_list, img_metas, rescale=False):
"""Test without augmentation.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (batch_size, c, h, w).
proposal_list (list(Tensor)): Proposals from rpn head.
Each has shape (num_proposals, 5), last dimension
5 represent (x1, y1, x2, y2, score).
img_metas (list[dict]): Meta information of images.
            rescale (bool): Whether to rescale the results to
                the original image. Default: False.
Returns:
list[list[np.ndarray]] or list[tuple]: When no mask branch,
it is bbox results of each image and classes with type
`list[list[np.ndarray]]`. The outer list
corresponds to each image. The inner list
corresponds to each class. When the model has mask branch,
it contains bbox results and mask results.
The outer list corresponds to each image, and first element
of tuple is bbox results, second element is mask results.
"""
if self.with_semantic:
_, semantic_feat = self.semantic_head(x)
else:
semantic_feat = None
if self.with_glbctx:
mc_pred, glbctx_feat = self.glbctx_head(x)
else:
glbctx_feat = None
num_imgs = len(proposal_list)
img_shapes = tuple(meta['img_shape'] for meta in img_metas)
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# "ms" in variable names means multi-stage
ms_scores = []
rcnn_test_cfg = self.test_cfg
rois = bbox2roi(proposal_list)
if rois.shape[0] == 0:
# There is no proposal in the whole batch
bbox_results = [[
np.zeros((0, 5), dtype=np.float32)
for _ in range(self.bbox_head[-1].num_classes)
]] * num_imgs
if self.with_mask:
mask_classes = self.mask_head.num_classes
segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
results = list(zip(bbox_results, segm_results))
else:
results = bbox_results
return results
for i in range(self.num_stages):
bbox_head = self.bbox_head[i]
bbox_results = self._bbox_forward(
i,
x,
rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat)
# split batch bbox prediction back to each image
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
num_proposals_per_img = tuple(len(p) for p in proposal_list)
rois = rois.split(num_proposals_per_img, 0)
cls_score = cls_score.split(num_proposals_per_img, 0)
bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
ms_scores.append(cls_score)
if i < self.num_stages - 1:
refine_rois_list = []
for j in range(num_imgs):
if rois[j].shape[0] > 0:
bbox_label = cls_score[j][:, :-1].argmax(dim=1)
refine_rois = bbox_head.regress_by_class(
rois[j], bbox_label, bbox_pred[j], img_metas[j])
refine_rois_list.append(refine_rois)
rois = torch.cat(refine_rois_list)
# average scores of each image by stages
cls_score = [
sum([score[i] for score in ms_scores]) / float(len(ms_scores))
for i in range(num_imgs)
]
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(num_imgs):
det_bbox, det_label = self.bbox_head[-1].get_bboxes(
rois[i],
cls_score[i],
bbox_pred[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
det_bbox_results = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head[-1].num_classes)
for i in range(num_imgs)
]
if self.with_mask:
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
mask_classes = self.mask_head.num_classes
det_segm_results = [[[] for _ in range(mask_classes)]
for _ in range(num_imgs)]
else:
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i]
for i in range(num_imgs)
]
mask_rois = bbox2roi(_bboxes)
# get relay feature on mask_rois
bbox_results = self._bbox_forward(
-1,
x,
mask_rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat)
relayed_feat = bbox_results['relayed_feat']
relayed_feat = self.feat_relay_head(relayed_feat)
mask_results = self._mask_forward(
x,
mask_rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat,
relayed_feat=relayed_feat)
mask_pred = mask_results['mask_pred']
# split batch mask prediction back to each image
num_bbox_per_img = tuple(len(_bbox) for _bbox in _bboxes)
mask_preds = mask_pred.split(num_bbox_per_img, 0)
# apply mask post-processing to each image individually
det_segm_results = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
det_segm_results.append(
[[] for _ in range(self.mask_head.num_classes)])
else:
segm_result = self.mask_head.get_seg_masks(
mask_preds[i], _bboxes[i], det_labels[i],
self.test_cfg, ori_shapes[i], scale_factors[i],
rescale)
det_segm_results.append(segm_result)
# return results
if self.with_mask:
return list(zip(det_bbox_results, det_segm_results))
else:
return det_bbox_results
def aug_test(self, img_feats, proposal_list, img_metas, rescale=False):
if self.with_semantic:
semantic_feats = [
self.semantic_head(feat)[1] for feat in img_feats
]
else:
semantic_feats = [None] * len(img_metas)
if self.with_glbctx:
glbctx_feats = [self.glbctx_head(feat)[1] for feat in img_feats]
else:
glbctx_feats = [None] * len(img_metas)
rcnn_test_cfg = self.test_cfg
aug_bboxes = []
aug_scores = []
for x, img_meta, semantic_feat, glbctx_feat in zip(
img_feats, img_metas, semantic_feats, glbctx_feats):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip)
# "ms" in variable names means multi-stage
ms_scores = []
rois = bbox2roi([proposals])
if rois.shape[0] == 0:
# There is no proposal in the single image
aug_bboxes.append(rois.new_zeros(0, 4))
aug_scores.append(rois.new_zeros(0, 1))
continue
for i in range(self.num_stages):
bbox_head = self.bbox_head[i]
bbox_results = self._bbox_forward(
i,
x,
rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat)
ms_scores.append(bbox_results['cls_score'])
if i < self.num_stages - 1:
bbox_label = bbox_results['cls_score'].argmax(dim=1)
rois = bbox_head.regress_by_class(
rois, bbox_label, bbox_results['bbox_pred'],
img_meta[0])
cls_score = sum(ms_scores) / float(len(ms_scores))
bboxes, scores = self.bbox_head[-1].get_bboxes(
rois,
cls_score,
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
det_bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head[-1].num_classes)
if self.with_mask:
if det_bboxes.shape[0] == 0:
det_segm_results = [[]
for _ in range(self.mask_head.num_classes)]
else:
aug_masks = []
for x, img_meta, semantic_feat, glbctx_feat in zip(
img_feats, img_metas, semantic_feats, glbctx_feats):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip)
mask_rois = bbox2roi([_bboxes])
# get relay feature on mask_rois
bbox_results = self._bbox_forward(
-1,
x,
mask_rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat)
relayed_feat = bbox_results['relayed_feat']
relayed_feat = self.feat_relay_head(relayed_feat)
mask_results = self._mask_forward(
x,
mask_rois,
semantic_feat=semantic_feat,
glbctx_feat=glbctx_feat,
relayed_feat=relayed_feat)
mask_pred = mask_results['mask_pred']
aug_masks.append(mask_pred.sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, img_metas,
self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
det_segm_results = self.mask_head.get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
rcnn_test_cfg,
ori_shape,
scale_factor=1.0,
rescale=False)
return [(det_bbox_results, det_segm_results)]
else:
return [det_bbox_results]
# mmdetection | mmdetection-master/mmdet/models/roi_heads/sparse_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmdet.core import bbox2result, bbox2roi, bbox_xyxy_to_cxcywh
from mmdet.core.bbox.samplers import PseudoSampler
from ..builder import HEADS
from .cascade_roi_head import CascadeRoIHead
@HEADS.register_module()
class SparseRoIHead(CascadeRoIHead):
r"""The RoIHead for `Sparse R-CNN: End-to-End Object Detection with
Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
and `Instances as Queries <http://arxiv.org/abs/2105.01928>`_
Args:
num_stages (int): Number of stage whole iterative process.
Defaults to 6.
stage_loss_weights (Tuple[float]): The loss
weight of each stage. By default all stages have
the same weight 1.
bbox_roi_extractor (dict): Config of box roi extractor.
mask_roi_extractor (dict): Config of mask roi extractor.
bbox_head (dict): Config of box head.
mask_head (dict): Config of mask head.
train_cfg (dict, optional): Configuration information in train stage.
Defaults to None.
test_cfg (dict, optional): Configuration information in test stage.
Defaults to None.
pretrained (str, optional): model pretrained path. Default: None
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_stages=6,
stage_loss_weights=(1, 1, 1, 1, 1, 1),
proposal_feature_channel=256,
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(
type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_roi_extractor=None,
bbox_head=dict(
type='DIIHead',
num_classes=80,
num_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
hidden_channels=256,
dropout=0.0,
roi_feat_size=7,
ffn_act_cfg=dict(type='ReLU', inplace=True)),
mask_head=None,
train_cfg=None,
test_cfg=None,
pretrained=None,
init_cfg=None):
assert bbox_roi_extractor is not None
assert bbox_head is not None
assert len(stage_loss_weights) == num_stages
self.num_stages = num_stages
self.stage_loss_weights = stage_loss_weights
self.proposal_feature_channel = proposal_feature_channel
super(SparseRoIHead, self).__init__(
num_stages,
stage_loss_weights,
bbox_roi_extractor=bbox_roi_extractor,
mask_roi_extractor=mask_roi_extractor,
bbox_head=bbox_head,
mask_head=mask_head,
train_cfg=train_cfg,
test_cfg=test_cfg,
pretrained=pretrained,
init_cfg=init_cfg)
# train_cfg would be None when run the test.py
if train_cfg is not None:
for stage in range(num_stages):
assert isinstance(self.bbox_sampler[stage], PseudoSampler), \
'Sparse R-CNN and QueryInst only support `PseudoSampler`'
def _bbox_forward(self, stage, x, rois, object_feats, img_metas):
"""Box head forward function used in both training and testing. Returns
        all regression and classification results and an intermediate feature.
Args:
stage (int): The index of current stage in
iterative process.
x (List[Tensor]): List of FPN features
rois (Tensor): Rois in total batch. With shape (num_proposal, 5).
the last dimension 5 represents (img_index, x1, y1, x2, y2).
object_feats (Tensor): The object feature extracted from
the previous stage.
img_metas (dict): meta information of images.
Returns:
dict[str, Tensor]: a dictionary of bbox head outputs,
Containing the following results:
- cls_score (Tensor): The score of each class, has
shape (batch_size, num_proposals, num_classes)
when use focal loss or
(batch_size, num_proposals, num_classes+1)
otherwise.
- decode_bbox_pred (Tensor): The regression results
with shape (batch_size, num_proposal, 4).
The last dimension 4 represents
[tl_x, tl_y, br_x, br_y].
- object_feats (Tensor): The object feature extracted
from current stage
- detach_cls_score_list (list[Tensor]): The detached
classification results, length is batch_size, and
each tensor has shape (num_proposal, num_classes).
- detach_proposal_list (list[tensor]): The detached
regression results, length is batch_size, and each
tensor has shape (num_proposal, 4). The last
dimension 4 represents [tl_x, tl_y, br_x, br_y].
"""
num_imgs = len(img_metas)
bbox_roi_extractor = self.bbox_roi_extractor[stage]
bbox_head = self.bbox_head[stage]
bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],
rois)
cls_score, bbox_pred, object_feats, attn_feats = bbox_head(
bbox_feats, object_feats)
proposal_list = self.bbox_head[stage].refine_bboxes(
rois,
rois.new_zeros(len(rois)), # dummy arg
bbox_pred.view(-1, bbox_pred.size(-1)),
[rois.new_zeros(object_feats.size(1)) for _ in range(num_imgs)],
img_metas)
bbox_results = dict(
cls_score=cls_score,
decode_bbox_pred=torch.cat(proposal_list),
object_feats=object_feats,
attn_feats=attn_feats,
# detach then use it in label assign
detach_cls_score_list=[
cls_score[i].detach() for i in range(num_imgs)
],
detach_proposal_list=[item.detach() for item in proposal_list])
return bbox_results
def _mask_forward(self, stage, x, rois, attn_feats):
"""Mask head forward function used in both training and testing."""
mask_roi_extractor = self.mask_roi_extractor[stage]
mask_head = self.mask_head[stage]
mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],
rois)
# do not support caffe_c4 model anymore
mask_pred = mask_head(mask_feats, attn_feats)
mask_results = dict(mask_pred=mask_pred)
return mask_results
def _mask_forward_train(self, stage, x, attn_feats, sampling_results,
gt_masks, rcnn_train_cfg):
"""Run forward function and calculate loss for mask head in
training."""
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
attn_feats = torch.cat([
feats[res.pos_inds]
for (feats, res) in zip(attn_feats, sampling_results)
])
mask_results = self._mask_forward(stage, x, pos_rois, attn_feats)
mask_targets = self.mask_head[stage].get_targets(
sampling_results, gt_masks, rcnn_train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],
mask_targets, pos_labels)
mask_results.update(loss_mask)
return mask_results
def forward_train(self,
x,
proposal_boxes,
proposal_features,
img_metas,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
imgs_whwh=None,
gt_masks=None):
"""Forward function in training stage.
Args:
x (list[Tensor]): list of multi-level img features.
            proposal_boxes (Tensor): Decoded proposal bboxes, has shape
(batch_size, num_proposals, 4)
proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel)
img_metas (list[dict]): list of image info dict where
each dict has: 'img_shape', 'scale_factor', 'flip',
and may also contain 'filename', 'ori_shape',
'pad_shape', and 'img_norm_cfg'. For details on the
values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
imgs_whwh (Tensor): Tensor with shape (batch_size, 4),
the dimension means
[img_width,img_height, img_width, img_height].
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
            dict[str, Tensor]: a dictionary of loss components of all stages.
"""
num_imgs = len(img_metas)
num_proposals = proposal_boxes.size(1)
imgs_whwh = imgs_whwh.repeat(1, num_proposals, 1)
all_stage_bbox_results = []
proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]
object_feats = proposal_features
all_stage_loss = {}
for stage in range(self.num_stages):
rois = bbox2roi(proposal_list)
bbox_results = self._bbox_forward(stage, x, rois, object_feats,
img_metas)
all_stage_bbox_results.append(bbox_results)
if gt_bboxes_ignore is None:
# TODO support ignore
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
cls_pred_list = bbox_results['detach_cls_score_list']
proposal_list = bbox_results['detach_proposal_list']
for i in range(num_imgs):
normalize_bbox_ccwh = bbox_xyxy_to_cxcywh(proposal_list[i] /
imgs_whwh[i])
assign_result = self.bbox_assigner[stage].assign(
normalize_bbox_ccwh, cls_pred_list[i], gt_bboxes[i],
gt_labels[i], img_metas[i])
sampling_result = self.bbox_sampler[stage].sample(
assign_result, proposal_list[i], gt_bboxes[i])
sampling_results.append(sampling_result)
bbox_targets = self.bbox_head[stage].get_targets(
sampling_results, gt_bboxes, gt_labels, self.train_cfg[stage],
True)
cls_score = bbox_results['cls_score']
decode_bbox_pred = bbox_results['decode_bbox_pred']
single_stage_loss = self.bbox_head[stage].loss(
cls_score.view(-1, cls_score.size(-1)),
decode_bbox_pred.view(-1, 4),
*bbox_targets,
imgs_whwh=imgs_whwh)
if self.with_mask:
mask_results = self._mask_forward_train(
stage, x, bbox_results['attn_feats'], sampling_results,
gt_masks, self.train_cfg[stage])
single_stage_loss['loss_mask'] = mask_results['loss_mask']
for key, value in single_stage_loss.items():
all_stage_loss[f'stage{stage}_{key}'] = value * \
self.stage_loss_weights[stage]
object_feats = bbox_results['object_feats']
return all_stage_loss
def simple_test(self,
x,
proposal_boxes,
proposal_features,
img_metas,
imgs_whwh,
rescale=False):
"""Test without augmentation.
Args:
x (list[Tensor]): list of multi-level img features.
proposal_boxes (Tensor): Decoded proposal bboxes, has shape
(batch_size, num_proposals, 4)
proposal_features (Tensor): Expanded proposal
features, has shape
(batch_size, num_proposals, proposal_feature_channel)
img_metas (dict): meta information of images.
imgs_whwh (Tensor): Tensor with shape (batch_size, 4),
the dimension means
[img_width,img_height, img_width, img_height].
rescale (bool): If True, return boxes in original image
space. Defaults to False.
Returns:
list[list[np.ndarray]] or list[tuple]: When no mask branch,
it is bbox results of each image and classes with type
`list[list[np.ndarray]]`. The outer list
corresponds to each image. The inner list
corresponds to each class. When the model has a mask branch,
it is a list[tuple] that contains bbox results and mask results.
The outer list corresponds to each image, and first element
of tuple is bbox results, second element is mask results.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
# Decode initial proposals
num_imgs = len(img_metas)
proposal_list = [proposal_boxes[i] for i in range(num_imgs)]
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
object_feats = proposal_features
if all([proposal.shape[0] == 0 for proposal in proposal_list]):
# There is no proposal in the whole batch
bbox_results = [[
np.zeros((0, 5), dtype=np.float32)
for i in range(self.bbox_head[-1].num_classes)
]] * num_imgs
return bbox_results
for stage in range(self.num_stages):
rois = bbox2roi(proposal_list)
bbox_results = self._bbox_forward(stage, x, rois, object_feats,
img_metas)
object_feats = bbox_results['object_feats']
cls_score = bbox_results['cls_score']
proposal_list = bbox_results['detach_proposal_list']
if self.with_mask:
rois = bbox2roi(proposal_list)
mask_results = self._mask_forward(stage, x, rois,
bbox_results['attn_feats'])
mask_results['mask_pred'] = mask_results['mask_pred'].reshape(
num_imgs, -1, *mask_results['mask_pred'].size()[1:])
num_classes = self.bbox_head[-1].num_classes
det_bboxes = []
det_labels = []
if self.bbox_head[-1].loss_cls.use_sigmoid:
cls_score = cls_score.sigmoid()
else:
cls_score = cls_score.softmax(-1)[..., :-1]
for img_id in range(num_imgs):
cls_score_per_img = cls_score[img_id]
scores_per_img, topk_indices = cls_score_per_img.flatten(
0, 1).topk(
self.test_cfg.max_per_img, sorted=False)
labels_per_img = topk_indices % num_classes
bbox_pred_per_img = proposal_list[img_id][topk_indices //
num_classes]
if rescale:
scale_factor = img_metas[img_id]['scale_factor']
bbox_pred_per_img /= bbox_pred_per_img.new_tensor(scale_factor)
det_bboxes.append(
torch.cat([bbox_pred_per_img, scores_per_img[:, None]], dim=1))
det_labels.append(labels_per_img)
bbox_results = [
bbox2result(det_bboxes[i], det_labels[i], num_classes)
for i in range(num_imgs)
]
if self.with_mask:
if rescale and not isinstance(scale_factors[0], float):
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i][:, :4]
for i in range(len(det_bboxes))
]
segm_results = []
mask_pred = mask_results['mask_pred']
for img_id in range(num_imgs):
mask_pred_per_img = mask_pred[img_id].flatten(0,
1)[topk_indices]
mask_pred_per_img = mask_pred_per_img[:, None, ...].repeat(
1, num_classes, 1, 1)
segm_result = self.mask_head[-1].get_seg_masks(
mask_pred_per_img, _bboxes[img_id], det_labels[img_id],
self.test_cfg, ori_shapes[img_id], scale_factors[img_id],
rescale)
segm_results.append(segm_result)
if self.with_mask:
results = list(zip(bbox_results, segm_results))
else:
results = bbox_results
return results
def aug_test(self, features, proposal_list, img_metas, rescale=False):
raise NotImplementedError(
'Sparse R-CNN and QueryInst does not support `aug_test`')
def forward_dummy(self, x, proposal_boxes, proposal_features, img_metas):
"""Dummy forward function when do the flops computing."""
all_stage_bbox_results = []
proposal_list = [proposal_boxes[i] for i in range(len(proposal_boxes))]
object_feats = proposal_features
if self.with_bbox:
for stage in range(self.num_stages):
rois = bbox2roi(proposal_list)
bbox_results = self._bbox_forward(stage, x, rois, object_feats,
img_metas)
all_stage_bbox_results.append((bbox_results, ))
proposal_list = bbox_results['detach_proposal_list']
object_feats = bbox_results['object_feats']
if self.with_mask:
rois = bbox2roi(proposal_list)
mask_results = self._mask_forward(
stage, x, rois, bbox_results['attn_feats'])
all_stage_bbox_results[-1] += (mask_results, )
return all_stage_bbox_results
# mmdetection | mmdetection-master/mmdet/models/roi_heads/standard_roi_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler
from ..builder import HEADS, build_head, build_roi_extractor
from .base_roi_head import BaseRoIHead
from .test_mixins import BBoxTestMixin, MaskTestMixin
@HEADS.register_module()
class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):
"""Simplest base roi head including one bbox head and one mask head."""
def init_assigner_sampler(self):
"""Initialize assigner and sampler."""
self.bbox_assigner = None
self.bbox_sampler = None
if self.train_cfg:
self.bbox_assigner = build_assigner(self.train_cfg.assigner)
self.bbox_sampler = build_sampler(
self.train_cfg.sampler, context=self)
def init_bbox_head(self, bbox_roi_extractor, bbox_head):
"""Initialize ``bbox_head``"""
self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)
self.bbox_head = build_head(bbox_head)
def init_mask_head(self, mask_roi_extractor, mask_head):
"""Initialize ``mask_head``"""
if mask_roi_extractor is not None:
self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)
self.share_roi_extractor = False
else:
self.share_roi_extractor = True
self.mask_roi_extractor = self.bbox_roi_extractor
self.mask_head = build_head(mask_head)
def forward_dummy(self, x, proposals):
"""Dummy forward function."""
# bbox head
outs = ()
rois = bbox2roi([proposals])
if self.with_bbox:
bbox_results = self._bbox_forward(x, rois)
outs = outs + (bbox_results['cls_score'],
bbox_results['bbox_pred'])
# mask head
if self.with_mask:
mask_rois = rois[:100]
mask_results = self._mask_forward(x, mask_rois)
outs = outs + (mask_results['mask_pred'], )
return outs
def forward_train(self,
x,
img_metas,
proposal_list,
gt_bboxes,
gt_labels,
gt_bboxes_ignore=None,
gt_masks=None,
**kwargs):
"""
Args:
x (list[Tensor]): list of multi-level img features.
img_metas (list[dict]): list of image info dict where each dict
has: 'img_shape', 'scale_factor', 'flip', and may also contain
'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.
For details on the values of these keys see
`mmdet/datasets/pipelines/formatting.py:Collect`.
            proposal_list (list[Tensor]): list of region proposals.
gt_bboxes (list[Tensor]): Ground truth bboxes for each image with
shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.
gt_labels (list[Tensor]): class indices corresponding to each box
gt_bboxes_ignore (None | list[Tensor]): specify which bounding
boxes can be ignored when computing the loss.
gt_masks (None | Tensor) : true segmentation masks for each box
used if the architecture supports a segmentation task.
Returns:
dict[str, Tensor]: a dictionary of loss components
"""
# assign gts and sample proposals
if self.with_bbox or self.with_mask:
num_imgs = len(img_metas)
if gt_bboxes_ignore is None:
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = self.bbox_assigner.assign(
proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],
gt_labels[i])
sampling_result = self.bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=[lvl_feat[i][None] for lvl_feat in x])
sampling_results.append(sampling_result)
losses = dict()
# bbox head forward and loss
if self.with_bbox:
bbox_results = self._bbox_forward_train(x, sampling_results,
gt_bboxes, gt_labels,
img_metas)
losses.update(bbox_results['loss_bbox'])
# mask head forward and loss
if self.with_mask:
mask_results = self._mask_forward_train(x, sampling_results,
bbox_results['bbox_feats'],
gt_masks, img_metas)
losses.update(mask_results['loss_mask'])
return losses
def _bbox_forward(self, x, rois):
"""Box head forward function used in both training and testing."""
# TODO: a more flexible way to decide which feature maps to use
bbox_feats = self.bbox_roi_extractor(
x[:self.bbox_roi_extractor.num_inputs], rois)
if self.with_shared_head:
bbox_feats = self.shared_head(bbox_feats)
cls_score, bbox_pred = self.bbox_head(bbox_feats)
bbox_results = dict(
cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
return bbox_results
def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels,
img_metas):
"""Run forward function and calculate loss for box head in training."""
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_results = self._bbox_forward(x, rois)
bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,
gt_labels, self.train_cfg)
loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],
bbox_results['bbox_pred'], rois,
*bbox_targets)
bbox_results.update(loss_bbox=loss_bbox)
return bbox_results
def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,
img_metas):
"""Run forward function and calculate loss for mask head in
training."""
if not self.share_roi_extractor:
pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])
mask_results = self._mask_forward(x, pos_rois)
else:
pos_inds = []
device = bbox_feats.device
for res in sampling_results:
pos_inds.append(
torch.ones(
res.pos_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds.append(
torch.zeros(
res.neg_bboxes.shape[0],
device=device,
dtype=torch.uint8))
pos_inds = torch.cat(pos_inds)
mask_results = self._mask_forward(
x, pos_inds=pos_inds, bbox_feats=bbox_feats)
mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,
self.train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.mask_head.loss(mask_results['mask_pred'],
mask_targets, pos_labels)
mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)
return mask_results
def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):
"""Mask head forward function used in both training and testing."""
assert ((rois is not None) ^
(pos_inds is not None and bbox_feats is not None))
if rois is not None:
mask_feats = self.mask_roi_extractor(
x[:self.mask_roi_extractor.num_inputs], rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
else:
assert bbox_feats is not None
mask_feats = bbox_feats[pos_inds]
mask_pred = self.mask_head(mask_feats)
mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)
return mask_results
async def async_simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Async test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = await self.async_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
bbox_results = bbox2result(det_bboxes, det_labels,
self.bbox_head.num_classes)
if not self.with_mask:
return bbox_results
else:
segm_results = await self.async_test_mask(
x,
img_metas,
det_bboxes,
det_labels,
rescale=rescale,
mask_test_cfg=self.test_cfg.get('mask'))
return bbox_results, segm_results
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Test without augmentation.
Args:
x (tuple[Tensor]): Features from upstream network. Each
has shape (batch_size, c, h, w).
proposal_list (list(Tensor)): Proposals from rpn head.
Each has shape (num_proposals, 5), last dimension
5 represent (x1, y1, x2, y2, score).
img_metas (list[dict]): Meta information of images.
            rescale (bool): Whether to rescale the results to
                the original image. Default: False.
Returns:
list[list[np.ndarray]] or list[tuple]: When no mask branch,
it is bbox results of each image and classes with type
`list[list[np.ndarray]]`. The outer list
corresponds to each image. The inner list
corresponds to each class. When the model has mask branch,
it contains bbox results and mask results.
The outer list corresponds to each image, and first element
of tuple is bbox results, second element is mask results.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
bbox_results = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head.num_classes)
for i in range(len(det_bboxes))
]
if not self.with_mask:
return bbox_results
else:
segm_results = self.simple_test_mask(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
return list(zip(bbox_results, segm_results))
def aug_test(self, x, proposal_list, img_metas, rescale=False):
"""Test with augmentations.
If rescale is False, then returned bboxes and masks will fit the scale
of imgs[0].
"""
det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,
proposal_list,
self.test_cfg)
if rescale:
_det_bboxes = det_bboxes
else:
_det_bboxes = det_bboxes.clone()
_det_bboxes[:, :4] *= det_bboxes.new_tensor(
img_metas[0][0]['scale_factor'])
bbox_results = bbox2result(_det_bboxes, det_labels,
self.bbox_head.num_classes)
# det_bboxes always keep the original scale
if self.with_mask:
segm_results = self.aug_test_mask(x, img_metas, det_bboxes,
det_labels)
return [(bbox_results, segm_results)]
else:
return [bbox_results]
def onnx_export(self, x, proposals, img_metas, rescale=False):
"""Test without augmentation."""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes, det_labels = self.bbox_onnx_export(
x, img_metas, proposals, self.test_cfg, rescale=rescale)
if not self.with_mask:
return det_bboxes, det_labels
else:
segm_results = self.mask_onnx_export(
x, img_metas, det_bboxes, det_labels, rescale=rescale)
return det_bboxes, det_labels, segm_results
def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs):
"""Export mask branch to onnx which supports batch inference.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
img_metas (list[dict]): Image meta info.
            det_bboxes (Tensor): Bboxes and corresponding scores,
                has shape [N, num_bboxes, 5].
det_labels (Tensor): class labels of
shape [N, num_bboxes].
Returns:
Tensor: The segmentation results of shape [N, num_bboxes,
image_height, image_width].
"""
# image shapes of images in the batch
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
raise RuntimeError('[ONNX Error] Can not record MaskHead '
'as it has not been executed this time')
batch_size = det_bboxes.size(0)
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
det_bboxes = det_bboxes[..., :4]
batch_index = torch.arange(
det_bboxes.size(0), device=det_bboxes.device).float().view(
-1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)
mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)
mask_rois = mask_rois.view(-1, 5)
mask_results = self._mask_forward(x, mask_rois)
mask_pred = mask_results['mask_pred']
max_shape = img_metas[0]['img_shape_for_onnx']
num_det = det_bboxes.shape[1]
det_bboxes = det_bboxes.reshape(-1, 4)
det_labels = det_labels.reshape(-1)
segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes,
det_labels, self.test_cfg,
max_shape)
segm_results = segm_results.reshape(batch_size, num_det, max_shape[0],
max_shape[1])
return segm_results
def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg,
**kwargs):
"""Export bbox branch to onnx which supports batch inference.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
img_metas (list[dict]): Image meta info.
proposals (Tensor): Region proposals with
batch dimension, has shape [N, num_bboxes, 5].
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
Returns:
tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]
and class labels of shape [N, num_bboxes].
"""
# get origin input shape to support onnx dynamic input shape
assert len(
img_metas
) == 1, 'Only support one input image while in exporting to ONNX'
img_shapes = img_metas[0]['img_shape_for_onnx']
rois = proposals
batch_index = torch.arange(
rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(
rois.size(0), rois.size(1), 1)
rois = torch.cat([batch_index, rois[..., :4]], dim=-1)
batch_size = rois.shape[0]
num_proposals_per_img = rois.shape[1]
# Eliminate the batch dimension
rois = rois.view(-1, 5)
bbox_results = self._bbox_forward(x, rois)
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
# Recover the batch dimension
rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))
cls_score = cls_score.reshape(batch_size, num_proposals_per_img,
cls_score.size(-1))
bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,
bbox_pred.size(-1))
det_bboxes, det_labels = self.bbox_head.onnx_export(
rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg)
return det_bboxes, det_labels
# mmdetection | mmdetection-master/mmdet/models/roi_heads/test_mixins.py
# Copyright (c) OpenMMLab. All rights reserved.
import sys
import warnings
import numpy as np
import torch
from mmdet.core import (bbox2roi, bbox_mapping, merge_aug_bboxes,
merge_aug_masks, multiclass_nms)
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import completed
class BBoxTestMixin:
if sys.version_info >= (3, 7):
async def async_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False,
**kwargs):
"""Asynchronized test for box head without augmentation."""
rois = bbox2roi(proposals)
roi_feats = self.bbox_roi_extractor(
x[:len(self.bbox_roi_extractor.featmap_strides)], rois)
if self.with_shared_head:
roi_feats = self.shared_head(roi_feats)
sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)
async with completed(
__name__, 'bbox_head_forward',
sleep_interval=sleep_interval):
cls_score, bbox_pred = self.bbox_head(roi_feats)
img_shape = img_metas[0]['img_shape']
scale_factor = img_metas[0]['scale_factor']
det_bboxes, det_labels = self.bbox_head.get_bboxes(
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=rescale,
cfg=rcnn_test_cfg)
return det_bboxes, det_labels
def simple_test_bboxes(self,
x,
img_metas,
proposals,
rcnn_test_cfg,
rescale=False):
"""Test only det bboxes without augmentation.
Args:
x (tuple[Tensor]): Feature maps of all scale level.
img_metas (list[dict]): Image meta info.
proposals (List[Tensor]): Region proposals.
rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.
rescale (bool): If True, return boxes in original image space.
Default: False.
Returns:
tuple[list[Tensor], list[Tensor]]: The first list contains
the boxes of the corresponding image in a batch, each
tensor has the shape (num_boxes, 5) and last dimension
5 represent (tl_x, tl_y, br_x, br_y, score). Each Tensor
in the second list is the labels with shape (num_boxes, ).
The length of both lists should be equal to batch_size.
"""
rois = bbox2roi(proposals)
if rois.shape[0] == 0:
batch_size = len(proposals)
det_bbox = rois.new_zeros(0, 5)
det_label = rois.new_zeros((0, ), dtype=torch.long)
if rcnn_test_cfg is None:
det_bbox = det_bbox[:, :4]
det_label = rois.new_zeros(
(0, self.bbox_head.fc_cls.out_features))
# There is no proposal in the whole batch
return [det_bbox] * batch_size, [det_label] * batch_size
bbox_results = self._bbox_forward(x, rois)
img_shapes = tuple(meta['img_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
# split batch bbox prediction back to each image
cls_score = bbox_results['cls_score']
bbox_pred = bbox_results['bbox_pred']
num_proposals_per_img = tuple(len(p) for p in proposals)
rois = rois.split(num_proposals_per_img, 0)
cls_score = cls_score.split(num_proposals_per_img, 0)
# some detector with_reg is False, bbox_pred will be None
if bbox_pred is not None:
# TODO move this to a sabl_roi_head
# the bbox prediction of some detectors like SABL is not Tensor
if isinstance(bbox_pred, torch.Tensor):
bbox_pred = bbox_pred.split(num_proposals_per_img, 0)
else:
bbox_pred = self.bbox_head.bbox_pred_split(
bbox_pred, num_proposals_per_img)
else:
bbox_pred = (None, ) * len(proposals)
# apply bbox post-processing to each image individually
det_bboxes = []
det_labels = []
for i in range(len(proposals)):
if rois[i].shape[0] == 0:
# There is no proposal in the single image
det_bbox = rois[i].new_zeros(0, 5)
det_label = rois[i].new_zeros((0, ), dtype=torch.long)
if rcnn_test_cfg is None:
det_bbox = det_bbox[:, :4]
det_label = rois[i].new_zeros(
(0, self.bbox_head.fc_cls.out_features))
else:
det_bbox, det_label = self.bbox_head.get_bboxes(
rois[i],
cls_score[i],
bbox_pred[i],
img_shapes[i],
scale_factors[i],
rescale=rescale,
cfg=rcnn_test_cfg)
det_bboxes.append(det_bbox)
det_labels.append(det_label)
return det_bboxes, det_labels
def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
"""Test det bboxes with test time augmentation."""
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
# TODO more flexible
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
rois = bbox2roi([proposals])
bbox_results = self._bbox_forward(x, rois)
bboxes, scores = self.bbox_head.get_bboxes(
rois,
bbox_results['cls_score'],
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
aug_bboxes.append(bboxes)
aug_scores.append(scores)
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
if merged_bboxes.shape[0] == 0:
# There is no proposal in the single image
det_bboxes = merged_bboxes.new_zeros(0, 5)
det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long)
else:
det_bboxes, det_labels = multiclass_nms(merged_bboxes,
merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
return det_bboxes, det_labels
class MaskTestMixin:
if sys.version_info >= (3, 7):
async def async_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False,
mask_test_cfg=None):
"""Asynchronized test for mask head without augmentation."""
# image shape of the first image in the batch (only one)
ori_shape = img_metas[0]['ori_shape']
scale_factor = img_metas[0]['scale_factor']
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes)]
else:
if rescale and not isinstance(scale_factor,
(float, torch.Tensor)):
scale_factor = det_bboxes.new_tensor(scale_factor)
_bboxes = (
det_bboxes[:, :4] *
scale_factor if rescale else det_bboxes)
mask_rois = bbox2roi([_bboxes])
mask_feats = self.mask_roi_extractor(
x[:len(self.mask_roi_extractor.featmap_strides)],
mask_rois)
if self.with_shared_head:
mask_feats = self.shared_head(mask_feats)
if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):
sleep_interval = mask_test_cfg['async_sleep_interval']
else:
sleep_interval = 0.035
async with completed(
__name__,
'mask_head_forward',
sleep_interval=sleep_interval):
mask_pred = self.mask_head(mask_feats)
segm_result = self.mask_head.get_seg_masks(
mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,
scale_factor, rescale)
return segm_result
def simple_test_mask(self,
x,
img_metas,
det_bboxes,
det_labels,
rescale=False):
"""Simple test for mask head without augmentation."""
# image shapes of images in the batch
ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)
scale_factors = tuple(meta['scale_factor'] for meta in img_metas)
if isinstance(scale_factors[0], float):
warnings.warn(
                'Scale factor in img_metas should be an '
                'ndarray with shape (4,), '
                'arranged as (factor_w, factor_h, factor_w, factor_h). '
                'The scale_factor with float type has been deprecated.')
scale_factors = np.array([scale_factors] * 4, dtype=np.float32)
num_imgs = len(det_bboxes)
if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):
segm_results = [[[] for _ in range(self.mask_head.num_classes)]
for _ in range(num_imgs)]
else:
# if det_bboxes is rescaled to the original image size, we need to
# rescale it back to the testing scale to obtain RoIs.
if rescale:
scale_factors = [
torch.from_numpy(scale_factor).to(det_bboxes[0].device)
for scale_factor in scale_factors
]
_bboxes = [
det_bboxes[i][:, :4] *
scale_factors[i] if rescale else det_bboxes[i][:, :4]
for i in range(len(det_bboxes))
]
mask_rois = bbox2roi(_bboxes)
mask_results = self._mask_forward(x, mask_rois)
mask_pred = mask_results['mask_pred']
# split batch mask prediction back to each image
num_mask_roi_per_img = [len(det_bbox) for det_bbox in det_bboxes]
mask_preds = mask_pred.split(num_mask_roi_per_img, 0)
# apply mask post-processing to each image individually
segm_results = []
for i in range(num_imgs):
if det_bboxes[i].shape[0] == 0:
segm_results.append(
[[] for _ in range(self.mask_head.num_classes)])
else:
segm_result = self.mask_head.get_seg_masks(
mask_preds[i], _bboxes[i], det_labels[i],
self.test_cfg, ori_shapes[i], scale_factors[i],
rescale)
segm_results.append(segm_result)
return segm_results
def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):
"""Test for mask head with test time augmentation."""
if det_bboxes.shape[0] == 0:
segm_result = [[] for _ in range(self.mask_head.num_classes)]
else:
aug_masks = []
for x, img_meta in zip(feats, img_metas):
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
_bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,
scale_factor, flip, flip_direction)
mask_rois = bbox2roi([_bboxes])
mask_results = self._mask_forward(x, mask_rois)
# convert to numpy array to save memory
aug_masks.append(
mask_results['mask_pred'].sigmoid().cpu().numpy())
merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)
ori_shape = img_metas[0][0]['ori_shape']
scale_factor = det_bboxes.new_ones(4)
segm_result = self.mask_head.get_seg_masks(
merged_masks,
det_bboxes,
det_labels,
self.test_cfg,
ori_shape,
scale_factor=scale_factor,
rescale=False)
return segm_result
| 13,557 | 42.455128 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/trident_roi_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops import batched_nms
from mmdet.core import (bbox2result, bbox2roi, bbox_mapping, merge_aug_bboxes,
multiclass_nms)
from mmdet.models.roi_heads.standard_roi_head import StandardRoIHead
from ..builder import HEADS
@HEADS.register_module()
class TridentRoIHead(StandardRoIHead):
"""Trident roi head.
Args:
num_branch (int): Number of branches in TridentNet.
test_branch_idx (int): In inference, all 3 branches will be used
if `test_branch_idx==-1`, otherwise only branch with index
`test_branch_idx` will be used.
"""
def __init__(self, num_branch, test_branch_idx, **kwargs):
self.num_branch = num_branch
self.test_branch_idx = test_branch_idx
super(TridentRoIHead, self).__init__(**kwargs)
def merge_trident_bboxes(self, trident_det_bboxes, trident_det_labels):
"""Merge bbox predictions of each branch."""
if trident_det_bboxes.numel() == 0:
det_bboxes = trident_det_bboxes.new_zeros((0, 5))
det_labels = trident_det_bboxes.new_zeros((0, ), dtype=torch.long)
else:
nms_bboxes = trident_det_bboxes[:, :4]
nms_scores = trident_det_bboxes[:, 4].contiguous()
nms_inds = trident_det_labels
nms_cfg = self.test_cfg['nms']
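            # batched_nms treats the labels as group indices: boxes from
            # different branches that share a class compete in one NMS
            # group, while boxes of different classes never suppress each
            # other.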
det_bboxes, keep = batched_nms(nms_bboxes, nms_scores, nms_inds,
nms_cfg)
det_labels = trident_det_labels[keep]
if self.test_cfg['max_per_img'] > 0:
det_labels = det_labels[:self.test_cfg['max_per_img']]
det_bboxes = det_bboxes[:self.test_cfg['max_per_img']]
return det_bboxes, det_labels
def simple_test(self,
x,
proposal_list,
img_metas,
proposals=None,
rescale=False):
"""Test without augmentation as follows:
1. Compute prediction bbox and label per branch.
2. Merge predictions of each branch according to scores of
bboxes, i.e., bboxes with higher score are kept to give
top-k prediction.
"""
assert self.with_bbox, 'Bbox head must be implemented.'
det_bboxes_list, det_labels_list = self.simple_test_bboxes(
x, img_metas, proposal_list, self.test_cfg, rescale=rescale)
num_branch = self.num_branch if self.test_branch_idx == -1 else 1
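        # det_bboxes_list holds num_branch predictions per input image laid
        # out consecutively; each group of num_branch entries is merged into
        # a single per-image prediction below.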
        for i in range(len(det_bboxes_list)):
            if det_bboxes_list[i].shape[0] == 0:
                det_bboxes_list[i] = det_bboxes_list[i].new_empty((0, 5))
det_bboxes, det_labels = [], []
for i in range(len(img_metas) // num_branch):
det_result = self.merge_trident_bboxes(
torch.cat(det_bboxes_list[i * num_branch:(i + 1) *
num_branch]),
torch.cat(det_labels_list[i * num_branch:(i + 1) *
num_branch]))
det_bboxes.append(det_result[0])
det_labels.append(det_result[1])
bbox_results = [
bbox2result(det_bboxes[i], det_labels[i],
self.bbox_head.num_classes)
for i in range(len(det_bboxes))
]
return bbox_results
def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):
"""Test det bboxes with test time augmentation."""
aug_bboxes = []
aug_scores = []
for x, img_meta in zip(feats, img_metas):
# only one image in the batch
img_shape = img_meta[0]['img_shape']
scale_factor = img_meta[0]['scale_factor']
flip = img_meta[0]['flip']
flip_direction = img_meta[0]['flip_direction']
trident_bboxes, trident_scores = [], []
for branch_idx in range(len(proposal_list)):
proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,
scale_factor, flip, flip_direction)
rois = bbox2roi([proposals])
bbox_results = self._bbox_forward(x, rois)
bboxes, scores = self.bbox_head.get_bboxes(
rois,
bbox_results['cls_score'],
bbox_results['bbox_pred'],
img_shape,
scale_factor,
rescale=False,
cfg=None)
trident_bboxes.append(bboxes)
trident_scores.append(scores)
aug_bboxes.append(torch.cat(trident_bboxes, 0))
aug_scores.append(torch.cat(trident_scores, 0))
# after merging, bboxes will be rescaled to the original image size
merged_bboxes, merged_scores = merge_aug_bboxes(
aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)
det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,
rcnn_test_cfg.score_thr,
rcnn_test_cfg.nms,
rcnn_test_cfg.max_per_img)
return det_bboxes, det_labels
| 5,321 | 42.983471 | 78 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/bbox_heads/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .bbox_head import BBoxHead
from .convfc_bbox_head import (ConvFCBBoxHead, Shared2FCBBoxHead,
Shared4Conv1FCBBoxHead)
from .dii_head import DIIHead
from .double_bbox_head import DoubleConvFCBBoxHead
from .sabl_head import SABLHead
from .scnet_bbox_head import SCNetBBoxHead
__all__ = [
'BBoxHead', 'ConvFCBBoxHead', 'Shared2FCBBoxHead',
'Shared4Conv1FCBBoxHead', 'DoubleConvFCBBoxHead', 'SABLHead', 'DIIHead',
'SCNetBBoxHead'
]
| 524 | 34 | 76 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/bbox_heads/bbox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.runner import BaseModule, auto_fp16, force_fp32
from torch.nn.modules.utils import _pair
from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy
from mmdet.models.utils import build_linear_layer
@HEADS.register_module()
class BBoxHead(BaseModule):
"""Simplest RoI head, with only two fc layers for classification and
regression respectively."""
def __init__(self,
with_avg_pool=False,
with_cls=True,
with_reg=True,
roi_feat_size=7,
in_channels=256,
num_classes=80,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
clip_border=True,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
reg_decoded_bbox=False,
reg_predictor_cfg=dict(type='Linear'),
cls_predictor_cfg=dict(type='Linear'),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox=dict(
type='SmoothL1Loss', beta=1.0, loss_weight=1.0),
init_cfg=None):
super(BBoxHead, self).__init__(init_cfg)
assert with_cls or with_reg
self.with_avg_pool = with_avg_pool
self.with_cls = with_cls
self.with_reg = with_reg
self.roi_feat_size = _pair(roi_feat_size)
self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]
self.in_channels = in_channels
self.num_classes = num_classes
self.reg_class_agnostic = reg_class_agnostic
self.reg_decoded_bbox = reg_decoded_bbox
self.reg_predictor_cfg = reg_predictor_cfg
self.cls_predictor_cfg = cls_predictor_cfg
self.fp16_enabled = False
self.bbox_coder = build_bbox_coder(bbox_coder)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox = build_loss(loss_bbox)
in_channels = self.in_channels
if self.with_avg_pool:
self.avg_pool = nn.AvgPool2d(self.roi_feat_size)
else:
in_channels *= self.roi_feat_area
if self.with_cls:
# need to add background class
if self.custom_cls_channels:
cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
else:
cls_channels = num_classes + 1
self.fc_cls = build_linear_layer(
self.cls_predictor_cfg,
in_features=in_channels,
out_features=cls_channels)
if self.with_reg:
out_dim_reg = 4 if reg_class_agnostic else 4 * num_classes
self.fc_reg = build_linear_layer(
self.reg_predictor_cfg,
in_features=in_channels,
out_features=out_dim_reg)
self.debug_imgs = None
if init_cfg is None:
self.init_cfg = []
if self.with_cls:
self.init_cfg += [
dict(
type='Normal', std=0.01, override=dict(name='fc_cls'))
]
if self.with_reg:
self.init_cfg += [
dict(
type='Normal', std=0.001, override=dict(name='fc_reg'))
]
@property
def custom_cls_channels(self):
return getattr(self.loss_cls, 'custom_cls_channels', False)
@property
def custom_activation(self):
return getattr(self.loss_cls, 'custom_activation', False)
@property
def custom_accuracy(self):
return getattr(self.loss_cls, 'custom_accuracy', False)
@auto_fp16()
def forward(self, x):
if self.with_avg_pool:
if x.numel() > 0:
x = self.avg_pool(x)
x = x.view(x.size(0), -1)
else:
# avg_pool does not support empty tensor,
                # so use torch.mean instead
x = torch.mean(x, dim=(-1, -2))
cls_score = self.fc_cls(x) if self.with_cls else None
bbox_pred = self.fc_reg(x) if self.with_reg else None
return cls_score, bbox_pred
def _get_target_single(self, pos_bboxes, neg_bboxes, pos_gt_bboxes,
pos_gt_labels, cfg):
"""Calculate the ground truth for proposals in the single image
according to the sampling results.
Args:
pos_bboxes (Tensor): Contains all the positive boxes,
has shape (num_pos, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
neg_bboxes (Tensor): Contains all the negative boxes,
has shape (num_neg, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
pos_gt_bboxes (Tensor): Contains gt_boxes for
all positive samples, has shape (num_pos, 4),
the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
pos_gt_labels (Tensor): Contains gt_labels for
all positive samples, has shape (num_pos, ).
cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
Returns:
Tuple[Tensor]: Ground truth for proposals
in a single image. Containing the following Tensors:
- labels(Tensor): Gt_labels for all proposals, has
shape (num_proposals,).
- label_weights(Tensor): Labels_weights for all
proposals, has shape (num_proposals,).
- bbox_targets(Tensor):Regression target for all
proposals, has shape (num_proposals, 4), the
last dimension 4 represents [tl_x, tl_y, br_x, br_y].
- bbox_weights(Tensor):Regression weights for all
proposals, has shape (num_proposals, 4).
"""
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
        # The original implementation used new_zeros since BG was set to 0;
        # now we fill with num_classes because BG cat_id = num_classes and
        # FG cat_id = [0, num_classes-1].
labels = pos_bboxes.new_full((num_samples, ),
self.num_classes,
dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
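        # e.g. for COCO (num_classes=80) every entry of `labels` starts as
        # 80 (background) and only the first num_pos entries are overwritten
        # with foreground classes below.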
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[:num_pos] = pos_weight
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
pos_bboxes, pos_gt_bboxes)
else:
# When the regression loss (e.g. `IouLoss`, `GIouLoss`)
# is applied directly on the decoded bounding boxes, both
# the predicted boxes and regression targets should be with
# absolute coordinate format.
pos_bbox_targets = pos_gt_bboxes
bbox_targets[:num_pos, :] = pos_bbox_targets
bbox_weights[:num_pos, :] = 1
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return labels, label_weights, bbox_targets, bbox_weights
def get_targets(self,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
concat=True):
"""Calculate the ground truth for all samples in a batch according to
the sampling_results.
        This calls `_get_target_single` on the sampling results of each
        image and optionally concatenates the per-image targets into
        batch-level tensors.
Args:
sampling_results (List[obj:SamplingResults]): Assign results of
all images in a batch after sampling.
gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
each tensor has shape (num_gt, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
gt_labels (list[Tensor]): Gt_labels of all images in a batch,
each tensor has shape (num_gt,).
rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.
concat (bool): Whether to concatenate the results of all
the images in a single batch.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following list of Tensors:
- labels (list[Tensor],Tensor): Gt_labels for all
proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- label_weights (list[Tensor]): Labels_weights for
all proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals,).
- bbox_targets (list[Tensor],Tensor): Regression target
for all proposals in a batch, each tensor in list
has shape (num_proposals, 4) when `concat=False`,
otherwise just a single tensor has shape
(num_all_proposals, 4), the last dimension 4 represents
[tl_x, tl_y, br_x, br_y].
- bbox_weights (list[tensor],Tensor): Regression weights for
all proposals in a batch, each tensor in list has shape
(num_proposals, 4) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals, 4).
"""
pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
self._get_target_single,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def loss(self,
cls_score,
bbox_pred,
rois,
labels,
label_weights,
bbox_targets,
bbox_weights,
reduction_override=None):
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
if cls_score.numel() > 0:
loss_cls_ = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
if isinstance(loss_cls_, dict):
losses.update(loss_cls_)
else:
losses['loss_cls'] = loss_cls_
if self.custom_activation:
acc_ = self.loss_cls.get_accuracy(cls_score, labels)
losses.update(acc_)
else:
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
bg_class_ind = self.num_classes
# 0~self.num_classes-1 are FG, self.num_classes is BG
pos_inds = (labels >= 0) & (labels < bg_class_ind)
# do not perform bounding box regression for BG anymore.
if pos_inds.any():
if self.reg_decoded_bbox:
# When the regression loss (e.g. `IouLoss`,
# `GIouLoss`, `DIouLoss`) is applied directly on
# the decoded bounding boxes, it decodes the
# already encoded coordinates to absolute format.
bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)
if self.reg_class_agnostic:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), 4)[pos_inds.type(torch.bool)]
else:
pos_bbox_pred = bbox_pred.view(
bbox_pred.size(0), -1,
4)[pos_inds.type(torch.bool),
labels[pos_inds.type(torch.bool)]]
losses['loss_bbox'] = self.loss_bbox(
pos_bbox_pred,
bbox_targets[pos_inds.type(torch.bool)],
bbox_weights[pos_inds.type(torch.bool)],
avg_factor=bbox_targets.size(0),
reduction_override=reduction_override)
else:
losses['loss_bbox'] = bbox_pred[pos_inds].sum()
return losses
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def get_bboxes(self,
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None):
"""Transform network output for a batch into bbox predictions.
Args:
rois (Tensor): Boxes to be transformed. Has shape (num_boxes, 5).
                last dimension 5 arranged as (batch_index, x1, y1, x2, y2).
cls_score (Tensor): Box scores, has shape
(num_boxes, num_classes + 1).
bbox_pred (Tensor, optional): Box energies / deltas.
has shape (num_boxes, num_classes * 4).
img_shape (Sequence[int], optional): Maximum bounds for boxes,
specifies (H, W, C) or (H, W).
scale_factor (ndarray): Scale factor of the
                image arranged as (w_scale, h_scale, w_scale, h_scale).
rescale (bool): If True, return boxes in original image space.
Default: False.
cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None
Returns:
tuple[Tensor, Tensor]:
First tensor is `det_bboxes`, has the shape
(num_boxes, 5) and last
dimension 5 represent (tl_x, tl_y, br_x, br_y, score).
Second tensor is the labels with shape (num_boxes, ).
"""
# some loss (Seesaw loss..) may have custom activation
if self.custom_cls_channels:
scores = self.loss_cls.get_activation(cls_score)
else:
scores = F.softmax(
cls_score, dim=-1) if cls_score is not None else None
# bbox_pred would be None in some detector when with_reg is False,
# e.g. Grid R-CNN.
if bbox_pred is not None:
bboxes = self.bbox_coder.decode(
rois[..., 1:], bbox_pred, max_shape=img_shape)
else:
bboxes = rois[:, 1:].clone()
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])
if rescale and bboxes.size(0) > 0:
scale_factor = bboxes.new_tensor(scale_factor)
bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view(
bboxes.size()[0], -1)
if cfg is None:
return bboxes, scores
else:
det_bboxes, det_labels = multiclass_nms(bboxes, scores,
cfg.score_thr, cfg.nms,
cfg.max_per_img)
return det_bboxes, det_labels
@force_fp32(apply_to=('bbox_preds', ))
def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
"""Refine bboxes during training.
Args:
rois (Tensor): Shape (n*bs, 5), where n is image number per GPU,
and bs is the sampled RoIs per image. The first column is
the image id and the next 4 columns are x1, y1, x2, y2.
labels (Tensor): Shape (n*bs, ).
bbox_preds (Tensor): Shape (n*bs, 4) or (n*bs, 4*#class).
pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
is a gt bbox.
img_metas (list[dict]): Meta info of each image.
Returns:
list[Tensor]: Refined bboxes of each image in a mini-batch.
Example:
>>> # xdoctest: +REQUIRES(module:kwarray)
>>> import kwarray
>>> import numpy as np
>>> from mmdet.core.bbox.demodata import random_boxes
>>> self = BBoxHead(reg_class_agnostic=True)
>>> n_roi = 2
>>> n_img = 4
>>> scale = 512
>>> rng = np.random.RandomState(0)
>>> img_metas = [{'img_shape': (scale, scale)}
... for _ in range(n_img)]
>>> # Create rois in the expected format
>>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
>>> img_ids = torch.randint(0, n_img, (n_roi,))
>>> img_ids = img_ids.float()
>>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)
>>> # Create other args
>>> labels = torch.randint(0, 2, (n_roi,)).long()
>>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
>>> # For each image, pretend random positive boxes are gts
            >>> is_label_pos = (labels.numpy() > 0).astype(np.int64)
>>> lbl_per_img = kwarray.group_items(is_label_pos,
... img_ids.numpy())
>>> pos_per_img = [sum(lbl_per_img.get(gid, []))
... for gid in range(n_img)]
>>> pos_is_gts = [
>>> torch.randint(0, 2, (npos,)).byte().sort(
>>> descending=True)[0]
>>> for npos in pos_per_img
>>> ]
>>> bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
>>> pos_is_gts, img_metas)
>>> print(bboxes_list)
"""
img_ids = rois[:, 0].long().unique(sorted=True)
assert img_ids.numel() <= len(img_metas)
bboxes_list = []
for i in range(len(img_metas)):
inds = torch.nonzero(
rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
num_rois = inds.numel()
bboxes_ = rois[inds, 1:]
label_ = labels[inds]
bbox_pred_ = bbox_preds[inds]
img_meta_ = img_metas[i]
pos_is_gts_ = pos_is_gts[i]
bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
img_meta_)
# filter gt bboxes
pos_keep = 1 - pos_is_gts_
keep_inds = pos_is_gts_.new_ones(num_rois)
keep_inds[:len(pos_is_gts_)] = pos_keep
bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
return bboxes_list
@force_fp32(apply_to=('bbox_pred', ))
def regress_by_class(self, rois, label, bbox_pred, img_meta):
"""Regress the bbox for the predicted class. Used in Cascade R-CNN.
Args:
rois (Tensor): Rois from `rpn_head` or last stage
`bbox_head`, has shape (num_proposals, 4) or
(num_proposals, 5).
label (Tensor): Only used when `self.reg_class_agnostic`
is False, has shape (num_proposals, ).
bbox_pred (Tensor): Regression prediction of
current stage `bbox_head`. When `self.reg_class_agnostic`
is False, it has shape (n, num_classes * 4), otherwise
it has shape (n, 4).
img_meta (dict): Image meta info.
Returns:
Tensor: Regressed bboxes, the same shape as input rois.
"""
assert rois.size(1) == 4 or rois.size(1) == 5, repr(rois.shape)
if not self.reg_class_agnostic:
label = label * 4
inds = torch.stack((label, label + 1, label + 2, label + 3), 1)
bbox_pred = torch.gather(bbox_pred, 1, inds)
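            # e.g. for a proposal predicted as class 2 the gathered columns
            # are (8, 9, 10, 11), i.e. the 4 deltas that belong to class 2.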
assert bbox_pred.size(1) == 4
max_shape = img_meta['img_shape']
if rois.size(1) == 4:
new_rois = self.bbox_coder.decode(
rois, bbox_pred, max_shape=max_shape)
else:
bboxes = self.bbox_coder.decode(
rois[:, 1:], bbox_pred, max_shape=max_shape)
new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
return new_rois
def onnx_export(self,
rois,
cls_score,
bbox_pred,
img_shape,
cfg=None,
**kwargs):
"""Transform network output for a batch into bbox predictions.
Args:
rois (Tensor): Boxes to be transformed.
Has shape (B, num_boxes, 5)
cls_score (Tensor): Box scores. has shape
(B, num_boxes, num_classes + 1), 1 represent the background.
            bbox_pred (Tensor, optional): Box energies / deltas, has
                shape (B, num_boxes, num_classes * 4).
img_shape (torch.Tensor): Shape of image.
cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Default: None
Returns:
tuple[Tensor, Tensor]: dets of shape [N, num_det, 5]
and class labels of shape [N, num_det].
"""
        assert rois.ndim == 3, 'Only support exporting two-stage ' \
                               'models to ONNX ' \
                               'with batch dimension. '
if self.custom_cls_channels:
scores = self.loss_cls.get_activation(cls_score)
else:
scores = F.softmax(
cls_score, dim=-1) if cls_score is not None else None
if bbox_pred is not None:
bboxes = self.bbox_coder.decode(
rois[..., 1:], bbox_pred, max_shape=img_shape)
else:
bboxes = rois[..., 1:].clone()
if img_shape is not None:
max_shape = bboxes.new_tensor(img_shape)[..., :2]
min_xy = bboxes.new_tensor(0)
max_xy = torch.cat(
[max_shape] * 2, dim=-1).flip(-1).unsqueeze(-2)
bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)
# Replace multiclass_nms with ONNX::NonMaxSuppression in deployment
from mmdet.core.export import add_dummy_nms_for_onnx
max_output_boxes_per_class = cfg.nms.get('max_output_boxes_per_class',
cfg.max_per_img)
iou_threshold = cfg.nms.get('iou_threshold', 0.5)
score_threshold = cfg.score_thr
nms_pre = cfg.get('deploy_nms_pre', -1)
scores = scores[..., :self.num_classes]
if self.reg_class_agnostic:
return add_dummy_nms_for_onnx(
bboxes,
scores,
max_output_boxes_per_class,
iou_threshold,
score_threshold,
pre_top_k=nms_pre,
after_top_k=cfg.max_per_img)
else:
batch_size = scores.shape[0]
labels = torch.arange(
self.num_classes, dtype=torch.long).to(scores.device)
labels = labels.view(1, 1, -1).expand_as(scores)
labels = labels.reshape(batch_size, -1)
scores = scores.reshape(batch_size, -1)
bboxes = bboxes.reshape(batch_size, -1, 4)
max_size = torch.max(img_shape)
# Offset bboxes of each class so that bboxes of different labels
# do not overlap.
offsets = (labels * max_size + 1).unsqueeze(2)
bboxes_for_nms = bboxes + offsets
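            # e.g. with max_size = 800 every box of class k is shifted by
            # k * 800 + 1, so boxes of different classes can never overlap
            # and one class-agnostic NMS behaves like per-class NMS; the
            # shift is undone again after the dummy NMS below.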
batch_dets, labels = add_dummy_nms_for_onnx(
bboxes_for_nms,
scores.unsqueeze(2),
max_output_boxes_per_class,
iou_threshold,
score_threshold,
pre_top_k=nms_pre,
after_top_k=cfg.max_per_img,
labels=labels)
# Offset the bboxes back after dummy nms.
offsets = (labels * max_size + 1).unsqueeze(2)
# Indexing + inplace operation fails with dynamic shape in ONNX
# original style: batch_dets[..., :4] -= offsets
bboxes, scores = batch_dets[..., 0:4], batch_dets[..., 4:5]
bboxes -= offsets
batch_dets = torch.cat([bboxes, scores], dim=2)
return batch_dets, labels
| 25,657 | 42.122689 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.models.builder import HEADS
from mmdet.models.utils import build_linear_layer
from .bbox_head import BBoxHead
@HEADS.register_module()
class ConvFCBBoxHead(BBoxHead):
r"""More general bbox head, with shared conv and fc layers and two optional
separated branches.
.. code-block:: none
/-> cls convs -> cls fcs -> cls
shared convs -> shared fcs
\-> reg convs -> reg fcs -> reg
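    Example:
        >>> # Illustrative sketch, not part of the original implementation;
        >>> # it assumes torch and a working mmdet install so the default
        >>> # loss and bbox coder configs can be built. Values are arbitrary.
        >>> import torch
        >>> self = ConvFCBBoxHead(
        ...     num_shared_fcs=2, in_channels=256, fc_out_channels=1024,
        ...     roi_feat_size=7, num_classes=80)
        >>> roi_feats = torch.rand(8, 256, 7, 7)  # 8 RoIs
        >>> cls_score, bbox_pred = self(roi_feats)
        >>> # cls_score: (8, 81); bbox_pred: (8, 320) with the default
        >>> # class-specific regression (4 deltas per class).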
""" # noqa: W605
def __init__(self,
num_shared_convs=0,
num_shared_fcs=0,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
conv_out_channels=256,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=None,
init_cfg=None,
*args,
**kwargs):
super(ConvFCBBoxHead, self).__init__(
*args, init_cfg=init_cfg, **kwargs)
assert (num_shared_convs + num_shared_fcs + num_cls_convs +
num_cls_fcs + num_reg_convs + num_reg_fcs > 0)
if num_cls_convs > 0 or num_reg_convs > 0:
assert num_shared_fcs == 0
if not self.with_cls:
assert num_cls_convs == 0 and num_cls_fcs == 0
if not self.with_reg:
assert num_reg_convs == 0 and num_reg_fcs == 0
self.num_shared_convs = num_shared_convs
self.num_shared_fcs = num_shared_fcs
self.num_cls_convs = num_cls_convs
self.num_cls_fcs = num_cls_fcs
self.num_reg_convs = num_reg_convs
self.num_reg_fcs = num_reg_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# add shared convs and fcs
self.shared_convs, self.shared_fcs, last_layer_dim = \
self._add_conv_fc_branch(
self.num_shared_convs, self.num_shared_fcs, self.in_channels,
True)
self.shared_out_channels = last_layer_dim
# add cls specific branch
self.cls_convs, self.cls_fcs, self.cls_last_dim = \
self._add_conv_fc_branch(
self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels)
# add reg specific branch
self.reg_convs, self.reg_fcs, self.reg_last_dim = \
self._add_conv_fc_branch(
self.num_reg_convs, self.num_reg_fcs, self.shared_out_channels)
if self.num_shared_fcs == 0 and not self.with_avg_pool:
if self.num_cls_fcs == 0:
self.cls_last_dim *= self.roi_feat_area
if self.num_reg_fcs == 0:
self.reg_last_dim *= self.roi_feat_area
self.relu = nn.ReLU(inplace=True)
# reconstruct fc_cls and fc_reg since input channels are changed
if self.with_cls:
if self.custom_cls_channels:
cls_channels = self.loss_cls.get_cls_channels(self.num_classes)
else:
cls_channels = self.num_classes + 1
self.fc_cls = build_linear_layer(
self.cls_predictor_cfg,
in_features=self.cls_last_dim,
out_features=cls_channels)
if self.with_reg:
out_dim_reg = (4 if self.reg_class_agnostic else 4 *
self.num_classes)
self.fc_reg = build_linear_layer(
self.reg_predictor_cfg,
in_features=self.reg_last_dim,
out_features=out_dim_reg)
if init_cfg is None:
# when init_cfg is None,
# It has been set to
# [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))],
# [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))]
# after `super(ConvFCBBoxHead, self).__init__()`
# we only need to append additional configuration
# for `shared_fcs`, `cls_fcs` and `reg_fcs`
self.init_cfg += [
dict(
type='Xavier',
distribution='uniform',
override=[
dict(name='shared_fcs'),
dict(name='cls_fcs'),
dict(name='reg_fcs')
])
]
def _add_conv_fc_branch(self,
num_branch_convs,
num_branch_fcs,
in_channels,
is_shared=False):
"""Add shared or separable branch.
convs -> avg pool (optional) -> fcs
"""
last_layer_dim = in_channels
# add branch specific conv layers
branch_convs = nn.ModuleList()
if num_branch_convs > 0:
for i in range(num_branch_convs):
conv_in_channels = (
last_layer_dim if i == 0 else self.conv_out_channels)
branch_convs.append(
ConvModule(
conv_in_channels,
self.conv_out_channels,
3,
padding=1,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
last_layer_dim = self.conv_out_channels
# add branch specific fc layers
branch_fcs = nn.ModuleList()
if num_branch_fcs > 0:
# for shared branch, only consider self.with_avg_pool
# for separated branches, also consider self.num_shared_fcs
if (is_shared
or self.num_shared_fcs == 0) and not self.with_avg_pool:
last_layer_dim *= self.roi_feat_area
for i in range(num_branch_fcs):
fc_in_channels = (
last_layer_dim if i == 0 else self.fc_out_channels)
branch_fcs.append(
nn.Linear(fc_in_channels, self.fc_out_channels))
last_layer_dim = self.fc_out_channels
return branch_convs, branch_fcs, last_layer_dim
def forward(self, x):
# shared part
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
# separate branches
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
@HEADS.register_module()
class Shared2FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
super(Shared2FCBBoxHead, self).__init__(
num_shared_convs=0,
num_shared_fcs=2,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
@HEADS.register_module()
class Shared4Conv1FCBBoxHead(ConvFCBBoxHead):
def __init__(self, fc_out_channels=1024, *args, **kwargs):
super(Shared4Conv1FCBBoxHead, self).__init__(
num_shared_convs=4,
num_shared_fcs=1,
num_cls_convs=0,
num_cls_fcs=0,
num_reg_convs=0,
num_reg_fcs=0,
fc_out_channels=fc_out_channels,
*args,
**kwargs)
| 8,364 | 35.369565 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/bbox_heads/dii_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import (bias_init_with_prob, build_activation_layer,
build_norm_layer)
from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
from mmcv.runner import auto_fp16, force_fp32
from mmdet.core import multi_apply
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.dense_heads.atss_head import reduce_mean
from mmdet.models.losses import accuracy
from mmdet.models.utils import build_transformer
from .bbox_head import BBoxHead
@HEADS.register_module()
class DIIHead(BBoxHead):
r"""Dynamic Instance Interactive Head for `Sparse R-CNN: End-to-End Object
Detection with Learnable Proposals <https://arxiv.org/abs/2011.12450>`_
Args:
num_classes (int): Number of class in dataset.
Defaults to 80.
num_ffn_fcs (int): The number of fully-connected
layers in FFNs. Defaults to 2.
        num_heads (int): Number of attention heads in
            MultiheadAttention. Defaults to 8.
        num_cls_fcs (int): The number of fully-connected
            layers in classification subnet. Defaults to 1.
        num_reg_fcs (int): The number of fully-connected
            layers in regression subnet. Defaults to 3.
        feedforward_channels (int): The hidden dimension
            of FFNs. Defaults to 2048.
        in_channels (int): Hidden channels of MultiheadAttention.
            Defaults to 256.
        dropout (float): Probability of dropping a channel.
            Defaults to 0.0.
ffn_act_cfg (dict): The activation config for FFNs.
dynamic_conv_cfg (dict): The convolution config
for DynamicConv.
loss_iou (dict): The config for iou or giou loss.
"""
def __init__(self,
num_classes=80,
num_ffn_fcs=2,
num_heads=8,
num_cls_fcs=1,
num_reg_fcs=3,
feedforward_channels=2048,
in_channels=256,
dropout=0.0,
ffn_act_cfg=dict(type='ReLU', inplace=True),
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=256,
feat_channels=64,
out_channels=256,
input_feat_shape=7,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
loss_iou=dict(type='GIoULoss', loss_weight=2.0),
init_cfg=None,
**kwargs):
assert init_cfg is None, 'To prevent abnormal initialization ' \
'behavior, init_cfg is not allowed to be set'
super(DIIHead, self).__init__(
num_classes=num_classes,
reg_decoded_bbox=True,
reg_class_agnostic=True,
init_cfg=init_cfg,
**kwargs)
self.loss_iou = build_loss(loss_iou)
self.in_channels = in_channels
self.fp16_enabled = False
self.attention = MultiheadAttention(in_channels, num_heads, dropout)
self.attention_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
self.instance_interactive_conv = build_transformer(dynamic_conv_cfg)
self.instance_interactive_conv_dropout = nn.Dropout(dropout)
self.instance_interactive_conv_norm = build_norm_layer(
dict(type='LN'), in_channels)[1]
self.ffn = FFN(
in_channels,
feedforward_channels,
num_ffn_fcs,
act_cfg=ffn_act_cfg,
dropout=dropout)
self.ffn_norm = build_norm_layer(dict(type='LN'), in_channels)[1]
self.cls_fcs = nn.ModuleList()
for _ in range(num_cls_fcs):
self.cls_fcs.append(
nn.Linear(in_channels, in_channels, bias=False))
self.cls_fcs.append(
build_norm_layer(dict(type='LN'), in_channels)[1])
self.cls_fcs.append(
build_activation_layer(dict(type='ReLU', inplace=True)))
        # override the self.fc_cls in BBoxHead
if self.loss_cls.use_sigmoid:
self.fc_cls = nn.Linear(in_channels, self.num_classes)
else:
self.fc_cls = nn.Linear(in_channels, self.num_classes + 1)
self.reg_fcs = nn.ModuleList()
for _ in range(num_reg_fcs):
self.reg_fcs.append(
nn.Linear(in_channels, in_channels, bias=False))
self.reg_fcs.append(
build_norm_layer(dict(type='LN'), in_channels)[1])
self.reg_fcs.append(
build_activation_layer(dict(type='ReLU', inplace=True)))
        # override the self.fc_reg in BBoxHead
self.fc_reg = nn.Linear(in_channels, 4)
assert self.reg_class_agnostic, 'DIIHead only ' \
            'supports `reg_class_agnostic=True` '
assert self.reg_decoded_bbox, 'DIIHead only ' \
            'supports `reg_decoded_bbox=True`'
def init_weights(self):
"""Use xavier initialization for all weight parameter and set
classification head bias as a specific value when use focal loss."""
super(DIIHead, self).init_weights()
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
else:
# adopt the default initialization for
# the weight and bias of the layer norm
pass
if self.loss_cls.use_sigmoid:
bias_init = bias_init_with_prob(0.01)
nn.init.constant_(self.fc_cls.bias, bias_init)
@auto_fp16()
def forward(self, roi_feat, proposal_feat):
"""Forward function of Dynamic Instance Interactive Head.
Args:
roi_feat (Tensor): Roi-pooling features with shape
(batch_size*num_proposals, feature_dimensions,
pooling_h , pooling_w).
proposal_feat (Tensor): Intermediate feature get from
diihead in last stage, has shape
(batch_size, num_proposals, feature_dimensions)
Returns:
tuple[Tensor]: Usually a tuple of classification scores
            and bbox prediction and an intermediate feature.
- cls_scores (Tensor): Classification scores for
all proposals, has shape
(batch_size, num_proposals, num_classes).
- bbox_preds (Tensor): Box energies / deltas for
all proposals, has shape
(batch_size, num_proposals, 4).
- obj_feat (Tensor): Object feature before classification
and regression subnet, has shape
(batch_size, num_proposal, feature_dimensions).
"""
N, num_proposals = proposal_feat.shape[:2]
# Self attention
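        # The attention module expects inputs of shape
        # (num_queries, batch_size, embed_dims) (batch_first=False), hence
        # the permutes around the attention call.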
proposal_feat = proposal_feat.permute(1, 0, 2)
proposal_feat = self.attention_norm(self.attention(proposal_feat))
attn_feats = proposal_feat.permute(1, 0, 2)
# instance interactive
proposal_feat = attn_feats.reshape(-1, self.in_channels)
proposal_feat_iic = self.instance_interactive_conv(
proposal_feat, roi_feat)
proposal_feat = proposal_feat + self.instance_interactive_conv_dropout(
proposal_feat_iic)
obj_feat = self.instance_interactive_conv_norm(proposal_feat)
# FFN
obj_feat = self.ffn_norm(self.ffn(obj_feat))
cls_feat = obj_feat
reg_feat = obj_feat
for cls_layer in self.cls_fcs:
cls_feat = cls_layer(cls_feat)
for reg_layer in self.reg_fcs:
reg_feat = reg_layer(reg_feat)
cls_score = self.fc_cls(cls_feat).view(
N, num_proposals, self.num_classes
if self.loss_cls.use_sigmoid else self.num_classes + 1)
bbox_delta = self.fc_reg(reg_feat).view(N, num_proposals, 4)
return cls_score, bbox_delta, obj_feat.view(
N, num_proposals, self.in_channels), attn_feats
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def loss(self,
cls_score,
bbox_pred,
labels,
label_weights,
bbox_targets,
bbox_weights,
imgs_whwh=None,
reduction_override=None,
**kwargs):
""""Loss function of DIIHead, get loss of all images.
Args:
cls_score (Tensor): Classification prediction
                results of all classes, has shape
                (batch_size * num_proposals_single_image, num_classes).
            bbox_pred (Tensor): Regression prediction results,
                has shape
                (batch_size * num_proposals_single_image, 4), the last
                dimension 4 represents [tl_x, tl_y, br_x, br_y].
            labels (Tensor): Label of each proposal, has shape
                (batch_size * num_proposals_single_image, ).
            label_weights (Tensor): Classification loss
                weight of each proposal, has shape
                (batch_size * num_proposals_single_image, ).
            bbox_targets (Tensor): Regression targets of each
                proposal, has shape
                (batch_size * num_proposals_single_image, 4),
                the last dimension 4 represents
                [tl_x, tl_y, br_x, br_y].
            bbox_weights (Tensor): Regression loss weight of each
                proposal's coordinates, has shape
                (batch_size * num_proposals_single_image, 4).
            imgs_whwh (Tensor): Tensor with
                shape (batch_size, num_proposals, 4), the last
                dimension means
                [img_width, img_height, img_width, img_height].
            reduction_override (str, optional): The reduction
                method used to override the original reduction
                method of the loss. Options are "none",
                "mean" and "sum". Defaults to None.
Returns:
dict[str, Tensor]: Dictionary of loss components
"""
losses = dict()
bg_class_ind = self.num_classes
        # note in Sparse R-CNN num_gt == num_pos
pos_inds = (labels >= 0) & (labels < bg_class_ind)
num_pos = pos_inds.sum().float()
avg_factor = reduce_mean(num_pos)
if cls_score is not None:
if cls_score.numel() > 0:
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['pos_acc'] = accuracy(cls_score[pos_inds],
labels[pos_inds])
if bbox_pred is not None:
# 0~self.num_classes-1 are FG, self.num_classes is BG
# do not perform bounding box regression for BG anymore.
if pos_inds.any():
pos_bbox_pred = bbox_pred.reshape(bbox_pred.size(0),
4)[pos_inds.type(torch.bool)]
imgs_whwh = imgs_whwh.reshape(bbox_pred.size(0),
4)[pos_inds.type(torch.bool)]
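                # The box regression term is computed on coordinates
                # normalized by the image size (imgs_whwh), while the
                # IoU-based term below uses absolute coordinates.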
losses['loss_bbox'] = self.loss_bbox(
pos_bbox_pred / imgs_whwh,
bbox_targets[pos_inds.type(torch.bool)] / imgs_whwh,
bbox_weights[pos_inds.type(torch.bool)],
avg_factor=avg_factor)
losses['loss_iou'] = self.loss_iou(
pos_bbox_pred,
bbox_targets[pos_inds.type(torch.bool)],
bbox_weights[pos_inds.type(torch.bool)],
avg_factor=avg_factor)
else:
losses['loss_bbox'] = bbox_pred.sum() * 0
losses['loss_iou'] = bbox_pred.sum() * 0
return losses
def _get_target_single(self, pos_inds, neg_inds, pos_bboxes, neg_bboxes,
pos_gt_bboxes, pos_gt_labels, cfg):
"""Calculate the ground truth for proposals in the single image
according to the sampling results.
Almost the same as the implementation in `bbox_head`,
we add pos_inds and neg_inds to select positive and
negative samples instead of selecting the first num_pos
as positive samples.
Args:
pos_inds (Tensor): The length is equal to the
positive sample numbers contain all index
of the positive sample in the origin proposal set.
neg_inds (Tensor): The length is equal to the
negative sample numbers contain all index
of the negative sample in the origin proposal set.
pos_bboxes (Tensor): Contains all the positive boxes,
has shape (num_pos, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
neg_bboxes (Tensor): Contains all the negative boxes,
has shape (num_neg, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
pos_gt_bboxes (Tensor): Contains gt_boxes for
all positive samples, has shape (num_pos, 4),
the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
pos_gt_labels (Tensor): Contains gt_labels for
all positive samples, has shape (num_pos, ).
cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following Tensors:
- labels(Tensor): Gt_labels for all proposals, has
shape (num_proposals,).
- label_weights(Tensor): Labels_weights for all proposals, has
shape (num_proposals,).
- bbox_targets(Tensor):Regression target for all proposals, has
shape (num_proposals, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
- bbox_weights(Tensor):Regression weights for all proposals,
has shape (num_proposals, 4).
"""
num_pos = pos_bboxes.size(0)
num_neg = neg_bboxes.size(0)
num_samples = num_pos + num_neg
        # The original implementation used new_zeros since BG was set to 0;
        # now we fill with num_classes because BG cat_id = num_classes and
        # FG cat_id = [0, num_classes-1].
labels = pos_bboxes.new_full((num_samples, ),
self.num_classes,
dtype=torch.long)
label_weights = pos_bboxes.new_zeros(num_samples)
bbox_targets = pos_bboxes.new_zeros(num_samples, 4)
bbox_weights = pos_bboxes.new_zeros(num_samples, 4)
if num_pos > 0:
labels[pos_inds] = pos_gt_labels
pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight
label_weights[pos_inds] = pos_weight
if not self.reg_decoded_bbox:
pos_bbox_targets = self.bbox_coder.encode(
pos_bboxes, pos_gt_bboxes)
else:
pos_bbox_targets = pos_gt_bboxes
bbox_targets[pos_inds, :] = pos_bbox_targets
bbox_weights[pos_inds, :] = 1
if num_neg > 0:
label_weights[neg_inds] = 1.0
return labels, label_weights, bbox_targets, bbox_weights
def get_targets(self,
sampling_results,
gt_bboxes,
gt_labels,
rcnn_train_cfg,
concat=True):
"""Calculate the ground truth for all samples in a batch according to
the sampling_results.
Almost the same as the implementation in bbox_head, we passed
additional parameters pos_inds_list and neg_inds_list to
`_get_target_single` function.
Args:
sampling_results (List[obj:SamplingResults]): Assign results of
all images in a batch after sampling.
gt_bboxes (list[Tensor]): Gt_bboxes of all images in a batch,
each tensor has shape (num_gt, 4), the last dimension 4
represents [tl_x, tl_y, br_x, br_y].
gt_labels (list[Tensor]): Gt_labels of all images in a batch,
each tensor has shape (num_gt,).
rcnn_train_cfg (obj:`ConfigDict`): `train_cfg` of RCNN.
concat (bool): Whether to concatenate the results of all
the images in a single batch.
Returns:
Tuple[Tensor]: Ground truth for proposals in a single image.
Containing the following list of Tensors:
- labels (list[Tensor],Tensor): Gt_labels for all
proposals in a batch, each tensor in list has
shape (num_proposals,) when `concat=False`, otherwise just
a single tensor has shape (num_all_proposals,).
- label_weights (list[Tensor]): Labels_weights for
all proposals in a batch, each tensor in list has shape
(num_proposals,) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals,).
- bbox_targets (list[Tensor],Tensor): Regression target
for all proposals in a batch, each tensor in list has
shape (num_proposals, 4) when `concat=False`, otherwise
just a single tensor has shape (num_all_proposals, 4),
the last dimension 4 represents [tl_x, tl_y, br_x, br_y].
- bbox_weights (list[tensor],Tensor): Regression weights for
all proposals in a batch, each tensor in list has shape
(num_proposals, 4) when `concat=False`, otherwise just a
single tensor has shape (num_all_proposals, 4).
"""
pos_inds_list = [res.pos_inds for res in sampling_results]
neg_inds_list = [res.neg_inds for res in sampling_results]
pos_bboxes_list = [res.pos_bboxes for res in sampling_results]
neg_bboxes_list = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]
labels, label_weights, bbox_targets, bbox_weights = multi_apply(
self._get_target_single,
pos_inds_list,
neg_inds_list,
pos_bboxes_list,
neg_bboxes_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bbox_targets = torch.cat(bbox_targets, 0)
bbox_weights = torch.cat(bbox_weights, 0)
return labels, label_weights, bbox_targets, bbox_weights
| 19,199 | 43.964871 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/bbox_heads/double_bbox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, ModuleList
from mmdet.models.backbones.resnet import Bottleneck
from mmdet.models.builder import HEADS
from .bbox_head import BBoxHead
class BasicResBlock(BaseModule):
"""Basic residual block.
This block is a little different from the block in the ResNet backbone.
The kernel size of conv1 is 1 in this block while 3 in ResNet BasicBlock.
Args:
in_channels (int): Channels of the input feature map.
out_channels (int): Channels of the output feature map.
conv_cfg (dict): The config dict for convolution layers.
norm_cfg (dict): The config dict for normalization layers.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
in_channels,
out_channels,
conv_cfg=None,
norm_cfg=dict(type='BN'),
init_cfg=None):
super(BasicResBlock, self).__init__(init_cfg)
# main path
self.conv1 = ConvModule(
in_channels,
in_channels,
kernel_size=3,
padding=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg)
self.conv2 = ConvModule(
in_channels,
out_channels,
kernel_size=1,
bias=False,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
# identity path
self.conv_identity = ConvModule(
in_channels,
out_channels,
kernel_size=1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=None)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
identity = x
x = self.conv1(x)
x = self.conv2(x)
identity = self.conv_identity(identity)
out = x + identity
out = self.relu(out)
return out
@HEADS.register_module()
class DoubleConvFCBBoxHead(BBoxHead):
r"""Bbox head used in Double-Head R-CNN
.. code-block:: none
/-> cls
/-> shared convs ->
\-> reg
roi features
/-> cls
\-> shared fc ->
\-> reg
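    Example:
        >>> # Illustrative sketch, not part of the original implementation;
        >>> # it assumes torch and a working mmdet install. The conv branch
        >>> # consumes ``x_reg`` for regression and the fc branch consumes
        >>> # ``x_cls`` for classification. Values are arbitrary.
        >>> import torch
        >>> self = DoubleConvFCBBoxHead(
        ...     num_convs=4, num_fcs=2, in_channels=256,
        ...     conv_out_channels=1024, fc_out_channels=1024, num_classes=80)
        >>> x = torch.rand(8, 256, 7, 7)  # 8 RoIs
        >>> cls_score, bbox_pred = self(x, x)
        >>> # cls_score: (8, 81), bbox_pred: (8, 320)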
""" # noqa: W605
def __init__(self,
num_convs=0,
num_fcs=0,
conv_out_channels=1024,
fc_out_channels=1024,
conv_cfg=None,
norm_cfg=dict(type='BN'),
init_cfg=dict(
type='Normal',
override=[
dict(type='Normal', name='fc_cls', std=0.01),
dict(type='Normal', name='fc_reg', std=0.001),
dict(
type='Xavier',
name='fc_branch',
distribution='uniform')
]),
**kwargs):
kwargs.setdefault('with_avg_pool', True)
super(DoubleConvFCBBoxHead, self).__init__(init_cfg=init_cfg, **kwargs)
assert self.with_avg_pool
assert num_convs > 0
assert num_fcs > 0
self.num_convs = num_convs
self.num_fcs = num_fcs
self.conv_out_channels = conv_out_channels
self.fc_out_channels = fc_out_channels
self.conv_cfg = conv_cfg
self.norm_cfg = norm_cfg
# increase the channel of input features
self.res_block = BasicResBlock(self.in_channels,
self.conv_out_channels)
# add conv heads
self.conv_branch = self._add_conv_branch()
# add fc heads
self.fc_branch = self._add_fc_branch()
out_dim_reg = 4 if self.reg_class_agnostic else 4 * self.num_classes
self.fc_reg = nn.Linear(self.conv_out_channels, out_dim_reg)
self.fc_cls = nn.Linear(self.fc_out_channels, self.num_classes + 1)
self.relu = nn.ReLU(inplace=True)
def _add_conv_branch(self):
"""Add the fc branch which consists of a sequential of conv layers."""
branch_convs = ModuleList()
for i in range(self.num_convs):
branch_convs.append(
Bottleneck(
inplanes=self.conv_out_channels,
planes=self.conv_out_channels // 4,
conv_cfg=self.conv_cfg,
norm_cfg=self.norm_cfg))
return branch_convs
def _add_fc_branch(self):
"""Add the fc branch which consists of a sequential of fc layers."""
branch_fcs = ModuleList()
for i in range(self.num_fcs):
fc_in_channels = (
self.in_channels *
self.roi_feat_area if i == 0 else self.fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, self.fc_out_channels))
return branch_fcs
def forward(self, x_cls, x_reg):
# conv head
x_conv = self.res_block(x_reg)
for conv in self.conv_branch:
x_conv = conv(x_conv)
if self.with_avg_pool:
x_conv = self.avg_pool(x_conv)
x_conv = x_conv.view(x_conv.size(0), -1)
bbox_pred = self.fc_reg(x_conv)
# fc head
x_fc = x_cls.view(x_cls.size(0), -1)
for fc in self.fc_branch:
x_fc = self.relu(fc(x_fc))
cls_score = self.fc_cls(x_fc)
return cls_score, bbox_pred
| 5,733 | 31.03352 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/bbox_heads/sabl_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import ConvModule
from mmcv.runner import BaseModule, force_fp32
from mmdet.core import build_bbox_coder, multi_apply, multiclass_nms
from mmdet.models.builder import HEADS, build_loss
from mmdet.models.losses import accuracy
@HEADS.register_module()
class SABLHead(BaseModule):
"""Side-Aware Boundary Localization (SABL) for RoI-Head.
Side-Aware features are extracted by conv layers
with an attention mechanism.
Boundary Localization with Bucketing and Bucketing Guided Rescoring
are implemented in BucketingBBoxCoder.
Please refer to https://arxiv.org/abs/1912.04260 for more details.
Args:
cls_in_channels (int): Input channels of cls RoI feature. \
Defaults to 256.
reg_in_channels (int): Input channels of reg RoI feature. \
Defaults to 256.
roi_feat_size (int): Size of RoI features. Defaults to 7.
reg_feat_up_ratio (int): Upsample ratio of reg features. \
Defaults to 2.
reg_pre_kernel (int): Kernel of 2D conv layers before \
attention pooling. Defaults to 3.
reg_post_kernel (int): Kernel of 1D conv layers after \
attention pooling. Defaults to 3.
reg_pre_num (int): Number of pre convs. Defaults to 2.
reg_post_num (int): Number of post convs. Defaults to 1.
num_classes (int): Number of classes in dataset. Defaults to 80.
cls_out_channels (int): Hidden channels in cls fcs. Defaults to 1024.
reg_offset_out_channels (int): Hidden and output channel \
of reg offset branch. Defaults to 256.
reg_cls_out_channels (int): Hidden and output channel \
of reg cls branch. Defaults to 256.
num_cls_fcs (int): Number of fcs for cls branch. Defaults to 1.
num_reg_fcs (int): Number of fcs for reg branch.. Defaults to 0.
reg_class_agnostic (bool): Class agnostic regression or not. \
Defaults to True.
norm_cfg (dict): Config of norm layers. Defaults to None.
bbox_coder (dict): Config of bbox coder. Defaults 'BucketingBBoxCoder'.
loss_cls (dict): Config of classification loss.
loss_bbox_cls (dict): Config of classification loss for bbox branch.
loss_bbox_reg (dict): Config of regression loss for bbox branch.
init_cfg (dict or list[dict], optional): Initialization config dict.
Default: None
"""
def __init__(self,
num_classes,
cls_in_channels=256,
reg_in_channels=256,
roi_feat_size=7,
reg_feat_up_ratio=2,
reg_pre_kernel=3,
reg_post_kernel=3,
reg_pre_num=2,
reg_post_num=1,
cls_out_channels=1024,
reg_offset_out_channels=256,
reg_cls_out_channels=256,
num_cls_fcs=1,
num_reg_fcs=0,
reg_class_agnostic=True,
norm_cfg=None,
bbox_coder=dict(
type='BucketingBBoxCoder',
num_buckets=14,
scale_factor=1.7),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0),
loss_bbox_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0),
loss_bbox_reg=dict(
type='SmoothL1Loss', beta=0.1, loss_weight=1.0),
init_cfg=None):
super(SABLHead, self).__init__(init_cfg)
self.cls_in_channels = cls_in_channels
self.reg_in_channels = reg_in_channels
self.roi_feat_size = roi_feat_size
self.reg_feat_up_ratio = int(reg_feat_up_ratio)
self.num_buckets = bbox_coder['num_buckets']
assert self.reg_feat_up_ratio // 2 >= 1
self.up_reg_feat_size = roi_feat_size * self.reg_feat_up_ratio
assert self.up_reg_feat_size == bbox_coder['num_buckets']
self.reg_pre_kernel = reg_pre_kernel
self.reg_post_kernel = reg_post_kernel
self.reg_pre_num = reg_pre_num
self.reg_post_num = reg_post_num
self.num_classes = num_classes
self.cls_out_channels = cls_out_channels
self.reg_offset_out_channels = reg_offset_out_channels
self.reg_cls_out_channels = reg_cls_out_channels
self.num_cls_fcs = num_cls_fcs
self.num_reg_fcs = num_reg_fcs
self.reg_class_agnostic = reg_class_agnostic
assert self.reg_class_agnostic
self.norm_cfg = norm_cfg
self.bbox_coder = build_bbox_coder(bbox_coder)
self.loss_cls = build_loss(loss_cls)
self.loss_bbox_cls = build_loss(loss_bbox_cls)
self.loss_bbox_reg = build_loss(loss_bbox_reg)
self.cls_fcs = self._add_fc_branch(self.num_cls_fcs,
self.cls_in_channels,
self.roi_feat_size,
self.cls_out_channels)
self.side_num = int(np.ceil(self.num_buckets / 2))
if self.reg_feat_up_ratio > 1:
self.upsample_x = nn.ConvTranspose1d(
reg_in_channels,
reg_in_channels,
self.reg_feat_up_ratio,
stride=self.reg_feat_up_ratio)
self.upsample_y = nn.ConvTranspose1d(
reg_in_channels,
reg_in_channels,
self.reg_feat_up_ratio,
stride=self.reg_feat_up_ratio)
self.reg_pre_convs = nn.ModuleList()
for i in range(self.reg_pre_num):
reg_pre_conv = ConvModule(
reg_in_channels,
reg_in_channels,
kernel_size=reg_pre_kernel,
padding=reg_pre_kernel // 2,
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'))
self.reg_pre_convs.append(reg_pre_conv)
self.reg_post_conv_xs = nn.ModuleList()
for i in range(self.reg_post_num):
reg_post_conv_x = ConvModule(
reg_in_channels,
reg_in_channels,
kernel_size=(1, reg_post_kernel),
padding=(0, reg_post_kernel // 2),
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'))
self.reg_post_conv_xs.append(reg_post_conv_x)
self.reg_post_conv_ys = nn.ModuleList()
for i in range(self.reg_post_num):
reg_post_conv_y = ConvModule(
reg_in_channels,
reg_in_channels,
kernel_size=(reg_post_kernel, 1),
padding=(reg_post_kernel // 2, 0),
norm_cfg=norm_cfg,
act_cfg=dict(type='ReLU'))
self.reg_post_conv_ys.append(reg_post_conv_y)
self.reg_conv_att_x = nn.Conv2d(reg_in_channels, 1, 1)
self.reg_conv_att_y = nn.Conv2d(reg_in_channels, 1, 1)
self.fc_cls = nn.Linear(self.cls_out_channels, self.num_classes + 1)
self.relu = nn.ReLU(inplace=True)
self.reg_cls_fcs = self._add_fc_branch(self.num_reg_fcs,
self.reg_in_channels, 1,
self.reg_cls_out_channels)
self.reg_offset_fcs = self._add_fc_branch(self.num_reg_fcs,
self.reg_in_channels, 1,
self.reg_offset_out_channels)
self.fc_reg_cls = nn.Linear(self.reg_cls_out_channels, 1)
self.fc_reg_offset = nn.Linear(self.reg_offset_out_channels, 1)
if init_cfg is None:
self.init_cfg = [
dict(
type='Xavier',
layer='Linear',
distribution='uniform',
override=[
dict(type='Normal', name='reg_conv_att_x', std=0.01),
dict(type='Normal', name='reg_conv_att_y', std=0.01),
dict(type='Normal', name='fc_reg_cls', std=0.01),
dict(type='Normal', name='fc_cls', std=0.01),
dict(type='Normal', name='fc_reg_offset', std=0.001)
])
]
if self.reg_feat_up_ratio > 1:
self.init_cfg += [
dict(
type='Kaiming',
distribution='normal',
override=[
dict(name='upsample_x'),
dict(name='upsample_y')
])
]
@property
def custom_cls_channels(self):
return getattr(self.loss_cls, 'custom_cls_channels', False)
@property
def custom_activation(self):
return getattr(self.loss_cls, 'custom_activation', False)
@property
def custom_accuracy(self):
return getattr(self.loss_cls, 'custom_accuracy', False)
def _add_fc_branch(self, num_branch_fcs, in_channels, roi_feat_size,
fc_out_channels):
in_channels = in_channels * roi_feat_size * roi_feat_size
branch_fcs = nn.ModuleList()
for i in range(num_branch_fcs):
fc_in_channels = (in_channels if i == 0 else fc_out_channels)
branch_fcs.append(nn.Linear(fc_in_channels, fc_out_channels))
return branch_fcs
def cls_forward(self, cls_x):
cls_x = cls_x.view(cls_x.size(0), -1)
for fc in self.cls_fcs:
cls_x = self.relu(fc(cls_x))
cls_score = self.fc_cls(cls_x)
return cls_score
def attention_pool(self, reg_x):
"""Extract direction-specific features fx and fy with attention
methanism."""
reg_fx = reg_x
reg_fy = reg_x
reg_fx_att = self.reg_conv_att_x(reg_fx).sigmoid()
reg_fy_att = self.reg_conv_att_y(reg_fy).sigmoid()
reg_fx_att = reg_fx_att / reg_fx_att.sum(dim=2).unsqueeze(2)
reg_fy_att = reg_fy_att / reg_fy_att.sum(dim=3).unsqueeze(3)
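        # The attention maps are normalized over H for the x-direction and
        # over W for the y-direction, so the weighted sums below collapse
        # the 2-D RoI feature into one 1-D feature per spatial axis.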
reg_fx = (reg_fx * reg_fx_att).sum(dim=2)
reg_fy = (reg_fy * reg_fy_att).sum(dim=3)
return reg_fx, reg_fy
def side_aware_feature_extractor(self, reg_x):
"""Refine and extract side-aware features without split them."""
for reg_pre_conv in self.reg_pre_convs:
reg_x = reg_pre_conv(reg_x)
reg_fx, reg_fy = self.attention_pool(reg_x)
if self.reg_post_num > 0:
reg_fx = reg_fx.unsqueeze(2)
reg_fy = reg_fy.unsqueeze(3)
for i in range(self.reg_post_num):
reg_fx = self.reg_post_conv_xs[i](reg_fx)
reg_fy = self.reg_post_conv_ys[i](reg_fy)
reg_fx = reg_fx.squeeze(2)
reg_fy = reg_fy.squeeze(3)
if self.reg_feat_up_ratio > 1:
reg_fx = self.relu(self.upsample_x(reg_fx))
reg_fy = self.relu(self.upsample_y(reg_fy))
reg_fx = torch.transpose(reg_fx, 1, 2)
reg_fy = torch.transpose(reg_fy, 1, 2)
return reg_fx.contiguous(), reg_fy.contiguous()
def reg_pred(self, x, offset_fcs, cls_fcs):
"""Predict bucketing estimation (cls_pred) and fine regression (offset
pred) with side-aware features."""
x_offset = x.view(-1, self.reg_in_channels)
x_cls = x.view(-1, self.reg_in_channels)
for fc in offset_fcs:
x_offset = self.relu(fc(x_offset))
for fc in cls_fcs:
x_cls = self.relu(fc(x_cls))
offset_pred = self.fc_reg_offset(x_offset)
cls_pred = self.fc_reg_cls(x_cls)
offset_pred = offset_pred.view(x.size(0), -1)
cls_pred = cls_pred.view(x.size(0), -1)
return offset_pred, cls_pred
def side_aware_split(self, feat):
"""Split side-aware features aligned with orders of bucketing
targets."""
l_end = int(np.ceil(self.up_reg_feat_size / 2))
r_start = int(np.floor(self.up_reg_feat_size / 2))
feat_fl = feat[:, :l_end]
feat_fr = feat[:, r_start:].flip(dims=(1, ))
feat_fl = feat_fl.contiguous()
feat_fr = feat_fr.contiguous()
feat = torch.cat([feat_fl, feat_fr], dim=-1)
return feat
def bbox_pred_split(self, bbox_pred, num_proposals_per_img):
"""Split batch bbox prediction back to each image."""
bucket_cls_preds, bucket_offset_preds = bbox_pred
bucket_cls_preds = bucket_cls_preds.split(num_proposals_per_img, 0)
bucket_offset_preds = bucket_offset_preds.split(
num_proposals_per_img, 0)
bbox_pred = tuple(zip(bucket_cls_preds, bucket_offset_preds))
return bbox_pred
def reg_forward(self, reg_x):
outs = self.side_aware_feature_extractor(reg_x)
edge_offset_preds = []
edge_cls_preds = []
reg_fx = outs[0]
reg_fy = outs[1]
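        # Predict bucket scores and in-bucket offsets separately from the x-
        # and y-axis features, then reorder each prediction per box side.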
offset_pred_x, cls_pred_x = self.reg_pred(reg_fx, self.reg_offset_fcs,
self.reg_cls_fcs)
offset_pred_y, cls_pred_y = self.reg_pred(reg_fy, self.reg_offset_fcs,
self.reg_cls_fcs)
offset_pred_x = self.side_aware_split(offset_pred_x)
offset_pred_y = self.side_aware_split(offset_pred_y)
cls_pred_x = self.side_aware_split(cls_pred_x)
cls_pred_y = self.side_aware_split(cls_pred_y)
edge_offset_preds = torch.cat([offset_pred_x, offset_pred_y], dim=-1)
edge_cls_preds = torch.cat([cls_pred_x, cls_pred_y], dim=-1)
return (edge_cls_preds, edge_offset_preds)
def forward(self, x):
bbox_pred = self.reg_forward(x)
cls_score = self.cls_forward(x)
return cls_score, bbox_pred
def get_targets(self, sampling_results, gt_bboxes, gt_labels,
rcnn_train_cfg):
pos_proposals = [res.pos_bboxes for res in sampling_results]
neg_proposals = [res.neg_bboxes for res in sampling_results]
pos_gt_bboxes = [res.pos_gt_bboxes for res in sampling_results]
pos_gt_labels = [res.pos_gt_labels for res in sampling_results]
cls_reg_targets = self.bucket_target(pos_proposals, neg_proposals,
pos_gt_bboxes, pos_gt_labels,
rcnn_train_cfg)
(labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights) = cls_reg_targets
return (labels, label_weights, (bucket_cls_targets,
bucket_offset_targets),
(bucket_cls_weights, bucket_offset_weights))
def bucket_target(self,
pos_proposals_list,
neg_proposals_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
rcnn_train_cfg,
concat=True):
(labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights) = multi_apply(
self._bucket_target_single,
pos_proposals_list,
neg_proposals_list,
pos_gt_bboxes_list,
pos_gt_labels_list,
cfg=rcnn_train_cfg)
if concat:
labels = torch.cat(labels, 0)
label_weights = torch.cat(label_weights, 0)
bucket_cls_targets = torch.cat(bucket_cls_targets, 0)
bucket_cls_weights = torch.cat(bucket_cls_weights, 0)
bucket_offset_targets = torch.cat(bucket_offset_targets, 0)
bucket_offset_weights = torch.cat(bucket_offset_weights, 0)
return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights)
def _bucket_target_single(self, pos_proposals, neg_proposals,
pos_gt_bboxes, pos_gt_labels, cfg):
"""Compute bucketing estimation targets and fine regression targets for
a single image.
Args:
pos_proposals (Tensor): positive proposals of a single image,
Shape (n_pos, 4)
neg_proposals (Tensor): negative proposals of a single image,
Shape (n_neg, 4).
pos_gt_bboxes (Tensor): gt bboxes assigned to positive proposals
of a single image, Shape (n_pos, 4).
pos_gt_labels (Tensor): gt labels assigned to positive proposals
of a single image, Shape (n_pos, ).
cfg (dict): Config of calculating targets
Returns:
tuple:
- labels (Tensor): Labels in a single image. \
Shape (n,).
- label_weights (Tensor): Label weights in a single image.\
Shape (n,)
- bucket_cls_targets (Tensor): Bucket cls targets in \
a single image. Shape (n, num_buckets*2).
- bucket_cls_weights (Tensor): Bucket cls weights in \
a single image. Shape (n, num_buckets*2).
- bucket_offset_targets (Tensor): Bucket offset targets \
in a single image. Shape (n, num_buckets*2).
                - bucket_offset_weights (Tensor): Bucket offset weights \
                    in a single image. Shape (n, num_buckets*2).
"""
num_pos = pos_proposals.size(0)
num_neg = neg_proposals.size(0)
num_samples = num_pos + num_neg
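        # Every sample starts as background (label == self.num_classes);
        # positive samples are overwritten below.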
labels = pos_gt_bboxes.new_full((num_samples, ),
self.num_classes,
dtype=torch.long)
label_weights = pos_proposals.new_zeros(num_samples)
bucket_cls_targets = pos_proposals.new_zeros(num_samples,
4 * self.side_num)
bucket_cls_weights = pos_proposals.new_zeros(num_samples,
4 * self.side_num)
bucket_offset_targets = pos_proposals.new_zeros(
num_samples, 4 * self.side_num)
bucket_offset_weights = pos_proposals.new_zeros(
num_samples, 4 * self.side_num)
if num_pos > 0:
labels[:num_pos] = pos_gt_labels
label_weights[:num_pos] = 1.0
(pos_bucket_offset_targets, pos_bucket_offset_weights,
pos_bucket_cls_targets,
pos_bucket_cls_weights) = self.bbox_coder.encode(
pos_proposals, pos_gt_bboxes)
bucket_cls_targets[:num_pos, :] = pos_bucket_cls_targets
bucket_cls_weights[:num_pos, :] = pos_bucket_cls_weights
bucket_offset_targets[:num_pos, :] = pos_bucket_offset_targets
bucket_offset_weights[:num_pos, :] = pos_bucket_offset_weights
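        # Negative samples only contribute to the classification loss; their
        # bucket targets and weights remain zero.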
if num_neg > 0:
label_weights[-num_neg:] = 1.0
return (labels, label_weights, bucket_cls_targets, bucket_cls_weights,
bucket_offset_targets, bucket_offset_weights)
def loss(self,
cls_score,
bbox_pred,
rois,
labels,
label_weights,
bbox_targets,
bbox_weights,
reduction_override=None):
losses = dict()
if cls_score is not None:
avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)
losses['loss_cls'] = self.loss_cls(
cls_score,
labels,
label_weights,
avg_factor=avg_factor,
reduction_override=reduction_override)
losses['acc'] = accuracy(cls_score, labels)
if bbox_pred is not None:
bucket_cls_preds, bucket_offset_preds = bbox_pred
bucket_cls_targets, bucket_offset_targets = bbox_targets
bucket_cls_weights, bucket_offset_weights = bbox_weights
# edge cls
bucket_cls_preds = bucket_cls_preds.view(-1, self.side_num)
bucket_cls_targets = bucket_cls_targets.view(-1, self.side_num)
bucket_cls_weights = bucket_cls_weights.view(-1, self.side_num)
losses['loss_bbox_cls'] = self.loss_bbox_cls(
bucket_cls_preds,
bucket_cls_targets,
bucket_cls_weights,
avg_factor=bucket_cls_targets.size(0),
reduction_override=reduction_override)
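            # Fine regression of the offsets within the estimated buckets.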
losses['loss_bbox_reg'] = self.loss_bbox_reg(
bucket_offset_preds,
bucket_offset_targets,
bucket_offset_weights,
avg_factor=bucket_offset_targets.size(0),
reduction_override=reduction_override)
return losses
@force_fp32(apply_to=('cls_score', 'bbox_pred'))
def get_bboxes(self,
rois,
cls_score,
bbox_pred,
img_shape,
scale_factor,
rescale=False,
cfg=None):
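        # cls_score may be a list (e.g. per-stage scores during cascade
        # testing); average it before applying the softmax.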
if isinstance(cls_score, list):
cls_score = sum(cls_score) / float(len(cls_score))
scores = F.softmax(cls_score, dim=1) if cls_score is not None else None
if bbox_pred is not None:
bboxes, confidences = self.bbox_coder.decode(
rois[:, 1:], bbox_pred, img_shape)
else:
bboxes = rois[:, 1:].clone()
confidences = None
if img_shape is not None:
bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1] - 1)
bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0] - 1)
if rescale and bboxes.size(0) > 0:
if isinstance(scale_factor, float):
bboxes /= scale_factor
else:
bboxes /= torch.from_numpy(scale_factor).to(bboxes.device)
if cfg is None:
return bboxes, scores
else:
det_bboxes, det_labels = multiclass_nms(
bboxes,
scores,
cfg.score_thr,
cfg.nms,
cfg.max_per_img,
score_factors=confidences)
return det_bboxes, det_labels
@force_fp32(apply_to=('bbox_preds', ))
def refine_bboxes(self, rois, labels, bbox_preds, pos_is_gts, img_metas):
"""Refine bboxes during training.
Args:
            rois (Tensor): Shape (n*bs, 5), where n is the number of images
                per GPU and bs is the number of sampled RoIs per image.
labels (Tensor): Shape (n*bs, ).
bbox_preds (list[Tensor]): Shape [(n*bs, num_buckets*2), \
(n*bs, num_buckets*2)].
pos_is_gts (list[Tensor]): Flags indicating if each positive bbox
is a gt bbox.
img_metas (list[dict]): Meta info of each image.
Returns:
list[Tensor]: Refined bboxes of each image in a mini-batch.
"""
img_ids = rois[:, 0].long().unique(sorted=True)
assert img_ids.numel() == len(img_metas)
bboxes_list = []
for i in range(len(img_metas)):
inds = torch.nonzero(
rois[:, 0] == i, as_tuple=False).squeeze(dim=1)
num_rois = inds.numel()
bboxes_ = rois[inds, 1:]
label_ = labels[inds]
edge_cls_preds, edge_offset_preds = bbox_preds
edge_cls_preds_ = edge_cls_preds[inds]
edge_offset_preds_ = edge_offset_preds[inds]
bbox_pred_ = [edge_cls_preds_, edge_offset_preds_]
img_meta_ = img_metas[i]
pos_is_gts_ = pos_is_gts[i]
bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,
img_meta_)
# filter gt bboxes
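            # pos_is_gts_ flags sampled positives that are ground-truth boxes;
            # drop them so only genuine proposals are refined for the next stage.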
pos_keep = 1 - pos_is_gts_
keep_inds = pos_is_gts_.new_ones(num_rois)
keep_inds[:len(pos_is_gts_)] = pos_keep
bboxes_list.append(bboxes[keep_inds.type(torch.bool)])
return bboxes_list
@force_fp32(apply_to=('bbox_pred', ))
def regress_by_class(self, rois, label, bbox_pred, img_meta):
"""Regress the bbox for the predicted class. Used in Cascade R-CNN.
Args:
rois (Tensor): shape (n, 4) or (n, 5)
label (Tensor): shape (n, )
bbox_pred (list[Tensor]): shape [(n, num_buckets *2), \
(n, num_buckets *2)]
img_meta (dict): Image meta info.
Returns:
Tensor: Regressed bboxes, the same shape as input rois.
"""
assert rois.size(1) == 4 or rois.size(1) == 5
if rois.size(1) == 4:
new_rois, _ = self.bbox_coder.decode(rois, bbox_pred,
img_meta['img_shape'])
else:
bboxes, _ = self.bbox_coder.decode(rois[:, 1:], bbox_pred,
img_meta['img_shape'])
new_rois = torch.cat((rois[:, [0]], bboxes), dim=1)
return new_rois
| 25,392 | 41.534338 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/bbox_heads/scnet_bbox_head.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet.models.builder import HEADS
from .convfc_bbox_head import ConvFCBBoxHead
@HEADS.register_module()
class SCNetBBoxHead(ConvFCBBoxHead):
"""BBox head for `SCNet <https://arxiv.org/abs/2012.10150>`_.
    This inherits ``ConvFCBBoxHead`` with a modified forward() function,
    allowing access to the intermediate shared feature.
"""
def _forward_shared(self, x):
"""Forward function for shared part."""
if self.num_shared_convs > 0:
for conv in self.shared_convs:
x = conv(x)
if self.num_shared_fcs > 0:
if self.with_avg_pool:
x = self.avg_pool(x)
x = x.flatten(1)
for fc in self.shared_fcs:
x = self.relu(fc(x))
return x
def _forward_cls_reg(self, x):
"""Forward function for classification and regression parts."""
x_cls = x
x_reg = x
for conv in self.cls_convs:
x_cls = conv(x_cls)
if x_cls.dim() > 2:
if self.with_avg_pool:
x_cls = self.avg_pool(x_cls)
x_cls = x_cls.flatten(1)
for fc in self.cls_fcs:
x_cls = self.relu(fc(x_cls))
for conv in self.reg_convs:
x_reg = conv(x_reg)
if x_reg.dim() > 2:
if self.with_avg_pool:
x_reg = self.avg_pool(x_reg)
x_reg = x_reg.flatten(1)
for fc in self.reg_fcs:
x_reg = self.relu(fc(x_reg))
cls_score = self.fc_cls(x_cls) if self.with_cls else None
bbox_pred = self.fc_reg(x_reg) if self.with_reg else None
return cls_score, bbox_pred
def forward(self, x, return_shared_feat=False):
"""Forward function.
Args:
x (Tensor): input features
return_shared_feat (bool): If True, return cls-reg-shared feature.
Return:
            out (tuple[Tensor]): Contains ``cls_score`` and ``bbox_pred``;
                if ``return_shared_feat`` is True, ``x_shared`` is appended
                to the returned tuple.
"""
x_shared = self._forward_shared(x)
out = self._forward_cls_reg(x_shared)
if return_shared_feat:
out += (x_shared, )
return out
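

# A minimal usage sketch (illustrative only): the constructor arguments below
# follow the usual ConvFCBBoxHead/BBoxHead interface and are assumptions, not
# values taken from this file.
#
#   import torch
#
#   head = SCNetBBoxHead(
#       num_shared_fcs=2, in_channels=256, fc_out_channels=1024,
#       roi_feat_size=7, num_classes=80)
#   roi_feats = torch.randn(8, 256, 7, 7)  # pooled features of 8 RoIs
#   cls_score, bbox_pred, shared_feat = head(
#       roi_feats, return_shared_feat=True)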
| 2,307 | 28.589744 | 79 | py |
mmdetection | mmdetection-master/mmdet/models/roi_heads/mask_heads/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .coarse_mask_head import CoarseMaskHead
from .dynamic_mask_head import DynamicMaskHead
from .fcn_mask_head import FCNMaskHead
from .feature_relay_head import FeatureRelayHead
from .fused_semantic_head import FusedSemanticHead
from .global_context_head import GlobalContextHead
from .grid_head import GridHead
from .htc_mask_head import HTCMaskHead
from .mask_point_head import MaskPointHead
from .maskiou_head import MaskIoUHead
from .scnet_mask_head import SCNetMaskHead
from .scnet_semantic_head import SCNetSemanticHead
__all__ = [
'FCNMaskHead', 'HTCMaskHead', 'FusedSemanticHead', 'GridHead',
'MaskIoUHead', 'CoarseMaskHead', 'MaskPointHead', 'SCNetMaskHead',
'SCNetSemanticHead', 'GlobalContextHead', 'FeatureRelayHead',
'DynamicMaskHead'
]
| 817 | 37.952381 | 70 | py |