Dataset columns: repo (string, length 2–152) · file (string, length 15–239) · code (string, length 0–58.4M) · file_length (int64, 0–58.4M) · avg_line_length (float64, 0–1.81M) · max_line_length (int64, 0–12.7M) · extension_type (string, 364 classes)
mmdetection-master/mmdet/core/bbox/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .assigners import (AssignResult, BaseAssigner, CenterRegionAssigner,
                        MaxIoUAssigner, RegionAssigner)
from .builder import build_assigner, build_bbox_coder, build_sampler
from .coder import (BaseBBoxCoder, DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
                    PseudoBBoxCoder, TBLRBBoxCoder)
from .iou_calculators import BboxOverlaps2D, bbox_overlaps
from .samplers import (BaseSampler, CombinedSampler,
                       InstanceBalancedPosSampler, IoUBalancedNegSampler,
                       OHEMSampler, PseudoSampler, RandomSampler,
                       SamplingResult, ScoreHLRSampler)
from .transforms import (bbox2distance, bbox2result, bbox2roi,
                         bbox_cxcywh_to_xyxy, bbox_flip, bbox_mapping,
                         bbox_mapping_back, bbox_rescale, bbox_xyxy_to_cxcywh,
                         distance2bbox, find_inside_bboxes, roi2bbox)

__all__ = [
    'bbox_overlaps', 'BboxOverlaps2D', 'BaseAssigner', 'MaxIoUAssigner',
    'AssignResult', 'BaseSampler', 'PseudoSampler', 'RandomSampler',
    'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler',
    'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'build_assigner',
    'build_sampler', 'bbox_flip', 'bbox_mapping', 'bbox_mapping_back',
    'bbox2roi', 'roi2bbox', 'bbox2result', 'distance2bbox', 'bbox2distance',
    'build_bbox_coder', 'BaseBBoxCoder', 'PseudoBBoxCoder',
    'DeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'DistancePointBBoxCoder',
    'CenterRegionAssigner', 'bbox_rescale', 'bbox_cxcywh_to_xyxy',
    'bbox_xyxy_to_cxcywh', 'RegionAssigner', 'find_inside_bboxes'
]
(file_length 1,688 · avg_line_length 57.241379 · max_line_length 78 · extension_type py)
mmdetection-master/mmdet/core/bbox/builder.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg

BBOX_ASSIGNERS = Registry('bbox_assigner')
BBOX_SAMPLERS = Registry('bbox_sampler')
BBOX_CODERS = Registry('bbox_coder')


def build_assigner(cfg, **default_args):
    """Builder of box assigner."""
    return build_from_cfg(cfg, BBOX_ASSIGNERS, default_args)


def build_sampler(cfg, **default_args):
    """Builder of box sampler."""
    return build_from_cfg(cfg, BBOX_SAMPLERS, default_args)


def build_bbox_coder(cfg, **default_args):
    """Builder of box coder."""
    return build_from_cfg(cfg, BBOX_CODERS, default_args)
(file_length 628 · avg_line_length 27.590909 · max_line_length 60 · extension_type py)
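These registries are how every assigner, sampler, and coder in the config system gets constructed. A minimal usage sketch, assuming mmdet (and its registered MaxIoUAssigner) is installed; the threshold values are arbitrary:

# Build an assigner from a config dict via the BBOX_ASSIGNERS registry.
from mmdet.core.bbox import build_assigner

assigner_cfg = dict(
    type='MaxIoUAssigner',  # key registered by @BBOX_ASSIGNERS.register_module()
    pos_iou_thr=0.7,
    neg_iou_thr=0.3)
assigner = build_assigner(assigner_cfg)
print(type(assigner).__name__)  # MaxIoUAssigner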
mmdetection-master/mmdet/core/bbox/demodata.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch

from mmdet.utils.util_random import ensure_rng


def random_boxes(num=1, scale=1, rng=None):
    """Simple version of ``kwimage.Boxes.random``

    Returns:
        Tensor: shape (n, 4) in x1, y1, x2, y2 format.

    References:
        https://gitlab.kitware.com/computer-vision/kwimage/blob/master/kwimage/structs/boxes.py#L1390

    Example:
        >>> num = 3
        >>> scale = 512
        >>> rng = 0
        >>> boxes = random_boxes(num, scale, rng)
        >>> print(boxes)
        tensor([[280.9925, 278.9802, 308.6148, 366.1769],
                [216.9113, 330.6978, 224.0446, 456.5878],
                [405.3632, 196.3221, 493.3953, 270.7942]])
    """
    rng = ensure_rng(rng)

    tlbr = rng.rand(num, 4).astype(np.float32)

    tl_x = np.minimum(tlbr[:, 0], tlbr[:, 2])
    tl_y = np.minimum(tlbr[:, 1], tlbr[:, 3])
    br_x = np.maximum(tlbr[:, 0], tlbr[:, 2])
    br_y = np.maximum(tlbr[:, 1], tlbr[:, 3])

    tlbr[:, 0] = tl_x * scale
    tlbr[:, 1] = tl_y * scale
    tlbr[:, 2] = br_x * scale
    tlbr[:, 3] = br_y * scale

    boxes = torch.from_numpy(tlbr)
    return boxes
(file_length 1,181 · avg_line_length 26.488372 · max_line_length 101 · extension_type py)
mmdetection-master/mmdet/core/bbox/transforms.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch


def find_inside_bboxes(bboxes, img_h, img_w):
    """Find bboxes that have at least one part inside the image.

    Args:
        bboxes (Tensor): Shape (N, 4).
        img_h (int): Image height.
        img_w (int): Image width.

    Returns:
        Tensor: Boolean mask of the remaining bboxes.
    """
    inside_inds = (bboxes[:, 0] < img_w) & (bboxes[:, 2] > 0) \
        & (bboxes[:, 1] < img_h) & (bboxes[:, 3] > 0)
    return inside_inds


def bbox_flip(bboxes, img_shape, direction='horizontal'):
    """Flip bboxes horizontally or vertically.

    Args:
        bboxes (Tensor): Shape (..., 4*k)
        img_shape (tuple): Image shape.
        direction (str): Flip direction, options are "horizontal",
            "vertical", "diagonal". Default: "horizontal"

    Returns:
        Tensor: Flipped bboxes.
    """
    assert bboxes.shape[-1] % 4 == 0
    assert direction in ['horizontal', 'vertical', 'diagonal']
    flipped = bboxes.clone()
    if direction == 'horizontal':
        flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
        flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
    elif direction == 'vertical':
        flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
        flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
    else:
        flipped[..., 0::4] = img_shape[1] - bboxes[..., 2::4]
        flipped[..., 1::4] = img_shape[0] - bboxes[..., 3::4]
        flipped[..., 2::4] = img_shape[1] - bboxes[..., 0::4]
        flipped[..., 3::4] = img_shape[0] - bboxes[..., 1::4]
    return flipped


def bbox_mapping(bboxes,
                 img_shape,
                 scale_factor,
                 flip,
                 flip_direction='horizontal'):
    """Map bboxes from the original image scale to testing scale."""
    new_bboxes = bboxes * bboxes.new_tensor(scale_factor)
    if flip:
        new_bboxes = bbox_flip(new_bboxes, img_shape, flip_direction)
    return new_bboxes


def bbox_mapping_back(bboxes,
                      img_shape,
                      scale_factor,
                      flip,
                      flip_direction='horizontal'):
    """Map bboxes from testing scale to original image scale."""
    new_bboxes = bbox_flip(bboxes, img_shape,
                           flip_direction) if flip else bboxes
    new_bboxes = new_bboxes.view(-1, 4) / new_bboxes.new_tensor(scale_factor)
    return new_bboxes.view(bboxes.shape)


def bbox2roi(bbox_list):
    """Convert a list of bboxes to roi format.

    Args:
        bbox_list (list[Tensor]): a list of bboxes corresponding to a
            batch of images.

    Returns:
        Tensor: shape (n, 5), [batch_ind, x1, y1, x2, y2]
    """
    rois_list = []
    for img_id, bboxes in enumerate(bbox_list):
        if bboxes.size(0) > 0:
            img_inds = bboxes.new_full((bboxes.size(0), 1), img_id)
            rois = torch.cat([img_inds, bboxes[:, :4]], dim=-1)
        else:
            rois = bboxes.new_zeros((0, 5))
        rois_list.append(rois)
    rois = torch.cat(rois_list, 0)
    return rois


def roi2bbox(rois):
    """Convert rois to bounding box format.

    Args:
        rois (torch.Tensor): RoIs with the shape (n, 5) where the first
            column indicates batch id of each RoI.

    Returns:
        list[torch.Tensor]: Converted boxes of corresponding rois.
    """
    bbox_list = []
    img_ids = torch.unique(rois[:, 0].cpu(), sorted=True)
    for img_id in img_ids:
        inds = (rois[:, 0] == img_id.item())
        bbox = rois[inds, 1:]
        bbox_list.append(bbox)
    return bbox_list


def bbox2result(bboxes, labels, num_classes):
    """Convert detection results to a list of numpy arrays.

    Args:
        bboxes (torch.Tensor | np.ndarray): shape (n, 5)
        labels (torch.Tensor | np.ndarray): shape (n, )
        num_classes (int): class number, including background class

    Returns:
        list(ndarray): bbox results of each class
    """
    if bboxes.shape[0] == 0:
        return [
            np.zeros((0, 5), dtype=np.float32) for i in range(num_classes)
        ]
    else:
        if isinstance(bboxes, torch.Tensor):
            bboxes = bboxes.detach().cpu().numpy()
            labels = labels.detach().cpu().numpy()
        return [bboxes[labels == i, :] for i in range(num_classes)]


def distance2bbox(points, distance, max_shape=None):
    """Decode distance prediction to bounding box.

    Args:
        points (Tensor): Shape (B, N, 2) or (N, 2).
        distance (Tensor): Distance from the given point to 4
            boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4)
        max_shape (Sequence[int] or torch.Tensor or Sequence[
            Sequence[int]], optional): Maximum bounds for boxes, specifies
            (H, W, C) or (H, W). If priors shape is (B, N, 4), then
            the max_shape should be a Sequence[Sequence[int]]
            and the length of max_shape should also be B.

    Returns:
        Tensor: Boxes with shape (N, 4) or (B, N, 4)
    """

    x1 = points[..., 0] - distance[..., 0]
    y1 = points[..., 1] - distance[..., 1]
    x2 = points[..., 0] + distance[..., 2]
    y2 = points[..., 1] + distance[..., 3]

    bboxes = torch.stack([x1, y1, x2, y2], -1)

    if max_shape is not None:
        if bboxes.dim() == 2 and not torch.onnx.is_in_onnx_export():
            # speed up
            bboxes[:, 0::2].clamp_(min=0, max=max_shape[1])
            bboxes[:, 1::2].clamp_(min=0, max=max_shape[0])
            return bboxes

        # clip bboxes with dynamic `min` and `max` for onnx
        if torch.onnx.is_in_onnx_export():
            from mmdet.core.export import dynamic_clip_for_onnx
            x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
            bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
            return bboxes
        if not isinstance(max_shape, torch.Tensor):
            max_shape = x1.new_tensor(max_shape)
        max_shape = max_shape[..., :2].type_as(x1)
        if max_shape.ndim == 2:
            assert bboxes.ndim == 3
            assert max_shape.size(0) == bboxes.size(0)

        min_xy = x1.new_tensor(0)
        max_xy = torch.cat([max_shape, max_shape],
                           dim=-1).flip(-1).unsqueeze(-2)
        bboxes = torch.where(bboxes < min_xy, min_xy, bboxes)
        bboxes = torch.where(bboxes > max_xy, max_xy, bboxes)

    return bboxes


def bbox2distance(points, bbox, max_dis=None, eps=0.1):
    """Encode bounding box to distances.

    Args:
        points (Tensor): Shape (n, 2), [x, y].
        bbox (Tensor): Shape (n, 4), "xyxy" format
        max_dis (float): Upper bound of the distance.
        eps (float): a small value to ensure target < max_dis, instead of <=

    Returns:
        Tensor: Encoded distances.
    """
    left = points[:, 0] - bbox[:, 0]
    top = points[:, 1] - bbox[:, 1]
    right = bbox[:, 2] - points[:, 0]
    bottom = bbox[:, 3] - points[:, 1]
    if max_dis is not None:
        left = left.clamp(min=0, max=max_dis - eps)
        top = top.clamp(min=0, max=max_dis - eps)
        right = right.clamp(min=0, max=max_dis - eps)
        bottom = bottom.clamp(min=0, max=max_dis - eps)
    return torch.stack([left, top, right, bottom], -1)


def bbox_rescale(bboxes, scale_factor=1.0):
    """Rescale bounding box w.r.t. scale_factor.

    Args:
        bboxes (Tensor): Shape (n, 4) for bboxes or (n, 5) for rois
        scale_factor (float): rescale factor

    Returns:
        Tensor: Rescaled bboxes.
    """
    if bboxes.size(1) == 5:
        bboxes_ = bboxes[:, 1:]
        inds_ = bboxes[:, 0]
    else:
        bboxes_ = bboxes
    cx = (bboxes_[:, 0] + bboxes_[:, 2]) * 0.5
    cy = (bboxes_[:, 1] + bboxes_[:, 3]) * 0.5
    w = bboxes_[:, 2] - bboxes_[:, 0]
    h = bboxes_[:, 3] - bboxes_[:, 1]
    w = w * scale_factor
    h = h * scale_factor
    x1 = cx - 0.5 * w
    x2 = cx + 0.5 * w
    y1 = cy - 0.5 * h
    y2 = cy + 0.5 * h
    if bboxes.size(1) == 5:
        rescaled_bboxes = torch.stack([inds_, x1, y1, x2, y2], dim=-1)
    else:
        rescaled_bboxes = torch.stack([x1, y1, x2, y2], dim=-1)
    return rescaled_bboxes


def bbox_cxcywh_to_xyxy(bbox):
    """Convert bbox coordinates from (cx, cy, w, h) to (x1, y1, x2, y2).

    Args:
        bbox (Tensor): Shape (n, 4) for bboxes.

    Returns:
        Tensor: Converted bboxes.
    """
    cx, cy, w, h = bbox.split((1, 1, 1, 1), dim=-1)
    bbox_new = [(cx - 0.5 * w), (cy - 0.5 * h), (cx + 0.5 * w), (cy + 0.5 * h)]
    return torch.cat(bbox_new, dim=-1)


def bbox_xyxy_to_cxcywh(bbox):
    """Convert bbox coordinates from (x1, y1, x2, y2) to (cx, cy, w, h).

    Args:
        bbox (Tensor): Shape (n, 4) for bboxes.

    Returns:
        Tensor: Converted bboxes.
    """
    x1, y1, x2, y2 = bbox.split((1, 1, 1, 1), dim=-1)
    bbox_new = [(x1 + x2) / 2, (y1 + y2) / 2, (x2 - x1), (y2 - y1)]
    return torch.cat(bbox_new, dim=-1)
(file_length 8,962 · avg_line_length 32.073801 · max_line_length 79 · extension_type py)
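A quick round-trip sketch for the format converters and bbox2roi, assuming mmdet and torch are installed; the coordinates are toy values:

import torch
from mmdet.core.bbox import bbox2roi, bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh

xyxy = torch.tensor([[10., 20., 50., 80.]])
cxcywh = bbox_xyxy_to_cxcywh(xyxy)  # tensor([[30., 50., 40., 60.]])
assert torch.allclose(bbox_cxcywh_to_xyxy(cxcywh), xyxy)

# Two images with 1 and 2 boxes -> (3, 5) RoIs; the first column is the
# image index within the batch.
rois = bbox2roi([xyxy, torch.rand(2, 4) * 100])
print(rois.shape)  # torch.Size([3, 5])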
mmdetection-master/mmdet/core/bbox/assigners/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .approx_max_iou_assigner import ApproxMaxIoUAssigner
from .ascend_assign_result import AscendAssignResult
from .ascend_max_iou_assigner import AscendMaxIoUAssigner
from .assign_result import AssignResult
from .atss_assigner import ATSSAssigner
from .base_assigner import BaseAssigner
from .center_region_assigner import CenterRegionAssigner
from .grid_assigner import GridAssigner
from .hungarian_assigner import HungarianAssigner
from .mask_hungarian_assigner import MaskHungarianAssigner
from .max_iou_assigner import MaxIoUAssigner
from .point_assigner import PointAssigner
from .region_assigner import RegionAssigner
from .sim_ota_assigner import SimOTAAssigner
from .task_aligned_assigner import TaskAlignedAssigner
from .uniform_assigner import UniformAssigner

__all__ = [
    'BaseAssigner', 'MaxIoUAssigner', 'ApproxMaxIoUAssigner', 'AssignResult',
    'PointAssigner', 'ATSSAssigner', 'CenterRegionAssigner', 'GridAssigner',
    'HungarianAssigner', 'RegionAssigner', 'UniformAssigner', 'SimOTAAssigner',
    'TaskAlignedAssigner', 'MaskHungarianAssigner', 'AscendAssignResult',
    'AscendMaxIoUAssigner'
]
(file_length 1,171 · avg_line_length 44.076923 · max_line_length 79 · extension_type py)
mmdetection-master/mmdet/core/bbox/assigners/approx_max_iou_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .max_iou_assigner import MaxIoUAssigner


@BBOX_ASSIGNERS.register_module()
class ApproxMaxIoUAssigner(MaxIoUAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with an integer indicating the ground
    truth index. (semi-positive index: gt label (0-based), -1: background)

    - -1: negative sample, no assigned gt
    - semi-positive integer: positive sample, index (0-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each
            gt).
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
            `gt_bboxes_ignore` is specified). Negative values mean not
            ignoring any bboxes.
        ignore_wrt_candidates (bool): Whether to compute the iof between
            `bboxes` and `gt_bboxes_ignore`, or the contrary.
        match_low_quality (bool): Whether to allow low quality matches. This
            is usually allowed for RPN and single stage detectors, but not
            allowed in the second stage.
        gpu_assign_thr (int): The upper bound of the number of GT for GPU
            assign. When the number of gt is above this threshold, will
            assign on CPU device. Negative values mean not assign on CPU.
    """

    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 ignore_iof_thr=-1,
                 ignore_wrt_candidates=True,
                 match_low_quality=True,
                 gpu_assign_thr=-1,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.ignore_iof_thr = ignore_iof_thr
        self.ignore_wrt_candidates = ignore_wrt_candidates
        self.gpu_assign_thr = gpu_assign_thr
        self.match_low_quality = match_low_quality
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self,
               approxs,
               squares,
               approxs_per_octave,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None):
        """Assign gt to approxs.

        This method assigns a gt bbox to each group of approxs (bboxes).
        Each group of approxs is represented by a base approx (bbox) and
        will be assigned with -1, or a semi-positive number.
        background_label (-1) means negative sample,
        semi-positive number is the index (0-based) of assigned gt.

        The assignment is done in the following steps; the order matters.

        1. assign every bbox to background_label (-1)
        2. use the max IoU of each group of approxs to assign
        3. assign proposals whose iou with all gts < neg_iou_thr to background
        4. for each bbox, if the iou with its nearest gt >= pos_iou_thr,
           assign it to that bbox
        5. for each gt bbox, assign its nearest proposals (may be more than
           one) to itself

        Args:
            approxs (Tensor): Bounding boxes to be assigned,
                shape(approxs_per_octave*n, 4).
            squares (Tensor): Base Bounding boxes to be assigned,
                shape(n, 4).
            approxs_per_octave (int): number of approxs per octave
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_squares = squares.size(0)
        num_gts = gt_bboxes.size(0)

        if num_squares == 0 or num_gts == 0:
            # No predictions and/or truth, return empty assignment
            overlaps = approxs.new(num_gts, num_squares)
            assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
            return assign_result

        # re-organize anchors by approxs_per_octave x num_squares
        approxs = torch.transpose(
            approxs.view(num_squares, approxs_per_octave, 4), 0,
            1).contiguous().view(-1, 4)
        assign_on_cpu = True if (self.gpu_assign_thr > 0) and (
            num_gts > self.gpu_assign_thr) else False
        # compute overlap and assign gt on CPU when number of GT is large
        if assign_on_cpu:
            device = approxs.device
            approxs = approxs.cpu()
            gt_bboxes = gt_bboxes.cpu()
            if gt_bboxes_ignore is not None:
                gt_bboxes_ignore = gt_bboxes_ignore.cpu()
            if gt_labels is not None:
                gt_labels = gt_labels.cpu()
        all_overlaps = self.iou_calculator(approxs, gt_bboxes)

        overlaps, _ = all_overlaps.view(approxs_per_octave, num_squares,
                                        num_gts).max(dim=0)
        overlaps = torch.transpose(overlaps, 0, 1)

        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
                and gt_bboxes_ignore.numel() > 0 and squares.numel() > 0):
            if self.ignore_wrt_candidates:
                ignore_overlaps = self.iou_calculator(
                    squares, gt_bboxes_ignore, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            else:
                ignore_overlaps = self.iou_calculator(
                    gt_bboxes_ignore, squares, mode='iof')
                ignore_max_overlaps, _ = ignore_overlaps.max(dim=0)
            overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1

        assign_result = self.assign_wrt_overlaps(overlaps, gt_labels)
        if assign_on_cpu:
            assign_result.gt_inds = assign_result.gt_inds.to(device)
            assign_result.max_overlaps = assign_result.max_overlaps.to(device)
            if assign_result.labels is not None:
                assign_result.labels = assign_result.labels.to(device)
        return assign_result
(file_length 6,697 · avg_line_length 44.564626 · max_line_length 79 · extension_type py)
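A toy call sketch, assuming mmdet is installed. With approxs_per_octave=1 the approxs coincide with the base squares, so this reduces to a plain max-IoU assignment; the thresholds and boxes are made-up values:

import torch
from mmdet.core.bbox.assigners import ApproxMaxIoUAssigner

assigner = ApproxMaxIoUAssigner(pos_iou_thr=0.5, neg_iou_thr=0.4)
squares = torch.tensor([[0., 0., 10., 10.], [10., 10., 20., 20.]])
gt_bboxes = torch.tensor([[0., 0., 10., 9.]])
result = assigner.assign(squares, squares, 1, gt_bboxes)
print(result.gt_inds)  # tensor([1, 0]): 1-based gt index, 0 = background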
mmdetection-master/mmdet/core/bbox/assigners/ascend_assign_result.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmdet.utils import util_mixins


class AscendAssignResult(util_mixins.NiceRepr):
    """Stores ascend assignments between predicted and truth boxes.

    Arguments:
        batch_num_gts (list[int]): the number of truth boxes considered.
        batch_pos_mask (IntTensor): Positive samples mask in all images.
        batch_neg_mask (IntTensor): Negative samples mask in all images.
        batch_max_overlaps (FloatTensor): The max overlaps of all bboxes
            and ground truth boxes.
        batch_anchor_gt_indes (None | LongTensor): The assigned truth
            box index of all anchors.
        batch_anchor_gt_labels (None | LongTensor): The gt labels
            of all anchors.
    """

    def __init__(self,
                 batch_num_gts,
                 batch_pos_mask,
                 batch_neg_mask,
                 batch_max_overlaps,
                 batch_anchor_gt_indes=None,
                 batch_anchor_gt_labels=None):
        self.batch_num_gts = batch_num_gts
        self.batch_pos_mask = batch_pos_mask
        self.batch_neg_mask = batch_neg_mask
        self.batch_max_overlaps = batch_max_overlaps
        self.batch_anchor_gt_indes = batch_anchor_gt_indes
        self.batch_anchor_gt_labels = batch_anchor_gt_labels
        # Interface for possible user-defined properties
        self._extra_properties = {}
(file_length 1,403 · avg_line_length 39.114286 · max_line_length 72 · extension_type py)
mmdetection-master/mmdet/core/bbox/assigners/ascend_max_iou_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ....utils import masked_fill
from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .ascend_assign_result import AscendAssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class AscendMaxIoUAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with `-1`, or a semi-positive integer
    indicating the ground truth index.

    - -1: negative sample, no assigned gt
    - semi-positive integer: positive sample, index (0-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each
            gt). `min_pos_iou` is set to avoid assigning bboxes that have
            extremely small iou with GT as positive samples. It brings about
            0.3 mAP improvements in 1x schedule but does not affect the
            performance of 3x schedule. More comparisons can be found in
            `PR #7464 <https://github.com/open-mmlab/mmdetection/pull/7464>`_.
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
        ignore_iof_thr (float): IoF threshold for ignoring bboxes (if
            `gt_bboxes_ignore` is specified). Negative values mean not
            ignoring any bboxes.
        ignore_wrt_candidates (bool): Whether to compute the iof between
            `bboxes` and `gt_bboxes_ignore`, or the contrary.
        match_low_quality (bool): Whether to allow low quality matches. This
            is usually allowed for RPN and single stage detectors, but not
            allowed in the second stage. Details are demonstrated in Step 4.
        gpu_assign_thr (int): The upper bound of the number of GT for GPU
            assign. When the number of gt is above this threshold, will
            assign on CPU device. Negative values mean not assign on CPU.
    """

    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 ignore_iof_thr=-1,
                 ignore_wrt_candidates=True,
                 match_low_quality=True,
                 gpu_assign_thr=-1,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.ignore_iof_thr = ignore_iof_thr
        self.ignore_wrt_candidates = ignore_wrt_candidates
        self.gpu_assign_thr = gpu_assign_thr
        self.match_low_quality = match_low_quality
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self,
               batch_bboxes,
               batch_gt_bboxes,
               batch_gt_bboxes_ignore=None,
               batch_gt_labels=None,
               batch_bboxes_ignore_mask=None,
               batch_num_gts=None):
        """Assign gt to bboxes.

        Args:
            batch_bboxes (Tensor): Bounding boxes to be assigned,
                shape(b, n, 4).
            batch_gt_bboxes (Tensor): Ground truth boxes,
                shape (b, k, 4).
            batch_gt_bboxes_ignore (Tensor, optional): Ground truth
                bboxes that are labelled as `ignored`, e.g., crowd boxes
                in COCO.
            batch_gt_labels (Tensor, optional): Label of gt_bboxes,
                shape (b, k, ).
            batch_bboxes_ignore_mask: (b, n)
            batch_num_gts: (b, )

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        batch_overlaps = self.iou_calculator(batch_gt_bboxes, batch_bboxes)
        batch_overlaps = masked_fill(
            batch_overlaps,
            batch_bboxes_ignore_mask.unsqueeze(1).float(),
            -1,
            neg=True)
        if self.ignore_iof_thr > 0 and batch_gt_bboxes_ignore is not None:
            if self.ignore_wrt_candidates:
                batch_ignore_overlaps = self.iou_calculator(
                    batch_bboxes, batch_gt_bboxes_ignore, mode='iof')
                batch_ignore_overlaps = masked_fill(batch_ignore_overlaps,
                                                    batch_bboxes_ignore_mask,
                                                    -1)
                batch_ignore_max_overlaps, _ = batch_ignore_overlaps.max(
                    dim=2)
            else:
                batch_ignore_overlaps = self.iou_calculator(
                    batch_gt_bboxes_ignore, batch_bboxes, mode='iof')
                batch_ignore_overlaps = masked_fill(batch_ignore_overlaps,
                                                    batch_bboxes_ignore_mask,
                                                    -1)
                batch_ignore_max_overlaps, _ = \
                    batch_ignore_overlaps.max(dim=1)
            batch_ignore_mask = \
                batch_ignore_max_overlaps > self.ignore_iof_thr
            batch_overlaps = masked_fill(batch_overlaps, batch_ignore_mask,
                                         -1)
        batch_assign_result = self.batch_assign_wrt_overlaps(
            batch_overlaps, batch_gt_labels, batch_num_gts)
        return batch_assign_result

    def batch_assign_wrt_overlaps(self,
                                  batch_overlaps,
                                  batch_gt_labels=None,
                                  batch_num_gts=None):
        num_images, num_gts, num_bboxes = batch_overlaps.size()
        batch_max_overlaps, batch_argmax_overlaps = batch_overlaps.max(dim=1)
        if isinstance(self.neg_iou_thr, float):
            batch_neg_mask = \
                ((batch_max_overlaps >= 0) &
                 (batch_max_overlaps < self.neg_iou_thr)).int()
        elif isinstance(self.neg_iou_thr, tuple):
            assert len(self.neg_iou_thr) == 2
            batch_neg_mask = \
                ((batch_max_overlaps >= self.neg_iou_thr[0]) &
                 (batch_max_overlaps < self.neg_iou_thr[1])).int()
        else:
            batch_neg_mask = torch.zeros(
                batch_max_overlaps.size(),
                dtype=torch.int,
                device=batch_max_overlaps.device)
        batch_pos_mask = (batch_max_overlaps >= self.pos_iou_thr).int()
        if self.match_low_quality:
            batch_gt_max_overlaps, batch_gt_argmax_overlaps = \
                batch_overlaps.max(dim=2)
            batch_index_bool = (batch_gt_max_overlaps >= self.min_pos_iou) & \
                               (batch_gt_max_overlaps > 0)
            if self.gt_max_assign_all:
                pos_inds_low_quality = \
                    (batch_overlaps == batch_gt_max_overlaps.unsqueeze(2)) & \
                    batch_index_bool.unsqueeze(2)
                for i in range(num_gts):
                    pos_inds_low_quality_gt = pos_inds_low_quality[:, i, :]
                    batch_argmax_overlaps[pos_inds_low_quality_gt] = i
                    batch_pos_mask[pos_inds_low_quality_gt] = 1
            else:
                index_temp = torch.arange(
                    0, num_gts, device=batch_max_overlaps.device)
                for index_image in range(num_images):
                    gt_argmax_overlaps = batch_gt_argmax_overlaps[index_image]
                    index_bool = batch_index_bool[index_image]
                    pos_inds_low_quality = gt_argmax_overlaps[index_bool]
                    batch_argmax_overlaps[index_image][pos_inds_low_quality] \
                        = index_temp[index_bool]
                    batch_pos_mask[index_image][pos_inds_low_quality] = 1
        batch_neg_mask = batch_neg_mask * (1 - batch_pos_mask)
        if batch_gt_labels is not None:
            batch_anchor_gt_labels = torch.zeros((num_images, num_bboxes),
                                                 dtype=batch_gt_labels.dtype,
                                                 device=batch_gt_labels.device)
            for index_image in range(num_images):
                batch_anchor_gt_labels[index_image] = torch.index_select(
                    batch_gt_labels[index_image], 0,
                    batch_argmax_overlaps[index_image])
        else:
            batch_anchor_gt_labels = None
        return AscendAssignResult(batch_num_gts, batch_pos_mask,
                                  batch_neg_mask, batch_max_overlaps,
                                  batch_argmax_overlaps,
                                  batch_anchor_gt_labels)
(file_length 8,841 · avg_line_length 48.396648 · max_line_length 79 · extension_type py)
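The tuple form of neg_iou_thr above defines a band of IoUs treated as negative. A standalone sketch of that masking in plain torch, with made-up IoU values (no mmdet or NPU required):

import torch

# (num_images=1, num_gts=1, num_anchors=3) IoU matrix, toy values
overlaps = torch.tensor([[[0.10, 0.45, 0.80]]])
max_overlaps, _ = overlaps.max(dim=1)   # best gt per anchor
neg_lo, neg_hi = 0.0, 0.3               # tuple neg_iou_thr: a negative band
neg_mask = ((max_overlaps >= neg_lo) & (max_overlaps < neg_hi)).int()
pos_mask = (max_overlaps >= 0.5).int()
neg_mask = neg_mask * (1 - pos_mask)    # positives override negatives
print(pos_mask, neg_mask)  # tensor([[0, 0, 1]]) tensor([[1, 0, 0]])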
mmdetection-master/mmdet/core/bbox/assigners/assign_result.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from mmdet.utils import util_mixins


class AssignResult(util_mixins.NiceRepr):
    """Stores assignments between predicted and truth boxes.

    Attributes:
        num_gts (int): the number of truth boxes considered when computing
            this assignment
        gt_inds (LongTensor): for each predicted box indicates the 1-based
            index of the assigned truth box. 0 means unassigned and -1 means
            ignore.
        max_overlaps (FloatTensor): the iou between the predicted box and its
            assigned truth box.
        labels (None | LongTensor): If specified, for each predicted box
            indicates the category label of the assigned truth box.

    Example:
        >>> # An assign result between 4 predicted boxes and 9 true boxes
        >>> # where only two boxes were assigned.
        >>> num_gts = 9
        >>> max_overlaps = torch.FloatTensor([0, .5, .9, 0])
        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
        >>> labels = torch.LongTensor([0, 3, 4, 0])
        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(4,),
                      max_overlaps.shape=(4,), labels.shape=(4,))>
        >>> # Force addition of gt labels (when adding gt as proposals)
        >>> new_labels = torch.LongTensor([3, 4, 5])
        >>> self.add_gt_(new_labels)
        >>> print(str(self))  # xdoctest: +IGNORE_WANT
        <AssignResult(num_gts=9, gt_inds.shape=(7,),
                      max_overlaps.shape=(7,), labels.shape=(7,))>
    """

    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
        self.num_gts = num_gts
        self.gt_inds = gt_inds
        self.max_overlaps = max_overlaps
        self.labels = labels
        # Interface for possible user-defined properties
        self._extra_properties = {}

    @property
    def num_preds(self):
        """int: the number of predictions in this assignment"""
        return len(self.gt_inds)

    def set_extra_property(self, key, value):
        """Set user-defined new property."""
        assert key not in self.info
        self._extra_properties[key] = value

    def get_extra_property(self, key):
        """Get user-defined property."""
        return self._extra_properties.get(key, None)

    @property
    def info(self):
        """dict: a dictionary of info about the object"""
        basic_info = {
            'num_gts': self.num_gts,
            'num_preds': self.num_preds,
            'gt_inds': self.gt_inds,
            'max_overlaps': self.max_overlaps,
            'labels': self.labels,
        }
        basic_info.update(self._extra_properties)
        return basic_info

    def __nice__(self):
        """str: a "nice" summary string describing this assign result"""
        parts = []
        parts.append(f'num_gts={self.num_gts!r}')
        if self.gt_inds is None:
            parts.append(f'gt_inds={self.gt_inds!r}')
        else:
            parts.append(f'gt_inds.shape={tuple(self.gt_inds.shape)!r}')
        if self.max_overlaps is None:
            parts.append(f'max_overlaps={self.max_overlaps!r}')
        else:
            parts.append('max_overlaps.shape='
                         f'{tuple(self.max_overlaps.shape)!r}')
        if self.labels is None:
            parts.append(f'labels={self.labels!r}')
        else:
            parts.append(f'labels.shape={tuple(self.labels.shape)!r}')
        return ', '.join(parts)

    @classmethod
    def random(cls, **kwargs):
        """Create random AssignResult for tests or debugging.

        Args:
            num_preds: number of predicted boxes
            num_gts: number of true boxes
            p_ignore (float): probability of a predicted box assigned to an
                ignored truth
            p_assigned (float): probability of a predicted box not being
                assigned
            p_use_label (float | bool): with labels or not
            rng (None | int | numpy.random.RandomState): seed or state

        Returns:
            :obj:`AssignResult`: Randomly generated assign results.

        Example:
            >>> from mmdet.core.bbox.assigners.assign_result import *  # NOQA
            >>> self = AssignResult.random()
            >>> print(self.info)
        """
        from mmdet.core.bbox import demodata
        rng = demodata.ensure_rng(kwargs.get('rng', None))

        num_gts = kwargs.get('num_gts', None)
        num_preds = kwargs.get('num_preds', None)
        p_ignore = kwargs.get('p_ignore', 0.3)
        p_assigned = kwargs.get('p_assigned', 0.7)
        p_use_label = kwargs.get('p_use_label', 0.5)
        num_classes = kwargs.get('num_classes', 3)

        if num_gts is None:
            num_gts = rng.randint(0, 8)
        if num_preds is None:
            num_preds = rng.randint(0, 16)

        if num_gts == 0:
            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
            if p_use_label is True or p_use_label < rng.rand():
                labels = torch.zeros(num_preds, dtype=torch.int64)
            else:
                labels = None
        else:
            import numpy as np

            # Create an overlap for each predicted box
            max_overlaps = torch.from_numpy(rng.rand(num_preds))

            # Construct gt_inds for each predicted box
            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
            # maximum number of assignments constraints
            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))

            assigned_idxs = np.where(is_assigned)[0]
            rng.shuffle(assigned_idxs)
            assigned_idxs = assigned_idxs[0:n_assigned]
            assigned_idxs.sort()

            is_assigned[:] = 0
            is_assigned[assigned_idxs] = True

            is_ignore = torch.from_numpy(
                rng.rand(num_preds) < p_ignore) & is_assigned

            gt_inds = torch.zeros(num_preds, dtype=torch.int64)

            true_idxs = np.arange(num_gts)
            rng.shuffle(true_idxs)
            true_idxs = torch.from_numpy(true_idxs)
            gt_inds[is_assigned] = true_idxs[:n_assigned].long()

            gt_inds = torch.from_numpy(
                rng.randint(1, num_gts + 1, size=num_preds))
            gt_inds[is_ignore] = -1
            gt_inds[~is_assigned] = 0
            max_overlaps[~is_assigned] = 0

            if p_use_label is True or p_use_label < rng.rand():
                if num_classes == 0:
                    labels = torch.zeros(num_preds, dtype=torch.int64)
                else:
                    labels = torch.from_numpy(
                        # remind that we set FG labels to [0, num_class-1]
                        # since mmdet v2.0
                        # BG cat_id: num_class
                        rng.randint(0, num_classes, size=num_preds))
                    labels[~is_assigned] = 0
            else:
                labels = None

        self = cls(num_gts, gt_inds, max_overlaps, labels)
        return self

    def add_gt_(self, gt_labels):
        """Add ground truth as assigned results.

        Args:
            gt_labels (torch.Tensor): Labels of gt boxes
        """
        self_inds = torch.arange(
            1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device)
        self.gt_inds = torch.cat([self_inds, self.gt_inds])

        self.max_overlaps = torch.cat(
            [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps])

        if self.labels is not None:
            self.labels = torch.cat([gt_labels, self.labels])
(file_length 7,761 · avg_line_length 36.497585 · max_line_length 79 · extension_type py)
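Beyond the docstring examples, the _extra_properties interface lets downstream code stash arbitrary per-assignment tensors. A small sketch, assuming mmdet is installed; the property name is made up:

from mmdet.core.bbox.assigners import AssignResult

result = AssignResult.random(num_preds=8, num_gts=3, rng=0)
result.set_extra_property('centerness', result.max_overlaps * 0.5)  # hypothetical key
print(result.get_extra_property('centerness').shape)  # torch.Size([8])
print('centerness' in result.info)  # True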
mmdetection-master/mmdet/core/bbox/assigners/atss_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class ATSSAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with `0` or a positive integer
    indicating the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    If ``alpha`` is not None, it means that the dynamic cost
    ATSSAssigner is adopted, which is currently only used in the DDOD.

    Args:
        topk (int): number of bbox selected in each level
    """

    def __init__(self,
                 topk,
                 alpha=None,
                 iou_calculator=dict(type='BboxOverlaps2D'),
                 ignore_iof_thr=-1):
        """Initialize the assigner.

        Args:
            topk (int): number of bbox selected in each level.
            alpha (float): param of cost rate for each proposal only
                in DDOD. Default None.
            iou_calculator (dict): builder of IoU calculator.
                Default dict(type='BboxOverlaps2D').
            ignore_iof_thr (float): IoF threshold for ignoring bboxes.
                Default -1 (do not ignore).
        """
        self.topk = topk
        self.alpha = alpha
        self.iou_calculator = build_iou_calculator(iou_calculator)
        self.ignore_iof_thr = ignore_iof_thr

    # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py
    def assign(self,
               bboxes,
               num_level_bboxes,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None,
               cls_scores=None,
               bbox_preds=None):
        """Assign gt to bboxes.

        The assignment is done in following steps

        1. compute iou between all bbox (bbox of all pyramid levels) and gt
        2. compute center distance between all bbox and gt
        3. on each pyramid level, for each gt, select k bbox whose center
           are closest to the gt center, so we select k*l bboxes in total
           as candidates for each gt
        4. get corresponding iou for these candidates, and compute the
           mean and std, set mean + std as the iou threshold
        5. select these candidates whose iou are greater than or equal to
           the threshold as positive
        6. limit the positive sample's center in gt

        If ``alpha`` is not None, and ``cls_scores`` and ``bbox_preds``
        are not None, the overlaps calculation in the first step
        will also include dynamic cost, which is currently only used in
        the DDOD.

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            num_level_bboxes (List): num of bboxes in each level
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO. Default
                None.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
            cls_scores (list[Tensor]): Classification scores for all scale
                levels, each is a 4D-tensor, the channels number is
                num_base_priors * num_classes. Default None.
            bbox_preds (list[Tensor]): Box energies / deltas for all scale
                levels, each is a 4D-tensor, the channels number is
                num_base_priors * 4. Default None.

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        INF = 100000000
        bboxes = bboxes[:, :4]
        num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)

        message = 'Invalid alpha parameter because cls_scores or ' \
                  'bbox_preds are None. If you want to use the ' \
                  'cost-based ATSSAssigner, please set cls_scores, ' \
                  'bbox_preds and self.alpha at the same time. '

        if self.alpha is None:
            # ATSSAssigner
            overlaps = self.iou_calculator(bboxes, gt_bboxes)
            if cls_scores is not None or bbox_preds is not None:
                warnings.warn(message)
        else:
            # Dynamic cost ATSSAssigner in DDOD
            assert cls_scores is not None and bbox_preds is not None, message

            # compute cls cost for bbox and GT
            cls_cost = torch.sigmoid(cls_scores[:, gt_labels])

            # compute iou between all bbox and gt
            overlaps = self.iou_calculator(bbox_preds, gt_bboxes)

            # make sure that we are in element-wise multiplication
            assert cls_cost.shape == overlaps.shape

            # overlaps is actually a cost matrix
            overlaps = cls_cost**(1 - self.alpha) * overlaps**self.alpha

        # assign 0 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
                                             0,
                                             dtype=torch.long)

        if num_gt == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = overlaps.new_zeros((num_bboxes, ))
            if num_gt == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes, ),
                                                    -1,
                                                    dtype=torch.long)
            return AssignResult(
                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)

        # compute center distance between all bbox and gt
        gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
        gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
        gt_points = torch.stack((gt_cx, gt_cy), dim=1)

        bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
        bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
        bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)

        distances = (bboxes_points[:, None, :] -
                     gt_points[None, :, :]).pow(2).sum(-1).sqrt()

        if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None
                and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0):
            ignore_overlaps = self.iou_calculator(
                bboxes, gt_bboxes_ignore, mode='iof')
            ignore_max_overlaps, _ = ignore_overlaps.max(dim=1)
            ignore_idxs = ignore_max_overlaps > self.ignore_iof_thr
            distances[ignore_idxs, :] = INF
            assigned_gt_inds[ignore_idxs] = -1

        # Selecting candidates based on the center distance
        candidate_idxs = []
        start_idx = 0
        for level, bboxes_per_level in enumerate(num_level_bboxes):
            # on each pyramid level, for each gt,
            # select k bbox whose center are closest to the gt center
            end_idx = start_idx + bboxes_per_level
            distances_per_level = distances[start_idx:end_idx, :]
            selectable_k = min(self.topk, bboxes_per_level)
            _, topk_idxs_per_level = distances_per_level.topk(
                selectable_k, dim=0, largest=False)
            candidate_idxs.append(topk_idxs_per_level + start_idx)
            start_idx = end_idx
        candidate_idxs = torch.cat(candidate_idxs, dim=0)

        # get corresponding iou for these candidates, and compute the
        # mean and std, set mean + std as the iou threshold
        candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
        overlaps_mean_per_gt = candidate_overlaps.mean(0)
        overlaps_std_per_gt = candidate_overlaps.std(0)
        overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt

        is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]

        # limit the positive sample's center in gt
        for gt_idx in range(num_gt):
            candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
        ep_bboxes_cx = bboxes_cx.view(1, -1).expand(
            num_gt, num_bboxes).contiguous().view(-1)
        ep_bboxes_cy = bboxes_cy.view(1, -1).expand(
            num_gt, num_bboxes).contiguous().view(-1)
        candidate_idxs = candidate_idxs.view(-1)

        # calculate the left, top, right, bottom distance between positive
        # bbox center and gt side
        l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
        t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
        r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
        b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
        is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01

        is_pos = is_pos & is_in_gts

        # if an anchor box is assigned to multiple gts,
        # the one with the highest IoU will be selected.
        overlaps_inf = torch.full_like(overlaps,
                                       -INF).t().contiguous().view(-1)
        index = candidate_idxs.view(-1)[is_pos.view(-1)]
        overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
        overlaps_inf = overlaps_inf.view(num_gt, -1).t()

        max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
        assigned_gt_inds[
            max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1

        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None
        return AssignResult(
            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels)
(file_length 10,174 · avg_line_length 42.297872 · max_line_length 87 · extension_type py)
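The heart of ATSS is step 4: the IoU threshold adapts per gt as mean + std of the candidate IoUs. A standalone sketch in plain torch with made-up IoU values:

import torch

candidate_overlaps = torch.tensor([0.12, 0.35, 0.40, 0.52, 0.61])
thr = candidate_overlaps.mean() + candidate_overlaps.std()
is_pos = candidate_overlaps >= thr
print(thr, is_pos)  # thr ~= 0.587, only the 0.61 candidate survives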
mmdetection-master/mmdet/core/bbox/assigners/base_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod


class BaseAssigner(metaclass=ABCMeta):
    """Base assigner that assigns boxes to ground truth boxes."""

    @abstractmethod
    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None,
               gt_labels=None):
        """Assign boxes to either a ground truth box or a negative sample."""
(file_length 375 · avg_line_length 33.181818 · max_line_length 79 · extension_type py)
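Any concrete assigner just implements assign and returns an AssignResult. A hypothetical minimal subclass, assuming mmdet is installed (the class itself is invented for illustration):

import torch
from mmdet.core.bbox.assigners import AssignResult, BaseAssigner
from mmdet.core.bbox.builder import BBOX_ASSIGNERS


@BBOX_ASSIGNERS.register_module()
class AllNegativeAssigner(BaseAssigner):  # hypothetical example class
    """Assigns every box to the background (gt_ind 0)."""

    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None,
               gt_labels=None):
        num_bboxes = bboxes.size(0)
        gt_inds = bboxes.new_zeros((num_bboxes, ), dtype=torch.long)
        max_overlaps = bboxes.new_zeros((num_bboxes, ))
        return AssignResult(gt_bboxes.size(0), gt_inds, max_overlaps, None)

Once registered, it can be built from config as dict(type='AllNegativeAssigner').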
mmdetection-master/mmdet/core/bbox/assigners/center_region_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


def scale_boxes(bboxes, scale):
    """Expand an array of boxes by a given scale.

    Args:
        bboxes (Tensor): Shape (m, 4)
        scale (float): The scale factor of bboxes

    Returns:
        (Tensor): Shape (m, 4). Scaled bboxes
    """
    assert bboxes.size(1) == 4
    w_half = (bboxes[:, 2] - bboxes[:, 0]) * .5
    h_half = (bboxes[:, 3] - bboxes[:, 1]) * .5
    x_c = (bboxes[:, 2] + bboxes[:, 0]) * .5
    y_c = (bboxes[:, 3] + bboxes[:, 1]) * .5

    w_half *= scale
    h_half *= scale

    boxes_scaled = torch.zeros_like(bboxes)
    boxes_scaled[:, 0] = x_c - w_half
    boxes_scaled[:, 2] = x_c + w_half
    boxes_scaled[:, 1] = y_c - h_half
    boxes_scaled[:, 3] = y_c + h_half
    return boxes_scaled


def is_located_in(points, bboxes):
    """Are points located in bboxes.

    Args:
        points (Tensor): Points, shape: (m, 2).
        bboxes (Tensor): Bounding boxes, shape: (n, 4).

    Return:
        Tensor: Flags indicating if points are located in bboxes,
            shape: (m, n).
    """
    assert points.size(1) == 2
    assert bboxes.size(1) == 4
    return (points[:, 0].unsqueeze(1) > bboxes[:, 0].unsqueeze(0)) & \
           (points[:, 0].unsqueeze(1) < bboxes[:, 2].unsqueeze(0)) & \
           (points[:, 1].unsqueeze(1) > bboxes[:, 1].unsqueeze(0)) & \
           (points[:, 1].unsqueeze(1) < bboxes[:, 3].unsqueeze(0))


def bboxes_area(bboxes):
    """Compute the area of an array of bboxes.

    Args:
        bboxes (Tensor): The coordinates of bboxes. Shape: (m, 4)

    Returns:
        Tensor: Area of the bboxes. Shape: (m, )
    """
    assert bboxes.size(1) == 4
    w = (bboxes[:, 2] - bboxes[:, 0])
    h = (bboxes[:, 3] - bboxes[:, 1])
    areas = w * h
    return areas


@BBOX_ASSIGNERS.register_module()
class CenterRegionAssigner(BaseAssigner):
    """Assign pixels at the center region of a bbox as positive.

    Each proposal will be assigned with `-1`, `0`, or a positive integer
    indicating the ground truth index.

    - -1: ignored sample (e.g. lies inside an ignored gt region)
    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        pos_scale (float): Threshold within which pixels are labelled as
            positive.
        neg_scale (float): Threshold above which pixels are labelled as
            negative.
        min_pos_iof (float): Minimum iof of a pixel with a gt to be
            labelled as positive. Default: 1e-2
        ignore_gt_scale (float): Threshold within which the pixels are ignored
            when the gt is labelled as shadowed. Default: 0.5
        foreground_dominate (bool): If True, the bbox will be assigned as
            positive when a gt's kernel region overlaps with another's
            shadowed (ignored) region, otherwise it is set as ignored.
            Default to False.
    """

    def __init__(self,
                 pos_scale,
                 neg_scale,
                 min_pos_iof=1e-2,
                 ignore_gt_scale=0.5,
                 foreground_dominate=False,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_scale = pos_scale
        self.neg_scale = neg_scale
        self.min_pos_iof = min_pos_iof
        self.ignore_gt_scale = ignore_gt_scale
        self.foreground_dominate = foreground_dominate
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def get_gt_priorities(self, gt_bboxes):
        """Get gt priorities according to their areas.

        Smaller gt has higher priority.

        Args:
            gt_bboxes (Tensor): Ground truth boxes, shape (k, 4).

        Returns:
            Tensor: The priority of gts; gts with larger priority are \
                more likely to be assigned. Shape (k, )
        """
        gt_areas = bboxes_area(gt_bboxes)
        # Rank all gt bbox areas. Smaller objects have larger priority
        _, sort_idx = gt_areas.sort(descending=True)
        sort_idx = sort_idx.argsort()
        return sort_idx

    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None,
               gt_labels=None):
        """Assign gt to bboxes.

        This method assigns gts to every bbox (proposal/anchor). Each bbox \
        will be assigned with -1, 0, or a positive number. -1 means the \
        bbox is ignored, 0 means negative sample, and a positive number is \
        the index (1-based) of the assigned gt.

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (tensor, optional): Label of gt_bboxes, shape
                (num_gts,).

        Returns:
            :obj:`AssignResult`: The assigned result. Note that \
                shadowed_labels of shape (N, 2) is also added as an \
                `assign_result` attribute. `shadowed_labels` is a tensor \
                composed of N pairs of [anchor_ind, class_label], where N \
                is the number of anchors that lie in the outer region of a \
                gt, anchor_ind is the shadowed anchor index and class_label \
                is the shadowed class label.

        Example:
            >>> self = CenterRegionAssigner(0.2, 0.2)
            >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]])
            >>> gt_bboxes = torch.Tensor([[0, 0, 10, 10]])
            >>> assign_result = self.assign(bboxes, gt_bboxes)
            >>> expected_gt_inds = torch.LongTensor([1, 0])
            >>> assert torch.all(assign_result.gt_inds == expected_gt_inds)
        """
        # There are in total 5 steps in the pixel assignment
        # 1. Find core (the center region, say inner 0.2)
        #     and shadow (the relatively outer part, say inner 0.2-0.5)
        #     regions of every gt.
        # 2. Find all prior bboxes that lie in gt_core and gt_shadow regions
        # 3. Assign prior bboxes in gt_core with a one-hot id of the gt in
        #     the image.
        # 3.1. For overlapping objects, the prior bboxes in gt_core is
        #     assigned with the object with smallest area
        # 4. Assign prior bboxes with class label according to its gt id.
        # 4.1. Assign -1 to prior bboxes lying in shadowed gts
        # 4.2. Assign positive prior boxes with the corresponding label
        # 5. Find pixels lying in the shadow of an object and assign them with
        #     background label, but set the loss weight of its corresponding
        #     gt to zero.
        assert bboxes.size(1) == 4, 'bboxes must have size of 4'
        # 1. Find core positive and shadow region of every gt
        gt_core = scale_boxes(gt_bboxes, self.pos_scale)
        gt_shadow = scale_boxes(gt_bboxes, self.neg_scale)

        # 2. Find prior bboxes that lie in gt_core and gt_shadow regions
        bbox_centers = (bboxes[:, 2:4] + bboxes[:, 0:2]) / 2
        # The center points lie within the gt boxes
        is_bbox_in_gt = is_located_in(bbox_centers, gt_bboxes)
        # Only calculate bbox and gt_core IoF. This enables small prior bboxes
        #   to match large gts
        bbox_and_gt_core_overlaps = self.iou_calculator(
            bboxes, gt_core, mode='iof')
        # The center point of effective priors should be within the gt box
        is_bbox_in_gt_core = is_bbox_in_gt & (
            bbox_and_gt_core_overlaps > self.min_pos_iof)  # shape (n, k)

        is_bbox_in_gt_shadow = (
            self.iou_calculator(bboxes, gt_shadow, mode='iof') >
            self.min_pos_iof)
        # Rule out center effective positive pixels
        is_bbox_in_gt_shadow &= (~is_bbox_in_gt_core)

        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
        if num_gts == 0 or num_bboxes == 0:
            # If no gts exist, assign all pixels to negative
            assigned_gt_ids = \
                is_bbox_in_gt_core.new_zeros((num_bboxes,),
                                             dtype=torch.long)
            pixels_in_gt_shadow = assigned_gt_ids.new_empty((0, 2))
        else:
            # Step 3: assign a one-hot gt id to each pixel, and smaller
            #    objects have high priority to assign the pixel.
            sort_idx = self.get_gt_priorities(gt_bboxes)
            assigned_gt_ids, pixels_in_gt_shadow = \
                self.assign_one_hot_gt_indices(is_bbox_in_gt_core,
                                               is_bbox_in_gt_shadow,
                                               gt_priority=sort_idx)

        if gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0:
            # Ignore priors whose centers lie in the scaled ignored gts
            gt_bboxes_ignore = scale_boxes(
                gt_bboxes_ignore, scale=self.ignore_gt_scale)
            is_bbox_in_ignored_gts = is_located_in(bbox_centers,
                                                   gt_bboxes_ignore)
            is_bbox_in_ignored_gts = is_bbox_in_ignored_gts.any(dim=1)
            assigned_gt_ids[is_bbox_in_ignored_gts] = -1

        # 4. Assign prior bboxes with class label according to its gt id.
        assigned_labels = None
        shadowed_pixel_labels = None
        if gt_labels is not None:
            # Default assigned label is the background (-1)
            assigned_labels = assigned_gt_ids.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_ids > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = \
                    gt_labels[assigned_gt_ids[pos_inds] - 1]
            # 5. Find pixels lying in the shadow of an object
            shadowed_pixel_labels = pixels_in_gt_shadow.clone()
            if pixels_in_gt_shadow.numel() > 0:
                pixel_idx, gt_idx = \
                    pixels_in_gt_shadow[:, 0], pixels_in_gt_shadow[:, 1]
                assert (assigned_gt_ids[pixel_idx] != gt_idx).all(), \
                    'Some pixels are dually assigned to ignore and gt!'
                shadowed_pixel_labels[:, 1] = gt_labels[gt_idx - 1]
                override = (
                    assigned_labels[pixel_idx] == shadowed_pixel_labels[:, 1])
                if self.foreground_dominate:
                    # When a pixel is both positive and shadowed, set it as
                    # pos
                    shadowed_pixel_labels = shadowed_pixel_labels[~override]
                else:
                    # When a pixel is both pos and shadowed, set it as
                    # shadowed
                    assigned_labels[pixel_idx[override]] = -1
                    assigned_gt_ids[pixel_idx[override]] = 0

        assign_result = AssignResult(
            num_gts, assigned_gt_ids, None, labels=assigned_labels)
        # Add shadowed_labels as assign_result property. Shape: (num_shadow, 2)
        assign_result.set_extra_property('shadowed_labels',
                                         shadowed_pixel_labels)
        return assign_result

    def assign_one_hot_gt_indices(self,
                                  is_bbox_in_gt_core,
                                  is_bbox_in_gt_shadow,
                                  gt_priority=None):
        """Assign only one gt index to each prior box.

        Gts with large gt_priority are more likely to be assigned.

        Args:
            is_bbox_in_gt_core (Tensor): Bool tensor indicating the bbox
                center is in the core area of a gt (e.g. 0-0.2).
                Shape: (num_prior, num_gt).
            is_bbox_in_gt_shadow (Tensor): Bool tensor indicating the bbox
                center is in the shadowed area of a gt (e.g. 0.2-0.5).
                Shape: (num_prior, num_gt).
            gt_priority (Tensor): Priorities of gts. The gt with a higher
                priority is more likely to be assigned to the bbox when the
                bbox match with multiple gts. Shape: (num_gt, ).

        Returns:
            tuple: Returns (assigned_gt_inds, shadowed_gt_inds).

                - assigned_gt_inds: The assigned gt index of each prior bbox \
                    (i.e. index from 1 to num_gts). Shape: (num_prior, ).
                - shadowed_gt_inds: shadowed gt indices. It is a tensor of \
                    shape (num_ignore, 2) with first column being the \
                    shadowed prior bbox indices and the second column the \
                    shadowed gt indices (1-based).
        """
        num_bboxes, num_gts = is_bbox_in_gt_core.shape

        if gt_priority is None:
            gt_priority = torch.arange(
                num_gts, device=is_bbox_in_gt_core.device)
        assert gt_priority.size(0) == num_gts
        # The bigger gt_priority, the more preferable to be assigned
        # The assigned inds are by default 0 (background)
        assigned_gt_inds = is_bbox_in_gt_core.new_zeros((num_bboxes, ),
                                                        dtype=torch.long)
        # Shadowed bboxes are assigned to be background. But the corresponding
        #   label is ignored during loss calculation, which is done through
        #   shadowed_gt_inds
        shadowed_gt_inds = torch.nonzero(is_bbox_in_gt_shadow, as_tuple=False)
        if is_bbox_in_gt_core.sum() == 0:  # No gt match
            shadowed_gt_inds[:, 1] += 1  # 1-based. For consistency issue
            return assigned_gt_inds, shadowed_gt_inds

        # The priority of each prior box and gt pair. If one prior box is
        #   matched to multiple gts, only the pair with the highest priority
        #   is saved
        pair_priority = is_bbox_in_gt_core.new_full((num_bboxes, num_gts),
                                                    -1,
                                                    dtype=torch.long)

        # Each bbox could match with multiple gts.
        # The following codes deal with this situation
        # Matched bboxes (to any gt). Shape: (num_pos_anchor, )
        inds_of_match = torch.any(is_bbox_in_gt_core, dim=1)
        # The matched gt index of each positive bbox. Length >= num_pos_anchor
        #   , since one bbox could match multiple gts
        matched_bbox_gt_inds = torch.nonzero(
            is_bbox_in_gt_core, as_tuple=False)[:, 1]
        # Assign priority to each bbox-gt pair.
        pair_priority[is_bbox_in_gt_core] = gt_priority[matched_bbox_gt_inds]
        _, argmax_priority = pair_priority[inds_of_match].max(dim=1)
        assigned_gt_inds[inds_of_match] = argmax_priority + 1  # 1-based
        # Zero-out the assigned anchor box to filter the shadowed gt indices
        is_bbox_in_gt_core[inds_of_match, argmax_priority] = 0
        # Concat the shadowed indices due to overlapping with that out side of
        #   effective scale. shape: (total_num_ignore, 2)
        shadowed_gt_inds = torch.cat(
            (shadowed_gt_inds, torch.nonzero(
                is_bbox_in_gt_core, as_tuple=False)),
            dim=0)
        # `is_bbox_in_gt_core` should be changed back to keep arguments
        # intact.
        is_bbox_in_gt_core[inds_of_match, argmax_priority] = 1
        # 1-based shadowed gt indices, to be consistent with
        # `assigned_gt_inds`
        if shadowed_gt_inds.numel() > 0:
            shadowed_gt_inds[:, 1] += 1
        return assigned_gt_inds, shadowed_gt_inds
(file_length 15,477 · avg_line_length 44.928783 · max_line_length 79 · extension_type py)
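The pos/neg regions come from scale_boxes, which shrinks (or grows) each gt around its center. A quick sketch, assuming mmdet is installed:

import torch
from mmdet.core.bbox.assigners.center_region_assigner import scale_boxes

gt = torch.tensor([[0., 0., 10., 10.]])
print(scale_boxes(gt, 0.2))  # tensor([[4., 4., 6., 6.]]): the inner 20% core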
mmdetection-master/mmdet/core/bbox/assigners/grid_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class GridAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each bbox.

    Each proposal will be assigned with `-1`, `0`, or a positive integer
    indicating the ground truth index.

    - -1: don't care
    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        pos_iou_thr (float): IoU threshold for positive bboxes.
        neg_iou_thr (float or tuple): IoU threshold for negative bboxes.
        min_pos_iou (float): Minimum iou for a bbox to be considered as a
            positive bbox. Positive samples can have smaller IoU than
            pos_iou_thr due to the 4th step (assign max IoU sample to each
            gt).
        gt_max_assign_all (bool): Whether to assign all bboxes with the same
            highest overlap with some gt to that gt.
    """

    def __init__(self,
                 pos_iou_thr,
                 neg_iou_thr,
                 min_pos_iou=.0,
                 gt_max_assign_all=True,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.pos_iou_thr = pos_iou_thr
        self.neg_iou_thr = neg_iou_thr
        self.min_pos_iou = min_pos_iou
        self.gt_max_assign_all = gt_max_assign_all
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self, bboxes, box_responsible_flags, gt_bboxes,
               gt_labels=None):
        """Assign gt to bboxes.

        The process is very much like the max iou assigner, except that
        positive samples are constrained within the cell that the gt boxes
        fell in.

        This method assigns a gt bbox to every bbox (proposal/anchor). Each
        bbox will be assigned with -1, 0, or a positive number. -1 means
        don't care, 0 means negative sample, positive number is the index
        (1-based) of assigned gt.

        The assignment is done in following steps, the order matters.

        1. assign every bbox to -1
        2. assign proposals whose iou with all gts <= neg_iou_thr to 0
        3. for each bbox within a cell, if the iou with its nearest gt >
           pos_iou_thr and the center of that gt falls inside the cell,
           assign it to that bbox
        4. for each gt bbox, assign its nearest proposals within the cell the
           gt bbox falls in to itself.

        Args:
            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
            box_responsible_flags (Tensor): flag to indicate whether box is
                responsible for prediction, shape(n, )
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_gts, num_bboxes = gt_bboxes.size(0), bboxes.size(0)

        # compute iou between all gt and bboxes
        overlaps = self.iou_calculator(gt_bboxes, bboxes)

        # 1. assign -1 by default
        assigned_gt_inds = overlaps.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)

        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            max_overlaps = overlaps.new_zeros((num_bboxes, ))
            if num_gts == 0:
                # No truth, assign everything to background
                assigned_gt_inds[:] = 0
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = overlaps.new_full((num_bboxes, ),
                                                    -1,
                                                    dtype=torch.long)
            return AssignResult(
                num_gts,
                assigned_gt_inds,
                max_overlaps,
                labels=assigned_labels)

        # 2. assign negative: below
        # for each anchor, which gt best overlaps with it
        # for each anchor, the max iou of all gts
        # shape of max_overlaps == argmax_overlaps == num_bboxes
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)

        if isinstance(self.neg_iou_thr, float):
            assigned_gt_inds[(max_overlaps >= 0)
                             & (max_overlaps <= self.neg_iou_thr)] = 0
        elif isinstance(self.neg_iou_thr, (tuple, list)):
            assert len(self.neg_iou_thr) == 2
            assigned_gt_inds[(max_overlaps > self.neg_iou_thr[0])
                             & (max_overlaps <= self.neg_iou_thr[1])] = 0

        # 3. assign positive: falls into responsible cell and above
        # positive IOU threshold, the order matters.
        # the prior condition of comparison is to filter out all
        # unrelated anchors, i.e. not box_responsible_flags
        overlaps[:, ~box_responsible_flags.type(torch.bool)] = -1.

        # calculate max_overlaps again, but this time we only consider IOUs
        # for anchors responsible for prediction
        max_overlaps, argmax_overlaps = overlaps.max(dim=0)

        # for each gt, which anchor best overlaps with it
        # for each gt, the max iou of all proposals
        # shape of gt_max_overlaps == gt_argmax_overlaps == num_gts
        gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1)

        pos_inds = (max_overlaps > self.pos_iou_thr) & \
            box_responsible_flags.type(torch.bool)
        assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1

        # 4. assign positive to max overlapped anchors within responsible
        # cell
        for i in range(num_gts):
            if gt_max_overlaps[i] > self.min_pos_iou:
                if self.gt_max_assign_all:
                    max_iou_inds = (overlaps[i, :] == gt_max_overlaps[i]) & \
                        box_responsible_flags.type(torch.bool)
                    assigned_gt_inds[max_iou_inds] = i + 1
                elif box_responsible_flags[gt_argmax_overlaps[i]]:
                    assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1

        # assign labels of positive anchors
        if gt_labels is not None:
            assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1)
            pos_inds = torch.nonzero(
                assigned_gt_inds > 0, as_tuple=False).squeeze()
            if pos_inds.numel() > 0:
                assigned_labels[pos_inds] = gt_labels[
                    assigned_gt_inds[pos_inds] - 1]
        else:
            assigned_labels = None

        return AssignResult(
            num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
6,863
42.719745
79
py
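A minimal usage sketch for the GridAssigner above; it assumes mmdet and torch are importable, and every tensor value is made up for illustration.

import torch
from mmdet.core.bbox.assigners import GridAssigner

assigner = GridAssigner(pos_iou_thr=0.5, neg_iou_thr=0.3)
bboxes = torch.Tensor([[0., 0., 10., 10.], [10., 10., 20., 20.]])
# only the first anchor sits in a cell responsible for prediction
box_responsible_flags = torch.tensor([True, False])
gt_bboxes = torch.Tensor([[0., 0., 10., 9.]])
assign_result = assigner.assign(bboxes, box_responsible_flags, gt_bboxes)
# expected: assign_result.gt_inds == tensor([1, 0]) -- the responsible
# anchor with IoU 0.9 is positive, the other (IoU 0) becomes background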
mmdetection
mmdetection-master/mmdet/core/bbox/assigners/hungarian_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from scipy.optimize import linear_sum_assignment

from ..builder import BBOX_ASSIGNERS
from ..match_costs import build_match_cost
from ..transforms import bbox_cxcywh_to_xyxy
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class HungarianAssigner(BaseAssigner):
    """Computes one-to-one matching between predictions and ground truth.

    This class computes an assignment between the targets and the predictions
    based on the costs. The costs are a weighted sum of three components:
    classification cost, regression L1 cost and regression iou cost. The
    targets don't include the no_object, so generally there are more
    predictions than targets. After the one-to-one matching, the un-matched
    are treated as backgrounds. Thus each query prediction will be assigned
    with `0` or a positive integer indicating the ground truth index:

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        cls_cost (dict, optional): Config of the classification match cost.
            Defaults to ``dict(type='ClassificationCost', weight=1.)``.
        reg_cost (dict, optional): Config of the regression L1 match cost.
            Defaults to ``dict(type='BBoxL1Cost', weight=1.0)``.
        iou_cost (dict, optional): Config of the regression iou match cost,
            where ``iou_mode`` can be "iou" (intersection over union),
            "iof" (intersection over foreground) or "giou" (generalized
            intersection over union). Defaults to
            ``dict(type='IoUCost', iou_mode='giou', weight=1.0)``.
    """

    def __init__(self,
                 cls_cost=dict(type='ClassificationCost', weight=1.),
                 reg_cost=dict(type='BBoxL1Cost', weight=1.0),
                 iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0)):
        self.cls_cost = build_match_cost(cls_cost)
        self.reg_cost = build_match_cost(reg_cost)
        self.iou_cost = build_match_cost(iou_cost)

    def assign(self,
               bbox_pred,
               cls_pred,
               gt_bboxes,
               gt_labels,
               img_meta,
               gt_bboxes_ignore=None,
               eps=1e-7):
        """Computes one-to-one matching based on the weighted costs.

        This method assigns each query prediction to a ground truth or
        background. The `assigned_gt_inds` with -1 means don't care,
        0 means negative sample, and positive number is the index (1-based)
        of assigned gt.
        The assignment is done in the following steps, the order matters.

        1. assign every prediction to -1
        2. compute the weighted costs
        3. do Hungarian matching on CPU based on the costs
        4. assign all to 0 (background) first, then for each matched pair
           between predictions and gts, treat this prediction as foreground
           and assign the corresponding gt index (plus 1) to it.

        Args:
            bbox_pred (Tensor): Predicted boxes with normalized coordinates
                (cx, cy, w, h), which are all in range [0, 1]. Shape
                [num_query, 4].
            cls_pred (Tensor): Predicted classification logits, shape
                [num_query, num_class].
            gt_bboxes (Tensor): Ground truth boxes with unnormalized
                coordinates (x1, y1, x2, y2). Shape [num_gt, 4].
            gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,).
            img_meta (dict): Meta information for current image.
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`. Default None.
            eps (int | float, optional): A value added to the denominator for
                numerical stability. Default 1e-7.

        Returns:
            :obj:`AssignResult`: The assigned result.
        """
        assert gt_bboxes_ignore is None, \
            'Only case when gt_bboxes_ignore is None is supported.'
        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)

        # 1. 
assign -1 by default assigned_gt_inds = bbox_pred.new_full((num_bboxes, ), -1, dtype=torch.long) assigned_labels = bbox_pred.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment if num_gts == 0: # No ground truth, assign all to background assigned_gt_inds[:] = 0 return AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels) img_h, img_w, _ = img_meta['img_shape'] factor = gt_bboxes.new_tensor([img_w, img_h, img_w, img_h]).unsqueeze(0) # 2. compute the weighted costs # classification and bboxcost. cls_cost = self.cls_cost(cls_pred, gt_labels) # regression L1 cost normalize_gt_bboxes = gt_bboxes / factor reg_cost = self.reg_cost(bbox_pred, normalize_gt_bboxes) # regression iou cost, defaultly giou is used in official DETR. bboxes = bbox_cxcywh_to_xyxy(bbox_pred) * factor iou_cost = self.iou_cost(bboxes, gt_bboxes) # weighted sum of above three costs cost = cls_cost + reg_cost + iou_cost # 3. do Hungarian matching on CPU using linear_sum_assignment cost = cost.detach().cpu() matched_row_inds, matched_col_inds = linear_sum_assignment(cost) matched_row_inds = torch.from_numpy(matched_row_inds).to( bbox_pred.device) matched_col_inds = torch.from_numpy(matched_col_inds).to( bbox_pred.device) # 4. assign backgrounds and foregrounds # assign all indices to backgrounds first assigned_gt_inds[:] = 0 # assign foregrounds based on matching results assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] return AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels)
6,439
45
79
py
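A DETR-style toy call for the HungarianAssigner above, assuming scipy and mmdet are available; the predictions are random placeholders, so only the matching structure (exactly one query per gt) is meaningful here.

import torch
from mmdet.core.bbox.assigners import HungarianAssigner

assigner = HungarianAssigner()
bbox_pred = torch.rand(4, 4)                       # normalized (cx, cy, w, h)
cls_pred = torch.rand(4, 3)                        # classification logits
gt_bboxes = torch.Tensor([[10., 10., 60., 80.]])   # unnormalized xyxy
gt_labels = torch.LongTensor([1])
img_meta = dict(img_shape=(100, 100, 3))
result = assigner.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels, img_meta)
assert (result.gt_inds == 1).sum() == 1            # one-to-one: single match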
mmdetection
mmdetection-master/mmdet/core/bbox/assigners/mask_hungarian_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from scipy.optimize import linear_sum_assignment from mmdet.core.bbox.builder import BBOX_ASSIGNERS from mmdet.core.bbox.match_costs.builder import build_match_cost from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class MaskHungarianAssigner(BaseAssigner): """Computes one-to-one matching between predictions and ground truth for mask. This class computes an assignment between the targets and the predictions based on the costs. The costs are weighted sum of three components: classification cost, mask focal cost and mask dice cost. The targets don't include the no_object, so generally there are more predictions than targets. After the one-to-one matching, the un-matched are treated as backgrounds. Thus each query prediction will be assigned with `0` or a positive integer indicating the ground truth index: - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: cls_cost (:obj:`mmcv.ConfigDict` | dict): Classification cost config. mask_cost (:obj:`mmcv.ConfigDict` | dict): Mask cost config. dice_cost (:obj:`mmcv.ConfigDict` | dict): Dice cost config. """ def __init__(self, cls_cost=dict(type='ClassificationCost', weight=1.0), mask_cost=dict( type='FocalLossCost', weight=1.0, binary_input=True), dice_cost=dict(type='DiceCost', weight=1.0)): self.cls_cost = build_match_cost(cls_cost) self.mask_cost = build_match_cost(mask_cost) self.dice_cost = build_match_cost(dice_cost) def assign(self, cls_pred, mask_pred, gt_labels, gt_mask, img_meta, gt_bboxes_ignore=None, eps=1e-7): """Computes one-to-one matching based on the weighted costs. Args: cls_pred (Tensor | None): Class prediction in shape (num_query, cls_out_channels). mask_pred (Tensor): Mask prediction in shape (num_query, H, W). gt_labels (Tensor): Label of 'gt_mask'in shape = (num_gt, ). gt_mask (Tensor): Ground truth mask in shape = (num_gt, H, W). img_meta (dict): Meta information for current image. gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`. Default None. eps (int | float, optional): A value added to the denominator for numerical stability. Default 1e-7. Returns: :obj:`AssignResult`: The assigned result. """ assert gt_bboxes_ignore is None, \ 'Only case when gt_bboxes_ignore is None is supported.' # K-Net sometimes passes cls_pred=None to this assigner. # So we should use the shape of mask_pred num_gt, num_query = gt_labels.shape[0], mask_pred.shape[0] # 1. assign -1 by default assigned_gt_inds = mask_pred.new_full((num_query, ), -1, dtype=torch.long) assigned_labels = mask_pred.new_full((num_query, ), -1, dtype=torch.long) if num_gt == 0 or num_query == 0: # No ground truth or boxes, return empty assignment if num_gt == 0: # No ground truth, assign all to background assigned_gt_inds[:] = 0 return AssignResult( num_gt, assigned_gt_inds, None, labels=assigned_labels) # 2. compute the weighted costs # classification and maskcost. if self.cls_cost.weight != 0 and cls_pred is not None: cls_cost = self.cls_cost(cls_pred, gt_labels) else: cls_cost = 0 if self.mask_cost.weight != 0: # mask_pred shape = [num_query, h, w] # gt_mask shape = [num_gt, h, w] # mask_cost shape = [num_query, num_gt] mask_cost = self.mask_cost(mask_pred, gt_mask) else: mask_cost = 0 if self.dice_cost.weight != 0: dice_cost = self.dice_cost(mask_pred, gt_mask) else: dice_cost = 0 cost = cls_cost + mask_cost + dice_cost # 3. 
do Hungarian matching on CPU using linear_sum_assignment cost = cost.detach().cpu() matched_row_inds, matched_col_inds = linear_sum_assignment(cost) matched_row_inds = torch.from_numpy(matched_row_inds).to( mask_pred.device) matched_col_inds = torch.from_numpy(matched_col_inds).to( mask_pred.device) # 4. assign backgrounds and foregrounds # assign all indices to backgrounds first assigned_gt_inds[:] = 0 # assign foregrounds based on matching results assigned_gt_inds[matched_row_inds] = matched_col_inds + 1 assigned_labels[matched_row_inds] = gt_labels[matched_col_inds] return AssignResult( num_gt, assigned_gt_inds, None, labels=assigned_labels)
5,312
41.166667
77
py
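The analogous mask-based call for MaskHungarianAssigner; the random mask logits and hand-drawn gt masks are placeholders only, and img_meta is unused by the default costs, so None is passed.

import torch
from mmdet.core.bbox.assigners import MaskHungarianAssigner

assigner = MaskHungarianAssigner()
num_query, h, w = 5, 16, 16
cls_pred = torch.rand(num_query, 3)           # logits over 3 classes
mask_pred = torch.rand(num_query, h, w)       # mask logits
gt_labels = torch.LongTensor([0, 2])
gt_mask = torch.zeros(2, h, w)
gt_mask[0, :8, :8] = 1.                       # two disjoint toy instances
gt_mask[1, 8:, 8:] = 1.
result = assigner.assign(cls_pred, mask_pred, gt_labels, gt_mask, None)
# exactly two queries receive positive (1-based) gt indices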
mmdetection
mmdetection-master/mmdet/core/bbox/assigners/max_iou_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_ASSIGNERS from ..iou_calculators import build_iou_calculator from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class MaxIoUAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, or a semi-positive integer indicating the ground truth index. - -1: negative sample, no assigned gt - semi-positive integer: positive sample, index (0-based) of assigned gt Args: pos_iou_thr (float): IoU threshold for positive bboxes. neg_iou_thr (float or tuple): IoU threshold for negative bboxes. min_pos_iou (float): Minimum iou for a bbox to be considered as a positive bbox. Positive samples can have smaller IoU than pos_iou_thr due to the 4th step (assign max IoU sample to each gt). `min_pos_iou` is set to avoid assigning bboxes that have extremely small iou with GT as positive samples. It brings about 0.3 mAP improvements in 1x schedule but does not affect the performance of 3x schedule. More comparisons can be found in `PR #7464 <https://github.com/open-mmlab/mmdetection/pull/7464>`_. gt_max_assign_all (bool): Whether to assign all bboxes with the same highest overlap with some gt to that gt. ignore_iof_thr (float): IoF threshold for ignoring bboxes (if `gt_bboxes_ignore` is specified). Negative values mean not ignoring any bboxes. ignore_wrt_candidates (bool): Whether to compute the iof between `bboxes` and `gt_bboxes_ignore`, or the contrary. match_low_quality (bool): Whether to allow low quality matches. This is usually allowed for RPN and single stage detectors, but not allowed in the second stage. Details are demonstrated in Step 4. gpu_assign_thr (int): The upper bound of the number of GT for GPU assign. When the number of gt is above this threshold, will assign on CPU device. Negative values mean not assign on CPU. """ def __init__(self, pos_iou_thr, neg_iou_thr, min_pos_iou=.0, gt_max_assign_all=True, ignore_iof_thr=-1, ignore_wrt_candidates=True, match_low_quality=True, gpu_assign_thr=-1, iou_calculator=dict(type='BboxOverlaps2D')): self.pos_iou_thr = pos_iou_thr self.neg_iou_thr = neg_iou_thr self.min_pos_iou = min_pos_iou self.gt_max_assign_all = gt_max_assign_all self.ignore_iof_thr = ignore_iof_thr self.ignore_wrt_candidates = ignore_wrt_candidates self.gpu_assign_thr = gpu_assign_thr self.match_low_quality = match_low_quality self.iou_calculator = build_iou_calculator(iou_calculator) def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None): """Assign gt to bboxes. This method assign a gt bbox to every bbox (proposal/anchor), each bbox will be assigned with -1, or a semi-positive number. -1 means negative sample, semi-positive number is the index (0-based) of assigned gt. The assignment is done in following steps, the order matters. 1. assign every bbox to the background 2. assign proposals whose iou with all gts < neg_iou_thr to 0 3. for each bbox, if the iou with its nearest gt >= pos_iou_thr, assign it to that bbox 4. for each gt bbox, assign its nearest proposals (may be more than one) to itself Args: bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4). gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. 
Example: >>> self = MaxIoUAssigner(0.5, 0.5) >>> bboxes = torch.Tensor([[0, 0, 10, 10], [10, 10, 20, 20]]) >>> gt_bboxes = torch.Tensor([[0, 0, 10, 9]]) >>> assign_result = self.assign(bboxes, gt_bboxes) >>> expected_gt_inds = torch.LongTensor([1, 0]) >>> assert torch.all(assign_result.gt_inds == expected_gt_inds) """ assign_on_cpu = True if (self.gpu_assign_thr > 0) and ( gt_bboxes.shape[0] > self.gpu_assign_thr) else False # compute overlap and assign gt on CPU when number of GT is large if assign_on_cpu: device = bboxes.device bboxes = bboxes.cpu() gt_bboxes = gt_bboxes.cpu() if gt_bboxes_ignore is not None: gt_bboxes_ignore = gt_bboxes_ignore.cpu() if gt_labels is not None: gt_labels = gt_labels.cpu() overlaps = self.iou_calculator(gt_bboxes, bboxes) if (self.ignore_iof_thr > 0 and gt_bboxes_ignore is not None and gt_bboxes_ignore.numel() > 0 and bboxes.numel() > 0): if self.ignore_wrt_candidates: ignore_overlaps = self.iou_calculator( bboxes, gt_bboxes_ignore, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=1) else: ignore_overlaps = self.iou_calculator( gt_bboxes_ignore, bboxes, mode='iof') ignore_max_overlaps, _ = ignore_overlaps.max(dim=0) overlaps[:, ignore_max_overlaps > self.ignore_iof_thr] = -1 assign_result = self.assign_wrt_overlaps(overlaps, gt_labels) if assign_on_cpu: assign_result.gt_inds = assign_result.gt_inds.to(device) assign_result.max_overlaps = assign_result.max_overlaps.to(device) if assign_result.labels is not None: assign_result.labels = assign_result.labels.to(device) return assign_result def assign_wrt_overlaps(self, overlaps, gt_labels=None): """Assign w.r.t. the overlaps of bboxes with gts. Args: overlaps (Tensor): Overlaps between k gt_bboxes and n bboxes, shape(k, n). gt_labels (Tensor, optional): Labels of k gt_bboxes, shape (k, ). Returns: :obj:`AssignResult`: The assign result. """ num_gts, num_bboxes = overlaps.size(0), overlaps.size(1) # 1. assign -1 by default assigned_gt_inds = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = overlaps.new_zeros((num_bboxes, )) if num_gts == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = overlaps.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) # for each anchor, which gt best overlaps with it # for each anchor, the max iou of all gts max_overlaps, argmax_overlaps = overlaps.max(dim=0) # for each gt, which anchor best overlaps with it # for each gt, the max iou of all proposals gt_max_overlaps, gt_argmax_overlaps = overlaps.max(dim=1) # 2. assign negative: below # the negative inds are set to be 0 if isinstance(self.neg_iou_thr, float): assigned_gt_inds[(max_overlaps >= 0) & (max_overlaps < self.neg_iou_thr)] = 0 elif isinstance(self.neg_iou_thr, tuple): assert len(self.neg_iou_thr) == 2 assigned_gt_inds[(max_overlaps >= self.neg_iou_thr[0]) & (max_overlaps < self.neg_iou_thr[1])] = 0 # 3. assign positive: above positive IoU threshold pos_inds = max_overlaps >= self.pos_iou_thr assigned_gt_inds[pos_inds] = argmax_overlaps[pos_inds] + 1 if self.match_low_quality: # Low-quality matching will overwrite the assigned_gt_inds assigned # in Step 3. Thus, the assigned gt might not be the best one for # prediction. 
# For example, if bbox A has 0.9 and 0.8 iou with GT bbox 1 & 2, # bbox 1 will be assigned as the best target for bbox A in step 3. # However, if GT bbox 2's gt_argmax_overlaps = A, bbox A's # assigned_gt_inds will be overwritten to be bbox 2. # This might be the reason that it is not used in ROI Heads. for i in range(num_gts): if gt_max_overlaps[i] >= self.min_pos_iou: if self.gt_max_assign_all: max_iou_inds = overlaps[i, :] == gt_max_overlaps[i] assigned_gt_inds[max_iou_inds] = i + 1 else: assigned_gt_inds[gt_argmax_overlaps[i]] = i + 1 if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels)
10,168
45.43379
79
py
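Beyond the doctest in the docstring above, a sketch of the ignore path: with ignore_iof_thr set, proposals covered by a crowd box are excluded rather than sampled as negatives. All values are illustrative.

import torch
from mmdet.core.bbox.assigners import MaxIoUAssigner

assigner = MaxIoUAssigner(
    pos_iou_thr=0.7, neg_iou_thr=(0.1, 0.3), ignore_iof_thr=0.5)
bboxes = torch.Tensor([[0., 0., 10., 10.], [50., 50., 60., 60.]])
gt_bboxes = torch.Tensor([[0., 0., 10., 9.]])
gt_bboxes_ignore = torch.Tensor([[45., 45., 65., 65.]])  # e.g. a crowd box
result = assigner.assign(bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore)
# expected: result.gt_inds == tensor([1, -1]); the second proposal lies
# inside the ignore region (iof > 0.5) and misses the (0.1, 0.3) negative band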
mmdetection
mmdetection-master/mmdet/core/bbox/assigners/point_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class PointAssigner(BaseAssigner):
    """Assign a corresponding gt bbox or background to each point.

    Each point will be assigned with `0`, or a positive integer
    indicating the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt
    """

    def __init__(self, scale=4, pos_num=3):
        self.scale = scale
        self.pos_num = pos_num

    def assign(self, points, gt_bboxes, gt_bboxes_ignore=None,
               gt_labels=None):
        """Assign gt to points.

        This method assigns a gt bbox to every point, and each point will
        be assigned with 0 or a positive number. 0 means negative sample
        (background), and a positive number is the index (1-based) of the
        assigned gt.
        The assignment is done in following steps, the order matters.

        1. assign every point to 0 (background)
        2. a point is assigned to some gt bbox if
           (i) the point is within the k closest points to the gt bbox and
           (ii) the distance between this point and the gt is smaller than
           that to other gt bboxes

        Args:
            points (Tensor): points to be assigned, shape(n, 3) while last
                dimension stands for (x, y, stride).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
                NOTE: currently unused.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).

        Returns:
            :obj:`AssignResult`: The assign result.
        """
        num_points = points.shape[0]
        num_gts = gt_bboxes.shape[0]

        if num_gts == 0 or num_points == 0:
            # If no truth assign everything to the background
            assigned_gt_inds = points.new_full((num_points, ),
                                               0,
                                               dtype=torch.long)
            if gt_labels is None:
                assigned_labels = None
            else:
                assigned_labels = points.new_full((num_points, ),
                                                  -1,
                                                  dtype=torch.long)
            return AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)

        points_xy = points[:, :2]
        points_stride = points[:, 2]
        points_lvl = torch.log2(
            points_stride).int()  # [3...,4...,5...,6...,7...]
lvl_min, lvl_max = points_lvl.min(), points_lvl.max() # assign gt box gt_bboxes_xy = (gt_bboxes[:, :2] + gt_bboxes[:, 2:]) / 2 gt_bboxes_wh = (gt_bboxes[:, 2:] - gt_bboxes[:, :2]).clamp(min=1e-6) scale = self.scale gt_bboxes_lvl = ((torch.log2(gt_bboxes_wh[:, 0] / scale) + torch.log2(gt_bboxes_wh[:, 1] / scale)) / 2).int() gt_bboxes_lvl = torch.clamp(gt_bboxes_lvl, min=lvl_min, max=lvl_max) # stores the assigned gt index of each point assigned_gt_inds = points.new_zeros((num_points, ), dtype=torch.long) # stores the assigned gt dist (to this point) of each point assigned_gt_dist = points.new_full((num_points, ), float('inf')) points_range = torch.arange(points.shape[0]) for idx in range(num_gts): gt_lvl = gt_bboxes_lvl[idx] # get the index of points in this level lvl_idx = gt_lvl == points_lvl points_index = points_range[lvl_idx] # get the points in this level lvl_points = points_xy[lvl_idx, :] # get the center point of gt gt_point = gt_bboxes_xy[[idx], :] # get width and height of gt gt_wh = gt_bboxes_wh[[idx], :] # compute the distance between gt center and # all points in this level points_gt_dist = ((lvl_points - gt_point) / gt_wh).norm(dim=1) # find the nearest k points to gt center in this level min_dist, min_dist_index = torch.topk( points_gt_dist, self.pos_num, largest=False) # the index of nearest k points to gt center in this level min_dist_points_index = points_index[min_dist_index] # The less_than_recorded_index stores the index # of min_dist that is less then the assigned_gt_dist. Where # assigned_gt_dist stores the dist from previous assigned gt # (if exist) to each point. less_than_recorded_index = min_dist < assigned_gt_dist[ min_dist_points_index] # The min_dist_points_index stores the index of points satisfy: # (1) it is k nearest to current gt center in this level. # (2) it is closer to current gt center than other gt center. min_dist_points_index = min_dist_points_index[ less_than_recorded_index] # assign the result assigned_gt_inds[min_dist_points_index] = idx + 1 assigned_gt_dist[min_dist_points_index] = min_dist[ less_than_recorded_index] if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_points, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None return AssignResult( num_gts, assigned_gt_inds, None, labels=assigned_labels)
5,995
43.414815
79
py
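A small call for the PointAssigner above; each point carries (x, y, stride), and the single stride-8 level here is purely illustrative.

import torch
from mmdet.core.bbox.assigners import PointAssigner

assigner = PointAssigner(scale=4, pos_num=1)
points = torch.Tensor([[4., 4., 8.], [12., 4., 8.], [4., 12., 8.]])
gt_bboxes = torch.Tensor([[0., 0., 30., 30.]])
result = assigner.assign(points, gt_bboxes)
# the gt is mapped to the level whose stride matches its size; only the
# pos_num points nearest to its center (normalized by gt size) get index 1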
mmdetection
mmdetection-master/mmdet/core/bbox/assigners/region_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.core import anchor_inside_flags from ..builder import BBOX_ASSIGNERS from .assign_result import AssignResult from .base_assigner import BaseAssigner def calc_region(bbox, ratio, stride, featmap_size=None): """Calculate region of the box defined by the ratio, the ratio is from the center of the box to every edge.""" # project bbox on the feature f_bbox = bbox / stride x1 = torch.round((1 - ratio) * f_bbox[0] + ratio * f_bbox[2]) y1 = torch.round((1 - ratio) * f_bbox[1] + ratio * f_bbox[3]) x2 = torch.round(ratio * f_bbox[0] + (1 - ratio) * f_bbox[2]) y2 = torch.round(ratio * f_bbox[1] + (1 - ratio) * f_bbox[3]) if featmap_size is not None: x1 = x1.clamp(min=0, max=featmap_size[1]) y1 = y1.clamp(min=0, max=featmap_size[0]) x2 = x2.clamp(min=0, max=featmap_size[1]) y2 = y2.clamp(min=0, max=featmap_size[0]) return (x1, y1, x2, y2) def anchor_ctr_inside_region_flags(anchors, stride, region): """Get the flag indicate whether anchor centers are inside regions.""" x1, y1, x2, y2 = region f_anchors = anchors / stride x = (f_anchors[:, 0] + f_anchors[:, 2]) * 0.5 y = (f_anchors[:, 1] + f_anchors[:, 3]) * 0.5 flags = (x >= x1) & (x <= x2) & (y >= y1) & (y <= y2) return flags @BBOX_ASSIGNERS.register_module() class RegionAssigner(BaseAssigner): """Assign a corresponding gt bbox or background to each bbox. Each proposals will be assigned with `-1`, `0`, or a positive integer indicating the ground truth index. - -1: don't care - 0: negative sample, no assigned gt - positive integer: positive sample, index (1-based) of assigned gt Args: center_ratio: ratio of the region in the center of the bbox to define positive sample. ignore_ratio: ratio of the region to define ignore samples. """ def __init__(self, center_ratio=0.2, ignore_ratio=0.5): self.center_ratio = center_ratio self.ignore_ratio = ignore_ratio def assign(self, mlvl_anchors, mlvl_valid_flags, gt_bboxes, img_meta, featmap_sizes, anchor_scale, anchor_strides, gt_bboxes_ignore=None, gt_labels=None, allowed_border=0): """Assign gt to anchors. This method assign a gt bbox to every bbox (proposal/anchor), each bbox will be assigned with -1, 0, or a positive number. -1 means don't care, 0 means negative sample, positive number is the index (1-based) of assigned gt. The assignment is done in following steps, and the order matters. 1. Assign every anchor to 0 (negative) 2. (For each gt_bboxes) Compute ignore flags based on ignore_region then assign -1 to anchors w.r.t. ignore flags 3. (For each gt_bboxes) Compute pos flags based on center_region then assign gt_bboxes to anchors w.r.t. pos flags 4. (For each gt_bboxes) Compute ignore flags based on adjacent anchor level then assign -1 to anchors w.r.t. ignore flags 5. Assign anchor outside of image to -1 Args: mlvl_anchors (list[Tensor]): Multi level anchors. mlvl_valid_flags (list[Tensor]): Multi level valid flags. gt_bboxes (Tensor): Ground truth bboxes of image img_meta (dict): Meta info of image. featmap_sizes (list[Tensor]): Feature mapsize each level anchor_scale (int): Scale of the anchor. anchor_strides (list[int]): Stride of the anchor. gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4). gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ). allowed_border (int, optional): The border to allow the valid anchor. Defaults to 0. Returns: :obj:`AssignResult`: The assign result. 
""" if gt_bboxes_ignore is not None: raise NotImplementedError num_gts = gt_bboxes.shape[0] num_bboxes = sum(x.shape[0] for x in mlvl_anchors) if num_gts == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = gt_bboxes.new_zeros((num_bboxes, )) assigned_gt_inds = gt_bboxes.new_zeros((num_bboxes, ), dtype=torch.long) if gt_labels is None: assigned_labels = None else: assigned_labels = gt_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gts, assigned_gt_inds, max_overlaps, labels=assigned_labels) num_lvls = len(mlvl_anchors) r1 = (1 - self.center_ratio) / 2 r2 = (1 - self.ignore_ratio) / 2 scale = torch.sqrt((gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (gt_bboxes[:, 3] - gt_bboxes[:, 1])) min_anchor_size = scale.new_full( (1, ), float(anchor_scale * anchor_strides[0])) target_lvls = torch.floor( torch.log2(scale) - torch.log2(min_anchor_size) + 0.5) target_lvls = target_lvls.clamp(min=0, max=num_lvls - 1).long() # 1. assign 0 (negative) by default mlvl_assigned_gt_inds = [] mlvl_ignore_flags = [] for lvl in range(num_lvls): h, w = featmap_sizes[lvl] assert h * w == mlvl_anchors[lvl].shape[0] assigned_gt_inds = gt_bboxes.new_full((h * w, ), 0, dtype=torch.long) ignore_flags = torch.zeros_like(assigned_gt_inds) mlvl_assigned_gt_inds.append(assigned_gt_inds) mlvl_ignore_flags.append(ignore_flags) for gt_id in range(num_gts): lvl = target_lvls[gt_id].item() featmap_size = featmap_sizes[lvl] stride = anchor_strides[lvl] anchors = mlvl_anchors[lvl] gt_bbox = gt_bboxes[gt_id, :4] # Compute regions ignore_region = calc_region(gt_bbox, r2, stride, featmap_size) ctr_region = calc_region(gt_bbox, r1, stride, featmap_size) # 2. Assign -1 to ignore flags ignore_flags = anchor_ctr_inside_region_flags( anchors, stride, ignore_region) mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 # 3. Assign gt_bboxes to pos flags pos_flags = anchor_ctr_inside_region_flags(anchors, stride, ctr_region) mlvl_assigned_gt_inds[lvl][pos_flags] = gt_id + 1 # 4. Assign -1 to ignore adjacent lvl if lvl > 0: d_lvl = lvl - 1 d_anchors = mlvl_anchors[d_lvl] d_featmap_size = featmap_sizes[d_lvl] d_stride = anchor_strides[d_lvl] d_ignore_region = calc_region(gt_bbox, r2, d_stride, d_featmap_size) ignore_flags = anchor_ctr_inside_region_flags( d_anchors, d_stride, d_ignore_region) mlvl_ignore_flags[d_lvl][ignore_flags] = 1 if lvl < num_lvls - 1: u_lvl = lvl + 1 u_anchors = mlvl_anchors[u_lvl] u_featmap_size = featmap_sizes[u_lvl] u_stride = anchor_strides[u_lvl] u_ignore_region = calc_region(gt_bbox, r2, u_stride, u_featmap_size) ignore_flags = anchor_ctr_inside_region_flags( u_anchors, u_stride, u_ignore_region) mlvl_ignore_flags[u_lvl][ignore_flags] = 1 # 4. (cont.) Assign -1 to ignore adjacent lvl for lvl in range(num_lvls): ignore_flags = mlvl_ignore_flags[lvl] mlvl_assigned_gt_inds[lvl][ignore_flags] = -1 # 5. 
Assign -1 to anchors outside of the image
        flat_assigned_gt_inds = torch.cat(mlvl_assigned_gt_inds)
        flat_anchors = torch.cat(mlvl_anchors)
        flat_valid_flags = torch.cat(mlvl_valid_flags)
        assert (flat_assigned_gt_inds.shape[0] == flat_anchors.shape[0] ==
                flat_valid_flags.shape[0])
        inside_flags = anchor_inside_flags(flat_anchors, flat_valid_flags,
                                           img_meta['img_shape'],
                                           allowed_border)
        outside_flags = ~inside_flags
        flat_assigned_gt_inds[outside_flags] = -1

        if gt_labels is not None:
            assigned_labels = torch.zeros_like(flat_assigned_gt_inds)
            # use the flattened assignments here; the per-level tensor left
            # over from the loop above would have the wrong shape
            pos_flags = flat_assigned_gt_inds > 0
            assigned_labels[pos_flags] = gt_labels[
                flat_assigned_gt_inds[pos_flags] - 1]
        else:
            assigned_labels = None

        return AssignResult(
            num_gts, flat_assigned_gt_inds, None, labels=assigned_labels)
9,473
41.484305
79
py
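A single-level sketch for the RegionAssigner above on a 4x4, stride-8 feature map; the hand-built anchor grid and image size are assumptions for illustration only.

import torch
from mmdet.core.bbox.assigners import RegionAssigner

assigner = RegionAssigner(center_ratio=0.2, ignore_ratio=0.5)
stride, featmap_sizes = 8, [(4, 4)]
ys, xs = torch.meshgrid(torch.arange(4), torch.arange(4))
ctrs = torch.stack([xs.reshape(-1), ys.reshape(-1)],
                   dim=1).float() * stride + stride / 2
mlvl_anchors = [torch.cat([ctrs - stride / 2, ctrs + stride / 2], dim=1)]
mlvl_valid_flags = [torch.ones(16, dtype=torch.bool)]
gt_bboxes = torch.Tensor([[4., 4., 20., 20.]])
img_meta = dict(img_shape=(32, 32, 3))
result = assigner.assign(mlvl_anchors, mlvl_valid_flags, gt_bboxes, img_meta,
                         featmap_sizes, anchor_scale=1,
                         anchor_strides=[stride])
# anchors whose centers fall in the gt's center region become positive;
# anchors crossing the image border are marked -1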
mmdetection
mmdetection-master/mmdet/core/bbox/assigners/sim_ota_assigner.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import torch import torch.nn.functional as F from ..builder import BBOX_ASSIGNERS from ..iou_calculators import bbox_overlaps from .assign_result import AssignResult from .base_assigner import BaseAssigner @BBOX_ASSIGNERS.register_module() class SimOTAAssigner(BaseAssigner): """Computes matching between predictions and ground truth. Args: center_radius (int | float, optional): Ground truth center size to judge whether a prior is in center. Default 2.5. candidate_topk (int, optional): The candidate top-k which used to get top-k ious to calculate dynamic-k. Default 10. iou_weight (int | float, optional): The scale factor for regression iou cost. Default 3.0. cls_weight (int | float, optional): The scale factor for classification cost. Default 1.0. """ def __init__(self, center_radius=2.5, candidate_topk=10, iou_weight=3.0, cls_weight=1.0): self.center_radius = center_radius self.candidate_topk = candidate_topk self.iou_weight = iou_weight self.cls_weight = cls_weight def assign(self, pred_scores, priors, decoded_bboxes, gt_bboxes, gt_labels, gt_bboxes_ignore=None, eps=1e-7): """Assign gt to priors using SimOTA. It will switch to CPU mode when GPU is out of memory. Args: pred_scores (Tensor): Classification scores of one image, a 2D-Tensor with shape [num_priors, num_classes] priors (Tensor): All priors of one image, a 2D-Tensor with shape [num_priors, 4] in [cx, xy, stride_w, stride_y] format. decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. gt_labels (Tensor): Ground truth labels of one image, a Tensor with shape [num_gts]. gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. eps (float): A value added to the denominator for numerical stability. Default 1e-7. Returns: assign_result (obj:`AssignResult`): The assigned result. """ try: assign_result = self._assign(pred_scores, priors, decoded_bboxes, gt_bboxes, gt_labels, gt_bboxes_ignore, eps) return assign_result except RuntimeError: origin_device = pred_scores.device warnings.warn('OOM RuntimeError is raised due to the huge memory ' 'cost during label assignment. CPU mode is applied ' 'in this batch. If you want to avoid this issue, ' 'try to reduce the batch size or image size.') torch.cuda.empty_cache() pred_scores = pred_scores.cpu() priors = priors.cpu() decoded_bboxes = decoded_bboxes.cpu() gt_bboxes = gt_bboxes.cpu().float() gt_labels = gt_labels.cpu() assign_result = self._assign(pred_scores, priors, decoded_bboxes, gt_bboxes, gt_labels, gt_bboxes_ignore, eps) assign_result.gt_inds = assign_result.gt_inds.to(origin_device) assign_result.max_overlaps = assign_result.max_overlaps.to( origin_device) assign_result.labels = assign_result.labels.to(origin_device) return assign_result def _assign(self, pred_scores, priors, decoded_bboxes, gt_bboxes, gt_labels, gt_bboxes_ignore=None, eps=1e-7): """Assign gt to priors using SimOTA. Args: pred_scores (Tensor): Classification scores of one image, a 2D-Tensor with shape [num_priors, num_classes] priors (Tensor): All priors of one image, a 2D-Tensor with shape [num_priors, 4] in [cx, xy, stride_w, stride_y] format. decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format. 
gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format. gt_labels (Tensor): Ground truth labels of one image, a Tensor with shape [num_gts]. gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are labelled as `ignored`, e.g., crowd boxes in COCO. eps (float): A value added to the denominator for numerical stability. Default 1e-7. Returns: :obj:`AssignResult`: The assigned result. """ INF = 100000.0 num_gt = gt_bboxes.size(0) num_bboxes = decoded_bboxes.size(0) # assign 0 by default assigned_gt_inds = decoded_bboxes.new_full((num_bboxes, ), 0, dtype=torch.long) valid_mask, is_in_boxes_and_center = self.get_in_gt_and_in_center_info( priors, gt_bboxes) valid_decoded_bbox = decoded_bboxes[valid_mask] valid_pred_scores = pred_scores[valid_mask] num_valid = valid_decoded_bbox.size(0) if num_gt == 0 or num_bboxes == 0 or num_valid == 0: # No ground truth or boxes, return empty assignment max_overlaps = decoded_bboxes.new_zeros((num_bboxes, )) if num_gt == 0: # No truth, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = decoded_bboxes.new_full((num_bboxes, ), -1, dtype=torch.long) return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) pairwise_ious = bbox_overlaps(valid_decoded_bbox, gt_bboxes) iou_cost = -torch.log(pairwise_ious + eps) gt_onehot_label = ( F.one_hot(gt_labels.to(torch.int64), pred_scores.shape[-1]).float().unsqueeze(0).repeat( num_valid, 1, 1)) valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1) cls_cost = ( F.binary_cross_entropy( valid_pred_scores.to(dtype=torch.float32).sqrt_(), gt_onehot_label, reduction='none', ).sum(-1).to(dtype=valid_pred_scores.dtype)) cost_matrix = ( cls_cost * self.cls_weight + iou_cost * self.iou_weight + (~is_in_boxes_and_center) * INF) matched_pred_ious, matched_gt_inds = \ self.dynamic_k_matching( cost_matrix, pairwise_ious, num_gt, valid_mask) # convert to AssignResult format assigned_gt_inds[valid_mask] = matched_gt_inds + 1 assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long() max_overlaps = assigned_gt_inds.new_full((num_bboxes, ), -INF, dtype=torch.float32) max_overlaps[valid_mask] = matched_pred_ious return AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) def get_in_gt_and_in_center_info(self, priors, gt_bboxes): num_gt = gt_bboxes.size(0) repeated_x = priors[:, 0].unsqueeze(1).repeat(1, num_gt) repeated_y = priors[:, 1].unsqueeze(1).repeat(1, num_gt) repeated_stride_x = priors[:, 2].unsqueeze(1).repeat(1, num_gt) repeated_stride_y = priors[:, 3].unsqueeze(1).repeat(1, num_gt) # is prior centers in gt bboxes, shape: [n_prior, n_gt] l_ = repeated_x - gt_bboxes[:, 0] t_ = repeated_y - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - repeated_x b_ = gt_bboxes[:, 3] - repeated_y deltas = torch.stack([l_, t_, r_, b_], dim=1) is_in_gts = deltas.min(dim=1).values > 0 is_in_gts_all = is_in_gts.sum(dim=1) > 0 # is prior centers in gt centers gt_cxs = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0 gt_cys = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0 ct_box_l = gt_cxs - self.center_radius * repeated_stride_x ct_box_t = gt_cys - self.center_radius * repeated_stride_y ct_box_r = gt_cxs + self.center_radius * repeated_stride_x ct_box_b = gt_cys + self.center_radius * repeated_stride_y cl_ = repeated_x - ct_box_l ct_ = repeated_y - ct_box_t cr_ = ct_box_r - repeated_x cb_ = 
ct_box_b - repeated_y ct_deltas = torch.stack([cl_, ct_, cr_, cb_], dim=1) is_in_cts = ct_deltas.min(dim=1).values > 0 is_in_cts_all = is_in_cts.sum(dim=1) > 0 # in boxes or in centers, shape: [num_priors] is_in_gts_or_centers = is_in_gts_all | is_in_cts_all # both in boxes and centers, shape: [num_fg, num_gt] is_in_boxes_and_centers = ( is_in_gts[is_in_gts_or_centers, :] & is_in_cts[is_in_gts_or_centers, :]) return is_in_gts_or_centers, is_in_boxes_and_centers def dynamic_k_matching(self, cost, pairwise_ious, num_gt, valid_mask): matching_matrix = torch.zeros_like(cost, dtype=torch.uint8) # select candidate topk ious for dynamic-k calculation candidate_topk = min(self.candidate_topk, pairwise_ious.size(0)) topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0) # calculate dynamic k for each gt dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1) for gt_idx in range(num_gt): _, pos_idx = torch.topk( cost[:, gt_idx], k=dynamic_ks[gt_idx], largest=False) matching_matrix[:, gt_idx][pos_idx] = 1 del topk_ious, dynamic_ks, pos_idx prior_match_gt_mask = matching_matrix.sum(1) > 1 if prior_match_gt_mask.sum() > 0: cost_min, cost_argmin = torch.min( cost[prior_match_gt_mask, :], dim=1) matching_matrix[prior_match_gt_mask, :] *= 0 matching_matrix[prior_match_gt_mask, cost_argmin] = 1 # get foreground mask inside box and center prior fg_mask_inboxes = matching_matrix.sum(1) > 0 valid_mask[valid_mask.clone()] = fg_mask_inboxes matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1) matched_pred_ious = (matching_matrix * pairwise_ious).sum(1)[fg_mask_inboxes] return matched_pred_ious, matched_gt_inds
11,495
43.55814
79
py
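An illustrative SimOTA call; priors follow the (center-x, center-y, stride-w, stride-h) layout and all predictions are random placeholders, so the set of positives varies run to run.

import torch
from mmdet.core.bbox.assigners import SimOTAAssigner

assigner = SimOTAAssigner(center_radius=2.5)
num_priors, num_classes = 8, 3
pred_scores = torch.rand(num_priors, num_classes)
xy = torch.rand(num_priors, 2) * 50                  # prior centers
priors = torch.cat([xy, torch.full((num_priors, 2), 8.)], dim=1)
decoded_bboxes = torch.cat([xy - 8., xy + 8.], dim=1)
gt_bboxes = torch.Tensor([[10., 10., 40., 40.]])
gt_labels = torch.LongTensor([2])
result = assigner.assign(pred_scores, priors, decoded_bboxes, gt_bboxes,
                         gt_labels)
# dynamic-k picks a data-dependent number of positives per gt, so the
# count of result.gt_inds == 1 depends on the random inputs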
mmdetection
mmdetection-master/mmdet/core/bbox/assigners/task_aligned_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from .assign_result import AssignResult
from .base_assigner import BaseAssigner

INF = 100000000


@BBOX_ASSIGNERS.register_module()
class TaskAlignedAssigner(BaseAssigner):
    """Task aligned assigner used in the paper:
    `TOOD: Task-aligned One-stage Object Detection.
    <https://arxiv.org/abs/2108.07755>`_.

    Assign a corresponding gt bbox or background to each predicted bbox.
    Each bbox will be assigned with `0` or a positive integer
    indicating the ground truth index.

    - 0: negative sample, no assigned gt
    - positive integer: positive sample, index (1-based) of assigned gt

    Args:
        topk (int): number of bboxes selected on each level
        iou_calculator (dict): Config dict for iou calculator.
            Default: dict(type='BboxOverlaps2D')
    """

    def __init__(self, topk, iou_calculator=dict(type='BboxOverlaps2D')):
        assert topk >= 1
        self.topk = topk
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self,
               pred_scores,
               decode_bboxes,
               anchors,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None,
               alpha=1,
               beta=6):
        """Assign gt to bboxes.

        The assignment is done in the following steps:

        1. compute the alignment metric between all bboxes (bboxes of all
           pyramid levels) and gt
        2. select top-k bboxes as candidates for each gt
        3. limit the positive sample's center in gt (because the anchor-free
           detector can only predict positive distance)

        Args:
            pred_scores (Tensor): predicted class probability,
                shape(n, num_classes)
            decode_bboxes (Tensor): predicted bounding boxes, shape(n, 4)
            anchors (Tensor): pre-defined anchors, shape(n, 4).
            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
                labelled as `ignored`, e.g., crowd boxes in COCO.
            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
            alpha (int | float): Exponent of the classification score in the
                alignment metric. Default 1.
            beta (int | float): Exponent of the IoU in the alignment metric.
                Default 6.

        Returns:
            :obj:`AssignResult`: The assign result.
""" anchors = anchors[:, :4] num_gt, num_bboxes = gt_bboxes.size(0), anchors.size(0) # compute alignment metric between all bbox and gt overlaps = self.iou_calculator(decode_bboxes, gt_bboxes).detach() bbox_scores = pred_scores[:, gt_labels].detach() # assign 0 by default assigned_gt_inds = anchors.new_full((num_bboxes, ), 0, dtype=torch.long) assign_metrics = anchors.new_zeros((num_bboxes, )) if num_gt == 0 or num_bboxes == 0: # No ground truth or boxes, return empty assignment max_overlaps = anchors.new_zeros((num_bboxes, )) if num_gt == 0: # No gt boxes, assign everything to background assigned_gt_inds[:] = 0 if gt_labels is None: assigned_labels = None else: assigned_labels = anchors.new_full((num_bboxes, ), -1, dtype=torch.long) assign_result = AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) assign_result.assign_metrics = assign_metrics return assign_result # select top-k bboxes as candidates for each gt alignment_metrics = bbox_scores**alpha * overlaps**beta topk = min(self.topk, alignment_metrics.size(0)) _, candidate_idxs = alignment_metrics.topk(topk, dim=0, largest=True) candidate_metrics = alignment_metrics[candidate_idxs, torch.arange(num_gt)] is_pos = candidate_metrics > 0 # limit the positive sample's center in gt anchors_cx = (anchors[:, 0] + anchors[:, 2]) / 2.0 anchors_cy = (anchors[:, 1] + anchors[:, 3]) / 2.0 for gt_idx in range(num_gt): candidate_idxs[:, gt_idx] += gt_idx * num_bboxes ep_anchors_cx = anchors_cx.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) ep_anchors_cy = anchors_cy.view(1, -1).expand( num_gt, num_bboxes).contiguous().view(-1) candidate_idxs = candidate_idxs.view(-1) # calculate the left, top, right, bottom distance between positive # bbox center and gt side l_ = ep_anchors_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0] t_ = ep_anchors_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1] r_ = gt_bboxes[:, 2] - ep_anchors_cx[candidate_idxs].view(-1, num_gt) b_ = gt_bboxes[:, 3] - ep_anchors_cy[candidate_idxs].view(-1, num_gt) is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01 is_pos = is_pos & is_in_gts # if an anchor box is assigned to multiple gts, # the one with the highest iou will be selected. overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1) index = candidate_idxs.view(-1)[is_pos.view(-1)] overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index] overlaps_inf = overlaps_inf.view(num_gt, -1).t() max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1) assigned_gt_inds[ max_overlaps != -INF] = argmax_overlaps[max_overlaps != -INF] + 1 assign_metrics[max_overlaps != -INF] = alignment_metrics[ max_overlaps != -INF, argmax_overlaps[max_overlaps != -INF]] if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None assign_result = AssignResult( num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels) assign_result.assign_metrics = assign_metrics return assign_result
6,554
42.125
79
py
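A toy TaskAlignedAssigner call with random scores and boxes; as above, only the shapes and the extra assign_metrics field are the point here.

import torch
from mmdet.core.bbox.assigners import TaskAlignedAssigner

assigner = TaskAlignedAssigner(topk=2)
pred_scores = torch.rand(6, 3)                 # class probabilities
decode_bboxes = torch.rand(6, 4) * 50
decode_bboxes[:, 2:] += decode_bboxes[:, :2]   # keep x1 <= x2, y1 <= y2
anchors = decode_bboxes.clone()
gt_bboxes = torch.Tensor([[5., 5., 45., 45.]])
gt_labels = torch.LongTensor([1])
result = assigner.assign(pred_scores, decode_bboxes, anchors, gt_bboxes,
                         gt_labels=gt_labels)
# result.assign_metrics holds score**alpha * iou**beta for assigned priors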
mmdetection
mmdetection-master/mmdet/core/bbox/assigners/uniform_assigner.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from ..builder import BBOX_ASSIGNERS
from ..iou_calculators import build_iou_calculator
from ..transforms import bbox_xyxy_to_cxcywh
from .assign_result import AssignResult
from .base_assigner import BaseAssigner


@BBOX_ASSIGNERS.register_module()
class UniformAssigner(BaseAssigner):
    """Uniform Matching between the anchors and gt boxes, which can achieve
    balance in positive anchors. `gt_bboxes_ignore` is not considered for
    now.

    Args:
        pos_ignore_thr (float): the threshold to ignore positive anchors
        neg_ignore_thr (float): the threshold to ignore negative anchors
        match_times (int): Number of positive anchors for each gt box.
           Default 4.
        iou_calculator (dict): Config dict for iou calculator.
    """

    def __init__(self,
                 pos_ignore_thr,
                 neg_ignore_thr,
                 match_times=4,
                 iou_calculator=dict(type='BboxOverlaps2D')):
        self.match_times = match_times
        self.pos_ignore_thr = pos_ignore_thr
        self.neg_ignore_thr = neg_ignore_thr
        self.iou_calculator = build_iou_calculator(iou_calculator)

    def assign(self,
               bbox_pred,
               anchor,
               gt_bboxes,
               gt_bboxes_ignore=None,
               gt_labels=None):
        num_gts, num_bboxes = gt_bboxes.size(0), bbox_pred.size(0)

        # 1. assign 0 (background) by default
        assigned_gt_inds = bbox_pred.new_full((num_bboxes, ),
                                              0,
                                              dtype=torch.long)
        assigned_labels = bbox_pred.new_full((num_bboxes, ),
                                             -1,
                                             dtype=torch.long)
        if num_gts == 0 or num_bboxes == 0:
            # No ground truth or boxes, return empty assignment
            if num_gts == 0:
                # No ground truth, assign all to background
                assigned_gt_inds[:] = 0
            assign_result = AssignResult(
                num_gts, assigned_gt_inds, None, labels=assigned_labels)
            assign_result.set_extra_property(
                'pos_idx', bbox_pred.new_empty(0, dtype=torch.bool))
            assign_result.set_extra_property('pos_predicted_boxes',
                                             bbox_pred.new_empty((0, 4)))
            assign_result.set_extra_property('target_boxes',
                                             bbox_pred.new_empty((0, 4)))
            return assign_result

        # 2. Compute the L1 cost between boxes
        # Note that we use both anchors and predicted boxes
        cost_bbox = torch.cdist(
            bbox_xyxy_to_cxcywh(bbox_pred),
            bbox_xyxy_to_cxcywh(gt_bboxes),
            p=1)
        cost_bbox_anchors = torch.cdist(
            bbox_xyxy_to_cxcywh(anchor), bbox_xyxy_to_cxcywh(gt_bboxes), p=1)

        # We found that topk function has different results in cpu and
        # cuda mode. In order to ensure consistency with the source code,
        # we also use cpu mode.
        # TODO: Check whether the performance of cpu and cuda are the same.
        C = cost_bbox.cpu()
        C1 = cost_bbox_anchors.cpu()

        # self.match_times x n
        index = torch.topk(
            C,
            k=self.match_times,
            dim=0,
            largest=False)[1]

        # self.match_times x n
        index1 = torch.topk(C1, k=self.match_times, dim=0, largest=False)[1]
        # (self.match_times*2) x n
        indexes = torch.cat((index, index1),
                            dim=1).reshape(-1).to(bbox_pred.device)

        pred_overlaps = self.iou_calculator(bbox_pred, gt_bboxes)
        anchor_overlaps = self.iou_calculator(anchor, gt_bboxes)
        pred_max_overlaps, _ = pred_overlaps.max(dim=1)
        anchor_max_overlaps, _ = anchor_overlaps.max(dim=0)

        # 3. Compute the ignore indexes using gt_bboxes and predicted boxes
        ignore_idx = pred_max_overlaps > self.neg_ignore_thr
        assigned_gt_inds[ignore_idx] = -1

        # 4. 
Compute the ignore indexes of positive sample use anchors # and predict boxes pos_gt_index = torch.arange( 0, C1.size(1), device=bbox_pred.device).repeat(self.match_times * 2) pos_ious = anchor_overlaps[indexes, pos_gt_index] pos_ignore_idx = pos_ious < self.pos_ignore_thr pos_gt_index_with_ignore = pos_gt_index + 1 pos_gt_index_with_ignore[pos_ignore_idx] = -1 assigned_gt_inds[indexes] = pos_gt_index_with_ignore if gt_labels is not None: assigned_labels = assigned_gt_inds.new_full((num_bboxes, ), -1) pos_inds = torch.nonzero( assigned_gt_inds > 0, as_tuple=False).squeeze() if pos_inds.numel() > 0: assigned_labels[pos_inds] = gt_labels[ assigned_gt_inds[pos_inds] - 1] else: assigned_labels = None assign_result = AssignResult( num_gts, assigned_gt_inds, anchor_max_overlaps, labels=assigned_labels) assign_result.set_extra_property('pos_idx', ~pos_ignore_idx) assign_result.set_extra_property('pos_predicted_boxes', bbox_pred[indexes]) assign_result.set_extra_property('target_boxes', gt_bboxes[pos_gt_index]) return assign_result
5,556
39.860294
77
py
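A YOLOF-style sketch for the UniformAssigner above; the anchors and jittered predictions in xyxy format are random placeholders.

import torch
from mmdet.core.bbox.assigners import UniformAssigner

assigner = UniformAssigner(pos_ignore_thr=0.15, neg_ignore_thr=0.7,
                           match_times=2)
xy = torch.rand(10, 2) * 60
anchor = torch.cat([xy, xy + 16.], dim=1)
bbox_pred = anchor + 0.5 * torch.randn(10, 4)    # jittered predictions
gt_bboxes = torch.Tensor([[20., 20., 40., 40.]])
gt_labels = torch.LongTensor([0])
result = assigner.assign(bbox_pred, anchor, gt_bboxes, gt_labels=gt_labels)
# extra properties record which of the match_times * 2 candidates per gt
# survived the pos_ignore_thr filter
pos_idx = result.get_extra_property('pos_idx')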
mmdetection
mmdetection-master/mmdet/core/bbox/coder/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .base_bbox_coder import BaseBBoxCoder from .bucketing_bbox_coder import BucketingBBoxCoder from .delta_xywh_bbox_coder import DeltaXYWHBBoxCoder from .distance_point_bbox_coder import DistancePointBBoxCoder from .legacy_delta_xywh_bbox_coder import LegacyDeltaXYWHBBoxCoder from .pseudo_bbox_coder import PseudoBBoxCoder from .tblr_bbox_coder import TBLRBBoxCoder from .yolo_bbox_coder import YOLOBBoxCoder __all__ = [ 'BaseBBoxCoder', 'PseudoBBoxCoder', 'DeltaXYWHBBoxCoder', 'LegacyDeltaXYWHBBoxCoder', 'TBLRBBoxCoder', 'YOLOBBoxCoder', 'BucketingBBoxCoder', 'DistancePointBBoxCoder' ]
654
39.9375
66
py
mmdetection
mmdetection-master/mmdet/core/bbox/coder/base_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod class BaseBBoxCoder(metaclass=ABCMeta): """Base bounding box coder.""" def __init__(self, **kwargs): pass @abstractmethod def encode(self, bboxes, gt_bboxes): """Encode deltas between bboxes and ground truth boxes.""" @abstractmethod def decode(self, bboxes, bboxes_pred): """Decode the predicted bboxes according to prediction and base boxes."""
496
25.157895
71
py
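A hypothetical identity coder, only to show the subclassing contract of BaseBBoxCoder; it is not part of mmdet.

import torch
from mmdet.core.bbox.coder import BaseBBoxCoder

class IdentityBBoxCoder(BaseBBoxCoder):
    """Toy coder that passes boxes through unchanged (hypothetical)."""

    def encode(self, bboxes, gt_bboxes):
        return gt_bboxes

    def decode(self, bboxes, bboxes_pred):
        return bboxes_pred

coder = IdentityBBoxCoder()
boxes = torch.rand(3, 4)
assert torch.equal(coder.decode(boxes, boxes), boxes)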
mmdetection
mmdetection-master/mmdet/core/bbox/coder/bucketing_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import torch import torch.nn.functional as F from ..builder import BBOX_CODERS from ..transforms import bbox_rescale from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class BucketingBBoxCoder(BaseBBoxCoder): """Bucketing BBox Coder for Side-Aware Boundary Localization (SABL). Boundary Localization with Bucketing and Bucketing Guided Rescoring are implemented here. Please refer to https://arxiv.org/abs/1912.04260 for more details. Args: num_buckets (int): Number of buckets. scale_factor (int): Scale factor of proposals to generate buckets. offset_topk (int): Topk buckets are used to generate bucket fine regression targets. Defaults to 2. offset_upperbound (float): Offset upperbound to generate bucket fine regression targets. To avoid too large offset displacements. Defaults to 1.0. cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. Defaults to True. clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, num_buckets, scale_factor, offset_topk=2, offset_upperbound=1.0, cls_ignore_neighbor=True, clip_border=True): super(BucketingBBoxCoder, self).__init__() self.num_buckets = num_buckets self.scale_factor = scale_factor self.offset_topk = offset_topk self.offset_upperbound = offset_upperbound self.cls_ignore_neighbor = cls_ignore_neighbor self.clip_border = clip_border def encode(self, bboxes, gt_bboxes): """Get bucketing estimation and fine regression targets during training. Args: bboxes (torch.Tensor): source boxes, e.g., object proposals. gt_bboxes (torch.Tensor): target of the transformation, e.g., ground truth boxes. Returns: encoded_bboxes(tuple[Tensor]): bucketing estimation and fine regression targets and weights """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = bbox2bucket(bboxes, gt_bboxes, self.num_buckets, self.scale_factor, self.offset_topk, self.offset_upperbound, self.cls_ignore_neighbor) return encoded_bboxes def decode(self, bboxes, pred_bboxes, max_shape=None): """Apply transformation `pred_bboxes` to `boxes`. Args: boxes (torch.Tensor): Basic boxes. pred_bboxes (torch.Tensor): Predictions for bucketing estimation and fine regression max_shape (tuple[int], optional): Maximum shape of boxes. Defaults to None. Returns: torch.Tensor: Decoded boxes. """ assert len(pred_bboxes) == 2 cls_preds, offset_preds = pred_bboxes assert cls_preds.size(0) == bboxes.size(0) and offset_preds.size( 0) == bboxes.size(0) decoded_bboxes = bucket2bbox(bboxes, cls_preds, offset_preds, self.num_buckets, self.scale_factor, max_shape, self.clip_border) return decoded_bboxes @mmcv.jit(coderize=True) def generat_buckets(proposals, num_buckets, scale_factor=1.0): """Generate buckets w.r.t bucket number and scale factor of proposals. Args: proposals (Tensor): Shape (n, 4) num_buckets (int): Number of buckets. scale_factor (float): Scale factor to rescale proposals. Returns: tuple[Tensor]: (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets) - bucket_w: Width of buckets on x-axis. Shape (n, ). - bucket_h: Height of buckets on y-axis. Shape (n, ). - l_buckets: Left buckets. Shape (n, ceil(side_num/2)). - r_buckets: Right buckets. Shape (n, ceil(side_num/2)). - t_buckets: Top buckets. Shape (n, ceil(side_num/2)). - d_buckets: Down buckets. Shape (n, ceil(side_num/2)). 
""" proposals = bbox_rescale(proposals, scale_factor) # number of buckets in each side side_num = int(np.ceil(num_buckets / 2.0)) pw = proposals[..., 2] - proposals[..., 0] ph = proposals[..., 3] - proposals[..., 1] px1 = proposals[..., 0] py1 = proposals[..., 1] px2 = proposals[..., 2] py2 = proposals[..., 3] bucket_w = pw / num_buckets bucket_h = ph / num_buckets # left buckets l_buckets = px1[:, None] + (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] # right buckets r_buckets = px2[:, None] - (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_w[:, None] # top buckets t_buckets = py1[:, None] + (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] # down buckets d_buckets = py2[:, None] - (0.5 + torch.arange( 0, side_num).to(proposals).float())[None, :] * bucket_h[:, None] return bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets @mmcv.jit(coderize=True) def bbox2bucket(proposals, gt, num_buckets, scale_factor, offset_topk=2, offset_upperbound=1.0, cls_ignore_neighbor=True): """Generate buckets estimation and fine regression targets. Args: proposals (Tensor): Shape (n, 4) gt (Tensor): Shape (n, 4) num_buckets (int): Number of buckets. scale_factor (float): Scale factor to rescale proposals. offset_topk (int): Topk buckets are used to generate bucket fine regression targets. Defaults to 2. offset_upperbound (float): Offset allowance to generate bucket fine regression targets. To avoid too large offset displacements. Defaults to 1.0. cls_ignore_neighbor (bool): Ignore second nearest bucket or Not. Defaults to True. Returns: tuple[Tensor]: (offsets, offsets_weights, bucket_labels, cls_weights). - offsets: Fine regression targets. \ Shape (n, num_buckets*2). - offsets_weights: Fine regression weights. \ Shape (n, num_buckets*2). - bucket_labels: Bucketing estimation labels. \ Shape (n, num_buckets*2). - cls_weights: Bucketing estimation weights. \ Shape (n, num_buckets*2). 
""" assert proposals.size() == gt.size() # generate buckets proposals = proposals.float() gt = gt.float() (bucket_w, bucket_h, l_buckets, r_buckets, t_buckets, d_buckets) = generat_buckets(proposals, num_buckets, scale_factor) gx1 = gt[..., 0] gy1 = gt[..., 1] gx2 = gt[..., 2] gy2 = gt[..., 3] # generate offset targets and weights # offsets from buckets to gts l_offsets = (l_buckets - gx1[:, None]) / bucket_w[:, None] r_offsets = (r_buckets - gx2[:, None]) / bucket_w[:, None] t_offsets = (t_buckets - gy1[:, None]) / bucket_h[:, None] d_offsets = (d_buckets - gy2[:, None]) / bucket_h[:, None] # select top-k nearest buckets l_topk, l_label = l_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) r_topk, r_label = r_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) t_topk, t_label = t_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) d_topk, d_label = d_offsets.abs().topk( offset_topk, dim=1, largest=False, sorted=True) offset_l_weights = l_offsets.new_zeros(l_offsets.size()) offset_r_weights = r_offsets.new_zeros(r_offsets.size()) offset_t_weights = t_offsets.new_zeros(t_offsets.size()) offset_d_weights = d_offsets.new_zeros(d_offsets.size()) inds = torch.arange(0, proposals.size(0)).to(proposals).long() # generate offset weights of top-k nearest buckets for k in range(offset_topk): if k >= 1: offset_l_weights[inds, l_label[:, k]] = (l_topk[:, k] < offset_upperbound).float() offset_r_weights[inds, r_label[:, k]] = (r_topk[:, k] < offset_upperbound).float() offset_t_weights[inds, t_label[:, k]] = (t_topk[:, k] < offset_upperbound).float() offset_d_weights[inds, d_label[:, k]] = (d_topk[:, k] < offset_upperbound).float() else: offset_l_weights[inds, l_label[:, k]] = 1.0 offset_r_weights[inds, r_label[:, k]] = 1.0 offset_t_weights[inds, t_label[:, k]] = 1.0 offset_d_weights[inds, d_label[:, k]] = 1.0 offsets = torch.cat([l_offsets, r_offsets, t_offsets, d_offsets], dim=-1) offsets_weights = torch.cat([ offset_l_weights, offset_r_weights, offset_t_weights, offset_d_weights ], dim=-1) # generate bucket labels and weight side_num = int(np.ceil(num_buckets / 2.0)) labels = torch.stack( [l_label[:, 0], r_label[:, 0], t_label[:, 0], d_label[:, 0]], dim=-1) batch_size = labels.size(0) bucket_labels = F.one_hot(labels.view(-1), side_num).view(batch_size, -1).float() bucket_cls_l_weights = (l_offsets.abs() < 1).float() bucket_cls_r_weights = (r_offsets.abs() < 1).float() bucket_cls_t_weights = (t_offsets.abs() < 1).float() bucket_cls_d_weights = (d_offsets.abs() < 1).float() bucket_cls_weights = torch.cat([ bucket_cls_l_weights, bucket_cls_r_weights, bucket_cls_t_weights, bucket_cls_d_weights ], dim=-1) # ignore second nearest buckets for cls if necessary if cls_ignore_neighbor: bucket_cls_weights = (~((bucket_cls_weights == 1) & (bucket_labels == 0))).float() else: bucket_cls_weights[:] = 1.0 return offsets, offsets_weights, bucket_labels, bucket_cls_weights @mmcv.jit(coderize=True) def bucket2bbox(proposals, cls_preds, offset_preds, num_buckets, scale_factor=1.0, max_shape=None, clip_border=True): """Apply bucketing estimation (cls preds) and fine regression (offset preds) to generate det bboxes. Args: proposals (Tensor): Boxes to be transformed. Shape (n, 4) cls_preds (Tensor): bucketing estimation. Shape (n, num_buckets*2). offset_preds (Tensor): fine regression. Shape (n, num_buckets*2). num_buckets (int): Number of buckets. scale_factor (float): Scale factor to rescale proposals. max_shape (tuple[int, int]): Maximum bounds for boxes. 
specifies (H, W) clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. Returns: tuple[Tensor]: (bboxes, loc_confidence). - bboxes: predicted bboxes. Shape (n, 4) - loc_confidence: localization confidence of predicted bboxes. Shape (n,). """ side_num = int(np.ceil(num_buckets / 2.0)) cls_preds = cls_preds.view(-1, side_num) offset_preds = offset_preds.view(-1, side_num) scores = F.softmax(cls_preds, dim=1) score_topk, score_label = scores.topk(2, dim=1, largest=True, sorted=True) rescaled_proposals = bbox_rescale(proposals, scale_factor) pw = rescaled_proposals[..., 2] - rescaled_proposals[..., 0] ph = rescaled_proposals[..., 3] - rescaled_proposals[..., 1] px1 = rescaled_proposals[..., 0] py1 = rescaled_proposals[..., 1] px2 = rescaled_proposals[..., 2] py2 = rescaled_proposals[..., 3] bucket_w = pw / num_buckets bucket_h = ph / num_buckets score_inds_l = score_label[0::4, 0] score_inds_r = score_label[1::4, 0] score_inds_t = score_label[2::4, 0] score_inds_d = score_label[3::4, 0] l_buckets = px1 + (0.5 + score_inds_l.float()) * bucket_w r_buckets = px2 - (0.5 + score_inds_r.float()) * bucket_w t_buckets = py1 + (0.5 + score_inds_t.float()) * bucket_h d_buckets = py2 - (0.5 + score_inds_d.float()) * bucket_h offsets = offset_preds.view(-1, 4, side_num) inds = torch.arange(proposals.size(0)).to(proposals).long() l_offsets = offsets[:, 0, :][inds, score_inds_l] r_offsets = offsets[:, 1, :][inds, score_inds_r] t_offsets = offsets[:, 2, :][inds, score_inds_t] d_offsets = offsets[:, 3, :][inds, score_inds_d] x1 = l_buckets - l_offsets * bucket_w x2 = r_buckets - r_offsets * bucket_w y1 = t_buckets - t_offsets * bucket_h y2 = d_buckets - d_offsets * bucket_h if clip_border and max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1] - 1) y1 = y1.clamp(min=0, max=max_shape[0] - 1) x2 = x2.clamp(min=0, max=max_shape[1] - 1) y2 = y2.clamp(min=0, max=max_shape[0] - 1) bboxes = torch.cat([x1[:, None], y1[:, None], x2[:, None], y2[:, None]], dim=-1) # bucketing guided rescoring loc_confidence = score_topk[:, 0] top2_neighbor_inds = (score_label[:, 0] - score_label[:, 1]).abs() == 1 loc_confidence += score_topk[:, 1] * top2_neighbor_inds.float() loc_confidence = loc_confidence.view(-1, 4).mean(dim=1) return bboxes, loc_confidence
14,119
39.113636
79
py
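A minimal standalone sketch (not from the dumped file) of the side-bucket centers computed above, in plain PyTorch; the bbox_rescale step is skipped, i.e. scale_factor = 1 is assumed:

import torch

num_buckets = 8
side_num = (num_buckets + 1) // 2                 # buckets per side (ceil)
proposal = torch.tensor([[10., 10., 90., 50.]])   # one box: x1, y1, x2, y2
pw = proposal[:, 2] - proposal[:, 0]              # width = 80
bucket_w = pw / num_buckets                       # 10 px per bucket
steps = 0.5 + torch.arange(side_num).float()      # bucket-center offsets
l_buckets = proposal[:, 0:1] + steps[None, :] * bucket_w[:, None]
r_buckets = proposal[:, 2:3] - steps[None, :] * bucket_w[:, None]
print(l_buckets)  # tensor([[15., 25., 35., 45.]]) -- centers marching in from x1
print(r_buckets)  # tensor([[85., 75., 65., 55.]]) -- centers marching in from x2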
mmdetection
mmdetection-master/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved.
import warnings

import mmcv
import numpy as np
import torch

from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder


@BBOX_CODERS.register_module()
class DeltaXYWHBBoxCoder(BaseBBoxCoder):
    """Delta XYWH BBox coder.

    Following the practice in `R-CNN <https://arxiv.org/abs/1311.2524>`_,
    this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and
    decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2).

    Args:
        target_means (Sequence[float]): Denormalizing means of target for
            delta coordinates
        target_stds (Sequence[float]): Denormalizing standard deviation of
            target for delta coordinates
        clip_border (bool, optional): Whether clip the objects outside the
            border of the image. Defaults to True.
        add_ctr_clamp (bool): Whether to add center clamp. When added, the
            predicted box is clamped if its center is too far away from
            the original anchor's center. Only used by YOLOF. Default False.
        ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF.
            Default 32.
    """

    def __init__(self,
                 target_means=(0., 0., 0., 0.),
                 target_stds=(1., 1., 1., 1.),
                 clip_border=True,
                 add_ctr_clamp=False,
                 ctr_clamp=32):
        super(BaseBBoxCoder, self).__init__()
        self.means = target_means
        self.stds = target_stds
        self.clip_border = clip_border
        self.add_ctr_clamp = add_ctr_clamp
        self.ctr_clamp = ctr_clamp

    def encode(self, bboxes, gt_bboxes):
        """Get box regression transformation deltas that can be used to
        transform the ``bboxes`` into the ``gt_bboxes``.

        Args:
            bboxes (torch.Tensor): Source boxes, e.g., object proposals.
            gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
                ground-truth boxes.

        Returns:
            torch.Tensor: Box transformation deltas
        """
        assert bboxes.size(0) == gt_bboxes.size(0)
        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
        encoded_bboxes = bbox2delta(bboxes, gt_bboxes, self.means, self.stds)
        return encoded_bboxes

    def decode(self,
               bboxes,
               pred_bboxes,
               max_shape=None,
               wh_ratio_clip=16 / 1000):
        """Apply transformation `pred_bboxes` to `boxes`.

        Args:
            bboxes (torch.Tensor): Basic boxes. Shape (B, N, 4) or (N, 4)
            pred_bboxes (Tensor): Encoded offsets with respect to each roi.
                Has shape (B, N, num_classes * 4) or (B, N, 4) or
                (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H
                when rois is a grid of anchors. Offset encoding follows [1]_.
            max_shape (Sequence[int] or torch.Tensor or Sequence[
                Sequence[int]], optional): Maximum bounds for boxes, specifies
                (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then
                the max_shape should be a Sequence[Sequence[int]]
                and the length of max_shape should also be B.
            wh_ratio_clip (float, optional): The allowed ratio between
                width and height.

        Returns:
            torch.Tensor: Decoded boxes.
        """
        assert pred_bboxes.size(0) == bboxes.size(0)
        if pred_bboxes.ndim == 3:
            assert pred_bboxes.size(1) == bboxes.size(1)

        if pred_bboxes.ndim == 2 and not torch.onnx.is_in_onnx_export():
            # single image decode
            decoded_bboxes = delta2bbox(bboxes, pred_bboxes, self.means,
                                        self.stds, max_shape, wh_ratio_clip,
                                        self.clip_border, self.add_ctr_clamp,
                                        self.ctr_clamp)
        else:
            if pred_bboxes.ndim == 3 and not torch.onnx.is_in_onnx_export():
                warnings.warn(
                    'DeprecationWarning: onnx_delta2bbox is deprecated '
                    'in the case of batch decoding and non-ONNX, '
                    'please use "delta2bbox" instead. In order to improve '
                    'the decoding speed, the batch function will no '
                    'longer be supported. 
') decoded_bboxes = onnx_delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip, self.clip_border, self.add_ctr_clamp, self.ctr_clamp) return decoded_bboxes @mmcv.jit(coderize=True) def bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)): """Compute deltas of proposals w.r.t. gt. We usually compute the deltas of x, y, w, h of proposals w.r.t ground truth bboxes to get regression target. This is the inverse function of :func:`delta2bbox`. Args: proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates Returns: Tensor: deltas with shape (N, 4), where columns represent dx, dy, dw, dh. """ assert proposals.size() == gt.size() proposals = proposals.float() gt = gt.float() px = (proposals[..., 0] + proposals[..., 2]) * 0.5 py = (proposals[..., 1] + proposals[..., 3]) * 0.5 pw = proposals[..., 2] - proposals[..., 0] ph = proposals[..., 3] - proposals[..., 1] gx = (gt[..., 0] + gt[..., 2]) * 0.5 gy = (gt[..., 1] + gt[..., 3]) * 0.5 gw = gt[..., 2] - gt[..., 0] gh = gt[..., 3] - gt[..., 1] dx = (gx - px) / pw dy = (gy - py) / ph dw = torch.log(gw / pw) dh = torch.log(gh / ph) deltas = torch.stack([dx, dy, dw, dh], dim=-1) means = deltas.new_tensor(means).unsqueeze(0) stds = deltas.new_tensor(stds).unsqueeze(0) deltas = deltas.sub_(means).div_(stds) return deltas @mmcv.jit(coderize=True) def delta2bbox(rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000, clip_border=True, add_ctr_clamp=False, ctr_clamp=32): """Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of :func:`bbox2delta`. Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4). deltas (Tensor): Encoded offsets relative to each roi. Has shape (N, num_classes * 4) or (N, 4). Note N = num_base_anchors * W * H, when rois is a grid of anchors. Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates. Default (0., 0., 0., 0.). stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. Default (1., 1., 1., 1.). max_shape (tuple[int, int]): Maximum bounds for boxes, specifies (H, W). Default None. wh_ratio_clip (float): Maximum aspect ratio for boxes. Default 16 / 1000. clip_border (bool, optional): Whether clip the objects outside the border of the image. Default True. add_ctr_clamp (bool): Whether to add center clamp. When set to True, the center of the prediction bounding box will be clamped to avoid being too far away from the center of the anchor. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. Returns: Tensor: Boxes with shape (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. References: .. 
[1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3)) tensor([[0.0000, 0.0000, 1.0000, 1.0000], [0.1409, 0.1409, 2.8591, 2.8591], [0.0000, 0.3161, 4.1945, 0.6839], [5.0000, 5.0000, 5.0000, 5.0000]]) """ num_bboxes, num_classes = deltas.size(0), deltas.size(1) // 4 if num_bboxes == 0: return deltas deltas = deltas.reshape(-1, 4) means = deltas.new_tensor(means).view(1, -1) stds = deltas.new_tensor(stds).view(1, -1) denorm_deltas = deltas * stds + means dxy = denorm_deltas[:, :2] dwh = denorm_deltas[:, 2:] # Compute width/height of each roi rois_ = rois.repeat(1, num_classes).reshape(-1, 4) pxy = ((rois_[:, :2] + rois_[:, 2:]) * 0.5) pwh = (rois_[:, 2:] - rois_[:, :2]) dxy_wh = pwh * dxy max_ratio = np.abs(np.log(wh_ratio_clip)) if add_ctr_clamp: dxy_wh = torch.clamp(dxy_wh, max=ctr_clamp, min=-ctr_clamp) dwh = torch.clamp(dwh, max=max_ratio) else: dwh = dwh.clamp(min=-max_ratio, max=max_ratio) gxy = pxy + dxy_wh gwh = pwh * dwh.exp() x1y1 = gxy - (gwh * 0.5) x2y2 = gxy + (gwh * 0.5) bboxes = torch.cat([x1y1, x2y2], dim=-1) if clip_border and max_shape is not None: bboxes[..., 0::2].clamp_(min=0, max=max_shape[1]) bboxes[..., 1::2].clamp_(min=0, max=max_shape[0]) bboxes = bboxes.reshape(num_bboxes, -1) return bboxes def onnx_delta2bbox(rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000, clip_border=True, add_ctr_clamp=False, ctr_clamp=32): """Apply deltas to shift/scale base boxes. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of :func:`bbox2delta`. Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4) or (B, N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4). Note N = num_anchors * W * H when rois is a grid of anchors.Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates. Default (0., 0., 0., 0.). stds (Sequence[float]): Denormalizing standard deviation for delta coordinates. Default (1., 1., 1., 1.). max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If rois shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Default None. wh_ratio_clip (float): Maximum aspect ratio for boxes. Default 16 / 1000. clip_border (bool, optional): Whether clip the objects outside the border of the image. Default True. add_ctr_clamp (bool): Whether to add center clamp, when added, the predicted box is clamped is its center is too far away from the original anchor's center. Only used by YOLOF. Default False. ctr_clamp (int): the maximum pixel shift to clamp. Only used by YOLOF. Default 32. Returns: Tensor: Boxes with shape (B, N, num_classes * 4) or (B, N, 4) or (N, num_classes * 4) or (N, 4), where 4 represent tl_x, tl_y, br_x, br_y. References: .. 
[1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> delta2bbox(rois, deltas, max_shape=(32, 32, 3)) tensor([[0.0000, 0.0000, 1.0000, 1.0000], [0.1409, 0.1409, 2.8591, 2.8591], [0.0000, 0.3161, 4.1945, 0.6839], [5.0000, 5.0000, 5.0000, 5.0000]]) """ means = deltas.new_tensor(means).view(1, -1).repeat(1, deltas.size(-1) // 4) stds = deltas.new_tensor(stds).view(1, -1).repeat(1, deltas.size(-1) // 4) denorm_deltas = deltas * stds + means dx = denorm_deltas[..., 0::4] dy = denorm_deltas[..., 1::4] dw = denorm_deltas[..., 2::4] dh = denorm_deltas[..., 3::4] x1, y1 = rois[..., 0], rois[..., 1] x2, y2 = rois[..., 2], rois[..., 3] # Compute center of each roi px = ((x1 + x2) * 0.5).unsqueeze(-1).expand_as(dx) py = ((y1 + y2) * 0.5).unsqueeze(-1).expand_as(dy) # Compute width/height of each roi pw = (x2 - x1).unsqueeze(-1).expand_as(dw) ph = (y2 - y1).unsqueeze(-1).expand_as(dh) dx_width = pw * dx dy_height = ph * dy max_ratio = np.abs(np.log(wh_ratio_clip)) if add_ctr_clamp: dx_width = torch.clamp(dx_width, max=ctr_clamp, min=-ctr_clamp) dy_height = torch.clamp(dy_height, max=ctr_clamp, min=-ctr_clamp) dw = torch.clamp(dw, max=max_ratio) dh = torch.clamp(dh, max=max_ratio) else: dw = dw.clamp(min=-max_ratio, max=max_ratio) dh = dh.clamp(min=-max_ratio, max=max_ratio) # Use exp(network energy) to enlarge/shrink each roi gw = pw * dw.exp() gh = ph * dh.exp() # Use network energy to shift the center of each roi gx = px + dx_width gy = py + dy_height # Convert center-xy/width/height to top-left, bottom-right x1 = gx - gw * 0.5 y1 = gy - gh * 0.5 x2 = gx + gw * 0.5 y2 = gy + gh * 0.5 bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) if clip_border and max_shape is not None: # clip bboxes with dynamic `min` and `max` for onnx if torch.onnx.is_in_onnx_export(): from mmdet.core.export import dynamic_clip_for_onnx x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view(deltas.size()) return bboxes if not isinstance(max_shape, torch.Tensor): max_shape = x1.new_tensor(max_shape) max_shape = max_shape[..., :2].type_as(x1) if max_shape.ndim == 2: assert bboxes.ndim == 3 assert max_shape.size(0) == bboxes.size(0) min_xy = x1.new_tensor(0) max_xy = torch.cat( [max_shape] * (deltas.size(-1) // 2), dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) return bboxes
15,978
39.659033
79
py
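A quick standalone check (not from the dumped file) that the bbox2delta / delta2bbox formulas above invert each other, with unit means/stds and no clipping:

import torch

rois = torch.tensor([[0., 0., 10., 10.]])
gts = torch.tensor([[2., 2., 12., 8.]])
# encode: normalized center shift plus log size ratio, as in bbox2delta
px, py = (rois[:, 0] + rois[:, 2]) / 2, (rois[:, 1] + rois[:, 3]) / 2
pw, ph = rois[:, 2] - rois[:, 0], rois[:, 3] - rois[:, 1]
gx, gy = (gts[:, 0] + gts[:, 2]) / 2, (gts[:, 1] + gts[:, 3]) / 2
gw, gh = gts[:, 2] - gts[:, 0], gts[:, 3] - gts[:, 1]
deltas = torch.stack([(gx - px) / pw, (gy - py) / ph,
                      torch.log(gw / pw), torch.log(gh / ph)], dim=-1)
print(deltas)  # dx=0.2, dy=0.0, dw=0.0, dh=log(0.6) ~ -0.5108
# decode inverts it exactly: shift the center, exponentiate the size
cx, w = px + pw * deltas[:, 0], pw * deltas[:, 2].exp()
print(cx - w / 2, cx + w / 2)  # tensor([2.]) tensor([12.]) -> gt x1, x2 recovered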
mmdetection
mmdetection-master/mmdet/core/bbox/coder/distance_point_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import BBOX_CODERS from ..transforms import bbox2distance, distance2bbox from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class DistancePointBBoxCoder(BaseBBoxCoder): """Distance Point BBox coder. This coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, right) and decode it back to the original. Args: clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, clip_border=True): super(BaseBBoxCoder, self).__init__() self.clip_border = clip_border def encode(self, points, gt_bboxes, max_dis=None, eps=0.1): """Encode bounding box to distances. Args: points (Tensor): Shape (N, 2), The format is [x, y]. gt_bboxes (Tensor): Shape (N, 4), The format is "xyxy" max_dis (float): Upper bound of the distance. Default None. eps (float): a small value to ensure target < max_dis, instead <=. Default 0.1. Returns: Tensor: Box transformation deltas. The shape is (N, 4). """ assert points.size(0) == gt_bboxes.size(0) assert points.size(-1) == 2 assert gt_bboxes.size(-1) == 4 return bbox2distance(points, gt_bboxes, max_dis, eps) def decode(self, points, pred_bboxes, max_shape=None): """Decode distance prediction to bounding box. Args: points (Tensor): Shape (B, N, 2) or (N, 2). pred_bboxes (Tensor): Distance from the given point to 4 boundaries (left, top, right, bottom). Shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]], and the length of max_shape should also be B. Default None. Returns: Tensor: Boxes with shape (N, 4) or (B, N, 4) """ assert points.size(0) == pred_bboxes.size(0) assert points.size(-1) == 2 assert pred_bboxes.size(-1) == 4 if self.clip_border is False: max_shape = None return distance2bbox(points, pred_bboxes, max_shape)
2,481
37.78125
78
py
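The distance-point encode/decode pair above reduces to simple point-to-edge distances; a standalone round trip (not from the dumped file; the max_dis clamp and border clipping are skipped):

import torch

points = torch.tensor([[30., 20.]])
gt = torch.tensor([[10., 5., 50., 45.]])
# encode: distances from the point to the four box edges
dist = torch.cat([points - gt[:, :2], gt[:, 2:] - points], dim=-1)
print(dist)  # tensor([[20., 15., 20., 25.]])
# decode: subtract/add the distances to recover the box
box = torch.cat([points - dist[:, :2], points + dist[:, 2:]], dim=-1)
print(box)   # tensor([[10.,  5., 50., 45.]])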
mmdetection
mmdetection-master/mmdet/core/bbox/coder/legacy_delta_xywh_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import numpy as np import torch from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class LegacyDeltaXYWHBBoxCoder(BaseBBoxCoder): """Legacy Delta XYWH BBox coder used in MMDet V1.x. Following the practice in R-CNN [1]_, this coder encodes bbox (x1, y1, x2, y2) into delta (dx, dy, dw, dh) and decodes delta (dx, dy, dw, dh) back to original bbox (x1, y1, x2, y2). Note: The main difference between :class`LegacyDeltaXYWHBBoxCoder` and :class:`DeltaXYWHBBoxCoder` is whether ``+ 1`` is used during width and height calculation. We suggest to only use this coder when testing with MMDet V1.x models. References: .. [1] https://arxiv.org/abs/1311.2524 Args: target_means (Sequence[float]): denormalizing means of target for delta coordinates target_stds (Sequence[float]): denormalizing standard deviation of target for delta coordinates """ def __init__(self, target_means=(0., 0., 0., 0.), target_stds=(1., 1., 1., 1.)): super(BaseBBoxCoder, self).__init__() self.means = target_means self.stds = target_stds def encode(self, bboxes, gt_bboxes): """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes``. Args: bboxes (torch.Tensor): source boxes, e.g., object proposals. gt_bboxes (torch.Tensor): target of the transformation, e.g., ground-truth boxes. Returns: torch.Tensor: Box transformation deltas """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = legacy_bbox2delta(bboxes, gt_bboxes, self.means, self.stds) return encoded_bboxes def decode(self, bboxes, pred_bboxes, max_shape=None, wh_ratio_clip=16 / 1000): """Apply transformation `pred_bboxes` to `boxes`. Args: boxes (torch.Tensor): Basic boxes. pred_bboxes (torch.Tensor): Encoded boxes with shape max_shape (tuple[int], optional): Maximum shape of boxes. Defaults to None. wh_ratio_clip (float, optional): The allowed ratio between width and height. Returns: torch.Tensor: Decoded boxes. """ assert pred_bboxes.size(0) == bboxes.size(0) decoded_bboxes = legacy_delta2bbox(bboxes, pred_bboxes, self.means, self.stds, max_shape, wh_ratio_clip) return decoded_bboxes @mmcv.jit(coderize=True) def legacy_bbox2delta(proposals, gt, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.)): """Compute deltas of proposals w.r.t. gt in the MMDet V1.x manner. We usually compute the deltas of x, y, w, h of proposals w.r.t ground truth bboxes to get regression target. This is the inverse function of `delta2bbox()` Args: proposals (Tensor): Boxes to be transformed, shape (N, ..., 4) gt (Tensor): Gt bboxes to be used as base, shape (N, ..., 4) means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates Returns: Tensor: deltas with shape (N, 4), where columns represent dx, dy, dw, dh. 
""" assert proposals.size() == gt.size() proposals = proposals.float() gt = gt.float() px = (proposals[..., 0] + proposals[..., 2]) * 0.5 py = (proposals[..., 1] + proposals[..., 3]) * 0.5 pw = proposals[..., 2] - proposals[..., 0] + 1.0 ph = proposals[..., 3] - proposals[..., 1] + 1.0 gx = (gt[..., 0] + gt[..., 2]) * 0.5 gy = (gt[..., 1] + gt[..., 3]) * 0.5 gw = gt[..., 2] - gt[..., 0] + 1.0 gh = gt[..., 3] - gt[..., 1] + 1.0 dx = (gx - px) / pw dy = (gy - py) / ph dw = torch.log(gw / pw) dh = torch.log(gh / ph) deltas = torch.stack([dx, dy, dw, dh], dim=-1) means = deltas.new_tensor(means).unsqueeze(0) stds = deltas.new_tensor(stds).unsqueeze(0) deltas = deltas.sub_(means).div_(stds) return deltas @mmcv.jit(coderize=True) def legacy_delta2bbox(rois, deltas, means=(0., 0., 0., 0.), stds=(1., 1., 1., 1.), max_shape=None, wh_ratio_clip=16 / 1000): """Apply deltas to shift/scale base boxes in the MMDet V1.x manner. Typically the rois are anchor or proposed bounding boxes and the deltas are network outputs used to shift/scale those boxes. This is the inverse function of `bbox2delta()` Args: rois (Tensor): Boxes to be transformed. Has shape (N, 4) deltas (Tensor): Encoded offsets with respect to each roi. Has shape (N, 4 * num_classes). Note N = num_anchors * W * H when rois is a grid of anchors. Offset encoding follows [1]_. means (Sequence[float]): Denormalizing means for delta coordinates stds (Sequence[float]): Denormalizing standard deviation for delta coordinates max_shape (tuple[int, int]): Maximum bounds for boxes. specifies (H, W) wh_ratio_clip (float): Maximum aspect ratio for boxes. Returns: Tensor: Boxes with shape (N, 4), where columns represent tl_x, tl_y, br_x, br_y. References: .. [1] https://arxiv.org/abs/1311.2524 Example: >>> rois = torch.Tensor([[ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 0., 0., 1., 1.], >>> [ 5., 5., 5., 5.]]) >>> deltas = torch.Tensor([[ 0., 0., 0., 0.], >>> [ 1., 1., 1., 1.], >>> [ 0., 0., 2., -1.], >>> [ 0.7, -1.9, -0.5, 0.3]]) >>> legacy_delta2bbox(rois, deltas, max_shape=(32, 32)) tensor([[0.0000, 0.0000, 1.5000, 1.5000], [0.0000, 0.0000, 5.2183, 5.2183], [0.0000, 0.1321, 7.8891, 0.8679], [5.3967, 2.4251, 6.0033, 3.7749]]) """ means = deltas.new_tensor(means).repeat(1, deltas.size(1) // 4) stds = deltas.new_tensor(stds).repeat(1, deltas.size(1) // 4) denorm_deltas = deltas * stds + means dx = denorm_deltas[:, 0::4] dy = denorm_deltas[:, 1::4] dw = denorm_deltas[:, 2::4] dh = denorm_deltas[:, 3::4] max_ratio = np.abs(np.log(wh_ratio_clip)) dw = dw.clamp(min=-max_ratio, max=max_ratio) dh = dh.clamp(min=-max_ratio, max=max_ratio) # Compute center of each roi px = ((rois[:, 0] + rois[:, 2]) * 0.5).unsqueeze(1).expand_as(dx) py = ((rois[:, 1] + rois[:, 3]) * 0.5).unsqueeze(1).expand_as(dy) # Compute width/height of each roi pw = (rois[:, 2] - rois[:, 0] + 1.0).unsqueeze(1).expand_as(dw) ph = (rois[:, 3] - rois[:, 1] + 1.0).unsqueeze(1).expand_as(dh) # Use exp(network energy) to enlarge/shrink each roi gw = pw * dw.exp() gh = ph * dh.exp() # Use network energy to shift the center of each roi gx = px + pw * dx gy = py + ph * dy # Convert center-xy/width/height to top-left, bottom-right # The true legacy box coder should +- 0.5 here. 
# However, current implementation improves the performance when testing # the models trained in MMDetection 1.X (~0.5 bbox AP, 0.2 mask AP) x1 = gx - gw * 0.5 y1 = gy - gh * 0.5 x2 = gx + gw * 0.5 y2 = gy + gh * 0.5 if max_shape is not None: x1 = x1.clamp(min=0, max=max_shape[1] - 1) y1 = y1.clamp(min=0, max=max_shape[0] - 1) x2 = x2.clamp(min=0, max=max_shape[1] - 1) y2 = y2.clamp(min=0, max=max_shape[0] - 1) bboxes = torch.stack([x1, y1, x2, y2], dim=-1).view_as(deltas) return bboxes
8,257
37.0553
79
py
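The only behavioral difference from DeltaXYWHBBoxCoder is the "+ 1" in width/height; a small sketch (illustrative values) of how the same delta decodes to different widths under the two conventions:

import torch

boxes = torch.tensor([[0., 0., 9., 9.]])
w_new = boxes[:, 2] - boxes[:, 0]        # 9.0  -- DeltaXYWHBBoxCoder convention
w_legacy = w_new + 1.0                   # 10.0 -- MMDet V1.x "+ 1" convention
dw = torch.tensor([0.2])
# the same dw delta therefore decodes to different widths under each coder
print(w_new * dw.exp())                  # tensor([10.9926])
print(w_legacy * dw.exp())               # tensor([12.2140])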
mmdetection
mmdetection-master/mmdet/core/bbox/coder/pseudo_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class PseudoBBoxCoder(BaseBBoxCoder): """Pseudo bounding box coder.""" def __init__(self, **kwargs): super(BaseBBoxCoder, self).__init__(**kwargs) def encode(self, bboxes, gt_bboxes): """torch.Tensor: return the given ``bboxes``""" return gt_bboxes def decode(self, bboxes, pred_bboxes): """torch.Tensor: return the given ``pred_bboxes``""" return pred_bboxes
577
27.9
60
py
mmdetection
mmdetection-master/mmdet/core/bbox/coder/tblr_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv import torch from ..builder import BBOX_CODERS from .base_bbox_coder import BaseBBoxCoder @BBOX_CODERS.register_module() class TBLRBBoxCoder(BaseBBoxCoder): """TBLR BBox coder. Following the practice in `FSAF <https://arxiv.org/abs/1903.00621>`_, this coder encodes gt bboxes (x1, y1, x2, y2) into (top, bottom, left, right) and decode it back to the original. Args: normalizer (list | float): Normalization factor to be divided with when coding the coordinates. If it is a list, it should have length of 4 indicating normalization factor in tblr dims. Otherwise it is a unified float factor for all dims. Default: 4.0 clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. """ def __init__(self, normalizer=4.0, clip_border=True): super(BaseBBoxCoder, self).__init__() self.normalizer = normalizer self.clip_border = clip_border def encode(self, bboxes, gt_bboxes): """Get box regression transformation deltas that can be used to transform the ``bboxes`` into the ``gt_bboxes`` in the (top, left, bottom, right) order. Args: bboxes (torch.Tensor): source boxes, e.g., object proposals. gt_bboxes (torch.Tensor): target of the transformation, e.g., ground truth boxes. Returns: torch.Tensor: Box transformation deltas """ assert bboxes.size(0) == gt_bboxes.size(0) assert bboxes.size(-1) == gt_bboxes.size(-1) == 4 encoded_bboxes = bboxes2tblr( bboxes, gt_bboxes, normalizer=self.normalizer) return encoded_bboxes def decode(self, bboxes, pred_bboxes, max_shape=None): """Apply transformation `pred_bboxes` to `boxes`. Args: bboxes (torch.Tensor): Basic boxes.Shape (B, N, 4) or (N, 4) pred_bboxes (torch.Tensor): Encoded boxes with shape (B, N, 4) or (N, 4) max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If bboxes shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. Returns: torch.Tensor: Decoded boxes. """ decoded_bboxes = tblr2bboxes( bboxes, pred_bboxes, normalizer=self.normalizer, max_shape=max_shape, clip_border=self.clip_border) return decoded_bboxes @mmcv.jit(coderize=True) def bboxes2tblr(priors, gts, normalizer=4.0, normalize_by_wh=True): """Encode ground truth boxes to tblr coordinate. It first convert the gt coordinate to tblr format, (top, bottom, left, right), relative to prior box centers. The tblr coordinate may be normalized by the side length of prior bboxes if `normalize_by_wh` is specified as True, and it is then normalized by the `normalizer` factor. Args: priors (Tensor): Prior boxes in point form Shape: (num_proposals,4). gts (Tensor): Coords of ground truth for each prior in point-form Shape: (num_proposals, 4). normalizer (Sequence[float] | float): normalization parameter of encoded boxes. If it is a list, it has to have length = 4. Default: 4.0 normalize_by_wh (bool): Whether to normalize tblr coordinate by the side length (wh) of prior bboxes. 
Return: encoded boxes (Tensor), Shape: (num_proposals, 4) """ # dist b/t match center and prior's center if not isinstance(normalizer, float): normalizer = torch.tensor(normalizer, device=priors.device) assert len(normalizer) == 4, 'Normalizer must have length = 4' assert priors.size(0) == gts.size(0) prior_centers = (priors[:, 0:2] + priors[:, 2:4]) / 2 xmin, ymin, xmax, ymax = gts.split(1, dim=1) top = prior_centers[:, 1].unsqueeze(1) - ymin bottom = ymax - prior_centers[:, 1].unsqueeze(1) left = prior_centers[:, 0].unsqueeze(1) - xmin right = xmax - prior_centers[:, 0].unsqueeze(1) loc = torch.cat((top, bottom, left, right), dim=1) if normalize_by_wh: # Normalize tblr by anchor width and height wh = priors[:, 2:4] - priors[:, 0:2] w, h = torch.split(wh, 1, dim=1) loc[:, :2] /= h # tb is normalized by h loc[:, 2:] /= w # lr is normalized by w # Normalize tblr by the given normalization factor return loc / normalizer @mmcv.jit(coderize=True) def tblr2bboxes(priors, tblr, normalizer=4.0, normalize_by_wh=True, max_shape=None, clip_border=True): """Decode tblr outputs to prediction boxes. The process includes 3 steps: 1) De-normalize tblr coordinates by multiplying it with `normalizer`; 2) De-normalize tblr coordinates by the prior bbox width and height if `normalize_by_wh` is `True`; 3) Convert tblr (top, bottom, left, right) pair relative to the center of priors back to (xmin, ymin, xmax, ymax) coordinate. Args: priors (Tensor): Prior boxes in point form (x0, y0, x1, y1) Shape: (N,4) or (B, N, 4). tblr (Tensor): Coords of network output in tblr form Shape: (N, 4) or (B, N, 4). normalizer (Sequence[float] | float): Normalization parameter of encoded boxes. By list, it represents the normalization factors at tblr dims. By float, it is the unified normalization factor at all dims. Default: 4.0 normalize_by_wh (bool): Whether the tblr coordinates have been normalized by the side length (wh) of prior bboxes. max_shape (Sequence[int] or torch.Tensor or Sequence[ Sequence[int]],optional): Maximum bounds for boxes, specifies (H, W, C) or (H, W). If priors shape is (B, N, 4), then the max_shape should be a Sequence[Sequence[int]] and the length of max_shape should also be B. clip_border (bool, optional): Whether clip the objects outside the border of the image. Defaults to True. 
Return: encoded boxes (Tensor): Boxes with shape (N, 4) or (B, N, 4) """ if not isinstance(normalizer, float): normalizer = torch.tensor(normalizer, device=priors.device) assert len(normalizer) == 4, 'Normalizer must have length = 4' assert priors.size(0) == tblr.size(0) if priors.ndim == 3: assert priors.size(1) == tblr.size(1) loc_decode = tblr * normalizer prior_centers = (priors[..., 0:2] + priors[..., 2:4]) / 2 if normalize_by_wh: wh = priors[..., 2:4] - priors[..., 0:2] w, h = torch.split(wh, 1, dim=-1) # Inplace operation with slice would failed for exporting to ONNX th = h * loc_decode[..., :2] # tb tw = w * loc_decode[..., 2:] # lr loc_decode = torch.cat([th, tw], dim=-1) # Cannot be exported using onnx when loc_decode.split(1, dim=-1) top, bottom, left, right = loc_decode.split((1, 1, 1, 1), dim=-1) xmin = prior_centers[..., 0].unsqueeze(-1) - left xmax = prior_centers[..., 0].unsqueeze(-1) + right ymin = prior_centers[..., 1].unsqueeze(-1) - top ymax = prior_centers[..., 1].unsqueeze(-1) + bottom bboxes = torch.cat((xmin, ymin, xmax, ymax), dim=-1) if clip_border and max_shape is not None: # clip bboxes with dynamic `min` and `max` for onnx if torch.onnx.is_in_onnx_export(): from mmdet.core.export import dynamic_clip_for_onnx xmin, ymin, xmax, ymax = dynamic_clip_for_onnx( xmin, ymin, xmax, ymax, max_shape) bboxes = torch.cat([xmin, ymin, xmax, ymax], dim=-1) return bboxes if not isinstance(max_shape, torch.Tensor): max_shape = priors.new_tensor(max_shape) max_shape = max_shape[..., :2].type_as(priors) if max_shape.ndim == 2: assert bboxes.ndim == 3 assert max_shape.size(0) == bboxes.size(0) min_xy = priors.new_tensor(0) max_xy = torch.cat([max_shape, max_shape], dim=-1).flip(-1).unsqueeze(-2) bboxes = torch.where(bboxes < min_xy, min_xy, bboxes) bboxes = torch.where(bboxes > max_xy, max_xy, bboxes) return bboxes
8,625
40.671498
78
py
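A standalone round trip (not from the dumped file) through the TBLR transform above, for a single prior with scalar normalizer = 4.0 and normalize_by_wh = True:

import torch

prior = torch.tensor([[10., 10., 30., 50.]])     # x1, y1, x2, y2
gt = torch.tensor([[12., 16., 28., 44.]])
cx, cy = (prior[:, 0] + prior[:, 2]) / 2, (prior[:, 1] + prior[:, 3]) / 2
# encode: distances from the prior center to the gt edges, (t, b, l, r) order
t, b = cy - gt[:, 1], gt[:, 3] - cy
l, r = cx - gt[:, 0], gt[:, 2] - cx
w, h = prior[:, 2] - prior[:, 0], prior[:, 3] - prior[:, 1]
loc = torch.stack([t / h, b / h, l / w, r / w], dim=-1) / 4.0
print(loc)  # tensor([[0.0875, 0.0875, 0.1000, 0.1000]])
# decode inverts every step and recovers the gt box
t2, b2, l2, r2 = (loc * 4.0 * torch.stack([h, h, w, w], dim=-1)).unbind(-1)
print(torch.stack([cx - l2, cy - t2, cx + r2, cy + b2], dim=-1))
# tensor([[12., 16., 28., 44.]])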
mmdetection
mmdetection-master/mmdet/core/bbox/coder/yolo_bbox_coder.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch

from ..builder import BBOX_CODERS
from .base_bbox_coder import BaseBBoxCoder


@BBOX_CODERS.register_module()
class YOLOBBoxCoder(BaseBBoxCoder):
    """YOLO BBox coder.

    Following `YOLO <https://arxiv.org/abs/1506.02640>`_, this coder divides
    the image into grids, and encodes bbox (x1, y1, x2, y2) into (cx, cy, dw,
    dh). cx, cy in [0., 1.] denote the relative center position w.r.t. the
    center of bboxes. dw, dh are the same as :obj:`DeltaXYWHBBoxCoder`.

    Args:
        eps (float): Min value of cx, cy when encoding.
    """

    def __init__(self, eps=1e-6):
        super(BaseBBoxCoder, self).__init__()
        self.eps = eps

    @mmcv.jit(coderize=True)
    def encode(self, bboxes, gt_bboxes, stride):
        """Get box regression transformation deltas that can be used to
        transform the ``bboxes`` into the ``gt_bboxes``.

        Args:
            bboxes (torch.Tensor): Source boxes, e.g., anchors.
            gt_bboxes (torch.Tensor): Target of the transformation, e.g.,
                ground-truth boxes.
            stride (torch.Tensor | int): Stride of bboxes.

        Returns:
            torch.Tensor: Box transformation deltas
        """
        assert bboxes.size(0) == gt_bboxes.size(0)
        assert bboxes.size(-1) == gt_bboxes.size(-1) == 4
        x_center_gt = (gt_bboxes[..., 0] + gt_bboxes[..., 2]) * 0.5
        y_center_gt = (gt_bboxes[..., 1] + gt_bboxes[..., 3]) * 0.5
        w_gt = gt_bboxes[..., 2] - gt_bboxes[..., 0]
        h_gt = gt_bboxes[..., 3] - gt_bboxes[..., 1]
        x_center = (bboxes[..., 0] + bboxes[..., 2]) * 0.5
        y_center = (bboxes[..., 1] + bboxes[..., 3]) * 0.5
        w = bboxes[..., 2] - bboxes[..., 0]
        h = bboxes[..., 3] - bboxes[..., 1]
        w_target = torch.log((w_gt / w).clamp(min=self.eps))
        h_target = torch.log((h_gt / h).clamp(min=self.eps))
        x_center_target = ((x_center_gt - x_center) / stride + 0.5).clamp(
            self.eps, 1 - self.eps)
        y_center_target = ((y_center_gt - y_center) / stride + 0.5).clamp(
            self.eps, 1 - self.eps)
        encoded_bboxes = torch.stack(
            [x_center_target, y_center_target, w_target, h_target], dim=-1)
        return encoded_bboxes

    @mmcv.jit(coderize=True)
    def decode(self, bboxes, pred_bboxes, stride):
        """Apply transformation `pred_bboxes` to `boxes`.

        Args:
            bboxes (torch.Tensor): Basic boxes, e.g. anchors.
            pred_bboxes (torch.Tensor): Encoded boxes with shape (..., 4),
                i.e. (cx, cy, dw, dh).
            stride (torch.Tensor | int): Strides of bboxes.

        Returns:
            torch.Tensor: Decoded boxes.
        """
        assert pred_bboxes.size(-1) == bboxes.size(-1) == 4
        xy_centers = (bboxes[..., :2] + bboxes[..., 2:]) * 0.5 + (
            pred_bboxes[..., :2] - 0.5) * stride
        whs = (bboxes[..., 2:] -
               bboxes[..., :2]) * 0.5 * pred_bboxes[..., 2:].exp()
        decoded_bboxes = torch.stack(
            (xy_centers[..., 0] - whs[..., 0], xy_centers[..., 1] -
             whs[..., 1], xy_centers[..., 0] + whs[..., 0],
             xy_centers[..., 1] + whs[..., 1]),
            dim=-1)
        return decoded_bboxes
3,252
37.72619
77
py
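A standalone decode step (not from the dumped file) following the YOLO formulas above, for one anchor and one prediction; the stride value is illustrative:

import torch

anchor = torch.tensor([[16., 16., 48., 48.]])   # 32x32 anchor centered at (32, 32)
pred = torch.tensor([[0.75, 0.75, 0.0, 0.0]])   # (cx, cy, dw, dh) network output
stride = 16
# center = anchor center + (pred - 0.5) * stride; size scales by exp(dw), exp(dh)
xy = (anchor[:, :2] + anchor[:, 2:]) / 2 + (pred[:, :2] - 0.5) * stride
wh = (anchor[:, 2:] - anchor[:, :2]) / 2 * pred[:, 2:].exp()
box = torch.cat([xy - wh, xy + wh], dim=-1)
print(box)  # tensor([[20., 20., 52., 52.]]) -- shifted 4 px right/down, same size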
mmdetection
mmdetection-master/mmdet/core/bbox/iou_calculators/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .builder import build_iou_calculator from .iou2d_calculator import BboxOverlaps2D, bbox_overlaps __all__ = ['build_iou_calculator', 'BboxOverlaps2D', 'bbox_overlaps']
221
36
69
py
mmdetection
mmdetection-master/mmdet/core/bbox/iou_calculators/builder.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.utils import Registry, build_from_cfg IOU_CALCULATORS = Registry('IoU calculator') def build_iou_calculator(cfg, default_args=None): """Builder of IoU calculator.""" return build_from_cfg(cfg, IOU_CALCULATORS, default_args)
293
28.4
61
py
mmdetection
mmdetection-master/mmdet/core/bbox/iou_calculators/iou2d_calculator.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch

from .builder import IOU_CALCULATORS


def cast_tensor_type(x, scale=1., dtype=None):
    if dtype == 'fp16':
        # scale is for preventing overflows
        x = (x / scale).half()
    return x


def fp16_clamp(x, min=None, max=None):
    if not x.is_cuda and x.dtype == torch.float16:
        # clamp for cpu float16, tensor fp16 has no clamp implementation
        return x.float().clamp(min, max).half()

    return x.clamp(min, max)


@IOU_CALCULATORS.register_module()
class BboxOverlaps2D:
    """2D Overlaps (e.g. IoUs, GIoUs) Calculator."""

    def __init__(self, scale=1., dtype=None):
        self.scale = scale
        self.dtype = dtype

    def __call__(self, bboxes1, bboxes2, mode='iou', is_aligned=False):
        """Calculate IoU between 2D bboxes.

        Args:
            bboxes1 (Tensor): bboxes have shape (m, 4) in <x1, y1, x2, y2>
                format, or shape (m, 5) in <x1, y1, x2, y2, score> format.
            bboxes2 (Tensor): bboxes have shape (n, 4) in <x1, y1, x2, y2>
                format, shape (n, 5) in <x1, y1, x2, y2, score> format, or be
                empty.
            mode (str): "iou" (intersection over union), "iof" (intersection
                over foreground), or "giou" (generalized intersection over
                union).
            is_aligned (bool, optional): If True, then m and n must be equal.
                Default False.

        Returns:
            Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
        """
        assert bboxes1.size(-1) in [0, 4, 5]
        assert bboxes2.size(-1) in [0, 4, 5]
        if bboxes2.size(-1) == 5:
            bboxes2 = bboxes2[..., :4]
        if bboxes1.size(-1) == 5:
            bboxes1 = bboxes1[..., :4]

        if self.dtype == 'fp16':
            # change tensor type to save cpu and cuda memory and keep speed
            bboxes1 = cast_tensor_type(bboxes1, self.scale, self.dtype)
            bboxes2 = cast_tensor_type(bboxes2, self.scale, self.dtype)
            overlaps = bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)
            if not overlaps.is_cuda and overlaps.dtype == torch.float16:
                # resume cpu float32
                overlaps = overlaps.float()
            return overlaps

        return bbox_overlaps(bboxes1, bboxes2, mode, is_aligned)

    def __repr__(self):
        """str: a string describing the module"""
        repr_str = self.__class__.__name__ + f'(' \
            f'scale={self.scale}, dtype={self.dtype})'
        return repr_str


def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):
    """Calculate overlap between two set of bboxes.

    FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889

    Note:
        Assume bboxes1 is M x 4, bboxes2 is N x 4. When mode is 'iou', the
        following intermediate variables are created while calculating IOU
        with the bbox_overlaps function:

        1) is_aligned is False
            area1: M x 1
            area2: N x 1
            lt: M x N x 2
            rb: M x N x 2
            wh: M x N x 2
            overlap: M x N x 1
            union: M x N x 1
            ious: M x N x 1

            Total memory:
                S = (9 x N x M + N + M) * 4 Byte

            When using FP16, we can reduce:
                R = (9 x N x M + N + M) * 4 / 2 Byte
            R > (N + M) * 4 * 2 always holds when N and M >= 1:
            N + M <= N * M < 3 * N * M when N >= 2 and M >= 2,
            and N + 1 < 3 * N when N or M is 1.

            Given M = 40 (ground truths) and N = 400000 (three anchor boxes
            per grid cell, FPN, R-CNNs), R = 275 MB per call.
            A special case (dense detection), M = 512 (ground truths),
            gives R = 3516 MB = 3.43 GB.
            When the batch size is B, the reduction is B x R.
            Therefore, CUDA memory runs out frequently.

            Experiments on GeForce RTX 2080Ti (11019 MiB):

            | dtype |  M  |   N    |   Use    |   Real   |  Ideal   |
            |:-----:|:---:|:------:|:--------:|:--------:|:--------:|
            | FP32  | 512 | 400000 | 8020 MiB |    --    |    --    |
            | FP16  | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |
            | FP32  | 40  | 400000 | 1540 MiB |    --    |    --    |
            | FP16  | 40  | 400000 | 1264 MiB |  276 MiB |  275 MiB |

        2) is_aligned is True
            area1: N x 1
            area2: N x 1
            lt: N x 2
            rb: N x 2
            wh: N x 2
            overlap: N x 1
            union: N x 1
            ious: N x 1

            Total memory:
                S = 11 x N * 4 Byte

            When using FP16, we can reduce:
                R = 11 x N * 4 / 2 Byte

        The same holds for 'giou' (which uses more memory than 'iou').

        Time-wise, FP16 is generally faster than FP32.

        When gpu_assign_thr is not -1, the calculation runs on CPU and takes
        more time, but does not reduce memory. In that case, FP16 can halve
        the memory while keeping the speed.

    If ``is_aligned`` is ``False``, then calculate the overlaps between each
    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
    pair of bboxes1 and bboxes2.

    Args:
        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
            B indicates the batch dim, in shape (B1, B2, ..., Bn).
            If ``is_aligned`` is ``True``, then m and n must be equal.
        mode (str): "iou" (intersection over union), "iof" (intersection over
            foreground) or "giou" (generalized intersection over union).
            Default "iou".
        is_aligned (bool, optional): If True, then m and n must be equal.
            Default False.
        eps (float, optional): A value added to the denominator for numerical
            stability. Default 1e-6.

    Returns:
        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)

    Example:
        >>> bboxes1 = torch.FloatTensor([
        >>>     [0, 0, 10, 10],
        >>>     [10, 10, 20, 20],
        >>>     [32, 32, 38, 42],
        >>> ])
        >>> bboxes2 = torch.FloatTensor([
        >>>     [0, 0, 10, 20],
        >>>     [0, 10, 10, 19],
        >>>     [10, 10, 20, 20],
        >>> ])
        >>> overlaps = bbox_overlaps(bboxes1, bboxes2)
        >>> assert overlaps.shape == (3, 3)
        >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)
        >>> assert overlaps.shape == (3, )

    Example:
        >>> empty = torch.empty(0, 4)
        >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])
        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
    """

    assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'
    # Either the boxes are empty or the length of boxes' last dimension is 4
    assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)
    assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)

    # Batch dim must be the same
    # Batch dim: (B1, B2, ... 
Bn) assert bboxes1.shape[:-2] == bboxes2.shape[:-2] batch_shape = bboxes1.shape[:-2] rows = bboxes1.size(-2) cols = bboxes2.size(-2) if is_aligned: assert rows == cols if rows * cols == 0: if is_aligned: return bboxes1.new(batch_shape + (rows, )) else: return bboxes1.new(batch_shape + (rows, cols)) area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * ( bboxes1[..., 3] - bboxes1[..., 1]) area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * ( bboxes2[..., 3] - bboxes2[..., 1]) if is_aligned: lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2] rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2] wh = fp16_clamp(rb - lt, min=0) overlap = wh[..., 0] * wh[..., 1] if mode in ['iou', 'giou']: union = area1 + area2 - overlap else: union = area1 if mode == 'giou': enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2]) enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:]) else: lt = torch.max(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]) # [B, rows, cols, 2] rb = torch.min(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]) # [B, rows, cols, 2] wh = fp16_clamp(rb - lt, min=0) overlap = wh[..., 0] * wh[..., 1] if mode in ['iou', 'giou']: union = area1[..., None] + area2[..., None, :] - overlap else: union = area1[..., None] if mode == 'giou': enclosed_lt = torch.min(bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]) enclosed_rb = torch.max(bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]) eps = union.new_tensor([eps]) union = torch.max(union, eps) ious = overlap / union if mode in ['iou', 'iof']: return ious # calculate gious enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0) enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] enclose_area = torch.max(enclose_area, eps) gious = ious - (enclose_area - union) / enclose_area return gious
9,602
35.793103
78
py
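The pairwise branch of bbox_overlaps above, reproduced by hand for one pair of boxes (not from the dumped file; mode='iou', is_aligned=False):

import torch

b1 = torch.tensor([[0., 0., 10., 10.]])
b2 = torch.tensor([[5., 5., 15., 15.]])
# intersection corners via broadcasting, as in the is_aligned=False branch
lt = torch.max(b1[:, None, :2], b2[None, :, :2])
rb = torch.min(b1[:, None, 2:], b2[None, :, 2:])
wh = (rb - lt).clamp(min=0)
inter = wh[..., 0] * wh[..., 1]                          # 25
area1 = (b1[:, 2] - b1[:, 0]) * (b1[:, 3] - b1[:, 1])    # 100
area2 = (b2[:, 2] - b2[:, 0]) * (b2[:, 3] - b2[:, 1])    # 100
iou = inter / (area1[:, None] + area2[None, :] - inter)
print(iou)  # tensor([[0.1429]]) -- 25 / 175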
mmdetection
mmdetection-master/mmdet/core/bbox/match_costs/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .builder import build_match_cost from .match_cost import (BBoxL1Cost, ClassificationCost, CrossEntropyLossCost, DiceCost, FocalLossCost, IoUCost) __all__ = [ 'build_match_cost', 'ClassificationCost', 'BBoxL1Cost', 'IoUCost', 'FocalLossCost', 'DiceCost', 'CrossEntropyLossCost' ]
366
35.7
78
py
mmdetection
mmdetection-master/mmdet/core/bbox/match_costs/builder.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.utils import Registry, build_from_cfg

MATCH_COST = Registry('Match Cost')


def build_match_cost(cfg, default_args=None):
    """Builder of match cost."""
    return build_from_cfg(cfg, MATCH_COST, default_args)
275
26.6
56
py
mmdetection
mmdetection-master/mmdet/core/bbox/match_costs/match_cost.py
# Copyright (c) OpenMMLab. All rights reserved. import torch import torch.nn.functional as F from mmdet.core.bbox.iou_calculators import bbox_overlaps from mmdet.core.bbox.transforms import bbox_cxcywh_to_xyxy, bbox_xyxy_to_cxcywh from .builder import MATCH_COST @MATCH_COST.register_module() class BBoxL1Cost: """BBoxL1Cost. Args: weight (int | float, optional): loss_weight box_format (str, optional): 'xyxy' for DETR, 'xywh' for Sparse_RCNN Examples: >>> from mmdet.core.bbox.match_costs.match_cost import BBoxL1Cost >>> import torch >>> self = BBoxL1Cost() >>> bbox_pred = torch.rand(1, 4) >>> gt_bboxes= torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(bbox_pred, gt_bboxes, factor) tensor([[1.6172, 1.6422]]) """ def __init__(self, weight=1., box_format='xyxy'): self.weight = weight assert box_format in ['xyxy', 'xywh'] self.box_format = box_format def __call__(self, bbox_pred, gt_bboxes): """ Args: bbox_pred (Tensor): Predicted boxes with normalized coordinates (cx, cy, w, h), which are all in range [0, 1]. Shape (num_query, 4). gt_bboxes (Tensor): Ground truth boxes with normalized coordinates (x1, y1, x2, y2). Shape (num_gt, 4). Returns: torch.Tensor: bbox_cost value with weight """ if self.box_format == 'xywh': gt_bboxes = bbox_xyxy_to_cxcywh(gt_bboxes) elif self.box_format == 'xyxy': bbox_pred = bbox_cxcywh_to_xyxy(bbox_pred) bbox_cost = torch.cdist(bbox_pred, gt_bboxes, p=1) return bbox_cost * self.weight @MATCH_COST.register_module() class FocalLossCost: """FocalLossCost. Args: weight (int | float, optional): loss_weight alpha (int | float, optional): focal_loss alpha gamma (int | float, optional): focal_loss gamma eps (float, optional): default 1e-12 binary_input (bool, optional): Whether the input is binary, default False. Examples: >>> from mmdet.core.bbox.match_costs.match_cost import FocalLossCost >>> import torch >>> self = FocalLossCost() >>> cls_pred = torch.rand(4, 3) >>> gt_labels = torch.tensor([0, 1, 2]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(cls_pred, gt_labels) tensor([[-0.3236, -0.3364, -0.2699], [-0.3439, -0.3209, -0.4807], [-0.4099, -0.3795, -0.2929], [-0.1950, -0.1207, -0.2626]]) """ def __init__(self, weight=1., alpha=0.25, gamma=2, eps=1e-12, binary_input=False): self.weight = weight self.alpha = alpha self.gamma = gamma self.eps = eps self.binary_input = binary_input def _focal_loss_cost(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classification logits, shape (num_query, num_class). gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). Returns: torch.Tensor: cls_cost value with weight """ cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = pos_cost[:, gt_labels] - neg_cost[:, gt_labels] return cls_cost * self.weight def _mask_focal_loss_cost(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classfication logits in shape (num_query, d1, ..., dn), dtype=torch.float32. gt_labels (Tensor): Ground truth in shape (num_gt, d1, ..., dn), dtype=torch.long. Labels should be binary. Returns: Tensor: Focal cost matrix with weight in shape\ (num_query, num_gt). 
""" cls_pred = cls_pred.flatten(1) gt_labels = gt_labels.flatten(1).float() n = cls_pred.shape[1] cls_pred = cls_pred.sigmoid() neg_cost = -(1 - cls_pred + self.eps).log() * ( 1 - self.alpha) * cls_pred.pow(self.gamma) pos_cost = -(cls_pred + self.eps).log() * self.alpha * ( 1 - cls_pred).pow(self.gamma) cls_cost = torch.einsum('nc,mc->nm', pos_cost, gt_labels) + \ torch.einsum('nc,mc->nm', neg_cost, (1 - gt_labels)) return cls_cost / n * self.weight def __call__(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classfication logits. gt_labels (Tensor)): Labels. Returns: Tensor: Focal cost matrix with weight in shape\ (num_query, num_gt). """ if self.binary_input: return self._mask_focal_loss_cost(cls_pred, gt_labels) else: return self._focal_loss_cost(cls_pred, gt_labels) @MATCH_COST.register_module() class ClassificationCost: """ClsSoftmaxCost. Args: weight (int | float, optional): loss_weight Examples: >>> from mmdet.core.bbox.match_costs.match_cost import \ ... ClassificationCost >>> import torch >>> self = ClassificationCost() >>> cls_pred = torch.rand(4, 3) >>> gt_labels = torch.tensor([0, 1, 2]) >>> factor = torch.tensor([10, 8, 10, 8]) >>> self(cls_pred, gt_labels) tensor([[-0.3430, -0.3525, -0.3045], [-0.3077, -0.2931, -0.3992], [-0.3664, -0.3455, -0.2881], [-0.3343, -0.2701, -0.3956]]) """ def __init__(self, weight=1.): self.weight = weight def __call__(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classification logits, shape (num_query, num_class). gt_labels (Tensor): Label of `gt_bboxes`, shape (num_gt,). Returns: torch.Tensor: cls_cost value with weight """ # Following the official DETR repo, contrary to the loss that # NLL is used, we approximate it in 1 - cls_score[gt_label]. # The 1 is a constant that doesn't change the matching, # so it can be omitted. cls_score = cls_pred.softmax(-1) cls_cost = -cls_score[:, gt_labels] return cls_cost * self.weight @MATCH_COST.register_module() class IoUCost: """IoUCost. Args: iou_mode (str, optional): iou mode such as 'iou' | 'giou' weight (int | float, optional): loss weight Examples: >>> from mmdet.core.bbox.match_costs.match_cost import IoUCost >>> import torch >>> self = IoUCost() >>> bboxes = torch.FloatTensor([[1,1, 2, 2], [2, 2, 3, 4]]) >>> gt_bboxes = torch.FloatTensor([[0, 0, 2, 4], [1, 2, 3, 4]]) >>> self(bboxes, gt_bboxes) tensor([[-0.1250, 0.1667], [ 0.1667, -0.5000]]) """ def __init__(self, iou_mode='giou', weight=1.): self.weight = weight self.iou_mode = iou_mode def __call__(self, bboxes, gt_bboxes): """ Args: bboxes (Tensor): Predicted boxes with unnormalized coordinates (x1, y1, x2, y2). Shape (num_query, 4). gt_bboxes (Tensor): Ground truth boxes with unnormalized coordinates (x1, y1, x2, y2). Shape (num_gt, 4). Returns: torch.Tensor: iou_cost value with weight """ # overlaps: [num_bboxes, num_gt] overlaps = bbox_overlaps( bboxes, gt_bboxes, mode=self.iou_mode, is_aligned=False) # The 1 is a constant that doesn't change the matching, so omitted. iou_cost = -overlaps return iou_cost * self.weight @MATCH_COST.register_module() class DiceCost: """Cost of mask assignments based on dice losses. Args: weight (int | float, optional): loss_weight. Defaults to 1. pred_act (bool, optional): Whether to apply sigmoid to mask_pred. Defaults to False. eps (float, optional): default 1e-12. naive_dice (bool, optional): If True, use the naive dice loss in which the power of the number in the denominator is the first power. If Flase, use the second power that is adopted by K-Net and SOLO. 
Defaults to True. """ def __init__(self, weight=1., pred_act=False, eps=1e-3, naive_dice=True): self.weight = weight self.pred_act = pred_act self.eps = eps self.naive_dice = naive_dice def binary_mask_dice_loss(self, mask_preds, gt_masks): """ Args: mask_preds (Tensor): Mask prediction in shape (num_query, *). gt_masks (Tensor): Ground truth in shape (num_gt, *) store 0 or 1, 0 for negative class and 1 for positive class. Returns: Tensor: Dice cost matrix in shape (num_query, num_gt). """ mask_preds = mask_preds.flatten(1) gt_masks = gt_masks.flatten(1).float() numerator = 2 * torch.einsum('nc,mc->nm', mask_preds, gt_masks) if self.naive_dice: denominator = mask_preds.sum(-1)[:, None] + \ gt_masks.sum(-1)[None, :] else: denominator = mask_preds.pow(2).sum(1)[:, None] + \ gt_masks.pow(2).sum(1)[None, :] loss = 1 - (numerator + self.eps) / (denominator + self.eps) return loss def __call__(self, mask_preds, gt_masks): """ Args: mask_preds (Tensor): Mask prediction logits in shape (num_query, *) gt_masks (Tensor): Ground truth in shape (num_gt, *) Returns: Tensor: Dice cost matrix with weight in shape (num_query, num_gt). """ if self.pred_act: mask_preds = mask_preds.sigmoid() dice_cost = self.binary_mask_dice_loss(mask_preds, gt_masks) return dice_cost * self.weight @MATCH_COST.register_module() class CrossEntropyLossCost: """CrossEntropyLossCost. Args: weight (int | float, optional): loss weight. Defaults to 1. use_sigmoid (bool, optional): Whether the prediction uses sigmoid of softmax. Defaults to True. Examples: >>> from mmdet.core.bbox.match_costs import CrossEntropyLossCost >>> import torch >>> bce = CrossEntropyLossCost(use_sigmoid=True) >>> cls_pred = torch.tensor([[7.6, 1.2], [-1.3, 10]]) >>> gt_labels = torch.tensor([[1, 1], [1, 0]]) >>> print(bce(cls_pred, gt_labels)) """ def __init__(self, weight=1., use_sigmoid=True): assert use_sigmoid, 'use_sigmoid = False is not supported yet.' self.weight = weight self.use_sigmoid = use_sigmoid def _binary_cross_entropy(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): The prediction with shape (num_query, 1, *) or (num_query, *). gt_labels (Tensor): The learning label of prediction with shape (num_gt, *). Returns: Tensor: Cross entropy cost matrix in shape (num_query, num_gt). """ cls_pred = cls_pred.flatten(1).float() gt_labels = gt_labels.flatten(1).float() n = cls_pred.shape[1] pos = F.binary_cross_entropy_with_logits( cls_pred, torch.ones_like(cls_pred), reduction='none') neg = F.binary_cross_entropy_with_logits( cls_pred, torch.zeros_like(cls_pred), reduction='none') cls_cost = torch.einsum('nc,mc->nm', pos, gt_labels) + \ torch.einsum('nc,mc->nm', neg, 1 - gt_labels) cls_cost = cls_cost / n return cls_cost def __call__(self, cls_pred, gt_labels): """ Args: cls_pred (Tensor): Predicted classification logits. gt_labels (Tensor): Labels. Returns: Tensor: Cross entropy cost matrix with weight in shape (num_query, num_gt). """ if self.use_sigmoid: cls_cost = self._binary_cross_entropy(cls_pred, gt_labels) else: raise NotImplementedError return cls_cost * self.weight
12,651
34.144444
79
py
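A standalone sketch (not from the dumped file) of the ClassificationCost matrix above: softmax scores negated, so a lower cost means a better query-to-gt match:

import torch

cls_pred = torch.tensor([[2.0, 0.5, 0.1],
                         [0.2, 3.0, 0.4]])   # (num_query, num_class) logits
gt_labels = torch.tensor([0, 1])
# cost = -softmax(logits)[:, gt_labels], as in ClassificationCost.__call__
cls_cost = -cls_pred.softmax(-1)[:, gt_labels]
print(cls_cost)
# query 0 is cheapest for gt class 0, query 1 for gt class 1
print(cls_cost.argmin(dim=0))  # tensor([0, 1])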
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .base_sampler import BaseSampler from .combined_sampler import CombinedSampler from .instance_balanced_pos_sampler import InstanceBalancedPosSampler from .iou_balanced_neg_sampler import IoUBalancedNegSampler from .mask_pseudo_sampler import MaskPseudoSampler from .mask_sampling_result import MaskSamplingResult from .ohem_sampler import OHEMSampler from .pseudo_sampler import PseudoSampler from .random_sampler import RandomSampler from .sampling_result import SamplingResult from .score_hlr_sampler import ScoreHLRSampler __all__ = [ 'BaseSampler', 'PseudoSampler', 'RandomSampler', 'InstanceBalancedPosSampler', 'IoUBalancedNegSampler', 'CombinedSampler', 'OHEMSampler', 'SamplingResult', 'ScoreHLRSampler', 'MaskPseudoSampler', 'MaskSamplingResult' ]
827
40.4
77
py
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/base_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import torch from .sampling_result import SamplingResult class BaseSampler(metaclass=ABCMeta): """Base class of samplers.""" def __init__(self, num, pos_fraction, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs): self.num = num self.pos_fraction = pos_fraction self.neg_pos_ub = neg_pos_ub self.add_gt_as_proposals = add_gt_as_proposals self.pos_sampler = self self.neg_sampler = self @abstractmethod def _sample_pos(self, assign_result, num_expected, **kwargs): """Sample positive samples.""" pass @abstractmethod def _sample_neg(self, assign_result, num_expected, **kwargs): """Sample negative samples.""" pass def sample(self, assign_result, bboxes, gt_bboxes, gt_labels=None, **kwargs): """Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. Args: assign_result (:obj:`AssignResult`): Bbox assigning results. bboxes (Tensor): Boxes to be sampled from. gt_bboxes (Tensor): Ground truth bboxes. gt_labels (Tensor, optional): Class labels of ground truth bboxes. Returns: :obj:`SamplingResult`: Sampling result. Example: >>> from mmdet.core.bbox import RandomSampler >>> from mmdet.core.bbox import AssignResult >>> from mmdet.core.bbox.demodata import ensure_rng, random_boxes >>> rng = ensure_rng(None) >>> assign_result = AssignResult.random(rng=rng) >>> bboxes = random_boxes(assign_result.num_preds, rng=rng) >>> gt_bboxes = random_boxes(assign_result.num_gts, rng=rng) >>> gt_labels = None >>> self = RandomSampler(num=32, pos_fraction=0.5, neg_pos_ub=-1, >>> add_gt_as_proposals=False) >>> self = self.sample(assign_result, bboxes, gt_bboxes, gt_labels) """ if len(bboxes.shape) < 2: bboxes = bboxes[None, :] bboxes = bboxes[:, :4] gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) if self.add_gt_as_proposals and len(gt_bboxes) > 0: if gt_labels is None: raise ValueError( 'gt_labels must be given when add_gt_as_proposals is True') bboxes = torch.cat([gt_bboxes, bboxes], dim=0) assign_result.add_gt_(gt_labels) gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) gt_flags = torch.cat([gt_ones, gt_flags]) num_expected_pos = int(self.num * self.pos_fraction) pos_inds = self.pos_sampler._sample_pos( assign_result, num_expected_pos, bboxes=bboxes, **kwargs) # We found that sampled indices have duplicated items occasionally. # (may be a bug of PyTorch) pos_inds = pos_inds.unique() num_sampled_pos = pos_inds.numel() num_expected_neg = self.num - num_sampled_pos if self.neg_pos_ub >= 0: _pos = max(1, num_sampled_pos) neg_upper_bound = int(self.neg_pos_ub * _pos) if num_expected_neg > neg_upper_bound: num_expected_neg = neg_upper_bound neg_inds = self.neg_sampler._sample_neg( assign_result, num_expected_neg, bboxes=bboxes, **kwargs) neg_inds = neg_inds.unique() sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags) return sampling_result
3,920
37.067961
79
py
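BaseSampler.sample() above only needs subclasses to provide _sample_pos and _sample_neg. A minimal hypothetical subclass to illustrate that contract (FirstKSampler is not part of mmdetection; it just keeps the first candidates deterministically):

import torch

from mmdet.core.bbox.samplers.base_sampler import BaseSampler


class FirstKSampler(BaseSampler):
    """Toy sampler: deterministically keeps the first candidates."""

    def _sample_pos(self, assign_result, num_expected, **kwargs):
        # Positives are candidates assigned to a gt box (gt_inds > 0).
        pos_inds = torch.nonzero(
            assign_result.gt_inds > 0, as_tuple=False).squeeze(-1)
        return pos_inds[:num_expected]

    def _sample_neg(self, assign_result, num_expected, **kwargs):
        # Negatives are candidates assigned to background (gt_inds == 0).
        neg_inds = torch.nonzero(
            assign_result.gt_inds == 0, as_tuple=False).squeeze(-1)
        return neg_inds[:num_expected]


# add_gt_as_proposals=False so sample() does not require gt_labels.
sampler = FirstKSampler(num=16, pos_fraction=0.25, add_gt_as_proposals=False)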
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/combined_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. from ..builder import BBOX_SAMPLERS, build_sampler from .base_sampler import BaseSampler @BBOX_SAMPLERS.register_module() class CombinedSampler(BaseSampler): """A sampler that combines positive sampler and negative sampler.""" def __init__(self, pos_sampler, neg_sampler, **kwargs): super(CombinedSampler, self).__init__(**kwargs) self.pos_sampler = build_sampler(pos_sampler, **kwargs) self.neg_sampler = build_sampler(neg_sampler, **kwargs) def _sample_pos(self, **kwargs): """Sample positive samples.""" raise NotImplementedError def _sample_neg(self, **kwargs): """Sample negative samples.""" raise NotImplementedError
748
33.045455
72
py
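In practice CombinedSampler is built from a config dict; the shared arguments (num, pos_fraction, ...) are forwarded to both sub-samplers through **kwargs. A config sketch in the style of the Libra R-CNN configs (the numbers are illustrative, not prescriptive):

sampler = dict(
    type='CombinedSampler',
    num=512,
    pos_fraction=0.25,
    add_gt_as_proposals=True,
    pos_sampler=dict(type='InstanceBalancedPosSampler'),
    neg_sampler=dict(
        type='IoUBalancedNegSampler',
        floor_thr=-1,
        floor_fraction=0,
        num_bins=3))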
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/instance_balanced_pos_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from ..builder import BBOX_SAMPLERS from .random_sampler import RandomSampler @BBOX_SAMPLERS.register_module() class InstanceBalancedPosSampler(RandomSampler): """Instance balanced sampler that samples equal number of positive samples for each instance.""" def _sample_pos(self, assign_result, num_expected, **kwargs): """Sample positive boxes. Args: assign_result (:obj:`AssignResult`): The assigned results of boxes. num_expected (int): The number of expected positive samples Returns: Tensor or ndarray: sampled indices. """ pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: unique_gt_inds = assign_result.gt_inds[pos_inds].unique() num_gts = len(unique_gt_inds) num_per_gt = int(round(num_expected / float(num_gts)) + 1) sampled_inds = [] for i in unique_gt_inds: inds = torch.nonzero( assign_result.gt_inds == i.item(), as_tuple=False) if inds.numel() != 0: inds = inds.squeeze(1) else: continue if len(inds) > num_per_gt: inds = self.random_choice(inds, num_per_gt) sampled_inds.append(inds) sampled_inds = torch.cat(sampled_inds) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array( list(set(pos_inds.cpu()) - set(sampled_inds.cpu()))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) extra_inds = torch.from_numpy(extra_inds).to( assign_result.gt_inds.device).long() sampled_inds = torch.cat([sampled_inds, extra_inds]) elif len(sampled_inds) > num_expected: sampled_inds = self.random_choice(sampled_inds, num_expected) return sampled_inds
2,319
39.701754
79
py
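A short runnable sketch of the per-instance quota (seed and sizes are arbitrary): with 3 ground-truth boxes and 8 expected positives, each instance contributes at most int(round(8 / 3.)) + 1 = 4 indices before the surplus is trimmed or topped up randomly.

from mmdet.core.bbox import AssignResult
from mmdet.core.bbox.samplers import InstanceBalancedPosSampler

assign_result = AssignResult.random(num_gts=3, num_preds=64, rng=0)
sampler = InstanceBalancedPosSampler(num=16, pos_fraction=0.5, rng=0)
pos_inds = sampler._sample_pos(assign_result, num_expected=8)
# Positives are spread across the 3 instances instead of being drawn
# purely at random from the pooled positive set.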
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/iou_balanced_neg_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from ..builder import BBOX_SAMPLERS from .random_sampler import RandomSampler @BBOX_SAMPLERS.register_module() class IoUBalancedNegSampler(RandomSampler): """IoU Balanced Sampling. arXiv: https://arxiv.org/pdf/1904.02701.pdf (CVPR 2019) Sampling proposals according to their IoU. `floor_fraction` of needed RoIs are sampled randomly from proposals whose IoU is lower than `floor_thr`. The others are sampled from proposals whose IoU is higher than `floor_thr`. These proposals are drawn evenly from `num_bins` bins that split the IoU range evenly. Args: num (int): number of proposals. pos_fraction (float): fraction of positive proposals. floor_thr (float): threshold (minimum) IoU for IoU balanced sampling, set to -1 to apply IoU balanced sampling to all proposals. floor_fraction (float): sampling fraction of proposals under floor_thr. num_bins (int): number of bins in IoU balanced sampling. """ def __init__(self, num, pos_fraction, floor_thr=-1, floor_fraction=0, num_bins=3, **kwargs): super(IoUBalancedNegSampler, self).__init__(num, pos_fraction, **kwargs) assert floor_thr >= 0 or floor_thr == -1 assert 0 <= floor_fraction <= 1 assert num_bins >= 1 self.floor_thr = floor_thr self.floor_fraction = floor_fraction self.num_bins = num_bins def sample_via_interval(self, max_overlaps, full_set, num_expected): """Sample according to the iou interval. Args: max_overlaps (torch.Tensor): IoU between bounding boxes and ground truth boxes. full_set (set(int)): A full set of indices of boxes. num_expected (int): Number of expected samples. Returns: np.ndarray: Indices of samples """ max_iou = max_overlaps.max() iou_interval = (max_iou - self.floor_thr) / self.num_bins per_num_expected = int(num_expected / self.num_bins) sampled_inds = [] for i in range(self.num_bins): start_iou = self.floor_thr + i * iou_interval end_iou = self.floor_thr + (i + 1) * iou_interval tmp_set = set( np.where( np.logical_and(max_overlaps >= start_iou, max_overlaps < end_iou))[0]) tmp_inds = list(tmp_set & full_set) if len(tmp_inds) > per_num_expected: tmp_sampled_set = self.random_choice(tmp_inds, per_num_expected) else: tmp_sampled_set = np.array(tmp_inds, dtype=np.int64) sampled_inds.append(tmp_sampled_set) sampled_inds = np.concatenate(sampled_inds) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array(list(full_set - set(sampled_inds))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) sampled_inds = np.concatenate([sampled_inds, extra_inds]) return sampled_inds def _sample_neg(self, assign_result, num_expected, **kwargs): """Sample negative boxes. Args: assign_result (:obj:`AssignResult`): The assigned results of boxes. num_expected (int): The number of expected negative samples Returns: Tensor or ndarray: sampled indices. 
""" neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: max_overlaps = assign_result.max_overlaps.cpu().numpy() # balance sampling for negative samples neg_set = set(neg_inds.cpu().numpy()) if self.floor_thr > 0: floor_set = set( np.where( np.logical_and(max_overlaps >= 0, max_overlaps < self.floor_thr))[0]) iou_sampling_set = set( np.where(max_overlaps >= self.floor_thr)[0]) elif self.floor_thr == 0: floor_set = set(np.where(max_overlaps == 0)[0]) iou_sampling_set = set( np.where(max_overlaps > self.floor_thr)[0]) else: floor_set = set() iou_sampling_set = set( np.where(max_overlaps > self.floor_thr)[0]) # for sampling interval calculation self.floor_thr = 0 floor_neg_inds = list(floor_set & neg_set) iou_sampling_neg_inds = list(iou_sampling_set & neg_set) num_expected_iou_sampling = int(num_expected * (1 - self.floor_fraction)) if len(iou_sampling_neg_inds) > num_expected_iou_sampling: if self.num_bins >= 2: iou_sampled_inds = self.sample_via_interval( max_overlaps, set(iou_sampling_neg_inds), num_expected_iou_sampling) else: iou_sampled_inds = self.random_choice( iou_sampling_neg_inds, num_expected_iou_sampling) else: iou_sampled_inds = np.array( iou_sampling_neg_inds, dtype=np.int) num_expected_floor = num_expected - len(iou_sampled_inds) if len(floor_neg_inds) > num_expected_floor: sampled_floor_inds = self.random_choice( floor_neg_inds, num_expected_floor) else: sampled_floor_inds = np.array(floor_neg_inds, dtype=np.int) sampled_inds = np.concatenate( (sampled_floor_inds, iou_sampled_inds)) if len(sampled_inds) < num_expected: num_extra = num_expected - len(sampled_inds) extra_inds = np.array(list(neg_set - set(sampled_inds))) if len(extra_inds) > num_extra: extra_inds = self.random_choice(extra_inds, num_extra) sampled_inds = np.concatenate((sampled_inds, extra_inds)) sampled_inds = torch.from_numpy(sampled_inds).long().to( assign_result.gt_inds.device) return sampled_inds
6,740
41.396226
79
py
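The heart of sample_via_interval is an even split of the IoU range [floor_thr, max_iou] into num_bins bins. A self-contained numpy sketch with made-up overlaps:

import numpy as np

max_overlaps = np.array([0.0, 0.05, 0.12, 0.2, 0.31, 0.35, 0.4, 0.44])
floor_thr, num_bins = 0.1, 3
iou_interval = (max_overlaps.max() - floor_thr) / num_bins  # ~0.113 per bin
for i in range(num_bins):
    start_iou = floor_thr + i * iou_interval
    end_iou = floor_thr + (i + 1) * iou_interval
    in_bin = np.where((max_overlaps >= start_iou)
                      & (max_overlaps < end_iou))[0]
    print(f'bin {i}: [{start_iou:.3f}, {end_iou:.3f}) -> {in_bin}')
# Each bin then contributes up to num_expected / num_bins negatives, so
# hard (high-IoU) negatives are not drowned out by easy ones.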
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/mask_pseudo_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. """copy from https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" import torch from mmdet.core.bbox.builder import BBOX_SAMPLERS from .base_sampler import BaseSampler from .mask_sampling_result import MaskSamplingResult @BBOX_SAMPLERS.register_module() class MaskPseudoSampler(BaseSampler): """A pseudo sampler that does not do sampling actually.""" def __init__(self, **kwargs): pass def _sample_pos(self, **kwargs): """Sample positive samples.""" raise NotImplementedError def _sample_neg(self, **kwargs): """Sample negative samples.""" raise NotImplementedError def sample(self, assign_result, masks, gt_masks, **kwargs): """Directly returns the positive and negative indices of samples. Args: assign_result (:obj:`AssignResult`): Assigned results masks (torch.Tensor): Bounding boxes gt_masks (torch.Tensor): Ground truth boxes Returns: :obj:`SamplingResult`: sampler results """ pos_inds = torch.nonzero( assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() neg_inds = torch.nonzero( assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() gt_flags = masks.new_zeros(masks.shape[0], dtype=torch.uint8) sampling_result = MaskSamplingResult(pos_inds, neg_inds, masks, gt_masks, assign_result, gt_flags) return sampling_result
1,583
34.2
79
py
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/mask_sampling_result.py
# Copyright (c) OpenMMLab. All rights reserved. """copy from https://github.com/ZwwWayne/K-Net/blob/main/knet/det/mask_pseudo_sampler.py.""" import torch from .sampling_result import SamplingResult class MaskSamplingResult(SamplingResult): """Mask sampling result.""" def __init__(self, pos_inds, neg_inds, masks, gt_masks, assign_result, gt_flags): self.pos_inds = pos_inds self.neg_inds = neg_inds self.pos_masks = masks[pos_inds] self.neg_masks = masks[neg_inds] self.pos_is_gt = gt_flags[pos_inds] self.num_gts = gt_masks.shape[0] self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 if gt_masks.numel() == 0: # hack for index error case assert self.pos_assigned_gt_inds.numel() == 0 self.pos_gt_masks = torch.empty_like(gt_masks) else: self.pos_gt_masks = gt_masks[self.pos_assigned_gt_inds, :] if assign_result.labels is not None: self.pos_gt_labels = assign_result.labels[pos_inds] else: self.pos_gt_labels = None @property def masks(self): """torch.Tensor: concatenated positive and negative boxes""" return torch.cat([self.pos_masks, self.neg_masks]) def __nice__(self): data = self.info.copy() data['pos_masks'] = data.pop('pos_masks').shape data['neg_masks'] = data.pop('neg_masks').shape parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] body = ' ' + ',\n '.join(parts) return '{\n' + body + '\n}' @property def info(self): """Returns a dictionary of info about the object.""" return { 'pos_inds': self.pos_inds, 'neg_inds': self.neg_inds, 'pos_masks': self.pos_masks, 'neg_masks': self.neg_masks, 'pos_is_gt': self.pos_is_gt, 'num_gts': self.num_gts, 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, }
2,031
32.311475
79
py
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/ohem_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_SAMPLERS from ..transforms import bbox2roi from .base_sampler import BaseSampler @BBOX_SAMPLERS.register_module() class OHEMSampler(BaseSampler): r"""Online Hard Example Mining Sampler described in `Training Region-based Object Detectors with Online Hard Example Mining <https://arxiv.org/abs/1604.03540>`_. """ def __init__(self, num, pos_fraction, context, neg_pos_ub=-1, add_gt_as_proposals=True, loss_key='loss_cls', **kwargs): super(OHEMSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) self.context = context if not hasattr(self.context, 'num_stages'): self.bbox_head = self.context.bbox_head else: self.bbox_head = self.context.bbox_head[self.context.current_stage] self.loss_key = loss_key def hard_mining(self, inds, num_expected, bboxes, labels, feats): with torch.no_grad(): rois = bbox2roi([bboxes]) if not hasattr(self.context, 'num_stages'): bbox_results = self.context._bbox_forward(feats, rois) else: bbox_results = self.context._bbox_forward( self.context.current_stage, feats, rois) cls_score = bbox_results['cls_score'] loss = self.bbox_head.loss( cls_score=cls_score, bbox_pred=None, rois=rois, labels=labels, label_weights=cls_score.new_ones(cls_score.size(0)), bbox_targets=None, bbox_weights=None, reduction_override='none')[self.loss_key] _, topk_loss_inds = loss.topk(num_expected) return inds[topk_loss_inds] def _sample_pos(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs): """Sample positive boxes. Args: assign_result (:obj:`AssignResult`): Assigned results num_expected (int): Number of expected positive samples bboxes (torch.Tensor, optional): Boxes. Defaults to None. feats (list[torch.Tensor], optional): Multi-level features. Defaults to None. Returns: torch.Tensor: Indices of positive samples """ # Sample some hard positive samples pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.hard_mining(pos_inds, num_expected, bboxes[pos_inds], assign_result.labels[pos_inds], feats) def _sample_neg(self, assign_result, num_expected, bboxes=None, feats=None, **kwargs): """Sample negative boxes. Args: assign_result (:obj:`AssignResult`): Assigned results num_expected (int): Number of expected negative samples bboxes (torch.Tensor, optional): Boxes. Defaults to None. feats (list[torch.Tensor], optional): Multi-level features. Defaults to None. Returns: torch.Tensor: Indices of negative samples """ # Sample some hard negative samples neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: neg_labels = assign_result.labels.new_empty( neg_inds.size(0)).fill_(self.bbox_head.num_classes) return self.hard_mining(neg_inds, num_expected, bboxes[neg_inds], neg_labels, feats)
4,221
36.696429
79
py
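The essence of hard_mining above is a top-k over per-RoI classification losses. A toy, self-contained illustration (the losses are made up; in the real sampler they come from a forward pass of the bbox head):

import torch

losses = torch.tensor([0.2, 1.5, 0.1, 0.9, 2.3, 0.4])  # hypothetical per-RoI losses
inds = torch.arange(losses.numel())
num_expected = 3
_, topk_loss_inds = losses.topk(num_expected)
hard_inds = inds[topk_loss_inds]
print(hard_inds)  # tensor([4, 1, 3]) -- the three hardest candidates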
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/pseudo_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_SAMPLERS from .base_sampler import BaseSampler from .sampling_result import SamplingResult @BBOX_SAMPLERS.register_module() class PseudoSampler(BaseSampler): """A pseudo sampler that does not actually do sampling.""" def __init__(self, **kwargs): pass def _sample_pos(self, **kwargs): """Sample positive samples.""" raise NotImplementedError def _sample_neg(self, **kwargs): """Sample negative samples.""" raise NotImplementedError def sample(self, assign_result, bboxes, gt_bboxes, *args, **kwargs): """Directly returns the positive and negative indices of samples. Args: assign_result (:obj:`AssignResult`): Assigned results bboxes (torch.Tensor): Bounding boxes gt_bboxes (torch.Tensor): Ground truth boxes Returns: :obj:`SamplingResult`: Sampler results """ pos_inds = torch.nonzero( assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique() neg_inds = torch.nonzero( assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique() gt_flags = bboxes.new_zeros(bboxes.shape[0], dtype=torch.uint8) sampling_result = SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags) return sampling_result
1,470
33.209302
79
py
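A quick sanity check of the pass-through behaviour, built on the random helpers shipped with mmdet (the seed is arbitrary): every non-ignored candidate is kept, so no subsampling happens.

from mmdet.core.bbox import AssignResult, demodata
from mmdet.core.bbox.samplers import PseudoSampler

assign_result = AssignResult.random(num_gts=2, num_preds=10, rng=0)
bboxes = demodata.random_boxes(10, rng=0)
gt_bboxes = demodata.random_boxes(2, rng=0)
result = PseudoSampler().sample(assign_result, bboxes, gt_bboxes)
# pos_inds and neg_inds together cover every non-ignored candidate.
assert result.pos_inds.numel() + result.neg_inds.numel() <= 10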
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/random_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from ..builder import BBOX_SAMPLERS from .base_sampler import BaseSampler @BBOX_SAMPLERS.register_module() class RandomSampler(BaseSampler): """Random sampler. Args: num (int): Number of samples pos_fraction (float): Fraction of positive samples neg_pos_ub (int, optional): Upper bound number of negative and positive samples. Defaults to -1. add_gt_as_proposals (bool, optional): Whether to add ground truth boxes as proposals. Defaults to True. """ def __init__(self, num, pos_fraction, neg_pos_ub=-1, add_gt_as_proposals=True, **kwargs): from mmdet.core.bbox import demodata super(RandomSampler, self).__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) self.rng = demodata.ensure_rng(kwargs.get('rng', None)) def random_choice(self, gallery, num): """Random select some elements from the gallery. If `gallery` is a Tensor, the returned indices will be a Tensor; If `gallery` is a ndarray or list, the returned indices will be a ndarray. Args: gallery (Tensor | ndarray | list): indices pool. num (int): expected sample num. Returns: Tensor or ndarray: sampled indices. """ assert len(gallery) >= num is_tensor = isinstance(gallery, torch.Tensor) if not is_tensor: if torch.cuda.is_available(): device = torch.cuda.current_device() else: device = 'cpu' gallery = torch.tensor(gallery, dtype=torch.long, device=device) # This is a temporary fix. We can revert the following code # when PyTorch fixes the abnormal return of torch.randperm. # See: https://github.com/open-mmlab/mmdetection/pull/5014 perm = torch.randperm(gallery.numel())[:num].to(device=gallery.device) rand_inds = gallery[perm] if not is_tensor: rand_inds = rand_inds.cpu().numpy() return rand_inds def _sample_pos(self, assign_result, num_expected, **kwargs): """Randomly sample some positive samples.""" pos_inds = torch.nonzero(assign_result.gt_inds > 0, as_tuple=False) if pos_inds.numel() != 0: pos_inds = pos_inds.squeeze(1) if pos_inds.numel() <= num_expected: return pos_inds else: return self.random_choice(pos_inds, num_expected) def _sample_neg(self, assign_result, num_expected, **kwargs): """Randomly sample some negative samples.""" neg_inds = torch.nonzero(assign_result.gt_inds == 0, as_tuple=False) if neg_inds.numel() != 0: neg_inds = neg_inds.squeeze(1) if len(neg_inds) <= num_expected: return neg_inds else: return self.random_choice(neg_inds, num_expected)
3,071
36.012048
78
py
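random_choice works on tensors, ndarrays, or lists and never returns duplicate indices. A short usage sketch (the rng=0 seed is arbitrary):

import torch

from mmdet.core.bbox.samplers import RandomSampler

sampler = RandomSampler(num=8, pos_fraction=0.5, rng=0)
gallery = torch.arange(20)
picked = sampler.random_choice(gallery, 5)
assert picked.unique().numel() == 5  # 5 distinct indices from 0..19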
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/sampling_result.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmdet.utils import util_mixins class SamplingResult(util_mixins.NiceRepr): """Bbox sampling result. Example: >>> # xdoctest: +IGNORE_WANT >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA >>> self = SamplingResult.random(rng=10) >>> print(f'self = {self}') self = <SamplingResult({ 'neg_bboxes': torch.Size([12, 4]), 'neg_inds': tensor([ 0, 1, 2, 4, 5, 6, 7, 8, 9, 10, 11, 12]), 'num_gts': 4, 'pos_assigned_gt_inds': tensor([], dtype=torch.int64), 'pos_bboxes': torch.Size([0, 4]), 'pos_inds': tensor([], dtype=torch.int64), 'pos_is_gt': tensor([], dtype=torch.uint8) })> """ def __init__(self, pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags): self.pos_inds = pos_inds self.neg_inds = neg_inds self.pos_bboxes = bboxes[pos_inds] self.neg_bboxes = bboxes[neg_inds] self.pos_is_gt = gt_flags[pos_inds] self.num_gts = gt_bboxes.shape[0] self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1 if gt_bboxes.numel() == 0: # hack for index error case assert self.pos_assigned_gt_inds.numel() == 0 self.pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4) else: if len(gt_bboxes.shape) < 2: gt_bboxes = gt_bboxes.view(-1, 4) self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long(), :] if assign_result.labels is not None: self.pos_gt_labels = assign_result.labels[pos_inds] else: self.pos_gt_labels = None @property def bboxes(self): """torch.Tensor: concatenated positive and negative boxes""" return torch.cat([self.pos_bboxes, self.neg_bboxes]) def to(self, device): """Change the device of the data inplace. Example: >>> self = SamplingResult.random() >>> print(f'self = {self.to(None)}') >>> # xdoctest: +REQUIRES(--gpu) >>> print(f'self = {self.to(0)}') """ _dict = self.__dict__ for key, value in _dict.items(): if isinstance(value, torch.Tensor): _dict[key] = value.to(device) return self def __nice__(self): data = self.info.copy() data['pos_bboxes'] = data.pop('pos_bboxes').shape data['neg_bboxes'] = data.pop('neg_bboxes').shape parts = [f"'{k}': {v!r}" for k, v in sorted(data.items())] body = ' ' + ',\n '.join(parts) return '{\n' + body + '\n}' @property def info(self): """Returns a dictionary of info about the object.""" return { 'pos_inds': self.pos_inds, 'neg_inds': self.neg_inds, 'pos_bboxes': self.pos_bboxes, 'neg_bboxes': self.neg_bboxes, 'pos_is_gt': self.pos_is_gt, 'num_gts': self.num_gts, 'pos_assigned_gt_inds': self.pos_assigned_gt_inds, } @classmethod def random(cls, rng=None, **kwargs): """ Args: rng (None | int | numpy.random.RandomState): seed or state. kwargs (keyword arguments): - num_preds: number of predicted boxes - num_gts: number of true boxes - p_ignore (float): probability of a predicted box assigned to \ an ignored truth. - p_assigned (float): probability of a predicted box not being \ assigned. - p_use_label (float | bool): with labels or not. Returns: :obj:`SamplingResult`: Randomly generated sampling result. Example: >>> from mmdet.core.bbox.samplers.sampling_result import * # NOQA >>> self = SamplingResult.random() >>> print(self.__dict__) """ from mmdet.core.bbox import demodata from mmdet.core.bbox.assigners.assign_result import AssignResult from mmdet.core.bbox.samplers.random_sampler import RandomSampler rng = demodata.ensure_rng(rng) # make probabilistic? 
num = 32 pos_fraction = 0.5 neg_pos_ub = -1 assign_result = AssignResult.random(rng=rng, **kwargs) # Note we could just compute an assignment bboxes = demodata.random_boxes(assign_result.num_preds, rng=rng) gt_bboxes = demodata.random_boxes(assign_result.num_gts, rng=rng) if rng.rand() > 0.2: # sometimes algorithms squeeze their data, be robust to that gt_bboxes = gt_bboxes.squeeze() bboxes = bboxes.squeeze() if assign_result.labels is None: gt_labels = None else: gt_labels = None # todo if gt_labels is None: add_gt_as_proposals = False else: add_gt_as_proposals = True # make probabilistic? sampler = RandomSampler( num, pos_fraction, neg_pos_ub=neg_pos_ub, add_gt_as_proposals=add_gt_as_proposals, rng=rng) self = sampler.sample(assign_result, bboxes, gt_bboxes, gt_labels) return self
5,389
34
81
py
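SamplingResult.random() makes it easy to poke at the structure without running a full pipeline (the kwargs are the ones documented above; the seed is arbitrary):

from mmdet.core.bbox.samplers.sampling_result import SamplingResult

result = SamplingResult.random(rng=0, num_gts=3, num_preds=20)
print(result.pos_inds.shape, result.neg_inds.shape)
print(result.bboxes.shape)  # concatenation of pos and neg boxes
result = result.to('cpu')   # tensor-like helper: mutates in place, returns self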
mmdetection
mmdetection-master/mmdet/core/bbox/samplers/score_hlr_sampler.py
# Copyright (c) OpenMMLab. All rights reserved. import torch from mmcv.ops import nms_match from ..builder import BBOX_SAMPLERS from ..transforms import bbox2roi from .base_sampler import BaseSampler from .sampling_result import SamplingResult @BBOX_SAMPLERS.register_module() class ScoreHLRSampler(BaseSampler): r"""Importance-based Sample Reweighting (ISR_N), described in `Prime Sample Attention in Object Detection <https://arxiv.org/abs/1904.04821>`_. Score hierarchical local rank (HLR) differs from RandomSampler in the negative sampling part. It first computes Score-HLR in a two-step way, then linearly maps the Score-HLR to the loss weights. Args: num (int): Total number of sampled RoIs. pos_fraction (float): Fraction of positive samples. context (:class:`BaseRoIHead`): RoI head that the sampler belongs to. neg_pos_ub (int): Upper bound of the ratio of num negative to num positive, -1 means no upper bound. add_gt_as_proposals (bool): Whether to add ground truth as proposals. k (float): Power of the non-linear mapping. bias (float): Shift of the non-linear mapping. score_thr (float): Minimum score for a negative sample to be considered a valid bbox. iou_thr (float): IoU threshold used by NMS-Match when grouping valid negative samples. """ def __init__(self, num, pos_fraction, context, neg_pos_ub=-1, add_gt_as_proposals=True, k=0.5, bias=0, score_thr=0.05, iou_thr=0.5, **kwargs): super().__init__(num, pos_fraction, neg_pos_ub, add_gt_as_proposals) self.k = k self.bias = bias self.score_thr = score_thr self.iou_thr = iou_thr self.context = context # context of cascade detectors is a list, so distinguish them here. if not hasattr(context, 'num_stages'): self.bbox_roi_extractor = context.bbox_roi_extractor self.bbox_head = context.bbox_head self.with_shared_head = context.with_shared_head if self.with_shared_head: self.shared_head = context.shared_head else: self.bbox_roi_extractor = context.bbox_roi_extractor[ context.current_stage] self.bbox_head = context.bbox_head[context.current_stage] @staticmethod def random_choice(gallery, num): """Randomly select some elements from the gallery. If `gallery` is a Tensor, the returned indices will be a Tensor; If `gallery` is a ndarray or list, the returned indices will be a ndarray. Args: gallery (Tensor | ndarray | list): indices pool. num (int): expected sample num. Returns: Tensor or ndarray: sampled indices. """ assert len(gallery) >= num is_tensor = isinstance(gallery, torch.Tensor) if not is_tensor: if torch.cuda.is_available(): device = torch.cuda.current_device() else: device = 'cpu' gallery = torch.tensor(gallery, dtype=torch.long, device=device) perm = torch.randperm(gallery.numel(), device=gallery.device)[:num] rand_inds = gallery[perm] if not is_tensor: rand_inds = rand_inds.cpu().numpy() return rand_inds def _sample_pos(self, assign_result, num_expected, **kwargs): """Randomly sample some positive samples.""" pos_inds = torch.nonzero(assign_result.gt_inds > 0).flatten() if pos_inds.numel() <= num_expected: return pos_inds else: return self.random_choice(pos_inds, num_expected) def _sample_neg(self, assign_result, num_expected, bboxes, feats=None, img_meta=None, **kwargs): """Sample negative samples. Score-HLR sampler is done in the following steps: 1. Take the maximum positive score prediction of each negative sample as s_i. 2. Filter out negative samples whose s_i <= score_thr; the remaining samples are called valid samples. 3. Use NMS-Match to divide valid samples into different groups; samples in the same group will greatly overlap with each other. 4. Rank the matched samples in two steps to get Score-HLR. 
(1) In the same group, rank samples with their scores. (2) In the same score rank across different groups, rank samples with their scores again. 5. Linearly map Score-HLR to the final label weights. Args: assign_result (:obj:`AssignResult`): result of assigner. num_expected (int): Expected number of samples. bboxes (Tensor): bbox to be sampled. feats (Tensor): Features come from FPN. img_meta (dict): Meta information dictionary. """ neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten() num_neg = neg_inds.size(0) if num_neg == 0: return neg_inds, None with torch.no_grad(): neg_bboxes = bboxes[neg_inds] neg_rois = bbox2roi([neg_bboxes]) bbox_result = self.context._bbox_forward(feats, neg_rois) cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[ 'bbox_pred'] ori_loss = self.bbox_head.loss( cls_score=cls_score, bbox_pred=None, rois=None, labels=neg_inds.new_full((num_neg, ), self.bbox_head.num_classes), label_weights=cls_score.new_ones(num_neg), bbox_targets=None, bbox_weights=None, reduction_override='none')['loss_cls'] # filter out samples with the max score lower than score_thr max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1) valid_inds = (max_score > self.score_thr).nonzero().view(-1) invalid_inds = (max_score <= self.score_thr).nonzero().view(-1) num_valid = valid_inds.size(0) num_invalid = invalid_inds.size(0) num_expected = min(num_neg, num_expected) num_hlr = min(num_valid, num_expected) num_rand = num_expected - num_hlr if num_valid > 0: valid_rois = neg_rois[valid_inds] valid_max_score = max_score[valid_inds] valid_argmax_score = argmax_score[valid_inds] valid_bbox_pred = bbox_pred[valid_inds] # valid_bbox_pred shape: [num_valid, #num_classes, 4] valid_bbox_pred = valid_bbox_pred.view( valid_bbox_pred.size(0), -1, 4) selected_bbox_pred = valid_bbox_pred[range(num_valid), valid_argmax_score] pred_bboxes = self.bbox_head.bbox_coder.decode( valid_rois[:, 1:], selected_bbox_pred) pred_bboxes_with_score = torch.cat( [pred_bboxes, valid_max_score[:, None]], -1) group = nms_match(pred_bboxes_with_score, self.iou_thr) # imp: importance imp = cls_score.new_zeros(num_valid) for g in group: g_score = valid_max_score[g] # g_score has already sorted rank = g_score.new_tensor(range(g_score.size(0))) imp[g] = num_valid - rank + g_score _, imp_rank_inds = imp.sort(descending=True) _, imp_rank = imp_rank_inds.sort() hlr_inds = imp_rank_inds[:num_expected] if num_rand > 0: rand_inds = torch.randperm(num_invalid)[:num_rand] select_inds = torch.cat( [valid_inds[hlr_inds], invalid_inds[rand_inds]]) else: select_inds = valid_inds[hlr_inds] neg_label_weights = cls_score.new_ones(num_expected) up_bound = max(num_expected, num_valid) imp_weights = (up_bound - imp_rank[hlr_inds].float()) / up_bound neg_label_weights[:num_hlr] = imp_weights neg_label_weights[num_hlr:] = imp_weights.min() neg_label_weights = (self.bias + (1 - self.bias) * neg_label_weights).pow( self.k) ori_selected_loss = ori_loss[select_inds] new_loss = ori_selected_loss * neg_label_weights norm_ratio = ori_selected_loss.sum() / new_loss.sum() neg_label_weights *= norm_ratio else: neg_label_weights = cls_score.new_ones(num_expected) select_inds = torch.randperm(num_neg)[:num_expected] return neg_inds[select_inds], neg_label_weights def sample(self, assign_result, bboxes, gt_bboxes, gt_labels=None, img_meta=None, **kwargs): """Sample positive and negative bboxes. This is a simple implementation of bbox sampling given candidates, assigning results and ground truth bboxes. 
Args: assign_result (:obj:`AssignResult`): Bbox assigning results. bboxes (Tensor): Boxes to be sampled from. gt_bboxes (Tensor): Ground truth bboxes. gt_labels (Tensor, optional): Class labels of ground truth bboxes. Returns: tuple[:obj:`SamplingResult`, Tensor]: Sampling result and negative label weights. """ bboxes = bboxes[:, :4] gt_flags = bboxes.new_zeros((bboxes.shape[0], ), dtype=torch.uint8) if self.add_gt_as_proposals: bboxes = torch.cat([gt_bboxes, bboxes], dim=0) assign_result.add_gt_(gt_labels) gt_ones = bboxes.new_ones(gt_bboxes.shape[0], dtype=torch.uint8) gt_flags = torch.cat([gt_ones, gt_flags]) num_expected_pos = int(self.num * self.pos_fraction) pos_inds = self.pos_sampler._sample_pos( assign_result, num_expected_pos, bboxes=bboxes, **kwargs) num_sampled_pos = pos_inds.numel() num_expected_neg = self.num - num_sampled_pos if self.neg_pos_ub >= 0: _pos = max(1, num_sampled_pos) neg_upper_bound = int(self.neg_pos_ub * _pos) if num_expected_neg > neg_upper_bound: num_expected_neg = neg_upper_bound neg_inds, neg_label_weights = self.neg_sampler._sample_neg( assign_result, num_expected_neg, bboxes, img_meta=img_meta, **kwargs) return SamplingResult(pos_inds, neg_inds, bboxes, gt_bboxes, assign_result, gt_flags), neg_label_weights
11,235
41.240602
79
py
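The final weight mapping in _sample_neg is a linear map of the HLR rank followed by the non-linear (bias + (1 - bias) * w) ** k remapping. A tiny numeric sketch with made-up ranks:

import torch

imp_rank = torch.arange(6, dtype=torch.float)  # 0 = hardest kept negative
up_bound, bias, k = 6, 0.0, 0.5
raw_w = (up_bound - imp_rank) / up_bound       # linear map of the rank
weights = (bias + (1 - bias) * raw_w).pow(k)   # non-linear remapping
print(weights)  # hardest -> 1.00, easiest -> (1/6) ** 0.5 ~= 0.41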
mmdetection
mmdetection-master/mmdet/core/data_structures/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .general_data import GeneralData from .instance_data import InstanceData __all__ = ['GeneralData', 'InstanceData']
169
27.333333
47
py
mmdetection
mmdetection-master/mmdet/core/data_structures/general_data.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import numpy as np import torch from mmdet.utils.util_mixins import NiceRepr class GeneralData(NiceRepr): """A general data structure of OpenMMLab. A data structure that stores the meta information, the annotations of the images or the model predictions, which can be used in communication between components. The attributes in `GeneralData` are divided into two parts, the `meta_info_fields` and the `data_fields` respectively. - `meta_info_fields`: Usually contains the information about the image such as filename, image_shape, pad_shape, etc. All attributes in it are immutable once set, but the user can add new meta information with the `set_meta_info` function; all information can be accessed with methods `meta_info_keys`, `meta_info_values`, `meta_info_items`. - `data_fields`: Annotations or model predictions are stored. The attributes can be accessed or modified by dict-like or object-like operations, such as `.`, `[]`, `in`, `del`, `pop(str)`, `get(str)`, `keys()`, `values()`, `items()`. Users can also apply tensor-like methods to all :obj:`torch.Tensor` in the `data_fields`, such as `.cuda()`, `.cpu()`, `.numpy()`, `.to()`, `.detach()`, and the `device` property. Args: meta_info (dict, optional): A dict that contains the meta information of a single image, such as `img_shape`, `scale_factor`, etc. Default: None. data (dict, optional): A dict that contains annotations of a single image or model predictions. Default: None. Examples: >>> from mmdet.core import GeneralData >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3)) >>> instance_data = GeneralData(meta_info=img_meta) >>> 'img_shape' in instance_data True >>> instance_data.det_labels = torch.LongTensor([0, 1, 2, 3]) >>> instance_data["det_scores"] = torch.Tensor([0.01, 0.1, 0.2, 0.3]) >>> print(instance_data) <GeneralData( META INFORMATION img_shape: (800, 1196, 3) pad_shape: (800, 1216, 3) DATA FIELDS shape of det_labels: torch.Size([4]) shape of det_scores: torch.Size([4]) ) at 0x7f84acd10f90> >>> instance_data.det_scores tensor([0.0100, 0.1000, 0.2000, 0.3000]) >>> instance_data.det_labels tensor([0, 1, 2, 3]) >>> instance_data['det_labels'] tensor([0, 1, 2, 3]) >>> 'det_labels' in instance_data True >>> instance_data.img_shape (800, 1196, 3) >>> 'det_scores' in instance_data True >>> del instance_data.det_scores >>> 'det_scores' in instance_data False >>> det_labels = instance_data.pop('det_labels', None) >>> det_labels tensor([0, 1, 2, 3]) >>> 'det_labels' in instance_data False """ def __init__(self, meta_info=None, data=None): self._meta_info_fields = set() self._data_fields = set() if meta_info is not None: self.set_meta_info(meta_info=meta_info) if data is not None: self.set_data(data) def set_meta_info(self, meta_info): """Add meta information. Args: meta_info (dict): A dict that contains the meta information of an image, such as `img_shape`, `scale_factor`, etc. 
""" assert isinstance(meta_info, dict), f'meta should be a `dict` but get {meta_info}' meta = copy.deepcopy(meta_info) for k, v in meta.items(): # should be consistent with original meta_info if k in self._meta_info_fields: ori_value = getattr(self, k) if isinstance(ori_value, (torch.Tensor, np.ndarray)): if (ori_value == v).all(): continue else: raise KeyError( f'img_meta_info {k} has been set as ' f'{getattr(self, k)} before, which is immutable ') elif ori_value == v: continue else: raise KeyError( f'img_meta_info {k} has been set as ' f'{getattr(self, k)} before, which is immutable ') else: self._meta_info_fields.add(k) self.__dict__[k] = v def set_data(self, data): """Update a dict to `data_fields`. Args: data (dict): A dict contains annotations of image or model predictions. Default: None. """ assert isinstance(data, dict), f'meta should be a `dict` but get {data}' for k, v in data.items(): self.__setattr__(k, v) def new(self, meta_info=None, data=None): """Return a new results with same image meta information. Args: meta_info (dict, optional): A dict contains the meta information of image. such as `img_shape`, `scale_factor`, etc. Default: None. data (dict, optional): A dict contains annotations of image or model predictions. Default: None. """ new_data = self.__class__() new_data.set_meta_info(dict(self.meta_info_items())) if meta_info is not None: new_data.set_meta_info(meta_info) if data is not None: new_data.set_data(data) return new_data def keys(self): """ Returns: list: Contains all keys in data_fields. """ return [key for key in self._data_fields] def meta_info_keys(self): """ Returns: list: Contains all keys in meta_info_fields. """ return [key for key in self._meta_info_fields] def values(self): """ Returns: list: Contains all values in data_fields. """ return [getattr(self, k) for k in self.keys()] def meta_info_values(self): """ Returns: list: Contains all values in meta_info_fields. """ return [getattr(self, k) for k in self.meta_info_keys()] def items(self): for k in self.keys(): yield (k, getattr(self, k)) def meta_info_items(self): for k in self.meta_info_keys(): yield (k, getattr(self, k)) def __setattr__(self, name, val): if name in ('_meta_info_fields', '_data_fields'): if not hasattr(self, name): super().__setattr__(name, val) else: raise AttributeError( f'{name} has been used as a ' f'private attribute, which is immutable. ') else: if name in self._meta_info_fields: raise AttributeError(f'`{name}` is used in meta information,' f'which is immutable') self._data_fields.add(name) super().__setattr__(name, val) def __delattr__(self, item): if item in ('_meta_info_fields', '_data_fields'): raise AttributeError(f'{item} has been used as a ' f'private attribute, which is immutable. 
') if item in self._meta_info_fields: raise KeyError(f'{item} is used in meta information, ' f'which is immutable.') super().__delattr__(item) if item in self._data_fields: self._data_fields.remove(item) # dict-like methods __setitem__ = __setattr__ __delitem__ = __delattr__ def __getitem__(self, name): return getattr(self, name) def get(self, *args): assert len(args) < 3, '`get` get more than 2 arguments' return self.__dict__.get(*args) def pop(self, *args): assert len(args) < 3, '`pop` get more than 2 arguments' name = args[0] if name in self._meta_info_fields: raise KeyError(f'{name} is a key in meta information, ' f'which is immutable') if args[0] in self._data_fields: self._data_fields.remove(args[0]) return self.__dict__.pop(*args) # with default value elif len(args) == 2: return args[1] else: raise KeyError(f'{args[0]}') def __contains__(self, item): return item in self._data_fields or \ item in self._meta_info_fields # Tensor-like methods def to(self, *args, **kwargs): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if hasattr(v, 'to'): v = v.to(*args, **kwargs) new_data[k] = v return new_data # Tensor-like methods def cpu(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.cpu() new_data[k] = v return new_data # Tensor-like methods def npu(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.npu() new_data[k] = v return new_data # Tensor-like methods def mlu(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.mlu() new_data[k] = v return new_data # Tensor-like methods def cuda(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.cuda() new_data[k] = v return new_data # Tensor-like methods def detach(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.detach() new_data[k] = v return new_data # Tensor-like methods def numpy(self): """Apply same name function to all tensors in data_fields.""" new_data = self.new() for k, v in self.items(): if isinstance(v, torch.Tensor): v = v.detach().cpu().numpy() new_data[k] = v return new_data def __nice__(self): repr = '\n \n META INFORMATION \n' for k, v in self.meta_info_items(): repr += f'{k}: {v} \n' repr += '\n DATA FIELDS \n' for k, v in self.items(): if isinstance(v, (torch.Tensor, np.ndarray)): repr += f'shape of {k}: {v.shape} \n' else: repr += f'{k}: {v} \n' return repr + '\n'
11,414
32.872404
79
py
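A short interactive sketch of the immutability rules: data fields are free to change, while meta info is locked after the first set.

import torch

from mmdet.core import GeneralData

data = GeneralData(meta_info=dict(img_shape=(800, 1196, 3)))
data.det_labels = torch.LongTensor([0, 2])  # data field: allowed
try:
    data.img_shape = (600, 800, 3)          # meta info: rejected
except AttributeError as e:
    print(e)
as_numpy = data.numpy()  # new GeneralData with ndarrays in data_fields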
mmdetection
mmdetection-master/mmdet/core/data_structures/instance_data.py
# Copyright (c) OpenMMLab. All rights reserved. import itertools import numpy as np import torch from .general_data import GeneralData class InstanceData(GeneralData): """Data structure for instance-level annotations or predictions. Subclass of :class:`GeneralData`. All values in `data_fields` should have the same length. This design refers to https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/instances.py # noqa E501 Examples: >>> from mmdet.core import InstanceData >>> import numpy as np >>> img_meta = dict(img_shape=(800, 1196, 3), pad_shape=(800, 1216, 3)) >>> results = InstanceData(img_meta) >>> 'img_shape' in results True >>> results.det_labels = torch.LongTensor([0, 1, 2, 3]) >>> results["det_scores"] = torch.Tensor([0.01, 0.7, 0.6, 0.3]) >>> results["det_masks"] = np.random.rand(4, 2, 2) >>> len(results) 4 >>> print(results) <InstanceData( META INFORMATION pad_shape: (800, 1216, 3) img_shape: (800, 1196, 3) DATA FIELDS shape of det_labels: torch.Size([4]) shape of det_masks: (4, 2, 2) shape of det_scores: torch.Size([4]) ) at 0x7fe26b5ca990> >>> sorted_results = results[results.det_scores.sort().indices] >>> sorted_results.det_scores tensor([0.0100, 0.3000, 0.6000, 0.7000]) >>> sorted_results.det_labels tensor([0, 3, 2, 1]) >>> print(results[results.det_scores > 0.5]) <InstanceData( META INFORMATION pad_shape: (800, 1216, 3) img_shape: (800, 1196, 3) DATA FIELDS shape of det_labels: torch.Size([2]) shape of det_masks: (2, 2, 2) shape of det_scores: torch.Size([2]) ) at 0x7fe26b6d7790> >>> results[results.det_scores > 0.5].det_labels tensor([1, 2]) >>> results[results.det_scores > 0.5].det_scores tensor([0.7000, 0.6000]) """ def __setattr__(self, name, value): if name in ('_meta_info_fields', '_data_fields'): if not hasattr(self, name): super().__setattr__(name, value) else: raise AttributeError( f'{name} has been used as a ' f'private attribute, which is immutable. ') else: assert isinstance(value, (torch.Tensor, np.ndarray, list)), \ f'Can not set {type(value)}, only support' \ f' {(torch.Tensor, np.ndarray, list)}' if self._data_fields: assert len(value) == len(self), f'the length of ' \ f'values {len(value)} is ' \ f'not consistent with' \ f' the length ' \ f'of this :obj:`InstanceData` ' \ f'{len(self)} ' super().__setattr__(name, value) def __getitem__(self, item): """ Args: item (str, :obj:`slice`, :obj:`torch.LongTensor`, :obj:`torch.BoolTensor`): get the corresponding values according to item. Returns: obj:`InstanceData`: Corresponding values. """ assert len(self), 'This is an empty instance' assert isinstance( item, (str, slice, int, torch.LongTensor, torch.BoolTensor)) if isinstance(item, str): return getattr(self, item) if type(item) == int: if item >= len(self) or item < -len(self): raise IndexError(f'Index {item} out of range!') else: # keep the dimension item = slice(item, None, len(self)) new_data = self.new() if isinstance(item, (torch.Tensor)): assert item.dim() == 1, 'Only support getting the' \ ' values along the first dimension.' if isinstance(item, torch.BoolTensor): assert len(item) == len(self), f'The shape of the' \ f' input (BoolTensor) ' \ f'{len(item)} ' \ f' does not match the shape ' \ f'of the indexed tensor ' \ f'in data_fields ' \ f'{len(self)} at ' \ f'first dimension. ' for k, v in self.items(): if isinstance(v, torch.Tensor): new_data[k] = v[item] elif isinstance(v, np.ndarray): new_data[k] = v[item.cpu().numpy()] elif isinstance(v, list): r_list = [] # convert BoolTensor to indices if isinstance(item, torch.BoolTensor): indexes = torch.nonzero(item).view(-1) else: indexes = item for index in indexes: r_list.append(v[index]) new_data[k] = r_list else: # item is a slice for k, v in self.items(): new_data[k] = v[item] return new_data @staticmethod def cat(instances_list): """Concat the predictions of all :obj:`InstanceData` in the list. Args: instances_list (list[:obj:`InstanceData`]): A list of :obj:`InstanceData`. Returns: :obj:`InstanceData` """ assert all( isinstance(results, InstanceData) for results in instances_list) assert len(instances_list) > 0 if len(instances_list) == 1: return instances_list[0] new_data = instances_list[0].new() for k in instances_list[0]._data_fields: values = [results[k] for results in instances_list] v0 = values[0] if isinstance(v0, torch.Tensor): values = torch.cat(values, dim=0) elif isinstance(v0, np.ndarray): values = np.concatenate(values, axis=0) elif isinstance(v0, list): values = list(itertools.chain(*values)) else: raise ValueError( f'Can not concat the {k} which is a {type(v0)}') new_data[k] = values return new_data def __len__(self): if len(self._data_fields): for v in self.values(): return len(v) else: raise AssertionError('This is an empty `InstanceData`.')
6,926
35.650794
109
py
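Because all data fields share one length, InstanceData supports row-wise boolean indexing and concatenation. A minimal sketch:

import torch

from mmdet.core import InstanceData

a = InstanceData(data=dict(det_scores=torch.tensor([0.9, 0.2])))
b = InstanceData(data=dict(det_scores=torch.tensor([0.6])))
merged = InstanceData.cat([a, b])
kept = merged[merged.det_scores > 0.5]  # boolean mask keeps matching rows
print(len(merged), len(kept))           # 3 2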
mmdetection
mmdetection-master/mmdet/core/evaluation/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .class_names import (cityscapes_classes, coco_classes, dataset_aliases, get_classes, imagenet_det_classes, imagenet_vid_classes, objects365v1_classes, objects365v2_classes, oid_challenge_classes, oid_v6_classes, voc_classes) from .eval_hooks import DistEvalHook, EvalHook from .mean_ap import average_precision, eval_map, print_map_summary from .panoptic_utils import INSTANCE_OFFSET from .recall import (eval_recalls, plot_iou_recall, plot_num_recall, print_recall_summary) __all__ = [ 'voc_classes', 'imagenet_det_classes', 'imagenet_vid_classes', 'coco_classes', 'cityscapes_classes', 'dataset_aliases', 'get_classes', 'DistEvalHook', 'EvalHook', 'average_precision', 'eval_map', 'print_map_summary', 'eval_recalls', 'print_recall_summary', 'plot_num_recall', 'plot_iou_recall', 'oid_v6_classes', 'oid_challenge_classes', 'objects365v1_classes', 'objects365v2_classes', 'INSTANCE_OFFSET' ]
1,100
49.045455
76
py
mmdetection
mmdetection-master/mmdet/core/evaluation/bbox_overlaps.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np def bbox_overlaps(bboxes1, bboxes2, mode='iou', eps=1e-6, use_legacy_coordinate=False): """Calculate the IoUs between each bbox of bboxes1 and bboxes2. Args: bboxes1 (ndarray): Shape (n, 4) bboxes2 (ndarray): Shape (k, 4) mode (str): IOU (intersection over union) or IOF (intersection over foreground) eps (float): Lower bound applied to the union area to avoid division by zero. Default: 1e-6. use_legacy_coordinate (bool): Whether to use the coordinate system of mmdet v1.x, which means width and height should be calculated as `x2 - x1 + 1` and `y2 - y1 + 1` respectively. Note that when this function is used in `VOCDataset`, it should be True to align with the official implementation `http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCdevkit_18-May-2011.tar`. Default: False. Returns: ious (ndarray): Shape (n, k) """ assert mode in ['iou', 'iof'] if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. bboxes1 = bboxes1.astype(np.float32) bboxes2 = bboxes2.astype(np.float32) rows = bboxes1.shape[0] cols = bboxes2.shape[0] ious = np.zeros((rows, cols), dtype=np.float32) if rows * cols == 0: return ious exchange = False if bboxes1.shape[0] > bboxes2.shape[0]: bboxes1, bboxes2 = bboxes2, bboxes1 ious = np.zeros((cols, rows), dtype=np.float32) exchange = True area1 = (bboxes1[:, 2] - bboxes1[:, 0] + extra_length) * ( bboxes1[:, 3] - bboxes1[:, 1] + extra_length) area2 = (bboxes2[:, 2] - bboxes2[:, 0] + extra_length) * ( bboxes2[:, 3] - bboxes2[:, 1] + extra_length) for i in range(bboxes1.shape[0]): x_start = np.maximum(bboxes1[i, 0], bboxes2[:, 0]) y_start = np.maximum(bboxes1[i, 1], bboxes2[:, 1]) x_end = np.minimum(bboxes1[i, 2], bboxes2[:, 2]) y_end = np.minimum(bboxes1[i, 3], bboxes2[:, 3]) overlap = np.maximum(x_end - x_start + extra_length, 0) * np.maximum( y_end - y_start + extra_length, 0) if mode == 'iou': union = area1[i] + area2 - overlap else: union = area1[i] if not exchange else area2 union = np.maximum(union, eps) ious[i, :] = overlap / union if exchange: ious = ious.T return ious
2,454
36.19697
86
py
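A quick numeric check of the two modes: a 10x10 box against a half-overlapping 10x10 box has a 25-unit intersection, so IoU = 25 / 175 and IoF = 25 / 100.

import numpy as np

from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps

b1 = np.array([[0., 0., 10., 10.]])
b2 = np.array([[5., 5., 15., 15.], [20., 20., 30., 30.]])
print(bbox_overlaps(b1, b2))         # [[0.1429 0.    ]]
print(bbox_overlaps(b1, b2, 'iof'))  # [[0.25   0.    ]]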
mmdetection
mmdetection-master/mmdet/core/evaluation/class_names.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv def wider_face_classes(): return ['face'] def voc_classes(): return [ 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] def imagenet_det_classes(): return [ 'accordion', 'airplane', 'ant', 'antelope', 'apple', 'armadillo', 'artichoke', 'axe', 'baby_bed', 'backpack', 'bagel', 'balance_beam', 'banana', 'band_aid', 'banjo', 'baseball', 'basketball', 'bathing_cap', 'beaker', 'bear', 'bee', 'bell_pepper', 'bench', 'bicycle', 'binder', 'bird', 'bookshelf', 'bow_tie', 'bow', 'bowl', 'brassiere', 'burrito', 'bus', 'butterfly', 'camel', 'can_opener', 'car', 'cart', 'cattle', 'cello', 'centipede', 'chain_saw', 'chair', 'chime', 'cocktail_shaker', 'coffee_maker', 'computer_keyboard', 'computer_mouse', 'corkscrew', 'cream', 'croquet_ball', 'crutch', 'cucumber', 'cup_or_mug', 'diaper', 'digital_clock', 'dishwasher', 'dog', 'domestic_cat', 'dragonfly', 'drum', 'dumbbell', 'electric_fan', 'elephant', 'face_powder', 'fig', 'filing_cabinet', 'flower_pot', 'flute', 'fox', 'french_horn', 'frog', 'frying_pan', 'giant_panda', 'goldfish', 'golf_ball', 'golfcart', 'guacamole', 'guitar', 'hair_dryer', 'hair_spray', 'hamburger', 'hammer', 'hamster', 'harmonica', 'harp', 'hat_with_a_wide_brim', 'head_cabbage', 'helmet', 'hippopotamus', 'horizontal_bar', 'horse', 'hotdog', 'iPod', 'isopod', 'jellyfish', 'koala_bear', 'ladle', 'ladybug', 'lamp', 'laptop', 'lemon', 'lion', 'lipstick', 'lizard', 'lobster', 'maillot', 'maraca', 'microphone', 'microwave', 'milk_can', 'miniskirt', 'monkey', 'motorcycle', 'mushroom', 'nail', 'neck_brace', 'oboe', 'orange', 'otter', 'pencil_box', 'pencil_sharpener', 'perfume', 'person', 'piano', 'pineapple', 'ping-pong_ball', 'pitcher', 'pizza', 'plastic_bag', 'plate_rack', 'pomegranate', 'popsicle', 'porcupine', 'power_drill', 'pretzel', 'printer', 'puck', 'punching_bag', 'purse', 'rabbit', 'racket', 'ray', 'red_panda', 'refrigerator', 'remote_control', 'rubber_eraser', 'rugby_ball', 'ruler', 'salt_or_pepper_shaker', 'saxophone', 'scorpion', 'screwdriver', 'seal', 'sheep', 'ski', 'skunk', 'snail', 'snake', 'snowmobile', 'snowplow', 'soap_dispenser', 'soccer_ball', 'sofa', 'spatula', 'squirrel', 'starfish', 'stethoscope', 'stove', 'strainer', 'strawberry', 'stretcher', 'sunglasses', 'swimming_trunks', 'swine', 'syringe', 'table', 'tape_player', 'tennis_ball', 'tick', 'tie', 'tiger', 'toaster', 'traffic_light', 'train', 'trombone', 'trumpet', 'turtle', 'tv_or_monitor', 'unicycle', 'vacuum', 'violin', 'volleyball', 'waffle_iron', 'washer', 'water_bottle', 'watercraft', 'whale', 'wine_bottle', 'zebra' ] def imagenet_vid_classes(): return [ 'airplane', 'antelope', 'bear', 'bicycle', 'bird', 'bus', 'car', 'cattle', 'dog', 'domestic_cat', 'elephant', 'fox', 'giant_panda', 'hamster', 'horse', 'lion', 'lizard', 'monkey', 'motorcycle', 'rabbit', 'red_panda', 'sheep', 'snake', 'squirrel', 'tiger', 'train', 'turtle', 'watercraft', 'whale', 'zebra' ] def coco_classes(): return [ 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant', 'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat', 'baseball_glove', 'skateboard', 'surfboard', 
'tennis_racket', 'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush' ] def cityscapes_classes(): return [ 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle' ] def oid_challenge_classes(): return [ 'Footwear', 'Jeans', 'House', 'Tree', 'Woman', 'Man', 'Land vehicle', 'Person', 'Wheel', 'Bus', 'Human face', 'Bird', 'Dress', 'Girl', 'Vehicle', 'Building', 'Cat', 'Car', 'Belt', 'Elephant', 'Dessert', 'Butterfly', 'Train', 'Guitar', 'Poster', 'Book', 'Boy', 'Bee', 'Flower', 'Window', 'Hat', 'Human head', 'Dog', 'Human arm', 'Drink', 'Human mouth', 'Human hair', 'Human nose', 'Human hand', 'Table', 'Marine invertebrates', 'Fish', 'Sculpture', 'Rose', 'Street light', 'Glasses', 'Fountain', 'Skyscraper', 'Swimwear', 'Brassiere', 'Drum', 'Duck', 'Countertop', 'Furniture', 'Ball', 'Human leg', 'Boat', 'Balloon', 'Bicycle helmet', 'Goggles', 'Door', 'Human eye', 'Shirt', 'Toy', 'Teddy bear', 'Pasta', 'Tomato', 'Human ear', 'Vehicle registration plate', 'Microphone', 'Musical keyboard', 'Tower', 'Houseplant', 'Flowerpot', 'Fruit', 'Vegetable', 'Musical instrument', 'Suit', 'Motorcycle', 'Bagel', 'French fries', 'Hamburger', 'Chair', 'Salt and pepper shakers', 'Snail', 'Airplane', 'Horse', 'Laptop', 'Computer keyboard', 'Football helmet', 'Cocktail', 'Juice', 'Tie', 'Computer monitor', 'Human beard', 'Bottle', 'Saxophone', 'Lemon', 'Mouse', 'Sock', 'Cowboy hat', 'Sun hat', 'Football', 'Porch', 'Sunglasses', 'Lobster', 'Crab', 'Picture frame', 'Van', 'Crocodile', 'Surfboard', 'Shorts', 'Helicopter', 'Helmet', 'Sports uniform', 'Taxi', 'Swan', 'Goose', 'Coat', 'Jacket', 'Handbag', 'Flag', 'Skateboard', 'Television', 'Tire', 'Spoon', 'Palm tree', 'Stairs', 'Salad', 'Castle', 'Oven', 'Microwave oven', 'Wine', 'Ceiling fan', 'Mechanical fan', 'Cattle', 'Truck', 'Box', 'Ambulance', 'Desk', 'Wine glass', 'Reptile', 'Tank', 'Traffic light', 'Billboard', 'Tent', 'Insect', 'Spider', 'Treadmill', 'Cupboard', 'Shelf', 'Seat belt', 'Human foot', 'Bicycle', 'Bicycle wheel', 'Couch', 'Bookcase', 'Fedora', 'Backpack', 'Bench', 'Oyster', 'Moths and butterflies', 'Lavender', 'Waffle', 'Fork', 'Animal', 'Accordion', 'Mobile phone', 'Plate', 'Coffee cup', 'Saucer', 'Platter', 'Dagger', 'Knife', 'Bull', 'Tortoise', 'Sea turtle', 'Deer', 'Weapon', 'Apple', 'Ski', 'Taco', 'Traffic sign', 'Beer', 'Necklace', 'Sunflower', 'Piano', 'Organ', 'Harpsichord', 'Bed', 'Cabinetry', 'Nightstand', 'Curtain', 'Chest of drawers', 'Drawer', 'Parrot', 'Sandal', 'High heels', 'Tableware', 'Cart', 'Mushroom', 'Kite', 'Missile', 'Seafood', 'Camera', 'Paper towel', 'Toilet paper', 'Sombrero', 'Radish', 'Lighthouse', 'Segway', 'Pig', 'Watercraft', 'Golf cart', 'studio couch', 'Dolphin', 'Whale', 'Earrings', 'Otter', 'Sea lion', 'Whiteboard', 'Monkey', 'Gondola', 'Zebra', 'Baseball glove', 'Scarf', 'Adhesive tape', 'Trousers', 'Scoreboard', 'Lily', 'Carnivore', 'Power plugs and sockets', 'Office building', 'Sandwich', 'Swimming pool', 'Headphones', 'Tin can', 'Crown', 'Doll', 'Cake', 'Frog', 'Beetle', 'Ant', 'Gas stove', 'Canoe', 'Falcon', 'Blue jay', 'Egg', 'Fire hydrant', 'Raccoon', 'Muffin', 'Wall clock', 'Coffee', 'Mug', 'Tea', 'Bear', 'Waste container', 'Home appliance', 'Candle', 'Lion', 'Mirror', 'Starfish', 'Marine mammal', 'Wheelchair', 'Umbrella', 'Alpaca', 'Violin', 'Cello', 'Brown bear', 'Canary', 'Bat', 'Ruler', 'Plastic bag', 'Penguin', 'Watermelon', 'Harbor seal', 'Pen', 'Pumpkin', 'Harp', 'Kitchen appliance', 'Roller skates', 'Bust', 'Coffee table', 'Tennis ball', 'Tennis racket', 'Ladder', 'Boot', 'Bowl', 'Stop sign', 'Volleyball', 'Eagle', 'Paddle', 'Chicken', 'Skull', 'Lamp', 'Beehive', 'Maple', 'Sink', 'Goldfish', 'Tripod', 'Coconut', 'Bidet', 'Tap', 'Bathroom cabinet', 'Toilet', 'Filing cabinet', 'Pretzel', 'Table tennis racket', 'Bronze sculpture', 'Rocket', 'Mouse', 'Hamster', 'Lizard', 'Lifejacket', 'Goat', 'Washing machine', 'Trumpet', 'Horn', 'Trombone', 'Sheep', 'Tablet computer', 'Pillow', 'Kitchen & dining room table', 'Parachute', 'Raven', 'Glove', 'Loveseat', 'Christmas tree', 'Shellfish', 'Rifle', 'Shotgun', 'Sushi', 'Sparrow', 'Bread', 'Toaster', 'Watch', 'Asparagus', 'Artichoke', 'Suitcase', 'Antelope', 'Broccoli', 'Ice cream', 'Racket', 'Banana', 'Cookie', 'Cucumber', 'Dragonfly', 'Lynx', 'Caterpillar', 'Light bulb', 'Office supplies', 'Miniskirt', 'Skirt', 'Fireplace', 'Potato', 'Light switch', 'Croissant', 'Cabbage', 'Ladybug', 'Handgun', 'Luggage and bags', 'Window blind', 'Snowboard', 'Baseball bat', 'Digital clock', 'Serving tray', 'Infant bed', 'Sofa bed', 'Guacamole', 'Fox', 'Pizza', 'Snowplow', 'Jet ski', 'Refrigerator', 'Lantern', 'Convenience store', 'Sword', 'Rugby ball', 'Owl', 'Ostrich', 'Pancake', 'Strawberry', 'Carrot', 'Tart', 'Dice', 'Turkey', 'Rabbit', 'Invertebrate', 'Vase', 'Stool', 'Swim cap', 'Shower', 'Clock', 'Jellyfish', 'Aircraft', 'Chopsticks', 'Orange', 'Snake', 'Sewing machine', 'Kangaroo', 'Mixer', 'Food processor', 'Shrimp', 'Towel', 'Porcupine', 'Jaguar', 'Cannon', 'Limousine', 'Mule', 'Squirrel', 'Kitchen knife', 'Tiara', 'Tiger', 'Bow and arrow', 'Candy', 'Rhinoceros', 'Shark', 'Cricket ball', 'Doughnut', 'Plumbing fixture', 'Camel', 'Polar bear', 'Coin', 'Printer', 'Blender', 'Giraffe', 'Billiard table', 'Kettle', 'Dinosaur', 'Pineapple', 'Zucchini', 'Jug', 'Barge', 'Teapot', 'Golf ball', 'Binoculars', 'Scissors', 'Hot dog', 'Door handle', 'Seahorse', 'Bathtub', 'Leopard', 'Centipede', 'Grapefruit', 'Snowman', 'Cheetah', 'Alarm clock', 'Grape', 'Wrench', 'Wok', 'Bell pepper', 'Cake stand', 'Barrel', 'Woodpecker', 'Flute', 'Corded phone', 'Willow', 'Punching bag', 'Pomegranate', 'Telephone', 'Pear', 'Common fig', 'Bench', 'Wood-burning stove', 'Burrito', 'Nail', 'Turtle', 'Submarine sandwich', 'Drinking straw', 'Peach', 'Popcorn', 'Frying pan', 'Picnic basket', 'Honeycomb', 'Envelope', 'Mango', 'Cutting board', 'Pitcher', 'Stationary bicycle', 'Dumbbell', 'Personal care', 'Dog bed', 'Snowmobile', 'Oboe', 'Briefcase', 'Squash', 'Tick', 'Slow cooker', 'Coffeemaker', 'Measuring cup', 'Crutch', 'Stretcher', 'Screwdriver', 'Flashlight', 'Spatula', 'Pressure cooker', 'Ring binder', 'Beaker', 'Torch', 'Winter melon' ] def oid_v6_classes(): return [ 'Tortoise', 'Container', 'Magpie', 'Sea turtle', 'Football', 'Ambulance', 'Ladder', 'Toothbrush', 'Syringe', 'Sink', 'Toy', 'Organ (Musical Instrument)', 'Cassette deck', 'Apple', 'Human eye', 'Cosmetics', 'Paddle', 'Snowman', 'Beer', 'Chopsticks', 'Human beard', 'Bird', 'Parking meter', 'Traffic light', 'Croissant', 'Cucumber', 'Radish', 'Towel', 'Doll', 'Skull', 'Washing machine', 'Glove', 'Tick', 'Belt', 'Sunglasses', 'Banjo', 'Cart', 'Ball', 'Backpack', 'Bicycle', 'Home appliance', 'Centipede', 'Boat', 'Surfboard', 'Boot', 
'Headphones', 'Hot dog', 'Shorts', 'Fast food', 'Bus', 'Boy', 'Screwdriver', 'Bicycle wheel', 'Barge', 'Laptop', 'Miniskirt', 'Drill (Tool)', 'Dress', 'Bear', 'Waffle', 'Pancake', 'Brown bear', 'Woodpecker', 'Blue jay', 'Pretzel', 'Bagel', 'Tower', 'Teapot', 'Person', 'Bow and arrow', 'Swimwear', 'Beehive', 'Brassiere', 'Bee', 'Bat (Animal)', 'Starfish', 'Popcorn', 'Burrito', 'Chainsaw', 'Balloon', 'Wrench', 'Tent', 'Vehicle registration plate', 'Lantern', 'Toaster', 'Flashlight', 'Billboard', 'Tiara', 'Limousine', 'Necklace', 'Carnivore', 'Scissors', 'Stairs', 'Computer keyboard', 'Printer', 'Traffic sign', 'Chair', 'Shirt', 'Poster', 'Cheese', 'Sock', 'Fire hydrant', 'Land vehicle', 'Earrings', 'Tie', 'Watercraft', 'Cabinetry', 'Suitcase', 'Muffin', 'Bidet', 'Snack', 'Snowmobile', 'Clock', 'Medical equipment', 'Cattle', 'Cello', 'Jet ski', 'Camel', 'Coat', 'Suit', 'Desk', 'Cat', 'Bronze sculpture', 'Juice', 'Gondola', 'Beetle', 'Cannon', 'Computer mouse', 'Cookie', 'Office building', 'Fountain', 'Coin', 'Calculator', 'Cocktail', 'Computer monitor', 'Box', 'Stapler', 'Christmas tree', 'Cowboy hat', 'Hiking equipment', 'Studio couch', 'Drum', 'Dessert', 'Wine rack', 'Drink', 'Zucchini', 'Ladle', 'Human mouth', 'Dairy Product', 'Dice', 'Oven', 'Dinosaur', 'Ratchet (Device)', 'Couch', 'Cricket ball', 'Winter melon', 'Spatula', 'Whiteboard', 'Pencil sharpener', 'Door', 'Hat', 'Shower', 'Eraser', 'Fedora', 'Guacamole', 'Dagger', 'Scarf', 'Dolphin', 'Sombrero', 'Tin can', 'Mug', 'Tap', 'Harbor seal', 'Stretcher', 'Can opener', 'Goggles', 'Human body', 'Roller skates', 'Coffee cup', 'Cutting board', 'Blender', 'Plumbing fixture', 'Stop sign', 'Office supplies', 'Volleyball (Ball)', 'Vase', 'Slow cooker', 'Wardrobe', 'Coffee', 'Whisk', 'Paper towel', 'Personal care', 'Food', 'Sun hat', 'Tree house', 'Flying disc', 'Skirt', 'Gas stove', 'Salt and pepper shakers', 'Mechanical fan', 'Face powder', 'Fax', 'Fruit', 'French fries', 'Nightstand', 'Barrel', 'Kite', 'Tart', 'Treadmill', 'Fox', 'Flag', 'French horn', 'Window blind', 'Human foot', 'Golf cart', 'Jacket', 'Egg (Food)', 'Street light', 'Guitar', 'Pillow', 'Human leg', 'Isopod', 'Grape', 'Human ear', 'Power plugs and sockets', 'Panda', 'Giraffe', 'Woman', 'Door handle', 'Rhinoceros', 'Bathtub', 'Goldfish', 'Houseplant', 'Goat', 'Baseball bat', 'Baseball glove', 'Mixing bowl', 'Marine invertebrates', 'Kitchen utensil', 'Light switch', 'House', 'Horse', 'Stationary bicycle', 'Hammer', 'Ceiling fan', 'Sofa bed', 'Adhesive tape', 'Harp', 'Sandal', 'Bicycle helmet', 'Saucer', 'Harpsichord', 'Human hair', 'Heater', 'Harmonica', 'Hamster', 'Curtain', 'Bed', 'Kettle', 'Fireplace', 'Scale', 'Drinking straw', 'Insect', 'Hair dryer', 'Kitchenware', 'Indoor rower', 'Invertebrate', 'Food processor', 'Bookcase', 'Refrigerator', 'Wood-burning stove', 'Punching bag', 'Common fig', 'Cocktail shaker', 'Jaguar (Animal)', 'Golf ball', 'Fashion accessory', 'Alarm clock', 'Filing cabinet', 'Artichoke', 'Table', 'Tableware', 'Kangaroo', 'Koala', 'Knife', 'Bottle', 'Bottle opener', 'Lynx', 'Lavender (Plant)', 'Lighthouse', 'Dumbbell', 'Human head', 'Bowl', 'Humidifier', 'Porch', 'Lizard', 'Billiard table', 'Mammal', 'Mouse', 'Motorcycle', 'Musical instrument', 'Swim cap', 'Frying pan', 'Snowplow', 'Bathroom cabinet', 'Missile', 'Bust', 'Man', 'Waffle iron', 'Milk', 'Ring binder', 'Plate', 'Mobile phone', 'Baked goods', 'Mushroom', 'Crutch', 'Pitcher (Container)', 'Mirror', 'Personal flotation device', 'Table tennis racket', 'Pencil case', 'Musical keyboard', 
'Scoreboard', 'Briefcase', 'Kitchen knife', 'Nail (Construction)', 'Tennis ball', 'Plastic bag', 'Oboe', 'Chest of drawers', 'Ostrich', 'Piano', 'Girl', 'Plant', 'Potato', 'Hair spray', 'Sports equipment', 'Pasta', 'Penguin', 'Pumpkin', 'Pear', 'Infant bed', 'Polar bear', 'Mixer', 'Cupboard', 'Jacuzzi', 'Pizza', 'Digital clock', 'Pig', 'Reptile', 'Rifle', 'Lipstick', 'Skateboard', 'Raven', 'High heels', 'Red panda', 'Rose', 'Rabbit', 'Sculpture', 'Saxophone', 'Shotgun', 'Seafood', 'Submarine sandwich', 'Snowboard', 'Sword', 'Picture frame', 'Sushi', 'Loveseat', 'Ski', 'Squirrel', 'Tripod', 'Stethoscope', 'Submarine', 'Scorpion', 'Segway', 'Training bench', 'Snake', 'Coffee table', 'Skyscraper', 'Sheep', 'Television', 'Trombone', 'Tea', 'Tank', 'Taco', 'Telephone', 'Torch', 'Tiger', 'Strawberry', 'Trumpet', 'Tree', 'Tomato', 'Train', 'Tool', 'Picnic basket', 'Cooking spray', 'Trousers', 'Bowling equipment', 'Football helmet', 'Truck', 'Measuring cup', 'Coffeemaker', 'Violin', 'Vehicle', 'Handbag', 'Paper cutter', 'Wine', 'Weapon', 'Wheel', 'Worm', 'Wok', 'Whale', 'Zebra', 'Auto part', 'Jug', 'Pizza cutter', 'Cream', 'Monkey', 'Lion', 'Bread', 'Platter', 'Chicken', 'Eagle', 'Helicopter', 'Owl', 'Duck', 'Turtle', 'Hippopotamus', 'Crocodile', 'Toilet', 'Toilet paper', 'Squid', 'Clothing', 'Footwear', 'Lemon', 'Spider', 'Deer', 'Frog', 'Banana', 'Rocket', 'Wine glass', 'Countertop', 'Tablet computer', 'Waste container', 'Swimming pool', 'Dog', 'Book', 'Elephant', 'Shark', 'Candle', 'Leopard', 'Axe', 'Hand dryer', 'Soap dispenser', 'Porcupine', 'Flower', 'Canary', 'Cheetah', 'Palm tree', 'Hamburger', 'Maple', 'Building', 'Fish', 'Lobster', 'Garden Asparagus', 'Furniture', 'Hedgehog', 'Airplane', 'Spoon', 'Otter', 'Bull', 'Oyster', 'Horizontal bar', 'Convenience store', 'Bomb', 'Bench', 'Ice cream', 'Caterpillar', 'Butterfly', 'Parachute', 'Orange', 'Antelope', 'Beaker', 'Moths and butterflies', 'Window', 'Closet', 'Castle', 'Jellyfish', 'Goose', 'Mule', 'Swan', 'Peach', 'Coconut', 'Seat belt', 'Raccoon', 'Chisel', 'Fork', 'Lamp', 'Camera', 'Squash (Plant)', 'Racket', 'Human face', 'Human arm', 'Vegetable', 'Diaper', 'Unicycle', 'Falcon', 'Chime', 'Snail', 'Shellfish', 'Cabbage', 'Carrot', 'Mango', 'Jeans', 'Flowerpot', 'Pineapple', 'Drawer', 'Stool', 'Envelope', 'Cake', 'Dragonfly', 'Common sunflower', 'Microwave oven', 'Honeycomb', 'Marine mammal', 'Sea lion', 'Ladybug', 'Shelf', 'Watch', 'Candy', 'Salad', 'Parrot', 'Handgun', 'Sparrow', 'Van', 'Grinder', 'Spice rack', 'Light bulb', 'Corded phone', 'Sports uniform', 'Tennis racket', 'Wall clock', 'Serving tray', 'Kitchen & dining room table', 'Dog bed', 'Cake stand', 'Cat furniture', 'Bathroom accessory', 'Facial tissue holder', 'Pressure cooker', 'Kitchen appliance', 'Tire', 'Ruler', 'Luggage and bags', 'Microphone', 'Broccoli', 'Umbrella', 'Pastry', 'Grapefruit', 'Band-aid', 'Animal', 'Bell pepper', 'Turkey', 'Lily', 'Pomegranate', 'Doughnut', 'Glasses', 'Human nose', 'Pen', 'Ant', 'Car', 'Aircraft', 'Human hand', 'Skunk', 'Teddy bear', 'Watermelon', 'Cantaloupe', 'Dishwasher', 'Flute', 'Balance beam', 'Sandwich', 'Shrimp', 'Sewing machine', 'Binoculars', 'Rays and skates', 'Ipod', 'Accordion', 'Willow', 'Crab', 'Crown', 'Seahorse', 'Perfume', 'Alpaca', 'Taxi', 'Canoe', 'Remote control', 'Wheelchair', 'Rugby ball', 'Armadillo', 'Maracas', 'Helmet' ] def objects365v1_classes(): return [ 'person', 'sneakers', 'chair', 'hat', 'lamp', 'bottle', 'cabinet/shelf', 'cup', 'car', 'glasses', 'picture/frame', 'desk', 'handbag', 'street lights', 'book', 
'plate', 'helmet', 'leather shoes', 'pillow', 'glove', 'potted plant', 'bracelet', 'flower', 'tv', 'storage box', 'vase', 'bench', 'wine glass', 'boots', 'bowl', 'dining table', 'umbrella', 'boat', 'flag', 'speaker', 'trash bin/can', 'stool', 'backpack', 'couch', 'belt', 'carpet', 'basket', 'towel/napkin', 'slippers', 'barrel/bucket', 'coffee table', 'suv', 'toy', 'tie', 'bed', 'traffic light', 'pen/pencil', 'microphone', 'sandals', 'canned', 'necklace', 'mirror', 'faucet', 'bicycle', 'bread', 'high heels', 'ring', 'van', 'watch', 'sink', 'horse', 'fish', 'apple', 'camera', 'candle', 'teddy bear', 'cake', 'motorcycle', 'wild bird', 'laptop', 'knife', 'traffic sign', 'cell phone', 'paddle', 'truck', 'cow', 'power outlet', 'clock', 'drum', 'fork', 'bus', 'hanger', 'nightstand', 'pot/pan', 'sheep', 'guitar', 'traffic cone', 'tea pot', 'keyboard', 'tripod', 'hockey', 'fan', 'dog', 'spoon', 'blackboard/whiteboard', 'balloon', 'air conditioner', 'cymbal', 'mouse', 'telephone', 'pickup truck', 'orange', 'banana', 'airplane', 'luggage', 'skis', 'soccer', 'trolley', 'oven', 'remote', 'baseball glove', 'paper towel', 'refrigerator', 'train', 'tomato', 'machinery vehicle', 'tent', 'shampoo/shower gel', 'head phone', 'lantern', 'donut', 'cleaning products', 'sailboat', 'tangerine', 'pizza', 'kite', 'computer box', 'elephant', 'toiletries', 'gas stove', 'broccoli', 'toilet', 'stroller', 'shovel', 'baseball bat', 'microwave', 'skateboard', 'surfboard', 'surveillance camera', 'gun', 'life saver', 'cat', 'lemon', 'liquid soap', 'zebra', 'duck', 'sports car', 'giraffe', 'pumpkin', 'piano', 'stop sign', 'radiator', 'converter', 'tissue ', 'carrot', 'washing machine', 'vent', 'cookies', 'cutting/chopping board', 'tennis racket', 'candy', 'skating and skiing shoes', 'scissors', 'folder', 'baseball', 'strawberry', 'bow tie', 'pigeon', 'pepper', 'coffee machine', 'bathtub', 'snowboard', 'suitcase', 'grapes', 'ladder', 'pear', 'american football', 'basketball', 'potato', 'paint brush', 'printer', 'billiards', 'fire hydrant', 'goose', 'projector', 'sausage', 'fire extinguisher', 'extension cord', 'facial mask', 'tennis ball', 'chopsticks', 'electronic stove and gas stove', 'pie', 'frisbee', 'kettle', 'hamburger', 'golf club', 'cucumber', 'clutch', 'blender', 'tong', 'slide', 'hot dog', 'toothbrush', 'facial cleanser', 'mango', 'deer', 'egg', 'violin', 'marker', 'ship', 'chicken', 'onion', 'ice cream', 'tape', 'wheelchair', 'plum', 'bar soap', 'scale', 'watermelon', 'cabbage', 'router/modem', 'golf ball', 'pine apple', 'crane', 'fire truck', 'peach', 'cello', 'notepaper', 'tricycle', 'toaster', 'helicopter', 'green beans', 'brush', 'carriage', 'cigar', 'earphone', 'penguin', 'hurdle', 'swing', 'radio', 'CD', 'parking meter', 'swan', 'garlic', 'french fries', 'horn', 'avocado', 'saxophone', 'trumpet', 'sandwich', 'cue', 'kiwi fruit', 'bear', 'fishing rod', 'cherry', 'tablet', 'green vegetables', 'nuts', 'corn', 'key', 'screwdriver', 'globe', 'broom', 'pliers', 'volleyball', 'hammer', 'eggplant', 'trophy', 'dates', 'board eraser', 'rice', 'tape measure/ruler', 'dumbbell', 'hamimelon', 'stapler', 'camel', 'lettuce', 'goldfish', 'meat balls', 'medal', 'toothpaste', 'antelope', 'shrimp', 'rickshaw', 'trombone', 'pomegranate', 'coconut', 'jellyfish', 'mushroom', 'calculator', 'treadmill', 'butterfly', 'egg tart', 'cheese', 'pig', 'pomelo', 'race car', 'rice cooker', 'tuba', 'crosswalk sign', 'papaya', 'hair drier', 'green onion', 'chips', 'dolphin', 'sushi', 'urinal', 'donkey', 'electric drill', 'spring rolls', 
'tortoise/turtle', 'parrot', 'flute', 'measuring cup', 'shark', 'steak', 'poker card', 'binoculars', 'llama', 'radish', 'noodles', 'yak', 'mop', 'crab', 'microscope', 'barbell', 'bread/bun', 'baozi', 'lion', 'red cabbage', 'polar bear', 'lighter', 'seal', 'mangosteen', 'comb', 'eraser', 'pitaya', 'scallop', 'pencil case', 'saw', 'table tennis paddle', 'okra', 'starfish', 'eagle', 'monkey', 'durian', 'game board', 'rabbit', 'french horn', 'ambulance', 'asparagus', 'hoverboard', 'pasta', 'target', 'hotair balloon', 'chainsaw', 'lobster', 'iron', 'flashlight' ] def objects365v2_classes(): return [ 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Moniter/TV', 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Bakset', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Ballon', 'Tripod', 'Dog', 'Spoon', 'Clock', 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', 'Billards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette ', 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extention Cord', 'Tong', 'Tennis Racket', 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hotair ballon', 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', 
        'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle',
        'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', 'Megaphone',
        'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion',
        'Sandwich', 'Nuts', 'Speed Limit Sign', 'Induction Cooker',
        'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit',
        'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi',
        'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', 'Pasta', 'Hammer',
        'Cue', 'Avocado', 'Hamimelon', 'Flask', 'Mushroon', 'Screwdriver',
        'Soap', 'Recorder', 'Bear', 'Eggplant', 'Board Eraser', 'Coconut',
        'Tape Measur/ Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips',
        'Steak', 'Crosswalk Sign', 'Stapler', 'Campel', 'Formula 1 ',
        'Pomegranate', 'Dishwasher', 'Crab', 'Hoverboard', 'Meat ball',
        'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope',
        'Parrot', 'Seal', 'Buttefly', 'Dumbbell', 'Donkey', 'Lion',
        'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart',
        'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board',
        'Mop', 'Radish', 'Baozi', 'Target', 'French', 'Spring Rolls',
        'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage',
        'Binoculars', 'Asparagus', 'Barbell', 'Scallop', 'Noddles',
        'Comb', 'Dumpling', 'Oyster', 'Table Teniis paddle',
        'Cosmetics Brush/Eyeliner Pencil', 'Chainsaw', 'Eraser',
        'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror',
        'Curling', 'Table Tennis '
    ]


dataset_aliases = {
    'voc': ['voc', 'pascal_voc', 'voc07', 'voc12'],
    'imagenet_det': ['det', 'imagenet_det', 'ilsvrc_det'],
    'imagenet_vid': ['vid', 'imagenet_vid', 'ilsvrc_vid'],
    'coco': ['coco', 'mscoco', 'ms_coco'],
    'wider_face': ['WIDERFaceDataset', 'wider_face', 'WIDERFace'],
    'cityscapes': ['cityscapes'],
    'oid_challenge': ['oid_challenge', 'openimages_challenge'],
    'oid_v6': ['oid_v6', 'openimages_v6'],
    'objects365v1': ['objects365v1', 'obj365v1'],
    'objects365v2': ['objects365v2', 'obj365v2']
}


def get_classes(dataset):
    """Get class names of a dataset."""
    alias2name = {}
    for name, aliases in dataset_aliases.items():
        for alias in aliases:
            alias2name[alias] = name

    if mmcv.is_str(dataset):
        if dataset in alias2name:
            labels = eval(alias2name[dataset] + '_classes()')
        else:
            raise ValueError(f'Unrecognized dataset: {dataset}')
    else:
        raise TypeError(f'dataset must be a str, but got {type(dataset)}')
    return labels
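

# A minimal usage sketch (editor's addition, not part of the original
# module): `get_classes` resolves any alias registered in `dataset_aliases`
# to the matching `*_classes()` helper above.
#
#     >>> get_classes('obj365v1') == objects365v1_classes()
#     True
#     >>> len(get_classes('coco'))  # the 80 COCO classes
#     80
#     >>> get_classes(123)  # non-str input raises TypeError
#     Traceback (most recent call last):
#         ...
#     TypeError: dataset must be a str, but got <class 'int'>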
30,536
63.018868
79
py
mmdetection
mmdetection-master/mmdet/core/evaluation/eval_hooks.py
# Copyright (c) OpenMMLab. All rights reserved.
import bisect
import os.path as osp

import mmcv
import torch.distributed as dist
from mmcv.runner import DistEvalHook as BaseDistEvalHook
from mmcv.runner import EvalHook as BaseEvalHook
from torch.nn.modules.batchnorm import _BatchNorm


def _calc_dynamic_intervals(start_interval, dynamic_interval_list):
    assert mmcv.is_list_of(dynamic_interval_list, tuple)

    dynamic_milestones = [0]
    dynamic_milestones.extend(
        [dynamic_interval[0] for dynamic_interval in dynamic_interval_list])
    dynamic_intervals = [start_interval]
    dynamic_intervals.extend(
        [dynamic_interval[1] for dynamic_interval in dynamic_interval_list])
    return dynamic_milestones, dynamic_intervals


class EvalHook(BaseEvalHook):

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(EvalHook, self).__init__(*args, **kwargs)
        self.latest_results = None

        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        if self.use_dynamic_intervals:
            progress = runner.epoch if self.by_epoch else runner.iter
            step = bisect.bisect(self.dynamic_milestones, (progress + 1))
            # Dynamically modify the evaluation interval
            self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Update the evaluation interval before each training epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """Perform evaluation and save the checkpoint."""
        if not self._should_evaluate(runner):
            return

        from mmdet.apis import single_gpu_test

        # Save the results to self.latest_results so that MMDetWandbHook
        # can access the evaluation results and log them to wandb.
        results = single_gpu_test(runner.model, self.dataloader, show=False)
        self.latest_results = results
        runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
        key_score = self.evaluate(runner, results)
        # `key_score` may be `None`, in which case we skip saving the best
        # checkpoint.
        if self.save_best and key_score:
            self._save_ckpt(runner, key_score)


# Note: Considering that MMCV's EvalHook updated its interface in V1.3.16,
# in order to avoid a strong version dependency, we did not directly
# inherit EvalHook but BaseDistEvalHook.
class DistEvalHook(BaseDistEvalHook):

    def __init__(self, *args, dynamic_intervals=None, **kwargs):
        super(DistEvalHook, self).__init__(*args, **kwargs)
        self.latest_results = None

        self.use_dynamic_intervals = dynamic_intervals is not None
        if self.use_dynamic_intervals:
            self.dynamic_milestones, self.dynamic_intervals = \
                _calc_dynamic_intervals(self.interval, dynamic_intervals)

    def _decide_interval(self, runner):
        if self.use_dynamic_intervals:
            progress = runner.epoch if self.by_epoch else runner.iter
            step = bisect.bisect(self.dynamic_milestones, (progress + 1))
            # Dynamically modify the evaluation interval
            self.interval = self.dynamic_intervals[step - 1]

    def before_train_epoch(self, runner):
        """Update the evaluation interval before each training epoch."""
        self._decide_interval(runner)
        super().before_train_epoch(runner)

    def before_train_iter(self, runner):
        self._decide_interval(runner)
        super().before_train_iter(runner)

    def _do_evaluate(self, runner):
        """Perform evaluation and save the checkpoint."""
        # Synchronization of BatchNorm's buffers (running_mean and
        # running_var) is not supported in PyTorch's DDP, which may cause
        # inconsistent performance across ranks, so we broadcast the
        # BatchNorm buffers of rank 0 to the other ranks.
        if self.broadcast_bn_buffer:
            model = runner.model
            for name, module in model.named_modules():
                if isinstance(module,
                              _BatchNorm) and module.track_running_stats:
                    dist.broadcast(module.running_var, 0)
                    dist.broadcast(module.running_mean, 0)

        if not self._should_evaluate(runner):
            return

        tmpdir = self.tmpdir
        if tmpdir is None:
            tmpdir = osp.join(runner.work_dir, '.eval_hook')

        from mmdet.apis import multi_gpu_test

        # Save the results to self.latest_results so that MMDetWandbHook
        # can access the evaluation results and log them to wandb.
        results = multi_gpu_test(
            runner.model,
            self.dataloader,
            tmpdir=tmpdir,
            gpu_collect=self.gpu_collect)
        self.latest_results = results
        if runner.rank == 0:
            print('\n')
            runner.log_buffer.output['eval_iter_num'] = len(self.dataloader)
            key_score = self.evaluate(runner, results)
            # `key_score` may be `None`, in which case we skip saving the
            # best checkpoint.
            if self.save_best and key_score:
                self._save_ckpt(runner, key_score)
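

# A short worked example (editor's addition; the numbers are hypothetical)
# of how `dynamic_intervals` reshapes the evaluation schedule. With a base
# interval of 10 and `dynamic_intervals=[(20, 5), (50, 1)]`, evaluation runs
# every 10 epochs before epoch 20, every 5 epochs from epoch 20, and every
# epoch from epoch 50:
#
#     >>> _calc_dynamic_intervals(10, [(20, 5), (50, 1)])
#     ([0, 20, 50], [10, 5, 1])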
5,604
38.751773
76
py
mmdetection
mmdetection-master/mmdet/core/evaluation/mean_ap.py
# Copyright (c) OpenMMLab. All rights reserved.
from multiprocessing import Pool

import mmcv
import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable

from .bbox_overlaps import bbox_overlaps
from .class_names import get_classes


def average_precision(recalls, precisions, mode='area'):
    """Calculate average precision (for single or multiple scales).

    Args:
        recalls (ndarray): shape (num_scales, num_dets) or (num_dets, )
        precisions (ndarray): shape (num_scales, num_dets) or (num_dets, )
        mode (str): 'area' or '11points', 'area' means calculating the area
            under the precision-recall curve, '11points' means calculating
            the average precision of recalls at [0, 0.1, ..., 1]

    Returns:
        float or ndarray: calculated average precision
    """
    no_scale = False
    if recalls.ndim == 1:
        no_scale = True
        recalls = recalls[np.newaxis, :]
        precisions = precisions[np.newaxis, :]
    assert recalls.shape == precisions.shape and recalls.ndim == 2
    num_scales = recalls.shape[0]
    ap = np.zeros(num_scales, dtype=np.float32)
    if mode == 'area':
        zeros = np.zeros((num_scales, 1), dtype=recalls.dtype)
        ones = np.ones((num_scales, 1), dtype=recalls.dtype)
        mrec = np.hstack((zeros, recalls, ones))
        mpre = np.hstack((zeros, precisions, zeros))
        for i in range(mpre.shape[1] - 1, 0, -1):
            mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i])
        for i in range(num_scales):
            ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0]
            ap[i] = np.sum(
                (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1])
    elif mode == '11points':
        for i in range(num_scales):
            for thr in np.arange(0, 1 + 1e-3, 0.1):
                precs = precisions[i, recalls[i, :] >= thr]
                prec = precs.max() if precs.size > 0 else 0
                ap[i] += prec
        ap /= 11
    else:
        raise ValueError(
            'Unrecognized mode, only "area" and "11points" are supported')
    if no_scale:
        ap = ap[0]
    return ap


def tpfp_imagenet(det_bboxes,
                  gt_bboxes,
                  gt_bboxes_ignore=None,
                  default_iou_thr=0.5,
                  area_ranges=None,
                  use_legacy_coordinate=False,
                  **kwargs):
    """Check if detected bboxes are true positive or false positive.

    Args:
        det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
            of shape (k, 4). Default: None
        default_iou_thr (float): IoU threshold to be considered as matched for
            medium and large bboxes (small ones have special rules).
            Default: 0.5.
        area_ranges (list[tuple] | None): Range of bbox areas to be evaluated,
            in the format [(min1, max1), (min2, max2), ...]. Default: None.
        use_legacy_coordinate (bool): Whether to use the coordinate system of
            mmdet v1.x, in which width and height are calculated as
            'x2 - x1 + 1' and 'y2 - y1 + 1' respectively. Default: False.

    Returns:
        tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
            each array is (num_scales, m).
    """

    if not use_legacy_coordinate:
        extra_length = 0.
    else:
        extra_length = 1.

    # an indicator of ignored gts
    gt_ignore_inds = np.concatenate(
        (np.zeros(gt_bboxes.shape[0], dtype=bool),
         np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
    # stack gt_bboxes and gt_bboxes_ignore for convenience
    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))

    num_dets = det_bboxes.shape[0]
    num_gts = gt_bboxes.shape[0]
    if area_ranges is None:
        area_ranges = [(None, None)]
    num_scales = len(area_ranges)
    # tp and fp are of shape (num_scales, num_dets), each row is tp or fp
    # of a certain scale.
    tp = np.zeros((num_scales, num_dets), dtype=np.float32)
    fp = np.zeros((num_scales, num_dets), dtype=np.float32)
    if gt_bboxes.shape[0] == 0:
        if area_ranges == [(None, None)]:
            fp[...] = 1
        else:
            det_areas = (
                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (
                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)
            for i, (min_area, max_area) in enumerate(area_ranges):
                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
        return tp, fp
    ious = bbox_overlaps(
        det_bboxes, gt_bboxes - 1,
        use_legacy_coordinate=use_legacy_coordinate)
    gt_w = gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length
    gt_h = gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length
    iou_thrs = np.minimum((gt_w * gt_h) / ((gt_w + 10.0) * (gt_h + 10.0)),
                          default_iou_thr)
    # sort all detections by scores in descending order
    sort_inds = np.argsort(-det_bboxes[:, -1])
    for k, (min_area, max_area) in enumerate(area_ranges):
        gt_covered = np.zeros(num_gts, dtype=bool)
        # if no area range is specified, gt_area_ignore is all False
        if min_area is None:
            gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
        else:
            gt_areas = gt_w * gt_h
            gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
        for i in sort_inds:
            max_iou = -1
            matched_gt = -1
            # find best overlapped available gt
            for j in range(num_gts):
                # different from PASCAL VOC: allow finding other gts if the
                # best overlapped ones are already matched by other det bboxes
                if gt_covered[j]:
                    continue
                elif ious[i, j] >= iou_thrs[j] and ious[i, j] > max_iou:
                    max_iou = ious[i, j]
                    matched_gt = j
            # there are 4 cases for a det bbox:
            # 1. it matches a gt, tp = 1, fp = 0
            # 2. it matches an ignored gt, tp = 0, fp = 0
            # 3. it matches no gt and within area range, tp = 0, fp = 1
            # 4. it matches no gt but is beyond area range, tp = 0, fp = 0
            if matched_gt >= 0:
                gt_covered[matched_gt] = 1
                if not (gt_ignore_inds[matched_gt]
                        or gt_area_ignore[matched_gt]):
                    tp[k, i] = 1
            elif min_area is None:
                fp[k, i] = 1
            else:
                bbox = det_bboxes[i, :4]
                area = (bbox[2] - bbox[0] + extra_length) * (
                    bbox[3] - bbox[1] + extra_length)
                if area >= min_area and area < max_area:
                    fp[k, i] = 1
    return tp, fp


def tpfp_default(det_bboxes,
                 gt_bboxes,
                 gt_bboxes_ignore=None,
                 iou_thr=0.5,
                 area_ranges=None,
                 use_legacy_coordinate=False,
                 **kwargs):
    """Check if detected bboxes are true positive or false positive.

    Args:
        det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
            of shape (k, 4). Default: None
        iou_thr (float): IoU threshold to be considered as matched.
            Default: 0.5.
        area_ranges (list[tuple] | None): Range of bbox areas to be
            evaluated, in the format [(min1, max1), (min2, max2), ...].
            Default: None.
        use_legacy_coordinate (bool): Whether to use the coordinate system of
            mmdet v1.x, in which width and height are calculated as
            'x2 - x1 + 1' and 'y2 - y1 + 1' respectively. Default: False.

    Returns:
        tuple[np.ndarray]: (tp, fp) whose elements are 0 and 1. The shape of
            each array is (num_scales, m).
    """

    if not use_legacy_coordinate:
        extra_length = 0.
    else:
        extra_length = 1.
    # an indicator of ignored gts
    gt_ignore_inds = np.concatenate(
        (np.zeros(gt_bboxes.shape[0], dtype=bool),
         np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
    # stack gt_bboxes and gt_bboxes_ignore for convenience
    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))

    num_dets = det_bboxes.shape[0]
    num_gts = gt_bboxes.shape[0]
    if area_ranges is None:
        area_ranges = [(None, None)]
    num_scales = len(area_ranges)
    # tp and fp are of shape (num_scales, num_dets), each row is tp or fp of
    # a certain scale
    tp = np.zeros((num_scales, num_dets), dtype=np.float32)
    fp = np.zeros((num_scales, num_dets), dtype=np.float32)

    # if there is no gt bboxes in this image, then all det bboxes
    # within area range are false positives
    if gt_bboxes.shape[0] == 0:
        if area_ranges == [(None, None)]:
            fp[...] = 1
        else:
            det_areas = (
                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (
                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)
            for i, (min_area, max_area) in enumerate(area_ranges):
                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
        return tp, fp

    ious = bbox_overlaps(
        det_bboxes, gt_bboxes, use_legacy_coordinate=use_legacy_coordinate)
    # for each det, the max iou with all gts
    ious_max = ious.max(axis=1)
    # for each det, which gt overlaps most with it
    ious_argmax = ious.argmax(axis=1)
    # sort all dets in descending order by scores
    sort_inds = np.argsort(-det_bboxes[:, -1])
    for k, (min_area, max_area) in enumerate(area_ranges):
        gt_covered = np.zeros(num_gts, dtype=bool)
        # if no area range is specified, gt_area_ignore is all False
        if min_area is None:
            gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
        else:
            gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * (
                gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length)
            gt_area_ignore = (gt_areas < min_area) | (gt_areas >= max_area)
        for i in sort_inds:
            if ious_max[i] >= iou_thr:
                matched_gt = ious_argmax[i]
                if not (gt_ignore_inds[matched_gt]
                        or gt_area_ignore[matched_gt]):
                    if not gt_covered[matched_gt]:
                        gt_covered[matched_gt] = True
                        tp[k, i] = 1
                    else:
                        fp[k, i] = 1
                # otherwise ignore this detected bbox, tp = 0, fp = 0
            elif min_area is None:
                fp[k, i] = 1
            else:
                bbox = det_bboxes[i, :4]
                area = (bbox[2] - bbox[0] + extra_length) * (
                    bbox[3] - bbox[1] + extra_length)
                if area >= min_area and area < max_area:
                    fp[k, i] = 1
    return tp, fp


def tpfp_openimages(det_bboxes,
                    gt_bboxes,
                    gt_bboxes_ignore=None,
                    iou_thr=0.5,
                    area_ranges=None,
                    use_legacy_coordinate=False,
                    gt_bboxes_group_of=None,
                    use_group_of=True,
                    ioa_thr=0.5,
                    **kwargs):
    """Check if detected bboxes are true positive or false positive.

    Args:
        det_bboxes (ndarray): Detected bboxes of this image, of shape (m, 5).
        gt_bboxes (ndarray): GT bboxes of this image, of shape (n, 4).
        gt_bboxes_ignore (ndarray): Ignored gt bboxes of this image,
            of shape (k, 4). Default: None
        iou_thr (float): IoU threshold to be considered as matched.
            Default: 0.5.
        area_ranges (list[tuple] | None): Range of bbox areas to be
            evaluated, in the format [(min1, max1), (min2, max2), ...].
            Default: None.
        use_legacy_coordinate (bool): Whether to use the coordinate system of
            mmdet v1.x, in which width and height are calculated as
            'x2 - x1 + 1' and 'y2 - y1 + 1' respectively. Default: False.
        gt_bboxes_group_of (ndarray): GT group_of flags of this image,
            of shape (k, 1). Default: None
        use_group_of (bool): Whether to use group-of boxes when calculating
            TP and FP, which is only used in OpenImages evaluation.
            Default: True.
        ioa_thr (float | None): IoA threshold to be considered as matched,
            which is only used in OpenImages evaluation. Default: 0.5.
    Returns:
        tuple[np.ndarray]: Returns a tuple (tp, fp, det_bboxes).
            (tp, fp) are arrays whose elements are 0 and 1, each of shape
            (num_scales, m). det_bboxes are the detections left after
            group-of processing: detections matched to a group-of gt are
            replaced by one representative box per matched group, so the
            number of returned boxes may differ from m. Only used in
            OpenImages evaluation.
    """

    if not use_legacy_coordinate:
        extra_length = 0.
    else:
        extra_length = 1.

    # an indicator of ignored gts
    gt_ignore_inds = np.concatenate(
        (np.zeros(gt_bboxes.shape[0], dtype=bool),
         np.ones(gt_bboxes_ignore.shape[0], dtype=bool)))
    # stack gt_bboxes and gt_bboxes_ignore for convenience
    gt_bboxes = np.vstack((gt_bboxes, gt_bboxes_ignore))

    num_dets = det_bboxes.shape[0]
    num_gts = gt_bboxes.shape[0]
    if area_ranges is None:
        area_ranges = [(None, None)]
    num_scales = len(area_ranges)
    # tp and fp are of shape (num_scales, num_dets), each row is tp or fp of
    # a certain scale
    tp = np.zeros((num_scales, num_dets), dtype=np.float32)
    fp = np.zeros((num_scales, num_dets), dtype=np.float32)

    # if there is no gt bboxes in this image, then all det bboxes
    # within area range are false positives
    if gt_bboxes.shape[0] == 0:
        if area_ranges == [(None, None)]:
            fp[...] = 1
        else:
            det_areas = (
                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (
                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)
            for i, (min_area, max_area) in enumerate(area_ranges):
                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1
        return tp, fp, det_bboxes

    if gt_bboxes_group_of is not None and use_group_of:
        # if handling group-of boxes, divide gt boxes into two parts:
        # non-group-of and group-of. Then calculate ious and ioas against
        # non-group-of and group-of gts respectively. This is only used in
        # OpenImages evaluation.
        assert gt_bboxes_group_of.shape[0] == gt_bboxes.shape[0]
        non_group_gt_bboxes = gt_bboxes[~gt_bboxes_group_of]
        group_gt_bboxes = gt_bboxes[gt_bboxes_group_of]
        num_gts_group = group_gt_bboxes.shape[0]
        ious = bbox_overlaps(det_bboxes, non_group_gt_bboxes)
        ioas = bbox_overlaps(det_bboxes, group_gt_bboxes, mode='iof')
    else:
        # if not considering group-of boxes, only calculate ious against
        # gt boxes
        ious = bbox_overlaps(
            det_bboxes, gt_bboxes,
            use_legacy_coordinate=use_legacy_coordinate)
        ioas = None

    if ious.shape[1] > 0:
        # for each det, the max iou with all gts
        ious_max = ious.max(axis=1)
        # for each det, which gt overlaps most with it
        ious_argmax = ious.argmax(axis=1)
        # sort all dets in descending order by scores
        sort_inds = np.argsort(-det_bboxes[:, -1])
        for k, (min_area, max_area) in enumerate(area_ranges):
            gt_covered = np.zeros(num_gts, dtype=bool)
            # if no area range is specified, gt_area_ignore is all False
            if min_area is None:
                gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
            else:
                gt_areas = (
                    gt_bboxes[:, 2] - gt_bboxes[:, 0] + extra_length) * (
                        gt_bboxes[:, 3] - gt_bboxes[:, 1] + extra_length)
                gt_area_ignore = (gt_areas < min_area) | (
                    gt_areas >= max_area)
            for i in sort_inds:
                if ious_max[i] >= iou_thr:
                    matched_gt = ious_argmax[i]
                    if not (gt_ignore_inds[matched_gt]
                            or gt_area_ignore[matched_gt]):
                        if not gt_covered[matched_gt]:
                            gt_covered[matched_gt] = True
                            tp[k, i] = 1
                        else:
                            fp[k, i] = 1
                    # otherwise ignore this detected bbox, tp = 0, fp = 0
                elif min_area is None:
                    fp[k, i] = 1
                else:
                    bbox = det_bboxes[i, :4]
                    area = (bbox[2] - bbox[0] + extra_length) * (
                        bbox[3] - bbox[1] + extra_length)
                    if area >= min_area and area < max_area:
                        fp[k, i] = 1
    else:
        # if there are no non-group-of gt bboxes in this image,
        # then all det bboxes within area range are false positives.
        # Only used in OpenImages evaluation.
        if area_ranges == [(None, None)]:
            fp[...] = 1
        else:
            det_areas = (
                det_bboxes[:, 2] - det_bboxes[:, 0] + extra_length) * (
                    det_bboxes[:, 3] - det_bboxes[:, 1] + extra_length)
            for i, (min_area, max_area) in enumerate(area_ranges):
                fp[i, (det_areas >= min_area) & (det_areas < max_area)] = 1

    if ioas is None or ioas.shape[1] <= 0:
        return tp, fp, det_bboxes
    else:
        # The evaluation of group-of TP and FP is done in two stages:
        # 1. All detections are first matched to non-group-of boxes; true
        #    positives are determined.
        # 2. Detections that are determined as false positives are matched
        #    against group-of boxes, and group-of TP and FP are calculated.
        # Only used in OpenImages evaluation.
        det_bboxes_group = np.zeros(
            (num_scales, ioas.shape[1], det_bboxes.shape[1]), dtype=float)
        match_group_of = np.zeros((num_scales, num_dets), dtype=bool)
        tp_group = np.zeros((num_scales, num_gts_group), dtype=np.float32)
        ioas_max = ioas.max(axis=1)
        # for each det, which gt overlaps most with it
        ioas_argmax = ioas.argmax(axis=1)
        # sort all dets in descending order by scores
        sort_inds = np.argsort(-det_bboxes[:, -1])
        for k, (min_area, max_area) in enumerate(area_ranges):
            box_is_covered = tp[k]
            # if no area range is specified, gt_area_ignore is all False
            if min_area is None:
                gt_area_ignore = np.zeros_like(gt_ignore_inds, dtype=bool)
            else:
                gt_areas = (gt_bboxes[:, 2] - gt_bboxes[:, 0]) * (
                    gt_bboxes[:, 3] - gt_bboxes[:, 1])
                gt_area_ignore = (gt_areas < min_area) | (
                    gt_areas >= max_area)
            for i in sort_inds:
                matched_gt = ioas_argmax[i]
                if not box_is_covered[i]:
                    if ioas_max[i] >= ioa_thr:
                        if not (gt_ignore_inds[matched_gt]
                                or gt_area_ignore[matched_gt]):
                            if not tp_group[k, matched_gt]:
                                tp_group[k, matched_gt] = 1
                                match_group_of[k, i] = True
                            else:
                                match_group_of[k, i] = True

                            if det_bboxes_group[k, matched_gt, -1] < \
                                    det_bboxes[i, -1]:
                                det_bboxes_group[k, matched_gt] = \
                                    det_bboxes[i]

        fp_group = (tp_group <= 0).astype(float)
        tps = []
        fps = []
        # concatenate tp, fp and det boxes that were not matched to group-of
        # gt boxes with tp_group, fp_group and det_bboxes_group, which were
        # matched to group-of boxes, respectively.
        for i in range(num_scales):
            tps.append(
                np.concatenate((tp[i][~match_group_of[i]], tp_group[i])))
            fps.append(
                np.concatenate((fp[i][~match_group_of[i]], fp_group[i])))
            det_bboxes = np.concatenate(
                (det_bboxes[~match_group_of[i]], det_bboxes_group[i]))

        tp = np.vstack(tps)
        fp = np.vstack(fps)
        return tp, fp, det_bboxes


def get_cls_results(det_results, annotations, class_id):
    """Get det results and gt information of a certain class.

    Args:
        det_results (list[list]): Same as `eval_map()`.
        annotations (list[dict]): Same as `eval_map()`.
        class_id (int): ID of a specific class.

    Returns:
        tuple[list[np.ndarray]]: detected bboxes, gt bboxes, ignored gt bboxes
    """
    cls_dets = [img_res[class_id] for img_res in det_results]
    cls_gts = []
    cls_gts_ignore = []
    for ann in annotations:
        gt_inds = ann['labels'] == class_id
        cls_gts.append(ann['bboxes'][gt_inds, :])

        if ann.get('labels_ignore', None) is not None:
            ignore_inds = ann['labels_ignore'] == class_id
            cls_gts_ignore.append(ann['bboxes_ignore'][ignore_inds, :])
        else:
            cls_gts_ignore.append(np.empty((0, 4), dtype=np.float32))

    return cls_dets, cls_gts, cls_gts_ignore


def get_cls_group_ofs(annotations, class_id):
    """Get `gt_group_of` of a certain class, which is used in Open Images.

    Args:
        annotations (list[dict]): Same as `eval_map()`.
        class_id (int): ID of a specific class.

    Returns:
        list[np.ndarray]: `gt_group_of` of a certain class.
""" gt_group_ofs = [] for ann in annotations: gt_inds = ann['labels'] == class_id if ann.get('gt_is_group_ofs', None) is not None: gt_group_ofs.append(ann['gt_is_group_ofs'][gt_inds]) else: gt_group_ofs.append(np.empty((0, 1), dtype=bool)) return gt_group_ofs def eval_map(det_results, annotations, scale_ranges=None, iou_thr=0.5, ioa_thr=None, dataset=None, logger=None, tpfp_fn=None, nproc=4, use_legacy_coordinate=False, use_group_of=False): """Evaluate mAP of a dataset. Args: det_results (list[list]): [[cls1_det, cls2_det, ...], ...]. The outer list indicates images, and the inner list indicates per-class detected bboxes. annotations (list[dict]): Ground truth annotations where each item of the list indicates an image. Keys of annotations are: - `bboxes`: numpy array of shape (n, 4) - `labels`: numpy array of shape (n, ) - `bboxes_ignore` (optional): numpy array of shape (k, 4) - `labels_ignore` (optional): numpy array of shape (k, ) scale_ranges (list[tuple] | None): Range of scales to be evaluated, in the format [(min1, max1), (min2, max2), ...]. A range of (32, 64) means the area range between (32**2, 64**2). Default: None. iou_thr (float): IoU threshold to be considered as matched. Default: 0.5. ioa_thr (float | None): IoA threshold to be considered as matched, which only used in OpenImages evaluation. Default: None. dataset (list[str] | str | None): Dataset name or dataset classes, there are minor differences in metrics for different datasets, e.g. "voc07", "imagenet_det", etc. Default: None. logger (logging.Logger | str | None): The way to print the mAP summary. See `mmcv.utils.print_log()` for details. Default: None. tpfp_fn (callable | None): The function used to determine true/ false positives. If None, :func:`tpfp_default` is used as default unless dataset is 'det' or 'vid' (:func:`tpfp_imagenet` in this case). If it is given as a function, then this function is used to evaluate tp & fp. Default None. nproc (int): Processes used for computing TP and FP. Default: 4. use_legacy_coordinate (bool): Whether to use coordinate system in mmdet v1.x. which means width, height should be calculated as 'x2 - x1 + 1` and 'y2 - y1 + 1' respectively. Default: False. use_group_of (bool): Whether to use group of when calculate TP and FP, which only used in OpenImages evaluation. Default: False. Returns: tuple: (mAP, [dict, dict, ...]) """ assert len(det_results) == len(annotations) if not use_legacy_coordinate: extra_length = 0. else: extra_length = 1. num_imgs = len(det_results) num_scales = len(scale_ranges) if scale_ranges is not None else 1 num_classes = len(det_results[0]) # positive class num area_ranges = ([(rg[0]**2, rg[1]**2) for rg in scale_ranges] if scale_ranges is not None else None) # There is no need to use multi processes to process # when num_imgs = 1 . if num_imgs > 1: assert nproc > 0, 'nproc must be at least one.' 
nproc = min(nproc, num_imgs) pool = Pool(nproc) eval_results = [] for i in range(num_classes): # get gt and det bboxes of this class cls_dets, cls_gts, cls_gts_ignore = get_cls_results( det_results, annotations, i) # choose proper function according to datasets to compute tp and fp if tpfp_fn is None: if dataset in ['det', 'vid']: tpfp_fn = tpfp_imagenet elif dataset in ['oid_challenge', 'oid_v6'] \ or use_group_of is True: tpfp_fn = tpfp_openimages else: tpfp_fn = tpfp_default if not callable(tpfp_fn): raise ValueError( f'tpfp_fn has to be a function or None, but got {tpfp_fn}') if num_imgs > 1: # compute tp and fp for each image with multiple processes args = [] if use_group_of: # used in Open Images Dataset evaluation gt_group_ofs = get_cls_group_ofs(annotations, i) args.append(gt_group_ofs) args.append([use_group_of for _ in range(num_imgs)]) if ioa_thr is not None: args.append([ioa_thr for _ in range(num_imgs)]) tpfp = pool.starmap( tpfp_fn, zip(cls_dets, cls_gts, cls_gts_ignore, [iou_thr for _ in range(num_imgs)], [area_ranges for _ in range(num_imgs)], [use_legacy_coordinate for _ in range(num_imgs)], *args)) else: tpfp = tpfp_fn( cls_dets[0], cls_gts[0], cls_gts_ignore[0], iou_thr, area_ranges, use_legacy_coordinate, gt_bboxes_group_of=(get_cls_group_ofs(annotations, i)[0] if use_group_of else None), use_group_of=use_group_of, ioa_thr=ioa_thr) tpfp = [tpfp] if use_group_of: tp, fp, cls_dets = tuple(zip(*tpfp)) else: tp, fp = tuple(zip(*tpfp)) # calculate gt number of each scale # ignored gts or gts beyond the specific scale are not counted num_gts = np.zeros(num_scales, dtype=int) for j, bbox in enumerate(cls_gts): if area_ranges is None: num_gts[0] += bbox.shape[0] else: gt_areas = (bbox[:, 2] - bbox[:, 0] + extra_length) * ( bbox[:, 3] - bbox[:, 1] + extra_length) for k, (min_area, max_area) in enumerate(area_ranges): num_gts[k] += np.sum((gt_areas >= min_area) & (gt_areas < max_area)) # sort all det bboxes by score, also sort tp and fp cls_dets = np.vstack(cls_dets) num_dets = cls_dets.shape[0] sort_inds = np.argsort(-cls_dets[:, -1]) tp = np.hstack(tp)[:, sort_inds] fp = np.hstack(fp)[:, sort_inds] # calculate recall and precision with tp and fp tp = np.cumsum(tp, axis=1) fp = np.cumsum(fp, axis=1) eps = np.finfo(np.float32).eps recalls = tp / np.maximum(num_gts[:, np.newaxis], eps) precisions = tp / np.maximum((tp + fp), eps) # calculate AP if scale_ranges is None: recalls = recalls[0, :] precisions = precisions[0, :] num_gts = num_gts.item() mode = 'area' if dataset != 'voc07' else '11points' ap = average_precision(recalls, precisions, mode) eval_results.append({ 'num_gts': num_gts, 'num_dets': num_dets, 'recall': recalls, 'precision': precisions, 'ap': ap }) if num_imgs > 1: pool.close() if scale_ranges is not None: # shape (num_classes, num_scales) all_ap = np.vstack([cls_result['ap'] for cls_result in eval_results]) all_num_gts = np.vstack( [cls_result['num_gts'] for cls_result in eval_results]) mean_ap = [] for i in range(num_scales): if np.any(all_num_gts[:, i] > 0): mean_ap.append(all_ap[all_num_gts[:, i] > 0, i].mean()) else: mean_ap.append(0.0) else: aps = [] for cls_result in eval_results: if cls_result['num_gts'] > 0: aps.append(cls_result['ap']) mean_ap = np.array(aps).mean().item() if aps else 0.0 print_map_summary( mean_ap, eval_results, dataset, area_ranges, logger=logger) return mean_ap, eval_results def print_map_summary(mean_ap, results, dataset=None, scale_ranges=None, logger=None): """Print mAP and results of each class. 
A table will be printed to show the gts/dets/recall/AP of each class and the mAP. Args: mean_ap (float): Calculated from `eval_map()`. results (list[dict]): Calculated from `eval_map()`. dataset (list[str] | str | None): Dataset name or dataset classes. scale_ranges (list[tuple] | None): Range of scales to be evaluated. logger (logging.Logger | str | None): The way to print the mAP summary. See `mmcv.utils.print_log()` for details. Default: None. """ if logger == 'silent': return if isinstance(results[0]['ap'], np.ndarray): num_scales = len(results[0]['ap']) else: num_scales = 1 if scale_ranges is not None: assert len(scale_ranges) == num_scales num_classes = len(results) recalls = np.zeros((num_scales, num_classes), dtype=np.float32) aps = np.zeros((num_scales, num_classes), dtype=np.float32) num_gts = np.zeros((num_scales, num_classes), dtype=int) for i, cls_result in enumerate(results): if cls_result['recall'].size > 0: recalls[:, i] = np.array(cls_result['recall'], ndmin=2)[:, -1] aps[:, i] = cls_result['ap'] num_gts[:, i] = cls_result['num_gts'] if dataset is None: label_names = [str(i) for i in range(num_classes)] elif mmcv.is_str(dataset): label_names = get_classes(dataset) else: label_names = dataset if not isinstance(mean_ap, list): mean_ap = [mean_ap] header = ['class', 'gts', 'dets', 'recall', 'ap'] for i in range(num_scales): if scale_ranges is not None: print_log(f'Scale range {scale_ranges[i]}', logger=logger) table_data = [header] for j in range(num_classes): row_data = [ label_names[j], num_gts[i, j], results[j]['num_dets'], f'{recalls[i, j]:.3f}', f'{aps[i, j]:.3f}' ] table_data.append(row_data) table_data.append(['mAP', '', '', '', f'{mean_ap[i]:.3f}']) table = AsciiTable(table_data) table.inner_footing_row_border = True print_log('\n' + table.table, logger=logger)
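

# A tiny worked example (editor's addition; the boxes are made up) showing
# the shapes `eval_map` expects: one image, one class, and one detection
# that exactly matches the single gt box, which yields AP = 1.0. Note that
# `eval_map` also prints a per-class summary table as a side effect.
#
#     >>> det_results = [[np.array([[10., 10., 50., 50., 0.9]])]]  # [img][cls]
#     >>> annotations = [dict(
#     ...     bboxes=np.array([[10., 10., 50., 50.]]),
#     ...     labels=np.array([0]))]
#     >>> mean_ap, _ = eval_map(det_results, annotations, iou_thr=0.5)
#     >>> round(float(mean_ap), 2)
#     1.0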
32,678
40.735632
79
py
mmdetection
mmdetection-master/mmdet/core/evaluation/panoptic_utils.py
# Copyright (c) OpenMMLab. All rights reserved.

# A custom value to distinguish instance ID and category ID; it needs to
# be greater than the number of categories.
# For a pixel in the panoptic result map:
#   pan_id = ins_id * INSTANCE_OFFSET + cat_id
INSTANCE_OFFSET = 1000
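
# A short worked example (editor's addition): the third instance (ins_id=3)
# of category 17 is encoded as 3 * INSTANCE_OFFSET + 17, and both parts can
# be recovered with modulo and floor division:
#
#     >>> pan_id = 3 * INSTANCE_OFFSET + 17
#     >>> pan_id, pan_id % INSTANCE_OFFSET, pan_id // INSTANCE_OFFSET
#     (3017, 17, 3)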
273
38.142857
68
py
mmdetection
mmdetection-master/mmdet/core/evaluation/recall.py
# Copyright (c) OpenMMLab. All rights reserved.
from collections.abc import Sequence

import numpy as np
from mmcv.utils import print_log
from terminaltables import AsciiTable

from .bbox_overlaps import bbox_overlaps


def _recalls(all_ious, proposal_nums, thrs):

    img_num = all_ious.shape[0]
    total_gt_num = sum([ious.shape[0] for ious in all_ious])

    _ious = np.zeros((proposal_nums.size, total_gt_num), dtype=np.float32)
    for k, proposal_num in enumerate(proposal_nums):
        tmp_ious = np.zeros(0)
        for i in range(img_num):
            ious = all_ious[i][:, :proposal_num].copy()
            gt_ious = np.zeros((ious.shape[0]))
            if ious.size == 0:
                tmp_ious = np.hstack((tmp_ious, gt_ious))
                continue
            for j in range(ious.shape[0]):
                gt_max_overlaps = ious.argmax(axis=1)
                max_ious = ious[np.arange(0, ious.shape[0]), gt_max_overlaps]
                gt_idx = max_ious.argmax()
                gt_ious[j] = max_ious[gt_idx]
                box_idx = gt_max_overlaps[gt_idx]
                # exclude the matched gt and proposal from further matching
                ious[gt_idx, :] = -1
                ious[:, box_idx] = -1
            tmp_ious = np.hstack((tmp_ious, gt_ious))
        _ious[k, :] = tmp_ious

    _ious = np.fliplr(np.sort(_ious, axis=1))
    recalls = np.zeros((proposal_nums.size, thrs.size))
    for i, thr in enumerate(thrs):
        recalls[:, i] = (_ious >= thr).sum(axis=1) / float(total_gt_num)

    return recalls


def set_recall_param(proposal_nums, iou_thrs):
    """Check proposal_nums and iou_thrs and set correct format."""
    if isinstance(proposal_nums, Sequence):
        _proposal_nums = np.array(proposal_nums)
    elif isinstance(proposal_nums, int):
        _proposal_nums = np.array([proposal_nums])
    else:
        _proposal_nums = proposal_nums

    if iou_thrs is None:
        _iou_thrs = np.array([0.5])
    elif isinstance(iou_thrs, Sequence):
        _iou_thrs = np.array(iou_thrs)
    elif isinstance(iou_thrs, float):
        _iou_thrs = np.array([iou_thrs])
    else:
        _iou_thrs = iou_thrs

    return _proposal_nums, _iou_thrs


def eval_recalls(gts,
                 proposals,
                 proposal_nums=None,
                 iou_thrs=0.5,
                 logger=None,
                 use_legacy_coordinate=False):
    """Calculate recalls.

    Args:
        gts (list[ndarray]): a list of arrays of shape (n, 4)
        proposals (list[ndarray]): a list of arrays of shape (k, 4) or (k, 5)
        proposal_nums (int | Sequence[int]): Top N proposals to be evaluated.
        iou_thrs (float | Sequence[float]): IoU thresholds. Default: 0.5.
        logger (logging.Logger | str | None): The way to print the recall
            summary. See `mmcv.utils.print_log()` for details. Default: None.
        use_legacy_coordinate (bool): Whether to use the coordinate system of
            mmdet v1.x, in which "1" was added to both height and width so
            that w, h are computed as 'x2 - x1 + 1' and 'y2 - y1 + 1'.
            Default: False.

    Returns:
        ndarray: recalls of different ious and proposal nums
    """

    img_num = len(gts)
    assert img_num == len(proposals)
    proposal_nums, iou_thrs = set_recall_param(proposal_nums, iou_thrs)

    all_ious = []
    for i in range(img_num):
        if proposals[i].ndim == 2 and proposals[i].shape[1] == 5:
            scores = proposals[i][:, 4]
            sort_idx = np.argsort(scores)[::-1]
            img_proposal = proposals[i][sort_idx, :]
        else:
            img_proposal = proposals[i]
        prop_num = min(img_proposal.shape[0], proposal_nums[-1])
        if gts[i] is None or gts[i].shape[0] == 0:
            ious = np.zeros((0, img_proposal.shape[0]), dtype=np.float32)
        else:
            ious = bbox_overlaps(
                gts[i],
                img_proposal[:prop_num, :4],
                use_legacy_coordinate=use_legacy_coordinate)
        all_ious.append(ious)
    all_ious = np.array(all_ious)
    recalls = _recalls(all_ious, proposal_nums, iou_thrs)

    print_recall_summary(recalls, proposal_nums, iou_thrs, logger=logger)
    return recalls


def print_recall_summary(recalls,
                         proposal_nums,
                         iou_thrs,
                         row_idxs=None,
                         col_idxs=None,
                         logger=None):
    """Print recalls in a table.
    Args:
        recalls (ndarray): calculated from `bbox_recalls`
        proposal_nums (ndarray or list): top N proposals
        iou_thrs (ndarray or list): iou thresholds
        row_idxs (ndarray): which rows (proposal nums) to print
        col_idxs (ndarray): which cols (iou thresholds) to print
        logger (logging.Logger | str | None): The way to print the recall
            summary. See `mmcv.utils.print_log()` for details. Default: None.
    """
    proposal_nums = np.array(proposal_nums, dtype=np.int32)
    iou_thrs = np.array(iou_thrs)
    if row_idxs is None:
        row_idxs = np.arange(proposal_nums.size)
    if col_idxs is None:
        col_idxs = np.arange(iou_thrs.size)
    row_header = [''] + iou_thrs[col_idxs].tolist()
    table_data = [row_header]
    for i, num in enumerate(proposal_nums[row_idxs]):
        row = [f'{val:.3f}' for val in recalls[row_idxs[i], col_idxs].tolist()]
        row.insert(0, num)
        table_data.append(row)
    table = AsciiTable(table_data)
    print_log('\n' + table.table, logger=logger)


def plot_num_recall(recalls, proposal_nums):
    """Plot Proposal_num-Recalls curve.

    Args:
        recalls(ndarray or list): shape (k,)
        proposal_nums(ndarray or list): same shape as `recalls`
    """
    if isinstance(proposal_nums, np.ndarray):
        _proposal_nums = proposal_nums.tolist()
    else:
        _proposal_nums = proposal_nums
    if isinstance(recalls, np.ndarray):
        _recalls = recalls.tolist()
    else:
        _recalls = recalls

    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.plot([0] + _proposal_nums, [0] + _recalls)
    plt.xlabel('Proposal num')
    plt.ylabel('Recall')
    # use the converted list so that plain-list inputs work as documented
    plt.axis([0, max(_proposal_nums), 0, 1])
    f.show()


def plot_iou_recall(recalls, iou_thrs):
    """Plot IoU-Recalls curve.

    Args:
        recalls(ndarray or list): shape (k,)
        iou_thrs(ndarray or list): same shape as `recalls`
    """
    if isinstance(iou_thrs, np.ndarray):
        _iou_thrs = iou_thrs.tolist()
    else:
        _iou_thrs = iou_thrs
    if isinstance(recalls, np.ndarray):
        _recalls = recalls.tolist()
    else:
        _recalls = recalls

    import matplotlib.pyplot as plt
    f = plt.figure()
    plt.plot(_iou_thrs + [1.0], _recalls + [0.])
    plt.xlabel('IoU')
    plt.ylabel('Recall')
    # use the converted list so that plain-list inputs work as documented
    plt.axis([min(_iou_thrs), 1, 0, 1])
    f.show()
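

# A minimal sketch (editor's addition; the boxes are made up) of the input
# shapes for `eval_recalls`: one image whose single proposal exactly covers
# the single gt box, so recall is 1.0 for the requested settings. A recall
# table is also printed as a side effect.
#
#     >>> gts = [np.array([[10., 10., 50., 50.]])]
#     >>> proposals = [np.array([[10., 10., 50., 50., 0.9]])]
#     >>> eval_recalls(gts, proposals, proposal_nums=1, iou_thrs=0.5)
#     array([[1.]])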
6,806
33.378788
79
py
mmdetection
mmdetection-master/mmdet/core/export/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .onnx_helper import (add_dummy_nms_for_onnx, dynamic_clip_for_onnx, get_k_for_topk) from .pytorch2onnx import (build_model_from_cfg, generate_inputs_and_wrap_model, preprocess_example_input) __all__ = [ 'build_model_from_cfg', 'generate_inputs_and_wrap_model', 'preprocess_example_input', 'get_k_for_topk', 'add_dummy_nms_for_onnx', 'dynamic_clip_for_onnx' ]
505
37.923077
75
py
mmdetection
mmdetection-master/mmdet/core/export/model_wrappers.py
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import warnings import numpy as np import torch from mmdet.core import bbox2result from mmdet.models import BaseDetector class DeployBaseDetector(BaseDetector): """DeployBaseDetector.""" def __init__(self, class_names, device_id): super(DeployBaseDetector, self).__init__() self.CLASSES = class_names self.device_id = device_id def simple_test(self, img, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def aug_test(self, imgs, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def extract_feat(self, imgs): raise NotImplementedError('This method is not implemented.') def forward_train(self, imgs, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def val_step(self, data, optimizer): raise NotImplementedError('This method is not implemented.') def train_step(self, data, optimizer): raise NotImplementedError('This method is not implemented.') def forward_test(self, *, img, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def async_simple_test(self, img, img_metas, **kwargs): raise NotImplementedError('This method is not implemented.') def forward(self, img, img_metas, return_loss=True, **kwargs): outputs = self.forward_test(img, img_metas, **kwargs) batch_dets, batch_labels = outputs[:2] batch_masks = outputs[2] if len(outputs) == 3 else None batch_size = img[0].shape[0] img_metas = img_metas[0] results = [] rescale = kwargs.get('rescale', True) for i in range(batch_size): dets, labels = batch_dets[i], batch_labels[i] if rescale: scale_factor = img_metas[i]['scale_factor'] if isinstance(scale_factor, (list, tuple, np.ndarray)): assert len(scale_factor) == 4 scale_factor = np.array(scale_factor)[None, :] # [1,4] dets[:, :4] /= scale_factor if 'border' in img_metas[i]: # offset pixel of the top-left corners between original image # and padded/enlarged image, 'border' is used when exporting # CornerNet and CentripetalNet to onnx x_off = img_metas[i]['border'][2] y_off = img_metas[i]['border'][0] dets[:, [0, 2]] -= x_off dets[:, [1, 3]] -= y_off dets[:, :4] *= (dets[:, :4] > 0).astype(dets.dtype) dets_results = bbox2result(dets, labels, len(self.CLASSES)) if batch_masks is not None: masks = batch_masks[i] img_h, img_w = img_metas[i]['img_shape'][:2] ori_h, ori_w = img_metas[i]['ori_shape'][:2] masks = masks[:, :img_h, :img_w] if rescale: masks = masks.astype(np.float32) masks = torch.from_numpy(masks) masks = torch.nn.functional.interpolate( masks.unsqueeze(0), size=(ori_h, ori_w)) masks = masks.squeeze(0).detach().numpy() if masks.dtype != bool: masks = masks >= 0.5 segms_results = [[] for _ in range(len(self.CLASSES))] for j in range(len(dets)): segms_results[labels[j]].append(masks[j]) results.append((dets_results, segms_results)) else: results.append(dets_results) return results class ONNXRuntimeDetector(DeployBaseDetector): """Wrapper for detector's inference with ONNXRuntime.""" def __init__(self, onnx_file, class_names, device_id): super(ONNXRuntimeDetector, self).__init__(class_names, device_id) import onnxruntime as ort # get the custom op path ort_custom_op_path = '' try: from mmcv.ops import get_onnxruntime_op_path ort_custom_op_path = get_onnxruntime_op_path() except (ImportError, ModuleNotFoundError): warnings.warn('If input model has custom op from mmcv, \ you may have to build mmcv with ONNXRuntime from source.') session_options = ort.SessionOptions() # register custom op for 
onnxruntime if osp.exists(ort_custom_op_path): session_options.register_custom_ops_library(ort_custom_op_path) sess = ort.InferenceSession(onnx_file, session_options) providers = ['CPUExecutionProvider'] options = [{}] is_cuda_available = ort.get_device() == 'GPU' if is_cuda_available: providers.insert(0, 'CUDAExecutionProvider') options.insert(0, {'device_id': device_id}) sess.set_providers(providers, options) self.sess = sess self.io_binding = sess.io_binding() self.output_names = [_.name for _ in sess.get_outputs()] self.is_cuda_available = is_cuda_available def forward_test(self, imgs, img_metas, **kwargs): input_data = imgs[0] # set io binding for inputs/outputs device_type = 'cuda' if self.is_cuda_available else 'cpu' if not self.is_cuda_available: input_data = input_data.cpu() self.io_binding.bind_input( name='input', device_type=device_type, device_id=self.device_id, element_type=np.float32, shape=input_data.shape, buffer_ptr=input_data.data_ptr()) for name in self.output_names: self.io_binding.bind_output(name) # run session to get outputs self.sess.run_with_iobinding(self.io_binding) ort_outputs = self.io_binding.copy_outputs_to_cpu() return ort_outputs class TensorRTDetector(DeployBaseDetector): """Wrapper for detector's inference with TensorRT.""" def __init__(self, engine_file, class_names, device_id, output_names=None): super(TensorRTDetector, self).__init__(class_names, device_id) warnings.warn('`output_names` is deprecated and will be removed in ' 'future releases.') from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin try: load_tensorrt_plugin() except (ImportError, ModuleNotFoundError): warnings.warn('If input model has custom op from mmcv, \ you may have to build mmcv with TensorRT from source.') output_names = ['dets', 'labels'] model = TRTWraper(engine_file, ['input'], output_names) with_masks = False # if TensorRT has totally 4 inputs/outputs, then # the detector should have `mask` output. if len(model.engine) == 4: model.output_names = output_names + ['masks'] with_masks = True self.model = model self.with_masks = with_masks def forward_test(self, imgs, img_metas, **kwargs): input_data = imgs[0].contiguous() with torch.cuda.device(self.device_id), torch.no_grad(): outputs = self.model({'input': input_data}) outputs = [outputs[name] for name in self.model.output_names] outputs = [out.detach().cpu().numpy() for out in outputs] return outputs
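

# A minimal usage sketch (editor's addition; 'model.onnx' and the data batch
# are hypothetical). Both wrappers plug into the normal MMDetection test
# pipeline, so a batch prepared for a PyTorch detector can be fed to them
# unchanged:
#
#     >>> detector = ONNXRuntimeDetector(
#     ...     'model.onnx', class_names=('person', ), device_id=0)
#     >>> results = detector(img, img_metas, return_loss=False, rescale=True)
#     >>> # results[i] is the per-class bbox list produced by `bbox2result`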
7,469
39.597826
79
py
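A minimal usage sketch for the ONNX Runtime wrapper above. The model path and class names are placeholders, and the list-wrapped `img`/`img_metas` nesting mirrors what `DeployBaseDetector.forward` unpacks; treat it as illustrative rather than a verified recipe.

# Hypothetical inference sketch; 'model.onnx' and the class names are
# placeholders, not files shipped with the repo.
import numpy as np
import torch

from mmdet.core.export.model_wrappers import ONNXRuntimeDetector

detector = ONNXRuntimeDetector(
    'model.onnx', class_names=('person', 'car'), device_id=0)
img = torch.randn(1, 3, 800, 1216)  # one preprocessed image, batch size 1
img_metas = [[dict(scale_factor=np.ones(4, dtype=np.float32))]]
# forward() indexes img[0] and img_metas[0], hence the extra list nesting.
results = detector(img=[img], img_metas=img_metas, return_loss=False)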
mmdetection
mmdetection-master/mmdet/core/export/onnx_helper.py
# Copyright (c) OpenMMLab. All rights reserved.
import os

import torch


def dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape):
    """Clip boxes dynamically for onnx.

    Since torch.clamp cannot have dynamic `min` and `max`, we scale the
    boxes by 1/max_shape and clamp in the range [0, 1].

    Args:
        x1 (Tensor): The x1 for bounding boxes.
        y1 (Tensor): The y1 for bounding boxes.
        x2 (Tensor): The x2 for bounding boxes.
        y2 (Tensor): The y2 for bounding boxes.
        max_shape (Tensor or torch.Size): The (H,W) of original image.

    Returns:
        tuple(Tensor): The clipped x1, y1, x2, y2.
    """
    assert isinstance(
        max_shape,
        torch.Tensor), '`max_shape` should be tensor of (h,w) for onnx'

    # scale by 1/max_shape
    x1 = x1 / max_shape[1]
    y1 = y1 / max_shape[0]
    x2 = x2 / max_shape[1]
    y2 = y2 / max_shape[0]

    # clamp [0, 1]
    x1 = torch.clamp(x1, 0, 1)
    y1 = torch.clamp(y1, 0, 1)
    x2 = torch.clamp(x2, 0, 1)
    y2 = torch.clamp(y2, 0, 1)

    # scale back
    x1 = x1 * max_shape[1]
    y1 = y1 * max_shape[0]
    x2 = x2 * max_shape[1]
    y2 = y2 * max_shape[0]
    return x1, y1, x2, y2


def get_k_for_topk(k, size):
    """Get k of TopK for onnx exporting.

    The K of TopK in TensorRT should not be a Tensor, while in ONNX Runtime
    it could be a Tensor. Due to the dynamic shape feature, we have to decide
    whether to do TopK and what K it should be while exporting to ONNX.

    If returned K is less than zero, it means we do not have to do TopK
    operation.

    Args:
        k (int or Tensor): The set k value for nms from config file.
        size (Tensor or torch.Size): The number of elements of
            TopK's input tensor.

    Returns:
        int or Tensor: The final K for TopK.
    """
    ret_k = -1
    if k <= 0 or size <= 0:
        return ret_k
    if torch.onnx.is_in_onnx_export():
        is_trt_backend = os.environ.get('ONNX_BACKEND') == 'MMCVTensorRT'
        if is_trt_backend:
            # TensorRT does not support dynamic K with TopK op
            if 0 < k < size:
                ret_k = k
        else:
            # Always keep topk op for dynamic input in onnx for ONNX Runtime
            ret_k = torch.where(k < size, k, size)
    elif k < size:
        ret_k = k
    else:
        # ret_k is -1
        pass
    return ret_k


def add_dummy_nms_for_onnx(boxes,
                           scores,
                           max_output_boxes_per_class=1000,
                           iou_threshold=0.5,
                           score_threshold=0.05,
                           pre_top_k=-1,
                           after_top_k=-1,
                           labels=None):
    """Create a dummy onnx::NonMaxSuppression op while exporting to ONNX.

    This function helps exporting to onnx with batch and multiclass NMS op.
    It only supports class-agnostic detection results. That is, the scores
    are of shape (N, num_bboxes, num_classes) and the boxes are of shape
    (N, num_boxes, 4).

    Args:
        boxes (Tensor): The bounding boxes of shape [N, num_boxes, 4].
        scores (Tensor): The detection scores of shape
            [N, num_boxes, num_classes].
        max_output_boxes_per_class (int): Maximum number of output
            boxes per class of nms. Defaults to 1000.
        iou_threshold (float): IOU threshold of nms. Defaults to 0.5.
        score_threshold (float): Score threshold of nms.
            Defaults to 0.05.
        pre_top_k (int): Number of top K boxes to keep before nms.
            Defaults to -1.
        after_top_k (int): Number of top K boxes to keep after nms.
            Defaults to -1.
        labels (Tensor, optional): If not None, explicit labels would be used.
            Otherwise, labels would be automatically generated using
            num_classes. Defaults to None.

    Returns:
        tuple[Tensor, Tensor]: dets of shape [N, num_det, 5] and class labels
            of shape [N, num_det].
""" max_output_boxes_per_class = torch.LongTensor([max_output_boxes_per_class]) iou_threshold = torch.tensor([iou_threshold], dtype=torch.float32) score_threshold = torch.tensor([score_threshold], dtype=torch.float32) batch_size = scores.shape[0] num_class = scores.shape[2] nms_pre = torch.tensor(pre_top_k, device=scores.device, dtype=torch.long) nms_pre = get_k_for_topk(nms_pre, boxes.shape[1]) if nms_pre > 0: max_scores, _ = scores.max(-1) _, topk_inds = max_scores.topk(nms_pre) batch_inds = torch.arange(batch_size).view( -1, 1).expand_as(topk_inds).long() # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 transformed_inds = boxes.shape[1] * batch_inds + topk_inds boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape( batch_size, -1, 4) scores = scores.reshape(-1, num_class)[transformed_inds, :].reshape( batch_size, -1, num_class) if labels is not None: labels = labels.reshape(-1, 1)[transformed_inds].reshape( batch_size, -1) scores = scores.permute(0, 2, 1) num_box = boxes.shape[1] # turn off tracing to create a dummy output of nms state = torch._C._get_tracing_state() # dummy indices of nms's output num_fake_det = 2 batch_inds = torch.randint(batch_size, (num_fake_det, 1)) cls_inds = torch.randint(num_class, (num_fake_det, 1)) box_inds = torch.randint(num_box, (num_fake_det, 1)) indices = torch.cat([batch_inds, cls_inds, box_inds], dim=1) output = indices setattr(DummyONNXNMSop, 'output', output) # open tracing torch._C._set_tracing_state(state) selected_indices = DummyONNXNMSop.apply(boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold) batch_inds, cls_inds = selected_indices[:, 0], selected_indices[:, 1] box_inds = selected_indices[:, 2] if labels is None: labels = torch.arange(num_class, dtype=torch.long).to(scores.device) labels = labels.view(1, num_class, 1).expand_as(scores) scores = scores.reshape(-1, 1) boxes = boxes.reshape(batch_size, -1).repeat(1, num_class).reshape(-1, 4) pos_inds = (num_class * batch_inds + cls_inds) * num_box + box_inds mask = scores.new_zeros(scores.shape) # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 # PyTorch style code: mask[batch_inds, box_inds] += 1 mask[pos_inds, :] += 1 scores = scores * mask boxes = boxes * mask scores = scores.reshape(batch_size, -1) boxes = boxes.reshape(batch_size, -1, 4) labels = labels.reshape(batch_size, -1) nms_after = torch.tensor( after_top_k, device=scores.device, dtype=torch.long) nms_after = get_k_for_topk(nms_after, num_box * num_class) if nms_after > 0: _, topk_inds = scores.topk(nms_after) batch_inds = torch.arange(batch_size).view(-1, 1).expand_as(topk_inds) # Avoid onnx2tensorrt issue in https://github.com/NVIDIA/TensorRT/issues/1134 # noqa: E501 transformed_inds = scores.shape[1] * batch_inds + topk_inds scores = scores.reshape(-1, 1)[transformed_inds, :].reshape( batch_size, -1) boxes = boxes.reshape(-1, 4)[transformed_inds, :].reshape( batch_size, -1, 4) labels = labels.reshape(-1, 1)[transformed_inds, :].reshape( batch_size, -1) scores = scores.unsqueeze(2) dets = torch.cat([boxes, scores], dim=2) return dets, labels class DummyONNXNMSop(torch.autograd.Function): """DummyONNXNMSop. This class is only for creating onnx::NonMaxSuppression. 
""" @staticmethod def forward(ctx, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): return DummyONNXNMSop.output @staticmethod def symbolic(g, boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold): return g.op( 'NonMaxSuppression', boxes, scores, max_output_boxes_per_class, iou_threshold, score_threshold, outputs=1)
8,367
36.357143
98
py
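Since `dynamic_clip_for_onnx` is pure tensor arithmetic, it is easy to sanity-check eagerly; a small self-contained example with arbitrary coordinates:

# Eager sanity check of dynamic_clip_for_onnx; the coordinate values
# are arbitrary.
import torch

from mmdet.core.export.onnx_helper import dynamic_clip_for_onnx

x1 = torch.tensor([-5.0, 10.0])
y1 = torch.tensor([3.0, -2.0])
x2 = torch.tensor([120.0, 50.0])
y2 = torch.tensor([40.0, 900.0])
max_shape = torch.tensor([100, 200])  # (H, W) of the original image
x1, y1, x2, y2 = dynamic_clip_for_onnx(x1, y1, x2, y2, max_shape)
# x coordinates are now clipped to [0, 200] and y coordinates to [0, 100].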
mmdetection
mmdetection-master/mmdet/core/export/pytorch2onnx.py
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial

import mmcv
import numpy as np
import torch
from mmcv.runner import load_checkpoint


def generate_inputs_and_wrap_model(config_path,
                                   checkpoint_path,
                                   input_config,
                                   cfg_options=None):
    """Prepare sample input and wrap model for ONNX export.

    The ONNX export API only accepts args, and all inputs should be
    torch.Tensor or corresponding types (such as tuple of tensor).
    So we should call this function before exporting. This function will:

    1. generate corresponding inputs which are used to execute the model.
    2. Wrap the model's forward function.

    For example, the MMDet models' forward function has a parameter
    ``return_loss:bool``. We want to set it as False while the export API
    supports neither bool type nor kwargs. So we have to replace the forward
    method like ``model.forward = partial(model.forward, return_loss=False)``.

    Args:
        config_path (str): the OpenMMLab config for the model we want to
            export to ONNX
        checkpoint_path (str): Path to the corresponding checkpoint
        input_config (dict): the exact data in this dict depends on the
            framework. For MMSeg, we can just declare the input shape,
            and generate the dummy data accordingly. However, for MMDet,
            we may pass the real img path, or the NMS will return None
            as there is no legal bbox.

    Returns:
        tuple: (model, tensor_data) wrapped model which can be called by
            ``model(*tensor_data)`` and a list of inputs which are used to
            execute the model while exporting.
    """

    model = build_model_from_cfg(
        config_path, checkpoint_path, cfg_options=cfg_options)
    one_img, one_meta = preprocess_example_input(input_config)
    tensor_data = [one_img]
    model.forward = partial(
        model.forward, img_metas=[[one_meta]], return_loss=False)

    # PyTorch 1.3 has some bugs in op export; we work around them by
    # registering replacement symbolic ops
    opset_version = 11
    # put the import within the function thus it will not cause import error
    # when not using this function
    try:
        from mmcv.onnx.symbolic import register_extra_symbolics
    except ModuleNotFoundError:
        raise NotImplementedError('please update mmcv to version>=v1.0.4')
    register_extra_symbolics(opset_version)

    return model, tensor_data


def build_model_from_cfg(config_path, checkpoint_path, cfg_options=None):
    """Build a model from config and load the given checkpoint.

    Args:
        config_path (str): the OpenMMLab config for the model we want to
            export to ONNX
        checkpoint_path (str): Path to the corresponding checkpoint

    Returns:
        torch.nn.Module: the built model
    """
    from mmdet.models import build_detector

    cfg = mmcv.Config.fromfile(config_path)
    if cfg_options is not None:
        cfg.merge_from_dict(cfg_options)
    # set cudnn_benchmark
    if cfg.get('cudnn_benchmark', False):
        torch.backends.cudnn.benchmark = True
    cfg.model.pretrained = None
    cfg.data.test.test_mode = True

    # build the model
    cfg.model.train_cfg = None
    model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
    checkpoint = load_checkpoint(model, checkpoint_path, map_location='cpu')
    if 'CLASSES' in checkpoint.get('meta', {}):
        model.CLASSES = checkpoint['meta']['CLASSES']
    else:
        from mmdet.datasets import DATASETS
        dataset = DATASETS.get(cfg.data.test['type'])
        assert (dataset is not None)
        model.CLASSES = dataset.CLASSES
    model.cpu().eval()
    return model


def preprocess_example_input(input_config):
    """Prepare an example input image for ``generate_inputs_and_wrap_model``.

    Args:
        input_config (dict): customized config describing the example input.
Returns: tuple: (one_img, one_meta), tensor of the example input image and \ meta information for the example input image. Examples: >>> from mmdet.core.export import preprocess_example_input >>> input_config = { >>> 'input_shape': (1,3,224,224), >>> 'input_path': 'demo/demo.jpg', >>> 'normalize_cfg': { >>> 'mean': (123.675, 116.28, 103.53), >>> 'std': (58.395, 57.12, 57.375) >>> } >>> } >>> one_img, one_meta = preprocess_example_input(input_config) >>> print(one_img.shape) torch.Size([1, 3, 224, 224]) >>> print(one_meta) {'img_shape': (224, 224, 3), 'ori_shape': (224, 224, 3), 'pad_shape': (224, 224, 3), 'filename': '<demo>.png', 'scale_factor': 1.0, 'flip': False} """ input_path = input_config['input_path'] input_shape = input_config['input_shape'] one_img = mmcv.imread(input_path) one_img = mmcv.imresize(one_img, input_shape[2:][::-1]) show_img = one_img.copy() if 'normalize_cfg' in input_config.keys(): normalize_cfg = input_config['normalize_cfg'] mean = np.array(normalize_cfg['mean'], dtype=np.float32) std = np.array(normalize_cfg['std'], dtype=np.float32) to_rgb = normalize_cfg.get('to_rgb', True) one_img = mmcv.imnormalize(one_img, mean, std, to_rgb=to_rgb) one_img = one_img.transpose(2, 0, 1) one_img = torch.from_numpy(one_img).unsqueeze(0).float().requires_grad_( True) (_, C, H, W) = input_shape one_meta = { 'img_shape': (H, W, C), 'ori_shape': (H, W, C), 'pad_shape': (H, W, C), 'filename': '<demo>.png', 'scale_factor': np.ones(4, dtype=np.float32), 'flip': False, 'show_img': show_img, 'flip_direction': None } return one_img, one_meta
5,995
36.475
78
py
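The helpers above are typically chained into `torch.onnx.export`; a sketch follows, assuming `generate_inputs_and_wrap_model` is re-exported from `mmdet.core.export`, with all config, checkpoint, and output paths as placeholders.

# Hypothetical export sketch; all paths are placeholders.
import torch

from mmdet.core.export import generate_inputs_and_wrap_model

input_config = {
    'input_shape': (1, 3, 800, 1216),
    'input_path': 'demo/demo.jpg',
    'normalize_cfg': {
        'mean': (123.675, 116.28, 103.53),
        'std': (58.395, 57.12, 57.375)
    }
}
model, tensor_data = generate_inputs_and_wrap_model(
    'configs/retinanet/retinanet_r50_fpn_1x_coco.py',
    'checkpoints/retinanet_r50_fpn_1x_coco.pth', input_config)
torch.onnx.export(
    model,
    tuple(tensor_data),
    'model.onnx',
    opset_version=11,
    input_names=['input'])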
mmdetection
mmdetection-master/mmdet/core/hook/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .checkloss_hook import CheckInvalidLossHook from .ema import ExpMomentumEMAHook, LinearMomentumEMAHook from .memory_profiler_hook import MemoryProfilerHook from .set_epoch_info_hook import SetEpochInfoHook from .sync_norm_hook import SyncNormHook from .sync_random_size_hook import SyncRandomSizeHook from .wandblogger_hook import MMDetWandbHook from .yolox_lrupdater_hook import YOLOXLrUpdaterHook from .yolox_mode_switch_hook import YOLOXModeSwitchHook __all__ = [ 'SyncRandomSizeHook', 'YOLOXModeSwitchHook', 'SyncNormHook', 'ExpMomentumEMAHook', 'LinearMomentumEMAHook', 'YOLOXLrUpdaterHook', 'CheckInvalidLossHook', 'SetEpochInfoHook', 'MemoryProfilerHook', 'MMDetWandbHook' ]
752
40.833333
72
py
mmdetection
mmdetection-master/mmdet/core/hook/checkloss_hook.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.runner.hooks import HOOKS, Hook


@HOOKS.register_module()
class CheckInvalidLossHook(Hook):
    """Check invalid loss hook.

    This hook will regularly check whether the loss is valid
    during training.

    Args:
        interval (int): Checking interval (every k iterations).
            Default: 50.
    """

    def __init__(self, interval=50):
        self.interval = interval

    def after_train_iter(self, runner):
        if self.every_n_iters(runner, self.interval):
            # `assert cond, logger.info(...)` would raise with a `None`
            # message and disappear under `python -O`, so check and raise
            # explicitly instead.
            if not torch.isfinite(runner.outputs['loss']):
                runner.logger.info('loss became infinite or NaN!')
                raise AssertionError('loss became infinite or NaN!')
681
26.28
66
py
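Hooks registered with `@HOOKS.register_module()` are normally enabled from the config; a minimal fragment for this one (the interval value is illustrative):

# Config fragment; add to an existing mmdet config file.
custom_hooks = [dict(type='CheckInvalidLossHook', interval=50)]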
mmdetection
mmdetection-master/mmdet/core/hook/ema.py
# Copyright (c) OpenMMLab. All rights reserved. import math from mmcv.parallel import is_module_wrapper from mmcv.runner.hooks import HOOKS, Hook class BaseEMAHook(Hook): """Exponential Moving Average Hook. Use Exponential Moving Average on all parameters of model in training process. All parameters have a ema backup, which update by the formula as below. EMAHook takes priority over EvalHook and CheckpointHook. Note, the original model parameters are actually saved in ema field after train. Args: momentum (float): The momentum used for updating ema parameter. Ema's parameter are updated with the formula: `ema_param = (1-momentum) * ema_param + momentum * cur_param`. Defaults to 0.0002. skip_buffers (bool): Whether to skip the model buffers, such as batchnorm running stats (running_mean, running_var), it does not perform the ema operation. Default to False. interval (int): Update ema parameter every interval iteration. Defaults to 1. resume_from (str, optional): The checkpoint path. Defaults to None. momentum_fun (func, optional): The function to change momentum during early iteration (also warmup) to help early training. It uses `momentum` as a constant. Defaults to None. """ def __init__(self, momentum=0.0002, interval=1, skip_buffers=False, resume_from=None, momentum_fun=None): assert 0 < momentum < 1 self.momentum = momentum self.skip_buffers = skip_buffers self.interval = interval self.checkpoint = resume_from self.momentum_fun = momentum_fun def before_run(self, runner): """To resume model with it's ema parameters more friendly. Register ema parameter as ``named_buffer`` to model. """ model = runner.model if is_module_wrapper(model): model = model.module self.param_ema_buffer = {} if self.skip_buffers: self.model_parameters = dict(model.named_parameters()) else: self.model_parameters = model.state_dict() for name, value in self.model_parameters.items(): # "." is not allowed in module's buffer name buffer_name = f"ema_{name.replace('.', '_')}" self.param_ema_buffer[name] = buffer_name model.register_buffer(buffer_name, value.data.clone()) self.model_buffers = dict(model.named_buffers()) if self.checkpoint is not None: runner.resume(self.checkpoint) def get_momentum(self, runner): return self.momentum_fun(runner.iter) if self.momentum_fun else \ self.momentum def after_train_iter(self, runner): """Update ema parameter every self.interval iterations.""" if (runner.iter + 1) % self.interval != 0: return momentum = self.get_momentum(runner) for name, parameter in self.model_parameters.items(): # exclude num_tracking if parameter.dtype.is_floating_point: buffer_name = self.param_ema_buffer[name] buffer_parameter = self.model_buffers[buffer_name] buffer_parameter.mul_(1 - momentum).add_( parameter.data, alpha=momentum) def after_train_epoch(self, runner): """We load parameter values from ema backup to model before the EvalHook.""" self._swap_ema_parameters() def before_train_epoch(self, runner): """We recover model's parameter from ema backup after last epoch's EvalHook.""" self._swap_ema_parameters() def _swap_ema_parameters(self): """Swap the parameter of model with parameter in ema_buffer.""" for name, value in self.model_parameters.items(): temp = value.data.clone() ema_buffer = self.model_buffers[self.param_ema_buffer[name]] value.data.copy_(ema_buffer.data) ema_buffer.data.copy_(temp) @HOOKS.register_module() class ExpMomentumEMAHook(BaseEMAHook): """EMAHook using exponential momentum strategy. Args: total_iter (int): The total number of iterations of EMA momentum. Defaults to 2000. 
""" def __init__(self, total_iter=2000, **kwargs): super(ExpMomentumEMAHook, self).__init__(**kwargs) self.momentum_fun = lambda x: (1 - self.momentum) * math.exp(-( 1 + x) / total_iter) + self.momentum @HOOKS.register_module() class LinearMomentumEMAHook(BaseEMAHook): """EMAHook using linear momentum strategy. Args: warm_up (int): During first warm_up steps, we may use smaller decay to update ema parameters more slowly. Defaults to 100. """ def __init__(self, warm_up=100, **kwargs): super(LinearMomentumEMAHook, self).__init__(**kwargs) self.momentum_fun = lambda x: min(self.momentum**self.interval, (1 + x) / (warm_up + x))
5,150
38.320611
78
py
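A config fragment enabling the exponential-momentum variant, modeled on the YOLOX recipe; the momentum and priority values here are illustrative, not mandated by the hook:

# Config fragment; priority 49 places the hook ahead of checkpointing and
# eval hooks, matching the note in the BaseEMAHook docstring.
custom_hooks = [
    dict(
        type='ExpMomentumEMAHook',
        total_iter=2000,
        momentum=0.0002,
        priority=49)
]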
mmdetection
mmdetection-master/mmdet/core/hook/memory_profiler_hook.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class MemoryProfilerHook(Hook): """Memory profiler hook recording memory information including virtual memory, swap memory, and the memory of the current process. Args: interval (int): Checking interval (every k iterations). Default: 50. """ def __init__(self, interval=50): try: from psutil import swap_memory, virtual_memory self._swap_memory = swap_memory self._virtual_memory = virtual_memory except ImportError: raise ImportError('psutil is not installed, please install it by: ' 'pip install psutil') try: from memory_profiler import memory_usage self._memory_usage = memory_usage except ImportError: raise ImportError( 'memory_profiler is not installed, please install it by: ' 'pip install memory_profiler') self.interval = interval def after_iter(self, runner): if self.every_n_iters(runner, self.interval): # in Byte virtual_memory = self._virtual_memory() swap_memory = self._swap_memory() # in MB process_memory = self._memory_usage()[0] factor = 1024 * 1024 runner.logger.info( 'Memory information ' 'available_memory: ' f'{round(virtual_memory.available / factor)} MB, ' 'used_memory: ' f'{round(virtual_memory.used / factor)} MB, ' f'memory_utilization: {virtual_memory.percent} %, ' 'available_swap_memory: ' f'{round((swap_memory.total - swap_memory.used) / factor)}' ' MB, ' f'used_swap_memory: {round(swap_memory.used / factor)} MB, ' f'swap_memory_utilization: {swap_memory.percent} %, ' 'current_process_memory: ' f'{round(process_memory)} MB')
2,118
36.839286
79
py
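A config fragment for the profiler hook; note the two soft dependencies it imports lazily:

# Config fragment; requires `pip install psutil memory_profiler`.
custom_hooks = [dict(type='MemoryProfilerHook', interval=50)]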
mmdetection
mmdetection-master/mmdet/core/hook/set_epoch_info_hook.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.parallel import is_module_wrapper from mmcv.runner import HOOKS, Hook @HOOKS.register_module() class SetEpochInfoHook(Hook): """Set runner's epoch information to the model.""" def before_train_epoch(self, runner): epoch = runner.epoch model = runner.model if is_module_wrapper(model): model = model.module model.set_epoch(epoch)
442
26.6875
54
py
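The hook assumes the unwrapped model exposes a `set_epoch` method; a minimal sketch of that contract with a hypothetical module:

import torch.nn as nn


class EpochAwareDetector(nn.Module):
    """Hypothetical module satisfying SetEpochInfoHook's contract."""

    def __init__(self):
        super().__init__()
        self.epoch = 0

    def set_epoch(self, epoch):
        # e.g. anneal a loss weight or augmentation schedule by epoch
        self.epoch = epoch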
mmdetection
mmdetection-master/mmdet/core/hook/sync_norm_hook.py
# Copyright (c) OpenMMLab. All rights reserved. from collections import OrderedDict from mmcv.runner import get_dist_info from mmcv.runner.hooks import HOOKS, Hook from torch import nn from ..utils.dist_utils import all_reduce_dict def get_norm_states(module): async_norm_states = OrderedDict() for name, child in module.named_modules(): if isinstance(child, nn.modules.batchnorm._NormBase): for k, v in child.state_dict().items(): async_norm_states['.'.join([name, k])] = v return async_norm_states @HOOKS.register_module() class SyncNormHook(Hook): """Synchronize Norm states after training epoch, currently used in YOLOX. Args: num_last_epochs (int): The number of latter epochs in the end of the training to switch to synchronizing norm interval. Default: 15. interval (int): Synchronizing norm interval. Default: 1. """ def __init__(self, num_last_epochs=15, interval=1): self.interval = interval self.num_last_epochs = num_last_epochs def before_train_epoch(self, runner): epoch = runner.epoch if (epoch + 1) == runner.max_epochs - self.num_last_epochs: # Synchronize norm every epoch. self.interval = 1 def after_train_epoch(self, runner): """Synchronizing norm.""" epoch = runner.epoch module = runner.model if (epoch + 1) % self.interval == 0: _, world_size = get_dist_info() if world_size == 1: return norm_states = get_norm_states(module) if len(norm_states) == 0: return norm_states = all_reduce_dict(norm_states, op='mean') module.load_state_dict(norm_states, strict=False)
1,789
32.773585
77
py
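A config fragment in the YOLOX style; the `num_last_epochs`, `interval`, and `priority` values are illustrative:

# Config fragment; falls back to per-epoch norm syncing for the last
# 15 epochs of training.
custom_hooks = [
    dict(type='SyncNormHook', num_last_epochs=15, interval=10, priority=48)
]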
mmdetection
mmdetection-master/mmdet/core/hook/sync_random_size_hook.py
# Copyright (c) OpenMMLab. All rights reserved. import random import warnings import torch from mmcv.runner import get_dist_info from mmcv.runner.hooks import HOOKS, Hook from torch import distributed as dist @HOOKS.register_module() class SyncRandomSizeHook(Hook): """Change and synchronize the random image size across ranks. SyncRandomSizeHook is deprecated, please use Resize pipeline to achieve similar functions. Such as `dict(type='Resize', img_scale=[(448, 448), (832, 832)], multiscale_mode='range', keep_ratio=True)`. Note: Due to the multi-process dataloader, its behavior is different from YOLOX's official implementation, the official is to change the size every fixed iteration interval and what we achieved is a fixed epoch interval. Args: ratio_range (tuple[int]): Random ratio range. It will be multiplied by 32, and then change the dataset output image size. Default: (14, 26). img_scale (tuple[int]): Size of input image. Default: (640, 640). interval (int): The epoch interval of change image size. Default: 1. device (torch.device | str): device for returned tensors. Default: 'cuda'. """ def __init__(self, ratio_range=(14, 26), img_scale=(640, 640), interval=1, device='cuda'): warnings.warn('DeprecationWarning: SyncRandomSizeHook is deprecated. ' 'Please use Resize pipeline to achieve similar ' 'functions. Due to the multi-process dataloader, ' 'its behavior is different from YOLOX\'s official ' 'implementation, the official is to change the size ' 'every fixed iteration interval and what we achieved ' 'is a fixed epoch interval.') self.rank, world_size = get_dist_info() self.is_distributed = world_size > 1 self.ratio_range = ratio_range self.img_scale = img_scale self.interval = interval self.device = device def after_train_epoch(self, runner): """Change the dataset output image size.""" if self.ratio_range is not None and (runner.epoch + 1) % self.interval == 0: # Due to DDP and DP get the device behavior inconsistent, # so we did not get the device from runner.model. tensor = torch.LongTensor(2).to(self.device) if self.rank == 0: size_factor = self.img_scale[1] * 1. / self.img_scale[0] size = random.randint(*self.ratio_range) size = (int(32 * size), 32 * int(size * size_factor)) tensor[0] = size[0] tensor[1] = size[1] if self.is_distributed: dist.barrier() dist.broadcast(tensor, 0) runner.data_loader.dataset.update_dynamic_scale( (tensor[0].item(), tensor[1].item()))
3,061
40.945205
78
py
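Following the deprecation notice above, the equivalent behavior now lives in the data pipeline; the surrounding transforms are elided:

# Recommended replacement from the docstring: random multi-scale resizing
# as a pipeline step rather than a hook.
train_pipeline = [
    # ... preceding transforms ...
    dict(
        type='Resize',
        img_scale=[(448, 448), (832, 832)],
        multiscale_mode='range',
        keep_ratio=True),
    # ... following transforms ...
]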
mmdetection
mmdetection-master/mmdet/core/hook/wandblogger_hook.py
# Copyright (c) OpenMMLab. All rights reserved. import importlib import os.path as osp import sys import warnings import mmcv import numpy as np import pycocotools.mask as mask_util from mmcv.runner import HOOKS from mmcv.runner.dist_utils import master_only from mmcv.runner.hooks.checkpoint import CheckpointHook from mmcv.runner.hooks.logger.wandb import WandbLoggerHook from mmcv.utils import digit_version from mmdet.core import DistEvalHook, EvalHook from mmdet.core.mask.structures import polygon_to_bitmap @HOOKS.register_module() class MMDetWandbHook(WandbLoggerHook): """Enhanced Wandb logger hook for MMDetection. Comparing with the :cls:`mmcv.runner.WandbLoggerHook`, this hook can not only automatically log all the metrics but also log the following extra information - saves model checkpoints as W&B Artifact, and logs model prediction as interactive W&B Tables. - Metrics: The MMDetWandbHook will automatically log training and validation metrics along with system metrics (CPU/GPU). - Checkpointing: If `log_checkpoint` is True, the checkpoint saved at every checkpoint interval will be saved as W&B Artifacts. This depends on the : class:`mmcv.runner.CheckpointHook` whose priority is higher than this hook. Please refer to https://docs.wandb.ai/guides/artifacts/model-versioning to learn more about model versioning with W&B Artifacts. - Checkpoint Metadata: If evaluation results are available for a given checkpoint artifact, it will have a metadata associated with it. The metadata contains the evaluation metrics computed on validation data with that checkpoint along with the current epoch. It depends on `EvalHook` whose priority is more than MMDetWandbHook. - Evaluation: At every evaluation interval, the `MMDetWandbHook` logs the model prediction as interactive W&B Tables. The number of samples logged is given by `num_eval_images`. Currently, the `MMDetWandbHook` logs the predicted bounding boxes along with the ground truth at every evaluation interval. This depends on the `EvalHook` whose priority is more than `MMDetWandbHook`. Also note that the data is just logged once and subsequent evaluation tables uses reference to the logged data to save memory usage. Please refer to https://docs.wandb.ai/guides/data-vis to learn more about W&B Tables. For more details check out W&B's MMDetection docs: https://docs.wandb.ai/guides/integrations/mmdetection ``` Example: log_config = dict( ... hooks=[ ..., dict(type='MMDetWandbHook', init_kwargs={ 'entity': "YOUR_ENTITY", 'project': "YOUR_PROJECT_NAME" }, interval=50, log_checkpoint=True, log_checkpoint_metadata=True, num_eval_images=100, bbox_score_thr=0.3) ]) ``` Args: init_kwargs (dict): A dict passed to wandb.init to initialize a W&B run. Please refer to https://docs.wandb.ai/ref/python/init for possible key-value pairs. interval (int): Logging interval (every k iterations). Defaults to 50. log_checkpoint (bool): Save the checkpoint at every checkpoint interval as W&B Artifacts. Use this for model versioning where each version is a checkpoint. Defaults to False. log_checkpoint_metadata (bool): Log the evaluation metrics computed on the validation data with the checkpoint, along with current epoch as a metadata to that checkpoint. Defaults to True. num_eval_images (int): The number of validation images to be logged. If zero, the evaluation won't be logged. Defaults to 100. bbox_score_thr (float): Threshold for bounding box scores. Defaults to 0.3. 
""" def __init__(self, init_kwargs=None, interval=50, log_checkpoint=False, log_checkpoint_metadata=False, num_eval_images=100, bbox_score_thr=0.3, **kwargs): super(MMDetWandbHook, self).__init__(init_kwargs, interval, **kwargs) self.log_checkpoint = log_checkpoint self.log_checkpoint_metadata = ( log_checkpoint and log_checkpoint_metadata) self.num_eval_images = num_eval_images self.bbox_score_thr = bbox_score_thr self.log_evaluation = (num_eval_images > 0) self.ckpt_hook: CheckpointHook = None self.eval_hook: EvalHook = None def import_wandb(self): try: import wandb from wandb import init # noqa # Fix ResourceWarning when calling wandb.log in wandb v0.12.10. # https://github.com/wandb/client/issues/2837 if digit_version(wandb.__version__) < digit_version('0.12.10'): warnings.warn( f'The current wandb {wandb.__version__} is ' f'lower than v0.12.10 will cause ResourceWarning ' f'when calling wandb.log, Please run ' f'"pip install --upgrade wandb"') except ImportError: raise ImportError( 'Please run "pip install "wandb>=0.12.10"" to install wandb') self.wandb = wandb @master_only def before_run(self, runner): super(MMDetWandbHook, self).before_run(runner) # Save and Log config. if runner.meta is not None and runner.meta.get('exp_name', None) is not None: src_cfg_path = osp.join(runner.work_dir, runner.meta.get('exp_name', None)) if osp.exists(src_cfg_path): self.wandb.save(src_cfg_path, base_path=runner.work_dir) self._update_wandb_config(runner) else: runner.logger.warning('No meta information found in the runner. ') # Inspect CheckpointHook and EvalHook for hook in runner.hooks: if isinstance(hook, CheckpointHook): self.ckpt_hook = hook if isinstance(hook, (EvalHook, DistEvalHook)): self.eval_hook = hook # Check conditions to log checkpoint if self.log_checkpoint: if self.ckpt_hook is None: self.log_checkpoint = False self.log_checkpoint_metadata = False runner.logger.warning( 'To log checkpoint in MMDetWandbHook, `CheckpointHook` is' 'required, please check hooks in the runner.') else: self.ckpt_interval = self.ckpt_hook.interval # Check conditions to log evaluation if self.log_evaluation or self.log_checkpoint_metadata: if self.eval_hook is None: self.log_evaluation = False self.log_checkpoint_metadata = False runner.logger.warning( 'To log evaluation or checkpoint metadata in ' 'MMDetWandbHook, `EvalHook` or `DistEvalHook` in mmdet ' 'is required, please check whether the validation ' 'is enabled.') else: self.eval_interval = self.eval_hook.interval self.val_dataset = self.eval_hook.dataloader.dataset # Determine the number of samples to be logged. if self.num_eval_images > len(self.val_dataset): self.num_eval_images = len(self.val_dataset) runner.logger.warning( f'The num_eval_images ({self.num_eval_images}) is ' 'greater than the total number of validation samples ' f'({len(self.val_dataset)}). The complete validation ' 'dataset will be logged.') # Check conditions to log checkpoint metadata if self.log_checkpoint_metadata: assert self.ckpt_interval % self.eval_interval == 0, \ 'To log checkpoint metadata in MMDetWandbHook, the interval ' \ f'of checkpoint saving ({self.ckpt_interval}) should be ' \ 'divisible by the interval of evaluation ' \ f'({self.eval_interval}).' 
# Initialize evaluation table if self.log_evaluation: # Initialize data table self._init_data_table() # Add data to the data table self._add_ground_truth(runner) # Log ground truth data self._log_data_table() @master_only def after_train_epoch(self, runner): super(MMDetWandbHook, self).after_train_epoch(runner) if not self.by_epoch: return # Log checkpoint and metadata. if (self.log_checkpoint and self.every_n_epochs(runner, self.ckpt_interval) or (self.ckpt_hook.save_last and self.is_last_epoch(runner))): if self.log_checkpoint_metadata and self.eval_hook: metadata = { 'epoch': runner.epoch + 1, **self._get_eval_results() } else: metadata = None aliases = [f'epoch_{runner.epoch + 1}', 'latest'] model_path = osp.join(self.ckpt_hook.out_dir, f'epoch_{runner.epoch + 1}.pth') self._log_ckpt_as_artifact(model_path, aliases, metadata) # Save prediction table if self.log_evaluation and self.eval_hook._should_evaluate(runner): results = self.eval_hook.latest_results # Initialize evaluation table self._init_pred_table() # Log predictions self._log_predictions(results) # Log the table self._log_eval_table(runner.epoch + 1) # for the reason of this double-layered structure, refer to # https://github.com/open-mmlab/mmdetection/issues/8145#issuecomment-1345343076 def after_train_iter(self, runner): if self.get_mode(runner) == 'train': # An ugly patch. The iter-based eval hook will call the # `after_train_iter` method of all logger hooks before evaluation. # Use this trick to skip that call. # Don't call super method at first, it will clear the log_buffer return super(MMDetWandbHook, self).after_train_iter(runner) else: super(MMDetWandbHook, self).after_train_iter(runner) self._after_train_iter(runner) @master_only def _after_train_iter(self, runner): if self.by_epoch: return # Save checkpoint and metadata if (self.log_checkpoint and self.every_n_iters(runner, self.ckpt_interval) or (self.ckpt_hook.save_last and self.is_last_iter(runner))): if self.log_checkpoint_metadata and self.eval_hook: metadata = { 'iter': runner.iter + 1, **self._get_eval_results() } else: metadata = None aliases = [f'iter_{runner.iter + 1}', 'latest'] model_path = osp.join(self.ckpt_hook.out_dir, f'iter_{runner.iter + 1}.pth') self._log_ckpt_as_artifact(model_path, aliases, metadata) # Save prediction table if self.log_evaluation and self.eval_hook._should_evaluate(runner): results = self.eval_hook.latest_results # Initialize evaluation table self._init_pred_table() # Log predictions self._log_predictions(results) # Log the table self._log_eval_table(runner.iter + 1) @master_only def after_run(self, runner): self.wandb.finish() def _update_wandb_config(self, runner): """Update wandb config.""" # Import the config file. sys.path.append(runner.work_dir) config_filename = runner.meta['exp_name'][:-3] configs = importlib.import_module(config_filename) # Prepare a nested dict of config variables. config_keys = [key for key in dir(configs) if not key.startswith('__')] config_dict = {key: getattr(configs, key) for key in config_keys} # Update the W&B config. self.wandb.config.update(config_dict) def _log_ckpt_as_artifact(self, model_path, aliases, metadata=None): """Log model checkpoint as W&B Artifact. Args: model_path (str): Path of the checkpoint to log. aliases (list): List of the aliases associated with this artifact. metadata (dict, optional): Metadata associated with this artifact. 
""" model_artifact = self.wandb.Artifact( f'run_{self.wandb.run.id}_model', type='model', metadata=metadata) model_artifact.add_file(model_path) self.wandb.log_artifact(model_artifact, aliases=aliases) def _get_eval_results(self): """Get model evaluation results.""" results = self.eval_hook.latest_results eval_results = self.val_dataset.evaluate( results, logger='silent', **self.eval_hook.eval_kwargs) return eval_results def _init_data_table(self): """Initialize the W&B Tables for validation data.""" columns = ['image_name', 'image'] self.data_table = self.wandb.Table(columns=columns) def _init_pred_table(self): """Initialize the W&B Tables for model evaluation.""" columns = ['image_name', 'ground_truth', 'prediction'] self.eval_table = self.wandb.Table(columns=columns) def _add_ground_truth(self, runner): # Get image loading pipeline from mmdet.datasets.pipelines import LoadImageFromFile img_loader = None for t in self.val_dataset.pipeline.transforms: if isinstance(t, LoadImageFromFile): img_loader = t if img_loader is None: self.log_evaluation = False runner.logger.warning( 'LoadImageFromFile is required to add images ' 'to W&B Tables.') return # Select the images to be logged. self.eval_image_indexs = np.arange(len(self.val_dataset)) # Set seed so that same validation set is logged each time. np.random.seed(42) np.random.shuffle(self.eval_image_indexs) self.eval_image_indexs = self.eval_image_indexs[:self.num_eval_images] CLASSES = self.val_dataset.CLASSES self.class_id_to_label = { id + 1: name for id, name in enumerate(CLASSES) } self.class_set = self.wandb.Classes([{ 'id': id, 'name': name } for id, name in self.class_id_to_label.items()]) img_prefix = self.val_dataset.img_prefix for idx in self.eval_image_indexs: img_info = self.val_dataset.data_infos[idx] image_name = img_info.get('filename', f'img_{idx}') img_height, img_width = img_info['height'], img_info['width'] img_meta = img_loader( dict(img_info=img_info, img_prefix=img_prefix)) # Get image and convert from BGR to RGB image = mmcv.bgr2rgb(img_meta['img']) data_ann = self.val_dataset.get_ann_info(idx) bboxes = data_ann['bboxes'] labels = data_ann['labels'] masks = data_ann.get('masks', None) # Get dict of bounding boxes to be logged. assert len(bboxes) == len(labels) wandb_boxes = self._get_wandb_bboxes(bboxes, labels) # Get dict of masks to be logged. if masks is not None: wandb_masks = self._get_wandb_masks( masks, labels, is_poly_mask=True, height=img_height, width=img_width) else: wandb_masks = None # TODO: Panoramic segmentation visualization. # Log a row to the data table. self.data_table.add_data( image_name, self.wandb.Image( image, boxes=wandb_boxes, masks=wandb_masks, classes=self.class_set)) def _log_predictions(self, results): table_idxs = self.data_table_ref.get_index() assert len(table_idxs) == len(self.eval_image_indexs) for ndx, eval_image_index in enumerate(self.eval_image_indexs): # Get the result result = results[eval_image_index] if isinstance(result, tuple): bbox_result, segm_result = result if isinstance(segm_result, tuple): segm_result = segm_result[0] # ms rcnn else: bbox_result, segm_result = result, None assert len(bbox_result) == len(self.class_id_to_label) # Get labels bboxes = np.vstack(bbox_result) labels = [ np.full(bbox.shape[0], i, dtype=np.int32) for i, bbox in enumerate(bbox_result) ] labels = np.concatenate(labels) # Get segmentation mask if available. 
segms = None if segm_result is not None and len(labels) > 0: segms = mmcv.concat_list(segm_result) segms = mask_util.decode(segms) segms = segms.transpose(2, 0, 1) assert len(segms) == len(labels) # TODO: Panoramic segmentation visualization. # Remove bounding boxes and masks with score lower than threshold. if self.bbox_score_thr > 0: assert bboxes is not None and bboxes.shape[1] == 5 scores = bboxes[:, -1] inds = scores > self.bbox_score_thr bboxes = bboxes[inds, :] labels = labels[inds] if segms is not None: segms = segms[inds, ...] # Get dict of bounding boxes to be logged. wandb_boxes = self._get_wandb_bboxes(bboxes, labels, log_gt=False) # Get dict of masks to be logged. if segms is not None: wandb_masks = self._get_wandb_masks(segms, labels) else: wandb_masks = None # Log a row to the eval table. self.eval_table.add_data( self.data_table_ref.data[ndx][0], self.data_table_ref.data[ndx][1], self.wandb.Image( self.data_table_ref.data[ndx][1], boxes=wandb_boxes, masks=wandb_masks, classes=self.class_set)) def _get_wandb_bboxes(self, bboxes, labels, log_gt=True): """Get list of structured dict for logging bounding boxes to W&B. Args: bboxes (list): List of bounding box coordinates in (minX, minY, maxX, maxY) format. labels (int): List of label ids. log_gt (bool): Whether to log ground truth or prediction boxes. Returns: Dictionary of bounding boxes to be logged. """ wandb_boxes = {} box_data = [] for bbox, label in zip(bboxes, labels): if not isinstance(label, int): label = int(label) label = label + 1 if len(bbox) == 5: confidence = float(bbox[4]) class_name = self.class_id_to_label[label] box_caption = f'{class_name} {confidence:.2f}' else: box_caption = str(self.class_id_to_label[label]) position = dict( minX=int(bbox[0]), minY=int(bbox[1]), maxX=int(bbox[2]), maxY=int(bbox[3])) box_data.append({ 'position': position, 'class_id': label, 'box_caption': box_caption, 'domain': 'pixel' }) wandb_bbox_dict = { 'box_data': box_data, 'class_labels': self.class_id_to_label } if log_gt: wandb_boxes['ground_truth'] = wandb_bbox_dict else: wandb_boxes['predictions'] = wandb_bbox_dict return wandb_boxes def _get_wandb_masks(self, masks, labels, is_poly_mask=False, height=None, width=None): """Get list of structured dict for logging masks to W&B. Args: masks (list): List of masks. labels (int): List of label ids. is_poly_mask (bool): Whether the mask is polygonal or not. This is true for CocoDataset. height (int): Height of the image. width (int): Width of the image. Returns: Dictionary of masks to be logged. """ mask_label_dict = dict() for mask, label in zip(masks, labels): label = label + 1 # Get bitmap mask from polygon. if is_poly_mask: if height is not None and width is not None: mask = polygon_to_bitmap(mask, height, width) # Create composite masks for each class. if label not in mask_label_dict.keys(): mask_label_dict[label] = mask else: mask_label_dict[label] = np.logical_or(mask_label_dict[label], mask) wandb_masks = dict() for key, value in mask_label_dict.items(): # Create mask for that class. value = value.astype(np.uint8) value[value > 0] = key # Create dict of masks for logging. class_name = self.class_id_to_label[key] wandb_masks[class_name] = { 'mask_data': value, 'class_labels': self.class_id_to_label } return wandb_masks def _log_data_table(self): """Log the W&B Tables for validation data as artifact and calls `use_artifact` on it so that the evaluation table can use the reference of already uploaded images. This allows the data to be uploaded just once. 
""" data_artifact = self.wandb.Artifact('val', type='dataset') data_artifact.add(self.data_table, 'val_data') if not self.wandb.run.offline: self.wandb.run.use_artifact(data_artifact) data_artifact.wait() self.data_table_ref = data_artifact.get('val_data') else: self.data_table_ref = self.data_table def _log_eval_table(self, idx): """Log the W&B Tables for model evaluation. The table will be logged multiple times creating new version. Use this to compare models at different intervals interactively. """ pred_artifact = self.wandb.Artifact( f'run_{self.wandb.run.id}_pred', type='evaluation') pred_artifact.add(self.eval_table, 'eval_data') if self.by_epoch: aliases = ['latest', f'epoch_{idx}'] else: aliases = ['latest', f'iter_{idx}'] self.wandb.run.log_artifact(pred_artifact, aliases=aliases)
23,938
39.301347
83
py
mmdetection
mmdetection-master/mmdet/core/hook/yolox_lrupdater_hook.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.runner.hooks import HOOKS from mmcv.runner.hooks.lr_updater import (CosineAnnealingLrUpdaterHook, annealing_cos) @HOOKS.register_module() class YOLOXLrUpdaterHook(CosineAnnealingLrUpdaterHook): """YOLOX learning rate scheme. There are two main differences between YOLOXLrUpdaterHook and CosineAnnealingLrUpdaterHook. 1. When the current running epoch is greater than `max_epoch-last_epoch`, a fixed learning rate will be used 2. The exp warmup scheme is different with LrUpdaterHook in MMCV Args: num_last_epochs (int): The number of epochs with a fixed learning rate before the end of the training. """ def __init__(self, num_last_epochs, **kwargs): self.num_last_epochs = num_last_epochs super(YOLOXLrUpdaterHook, self).__init__(**kwargs) def get_warmup_lr(self, cur_iters): def _get_warmup_lr(cur_iters, regular_lr): # exp warmup scheme k = self.warmup_ratio * pow( (cur_iters + 1) / float(self.warmup_iters), 2) warmup_lr = [_lr * k for _lr in regular_lr] return warmup_lr if isinstance(self.base_lr, dict): lr_groups = {} for key, base_lr in self.base_lr.items(): lr_groups[key] = _get_warmup_lr(cur_iters, base_lr) return lr_groups else: return _get_warmup_lr(cur_iters, self.base_lr) def get_lr(self, runner, base_lr): last_iter = len(runner.data_loader) * self.num_last_epochs if self.by_epoch: progress = runner.epoch max_progress = runner.max_epochs else: progress = runner.iter max_progress = runner.max_iters progress += 1 if self.min_lr_ratio is not None: target_lr = base_lr * self.min_lr_ratio else: target_lr = self.min_lr if progress >= max_progress - last_iter: # fixed learning rate return target_lr else: return annealing_cos( base_lr, target_lr, (progress - self.warmup_iters) / (max_progress - self.warmup_iters - last_iter))
2,310
32.985294
78
py
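A config fragment wiring this scheduler up; the values mirror the common YOLOX recipe and are illustrative:

# Config fragment; warmup runs for 5 epochs and the final 15 epochs use a
# fixed learning rate of base_lr * min_lr_ratio.
lr_config = dict(
    policy='YOLOX',
    warmup='exp',
    by_epoch=False,
    warmup_by_epoch=True,
    warmup_ratio=1,
    warmup_iters=5,  # 5 epochs
    num_last_epochs=15,
    min_lr_ratio=0.05)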
mmdetection
mmdetection-master/mmdet/core/hook/yolox_mode_switch_hook.py
# Copyright (c) OpenMMLab. All rights reserved. from mmcv.parallel import is_module_wrapper from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class YOLOXModeSwitchHook(Hook): """Switch the mode of YOLOX during training. This hook turns off the mosaic and mixup data augmentation and switches to use L1 loss in bbox_head. Args: num_last_epochs (int): The number of latter epochs in the end of the training to close the data augmentation and switch to L1 loss. Default: 15. skip_type_keys (list[str], optional): Sequence of type string to be skip pipeline. Default: ('Mosaic', 'RandomAffine', 'MixUp') """ def __init__(self, num_last_epochs=15, skip_type_keys=('Mosaic', 'RandomAffine', 'MixUp')): self.num_last_epochs = num_last_epochs self.skip_type_keys = skip_type_keys self._restart_dataloader = False def before_train_epoch(self, runner): """Close mosaic and mixup augmentation and switches to use L1 loss.""" epoch = runner.epoch train_loader = runner.data_loader model = runner.model if is_module_wrapper(model): model = model.module if (epoch + 1) == runner.max_epochs - self.num_last_epochs: runner.logger.info('No mosaic and mixup aug now!') # The dataset pipeline cannot be updated when persistent_workers # is True, so we need to force the dataloader's multi-process # restart. This is a very hacky approach. train_loader.dataset.update_skip_type_keys(self.skip_type_keys) if hasattr(train_loader, 'persistent_workers' ) and train_loader.persistent_workers is True: train_loader._DataLoader__initialized = False train_loader._iterator = None self._restart_dataloader = True runner.logger.info('Add additional L1 loss now!') model.bbox_head.use_l1 = True else: # Once the restart is complete, we need to restore # the initialization flag. if self._restart_dataloader: train_loader._DataLoader__initialized = True
2,270
41.849057
78
py
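A config fragment enabling the switch; `priority=48` follows the usual YOLOX setup so it runs before other custom hooks, though the exact value is illustrative:

# Config fragment; disables Mosaic/RandomAffine/MixUp and turns on the
# bbox head's L1 loss for the final 15 epochs.
custom_hooks = [
    dict(type='YOLOXModeSwitchHook', num_last_epochs=15, priority=48)
]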
mmdetection
mmdetection-master/mmdet/core/mask/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .mask_target import mask_target from .structures import BaseInstanceMasks, BitmapMasks, PolygonMasks from .utils import encode_mask_results, mask2bbox, split_combined_polys __all__ = [ 'split_combined_polys', 'mask_target', 'BaseInstanceMasks', 'BitmapMasks', 'PolygonMasks', 'encode_mask_results', 'mask2bbox' ]
375
36.6
78
py
mmdetection
mmdetection-master/mmdet/core/mask/mask_target.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from torch.nn.modules.utils import _pair def mask_target(pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, cfg): """Compute mask target for positive proposals in multiple images. Args: pos_proposals_list (list[Tensor]): Positive proposals in multiple images. pos_assigned_gt_inds_list (list[Tensor]): Assigned GT indices for each positive proposals. gt_masks_list (list[:obj:`BaseInstanceMasks`]): Ground truth masks of each image. cfg (dict): Config dict that specifies the mask size. Returns: list[Tensor]: Mask target of each image. Example: >>> import mmcv >>> import mmdet >>> from mmdet.core.mask import BitmapMasks >>> from mmdet.core.mask.mask_target import * >>> H, W = 17, 18 >>> cfg = mmcv.Config({'mask_size': (13, 14)}) >>> rng = np.random.RandomState(0) >>> # Positive proposals (tl_x, tl_y, br_x, br_y) for each image >>> pos_proposals_list = [ >>> torch.Tensor([ >>> [ 7.2425, 5.5929, 13.9414, 14.9541], >>> [ 7.3241, 3.6170, 16.3850, 15.3102], >>> ]), >>> torch.Tensor([ >>> [ 4.8448, 6.4010, 7.0314, 9.7681], >>> [ 5.9790, 2.6989, 7.4416, 4.8580], >>> [ 0.0000, 0.0000, 0.1398, 9.8232], >>> ]), >>> ] >>> # Corresponding class index for each proposal for each image >>> pos_assigned_gt_inds_list = [ >>> torch.LongTensor([7, 0]), >>> torch.LongTensor([5, 4, 1]), >>> ] >>> # Ground truth mask for each true object for each image >>> gt_masks_list = [ >>> BitmapMasks(rng.rand(8, H, W), height=H, width=W), >>> BitmapMasks(rng.rand(6, H, W), height=H, width=W), >>> ] >>> mask_targets = mask_target( >>> pos_proposals_list, pos_assigned_gt_inds_list, >>> gt_masks_list, cfg) >>> assert mask_targets.shape == (5,) + cfg['mask_size'] """ cfg_list = [cfg for _ in range(len(pos_proposals_list))] mask_targets = map(mask_target_single, pos_proposals_list, pos_assigned_gt_inds_list, gt_masks_list, cfg_list) mask_targets = list(mask_targets) if len(mask_targets) > 0: mask_targets = torch.cat(mask_targets) return mask_targets def mask_target_single(pos_proposals, pos_assigned_gt_inds, gt_masks, cfg): """Compute mask target for each positive proposal in the image. Args: pos_proposals (Tensor): Positive proposals. pos_assigned_gt_inds (Tensor): Assigned GT inds of positive proposals. gt_masks (:obj:`BaseInstanceMasks`): GT masks in the format of Bitmap or Polygon. cfg (dict): Config dict that indicate the mask size. Returns: Tensor: Mask target of each positive proposals in the image. 
Example: >>> import mmcv >>> import mmdet >>> from mmdet.core.mask import BitmapMasks >>> from mmdet.core.mask.mask_target import * # NOQA >>> H, W = 32, 32 >>> cfg = mmcv.Config({'mask_size': (7, 11)}) >>> rng = np.random.RandomState(0) >>> # Masks for each ground truth box (relative to the image) >>> gt_masks_data = rng.rand(3, H, W) >>> gt_masks = BitmapMasks(gt_masks_data, height=H, width=W) >>> # Predicted positive boxes in one image >>> pos_proposals = torch.FloatTensor([ >>> [ 16.2, 5.5, 19.9, 20.9], >>> [ 17.3, 13.6, 19.3, 19.3], >>> [ 14.8, 16.4, 17.0, 23.7], >>> [ 0.0, 0.0, 16.0, 16.0], >>> [ 4.0, 0.0, 20.0, 16.0], >>> ]) >>> # For each predicted proposal, its assignment to a gt mask >>> pos_assigned_gt_inds = torch.LongTensor([0, 1, 2, 1, 1]) >>> mask_targets = mask_target_single( >>> pos_proposals, pos_assigned_gt_inds, gt_masks, cfg) >>> assert mask_targets.shape == (5,) + cfg['mask_size'] """ device = pos_proposals.device mask_size = _pair(cfg.mask_size) binarize = not cfg.get('soft_mask_target', False) num_pos = pos_proposals.size(0) if num_pos > 0: proposals_np = pos_proposals.cpu().numpy() maxh, maxw = gt_masks.height, gt_masks.width proposals_np[:, [0, 2]] = np.clip(proposals_np[:, [0, 2]], 0, maxw) proposals_np[:, [1, 3]] = np.clip(proposals_np[:, [1, 3]], 0, maxh) pos_assigned_gt_inds = pos_assigned_gt_inds.cpu().numpy() mask_targets = gt_masks.crop_and_resize( proposals_np, mask_size, device=device, inds=pos_assigned_gt_inds, binarize=binarize).to_ndarray() mask_targets = torch.from_numpy(mask_targets).float().to(device) else: mask_targets = pos_proposals.new_zeros((0, ) + mask_size) return mask_targets
5,115
38.96875
78
py
mmdetection
mmdetection-master/mmdet/core/mask/structures.py
# Copyright (c) OpenMMLab. All rights reserved. from abc import ABCMeta, abstractmethod import cv2 import mmcv import numpy as np import pycocotools.mask as maskUtils import torch from mmcv.ops.roi_align import roi_align class BaseInstanceMasks(metaclass=ABCMeta): """Base class for instance masks.""" @abstractmethod def rescale(self, scale, interpolation='nearest'): """Rescale masks as large as possible while keeping the aspect ratio. For details can refer to `mmcv.imrescale`. Args: scale (tuple[int]): The maximum size (h, w) of rescaled mask. interpolation (str): Same as :func:`mmcv.imrescale`. Returns: BaseInstanceMasks: The rescaled masks. """ @abstractmethod def resize(self, out_shape, interpolation='nearest'): """Resize masks to the given out_shape. Args: out_shape: Target (h, w) of resized mask. interpolation (str): See :func:`mmcv.imresize`. Returns: BaseInstanceMasks: The resized masks. """ @abstractmethod def flip(self, flip_direction='horizontal'): """Flip masks alone the given direction. Args: flip_direction (str): Either 'horizontal' or 'vertical'. Returns: BaseInstanceMasks: The flipped masks. """ @abstractmethod def pad(self, out_shape, pad_val): """Pad masks to the given size of (h, w). Args: out_shape (tuple[int]): Target (h, w) of padded mask. pad_val (int): The padded value. Returns: BaseInstanceMasks: The padded masks. """ @abstractmethod def crop(self, bbox): """Crop each mask by the given bbox. Args: bbox (ndarray): Bbox in format [x1, y1, x2, y2], shape (4, ). Return: BaseInstanceMasks: The cropped masks. """ @abstractmethod def crop_and_resize(self, bboxes, out_shape, inds, device, interpolation='bilinear', binarize=True): """Crop and resize masks by the given bboxes. This function is mainly used in mask targets computation. It firstly align mask to bboxes by assigned_inds, then crop mask by the assigned bbox and resize to the size of (mask_h, mask_w) Args: bboxes (Tensor): Bboxes in format [x1, y1, x2, y2], shape (N, 4) out_shape (tuple[int]): Target (h, w) of resized mask inds (ndarray): Indexes to assign masks to each bbox, shape (N,) and values should be between [0, num_masks - 1]. device (str): Device of bboxes interpolation (str): See `mmcv.imresize` binarize (bool): if True fractional values are rounded to 0 or 1 after the resize operation. if False and unsupported an error will be raised. Defaults to True. Return: BaseInstanceMasks: the cropped and resized masks. """ @abstractmethod def expand(self, expanded_h, expanded_w, top, left): """see :class:`Expand`.""" @property @abstractmethod def areas(self): """ndarray: areas of each instance.""" @abstractmethod def to_ndarray(self): """Convert masks to the format of ndarray. Return: ndarray: Converted masks in the format of ndarray. """ @abstractmethod def to_tensor(self, dtype, device): """Convert masks to the format of Tensor. Args: dtype (str): Dtype of converted mask. device (torch.device): Device of converted masks. Returns: Tensor: Converted masks in the format of Tensor. """ @abstractmethod def translate(self, out_shape, offset, direction='horizontal', fill_val=0, interpolation='bilinear'): """Translate the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). offset (int | float): The offset for translate. direction (str): The translate direction, either "horizontal" or "vertical". fill_val (int | float): Border value. Default 0. interpolation (str): Same as :func:`mmcv.imtranslate`. Returns: Translated masks. 
""" def shear(self, out_shape, magnitude, direction='horizontal', border_value=0, interpolation='bilinear'): """Shear the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). magnitude (int | float): The magnitude used for shear. direction (str): The shear direction, either "horizontal" or "vertical". border_value (int | tuple[int]): Value used in case of a constant border. Default 0. interpolation (str): Same as in :func:`mmcv.imshear`. Returns: ndarray: Sheared masks. """ @abstractmethod def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0): """Rotate the masks. Args: out_shape (tuple[int]): Shape for output mask, format (h, w). angle (int | float): Rotation angle in degrees. Positive values mean counter-clockwise rotation. center (tuple[float], optional): Center point (w, h) of the rotation in source image. If not specified, the center of the image will be used. scale (int | float): Isotropic scale factor. fill_val (int | float): Border value. Default 0 for masks. Returns: Rotated masks. """ class BitmapMasks(BaseInstanceMasks): """This class represents masks in the form of bitmaps. Args: masks (ndarray): ndarray of masks in shape (N, H, W), where N is the number of objects. height (int): height of masks width (int): width of masks Example: >>> from mmdet.core.mask.structures import * # NOQA >>> num_masks, H, W = 3, 32, 32 >>> rng = np.random.RandomState(0) >>> masks = (rng.rand(num_masks, H, W) > 0.1).astype(np.int) >>> self = BitmapMasks(masks, height=H, width=W) >>> # demo crop_and_resize >>> num_boxes = 5 >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) >>> out_shape = (14, 14) >>> inds = torch.randint(0, len(self), size=(num_boxes,)) >>> device = 'cpu' >>> interpolation = 'bilinear' >>> new = self.crop_and_resize( ... bboxes, out_shape, inds, device, interpolation) >>> assert len(new) == num_boxes >>> assert new.height, new.width == out_shape """ def __init__(self, masks, height, width): self.height = height self.width = width if len(masks) == 0: self.masks = np.empty((0, self.height, self.width), dtype=np.uint8) else: assert isinstance(masks, (list, np.ndarray)) if isinstance(masks, list): assert isinstance(masks[0], np.ndarray) assert masks[0].ndim == 2 # (H, W) else: assert masks.ndim == 3 # (N, H, W) self.masks = np.stack(masks).reshape(-1, height, width) assert self.masks.shape[1] == self.height assert self.masks.shape[2] == self.width def __getitem__(self, index): """Index the BitmapMask. Args: index (int | ndarray): Indices in the format of integer or ndarray. Returns: :obj:`BitmapMasks`: Indexed bitmap masks. 
""" masks = self.masks[index].reshape(-1, self.height, self.width) return BitmapMasks(masks, self.height, self.width) def __iter__(self): return iter(self.masks) def __repr__(self): s = self.__class__.__name__ + '(' s += f'num_masks={len(self.masks)}, ' s += f'height={self.height}, ' s += f'width={self.width})' return s def __len__(self): """Number of masks.""" return len(self.masks) def rescale(self, scale, interpolation='nearest'): """See :func:`BaseInstanceMasks.rescale`.""" if len(self.masks) == 0: new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) rescaled_masks = np.empty((0, new_h, new_w), dtype=np.uint8) else: rescaled_masks = np.stack([ mmcv.imrescale(mask, scale, interpolation=interpolation) for mask in self.masks ]) height, width = rescaled_masks.shape[1:] return BitmapMasks(rescaled_masks, height, width) def resize(self, out_shape, interpolation='nearest'): """See :func:`BaseInstanceMasks.resize`.""" if len(self.masks) == 0: resized_masks = np.empty((0, *out_shape), dtype=np.uint8) else: resized_masks = np.stack([ mmcv.imresize( mask, out_shape[::-1], interpolation=interpolation) for mask in self.masks ]) return BitmapMasks(resized_masks, *out_shape) def flip(self, flip_direction='horizontal'): """See :func:`BaseInstanceMasks.flip`.""" assert flip_direction in ('horizontal', 'vertical', 'diagonal') if len(self.masks) == 0: flipped_masks = self.masks else: flipped_masks = np.stack([ mmcv.imflip(mask, direction=flip_direction) for mask in self.masks ]) return BitmapMasks(flipped_masks, self.height, self.width) def pad(self, out_shape, pad_val=0): """See :func:`BaseInstanceMasks.pad`.""" if len(self.masks) == 0: padded_masks = np.empty((0, *out_shape), dtype=np.uint8) else: padded_masks = np.stack([ mmcv.impad(mask, shape=out_shape, pad_val=pad_val) for mask in self.masks ]) return BitmapMasks(padded_masks, *out_shape) def crop(self, bbox): """See :func:`BaseInstanceMasks.crop`.""" assert isinstance(bbox, np.ndarray) assert bbox.ndim == 1 # clip the boundary bbox = bbox.copy() bbox[0::2] = np.clip(bbox[0::2], 0, self.width) bbox[1::2] = np.clip(bbox[1::2], 0, self.height) x1, y1, x2, y2 = bbox w = np.maximum(x2 - x1, 1) h = np.maximum(y2 - y1, 1) if len(self.masks) == 0: cropped_masks = np.empty((0, h, w), dtype=np.uint8) else: cropped_masks = self.masks[:, y1:y1 + h, x1:x1 + w] return BitmapMasks(cropped_masks, h, w) def crop_and_resize(self, bboxes, out_shape, inds, device='cpu', interpolation='bilinear', binarize=True): """See :func:`BaseInstanceMasks.crop_and_resize`.""" if len(self.masks) == 0: empty_masks = np.empty((0, *out_shape), dtype=np.uint8) return BitmapMasks(empty_masks, *out_shape) # convert bboxes to tensor if isinstance(bboxes, np.ndarray): bboxes = torch.from_numpy(bboxes).to(device=device) if isinstance(inds, np.ndarray): inds = torch.from_numpy(inds).to(device=device) num_bbox = bboxes.shape[0] fake_inds = torch.arange( num_bbox, device=device).to(dtype=bboxes.dtype)[:, None] rois = torch.cat([fake_inds, bboxes], dim=1) # Nx5 rois = rois.to(device=device) if num_bbox > 0: gt_masks_th = torch.from_numpy(self.masks).to(device).index_select( 0, inds).to(dtype=rois.dtype) targets = roi_align(gt_masks_th[:, None, :, :], rois, out_shape, 1.0, 0, 'avg', True).squeeze(1) if binarize: resized_masks = (targets >= 0.5).cpu().numpy() else: resized_masks = targets.cpu().numpy() else: resized_masks = [] return BitmapMasks(resized_masks, *out_shape) def expand(self, expanded_h, expanded_w, top, left): """See :func:`BaseInstanceMasks.expand`.""" if 
len(self.masks) == 0:
            expanded_mask = np.empty((0, expanded_h, expanded_w),
                                     dtype=np.uint8)
        else:
            expanded_mask = np.zeros((len(self), expanded_h, expanded_w),
                                     dtype=np.uint8)
            expanded_mask[:, top:top + self.height,
                          left:left + self.width] = self.masks
        return BitmapMasks(expanded_mask, expanded_h, expanded_w)

    def translate(self,
                  out_shape,
                  offset,
                  direction='horizontal',
                  fill_val=0,
                  interpolation='bilinear'):
        """Translate the BitmapMasks.

        Args:
            out_shape (tuple[int]): Shape for output mask, format (h, w).
            offset (int | float): The offset for translate.
            direction (str): The translate direction, either "horizontal"
                or "vertical".
            fill_val (int | float): Border value. Default 0 for masks.
            interpolation (str): Same as :func:`mmcv.imtranslate`.

        Returns:
            BitmapMasks: Translated BitmapMasks.

        Example:
            >>> from mmdet.core.mask.structures import BitmapMasks
            >>> self = BitmapMasks.random(dtype=np.uint8)
            >>> out_shape = (32, 32)
            >>> offset = 4
            >>> direction = 'horizontal'
            >>> fill_val = 0
            >>> interpolation = 'bilinear'
            >>> # Note: there seem to be issues when:
            >>> # * out_shape is different than self's shape
            >>> # * the mask dtype is not supported by cv2.AffineWarp
            >>> new = self.translate(out_shape, offset, direction, fill_val,
            >>>                      interpolation)
            >>> assert len(new) == len(self)
            >>> assert (new.height, new.width) == out_shape
        """
        if len(self.masks) == 0:
            translated_masks = np.empty((0, *out_shape), dtype=np.uint8)
        else:
            translated_masks = mmcv.imtranslate(
                self.masks.transpose((1, 2, 0)),
                offset,
                direction,
                border_value=fill_val,
                interpolation=interpolation)
            if translated_masks.ndim == 2:
                translated_masks = translated_masks[:, :, None]
            translated_masks = translated_masks.transpose(
                (2, 0, 1)).astype(self.masks.dtype)
        return BitmapMasks(translated_masks, *out_shape)

    def shear(self,
              out_shape,
              magnitude,
              direction='horizontal',
              border_value=0,
              interpolation='bilinear'):
        """Shear the BitmapMasks.

        Args:
            out_shape (tuple[int]): Shape for output mask, format (h, w).
            magnitude (int | float): The magnitude used for shear.
            direction (str): The shear direction, either "horizontal"
                or "vertical".
            border_value (int | tuple[int]): Value used in case of a
                constant border.
            interpolation (str): Same as in :func:`mmcv.imshear`.

        Returns:
            BitmapMasks: The sheared masks.
        """
        if len(self.masks) == 0:
            sheared_masks = np.empty((0, *out_shape), dtype=np.uint8)
        else:
            sheared_masks = mmcv.imshear(
                self.masks.transpose((1, 2, 0)),
                magnitude,
                direction,
                border_value=border_value,
                interpolation=interpolation)
            if sheared_masks.ndim == 2:
                sheared_masks = sheared_masks[:, :, None]
            sheared_masks = sheared_masks.transpose(
                (2, 0, 1)).astype(self.masks.dtype)
        return BitmapMasks(sheared_masks, *out_shape)

    def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
        """Rotate the BitmapMasks.

        Args:
            out_shape (tuple[int]): Shape for output mask, format (h, w).
            angle (int | float): Rotation angle in degrees. Positive values
                mean counter-clockwise rotation.
            center (tuple[float], optional): Center point (w, h) of the
                rotation in source image. If not specified, the center of
                the image will be used.
            scale (int | float): Isotropic scale factor.
            fill_val (int | float): Border value. Default 0 for masks.

        Returns:
            BitmapMasks: Rotated BitmapMasks.
""" if len(self.masks) == 0: rotated_masks = np.empty((0, *out_shape), dtype=self.masks.dtype) else: rotated_masks = mmcv.imrotate( self.masks.transpose((1, 2, 0)), angle, center=center, scale=scale, border_value=fill_val) if rotated_masks.ndim == 2: # case when only one mask, (h, w) rotated_masks = rotated_masks[:, :, None] # (h, w, 1) rotated_masks = rotated_masks.transpose( (2, 0, 1)).astype(self.masks.dtype) return BitmapMasks(rotated_masks, *out_shape) @property def areas(self): """See :py:attr:`BaseInstanceMasks.areas`.""" return self.masks.sum((1, 2)) def to_ndarray(self): """See :func:`BaseInstanceMasks.to_ndarray`.""" return self.masks def to_tensor(self, dtype, device): """See :func:`BaseInstanceMasks.to_tensor`.""" return torch.tensor(self.masks, dtype=dtype, device=device) @classmethod def random(cls, num_masks=3, height=32, width=32, dtype=np.uint8, rng=None): """Generate random bitmap masks for demo / testing purposes. Example: >>> from mmdet.core.mask.structures import BitmapMasks >>> self = BitmapMasks.random() >>> print('self = {}'.format(self)) self = BitmapMasks(num_masks=3, height=32, width=32) """ from mmdet.utils.util_random import ensure_rng rng = ensure_rng(rng) masks = (rng.rand(num_masks, height, width) > 0.1).astype(dtype) self = cls(masks, height=height, width=width) return self def get_bboxes(self): num_masks = len(self) boxes = np.zeros((num_masks, 4), dtype=np.float32) x_any = self.masks.any(axis=1) y_any = self.masks.any(axis=2) for idx in range(num_masks): x = np.where(x_any[idx, :])[0] y = np.where(y_any[idx, :])[0] if len(x) > 0 and len(y) > 0: # use +1 for x_max and y_max so that the right and bottom # boundary of instance masks are fully included by the box boxes[idx, :] = np.array([x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=np.float32) return boxes class PolygonMasks(BaseInstanceMasks): """This class represents masks in the form of polygons. Polygons is a list of three levels. The first level of the list corresponds to objects, the second level to the polys that compose the object, the third level to the poly coordinates Args: masks (list[list[ndarray]]): The first level of the list corresponds to objects, the second level to the polys that compose the object, the third level to the poly coordinates height (int): height of masks width (int): width of masks Example: >>> from mmdet.core.mask.structures import * # NOQA >>> masks = [ >>> [ np.array([0, 0, 10, 0, 10, 10., 0, 10, 0, 0]) ] >>> ] >>> height, width = 16, 16 >>> self = PolygonMasks(masks, height, width) >>> # demo translate >>> new = self.translate((16, 16), 4., direction='horizontal') >>> assert np.all(new.masks[0][0][1::2] == masks[0][0][1::2]) >>> assert np.all(new.masks[0][0][0::2] == masks[0][0][0::2] + 4) >>> # demo crop_and_resize >>> num_boxes = 3 >>> bboxes = np.array([[0, 0, 30, 10.0]] * num_boxes) >>> out_shape = (16, 16) >>> inds = torch.randint(0, len(self), size=(num_boxes,)) >>> device = 'cpu' >>> interpolation = 'bilinear' >>> new = self.crop_and_resize( ... bboxes, out_shape, inds, device, interpolation) >>> assert len(new) == num_boxes >>> assert new.height, new.width == out_shape """ def __init__(self, masks, height, width): assert isinstance(masks, list) if len(masks) > 0: assert isinstance(masks[0], list) assert isinstance(masks[0][0], np.ndarray) self.height = height self.width = width self.masks = masks def __getitem__(self, index): """Index the polygon masks. Args: index (ndarray | List): The indices. Returns: :obj:`PolygonMasks`: The indexed polygon masks. 
""" if isinstance(index, np.ndarray): index = index.tolist() if isinstance(index, list): masks = [self.masks[i] for i in index] else: try: masks = self.masks[index] except Exception: raise ValueError( f'Unsupported input of type {type(index)} for indexing!') if len(masks) and isinstance(masks[0], np.ndarray): masks = [masks] # ensure a list of three levels return PolygonMasks(masks, self.height, self.width) def __iter__(self): return iter(self.masks) def __repr__(self): s = self.__class__.__name__ + '(' s += f'num_masks={len(self.masks)}, ' s += f'height={self.height}, ' s += f'width={self.width})' return s def __len__(self): """Number of masks.""" return len(self.masks) def rescale(self, scale, interpolation=None): """see :func:`BaseInstanceMasks.rescale`""" new_w, new_h = mmcv.rescale_size((self.width, self.height), scale) if len(self.masks) == 0: rescaled_masks = PolygonMasks([], new_h, new_w) else: rescaled_masks = self.resize((new_h, new_w)) return rescaled_masks def resize(self, out_shape, interpolation=None): """see :func:`BaseInstanceMasks.resize`""" if len(self.masks) == 0: resized_masks = PolygonMasks([], *out_shape) else: h_scale = out_shape[0] / self.height w_scale = out_shape[1] / self.width resized_masks = [] for poly_per_obj in self.masks: resized_poly = [] for p in poly_per_obj: p = p.copy() p[0::2] = p[0::2] * w_scale p[1::2] = p[1::2] * h_scale resized_poly.append(p) resized_masks.append(resized_poly) resized_masks = PolygonMasks(resized_masks, *out_shape) return resized_masks def flip(self, flip_direction='horizontal'): """see :func:`BaseInstanceMasks.flip`""" assert flip_direction in ('horizontal', 'vertical', 'diagonal') if len(self.masks) == 0: flipped_masks = PolygonMasks([], self.height, self.width) else: flipped_masks = [] for poly_per_obj in self.masks: flipped_poly_per_obj = [] for p in poly_per_obj: p = p.copy() if flip_direction == 'horizontal': p[0::2] = self.width - p[0::2] elif flip_direction == 'vertical': p[1::2] = self.height - p[1::2] else: p[0::2] = self.width - p[0::2] p[1::2] = self.height - p[1::2] flipped_poly_per_obj.append(p) flipped_masks.append(flipped_poly_per_obj) flipped_masks = PolygonMasks(flipped_masks, self.height, self.width) return flipped_masks def crop(self, bbox): """see :func:`BaseInstanceMasks.crop`""" assert isinstance(bbox, np.ndarray) assert bbox.ndim == 1 # clip the boundary bbox = bbox.copy() bbox[0::2] = np.clip(bbox[0::2], 0, self.width) bbox[1::2] = np.clip(bbox[1::2], 0, self.height) x1, y1, x2, y2 = bbox w = np.maximum(x2 - x1, 1) h = np.maximum(y2 - y1, 1) if len(self.masks) == 0: cropped_masks = PolygonMasks([], h, w) else: cropped_masks = [] for poly_per_obj in self.masks: cropped_poly_per_obj = [] for p in poly_per_obj: # pycocotools will clip the boundary p = p.copy() p[0::2] = p[0::2] - bbox[0] p[1::2] = p[1::2] - bbox[1] cropped_poly_per_obj.append(p) cropped_masks.append(cropped_poly_per_obj) cropped_masks = PolygonMasks(cropped_masks, h, w) return cropped_masks def pad(self, out_shape, pad_val=0): """padding has no effect on polygons`""" return PolygonMasks(self.masks, *out_shape) def expand(self, *args, **kwargs): """TODO: Add expand for polygon""" raise NotImplementedError def crop_and_resize(self, bboxes, out_shape, inds, device='cpu', interpolation='bilinear', binarize=True): """see :func:`BaseInstanceMasks.crop_and_resize`""" out_h, out_w = out_shape if len(self.masks) == 0: return PolygonMasks([], out_h, out_w) if not binarize: raise ValueError('Polygons are always binary, ' 'setting binarize=False is 
unsupported')

        resized_masks = []
        for i in range(len(bboxes)):
            mask = self.masks[inds[i]]
            bbox = bboxes[i, :]
            x1, y1, x2, y2 = bbox
            w = np.maximum(x2 - x1, 1)
            h = np.maximum(y2 - y1, 1)
            h_scale = out_h / max(h, 0.1)  # avoid too large scale
            w_scale = out_w / max(w, 0.1)

            resized_mask = []
            for p in mask:
                p = p.copy()
                # crop
                # pycocotools will clip the boundary
                p[0::2] = p[0::2] - bbox[0]
                p[1::2] = p[1::2] - bbox[1]

                # resize
                p[0::2] = p[0::2] * w_scale
                p[1::2] = p[1::2] * h_scale
                resized_mask.append(p)
            resized_masks.append(resized_mask)
        return PolygonMasks(resized_masks, *out_shape)

    def translate(self,
                  out_shape,
                  offset,
                  direction='horizontal',
                  fill_val=None,
                  interpolation=None):
        """Translate the PolygonMasks.

        Example:
            >>> self = PolygonMasks.random(dtype=np.float32)
            >>> out_shape = (self.height, self.width)
            >>> new = self.translate(out_shape, 4., direction='horizontal')
            >>> assert np.all(new.masks[0][0][1::2] == self.masks[0][0][1::2])
            >>> assert np.all(new.masks[0][0][0::2] == self.masks[0][0][0::2] + 4)  # noqa: E501
        """
        assert fill_val is None or fill_val == 0, 'Here fill_val is not '\
            f'used, and should be None or 0 by default. Got {fill_val}.'
        if len(self.masks) == 0:
            translated_masks = PolygonMasks([], *out_shape)
        else:
            translated_masks = []
            for poly_per_obj in self.masks:
                translated_poly_per_obj = []
                for p in poly_per_obj:
                    p = p.copy()
                    if direction == 'horizontal':
                        p[0::2] = np.clip(p[0::2] + offset, 0, out_shape[1])
                    elif direction == 'vertical':
                        p[1::2] = np.clip(p[1::2] + offset, 0, out_shape[0])
                    translated_poly_per_obj.append(p)
                translated_masks.append(translated_poly_per_obj)
            translated_masks = PolygonMasks(translated_masks, *out_shape)
        return translated_masks

    def shear(self,
              out_shape,
              magnitude,
              direction='horizontal',
              border_value=0,
              interpolation='bilinear'):
        """See :func:`BaseInstanceMasks.shear`."""
        if len(self.masks) == 0:
            sheared_masks = PolygonMasks([], *out_shape)
        else:
            sheared_masks = []
            if direction == 'horizontal':
                shear_matrix = np.stack([[1, magnitude],
                                         [0, 1]]).astype(np.float32)
            elif direction == 'vertical':
                shear_matrix = np.stack([[1, 0], [magnitude,
                                                  1]]).astype(np.float32)
            for poly_per_obj in self.masks:
                sheared_poly = []
                for p in poly_per_obj:
                    p = np.stack([p[0::2], p[1::2]], axis=0)  # [2, n]
                    new_coords = np.matmul(shear_matrix, p)  # [2, n]
                    new_coords[0, :] = np.clip(new_coords[0, :], 0,
                                               out_shape[1])
                    new_coords[1, :] = np.clip(new_coords[1, :], 0,
                                               out_shape[0])
                    sheared_poly.append(
                        new_coords.transpose((1, 0)).reshape(-1))
                sheared_masks.append(sheared_poly)
            sheared_masks = PolygonMasks(sheared_masks, *out_shape)
        return sheared_masks

    def rotate(self, out_shape, angle, center=None, scale=1.0, fill_val=0):
        """See :func:`BaseInstanceMasks.rotate`."""
        if len(self.masks) == 0:
            rotated_masks = PolygonMasks([], *out_shape)
        else:
            rotated_masks = []
            rotate_matrix = cv2.getRotationMatrix2D(center, -angle, scale)
            for poly_per_obj in self.masks:
                rotated_poly = []
                for p in poly_per_obj:
                    p = p.copy()
                    coords = np.stack([p[0::2], p[1::2]], axis=1)  # [n, 2]
                    # pad 1 to convert from format [x, y] to homogeneous
                    # coordinates format [x, y, 1]
                    coords = np.concatenate(
                        (coords, np.ones((coords.shape[0], 1), coords.dtype)),
                        axis=1)  # [n, 3]
                    rotated_coords = np.matmul(
                        rotate_matrix[None, :, :],
                        coords[:, :, None])[..., 0]  # [n, 2, 1] -> [n, 2]
                    rotated_coords[:, 0] = np.clip(rotated_coords[:, 0], 0,
                                                   out_shape[1])
                    rotated_coords[:, 1] = np.clip(rotated_coords[:, 1], 0,
                                                   out_shape[0])
                    rotated_poly.append(rotated_coords.reshape(-1))
                rotated_masks.append(rotated_poly)
            rotated_masks = 
PolygonMasks(rotated_masks, *out_shape)
        return rotated_masks

    def to_bitmap(self):
        """convert polygon masks to bitmap masks."""
        bitmap_masks = self.to_ndarray()
        return BitmapMasks(bitmap_masks, self.height, self.width)

    @property
    def areas(self):
        """Compute areas of masks.

        This function is modified from `detectron2
        <https://github.com/facebookresearch/detectron2/blob/ffff8acc35ea88ad1cb1806ab0f00b4c1c5dbfd9/detectron2/structures/masks.py#L387>`_.
        The function only works with Polygons using the shoelace formula.

        Return:
            ndarray: areas of each instance
        """  # noqa: W501
        area = []
        for polygons_per_obj in self.masks:
            area_per_obj = 0
            for p in polygons_per_obj:
                area_per_obj += self._polygon_area(p[0::2], p[1::2])
            area.append(area_per_obj)
        return np.asarray(area)

    def _polygon_area(self, x, y):
        """Compute the area of a component of a polygon.

        Using the shoelace formula:
        https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates

        Args:
            x (ndarray): x coordinates of the component
            y (ndarray): y coordinates of the component

        Return:
            float: the area of the component
        """  # noqa: 501
        return 0.5 * np.abs(
            np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))

    def to_ndarray(self):
        """Convert masks to the format of ndarray."""
        if len(self.masks) == 0:
            return np.empty((0, self.height, self.width), dtype=np.uint8)
        bitmap_masks = []
        for poly_per_obj in self.masks:
            bitmap_masks.append(
                polygon_to_bitmap(poly_per_obj, self.height, self.width))
        return np.stack(bitmap_masks)

    def to_tensor(self, dtype, device):
        """See :func:`BaseInstanceMasks.to_tensor`."""
        if len(self.masks) == 0:
            return torch.empty((0, self.height, self.width),
                               dtype=dtype,
                               device=device)
        ndarray_masks = self.to_ndarray()
        return torch.tensor(ndarray_masks, dtype=dtype, device=device)

    @classmethod
    def random(cls,
               num_masks=3,
               height=32,
               width=32,
               n_verts=5,
               dtype=np.float32,
               rng=None):
        """Generate random polygon masks for demo / testing purposes.

        Adapted from [1]_

        References:
            .. [1] https://gitlab.kitware.com/computer-vision/kwimage/-/blob/928cae35ca8/kwimage/structs/polygon.py#L379  # noqa: E501

        Example:
            >>> from mmdet.core.mask.structures import PolygonMasks
            >>> self = PolygonMasks.random()
            >>> print('self = {}'.format(self))
        """
        from mmdet.utils.util_random import ensure_rng
        rng = ensure_rng(rng)

        def _gen_polygon(n, irregularity, spikeyness):
            """Creates the polygon by sampling points on a circle around the
            centre. Random noise is added by varying the angular spacing
            between sequential points, and by varying the radial distance of
            each point from the centre.

            Based on original code by Mike Ounsworth

            Args:
                n (int): number of vertices
                irregularity (float): [0,1] indicating how much variance there
                    is in the angular spacing of vertices. [0,1] will map to
                    [0, 2pi/numberOfVerts]
                spikeyness (float): [0,1] indicating how much variance there is
                    in each vertex from the circle of radius aveRadius. [0,1]
                    will map to [0, aveRadius]

            Returns:
                a list of vertices, in CCW order.
""" from scipy.stats import truncnorm # Generate around the unit circle cx, cy = (0.0, 0.0) radius = 1 tau = np.pi * 2 irregularity = np.clip(irregularity, 0, 1) * 2 * np.pi / n spikeyness = np.clip(spikeyness, 1e-9, 1) # generate n angle steps lower = (tau / n) - irregularity upper = (tau / n) + irregularity angle_steps = rng.uniform(lower, upper, n) # normalize the steps so that point 0 and point n+1 are the same k = angle_steps.sum() / (2 * np.pi) angles = (angle_steps / k).cumsum() + rng.uniform(0, tau) # Convert high and low values to be wrt the standard normal range # https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.truncnorm.html low = 0 high = 2 * radius mean = radius std = spikeyness a = (low - mean) / std b = (high - mean) / std tnorm = truncnorm(a=a, b=b, loc=mean, scale=std) # now generate the points radii = tnorm.rvs(n, random_state=rng) x_pts = cx + radii * np.cos(angles) y_pts = cy + radii * np.sin(angles) points = np.hstack([x_pts[:, None], y_pts[:, None]]) # Scale to 0-1 space points = points - points.min(axis=0) points = points / points.max(axis=0) # Randomly place within 0-1 space points = points * (rng.rand() * .8 + .2) min_pt = points.min(axis=0) max_pt = points.max(axis=0) high = (1 - max_pt) low = (0 - min_pt) offset = (rng.rand(2) * (high - low)) + low points = points + offset return points def _order_vertices(verts): """ References: https://stackoverflow.com/questions/1709283/how-can-i-sort-a-coordinate-list-for-a-rectangle-counterclockwise """ mlat = verts.T[0].sum() / len(verts) mlng = verts.T[1].sum() / len(verts) tau = np.pi * 2 angle = (np.arctan2(mlat - verts.T[0], verts.T[1] - mlng) + tau) % tau sortx = angle.argsort() verts = verts.take(sortx, axis=0) return verts # Generate a random exterior for each requested mask masks = [] for _ in range(num_masks): exterior = _order_vertices(_gen_polygon(n_verts, 0.9, 0.9)) exterior = (exterior * [(width, height)]).astype(dtype) masks.append([exterior.ravel()]) self = cls(masks, height, width) return self def get_bboxes(self): num_masks = len(self) boxes = np.zeros((num_masks, 4), dtype=np.float32) for idx, poly_per_obj in enumerate(self.masks): # simply use a number that is big enough for comparison with # coordinates xy_min = np.array([self.width * 2, self.height * 2], dtype=np.float32) xy_max = np.zeros(2, dtype=np.float32) for p in poly_per_obj: xy = np.array(p).reshape(-1, 2).astype(np.float32) xy_min = np.minimum(xy_min, np.min(xy, axis=0)) xy_max = np.maximum(xy_max, np.max(xy, axis=0)) boxes[idx, :2] = xy_min boxes[idx, 2:] = xy_max return boxes def polygon_to_bitmap(polygons, height, width): """Convert masks from the form of polygons to bitmaps. Args: polygons (list[ndarray]): masks in polygon representation height (int): mask height width (int): mask width Return: ndarray: the converted masks in bitmap representation """ rles = maskUtils.frPyObjects(polygons, height, width) rle = maskUtils.merge(rles) bitmap_mask = maskUtils.decode(rle).astype(bool) return bitmap_mask def bitmap_to_polygon(bitmap): """Convert masks from the form of bitmaps to polygons. Args: bitmap (ndarray): masks in bitmap representation. Return: list[ndarray]: the converted mask in polygon representation. bool: whether the mask has holes. """ bitmap = np.ascontiguousarray(bitmap).astype(np.uint8) # cv2.RETR_CCOMP: retrieves all of the contours and organizes them # into a two-level hierarchy. At the top level, there are external # boundaries of the components. At the second level, there are # boundaries of the holes. 
If there is another contour inside a hole # of a connected component, it is still put at the top level. # cv2.CHAIN_APPROX_NONE: stores absolutely all the contour points. outs = cv2.findContours(bitmap, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) contours = outs[-2] hierarchy = outs[-1] if hierarchy is None: return [], False # hierarchy[i]: 4 elements, for the indexes of next, previous, # parent, or nested contours. If there is no corresponding contour, # it will be -1. with_hole = (hierarchy.reshape(-1, 4)[:, 3] >= 0).any() contours = [c.reshape(-1, 2) for c in contours] return contours, with_hole
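
# Illustrative usage sketch for the two module-level helpers above: round-trip
# a hand-made square polygon through the bitmap representation. The square
# coordinates are arbitrary demo values in flat [x0, y0, x1, y1, ...] order.
if __name__ == '__main__':
    square = [np.array([2., 2., 12., 2., 12., 12., 2., 12.])]
    bitmap = polygon_to_bitmap(square, height=16, width=16)
    contours, with_hole = bitmap_to_polygon(bitmap)
    # A filled square yields a single external contour and no holes.
    assert len(contours) == 1 and not with_hole
    print(f'{int(bitmap.sum())} foreground pixels recovered')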
41,496
36.62194
141
py
mmdetection
mmdetection-master/mmdet/core/mask/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pycocotools.mask as mask_util
import torch


def split_combined_polys(polys, poly_lens, polys_per_mask):
    """Split the combined 1-D polys into masks.

    A mask is represented as a list of polys, and a poly is represented as
    a 1-D array. In dataset, all masks are concatenated into a single 1-D
    tensor. Here we need to split the tensor into original representations.

    Args:
        polys (list): a list (length = image num) of 1-D tensors
        poly_lens (list): a list (length = image num) of poly length
        polys_per_mask (list): a list (length = image num) of poly number
            of each mask

    Returns:
        list: a list (length = image num) of list (length = mask num) of \
            list (length = poly num) of numpy array.
    """
    mask_polys_list = []
    for img_id in range(len(polys)):
        polys_single = polys[img_id]
        polys_lens_single = poly_lens[img_id].tolist()
        polys_per_mask_single = polys_per_mask[img_id].tolist()

        split_polys = mmcv.slice_list(polys_single, polys_lens_single)
        mask_polys = mmcv.slice_list(split_polys, polys_per_mask_single)
        mask_polys_list.append(mask_polys)
    return mask_polys_list


# TODO: move this function to more proper place
def encode_mask_results(mask_results):
    """Encode bitmap mask to RLE code.

    Args:
        mask_results (list | tuple[list]): bitmap mask results.
            In mask scoring rcnn, mask_results is a tuple of (segm_results,
            segm_cls_score).

    Returns:
        list | tuple: RLE encoded mask.
    """
    if isinstance(mask_results, tuple):  # mask scoring
        cls_segms, cls_mask_scores = mask_results
    else:
        cls_segms = mask_results
    num_classes = len(cls_segms)
    encoded_mask_results = [[] for _ in range(num_classes)]
    for i in range(len(cls_segms)):
        for cls_segm in cls_segms[i]:
            encoded_mask_results[i].append(
                mask_util.encode(
                    np.array(
                        cls_segm[:, :, np.newaxis], order='F',
                        dtype='uint8'))[0])  # encoded with RLE
    if isinstance(mask_results, tuple):
        return encoded_mask_results, cls_mask_scores
    else:
        return encoded_mask_results


def mask2bbox(masks):
    """Obtain tight bounding boxes of binary masks.

    Args:
        masks (Tensor): Binary mask of shape (n, h, w).

    Returns:
        Tensor: Bboxes with shape (n, 4) of \
            positive regions in binary masks.
    """
    N = masks.shape[0]
    bboxes = masks.new_zeros((N, 4), dtype=torch.float32)
    x_any = torch.any(masks, dim=1)
    y_any = torch.any(masks, dim=2)
    for i in range(N):
        x = torch.where(x_any[i, :])[0]
        y = torch.where(y_any[i, :])[0]
        if len(x) > 0 and len(y) > 0:
            bboxes[i, :] = bboxes.new_tensor(
                [x[0], y[0], x[-1] + 1, y[-1] + 1])

    return bboxes
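
# Illustrative sketch of mask2bbox on two toy binary masks. The values are
# made up; note the +1 on the max side so boxes fully enclose the masks.
if __name__ == '__main__':
    toy = torch.zeros(2, 8, 8, dtype=torch.bool)
    toy[0, 1:4, 2:6] = True  # rows 1-3, cols 2-5 -> box [2, 1, 6, 4]
    toy[1, 5, 5] = True      # single pixel      -> box [5, 5, 6, 6]
    print(mask2bbox(toy))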
3,017
32.533333
75
py
mmdetection
mmdetection-master/mmdet/core/optimizers/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .builder import OPTIMIZER_BUILDERS, build_optimizer from .layer_decay_optimizer_constructor import \ LearningRateDecayOptimizerConstructor __all__ = [ 'LearningRateDecayOptimizerConstructor', 'OPTIMIZER_BUILDERS', 'build_optimizer' ]
300
29.1
66
py
mmdetection
mmdetection-master/mmdet/core/optimizers/builder.py
# Copyright (c) OpenMMLab. All rights reserved. import copy from mmcv.runner.optimizer import OPTIMIZER_BUILDERS as MMCV_OPTIMIZER_BUILDERS from mmcv.utils import Registry, build_from_cfg OPTIMIZER_BUILDERS = Registry( 'optimizer builder', parent=MMCV_OPTIMIZER_BUILDERS) def build_optimizer_constructor(cfg): constructor_type = cfg.get('type') if constructor_type in OPTIMIZER_BUILDERS: return build_from_cfg(cfg, OPTIMIZER_BUILDERS) elif constructor_type in MMCV_OPTIMIZER_BUILDERS: return build_from_cfg(cfg, MMCV_OPTIMIZER_BUILDERS) else: raise KeyError(f'{constructor_type} is not registered ' 'in the optimizer builder registry.') def build_optimizer(model, cfg): optimizer_cfg = copy.deepcopy(cfg) constructor_type = optimizer_cfg.pop('constructor', 'DefaultOptimizerConstructor') paramwise_cfg = optimizer_cfg.pop('paramwise_cfg', None) optim_constructor = build_optimizer_constructor( dict( type=constructor_type, optimizer_cfg=optimizer_cfg, paramwise_cfg=paramwise_cfg)) optimizer = optim_constructor(model) return optimizer
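
# Illustrative sketch: building a plain SGD optimizer for a toy module via
# the default constructor registered in mmcv. The hyper-parameters below are
# placeholders, not recommended values.
if __name__ == '__main__':
    import torch.nn as nn
    toy_model = nn.Linear(4, 2)
    optim_cfg = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=1e-4)
    optimizer = build_optimizer(toy_model, optim_cfg)
    print(type(optimizer).__name__)  # SGD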
1,218
34.852941
79
py
mmdetection
mmdetection-master/mmdet/core/optimizers/layer_decay_optimizer_constructor.py
# Copyright (c) OpenMMLab. All rights reserved. import json from mmcv.runner import DefaultOptimizerConstructor, get_dist_info from mmdet.utils import get_root_logger from .builder import OPTIMIZER_BUILDERS def get_layer_id_for_convnext(var_name, max_layer_id): """Get the layer id to set the different learning rates in ``layer_wise`` decay_type. Args: var_name (str): The key of the model. max_layer_id (int): Maximum layer id. Returns: int: The id number corresponding to different learning rate in ``LearningRateDecayOptimizerConstructor``. """ if var_name in ('backbone.cls_token', 'backbone.mask_token', 'backbone.pos_embed'): return 0 elif var_name.startswith('backbone.downsample_layers'): stage_id = int(var_name.split('.')[2]) if stage_id == 0: layer_id = 0 elif stage_id == 1: layer_id = 2 elif stage_id == 2: layer_id = 3 elif stage_id == 3: layer_id = max_layer_id return layer_id elif var_name.startswith('backbone.stages'): stage_id = int(var_name.split('.')[2]) block_id = int(var_name.split('.')[3]) if stage_id == 0: layer_id = 1 elif stage_id == 1: layer_id = 2 elif stage_id == 2: layer_id = 3 + block_id // 3 elif stage_id == 3: layer_id = max_layer_id return layer_id else: return max_layer_id + 1 def get_stage_id_for_convnext(var_name, max_stage_id): """Get the stage id to set the different learning rates in ``stage_wise`` decay_type. Args: var_name (str): The key of the model. max_stage_id (int): Maximum stage id. Returns: int: The id number corresponding to different learning rate in ``LearningRateDecayOptimizerConstructor``. """ if var_name in ('backbone.cls_token', 'backbone.mask_token', 'backbone.pos_embed'): return 0 elif var_name.startswith('backbone.downsample_layers'): return 0 elif var_name.startswith('backbone.stages'): stage_id = int(var_name.split('.')[2]) return stage_id + 1 else: return max_stage_id - 1 @OPTIMIZER_BUILDERS.register_module() class LearningRateDecayOptimizerConstructor(DefaultOptimizerConstructor): # Different learning rates are set for different layers of backbone. # Note: Currently, this optimizer constructor is built for ConvNeXt. def add_params(self, params, module, **kwargs): """Add all parameters of module to the params list. The parameters of the given module will be added to the list of param groups, with specific rules defined by paramwise_cfg. Args: params (list[dict]): A list of param groups, it will be modified in place. module (nn.Module): The module to be added. """ logger = get_root_logger() parameter_groups = {} logger.info(f'self.paramwise_cfg is {self.paramwise_cfg}') num_layers = self.paramwise_cfg.get('num_layers') + 2 decay_rate = self.paramwise_cfg.get('decay_rate') decay_type = self.paramwise_cfg.get('decay_type', 'layer_wise') logger.info('Build LearningRateDecayOptimizerConstructor ' f'{decay_type} {decay_rate} - {num_layers}') weight_decay = self.base_wd for name, param in module.named_parameters(): if not param.requires_grad: continue # frozen weights if len(param.shape) == 1 or name.endswith('.bias') or name in ( 'pos_embed', 'cls_token'): group_name = 'no_decay' this_weight_decay = 0. 
else: group_name = 'decay' this_weight_decay = weight_decay if 'layer_wise' in decay_type: if 'ConvNeXt' in module.backbone.__class__.__name__: layer_id = get_layer_id_for_convnext( name, self.paramwise_cfg.get('num_layers')) logger.info(f'set param {name} as id {layer_id}') else: raise NotImplementedError() elif decay_type == 'stage_wise': if 'ConvNeXt' in module.backbone.__class__.__name__: layer_id = get_stage_id_for_convnext(name, num_layers) logger.info(f'set param {name} as id {layer_id}') else: raise NotImplementedError() group_name = f'layer_{layer_id}_{group_name}' if group_name not in parameter_groups: scale = decay_rate**(num_layers - layer_id - 1) parameter_groups[group_name] = { 'weight_decay': this_weight_decay, 'params': [], 'param_names': [], 'lr_scale': scale, 'group_name': group_name, 'lr': scale * self.base_lr, } parameter_groups[group_name]['params'].append(param) parameter_groups[group_name]['param_names'].append(name) rank, _ = get_dist_info() if rank == 0: to_display = {} for key in parameter_groups: to_display[key] = { 'param_names': parameter_groups[key]['param_names'], 'lr_scale': parameter_groups[key]['lr_scale'], 'lr': parameter_groups[key]['lr'], 'weight_decay': parameter_groups[key]['weight_decay'], } logger.info(f'Param groups = {json.dumps(to_display, indent=2)}') params.extend(parameter_groups.values())
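
# Illustrative sketch of the layer-wise id/scale mapping used above. The
# parameter names follow the 'backbone.stages.<stage>.<block>' convention the
# helpers assume; num_layers=12 and decay_rate=0.7 are placeholder values in
# the style of a ConvNeXt config. The lr scale mirrors
# decay_rate ** (num_layers + 2 - layer_id - 1) as computed in add_params.
if __name__ == '__main__':
    num_layers, decay_rate = 12, 0.7
    for name in ('backbone.downsample_layers.0.0.weight',
                 'backbone.stages.2.4.depthwise_conv.weight',
                 'bbox_head.fc_cls.weight'):
        layer_id = get_layer_id_for_convnext(name, num_layers)
        scale = decay_rate**(num_layers + 2 - layer_id - 1)
        print(f'{name}: layer_id={layer_id}, lr_scale={scale:.4f}')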
5,856
36.787097
77
py
mmdetection
mmdetection-master/mmdet/core/post_processing/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .bbox_nms import fast_nms, multiclass_nms from .matrix_nms import mask_matrix_nms from .merge_augs import (merge_aug_bboxes, merge_aug_masks, merge_aug_proposals, merge_aug_scores) __all__ = [ 'multiclass_nms', 'merge_aug_proposals', 'merge_aug_bboxes', 'merge_aug_scores', 'merge_aug_masks', 'mask_matrix_nms', 'fast_nms' ]
412
36.545455
72
py
mmdetection
mmdetection-master/mmdet/core/post_processing/bbox_nms.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.ops.nms import batched_nms

from mmdet.core.bbox.iou_calculators import bbox_overlaps


def multiclass_nms(multi_bboxes,
                   multi_scores,
                   score_thr,
                   nms_cfg,
                   max_num=-1,
                   score_factors=None,
                   return_inds=False):
    """NMS for multi-class bboxes.

    Args:
        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
        multi_scores (Tensor): shape (n, #class+1), where the last column
            contains scores of the background class, but this will be ignored.
        score_thr (float): bbox threshold, bboxes with scores lower than it
            will not be considered.
        nms_cfg (dict): a dict that contains the arguments of nms operations
        max_num (int, optional): if there are more than max_num bboxes after
            NMS, only top max_num will be kept. Default to -1.
        score_factors (Tensor, optional): The factors multiplied to scores
            before applying NMS. Default to None.
        return_inds (bool, optional): Whether return the indices of kept
            bboxes. Default to False.

    Returns:
        tuple: (dets, labels, indices (optional)), tensors of shape (k, 5),
            (k), and (k). Dets are boxes with scores. Labels are 0-based.
    """
    num_classes = multi_scores.size(1) - 1
    # exclude background category
    if multi_bboxes.shape[1] > 4:
        bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
    else:
        bboxes = multi_bboxes[:, None].expand(
            multi_scores.size(0), num_classes, 4)

    scores = multi_scores[:, :-1]

    labels = torch.arange(num_classes, dtype=torch.long, device=scores.device)
    labels = labels.view(1, -1).expand_as(scores)

    bboxes = bboxes.reshape(-1, 4)
    scores = scores.reshape(-1)
    labels = labels.reshape(-1)

    if not torch.onnx.is_in_onnx_export():
        # NonZero not supported in TensorRT
        # remove low scoring boxes
        valid_mask = scores > score_thr
    # multiply score_factor after threshold to preserve more bboxes, improve
    # mAP by 1% for YOLOv3
    if score_factors is not None:
        # expand the shape to match original shape of score
        score_factors = score_factors.view(-1, 1).expand(
            multi_scores.size(0), num_classes)
        score_factors = score_factors.reshape(-1)
        scores = scores * score_factors

    if not torch.onnx.is_in_onnx_export():
        # NonZero not supported in TensorRT
        inds = valid_mask.nonzero(as_tuple=False).squeeze(1)
        bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds]
    else:
        # TensorRT NMS plugin has invalid output filled with -1
        # add dummy data to make detection output correct.
        bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0)
        scores = torch.cat([scores, scores.new_zeros(1)], dim=0)
        labels = torch.cat([labels, labels.new_zeros(1)], dim=0)

    if bboxes.numel() == 0:
        if torch.onnx.is_in_onnx_export():
            raise RuntimeError('[ONNX Error] Can not record NMS '
                               'as it has not been executed this time')
        dets = torch.cat([bboxes, scores[:, None]], -1)
        if return_inds:
            return dets, labels, inds
        else:
            return dets, labels

    dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)

    if max_num > 0:
        dets = dets[:max_num]
        keep = keep[:max_num]

    if return_inds:
        return dets, labels[keep], inds[keep]
    else:
        return dets, labels[keep]


def fast_nms(multi_bboxes,
             multi_scores,
             multi_coeffs,
             score_thr,
             iou_thr,
             top_k,
             max_num=-1):
    """Fast NMS in `YOLACT <https://arxiv.org/abs/1904.02689>`_.

    Fast NMS allows already-removed detections to suppress other detections so
    that every instance can be decided to be kept or discarded in parallel,
    which is not possible in traditional NMS. This relaxation allows us to
    implement Fast NMS entirely in standard GPU-accelerated matrix operations.
Args: multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) multi_scores (Tensor): shape (n, #class+1), where the last column contains scores of the background class, but this will be ignored. multi_coeffs (Tensor): shape (n, #class*coeffs_dim). score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_thr (float): IoU threshold to be considered as conflicted. top_k (int): if there are more than top_k bboxes before NMS, only top top_k will be kept. max_num (int): if there are more than max_num bboxes after NMS, only top max_num will be kept. If -1, keep all the bboxes. Default: -1. Returns: tuple: (dets, labels, coefficients), tensors of shape (k, 5), (k, 1), and (k, coeffs_dim). Dets are boxes with scores. Labels are 0-based. """ scores = multi_scores[:, :-1].t() # [#class, n] scores, idx = scores.sort(1, descending=True) idx = idx[:, :top_k].contiguous() scores = scores[:, :top_k] # [#class, topk] num_classes, num_dets = idx.size() boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4) coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1) iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk] iou.triu_(diagonal=1) iou_max, _ = iou.max(dim=1) # Now just filter out the ones higher than the threshold keep = iou_max <= iou_thr # Second thresholding introduces 0.2 mAP gain at negligible time cost keep *= scores > score_thr # Assign each kept detection to its corresponding class classes = torch.arange( num_classes, device=boxes.device)[:, None].expand_as(keep) classes = classes[keep] boxes = boxes[keep] coeffs = coeffs[keep] scores = scores[keep] # Only keep the top max_num highest scores across all classes scores, idx = scores.sort(0, descending=True) if max_num > 0: idx = idx[:max_num] scores = scores[:max_num] classes = classes[idx] boxes = boxes[idx] coeffs = coeffs[idx] cls_dets = torch.cat([boxes, scores[:, None]], dim=1) return cls_dets, classes, coeffs
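
# Illustrative sketch: class-aware NMS over three toy boxes with two
# foreground classes (the extra score column is background). The boxes,
# scores and thresholds are arbitrary demo values.
if __name__ == '__main__':
    boxes = torch.tensor([[10., 10., 50., 50.],
                          [12., 12., 52., 52.],
                          [80., 80., 120., 120.]])
    scores = torch.tensor([[0.9, 0.1, 0.0],
                           [0.8, 0.2, 0.0],
                           [0.1, 0.7, 0.2]])
    dets, labels = multiclass_nms(
        boxes, scores, score_thr=0.3,
        nms_cfg=dict(type='nms', iou_threshold=0.5))
    # The second box is suppressed by the first within class 0.
    print(dets, labels)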
6,495
36.767442
78
py
mmdetection
mmdetection-master/mmdet/core/post_processing/matrix_nms.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch


def mask_matrix_nms(masks,
                    labels,
                    scores,
                    filter_thr=-1,
                    nms_pre=-1,
                    max_num=-1,
                    kernel='gaussian',
                    sigma=2.0,
                    mask_area=None):
    """Matrix NMS for multi-class masks.

    Args:
        masks (Tensor): Has shape (num_instances, h, w)
        labels (Tensor): Labels of corresponding masks,
            has shape (num_instances,).
        scores (Tensor): Mask scores of corresponding masks,
            has shape (num_instances).
        filter_thr (float): Score threshold to filter the masks
            after matrix nms. Default: -1, which means do not
            use filter_thr.
        nms_pre (int): The max number of instances to do the matrix nms.
            Default: -1, which means do not use nms_pre.
        max_num (int, optional): If there are more than max_num masks after
            matrix nms, only top max_num will be kept. Default: -1,
            which means do not use max_num.
        kernel (str): 'linear' or 'gaussian'.
        sigma (float): std in gaussian method.
        mask_area (Tensor): The sum of seg_masks.

    Returns:
        tuple(Tensor): Processed mask results.

            - scores (Tensor): Updated scores, has shape (n,).
            - labels (Tensor): Remaining labels, has shape (n,).
            - masks (Tensor): Remaining masks, has shape (n, h, w).
            - keep_inds (Tensor): The indices of the remaining masks in the
                input masks, has shape (n,).
    """
    assert len(labels) == len(masks) == len(scores)
    if len(labels) == 0:
        return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros(
            0, *masks.shape[-2:]), labels.new_zeros(0)
    if mask_area is None:
        mask_area = masks.sum((1, 2)).float()
    else:
        assert len(masks) == len(mask_area)

    # sort and keep top nms_pre
    scores, sort_inds = torch.sort(scores, descending=True)

    keep_inds = sort_inds
    if nms_pre > 0 and len(sort_inds) > nms_pre:
        sort_inds = sort_inds[:nms_pre]
        keep_inds = keep_inds[:nms_pre]
        scores = scores[:nms_pre]
    masks = masks[sort_inds]
    mask_area = mask_area[sort_inds]
    labels = labels[sort_inds]

    num_masks = len(labels)
    flatten_masks = masks.reshape(num_masks, -1).float()
    # inter.
    inter_matrix = torch.mm(flatten_masks, flatten_masks.transpose(1, 0))
    expanded_mask_area = mask_area.expand(num_masks, num_masks)
    # Upper triangle iou matrix.
    iou_matrix = (inter_matrix /
                  (expanded_mask_area + expanded_mask_area.transpose(1, 0) -
                   inter_matrix)).triu(diagonal=1)
    # label_specific matrix.
    expanded_labels = labels.expand(num_masks, num_masks)
    # Upper triangle label matrix.
    label_matrix = (expanded_labels == expanded_labels.transpose(
        1, 0)).triu(diagonal=1)

    # IoU compensation
    compensate_iou, _ = (iou_matrix * label_matrix).max(0)
    compensate_iou = compensate_iou.expand(num_masks,
                                           num_masks).transpose(1, 0)

    # IoU decay
    decay_iou = iou_matrix * label_matrix

    # Calculate the decay_coefficient
    if kernel == 'gaussian':
        decay_matrix = torch.exp(-1 * sigma * (decay_iou**2))
        compensate_matrix = torch.exp(-1 * sigma * (compensate_iou**2))
        decay_coefficient, _ = (decay_matrix / compensate_matrix).min(0)
    elif kernel == 'linear':
        decay_matrix = (1 - decay_iou) / (1 - compensate_iou)
        decay_coefficient, _ = decay_matrix.min(0)
    else:
        raise NotImplementedError(
            f'{kernel} kernel is not supported in matrix nms!')
    # update the score.
scores = scores * decay_coefficient if filter_thr > 0: keep = scores >= filter_thr keep_inds = keep_inds[keep] if not keep.any(): return scores.new_zeros(0), labels.new_zeros(0), masks.new_zeros( 0, *masks.shape[-2:]), labels.new_zeros(0) masks = masks[keep] scores = scores[keep] labels = labels[keep] # sort and keep top max_num scores, sort_inds = torch.sort(scores, descending=True) keep_inds = keep_inds[sort_inds] if max_num > 0 and len(sort_inds) > max_num: sort_inds = sort_inds[:max_num] keep_inds = keep_inds[:max_num] scores = scores[:max_num] masks = masks[sort_inds] labels = labels[sort_inds] return scores, labels, masks, keep_inds
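
# Illustrative sketch: matrix NMS decays the score of an overlapping
# same-class mask instead of discarding it outright. All inputs are toy
# values made up for demonstration.
if __name__ == '__main__':
    masks = torch.zeros(3, 16, 16, dtype=torch.bool)
    masks[0, 2:10, 2:10] = True
    masks[1, 3:11, 3:11] = True   # heavy overlap with masks[0], same class
    masks[2, 12:15, 12:15] = True
    labels = torch.tensor([0, 0, 1])
    scores = torch.tensor([0.9, 0.8, 0.7])
    new_scores, new_labels, new_masks, keep_inds = mask_matrix_nms(
        masks, labels, scores)
    print(new_scores)  # the duplicate keeps a decayed, nonzero score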
4,622
36.893443
77
py
mmdetection
mmdetection-master/mmdet/core/post_processing/merge_augs.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import warnings import numpy as np import torch from mmcv import ConfigDict from mmcv.ops import nms from ..bbox import bbox_mapping_back def merge_aug_proposals(aug_proposals, img_metas, cfg): """Merge augmented proposals (multiscale, flip, etc.) Args: aug_proposals (list[Tensor]): proposals from different testing schemes, shape (n, 5). Note that they are not rescaled to the original image size. img_metas (list[dict]): list of image info dict where each dict has: 'img_shape', 'scale_factor', 'flip', and may also contain 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'. For details on the values of these keys see `mmdet/datasets/pipelines/formatting.py:Collect`. cfg (dict): rpn test config. Returns: Tensor: shape (n, 4), proposals corresponding to original image scale. """ cfg = copy.deepcopy(cfg) # deprecate arguments warning if 'nms' not in cfg or 'max_num' in cfg or 'nms_thr' in cfg: warnings.warn( 'In rpn_proposal or test_cfg, ' 'nms_thr has been moved to a dict named nms as ' 'iou_threshold, max_num has been renamed as max_per_img, ' 'name of original arguments and the way to specify ' 'iou_threshold of NMS will be deprecated.') if 'nms' not in cfg: cfg.nms = ConfigDict(dict(type='nms', iou_threshold=cfg.nms_thr)) if 'max_num' in cfg: if 'max_per_img' in cfg: assert cfg.max_num == cfg.max_per_img, f'You set max_num and ' \ f'max_per_img at the same time, but get {cfg.max_num} ' \ f'and {cfg.max_per_img} respectively' \ f'Please delete max_num which will be deprecated.' else: cfg.max_per_img = cfg.max_num if 'nms_thr' in cfg: assert cfg.nms.iou_threshold == cfg.nms_thr, f'You set ' \ f'iou_threshold in nms and ' \ f'nms_thr at the same time, but get ' \ f'{cfg.nms.iou_threshold} and {cfg.nms_thr}' \ f' respectively. Please delete the nms_thr ' \ f'which will be deprecated.' recovered_proposals = [] for proposals, img_info in zip(aug_proposals, img_metas): img_shape = img_info['img_shape'] scale_factor = img_info['scale_factor'] flip = img_info['flip'] flip_direction = img_info['flip_direction'] _proposals = proposals.clone() _proposals[:, :4] = bbox_mapping_back(_proposals[:, :4], img_shape, scale_factor, flip, flip_direction) recovered_proposals.append(_proposals) aug_proposals = torch.cat(recovered_proposals, dim=0) merged_proposals, _ = nms(aug_proposals[:, :4].contiguous(), aug_proposals[:, -1].contiguous(), cfg.nms.iou_threshold) scores = merged_proposals[:, 4] _, order = scores.sort(0, descending=True) num = min(cfg.max_per_img, merged_proposals.shape[0]) order = order[:num] merged_proposals = merged_proposals[order, :] return merged_proposals def merge_aug_bboxes(aug_bboxes, aug_scores, img_metas, rcnn_test_cfg): """Merge augmented detection bboxes and scores. Args: aug_bboxes (list[Tensor]): shape (n, 4*#class) aug_scores (list[Tensor] or None): shape (n, #class) img_shapes (list[Tensor]): shape (3, ). rcnn_test_cfg (dict): rcnn test config. 
Returns: tuple: (bboxes, scores) """ recovered_bboxes = [] for bboxes, img_info in zip(aug_bboxes, img_metas): img_shape = img_info[0]['img_shape'] scale_factor = img_info[0]['scale_factor'] flip = img_info[0]['flip'] flip_direction = img_info[0]['flip_direction'] bboxes = bbox_mapping_back(bboxes, img_shape, scale_factor, flip, flip_direction) recovered_bboxes.append(bboxes) bboxes = torch.stack(recovered_bboxes).mean(dim=0) if aug_scores is None: return bboxes else: scores = torch.stack(aug_scores).mean(dim=0) return bboxes, scores def merge_aug_scores(aug_scores): """Merge augmented bbox scores.""" if isinstance(aug_scores[0], torch.Tensor): return torch.mean(torch.stack(aug_scores), dim=0) else: return np.mean(aug_scores, axis=0) def merge_aug_masks(aug_masks, img_metas, rcnn_test_cfg, weights=None): """Merge augmented mask prediction. Args: aug_masks (list[ndarray]): shape (n, #class, h, w) img_shapes (list[ndarray]): shape (3, ). rcnn_test_cfg (dict): rcnn test config. Returns: tuple: (bboxes, scores) """ recovered_masks = [] for mask, img_info in zip(aug_masks, img_metas): flip = img_info[0]['flip'] if flip: flip_direction = img_info[0]['flip_direction'] if flip_direction == 'horizontal': mask = mask[:, :, :, ::-1] elif flip_direction == 'vertical': mask = mask[:, :, ::-1, :] elif flip_direction == 'diagonal': mask = mask[:, :, :, ::-1] mask = mask[:, :, ::-1, :] else: raise ValueError( f"Invalid flipping direction '{flip_direction}'") recovered_masks.append(mask) if weights is None: merged_masks = np.mean(recovered_masks, axis=0) else: merged_masks = np.average( np.array(recovered_masks), axis=0, weights=np.array(weights)) return merged_masks
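
# Illustrative sketch: merging per-class scores and flipped mask predictions
# from two augmented passes. The arrays and metas below are minimal
# placeholders; a flipped prediction is flipped back before averaging, so
# merging a mask with its mirrored copy recovers the original.
if __name__ == '__main__':
    print(merge_aug_scores([torch.tensor([0.9, 0.1]),
                            torch.tensor([0.7, 0.3])]))  # tensor([0.8, 0.2])
    toy_masks = np.random.rand(2, 1, 4, 4)
    metas = [[dict(flip=False)],
             [dict(flip=True, flip_direction='horizontal')]]
    merged = merge_aug_masks([toy_masks, toy_masks[:, :, :, ::-1]],
                             metas, None)
    assert np.allclose(merged, toy_masks)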
5,790
36.36129
78
py
mmdetection
mmdetection-master/mmdet/core/utils/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .dist_utils import (DistOptimizerHook, all_reduce_dict, allreduce_grads, reduce_mean, sync_random_seed) from .misc import (center_of_mass, filter_scores_and_topk, flip_tensor, generate_coordinate, mask2ndarray, multi_apply, select_single_mlvl, unmap) __all__ = [ 'allreduce_grads', 'DistOptimizerHook', 'reduce_mean', 'multi_apply', 'unmap', 'mask2ndarray', 'flip_tensor', 'all_reduce_dict', 'center_of_mass', 'generate_coordinate', 'select_single_mlvl', 'filter_scores_and_topk', 'sync_random_seed' ]
635
44.428571
77
py
mmdetection
mmdetection-master/mmdet/core/utils/dist_utils.py
# Copyright (c) OpenMMLab. All rights reserved. import functools import pickle import warnings from collections import OrderedDict import numpy as np import torch import torch.distributed as dist from mmcv.runner import OptimizerHook, get_dist_info from torch._utils import (_flatten_dense_tensors, _take_tensors, _unflatten_dense_tensors) def _allreduce_coalesced(tensors, world_size, bucket_size_mb=-1): if bucket_size_mb > 0: bucket_size_bytes = bucket_size_mb * 1024 * 1024 buckets = _take_tensors(tensors, bucket_size_bytes) else: buckets = OrderedDict() for tensor in tensors: tp = tensor.type() if tp not in buckets: buckets[tp] = [] buckets[tp].append(tensor) buckets = buckets.values() for bucket in buckets: flat_tensors = _flatten_dense_tensors(bucket) dist.all_reduce(flat_tensors) flat_tensors.div_(world_size) for tensor, synced in zip( bucket, _unflatten_dense_tensors(flat_tensors, bucket)): tensor.copy_(synced) def allreduce_grads(params, coalesce=True, bucket_size_mb=-1): """Allreduce gradients. Args: params (list[torch.Parameters]): List of parameters of a model coalesce (bool, optional): Whether allreduce parameters as a whole. Defaults to True. bucket_size_mb (int, optional): Size of bucket, the unit is MB. Defaults to -1. """ grads = [ param.grad.data for param in params if param.requires_grad and param.grad is not None ] world_size = dist.get_world_size() if coalesce: _allreduce_coalesced(grads, world_size, bucket_size_mb) else: for tensor in grads: dist.all_reduce(tensor.div_(world_size)) class DistOptimizerHook(OptimizerHook): """Deprecated optimizer hook for distributed training.""" def __init__(self, *args, **kwargs): warnings.warn('"DistOptimizerHook" is deprecated, please switch to' '"mmcv.runner.OptimizerHook".') super().__init__(*args, **kwargs) def reduce_mean(tensor): """"Obtain the mean of tensor on different GPUs.""" if not (dist.is_available() and dist.is_initialized()): return tensor tensor = tensor.clone() dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) return tensor def obj2tensor(pyobj, device='cuda'): """Serialize picklable python object to tensor.""" storage = torch.ByteStorage.from_buffer(pickle.dumps(pyobj)) return torch.ByteTensor(storage).to(device=device) def tensor2obj(tensor): """Deserialize tensor to picklable python object.""" return pickle.loads(tensor.cpu().numpy().tobytes()) @functools.lru_cache() def _get_global_gloo_group(): """Return a process group based on gloo backend, containing all the ranks The result is cached.""" if dist.get_backend() == 'nccl': return dist.new_group(backend='gloo') else: return dist.group.WORLD def all_reduce_dict(py_dict, op='sum', group=None, to_float=True): """Apply all reduce function for python dict object. The code is modified from https://github.com/Megvii- BaseDetection/YOLOX/blob/main/yolox/utils/allreduce_norm.py. NOTE: make sure that py_dict in different ranks has the same keys and the values should be in the same shape. Currently only supports nccl backend. Args: py_dict (dict): Dict to be applied all reduce op. op (str): Operator, could be 'sum' or 'mean'. Default: 'sum' group (:obj:`torch.distributed.group`, optional): Distributed group, Default: None. to_float (bool): Whether to convert all values of dict to float. Default: True. Returns: OrderedDict: reduced python dict object. """ warnings.warn( 'group` is deprecated. Currently only supports NCCL backend.') _, world_size = get_dist_info() if world_size == 1: return py_dict # all reduce logic across different devices. 
py_key = list(py_dict.keys()) if not isinstance(py_dict, OrderedDict): py_key_tensor = obj2tensor(py_key) dist.broadcast(py_key_tensor, src=0) py_key = tensor2obj(py_key_tensor) tensor_shapes = [py_dict[k].shape for k in py_key] tensor_numels = [py_dict[k].numel() for k in py_key] if to_float: warnings.warn('Note: the "to_float" is True, you need to ' 'ensure that the behavior is reasonable.') flatten_tensor = torch.cat( [py_dict[k].flatten().float() for k in py_key]) else: flatten_tensor = torch.cat([py_dict[k].flatten() for k in py_key]) dist.all_reduce(flatten_tensor, op=dist.ReduceOp.SUM) if op == 'mean': flatten_tensor /= world_size split_tensors = [ x.reshape(shape) for x, shape in zip( torch.split(flatten_tensor, tensor_numels), tensor_shapes) ] out_dict = {k: v for k, v in zip(py_key, split_tensors)} if isinstance(py_dict, OrderedDict): out_dict = OrderedDict(out_dict) return out_dict def sync_random_seed(seed=None, device='cuda'): """Make sure different ranks share the same seed. All workers must call this function, otherwise it will deadlock. This method is generally used in `DistributedSampler`, because the seed should be identical across all processes in the distributed group. In distributed sampling, different ranks should sample non-overlapped data in the dataset. Therefore, this function is used to make sure that each rank shuffles the data indices in the same order based on the same seed. Then different ranks could use different indices to select non-overlapped data from the same data list. Args: seed (int, Optional): The seed. Default to None. device (str): The device where the seed will be put on. Default to 'cuda'. Returns: int: Seed to be used. """ if seed is None: seed = np.random.randint(2**31) assert isinstance(seed, int) rank, world_size = get_dist_info() if world_size == 1: return seed if rank == 0: random_num = torch.tensor(seed, dtype=torch.int32, device=device) else: random_num = torch.tensor(0, dtype=torch.int32, device=device) dist.broadcast(random_num, src=0) return random_num.item()
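
# Illustrative sketch: the pickle helpers above round-trip any picklable
# object (device='cpu' keeps the demo GPU-free), and reduce_mean simply
# returns its input when torch.distributed has not been initialised.
if __name__ == '__main__':
    payload = {'epoch': 3, 'classes': ['cat', 'dog']}
    assert tensor2obj(obj2tensor(payload, device='cpu')) == payload
    print(reduce_mean(torch.tensor([1.0, 3.0])))  # unchanged single-process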
6,525
32.639175
77
py
mmdetection
mmdetection-master/mmdet/core/utils/misc.py
# Copyright (c) OpenMMLab. All rights reserved. from functools import partial import numpy as np import torch from six.moves import map, zip from ..mask.structures import BitmapMasks, PolygonMasks def multi_apply(func, *args, **kwargs): """Apply function to a list of arguments. Note: This function applies the ``func`` to multiple inputs and map the multiple outputs of the ``func`` into different list. Each list contains the same type of outputs corresponding to different inputs. Args: func (Function): A function that will be applied to a list of arguments Returns: tuple(list): A tuple containing multiple list, each list contains \ a kind of returned results by the function """ pfunc = partial(func, **kwargs) if kwargs else func map_results = map(pfunc, *args) return tuple(map(list, zip(*map_results))) def unmap(data, count, inds, fill=0): """Unmap a subset of item (data) back to the original set of items (of size count)""" if data.dim() == 1: ret = data.new_full((count, ), fill) ret[inds.type(torch.bool)] = data else: new_size = (count, ) + data.size()[1:] ret = data.new_full(new_size, fill) ret[inds.type(torch.bool), :] = data return ret def mask2ndarray(mask): """Convert Mask to ndarray.. Args: mask (:obj:`BitmapMasks` or :obj:`PolygonMasks` or torch.Tensor or np.ndarray): The mask to be converted. Returns: np.ndarray: Ndarray mask of shape (n, h, w) that has been converted """ if isinstance(mask, (BitmapMasks, PolygonMasks)): mask = mask.to_ndarray() elif isinstance(mask, torch.Tensor): mask = mask.detach().cpu().numpy() elif not isinstance(mask, np.ndarray): raise TypeError(f'Unsupported {type(mask)} data type') return mask def flip_tensor(src_tensor, flip_direction): """flip tensor base on flip_direction. Args: src_tensor (Tensor): input feature map, shape (B, C, H, W). flip_direction (str): The flipping direction. Options are 'horizontal', 'vertical', 'diagonal'. Returns: out_tensor (Tensor): Flipped tensor. """ assert src_tensor.ndim == 4 valid_directions = ['horizontal', 'vertical', 'diagonal'] assert flip_direction in valid_directions if flip_direction == 'horizontal': out_tensor = torch.flip(src_tensor, [3]) elif flip_direction == 'vertical': out_tensor = torch.flip(src_tensor, [2]) else: out_tensor = torch.flip(src_tensor, [2, 3]) return out_tensor def select_single_mlvl(mlvl_tensors, batch_id, detach=True): """Extract a multi-scale single image tensor from a multi-scale batch tensor based on batch index. Note: The default value of detach is True, because the proposal gradient needs to be detached during the training of the two-stage model. E.g Cascade Mask R-CNN. Args: mlvl_tensors (list[Tensor]): Batch tensor for all scale levels, each is a 4D-tensor. batch_id (int): Batch index. detach (bool): Whether detach gradient. Default True. Returns: list[Tensor]: Multi-scale single image tensor. """ assert isinstance(mlvl_tensors, (list, tuple)) num_levels = len(mlvl_tensors) if detach: mlvl_tensor_list = [ mlvl_tensors[i][batch_id].detach() for i in range(num_levels) ] else: mlvl_tensor_list = [ mlvl_tensors[i][batch_id] for i in range(num_levels) ] return mlvl_tensor_list def filter_scores_and_topk(scores, score_thr, topk, results=None): """Filter results using score threshold and topk candidates. Args: scores (Tensor): The scores, shape (num_bboxes, K). score_thr (float): The score filter threshold. topk (int): The number of topk candidates. results (dict or list or Tensor, Optional): The results to which the filtering rule is to be applied. 
The shape of each item is (num_bboxes, N). Returns: tuple: Filtered results - scores (Tensor): The scores after being filtered, \ shape (num_bboxes_filtered, ). - labels (Tensor): The class labels, shape \ (num_bboxes_filtered, ). - anchor_idxs (Tensor): The anchor indexes, shape \ (num_bboxes_filtered, ). - filtered_results (dict or list or Tensor, Optional): \ The filtered results. The shape of each item is \ (num_bboxes_filtered, N). """ valid_mask = scores > score_thr scores = scores[valid_mask] valid_idxs = torch.nonzero(valid_mask) num_topk = min(topk, valid_idxs.size(0)) # torch.sort is actually faster than .topk (at least on GPUs) scores, idxs = scores.sort(descending=True) scores = scores[:num_topk] topk_idxs = valid_idxs[idxs[:num_topk]] keep_idxs, labels = topk_idxs.unbind(dim=1) filtered_results = None if results is not None: if isinstance(results, dict): filtered_results = {k: v[keep_idxs] for k, v in results.items()} elif isinstance(results, list): filtered_results = [result[keep_idxs] for result in results] elif isinstance(results, torch.Tensor): filtered_results = results[keep_idxs] else: raise NotImplementedError(f'Only supports dict or list or Tensor, ' f'but get {type(results)}.') return scores, labels, keep_idxs, filtered_results def center_of_mass(mask, esp=1e-6): """Calculate the centroid coordinates of the mask. Args: mask (Tensor): The mask to be calculated, shape (h, w). esp (float): Avoid dividing by zero. Default: 1e-6. Returns: tuple[Tensor]: the coordinates of the center point of the mask. - center_h (Tensor): the center point of the height. - center_w (Tensor): the center point of the width. """ h, w = mask.shape grid_h = torch.arange(h, device=mask.device)[:, None] grid_w = torch.arange(w, device=mask.device) normalizer = mask.sum().float().clamp(min=esp) center_h = (mask * grid_h).sum() / normalizer center_w = (mask * grid_w).sum() / normalizer return center_h, center_w def generate_coordinate(featmap_sizes, device='cuda'): """Generate the coordinate. Args: featmap_sizes (tuple): The feature to be calculated, of shape (N, C, W, H). device (str): The device where the feature will be put on. Returns: coord_feat (Tensor): The coordinate feature, of shape (N, 2, W, H). """ x_range = torch.linspace(-1, 1, featmap_sizes[-1], device=device) y_range = torch.linspace(-1, 1, featmap_sizes[-2], device=device) y, x = torch.meshgrid(y_range, x_range) y = y.expand([featmap_sizes[0], 1, -1, -1]) x = x.expand([featmap_sizes[0], 1, -1, -1]) coord_feat = torch.cat([x, y], 1) return coord_feat
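
# Illustrative sketch of two helpers above, on toy inputs. multi_apply maps
# a function over parallel argument lists and transposes the outputs;
# filter_scores_and_topk thresholds a (num_bboxes, num_classes) score matrix
# and keeps the top-k candidates.
if __name__ == '__main__':
    squares, cubes = multi_apply(lambda x: (x**2, x**3), [1, 2, 3])
    assert squares == [1, 4, 9] and cubes == [1, 8, 27]

    cls_scores = torch.tensor([[0.9, 0.05],
                               [0.2, 0.6],
                               [0.1, 0.1]])
    scores, labels, keep_idxs, _ = filter_scores_and_topk(
        cls_scores, score_thr=0.3, topk=2)
    print(scores, labels, keep_idxs)  # [0.9, 0.6], classes [0, 1], boxes [0, 1]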
7,147
33.200957
79
py
mmdetection
mmdetection-master/mmdet/core/visualization/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .image import (color_val_matplotlib, imshow_det_bboxes, imshow_gt_det_bboxes) from .palette import get_palette, palette_val __all__ = [ 'imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib', 'palette_val', 'get_palette' ]
318
30.9
72
py
mmdetection
mmdetection-master/mmdet/core/visualization/image.py
# Copyright (c) OpenMMLab. All rights reserved.
import sys

import cv2
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import pycocotools.mask as mask_util
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon

from mmdet.core.evaluation.panoptic_utils import INSTANCE_OFFSET
from ..mask.structures import bitmap_to_polygon
from ..utils import mask2ndarray
from .palette import get_palette, palette_val

__all__ = [
    'color_val_matplotlib', 'draw_masks', 'draw_bboxes', 'draw_labels',
    'imshow_det_bboxes', 'imshow_gt_det_bboxes'
]

EPS = 1e-2


def color_val_matplotlib(color):
    """Convert various input in BGR order to normalized RGB matplotlib color
    tuples.

    Args:
        color (:obj`Color` | str | tuple | int | ndarray): Color inputs.

    Returns:
        tuple[float]: A tuple of 3 normalized floats indicating RGB channels.
    """
    color = mmcv.color_val(color)
    color = [color / 255 for color in color[::-1]]
    return tuple(color)


def _get_adaptive_scales(areas, min_area=800, max_area=30000):
    """Get adaptive scales according to areas.

    The scale range is [0.5, 1.0]. When the area is smaller than
    ``min_area``, the scale is 0.5; when the area is larger than
    ``max_area``, the scale is 1.0.

    Args:
        areas (ndarray): The areas of bboxes or masks with the
            shape of (n, ).
        min_area (int): Lower bound areas for adaptive scales.
            Default: 800.
        max_area (int): Upper bound areas for adaptive scales.
            Default: 30000.

    Returns:
        ndarray: The adaptive scales with the shape of (n, ).
    """
    scales = 0.5 + (areas - min_area) / (max_area - min_area)
    scales = np.clip(scales, 0.5, 1.0)
    return scales


def _get_bias_color(base, max_dist=30):
    """Get a different color for each mask.

    Get a different color for each mask by adding a bias color to the base
    category color.

    Args:
        base (ndarray): The base category color with the shape of (3, ).
        max_dist (int): The max distance of bias. Default: 30.

    Returns:
        ndarray: The new color for a mask with the shape of (3, ).
    """
    new_color = base + np.random.randint(
        low=-max_dist, high=max_dist + 1, size=3)
    return np.clip(new_color, 0, 255, new_color)


def draw_bboxes(ax, bboxes, color='g', alpha=0.8, thickness=2):
    """Draw bounding boxes on the axes.

    Args:
        ax (matplotlib.Axes): The input axes.
        bboxes (ndarray): The input bounding boxes with the shape
            of (n, 4).
        color (list[tuple] | matplotlib.color): The colors of each
            bounding box.
        alpha (float): Transparency of bounding boxes. Default: 0.8.
        thickness (int): Thickness of lines. Default: 2.

    Returns:
        matplotlib.Axes: The result axes.
    """
    polygons = []
    for i, bbox in enumerate(bboxes):
        bbox_int = bbox.astype(np.int32)
        poly = [[bbox_int[0], bbox_int[1]], [bbox_int[0], bbox_int[3]],
                [bbox_int[2], bbox_int[3]], [bbox_int[2], bbox_int[1]]]
        np_poly = np.array(poly).reshape((4, 2))
        polygons.append(Polygon(np_poly))
    p = PatchCollection(
        polygons,
        facecolor='none',
        edgecolors=color,
        linewidths=thickness,
        alpha=alpha)
    ax.add_collection(p)

    return ax


def draw_labels(ax,
                labels,
                positions,
                scores=None,
                class_names=None,
                color='w',
                font_size=8,
                scales=None,
                horizontal_alignment='left'):
    """Draw labels on the axes.

    Args:
        ax (matplotlib.Axes): The input axes.
        labels (ndarray): The labels with the shape of (n, ).
        positions (ndarray): The positions to draw each label.
        scores (ndarray): The scores for each label.
        class_names (list[str]): The class names.
        color (list[tuple] | matplotlib.color): The colors for labels.
        font_size (int): Font size of texts. Default: 8.
        scales (list[float]): Scales of texts. Default: None.
        horizontal_alignment (str): The horizontal alignment method of
            texts. Default: 'left'.

    Returns:
        matplotlib.Axes: The result axes.
    """
    for i, (pos, label) in enumerate(zip(positions, labels)):
        label_text = class_names[
            label] if class_names is not None else f'class {label}'
        if scores is not None:
            label_text += f'|{scores[i]:.02f}'
        text_color = color[i] if isinstance(color, list) else color

        font_size_mask = font_size if scales is None else font_size * scales[i]
        ax.text(
            pos[0],
            pos[1],
            f'{label_text}',
            bbox={
                'facecolor': 'black',
                'alpha': 0.8,
                'pad': 0.7,
                'edgecolor': 'none'
            },
            color=text_color,
            fontsize=font_size_mask,
            verticalalignment='top',
            horizontalalignment=horizontal_alignment)

    return ax


def draw_masks(ax, img, masks, color=None, with_edge=True, alpha=0.8):
    """Draw masks on the image and their edges on the axes.

    Args:
        ax (matplotlib.Axes): The input axes.
        img (ndarray): The image with the shape of (h, w, 3).
        masks (ndarray): The masks with the shape of (n, h, w).
        color (ndarray): The colors for each mask with the shape of (n, 3).
        with_edge (bool): Whether to draw edges. Default: True.
        alpha (float): Transparency of masks. Default: 0.8.

    Returns:
        matplotlib.Axes: The result axes.
        ndarray: The result image.
    """
    # Reserve pure black (the text-box color) so sampled mask colors never
    # collide with it. The original ``set([0, 0, 0])`` built the set ``{0}``
    # rather than reserving the color tuple.
    taken_colors = {(0, 0, 0)}
    if color is None:
        # ``len(masks)`` works for both ndarray and Tensor inputs, whereas
        # the original ``masks.size(0)`` fails on ndarrays.
        random_colors = np.random.randint(0, 255, (len(masks), 3))
        color = [tuple(c) for c in random_colors]
        color = np.array(color, dtype=np.uint8)
    polygons = []
    for i, mask in enumerate(masks):
        if with_edge:
            contours, _ = bitmap_to_polygon(mask)
            polygons += [Polygon(c) for c in contours]

        color_mask = color[i]
        while tuple(color_mask) in taken_colors:
            color_mask = _get_bias_color(color_mask)
        taken_colors.add(tuple(color_mask))

        mask = mask.astype(bool)
        img[mask] = img[mask] * (1 - alpha) + color_mask * alpha

    p = PatchCollection(
        polygons, facecolor='none', edgecolors='w', linewidths=1, alpha=0.8)
    ax.add_collection(p)

    return ax, img


def imshow_det_bboxes(img,
                      bboxes=None,
                      labels=None,
                      segms=None,
                      class_names=None,
                      score_thr=0,
                      bbox_color='green',
                      text_color='green',
                      mask_color=None,
                      thickness=2,
                      font_size=8,
                      win_name='',
                      show=True,
                      wait_time=0,
                      out_file=None):
    """Draw bboxes and class labels (with scores) on an image.

    Args:
        img (str | ndarray): The image to be displayed.
        bboxes (ndarray): Bounding boxes (with scores), shaped (n, 4) or
            (n, 5).
        labels (ndarray): Labels of bboxes.
        segms (ndarray | None): Masks, shaped (n,h,w) or None.
        class_names (list[str]): Names of each class.
        score_thr (float): Minimum score of bboxes to be shown. Default: 0.
        bbox_color (list[tuple] | tuple | str | None): Colors of bbox lines.
           If a single color is given, it will be applied to all classes.
           The tuple of color should be in RGB order. Default: 'green'.
        text_color (list[tuple] | tuple | str | None): Colors of texts.
           If a single color is given, it will be applied to all classes.
           The tuple of color should be in RGB order. Default: 'green'.
        mask_color (list[tuple] | tuple | str | None, optional): Colors of
           masks. If a single color is given, it will be applied to all
           classes. The tuple of color should be in RGB order.
           Default: None.
        thickness (int): Thickness of lines. Default: 2.
        font_size (int): Font size of texts. Default: 8.
        win_name (str): The window name. Default: ''.
        show (bool): Whether to show the image. Default: True.
        wait_time (float): Value of waitKey param. Default: 0.
        out_file (str, optional): The filename to write the image.
            Default: None.

    Returns:
        ndarray: The image with bboxes drawn on it.
    """
    assert bboxes is None or bboxes.ndim == 2, \
        f'bboxes ndim should be 2, but its ndim is {bboxes.ndim}.'
    assert labels.ndim == 1, \
        f'labels ndim should be 1, but its ndim is {labels.ndim}.'
    assert bboxes is None or bboxes.shape[1] == 4 or bboxes.shape[1] == 5, \
        f'bboxes.shape[1] should be 4 or 5, but it is {bboxes.shape[1]}.'
    assert bboxes is None or bboxes.shape[0] <= labels.shape[0], \
        'labels.shape[0] should not be less than bboxes.shape[0].'
    assert segms is None or segms.shape[0] == labels.shape[0], \
        'segms.shape[0] and labels.shape[0] should have the same length.'
    assert segms is not None or bboxes is not None, \
        'segms and bboxes should not be None at the same time.'

    img = mmcv.imread(img).astype(np.uint8)

    if score_thr > 0:
        assert bboxes is not None and bboxes.shape[1] == 5
        scores = bboxes[:, -1]
        inds = scores > score_thr
        bboxes = bboxes[inds, :]
        labels = labels[inds]
        if segms is not None:
            segms = segms[inds, ...]

    img = mmcv.bgr2rgb(img)
    width, height = img.shape[1], img.shape[0]
    img = np.ascontiguousarray(img)

    fig = plt.figure(win_name, frameon=False)
    plt.title(win_name)
    canvas = fig.canvas
    dpi = fig.get_dpi()
    # add a small EPS to avoid precision loss due to matplotlib's truncation
    # (https://github.com/matplotlib/matplotlib/issues/15363)
    fig.set_size_inches((width + EPS) / dpi, (height + EPS) / dpi)

    # remove white edges by setting subplot margins
    plt.subplots_adjust(left=0, right=1, bottom=0, top=1)
    ax = plt.gca()
    ax.axis('off')

    max_label = int(max(labels) if len(labels) > 0 else 0)
    text_palette = palette_val(get_palette(text_color, max_label + 1))
    text_colors = [text_palette[label] for label in labels]

    num_bboxes = 0
    if bboxes is not None:
        num_bboxes = bboxes.shape[0]
        bbox_palette = palette_val(get_palette(bbox_color, max_label + 1))
        colors = [bbox_palette[label] for label in labels[:num_bboxes]]
        draw_bboxes(ax, bboxes, colors, alpha=0.8, thickness=thickness)

        horizontal_alignment = 'left'
        positions = bboxes[:, :2].astype(np.int32) + thickness
        areas = (bboxes[:, 3] - bboxes[:, 1]) * (bboxes[:, 2] - bboxes[:, 0])
        scales = _get_adaptive_scales(areas)
        scores = bboxes[:, 4] if bboxes.shape[1] == 5 else None
        draw_labels(
            ax,
            labels[:num_bboxes],
            positions,
            scores=scores,
            class_names=class_names,
            color=text_colors,
            font_size=font_size,
            scales=scales,
            horizontal_alignment=horizontal_alignment)

    if segms is not None:
        mask_palette = get_palette(mask_color, max_label + 1)
        colors = [mask_palette[label] for label in labels]
        colors = np.array(colors, dtype=np.uint8)
        draw_masks(ax, img, segms, colors, with_edge=True)

        if num_bboxes < segms.shape[0]:
            segms = segms[num_bboxes:]
            horizontal_alignment = 'center'
            areas = []
            positions = []
            for mask in segms:
                _, _, stats, centroids = cv2.connectedComponentsWithStats(
                    mask.astype(np.uint8), connectivity=8)
                largest_id = np.argmax(stats[1:, -1]) + 1
                positions.append(centroids[largest_id])
                areas.append(stats[largest_id, -1])
            areas = np.stack(areas, axis=0)
            scales = _get_adaptive_scales(areas)
            draw_labels(
                ax,
                labels[num_bboxes:],
                positions,
                class_names=class_names,
                color=text_colors,
                font_size=font_size,
                scales=scales,
                horizontal_alignment=horizontal_alignment)

    plt.imshow(img)

    stream, _ = canvas.print_to_buffer()
    buffer = np.frombuffer(stream, dtype='uint8')
    if sys.platform == 'darwin':
        width, height = canvas.get_width_height(physical=True)
    img_rgba = buffer.reshape(height, width, 4)
    rgb, alpha = np.split(img_rgba, [3], axis=2)
    img = rgb.astype('uint8')
    img = mmcv.rgb2bgr(img)

    if show:
        # We do not use cv2 for display because in some cases opencv
        # conflicts with Qt and outputs a warning: Current thread
        # is not the object's thread. You can refer to
        # https://github.com/opencv/opencv-python/issues/46 for details
        if wait_time == 0:
            plt.show()
        else:
            plt.show(block=False)
            plt.pause(wait_time)
    if out_file is not None:
        mmcv.imwrite(img, out_file)

    plt.close()

    return img


def imshow_gt_det_bboxes(img,
                         annotation,
                         result,
                         class_names=None,
                         score_thr=0,
                         gt_bbox_color=(61, 102, 255),
                         gt_text_color=(200, 200, 200),
                         gt_mask_color=(61, 102, 255),
                         det_bbox_color=(241, 101, 72),
                         det_text_color=(200, 200, 200),
                         det_mask_color=(241, 101, 72),
                         thickness=2,
                         font_size=13,
                         win_name='',
                         show=True,
                         wait_time=0,
                         out_file=None,
                         overlay_gt_pred=True):
    """General visualization GT and result function.

    Args:
        img (str | ndarray): The image to be displayed.
        annotation (dict): Ground truth annotations which contain keys of
            'gt_bboxes' and 'gt_labels' or 'gt_masks'.
        result (tuple[list] | list): The detection result, can be either
            (bbox, segm) or just bbox.
        class_names (list[str]): Names of each class.
        score_thr (float): Minimum score of bboxes to be shown. Default: 0.
        gt_bbox_color (list[tuple] | tuple | str | None): Colors of bbox
            lines. If a single color is given, it will be applied to all
            classes. The tuple of color should be in RGB order.
            Default: (61, 102, 255).
        gt_text_color (list[tuple] | tuple | str | None): Colors of texts.
            If a single color is given, it will be applied to all classes.
            The tuple of color should be in RGB order.
            Default: (200, 200, 200).
        gt_mask_color (list[tuple] | tuple | str | None, optional): Colors
            of masks. If a single color is given, it will be applied to all
            classes. The tuple of color should be in RGB order.
            Default: (61, 102, 255).
        det_bbox_color (list[tuple] | tuple | str | None): Colors of bbox
            lines. If a single color is given, it will be applied to all
            classes. The tuple of color should be in RGB order.
            Default: (241, 101, 72).
        det_text_color (list[tuple] | tuple | str | None): Colors of texts.
            If a single color is given, it will be applied to all classes.
            The tuple of color should be in RGB order.
            Default: (200, 200, 200).
        det_mask_color (list[tuple] | tuple | str | None, optional): Colors
            of masks. If a single color is given, it will be applied to all
            classes. The tuple of color should be in RGB order.
            Default: (241, 101, 72).
        thickness (int): Thickness of lines. Default: 2.
        font_size (int): Font size of texts. Default: 13.
        win_name (str): The window name. Default: ''.
        show (bool): Whether to show the image. Default: True.
        wait_time (float): Value of waitKey param. Default: 0.
        out_file (str, optional): The filename to write the image.
            Default: None.
        overlay_gt_pred (bool): Whether to plot gts and predictions on the
            same image. If False, predictions and gts are plotted on two
            separate images which are then concatenated vertically, with
            the gt image on top and the prediction image below.
            Default: True.

    Returns:
        ndarray: The image with bboxes or masks drawn on it.
    """
    assert 'gt_bboxes' in annotation
    assert 'gt_labels' in annotation
    assert isinstance(result, (tuple, list, dict)), 'Expected ' \
        f'tuple or list or dict, but got {type(result)}'

    gt_bboxes = annotation['gt_bboxes']
    gt_labels = annotation['gt_labels']
    gt_masks = annotation.get('gt_masks', None)
    if gt_masks is not None:
        gt_masks = mask2ndarray(gt_masks)

    gt_seg = annotation.get('gt_semantic_seg', None)
    if gt_seg is not None:
        pad_value = 255  # the padding value of gt_seg
        sem_labels = np.unique(gt_seg)
        all_labels = np.concatenate((gt_labels, sem_labels), axis=0)
        all_labels, counts = np.unique(all_labels, return_counts=True)
        stuff_labels = all_labels[np.logical_and(counts < 2,
                                                 all_labels != pad_value)]
        stuff_masks = gt_seg[None] == stuff_labels[:, None, None]
        gt_labels = np.concatenate((gt_labels, stuff_labels), axis=0)
        gt_masks = np.concatenate((gt_masks, stuff_masks.astype(np.uint8)),
                                  axis=0)
        # If you need to show the bounding boxes,
        # please comment the following line
        # gt_bboxes = None

    img = mmcv.imread(img)

    img_with_gt = imshow_det_bboxes(
        img,
        gt_bboxes,
        gt_labels,
        gt_masks,
        class_names=class_names,
        bbox_color=gt_bbox_color,
        text_color=gt_text_color,
        mask_color=gt_mask_color,
        thickness=thickness,
        font_size=font_size,
        win_name=win_name,
        show=False)

    if not isinstance(result, dict):
        if isinstance(result, tuple):
            bbox_result, segm_result = result
            if isinstance(segm_result, tuple):
                segm_result = segm_result[0]  # ms rcnn
        else:
            bbox_result, segm_result = result, None

        bboxes = np.vstack(bbox_result)
        labels = [
            np.full(bbox.shape[0], i, dtype=np.int32)
            for i, bbox in enumerate(bbox_result)
        ]
        labels = np.concatenate(labels)

        segms = None
        if segm_result is not None and len(labels) > 0:  # non empty
            segms = mmcv.concat_list(segm_result)
            segms = mask_util.decode(segms)
            segms = segms.transpose(2, 0, 1)
    else:
        assert class_names is not None, 'We need to know the number ' \
            'of classes.'
        VOID = len(class_names)
        bboxes = None
        pan_results = result['pan_results']
        # keep objects ahead
        ids = np.unique(pan_results)[::-1]
        legal_indices = ids != VOID
        ids = ids[legal_indices]
        labels = np.array([id % INSTANCE_OFFSET for id in ids],
                          dtype=np.int64)
        segms = (pan_results[None] == ids[:, None, None])

    if overlay_gt_pred:
        img = imshow_det_bboxes(
            img_with_gt,
            bboxes,
            labels,
            segms=segms,
            class_names=class_names,
            score_thr=score_thr,
            bbox_color=det_bbox_color,
            text_color=det_text_color,
            mask_color=det_mask_color,
            thickness=thickness,
            font_size=font_size,
            win_name=win_name,
            show=show,
            wait_time=wait_time,
            out_file=out_file)
    else:
        img_with_det = imshow_det_bboxes(
            img,
            bboxes,
            labels,
            segms=segms,
            class_names=class_names,
            score_thr=score_thr,
            bbox_color=det_bbox_color,
            text_color=det_text_color,
            mask_color=det_mask_color,
            thickness=thickness,
            font_size=font_size,
            win_name=win_name,
            show=False)
        img = np.concatenate([img_with_gt, img_with_det], axis=0)

        plt.imshow(img)
        if show:
            if wait_time == 0:
                plt.show()
            else:
                plt.show(block=False)
                plt.pause(wait_time)
        if out_file is not None:
            mmcv.imwrite(img, out_file)
        plt.close()

    return img
21,190
36.572695
79
py
mmdetection
mmdetection-master/mmdet/core/visualization/palette.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np


def palette_val(palette):
    """Convert palette to matplotlib palette.

    Args:
        palette (list[tuple]): A list of color tuples.

    Returns:
        list[tuple[float]]: A list of RGB matplotlib color tuples.
    """
    new_palette = []
    for color in palette:
        color = [c / 255 for c in color]
        new_palette.append(tuple(color))
    return new_palette


def get_palette(palette, num_classes):
    """Get palette from various inputs.

    Args:
        palette (list[tuple] | str | tuple | :obj:`Color`): palette inputs.
        num_classes (int): The number of classes.

    Returns:
        list[tuple[int]]: A list of color tuples.
    """
    assert isinstance(num_classes, int)

    if isinstance(palette, list):
        dataset_palette = palette
    elif isinstance(palette, tuple):
        dataset_palette = [palette] * num_classes
    elif palette == 'random' or palette is None:
        state = np.random.get_state()
        # random color
        np.random.seed(42)
        palette = np.random.randint(0, 256, size=(num_classes, 3))
        np.random.set_state(state)
        dataset_palette = [tuple(c) for c in palette]
    elif palette == 'coco':
        from mmdet.datasets import CocoDataset, CocoPanopticDataset
        dataset_palette = CocoDataset.PALETTE
        if len(dataset_palette) < num_classes:
            dataset_palette = CocoPanopticDataset.PALETTE
    elif palette == 'citys':
        from mmdet.datasets import CityscapesDataset
        dataset_palette = CityscapesDataset.PALETTE
    elif palette == 'voc':
        from mmdet.datasets import VOCDataset
        dataset_palette = VOCDataset.PALETTE
    elif mmcv.is_str(palette):
        dataset_palette = [mmcv.color_val(palette)[::-1]] * num_classes
    else:
        raise TypeError(f'Invalid type for palette: {type(palette)}')

    assert len(dataset_palette) >= num_classes, \
        'The length of palette should not be less than `num_classes`.'
    return dataset_palette
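

# Editor's note: a tiny sketch (not part of the original module) exercising
# the two helpers above. 'random' palettes are reproducible because
# `get_palette` temporarily reseeds NumPy with a fixed seed. Run with
# ``python -m mmdet.core.visualization.palette``.
if __name__ == '__main__':
    colors = get_palette('random', 3)
    assert len(colors) == 3 and all(len(c) == 3 for c in colors)
    # `palette_val` normalizes every channel into [0, 1] for matplotlib.
    mpl_colors = palette_val(colors)
    assert all(0.0 <= ch <= 1.0 for c in mpl_colors for ch in c)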
2,068
31.328125
75
py
mmdetection
mmdetection-master/mmdet/datasets/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .builder import DATASETS, PIPELINES, build_dataloader, build_dataset from .cityscapes import CityscapesDataset from .coco import CocoDataset from .coco_occluded import OccludedSeparatedCocoDataset from .coco_panoptic import CocoPanopticDataset from .custom import CustomDataset from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, MultiImageMixDataset, RepeatDataset) from .deepfashion import DeepFashionDataset from .lvis import LVISDataset, LVISV1Dataset, LVISV05Dataset from .objects365 import Objects365V1Dataset, Objects365V2Dataset from .openimages import OpenImagesChallengeDataset, OpenImagesDataset from .samplers import DistributedGroupSampler, DistributedSampler, GroupSampler from .utils import (NumClassCheckHook, get_loading_pipeline, replace_ImageToTensor) from .voc import VOCDataset from .wider_face import WIDERFaceDataset from .xml_style import XMLDataset __all__ = [ 'CustomDataset', 'XMLDataset', 'CocoDataset', 'DeepFashionDataset', 'VOCDataset', 'CityscapesDataset', 'LVISDataset', 'LVISV05Dataset', 'LVISV1Dataset', 'GroupSampler', 'DistributedGroupSampler', 'DistributedSampler', 'build_dataloader', 'ConcatDataset', 'RepeatDataset', 'ClassBalancedDataset', 'WIDERFaceDataset', 'DATASETS', 'PIPELINES', 'build_dataset', 'replace_ImageToTensor', 'get_loading_pipeline', 'NumClassCheckHook', 'CocoPanopticDataset', 'MultiImageMixDataset', 'OpenImagesDataset', 'OpenImagesChallengeDataset', 'Objects365V1Dataset', 'Objects365V2Dataset', 'OccludedSeparatedCocoDataset' ]
1,648
50.53125
79
py
mmdetection
mmdetection-master/mmdet/datasets/builder.py
# Copyright (c) OpenMMLab. All rights reserved. import copy import platform import random import warnings from functools import partial import numpy as np import torch from mmcv.parallel import collate from mmcv.runner import get_dist_info from mmcv.utils import TORCH_VERSION, Registry, build_from_cfg, digit_version from torch.utils.data import DataLoader from .samplers import (ClassAwareSampler, DistributedGroupSampler, DistributedSampler, GroupSampler, InfiniteBatchSampler, InfiniteGroupBatchSampler) if platform.system() != 'Windows': # https://github.com/pytorch/pytorch/issues/973 import resource rlimit = resource.getrlimit(resource.RLIMIT_NOFILE) base_soft_limit = rlimit[0] hard_limit = rlimit[1] soft_limit = min(max(4096, base_soft_limit), hard_limit) resource.setrlimit(resource.RLIMIT_NOFILE, (soft_limit, hard_limit)) DATASETS = Registry('dataset') PIPELINES = Registry('pipeline') def _concat_dataset(cfg, default_args=None): from .dataset_wrappers import ConcatDataset ann_files = cfg['ann_file'] img_prefixes = cfg.get('img_prefix', None) seg_prefixes = cfg.get('seg_prefix', None) proposal_files = cfg.get('proposal_file', None) separate_eval = cfg.get('separate_eval', True) datasets = [] num_dset = len(ann_files) for i in range(num_dset): data_cfg = copy.deepcopy(cfg) # pop 'separate_eval' since it is not a valid key for common datasets. if 'separate_eval' in data_cfg: data_cfg.pop('separate_eval') data_cfg['ann_file'] = ann_files[i] if isinstance(img_prefixes, (list, tuple)): data_cfg['img_prefix'] = img_prefixes[i] if isinstance(seg_prefixes, (list, tuple)): data_cfg['seg_prefix'] = seg_prefixes[i] if isinstance(proposal_files, (list, tuple)): data_cfg['proposal_file'] = proposal_files[i] datasets.append(build_dataset(data_cfg, default_args)) return ConcatDataset(datasets, separate_eval) def build_dataset(cfg, default_args=None): from .dataset_wrappers import (ClassBalancedDataset, ConcatDataset, MultiImageMixDataset, RepeatDataset) if isinstance(cfg, (list, tuple)): dataset = ConcatDataset([build_dataset(c, default_args) for c in cfg]) elif cfg['type'] == 'ConcatDataset': dataset = ConcatDataset( [build_dataset(c, default_args) for c in cfg['datasets']], cfg.get('separate_eval', True)) elif cfg['type'] == 'RepeatDataset': dataset = RepeatDataset( build_dataset(cfg['dataset'], default_args), cfg['times']) elif cfg['type'] == 'ClassBalancedDataset': dataset = ClassBalancedDataset( build_dataset(cfg['dataset'], default_args), cfg['oversample_thr']) elif cfg['type'] == 'MultiImageMixDataset': cp_cfg = copy.deepcopy(cfg) cp_cfg['dataset'] = build_dataset(cp_cfg['dataset']) cp_cfg.pop('type') dataset = MultiImageMixDataset(**cp_cfg) elif isinstance(cfg.get('ann_file'), (list, tuple)): dataset = _concat_dataset(cfg, default_args) else: dataset = build_from_cfg(cfg, DATASETS, default_args) return dataset def build_dataloader(dataset, samples_per_gpu, workers_per_gpu, num_gpus=1, dist=True, shuffle=True, seed=None, runner_type='EpochBasedRunner', persistent_workers=False, class_aware_sampler=None, **kwargs): """Build PyTorch DataLoader. In distributed training, each GPU/process has a dataloader. In non-distributed training, there is only one dataloader for all GPUs. Args: dataset (Dataset): A PyTorch dataset. samples_per_gpu (int): Number of training samples on each GPU, i.e., batch size of each GPU. workers_per_gpu (int): How many subprocesses to use for data loading for each GPU. num_gpus (int): Number of GPUs. Only used in non-distributed training. 
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int, Optional): Seed to be used. Default: None.
        runner_type (str): Type of runner. Default: `EpochBasedRunner`
        persistent_workers (bool): If True, the data loader will not shutdown
            the worker processes after a dataset has been consumed once.
            This keeps the worker `Dataset` instances alive.
            This argument is only valid when PyTorch>=1.7.0. Default: False.
        class_aware_sampler (dict): Whether to use `ClassAwareSampler`
            during training. Default: None.
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()

    if dist:
        # When model is :obj:`DistributedDataParallel`,
        # `batch_size` of :obj:`dataloader` is the
        # number of training samples on each GPU.
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        # When model is obj:`DataParallel`
        # the batch size is samples on all the GPUs
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    if runner_type == 'IterBasedRunner':
        # this is a batch sampler, which can yield
        # a mini-batch indices each time.
        # it can be used in both `DataParallel` and
        # `DistributedDataParallel`
        if shuffle:
            batch_sampler = InfiniteGroupBatchSampler(
                dataset, batch_size, world_size, rank, seed=seed)
        else:
            batch_sampler = InfiniteBatchSampler(
                dataset,
                batch_size,
                world_size,
                rank,
                seed=seed,
                shuffle=False)
        batch_size = 1
        sampler = None
    else:
        if class_aware_sampler is not None:
            # ClassAwareSampler can be used in both distributed and
            # non-distributed training.
            num_sample_class = class_aware_sampler.get('num_sample_class', 1)
            sampler = ClassAwareSampler(
                dataset,
                samples_per_gpu,
                world_size,
                rank,
                seed=seed,
                num_sample_class=num_sample_class)
        elif dist:
            # DistributedGroupSampler will definitely shuffle the data to
            # satisfy that images on each GPU are in the same group
            if shuffle:
                sampler = DistributedGroupSampler(
                    dataset, samples_per_gpu, world_size, rank, seed=seed)
            else:
                sampler = DistributedSampler(
                    dataset, world_size, rank, shuffle=False, seed=seed)
        else:
            sampler = GroupSampler(dataset,
                                   samples_per_gpu) if shuffle else None
        batch_sampler = None

    init_fn = partial(
        worker_init_fn, num_workers=num_workers, rank=rank,
        seed=seed) if seed is not None else None

    if (TORCH_VERSION != 'parrots'
            and digit_version(TORCH_VERSION) >= digit_version('1.7.0')):
        kwargs['persistent_workers'] = persistent_workers
    elif persistent_workers is True:
        warnings.warn('persistent_workers is invalid because your pytorch '
                      'version is lower than 1.7.0')

    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        batch_sampler=batch_sampler,
        collate_fn=partial(collate, samples_per_gpu=samples_per_gpu),
        pin_memory=kwargs.pop('pin_memory', False),
        worker_init_fn=init_fn,
        **kwargs)

    return data_loader


def worker_init_fn(worker_id, num_workers, rank, seed):
    # The seed of each worker equals
    # num_workers * rank + worker_id + user_seed
    worker_seed = num_workers * rank + worker_id + seed
    np.random.seed(worker_seed)
    random.seed(worker_seed)
    torch.manual_seed(worker_seed)
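

# Editor's note: a minimal sketch (not part of the original module) of
# `build_dataloader` with a stub dataset. `GroupSampler` expects a `flag`
# array that assigns every sample to an aspect-ratio group, so the stub
# provides one; all values below are illustrative. Run with
# ``python -m mmdet.datasets.builder``.
if __name__ == '__main__':
    from torch.utils.data import Dataset

    class _DummyDataset(Dataset):

        def __init__(self, num=8):
            self.flag = np.zeros(num, dtype=np.uint8)  # single ratio group

        def __len__(self):
            return len(self.flag)

        def __getitem__(self, idx):
            return dict(idx=idx)

    loader = build_dataloader(
        _DummyDataset(),
        samples_per_gpu=2,
        workers_per_gpu=0,
        num_gpus=1,
        dist=False,
        shuffle=True,
        seed=0)
    assert len(loader) == 4  # 8 samples / batch size 2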
8,421
37.990741
79
py
mmdetection
mmdetection-master/mmdet/datasets/cityscapes.py
# Copyright (c) OpenMMLab. All rights reserved. # Modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/cityscapes.py # noqa # and https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa import glob import os import os.path as osp import tempfile from collections import OrderedDict import mmcv import numpy as np import pycocotools.mask as maskUtils from mmcv.utils import print_log from .builder import DATASETS from .coco import CocoDataset @DATASETS.register_module() class CityscapesDataset(CocoDataset): CLASSES = ('person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle') PALETTE = [(220, 20, 60), (255, 0, 0), (0, 0, 142), (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32)] def _filter_imgs(self, min_size=32): """Filter images too small or without ground truths.""" valid_inds = [] # obtain images that contain annotation ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values()) # obtain images that contain annotations of the required categories ids_in_cat = set() for i, class_id in enumerate(self.cat_ids): ids_in_cat |= set(self.coco.cat_img_map[class_id]) # merge the image id sets of the two conditions and use the merged set # to filter out images if self.filter_empty_gt=True ids_in_cat &= ids_with_ann valid_img_ids = [] for i, img_info in enumerate(self.data_infos): img_id = img_info['id'] ann_ids = self.coco.getAnnIds(imgIds=[img_id]) ann_info = self.coco.loadAnns(ann_ids) all_iscrowd = all([_['iscrowd'] for _ in ann_info]) if self.filter_empty_gt and (self.img_ids[i] not in ids_in_cat or all_iscrowd): continue if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) valid_img_ids.append(img_id) self.img_ids = valid_img_ids return valid_inds def _parse_ann_info(self, img_info, ann_info): """Parse bbox and mask annotation. Args: img_info (dict): Image info of an image. ann_info (list[dict]): Annotation info of an image. Returns: dict: A dict containing the following keys: bboxes, \ bboxes_ignore, labels, masks, seg_map. \ "masks" are already decoded into binary masks. """ gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] gt_masks_ann = [] for i, ann in enumerate(ann_info): if ann.get('ignore', False): continue x1, y1, w, h = ann['bbox'] if ann['area'] <= 0 or w < 1 or h < 1: continue if ann['category_id'] not in self.cat_ids: continue bbox = [x1, y1, x1 + w, y1 + h] if ann.get('iscrowd', False): gt_bboxes_ignore.append(bbox) else: gt_bboxes.append(bbox) gt_labels.append(self.cat2label[ann['category_id']]) gt_masks_ann.append(ann['segmentation']) if gt_bboxes: gt_bboxes = np.array(gt_bboxes, dtype=np.float32) gt_labels = np.array(gt_labels, dtype=np.int64) else: gt_bboxes = np.zeros((0, 4), dtype=np.float32) gt_labels = np.array([], dtype=np.int64) if gt_bboxes_ignore: gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) else: gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) ann = dict( bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_masks_ann, seg_map=img_info['segm_file']) return ann def results2txt(self, results, outfile_prefix): """Dump the detection results to a txt file. Args: results (list[list | tuple]): Testing results of the dataset. outfile_prefix (str): The filename prefix of the json files. If the prefix is "somepath/xxx", the txt files will be named "somepath/xxx.txt". 
        Returns:
            list[str]: Result txt files which contain corresponding \
                instance segmentation images.
        """
        try:
            import cityscapesscripts.helpers.labels as CSLabels
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" '
                              'to install cityscapesscripts first.')
        result_files = []
        os.makedirs(outfile_prefix, exist_ok=True)
        prog_bar = mmcv.ProgressBar(len(self))
        for idx in range(len(self)):
            result = results[idx]
            filename = self.data_infos[idx]['filename']
            basename = osp.splitext(osp.basename(filename))[0]
            pred_txt = osp.join(outfile_prefix, basename + '_pred.txt')

            bbox_result, segm_result = result
            bboxes = np.vstack(bbox_result)
            # segm results
            if isinstance(segm_result, tuple):
                # Some detectors use different scores for bbox and mask,
                # like Mask Scoring R-CNN. Score of segm will be used instead
                # of bbox score.
                segms = mmcv.concat_list(segm_result[0])
                mask_score = segm_result[1]
            else:
                # use bbox score for mask score
                segms = mmcv.concat_list(segm_result)
                mask_score = [bbox[-1] for bbox in bboxes]
            labels = [
                np.full(bbox.shape[0], i, dtype=np.int32)
                for i, bbox in enumerate(bbox_result)
            ]
            labels = np.concatenate(labels)

            assert len(bboxes) == len(segms) == len(labels)
            num_instances = len(bboxes)
            prog_bar.update()
            with open(pred_txt, 'w') as fout:
                for i in range(num_instances):
                    pred_class = labels[i]
                    classes = self.CLASSES[pred_class]
                    class_id = CSLabels.name2label[classes].id
                    score = mask_score[i]
                    mask = maskUtils.decode(segms[i]).astype(np.uint8)
                    png_filename = osp.join(
                        outfile_prefix, basename + f'_{i}_{classes}.png')
                    mmcv.imwrite(mask, png_filename)
                    fout.write(f'{osp.basename(png_filename)} {class_id} '
                               f'{score}\n')
            result_files.append(pred_txt)

        return result_files

    def format_results(self, results, txtfile_prefix=None):
        """Format the results to txt (standard format for Cityscapes
        evaluation).

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of txt files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.

        Returns:
            tuple: (result_files, tmp_dir), result_files is a dict containing \
                the json filepaths, tmp_dir is the temporal directory created \
                for saving txt/png files when txtfile_prefix is not specified.
        """
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        if txtfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            txtfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2txt(results, txtfile_prefix)

        return result_files, tmp_dir

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 outfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in Cityscapes/COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            outfile_prefix (str | None): The prefix of output file. It
                includes the file path and the prefix of filename, e.g.,
                "a/b/prefix". If results are evaluated with COCO protocol,
                it would be the prefix of output json file. For example,
                the metric is 'bbox' and 'segm', then json files would be
                "a/b/prefix.bbox.json" and "a/b/prefix.segm.json".
                If results are evaluated with cityscapes protocol, it would
                be the prefix of output txt/png files. The output files
                would be png images under folder "a/b/prefix/xxx/" and the
                file name of images would be written into a txt file
                "a/b/prefix/xxx_pred.txt", where "xxx" is the video name of
                cityscapes. If not specified, a temp file will be created.
                Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for
                evaluating recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU threshold used for evaluating
                recalls. If set to a list, the average recall of all IoUs
                will also be computed. Default: np.arange(0.5, 0.96, 0.05).

        Returns:
            dict[str, float]: COCO style evaluation metric or cityscapes mAP \
                and AP@50.
        """
        eval_results = dict()

        metrics = metric.copy() if isinstance(metric, list) else [metric]

        if 'cityscapes' in metrics:
            eval_results.update(
                self._evaluate_cityscapes(results, outfile_prefix, logger))
            metrics.remove('cityscapes')

        # left metrics are all coco metric
        if len(metrics) > 0:
            # create CocoDataset with CityscapesDataset annotation
            self_coco = CocoDataset(self.ann_file, self.pipeline.transforms,
                                    None, self.data_root, self.img_prefix,
                                    self.seg_prefix, self.seg_suffix,
                                    self.proposal_file, self.test_mode,
                                    self.filter_empty_gt)
            # TODO: remove this in the future
            # reload annotations of correct class
            self_coco.CLASSES = self.CLASSES
            self_coco.data_infos = self_coco.load_annotations(self.ann_file)
            eval_results.update(
                self_coco.evaluate(results, metrics, logger, outfile_prefix,
                                   classwise, proposal_nums, iou_thrs))

        return eval_results

    def _evaluate_cityscapes(self, results, txtfile_prefix, logger):
        """Evaluation in Cityscapes protocol.

        Args:
            results (list): Testing results of the dataset.
            txtfile_prefix (str | None): The prefix of output txt file
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.

        Returns:
            dict[str: float]: Cityscapes evaluation results, contains 'mAP' \
                and 'AP@50'.
        """
        try:
            import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as CSEval  # noqa
        except ImportError:
            raise ImportError('Please run "pip install cityscapesscripts" '
                              'to install cityscapesscripts first.')
        msg = 'Evaluating in Cityscapes style'
        if logger is None:
            msg = '\n' + msg
        print_log(msg, logger=logger)

        result_files, tmp_dir = self.format_results(results, txtfile_prefix)

        if tmp_dir is None:
            result_dir = osp.join(txtfile_prefix, 'results')
        else:
            result_dir = osp.join(tmp_dir.name, 'results')

        eval_results = OrderedDict()
        print_log(f'Evaluating results under {result_dir} ...', logger=logger)

        # set global states in cityscapes evaluation API
        CSEval.args.cityscapesPath = os.path.join(self.img_prefix, '../..')
        CSEval.args.predictionPath = os.path.abspath(result_dir)
        CSEval.args.predictionWalk = None
        CSEval.args.JSONOutput = False
        CSEval.args.colorized = False
        CSEval.args.gtInstancesFile = os.path.join(result_dir,
                                                   'gtInstances.json')
        CSEval.args.groundTruthSearch = os.path.join(
            self.img_prefix.replace('leftImg8bit', 'gtFine'),
            '*/*_gtFine_instanceIds.png')

        groundTruthImgList = glob.glob(CSEval.args.groundTruthSearch)
        assert len(groundTruthImgList), 'Cannot find ground truth images' \
            f' in {CSEval.args.groundTruthSearch}.'
predictionImgList = [] for gt in groundTruthImgList: predictionImgList.append(CSEval.getPrediction(gt, CSEval.args)) CSEval_results = CSEval.evaluateImgLists(predictionImgList, groundTruthImgList, CSEval.args)['averages'] eval_results['mAP'] = CSEval_results['allAp'] eval_results['AP@50'] = CSEval_results['allAp50%'] if tmp_dir is not None: tmp_dir.cleanup() return eval_results
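

# Editor's note: a self-contained sketch (not part of the original module)
# of `_parse_ann_info` on a fake annotation. The instance is created
# without running `__init__` (which would need real annotation files), and
# the category bookkeeping is stubbed in; every value below is
# illustrative. Run with ``python -m mmdet.datasets.cityscapes``.
if __name__ == '__main__':
    ds = CityscapesDataset.__new__(CityscapesDataset)
    ds.cat_ids = [1]
    ds.cat2label = {1: 0}
    img_info = dict(width=100, height=100, segm_file='demo_gtFine.png')
    ann_info = [
        dict(
            bbox=[10, 10, 30, 40],
            area=1200,
            category_id=1,
            segmentation=[[10, 10, 40, 10, 40, 50]])
    ]
    ann = ds._parse_ann_info(img_info, ann_info)
    # One xyxy box [10, 10, 40, 50] mapped to contiguous label 0.
    assert ann['bboxes'].shape == (1, 4) and ann['labels'][0] == 0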
14,529
41.735294
135
py
mmdetection
mmdetection-master/mmdet/datasets/coco.py
# Copyright (c) OpenMMLab. All rights reserved. import contextlib import io import itertools import logging import os.path as osp import tempfile import warnings from collections import OrderedDict import mmcv import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from mmdet.core import eval_recalls from .api_wrappers import COCO, COCOeval from .builder import DATASETS from .custom import CustomDataset @DATASETS.register_module() class CocoDataset(CustomDataset): CLASSES = ('person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush') PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70), (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0), (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255), (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157), (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118), (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182), (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255), (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255), (134, 134, 103), (145, 148, 174), (255, 208, 186), (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255), (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105), (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149), (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205), (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0), (119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15), (127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122), (191, 162, 208)] def load_annotations(self, ann_file): """Load annotation from COCO style annotation file. Args: ann_file (str): Path of annotation file. Returns: list[dict]: Annotation info from COCO api. """ self.coco = COCO(ann_file) # The order of returned `cat_ids` will not # change with the order of the CLASSES self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = [] total_ann_ids = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] info['filename'] = info['file_name'] data_infos.append(info) ann_ids = self.coco.get_ann_ids(img_ids=[i]) total_ann_ids.extend(ann_ids) assert len(set(total_ann_ids)) == len( total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" return data_infos def get_ann_info(self, idx): """Get COCO annotation by index. Args: idx (int): Index of data. 
        Returns:
            dict: Annotation info of specified index.
        """

        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return self._parse_ann_info(self.data_infos[idx], ann_info)

    def get_cat_ids(self, idx):
        """Get COCO category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """

        img_id = self.data_infos[idx]['id']
        ann_ids = self.coco.get_ann_ids(img_ids=[img_id])
        ann_info = self.coco.load_anns(ann_ids)
        return [ann['category_id'] for ann in ann_info]

    def _filter_imgs(self, min_size=32):
        """Filter images too small or without ground truths."""
        valid_inds = []
        # obtain images that contain annotation
        ids_with_ann = set(_['image_id'] for _ in self.coco.anns.values())
        # obtain images that contain annotations of the required categories
        ids_in_cat = set()
        for i, class_id in enumerate(self.cat_ids):
            ids_in_cat |= set(self.coco.cat_img_map[class_id])
        # merge the image id sets of the two conditions and use the merged set
        # to filter out images if self.filter_empty_gt=True
        ids_in_cat &= ids_with_ann

        valid_img_ids = []
        for i, img_info in enumerate(self.data_infos):
            img_id = self.img_ids[i]
            if self.filter_empty_gt and img_id not in ids_in_cat:
                continue
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
                valid_img_ids.append(img_id)
        self.img_ids = valid_img_ids
        return valid_inds

    def _parse_ann_info(self, img_info, ann_info):
        """Parse bbox and mask annotation.

        Args:
            img_info (dict): Image info of an image.
            ann_info (list[dict]): Annotation info of an image.

        Returns:
            dict: A dict containing the following keys: bboxes, \
                bboxes_ignore, labels, masks, seg_map. "masks" are raw \
                annotations and not decoded into binary masks.
        """
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []
        gt_masks_ann = []
        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            inter_w = max(0, min(x1 + w, img_info['width']) - max(x1, 0))
            inter_h = max(0, min(y1 + h, img_info['height']) - max(y1, 0))
            if inter_w * inter_h == 0:
                continue
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            if ann['category_id'] not in self.cat_ids:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            if ann.get('iscrowd', False):
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])
                gt_masks_ann.append(ann.get('segmentation', None))

        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)

        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        seg_map = img_info['filename'].rsplit('.', 1)[0] + self.seg_suffix

        ann = dict(
            bboxes=gt_bboxes,
            labels=gt_labels,
            bboxes_ignore=gt_bboxes_ignore,
            masks=gt_masks_ann,
            seg_map=seg_map)

        return ann

    def xyxy2xywh(self, bbox):
        """Convert ``xyxy`` style bounding boxes to ``xywh`` style for COCO
        evaluation.

        Args:
            bbox (numpy.ndarray): The bounding boxes, shape (4, ), in
                ``xyxy`` order.

        Returns:
            list[float]: The converted bounding boxes, in ``xywh`` order.
""" _bbox = bbox.tolist() return [ _bbox[0], _bbox[1], _bbox[2] - _bbox[0], _bbox[3] - _bbox[1], ] def _proposal2json(self, results): """Convert proposal results to COCO json style.""" json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] bboxes = results[idx] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = 1 json_results.append(data) return json_results def _det2json(self, results): """Convert detection results to COCO json style.""" json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] result = results[idx] for label in range(len(result)): bboxes = result[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = self.cat_ids[label] json_results.append(data) return json_results def _segm2json(self, results): """Convert instance segmentation results to COCO json style.""" bbox_json_results = [] segm_json_results = [] for idx in range(len(self)): img_id = self.img_ids[idx] det, seg = results[idx] for label in range(len(det)): # bbox results bboxes = det[label] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(bboxes[i][4]) data['category_id'] = self.cat_ids[label] bbox_json_results.append(data) # segm results # some detectors use different scores for bbox and mask if isinstance(seg, tuple): segms = seg[0][label] mask_score = seg[1][label] else: segms = seg[label] mask_score = [bbox[4] for bbox in bboxes] for i in range(bboxes.shape[0]): data = dict() data['image_id'] = img_id data['bbox'] = self.xyxy2xywh(bboxes[i]) data['score'] = float(mask_score[i]) data['category_id'] = self.cat_ids[label] if isinstance(segms[i]['counts'], bytes): segms[i]['counts'] = segms[i]['counts'].decode() data['segmentation'] = segms[i] segm_json_results.append(data) return bbox_json_results, segm_json_results def results2json(self, results, outfile_prefix): """Dump the detection results to a COCO style json file. There are 3 types of results: proposals, bbox predictions, mask predictions, and they have different data types. This method will automatically recognize the type, and dump them to json files. Args: results (list[list | tuple | ndarray]): Testing results of the dataset. outfile_prefix (str): The filename prefix of the json files. If the prefix is "somepath/xxx", the json files will be named "somepath/xxx.bbox.json", "somepath/xxx.segm.json", "somepath/xxx.proposal.json". Returns: dict[str: str]: Possible keys are "bbox", "segm", "proposal", and \ values are corresponding filenames. 
""" result_files = dict() if isinstance(results[0], list): json_results = self._det2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' mmcv.dump(json_results, result_files['bbox']) elif isinstance(results[0], tuple): json_results = self._segm2json(results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' result_files['segm'] = f'{outfile_prefix}.segm.json' mmcv.dump(json_results[0], result_files['bbox']) mmcv.dump(json_results[1], result_files['segm']) elif isinstance(results[0], np.ndarray): json_results = self._proposal2json(results) result_files['proposal'] = f'{outfile_prefix}.proposal.json' mmcv.dump(json_results, result_files['proposal']) else: raise TypeError('invalid type of results') return result_files def fast_eval_recall(self, results, proposal_nums, iou_thrs, logger=None): gt_bboxes = [] for i in range(len(self.img_ids)): ann_ids = self.coco.get_ann_ids(img_ids=self.img_ids[i]) ann_info = self.coco.load_anns(ann_ids) if len(ann_info) == 0: gt_bboxes.append(np.zeros((0, 4))) continue bboxes = [] for ann in ann_info: if ann.get('ignore', False) or ann['iscrowd']: continue x1, y1, w, h = ann['bbox'] bboxes.append([x1, y1, x1 + w, y1 + h]) bboxes = np.array(bboxes, dtype=np.float32) if bboxes.shape[0] == 0: bboxes = np.zeros((0, 4)) gt_bboxes.append(bboxes) recalls = eval_recalls( gt_bboxes, results, proposal_nums, iou_thrs, logger=logger) ar = recalls.mean(axis=1) return ar def format_results(self, results, jsonfile_prefix=None, **kwargs): """Format the results to json (standard format for COCO evaluation). Args: results (list[tuple | numpy.ndarray]): Testing results of the dataset. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. Returns: tuple: (result_files, tmp_dir), result_files is a dict containing \ the json filepaths, tmp_dir is the temporal directory created \ for saving json files when jsonfile_prefix is not specified. """ assert isinstance(results, list), 'results must be a list' assert len(results) == len(self), ( 'The length of results is not equal to the dataset len: {} != {}'. format(len(results), len(self))) if jsonfile_prefix is None: tmp_dir = tempfile.TemporaryDirectory() jsonfile_prefix = osp.join(tmp_dir.name, 'results') else: tmp_dir = None result_files = self.results2json(results, jsonfile_prefix) return result_files, tmp_dir def evaluate_det_segm(self, results, result_files, coco_gt, metrics, logger=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=None, metric_items=None): """Instance segmentation and object detection evaluation in COCO protocol. Args: results (list[list | tuple | dict]): Testing results of the dataset. result_files (dict[str, str]): a dict contains json file path. coco_gt (COCO): COCO API object with ground truth annotation. metric (str | list[str]): Metrics to be evaluated. Options are 'bbox', 'segm', 'proposal', 'proposal_fast'. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. classwise (bool): Whether to evaluating the AP for each class. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thrs (Sequence[float], optional): IoU threshold used for evaluating recalls/mAPs. 
If set to a list, the average of all IoUs will also be computed. If not specified, [0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. Default: None. metric_items (list[str] | str, optional): Metric items that will be returned. If not specified, ``['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when ``metric=='bbox' or metric=='segm'``. Returns: dict[str, float]: COCO style evaluation metric. """ if iou_thrs is None: iou_thrs = np.linspace( .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True) if metric_items is not None: if not isinstance(metric_items, list): metric_items = [metric_items] eval_results = OrderedDict() for metric in metrics: msg = f'Evaluating {metric}...' if logger is None: msg = '\n' + msg print_log(msg, logger=logger) if metric == 'proposal_fast': if isinstance(results[0], tuple): raise KeyError('proposal_fast is not supported for ' 'instance segmentation result.') ar = self.fast_eval_recall( results, proposal_nums, iou_thrs, logger='silent') log_msg = [] for i, num in enumerate(proposal_nums): eval_results[f'AR@{num}'] = ar[i] log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}') log_msg = ''.join(log_msg) print_log(log_msg, logger=logger) continue iou_type = 'bbox' if metric == 'proposal' else metric if metric not in result_files: raise KeyError(f'{metric} is not in results') try: predictions = mmcv.load(result_files[metric]) if iou_type == 'segm': # Refer to https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocotools/coco.py#L331 # noqa # When evaluating mask AP, if the results contain bbox, # cocoapi will use the box area instead of the mask area # for calculating the instance area. Though the overall AP # is not affected, this leads to different # small/medium/large mask AP results. for x in predictions: x.pop('bbox') warnings.simplefilter('once') warnings.warn( 'The key "bbox" is deleted for more accurate mask AP ' 'of small/medium/large instances since v2.12.0. 
This ' 'does not change the overall mAP calculation.', UserWarning) coco_det = coco_gt.loadRes(predictions) except IndexError: print_log( 'The testing results of the whole dataset is empty.', logger=logger, level=logging.ERROR) break cocoEval = COCOeval(coco_gt, coco_det, iou_type) cocoEval.params.catIds = self.cat_ids cocoEval.params.imgIds = self.img_ids cocoEval.params.maxDets = list(proposal_nums) cocoEval.params.iouThrs = iou_thrs # mapping of cocoEval.stats coco_metric_names = { 'mAP': 0, 'mAP_50': 1, 'mAP_75': 2, 'mAP_s': 3, 'mAP_m': 4, 'mAP_l': 5, 'AR@100': 6, 'AR@300': 7, 'AR@1000': 8, 'AR_s@1000': 9, 'AR_m@1000': 10, 'AR_l@1000': 11 } if metric_items is not None: for metric_item in metric_items: if metric_item not in coco_metric_names: raise KeyError( f'metric item {metric_item} is not supported') if metric == 'proposal': cocoEval.params.useCats = 0 cocoEval.evaluate() cocoEval.accumulate() # Save coco summarize print information to logger redirect_string = io.StringIO() with contextlib.redirect_stdout(redirect_string): cocoEval.summarize() print_log('\n' + redirect_string.getvalue(), logger=logger) if metric_items is None: metric_items = [ 'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ] for item in metric_items: val = float( f'{cocoEval.stats[coco_metric_names[item]]:.4f}') eval_results[item] = val else: cocoEval.evaluate() cocoEval.accumulate() # Save coco summarize print information to logger redirect_string = io.StringIO() with contextlib.redirect_stdout(redirect_string): cocoEval.summarize() print_log('\n' + redirect_string.getvalue(), logger=logger) if classwise: # Compute per-category AP # Compute per-category AP # from https://github.com/facebookresearch/detectron2/ precisions = cocoEval.eval['precision'] # precision: (iou, recall, cls, area range, max dets) assert len(self.cat_ids) == precisions.shape[2] results_per_category = [] for idx, catId in enumerate(self.cat_ids): # area range index 0: all area ranges # max dets index -1: typically 100 per image nm = self.coco.loadCats(catId)[0] precision = precisions[:, :, idx, 0, -1] precision = precision[precision > -1] if precision.size: ap = np.mean(precision) else: ap = float('nan') results_per_category.append( (f'{nm["name"]}', f'{float(ap):0.3f}')) num_columns = min(6, len(results_per_category) * 2) results_flatten = list( itertools.chain(*results_per_category)) headers = ['category', 'AP'] * (num_columns // 2) results_2d = itertools.zip_longest(*[ results_flatten[i::num_columns] for i in range(num_columns) ]) table_data = [headers] table_data += [result for result in results_2d] table = AsciiTable(table_data) print_log('\n' + table.table, logger=logger) if metric_items is None: metric_items = [ 'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l' ] for metric_item in metric_items: key = f'{metric}_{metric_item}' val = float( f'{cocoEval.stats[coco_metric_names[metric_item]]:.4f}' ) eval_results[key] = val ap = cocoEval.stats[:6] eval_results[f'{metric}_mAP_copypaste'] = ( f'{ap[0]:.4f} {ap[1]:.4f} {ap[2]:.4f} {ap[3]:.4f} ' f'{ap[4]:.4f} {ap[5]:.4f}') return eval_results def evaluate(self, results, metric='bbox', logger=None, jsonfile_prefix=None, classwise=False, proposal_nums=(100, 300, 1000), iou_thrs=None, metric_items=None): """Evaluation in COCO protocol. Args: results (list[list | tuple]): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. Options are 'bbox', 'segm', 'proposal', 'proposal_fast'. 
logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. classwise (bool): Whether to evaluating the AP for each class. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thrs (Sequence[float], optional): IoU threshold used for evaluating recalls/mAPs. If set to a list, the average of all IoUs will also be computed. If not specified, [0.50, 0.55, 0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used. Default: None. metric_items (list[str] | str, optional): Metric items that will be returned. If not specified, ``['AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l']`` will be used when ``metric=='bbox' or metric=='segm'``. Returns: dict[str, float]: COCO style evaluation metric. """ metrics = metric if isinstance(metric, list) else [metric] allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast'] for metric in metrics: if metric not in allowed_metrics: raise KeyError(f'metric {metric} is not supported') coco_gt = self.coco self.cat_ids = coco_gt.get_cat_ids(cat_names=self.CLASSES) result_files, tmp_dir = self.format_results(results, jsonfile_prefix) eval_results = self.evaluate_det_segm(results, result_files, coco_gt, metrics, logger, classwise, proposal_nums, iou_thrs, metric_items) if tmp_dir is not None: tmp_dir.cleanup() return eval_results
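

# Editor's note: a tiny sketch (not part of the original module) of the
# `xyxy2xywh` conversion used when dumping results to COCO json. Building
# a full dataset needs annotation files, so the helper is called on a bare
# instance created without `__init__`; the box is an illustration value.
# Run with ``python -m mmdet.datasets.coco``.
if __name__ == '__main__':
    ds = CocoDataset.__new__(CocoDataset)
    box_xyxy = np.array([10., 20., 50., 80.])
    # [x1, y1, x2, y2] becomes [x, y, w, h].
    assert ds.xyxy2xywh(box_xyxy) == [10., 20., 40., 60.]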
28,333
42.590769
124
py
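The evaluate() entry point above delegates to evaluate_det_segm(), which drives pycocotools' COCOeval and maps its 12-element stats vector through coco_metric_names. A minimal usage sketch follows; the annotation path and the `results` variable are hypothetical placeholders, not part of the source above:

# Sketch: evaluating cached detections with CocoDataset.evaluate().
# The annotation path and `results` are placeholders; `results` must be a
# list with one entry per image, each entry a list of per-class (n, 5)
# arrays in [x1, y1, x2, y2, score] format.
from mmdet.datasets import CocoDataset

dataset = CocoDataset(
    ann_file='data/coco/annotations/instances_val2017.json',  # placeholder
    pipeline=[],
    test_mode=True)
metrics = dataset.evaluate(
    results,
    metric='bbox',
    classwise=True,  # also prints the per-category AP table
    metric_items=['mAP', 'mAP_50'])
print(metrics['bbox_mAP'], metrics['bbox_mAP_copypaste'])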
mmdetection
mmdetection-master/mmdet/datasets/coco_occluded.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp

import mmcv
import numpy as np
from mmcv.fileio import load
from mmcv.utils import print_log
from pycocotools import mask as coco_mask
from terminaltables import AsciiTable

from .builder import DATASETS
from .coco import CocoDataset


@DATASETS.register_module()
class OccludedSeparatedCocoDataset(CocoDataset):
    """COCO dataset with evaluation on separated and occluded masks, which
    were presented in the paper `A Tri-Layer Plugin to Improve Occluded
    Detection <https://arxiv.org/abs/2210.10046>`_.

    Separated COCO and Occluded COCO are automatically generated subsets of
    the COCO val dataset, collecting separated objects and partially occluded
    objects for a large variety of categories. In this way, we divide
    occlusion into two major categories: separated and partially occluded.

    - Separation: the target object segmentation mask is separated into
      distinct regions by the occluder.
    - Partial Occlusion: the target object is partially occluded but the
      segmentation mask is connected.

    These two new scalable real-image datasets serve to benchmark a model's
    capability to detect occluded objects of 80 common categories.

    Please cite the paper if you use this dataset:

    @article{zhan2022triocc,
        title={A Tri-Layer Plugin to Improve Occluded Detection},
        author={Zhan, Guanqi and Xie, Weidi and Zisserman, Andrew},
        journal={British Machine Vision Conference},
        year={2022}
    }

    Args:
        occluded_ann (str): Path to the occluded coco annotation file.
        separated_ann (str): Path to the separated coco annotation file.
    """  # noqa

    def __init__(
            self,
            *args,
            occluded_ann='https://www.robots.ox.ac.uk/~vgg/research/tpod/datasets/occluded_coco.pkl',  # noqa
            separated_ann='https://www.robots.ox.ac.uk/~vgg/research/tpod/datasets/separated_coco.pkl',  # noqa
            **kwargs):
        super().__init__(*args, **kwargs)

        # load from local file
        if osp.isfile(occluded_ann) and not osp.isabs(occluded_ann):
            occluded_ann = osp.join(self.data_root, occluded_ann)
        if osp.isfile(separated_ann) and not osp.isabs(separated_ann):
            separated_ann = osp.join(self.data_root, separated_ann)

        self.occluded_ann = load(occluded_ann)
        self.separated_ann = load(separated_ann)

    def evaluate(self,
                 results,
                 metric=[],
                 score_thr=0.3,
                 iou_thr=0.75,
                 **kwargs):
        """Occluded and separated mask evaluation in COCO protocol.

        Args:
            results (list[tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'. Defaults to [].
            score_thr (float): Score threshold of the detection masks.
                Defaults to 0.3.
            iou_thr (float): IoU threshold for the recall calculation.
                Defaults to 0.75.
        Returns:
            dict[str, float]: The recall of occluded and separated masks and
                COCO style evaluation metric.
        """
        coco_metric_res = super().evaluate(results, metric=metric, **kwargs)
        eval_res = self.evaluate_occluded_separated(results, score_thr,
                                                    iou_thr)
        coco_metric_res.update(eval_res)
        return coco_metric_res

    def evaluate_occluded_separated(self,
                                    results,
                                    score_thr=0.3,
                                    iou_thr=0.75):
        """Compute the recall of occluded and separated masks.

        Args:
            results (list[tuple]): Testing results of the dataset.
            score_thr (float): Score threshold of the detection masks.
                Defaults to 0.3.
            iou_thr (float): IoU threshold for the recall calculation.
                Defaults to 0.75.

        Returns:
            dict[str, float]: The recall of occluded and separated masks.
        """
        dict_det = {}
        print_log('processing detection results...')
        prog_bar = mmcv.ProgressBar(len(results))
        for i in range(len(results)):
            cur_img_name = self.data_infos[i]['filename']
            if cur_img_name not in dict_det.keys():
                dict_det[cur_img_name] = []
            for cat_id in range(len(results[i][1])):
                assert len(results[i][1][cat_id]) == len(results[i][0][cat_id])
                for instance_id in range(len(results[i][1][cat_id])):
                    cur_binary_mask = coco_mask.decode(
                        results[i][1][cat_id][instance_id])
                    cur_det_bbox = results[i][0][cat_id][instance_id][:4]
                    dict_det[cur_img_name].append([
                        results[i][0][cat_id][instance_id][4],
                        self.CLASSES[cat_id], cur_binary_mask, cur_det_bbox
                    ])
            dict_det[cur_img_name].sort(
                key=lambda x: (-x[0], x[3][0], x[3][1])
            )  # rank by confidence from high to low, avoid same confidence
            prog_bar.update()
        print_log('\ncomputing occluded mask recall...')
        occluded_correct_num, occluded_recall = self.compute_recall(
            dict_det,
            gt_ann=self.occluded_ann,
            score_thr=score_thr,
            iou_thr=iou_thr,
            is_occ=True)
        print_log(f'\nCOCO occluded mask recall: {occluded_recall:.2f}%')
        print_log(f'COCO occluded mask success num: {occluded_correct_num}')
        print_log('computing separated mask recall...')
        separated_correct_num, separated_recall = self.compute_recall(
            dict_det,
            gt_ann=self.separated_ann,
            score_thr=score_thr,
            iou_thr=iou_thr,
            is_occ=False)
        print_log(f'\nCOCO separated mask recall: {separated_recall:.2f}%')
        print_log(f'COCO separated mask success num: {separated_correct_num}')
        table_data = [
            ['mask type', 'recall', 'num correct'],
            ['occluded', f'{occluded_recall:.2f}%', occluded_correct_num],
            ['separated', f'{separated_recall:.2f}%', separated_correct_num]
        ]
        table = AsciiTable(table_data)
        print_log('\n' + table.table)
        return dict(
            occluded_recall=occluded_recall,
            separated_recall=separated_recall)

    def compute_recall(self,
                       result_dict,
                       gt_ann,
                       score_thr=0.3,
                       iou_thr=0.75,
                       is_occ=True):
        """Compute the recall of occluded or separated masks.

        Args:
            result_dict (dict): Detection results of the dataset, indexed by
                image filename as built in ``evaluate_occluded_separated``.
            gt_ann (list): Occluded or separated coco annotations.
            score_thr (float): Score threshold of the detection masks.
                Defaults to 0.3.
            iou_thr (float): IoU threshold for the recall calculation.
                Defaults to 0.75.
            is_occ (bool): Whether the annotation is occluded mask.
                Defaults to True.

        Returns:
            tuple: Number of correct masks and the recall.
""" correct = 0 prog_bar = mmcv.ProgressBar(len(gt_ann)) for iter_i in range(len(gt_ann)): cur_item = gt_ann[iter_i] cur_img_name = cur_item[0] cur_gt_bbox = cur_item[3] if is_occ: cur_gt_bbox = [ cur_gt_bbox[0], cur_gt_bbox[1], cur_gt_bbox[0] + cur_gt_bbox[2], cur_gt_bbox[1] + cur_gt_bbox[3] ] cur_gt_class = cur_item[1] cur_gt_mask = coco_mask.decode(cur_item[4]) assert cur_img_name in result_dict.keys() cur_detections = result_dict[cur_img_name] correct_flag = False for i in range(len(cur_detections)): cur_det_confidence = cur_detections[i][0] if cur_det_confidence < score_thr: break cur_det_class = cur_detections[i][1] if cur_det_class != cur_gt_class: continue cur_det_mask = cur_detections[i][2] cur_iou = self.mask_iou(cur_det_mask, cur_gt_mask) if cur_iou >= iou_thr: correct_flag = True break if correct_flag: correct += 1 prog_bar.update() recall = correct / len(gt_ann) * 100 return correct, recall def mask_iou(self, mask1, mask2): """Compute IoU between two masks.""" mask1_area = np.count_nonzero(mask1 == 1) mask2_area = np.count_nonzero(mask2 == 1) intersection = np.count_nonzero(np.logical_and(mask1 == 1, mask2 == 1)) iou = intersection / (mask1_area + mask2_area - intersection) return iou
9,166
40.668182
111
py
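mask_iou() above is plain binary-mask IoU: the count of shared foreground pixels over the count of the union. A small self-contained check of the formula (this snippet is illustrative and not part of the file above):

import numpy as np

mask1 = np.array([[1, 1], [0, 0]], dtype=np.uint8)
mask2 = np.array([[1, 0], [1, 0]], dtype=np.uint8)
intersection = np.count_nonzero(np.logical_and(mask1 == 1, mask2 == 1))  # 1
union = (np.count_nonzero(mask1 == 1) + np.count_nonzero(mask2 == 1)
         - intersection)  # 2 + 2 - 1 = 3
print(intersection / union)  # 0.333..., matching mask_iou(mask1, mask2)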
mmdetection
mmdetection-master/mmdet/datasets/coco_panoptic.py
# Copyright (c) OpenMMLab. All rights reserved. import itertools import os from collections import defaultdict import mmcv import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from mmdet.core import INSTANCE_OFFSET from .api_wrappers import COCO, pq_compute_multi_core from .builder import DATASETS from .coco import CocoDataset try: import panopticapi from panopticapi.evaluation import VOID from panopticapi.utils import id2rgb except ImportError: panopticapi = None id2rgb = None VOID = None __all__ = ['CocoPanopticDataset'] class COCOPanoptic(COCO): """This wrapper is for loading the panoptic style annotation file. The format is shown in the CocoPanopticDataset class. Args: annotation_file (str): Path of annotation file. """ def __init__(self, annotation_file=None): if panopticapi is None: raise RuntimeError( 'panopticapi is not installed, please install it by: ' 'pip install git+https://github.com/cocodataset/' 'panopticapi.git.') super(COCOPanoptic, self).__init__(annotation_file) def createIndex(self): # create index print('creating index...') # anns stores 'segment_id -> annotation' anns, cats, imgs = {}, {}, {} img_to_anns, cat_to_imgs = defaultdict(list), defaultdict(list) if 'annotations' in self.dataset: for ann, img_info in zip(self.dataset['annotations'], self.dataset['images']): img_info['segm_file'] = ann['file_name'] for seg_ann in ann['segments_info']: # to match with instance.json seg_ann['image_id'] = ann['image_id'] seg_ann['height'] = img_info['height'] seg_ann['width'] = img_info['width'] img_to_anns[ann['image_id']].append(seg_ann) # segment_id is not unique in coco dataset orz... if seg_ann['id'] in anns.keys(): anns[seg_ann['id']].append(seg_ann) else: anns[seg_ann['id']] = [seg_ann] if 'images' in self.dataset: for img in self.dataset['images']: imgs[img['id']] = img if 'categories' in self.dataset: for cat in self.dataset['categories']: cats[cat['id']] = cat if 'annotations' in self.dataset and 'categories' in self.dataset: for ann in self.dataset['annotations']: for seg_ann in ann['segments_info']: cat_to_imgs[seg_ann['category_id']].append(ann['image_id']) print('index created!') self.anns = anns self.imgToAnns = img_to_anns self.catToImgs = cat_to_imgs self.imgs = imgs self.cats = cats def load_anns(self, ids=[]): """Load anns with the specified ids. self.anns is a list of annotation lists instead of a list of annotations. Args: ids (int array): integer ids specifying anns Returns: anns (object array): loaded ann objects """ anns = [] if hasattr(ids, '__iter__') and hasattr(ids, '__len__'): # self.anns is a list of annotation lists instead of # a list of annotations for id in ids: anns += self.anns[id] return anns elif type(ids) == int: return self.anns[ids] @DATASETS.register_module() class CocoPanopticDataset(CocoDataset): """Coco dataset for Panoptic segmentation. The annotation format is shown as follows. The `ann` field is optional for testing. .. code-block:: none [ { 'filename': f'{image_id:012}.png', 'image_id':9 'segments_info': { [ { 'id': 8345037, (segment_id in panoptic png, convert from rgb) 'category_id': 51, 'iscrowd': 0, 'bbox': (x1, y1, w, h), 'area': 24315, 'segmentation': list,(coded mask) }, ... } } }, ... ] Args: ann_file (str): Panoptic segmentation annotation file path. pipeline (list[dict]): Processing pipeline. ins_ann_file (str): Instance segmentation annotation file path. Defaults to None. classes (str | Sequence[str], optional): Specify classes to load. If is None, ``cls.CLASSES`` will be used. 
Defaults to None.
        data_root (str, optional): Data root for ``ann_file``,
            ``ins_ann_file``, ``img_prefix``, ``seg_prefix``,
            ``proposal_file`` if specified. Defaults to None.
        img_prefix (str, optional): Prefix of path to images. Defaults to ''.
        seg_prefix (str, optional): Prefix of path to segmentation files.
            Defaults to None.
        proposal_file (str, optional): Path to proposal file. Defaults to
            None.
        test_mode (bool, optional): If set True, annotation will not be
            loaded. Defaults to False.
        filter_empty_gt (bool, optional): If set true, images without
            bounding boxes of the dataset's classes will be filtered out.
            This option only works when `test_mode=False`, i.e., we never
            filter images during tests. Defaults to True.
        file_client_args (:obj:`mmcv.ConfigDict` | dict): file client args.
            Defaults to dict(backend='disk').
    """

    CLASSES = [
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush', 'banner',
        'blanket', 'bridge', 'cardboard', 'counter', 'curtain', 'door-stuff',
        'floor-wood', 'flower', 'fruit', 'gravel', 'house', 'light',
        'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
        'railroad', 'river', 'road', 'roof', 'sand', 'sea', 'shelf', 'snow',
        'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile',
        'wall-wood', 'water-other', 'window-blind', 'window-other',
        'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged',
        'cabinet-merged', 'table-merged', 'floor-other-merged',
        'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged',
        'paper-merged', 'food-other-merged', 'building-other-merged',
        'rock-merged', 'wall-other-merged', 'rug-merged'
    ]

    THING_CLASSES = [
        'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train',
        'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
        'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep',
        'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
        'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard',
        'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard',
        'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup', 'fork',
        'knife', 'spoon', 'bowl', 'banana', 'apple', 'sandwich', 'orange',
        'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv',
        'laptop', 'mouse', 'remote', 'keyboard', 'cell phone', 'microwave',
        'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase',
        'scissors', 'teddy bear', 'hair drier', 'toothbrush'
    ]

    STUFF_CLASSES = [
        'banner', 'blanket', 'bridge', 'cardboard', 'counter', 'curtain',
        'door-stuff', 'floor-wood', 'flower', 'fruit', 'gravel', 'house',
        'light', 'mirror-stuff', 'net', 'pillow', 'platform', 'playingfield',
        'railroad', 'river', 'road', 'roof', 'sand',
'sea', 'shelf', 'snow', 'stairs', 'tent', 'towel', 'wall-brick', 'wall-stone', 'wall-tile', 'wall-wood', 'water-other', 'window-blind', 'window-other', 'tree-merged', 'fence-merged', 'ceiling-merged', 'sky-other-merged', 'cabinet-merged', 'table-merged', 'floor-other-merged', 'pavement-merged', 'mountain-merged', 'grass-merged', 'dirt-merged', 'paper-merged', 'food-other-merged', 'building-other-merged', 'rock-merged', 'wall-other-merged', 'rug-merged' ] PALETTE = [(220, 20, 60), (119, 11, 32), (0, 0, 142), (0, 0, 230), (106, 0, 228), (0, 60, 100), (0, 80, 100), (0, 0, 70), (0, 0, 192), (250, 170, 30), (100, 170, 30), (220, 220, 0), (175, 116, 175), (250, 0, 30), (165, 42, 42), (255, 77, 255), (0, 226, 252), (182, 182, 255), (0, 82, 0), (120, 166, 157), (110, 76, 0), (174, 57, 255), (199, 100, 0), (72, 0, 118), (255, 179, 240), (0, 125, 92), (209, 0, 151), (188, 208, 182), (0, 220, 176), (255, 99, 164), (92, 0, 73), (133, 129, 255), (78, 180, 255), (0, 228, 0), (174, 255, 243), (45, 89, 255), (134, 134, 103), (145, 148, 174), (255, 208, 186), (197, 226, 255), (171, 134, 1), (109, 63, 54), (207, 138, 255), (151, 0, 95), (9, 80, 61), (84, 105, 51), (74, 65, 105), (166, 196, 102), (208, 195, 210), (255, 109, 65), (0, 143, 149), (179, 0, 194), (209, 99, 106), (5, 121, 0), (227, 255, 205), (147, 186, 208), (153, 69, 1), (3, 95, 161), (163, 255, 0), (119, 0, 170), (0, 182, 199), (0, 165, 120), (183, 130, 88), (95, 32, 0), (130, 114, 135), (110, 129, 133), (166, 74, 118), (219, 142, 185), (79, 210, 114), (178, 90, 62), (65, 70, 15), (127, 167, 115), (59, 105, 106), (142, 108, 45), (196, 172, 0), (95, 54, 80), (128, 76, 255), (201, 57, 1), (246, 0, 122), (191, 162, 208), (255, 255, 128), (147, 211, 203), (150, 100, 100), (168, 171, 172), (146, 112, 198), (210, 170, 100), (92, 136, 89), (218, 88, 184), (241, 129, 0), (217, 17, 255), (124, 74, 181), (70, 70, 70), (255, 228, 255), (154, 208, 0), (193, 0, 92), (76, 91, 113), (255, 180, 195), (106, 154, 176), (230, 150, 140), (60, 143, 255), (128, 64, 128), (92, 82, 55), (254, 212, 124), (73, 77, 174), (255, 160, 98), (255, 255, 255), (104, 84, 109), (169, 164, 131), (225, 199, 255), (137, 54, 74), (135, 158, 223), (7, 246, 231), (107, 255, 200), (58, 41, 149), (183, 121, 142), (255, 73, 97), (107, 142, 35), (190, 153, 153), (146, 139, 141), (70, 130, 180), (134, 199, 156), (209, 226, 140), (96, 36, 108), (96, 96, 96), (64, 170, 64), (152, 251, 152), (208, 229, 228), (206, 186, 171), (152, 161, 64), (116, 112, 0), (0, 114, 143), (102, 102, 156), (250, 141, 255)] def __init__(self, ann_file, pipeline, ins_ann_file=None, classes=None, data_root=None, img_prefix='', seg_prefix=None, proposal_file=None, test_mode=False, filter_empty_gt=True, file_client_args=dict(backend='disk')): super().__init__( ann_file, pipeline, classes=classes, data_root=data_root, img_prefix=img_prefix, seg_prefix=seg_prefix, proposal_file=proposal_file, test_mode=test_mode, filter_empty_gt=filter_empty_gt, file_client_args=file_client_args) self.ins_ann_file = ins_ann_file def load_annotations(self, ann_file): """Load annotation from COCO Panoptic style annotation file. Args: ann_file (str): Path of annotation file. Returns: list[dict]: Annotation info from COCO api. 
""" self.coco = COCOPanoptic(ann_file) self.cat_ids = self.coco.get_cat_ids() self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.categories = self.coco.cats self.img_ids = self.coco.get_img_ids() data_infos = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] info['filename'] = info['file_name'] info['segm_file'] = info['filename'].replace('jpg', 'png') data_infos.append(info) return data_infos def get_ann_info(self, idx): """Get COCO annotation by index. Args: idx (int): Index of data. Returns: dict: Annotation info of specified index. """ img_id = self.data_infos[idx]['id'] ann_ids = self.coco.get_ann_ids(img_ids=[img_id]) ann_info = self.coco.load_anns(ann_ids) # filter out unmatched images ann_info = [i for i in ann_info if i['image_id'] == img_id] return self._parse_ann_info(self.data_infos[idx], ann_info) def _parse_ann_info(self, img_info, ann_info): """Parse annotations and load panoptic ground truths. Args: img_info (int): Image info of an image. ann_info (list[dict]): Annotation info of an image. Returns: dict: A dict containing the following keys: bboxes, bboxes_ignore, labels, masks, seg_map. """ gt_bboxes = [] gt_labels = [] gt_bboxes_ignore = [] gt_mask_infos = [] for i, ann in enumerate(ann_info): x1, y1, w, h = ann['bbox'] if ann['area'] <= 0 or w < 1 or h < 1: continue bbox = [x1, y1, x1 + w, y1 + h] category_id = ann['category_id'] contiguous_cat_id = self.cat2label[category_id] is_thing = self.coco.load_cats(ids=category_id)[0]['isthing'] if is_thing: is_crowd = ann.get('iscrowd', False) if not is_crowd: gt_bboxes.append(bbox) gt_labels.append(contiguous_cat_id) else: gt_bboxes_ignore.append(bbox) is_thing = False mask_info = { 'id': ann['id'], 'category': contiguous_cat_id, 'is_thing': is_thing } gt_mask_infos.append(mask_info) if gt_bboxes: gt_bboxes = np.array(gt_bboxes, dtype=np.float32) gt_labels = np.array(gt_labels, dtype=np.int64) else: gt_bboxes = np.zeros((0, 4), dtype=np.float32) gt_labels = np.array([], dtype=np.int64) if gt_bboxes_ignore: gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32) else: gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32) ann = dict( bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore, masks=gt_mask_infos, seg_map=img_info['segm_file']) return ann def _filter_imgs(self, min_size=32): """Filter images too small or without ground truths.""" ids_with_ann = [] # check whether images have legal thing annotations. 
for lists in self.coco.anns.values(): for item in lists: category_id = item['category_id'] is_thing = self.coco.load_cats(ids=category_id)[0]['isthing'] if not is_thing: continue ids_with_ann.append(item['image_id']) ids_with_ann = set(ids_with_ann) valid_inds = [] valid_img_ids = [] for i, img_info in enumerate(self.data_infos): img_id = self.img_ids[i] if self.filter_empty_gt and img_id not in ids_with_ann: continue if min(img_info['width'], img_info['height']) >= min_size: valid_inds.append(i) valid_img_ids.append(img_id) self.img_ids = valid_img_ids return valid_inds def _pan2json(self, results, outfile_prefix): """Convert panoptic results to COCO panoptic json style.""" label2cat = dict((v, k) for (k, v) in self.cat2label.items()) pred_annotations = [] outdir = os.path.join(os.path.dirname(outfile_prefix), 'panoptic') for idx in range(len(self)): img_id = self.img_ids[idx] segm_file = self.data_infos[idx]['segm_file'] pan = results[idx] pan_labels = np.unique(pan) segm_info = [] for pan_label in pan_labels: sem_label = pan_label % INSTANCE_OFFSET # We reserve the length of self.CLASSES for VOID label if sem_label == len(self.CLASSES): continue # convert sem_label to json label cat_id = label2cat[sem_label] is_thing = self.categories[cat_id]['isthing'] mask = pan == pan_label area = mask.sum() segm_info.append({ 'id': int(pan_label), 'category_id': cat_id, 'isthing': is_thing, 'area': int(area) }) # evaluation script uses 0 for VOID label. pan[pan % INSTANCE_OFFSET == len(self.CLASSES)] = VOID pan = id2rgb(pan).astype(np.uint8) mmcv.imwrite(pan[:, :, ::-1], os.path.join(outdir, segm_file)) record = { 'image_id': img_id, 'segments_info': segm_info, 'file_name': segm_file } pred_annotations.append(record) pan_json_results = dict(annotations=pred_annotations) return pan_json_results def results2json(self, results, outfile_prefix): """Dump the results to a COCO style json file. There are 4 types of results: proposals, bbox predictions, mask predictions, panoptic segmentation predictions, and they have different data types. This method will automatically recognize the type, and dump them to json files. .. code-block:: none [ { 'pan_results': np.array, # shape (h, w) # ins_results which includes bboxes and RLE encoded masks # is optional. 'ins_results': (list[np.array], list[list[str]]) }, ... ] Args: results (list[dict]): Testing results of the dataset. outfile_prefix (str): The filename prefix of the json files. If the prefix is "somepath/xxx", the json files will be named "somepath/xxx.panoptic.json", "somepath/xxx.bbox.json", "somepath/xxx.segm.json" Returns: dict[str: str]: Possible keys are "panoptic", "bbox", "segm", \ "proposal", and values are corresponding filenames. 
""" result_files = dict() # panoptic segmentation results if 'pan_results' in results[0]: pan_results = [result['pan_results'] for result in results] pan_json_results = self._pan2json(pan_results, outfile_prefix) result_files['panoptic'] = f'{outfile_prefix}.panoptic.json' mmcv.dump(pan_json_results, result_files['panoptic']) # instance segmentation results if 'ins_results' in results[0]: ins_results = [result['ins_results'] for result in results] bbox_json_results, segm_json_results = self._segm2json(ins_results) result_files['bbox'] = f'{outfile_prefix}.bbox.json' result_files['proposal'] = f'{outfile_prefix}.bbox.json' result_files['segm'] = f'{outfile_prefix}.segm.json' mmcv.dump(bbox_json_results, result_files['bbox']) mmcv.dump(segm_json_results, result_files['segm']) return result_files def evaluate_pan_json(self, result_files, outfile_prefix, logger=None, classwise=False, nproc=32): """Evaluate PQ according to the panoptic results json file.""" imgs = self.coco.imgs gt_json = self.coco.img_ann_map # image to annotations gt_json = [{ 'image_id': k, 'segments_info': v, 'file_name': imgs[k]['segm_file'] } for k, v in gt_json.items()] pred_json = mmcv.load(result_files['panoptic']) pred_json = dict( (el['image_id'], el) for el in pred_json['annotations']) # match the gt_anns and pred_anns in the same image matched_annotations_list = [] for gt_ann in gt_json: img_id = gt_ann['image_id'] if img_id not in pred_json.keys(): raise Exception('no prediction for the image' ' with id: {}'.format(img_id)) matched_annotations_list.append((gt_ann, pred_json[img_id])) gt_folder = self.seg_prefix pred_folder = os.path.join(os.path.dirname(outfile_prefix), 'panoptic') pq_stat = pq_compute_multi_core( matched_annotations_list, gt_folder, pred_folder, self.categories, self.file_client, nproc=nproc) metrics = [('All', None), ('Things', True), ('Stuff', False)] pq_results = {} for name, isthing in metrics: pq_results[name], classwise_results = pq_stat.pq_average( self.categories, isthing=isthing) if name == 'All': pq_results['classwise'] = classwise_results classwise_results = None if classwise: classwise_results = { k: v for k, v in zip(self.CLASSES, pq_results['classwise'].values()) } print_panoptic_table(pq_results, classwise_results, logger=logger) results = parse_pq_results(pq_results) results['PQ_copypaste'] = ( f'{results["PQ"]:.3f} {results["SQ"]:.3f} ' f'{results["RQ"]:.3f} ' f'{results["PQ_th"]:.3f} {results["SQ_th"]:.3f} ' f'{results["RQ_th"]:.3f} ' f'{results["PQ_st"]:.3f} {results["SQ_st"]:.3f} ' f'{results["RQ_st"]:.3f}') return results def evaluate(self, results, metric='PQ', logger=None, jsonfile_prefix=None, classwise=False, nproc=32, **kwargs): """Evaluation in COCO Panoptic protocol. Args: results (list[dict]): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. 'PQ', 'bbox', 'segm', 'proposal' are supported. 'pq' will be regarded as 'PQ. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. jsonfile_prefix (str | None): The prefix of json files. It includes the file path and the prefix of filename, e.g., "a/b/prefix". If not specified, a temp file will be created. Default: None. classwise (bool): Whether to print classwise evaluation results. Default: False. nproc (int): Number of processes for panoptic quality computing. Defaults to 32. When `nproc` exceeds the number of cpu cores, the number of cpu cores is used. Returns: dict[str, float]: COCO Panoptic style evaluation metric. 
""" metrics = metric if isinstance(metric, list) else [metric] # Compatible with lowercase 'pq' metrics = ['PQ' if metric == 'pq' else metric for metric in metrics] allowed_metrics = ['PQ', 'bbox', 'segm', 'proposal'] for metric in metrics: if metric not in allowed_metrics: raise KeyError(f'metric {metric} is not supported') result_files, tmp_dir = self.format_results(results, jsonfile_prefix) eval_results = {} outfile_prefix = os.path.join(tmp_dir.name, 'results') \ if tmp_dir is not None else jsonfile_prefix if 'PQ' in metrics: eval_pan_results = self.evaluate_pan_json( result_files, outfile_prefix, logger, classwise, nproc=nproc) eval_results.update(eval_pan_results) metrics.remove('PQ') if (('bbox' in metrics) or ('segm' in metrics) or ('proposal' in metrics)): assert 'ins_results' in results[0], 'instance segmentation' \ 'results are absent from results' assert self.ins_ann_file is not None, 'Annotation '\ 'file for instance segmentation or object detection ' \ 'shuold not be None' coco_gt = COCO(self.ins_ann_file) panoptic_cat_ids = self.cat_ids self.cat_ids = coco_gt.get_cat_ids(cat_names=self.THING_CLASSES) eval_ins_results = self.evaluate_det_segm(results, result_files, coco_gt, metrics, logger, classwise, **kwargs) self.cat_ids = panoptic_cat_ids eval_results.update(eval_ins_results) if tmp_dir is not None: tmp_dir.cleanup() return eval_results def parse_pq_results(pq_results): """Parse the Panoptic Quality results.""" result = dict() result['PQ'] = 100 * pq_results['All']['pq'] result['SQ'] = 100 * pq_results['All']['sq'] result['RQ'] = 100 * pq_results['All']['rq'] result['PQ_th'] = 100 * pq_results['Things']['pq'] result['SQ_th'] = 100 * pq_results['Things']['sq'] result['RQ_th'] = 100 * pq_results['Things']['rq'] result['PQ_st'] = 100 * pq_results['Stuff']['pq'] result['SQ_st'] = 100 * pq_results['Stuff']['sq'] result['RQ_st'] = 100 * pq_results['Stuff']['rq'] return result def print_panoptic_table(pq_results, classwise_results=None, logger=None): """Print the panoptic evaluation results table. Args: pq_results(dict): The Panoptic Quality results. classwise_results(dict | None): The classwise Panoptic Quality results. The keys are class names and the values are metrics. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. """ headers = ['', 'PQ', 'SQ', 'RQ', 'categories'] data = [headers] for name in ['All', 'Things', 'Stuff']: numbers = [ f'{(pq_results[name][k] * 100):0.3f}' for k in ['pq', 'sq', 'rq'] ] row = [name] + numbers + [pq_results[name]['n']] data.append(row) table = AsciiTable(data) print_log('Panoptic Evaluation Results:\n' + table.table, logger=logger) if classwise_results is not None: class_metrics = [(name, ) + tuple(f'{(metrics[k] * 100):0.3f}' for k in ['pq', 'sq', 'rq']) for name, metrics in classwise_results.items()] num_columns = min(8, len(class_metrics) * 4) results_flatten = list(itertools.chain(*class_metrics)) headers = ['category', 'PQ', 'SQ', 'RQ'] * (num_columns // 4) results_2d = itertools.zip_longest( *[results_flatten[i::num_columns] for i in range(num_columns)]) data = [headers] data += [result for result in results_2d] table = AsciiTable(data) print_log( 'Classwise Panoptic Evaluation Results:\n' + table.table, logger=logger)
29,277
41.248196
79
py
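_pan2json() above recovers the semantic label from a panoptic pixel id with `pan_label % INSTANCE_OFFSET`, i.e. mmdet's panoptic heads pack (instance index, class index) into a single integer per pixel. A small illustration of that decode; the concrete ids here are invented, and the `instance * INSTANCE_OFFSET + class` packing is the assumption implied by the modulo above:

from mmdet.core import INSTANCE_OFFSET

cls_idx, inst_idx = 17, 3  # hypothetical thing class and instance index
pan_label = inst_idx * INSTANCE_OFFSET + cls_idx  # assumed packing
# Semantic label, exactly as decoded in _pan2json (holds whenever
# cls_idx < INSTANCE_OFFSET, true for the 133 COCO panoptic classes).
assert pan_label % INSTANCE_OFFSET == cls_idx
# Instance index, under the same packing assumption.
assert pan_label // INSTANCE_OFFSET == inst_idx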
mmdetection
mmdetection-master/mmdet/datasets/custom.py
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import warnings from collections import OrderedDict import mmcv import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from torch.utils.data import Dataset from mmdet.core import eval_map, eval_recalls from .builder import DATASETS from .pipelines import Compose @DATASETS.register_module() class CustomDataset(Dataset): """Custom dataset for detection. The annotation format is shown as follows. The `ann` field is optional for testing. .. code-block:: none [ { 'filename': 'a.jpg', 'width': 1280, 'height': 720, 'ann': { 'bboxes': <np.ndarray> (n, 4) in (x1, y1, x2, y2) order. 'labels': <np.ndarray> (n, ), 'bboxes_ignore': <np.ndarray> (k, 4), (optional field) 'labels_ignore': <np.ndarray> (k, 4) (optional field) } }, ... ] Args: ann_file (str): Annotation file path. pipeline (list[dict]): Processing pipeline. classes (str | Sequence[str], optional): Specify classes to load. If is None, ``cls.CLASSES`` will be used. Default: None. data_root (str, optional): Data root for ``ann_file``, ``img_prefix``, ``seg_prefix``, ``proposal_file`` if specified. test_mode (bool, optional): If set True, annotation will not be loaded. filter_empty_gt (bool, optional): If set true, images without bounding boxes of the dataset's classes will be filtered out. This option only works when `test_mode=False`, i.e., we never filter images during tests. """ CLASSES = None PALETTE = None def __init__(self, ann_file, pipeline, classes=None, data_root=None, img_prefix='', seg_prefix=None, seg_suffix='.png', proposal_file=None, test_mode=False, filter_empty_gt=True, file_client_args=dict(backend='disk')): self.ann_file = ann_file self.data_root = data_root self.img_prefix = img_prefix self.seg_prefix = seg_prefix self.seg_suffix = seg_suffix self.proposal_file = proposal_file self.test_mode = test_mode self.filter_empty_gt = filter_empty_gt self.file_client = mmcv.FileClient(**file_client_args) self.CLASSES = self.get_classes(classes) # join paths if data_root is specified if self.data_root is not None: if not osp.isabs(self.ann_file): self.ann_file = osp.join(self.data_root, self.ann_file) if not (self.img_prefix is None or osp.isabs(self.img_prefix)): self.img_prefix = osp.join(self.data_root, self.img_prefix) if not (self.seg_prefix is None or osp.isabs(self.seg_prefix)): self.seg_prefix = osp.join(self.data_root, self.seg_prefix) if not (self.proposal_file is None or osp.isabs(self.proposal_file)): self.proposal_file = osp.join(self.data_root, self.proposal_file) # load annotations (and proposals) if hasattr(self.file_client, 'get_local_path'): with self.file_client.get_local_path(self.ann_file) as local_path: self.data_infos = self.load_annotations(local_path) else: warnings.warn( 'The used MMCV version does not have get_local_path. ' f'We treat the {self.ann_file} as local paths and it ' 'might cause errors if the path is not a local path. ' 'Please use MMCV>= 1.3.16 if you meet errors.') self.data_infos = self.load_annotations(self.ann_file) if self.proposal_file is not None: if hasattr(self.file_client, 'get_local_path'): with self.file_client.get_local_path( self.proposal_file) as local_path: self.proposals = self.load_proposals(local_path) else: warnings.warn( 'The used MMCV version does not have get_local_path. ' f'We treat the {self.ann_file} as local paths and it ' 'might cause errors if the path is not a local path. 
' 'Please use MMCV>= 1.3.16 if you meet errors.')
                self.proposals = self.load_proposals(self.proposal_file)
        else:
            self.proposals = None

        # filter images too small and containing no annotations
        if not test_mode:
            valid_inds = self._filter_imgs()
            self.data_infos = [self.data_infos[i] for i in valid_inds]
            if self.proposals is not None:
                self.proposals = [self.proposals[i] for i in valid_inds]
            # set group flag for the sampler
            self._set_group_flag()

        # processing pipeline
        self.pipeline = Compose(pipeline)

    def __len__(self):
        """Total number of samples of data."""
        return len(self.data_infos)

    def load_annotations(self, ann_file):
        """Load annotation from annotation file."""
        return mmcv.load(ann_file)

    def load_proposals(self, proposal_file):
        """Load proposal from proposal file."""
        return mmcv.load(proposal_file)

    def get_ann_info(self, idx):
        """Get annotation by index.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Annotation info of specified index.
        """

        return self.data_infos[idx]['ann']

    def get_cat_ids(self, idx):
        """Get category ids by index.

        Args:
            idx (int): Index of data.

        Returns:
            list[int]: All categories in the image of specified index.
        """
        # np.int is a deprecated alias removed in recent numpy; use np.int64
        return self.data_infos[idx]['ann']['labels'].astype(np.int64).tolist()

    def pre_pipeline(self, results):
        """Prepare results dict for pipeline."""
        results['img_prefix'] = self.img_prefix
        results['seg_prefix'] = self.seg_prefix
        results['proposal_file'] = self.proposal_file
        results['bbox_fields'] = []
        results['mask_fields'] = []
        results['seg_fields'] = []

    def _filter_imgs(self, min_size=32):
        """Filter images too small."""
        if self.filter_empty_gt:
            warnings.warn(
                'CustomDataset does not support filtering empty gt images.')
        valid_inds = []
        for i, img_info in enumerate(self.data_infos):
            if min(img_info['width'], img_info['height']) >= min_size:
                valid_inds.append(i)
        return valid_inds

    def _set_group_flag(self):
        """Set flag according to image aspect ratio.

        Images with aspect ratio greater than 1 will be set as group 1,
        otherwise group 0.
        """
        self.flag = np.zeros(len(self), dtype=np.uint8)
        for i in range(len(self)):
            img_info = self.data_infos[i]
            if img_info['width'] / img_info['height'] > 1:
                self.flag[i] = 1

    def _rand_another(self, idx):
        """Get another random index from the same group as the given index."""
        pool = np.where(self.flag == self.flag[idx])[0]
        return np.random.choice(pool)

    def __getitem__(self, idx):
        """Get training/test data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training/test data (with annotation if `test_mode` is set \
                True).
        """

        if self.test_mode:
            return self.prepare_test_img(idx)
        while True:
            data = self.prepare_train_img(idx)
            if data is None:
                idx = self._rand_another(idx)
                continue
            return data

    def prepare_train_img(self, idx):
        """Get training data and annotations after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Training data and annotation after pipeline with new keys \
                introduced by pipeline.
        """

        img_info = self.data_infos[idx]
        ann_info = self.get_ann_info(idx)
        results = dict(img_info=img_info, ann_info=ann_info)
        if self.proposals is not None:
            results['proposals'] = self.proposals[idx]
        self.pre_pipeline(results)
        return self.pipeline(results)

    def prepare_test_img(self, idx):
        """Get testing data after pipeline.

        Args:
            idx (int): Index of data.

        Returns:
            dict: Testing data after pipeline with new keys introduced by \
                pipeline.
""" img_info = self.data_infos[idx] results = dict(img_info=img_info) if self.proposals is not None: results['proposals'] = self.proposals[idx] self.pre_pipeline(results) return self.pipeline(results) @classmethod def get_classes(cls, classes=None): """Get class names of current dataset. Args: classes (Sequence[str] | str | None): If classes is None, use default CLASSES defined by builtin dataset. If classes is a string, take it as a file name. The file contains the name of classes where each line contains one class name. If classes is a tuple or list, override the CLASSES defined by the dataset. Returns: tuple[str] or list[str]: Names of categories of the dataset. """ if classes is None: return cls.CLASSES if isinstance(classes, str): # take it as a file path class_names = mmcv.list_from_file(classes) elif isinstance(classes, (tuple, list)): class_names = classes else: raise ValueError(f'Unsupported type {type(classes)} of classes.') return class_names def get_cat2imgs(self): """Get a dict with class as key and img_ids as values, which will be used in :class:`ClassAwareSampler`. Returns: dict[list]: A dict of per-label image list, the item of the dict indicates a label index, corresponds to the image index that contains the label. """ if self.CLASSES is None: raise ValueError('self.CLASSES can not be None') # sort the label index cat2imgs = {i: [] for i in range(len(self.CLASSES))} for i in range(len(self)): cat_ids = set(self.get_cat_ids(i)) for cat in cat_ids: cat2imgs[cat].append(i) return cat2imgs def format_results(self, results, **kwargs): """Place holder to format result to dataset specific output.""" def evaluate(self, results, metric='mAP', logger=None, proposal_nums=(100, 300, 1000), iou_thr=0.5, scale_ranges=None): """Evaluate the dataset. Args: results (list): Testing results of the dataset. metric (str | list[str]): Metrics to be evaluated. logger (logging.Logger | None | str): Logger used for printing related information during evaluation. Default: None. proposal_nums (Sequence[int]): Proposal number used for evaluating recalls, such as recall@100, recall@1000. Default: (100, 300, 1000). iou_thr (float | list[float]): IoU threshold. Default: 0.5. scale_ranges (list[tuple] | None): Scale ranges for evaluating mAP. Default: None. 
""" if not isinstance(metric, str): assert len(metric) == 1 metric = metric[0] allowed_metrics = ['mAP', 'recall'] if metric not in allowed_metrics: raise KeyError(f'metric {metric} is not supported') annotations = [self.get_ann_info(i) for i in range(len(self))] eval_results = OrderedDict() iou_thrs = [iou_thr] if isinstance(iou_thr, float) else iou_thr if metric == 'mAP': assert isinstance(iou_thrs, list) mean_aps = [] for iou_thr in iou_thrs: print_log(f'\n{"-" * 15}iou_thr: {iou_thr}{"-" * 15}') mean_ap, _ = eval_map( results, annotations, scale_ranges=scale_ranges, iou_thr=iou_thr, dataset=self.CLASSES, logger=logger) mean_aps.append(mean_ap) eval_results[f'AP{int(iou_thr * 100):02d}'] = round(mean_ap, 3) eval_results['mAP'] = sum(mean_aps) / len(mean_aps) elif metric == 'recall': gt_bboxes = [ann['bboxes'] for ann in annotations] recalls = eval_recalls( gt_bboxes, results, proposal_nums, iou_thr, logger=logger) for i, num in enumerate(proposal_nums): for j, iou in enumerate(iou_thrs): eval_results[f'recall@{num}@{iou}'] = recalls[i, j] if recalls.shape[1] > 1: ar = recalls.mean(axis=1) for i, num in enumerate(proposal_nums): eval_results[f'AR@{num}'] = ar[i] return eval_results def __repr__(self): """Print the number of instance number.""" dataset_type = 'Test' if self.test_mode else 'Train' result = (f'\n{self.__class__.__name__} {dataset_type} dataset ' f'with number of images {len(self)}, ' f'and instance counts: \n') if self.CLASSES is None: result += 'Category names are not provided. \n' return result instance_count = np.zeros(len(self.CLASSES) + 1).astype(int) # count the instance number in each image for idx in range(len(self)): label = self.get_ann_info(idx)['labels'] unique, counts = np.unique(label, return_counts=True) if len(unique) > 0: # add the occurrence number to each class instance_count[unique] += counts else: # background is the last index instance_count[-1] += 1 # create a table with category count table_data = [['category', 'count'] * 5] row_data = [] for cls, count in enumerate(instance_count): if cls < len(self.CLASSES): row_data += [f'{cls} [{self.CLASSES[cls]}]', f'{count}'] else: # add the background number row_data += ['-1 background', f'{count}'] if len(row_data) == 10: table_data.append(row_data) row_data = [] if len(row_data) >= 2: if row_data[-1] == '0': row_data = row_data[:-2] if len(row_data) >= 2: table_data.append([]) table_data.append(row_data) table = AsciiTable(table_data) result += table.table return result
15,497
36.525424
79
py
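CustomDataset consumes the middle-format annotations shown in its class docstring. A minimal hand-built example of one such entry (all values are made up for illustration):

import numpy as np

data_info = dict(
    filename='a.jpg',
    width=1280,
    height=720,
    ann=dict(
        # (n, 4) boxes in (x1, y1, x2, y2) order
        bboxes=np.array([[10., 20., 100., 200.]], dtype=np.float32),
        # (n, ) class indices
        labels=np.array([0], dtype=np.int64),
        # optional field, empty here
        bboxes_ignore=np.zeros((0, 4), dtype=np.float32)))
# Since width / height > 1, _set_group_flag() would assign this image
# to aspect-ratio group 1 for the sampler.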
mmdetection
mmdetection-master/mmdet/datasets/dataset_wrappers.py
# Copyright (c) OpenMMLab. All rights reserved. import bisect import collections import copy import math from collections import defaultdict import numpy as np from mmcv.utils import build_from_cfg, print_log from torch.utils.data.dataset import ConcatDataset as _ConcatDataset from .builder import DATASETS, PIPELINES from .coco import CocoDataset @DATASETS.register_module() class ConcatDataset(_ConcatDataset): """A wrapper of concatenated dataset. Same as :obj:`torch.utils.data.dataset.ConcatDataset`, but concat the group flag for image aspect ratio. Args: datasets (list[:obj:`Dataset`]): A list of datasets. separate_eval (bool): Whether to evaluate the results separately if it is used as validation dataset. Defaults to True. """ def __init__(self, datasets, separate_eval=True): super(ConcatDataset, self).__init__(datasets) self.CLASSES = datasets[0].CLASSES self.PALETTE = getattr(datasets[0], 'PALETTE', None) self.separate_eval = separate_eval if not separate_eval: if any([isinstance(ds, CocoDataset) for ds in datasets]): raise NotImplementedError( 'Evaluating concatenated CocoDataset as a whole is not' ' supported! Please set "separate_eval=True"') elif len(set([type(ds) for ds in datasets])) != 1: raise NotImplementedError( 'All the datasets should have same types') if hasattr(datasets[0], 'flag'): flags = [] for i in range(0, len(datasets)): flags.append(datasets[i].flag) self.flag = np.concatenate(flags) def get_cat_ids(self, idx): """Get category ids of concatenated dataset by index. Args: idx (int): Index of data. Returns: list[int]: All categories in the image of specified index. """ if idx < 0: if -idx > len(self): raise ValueError( 'absolute value of index should not exceed dataset length') idx = len(self) + idx dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) if dataset_idx == 0: sample_idx = idx else: sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] return self.datasets[dataset_idx].get_cat_ids(sample_idx) def get_ann_info(self, idx): """Get annotation of concatenated dataset by index. Args: idx (int): Index of data. Returns: dict: Annotation info of specified index. """ if idx < 0: if -idx > len(self): raise ValueError( 'absolute value of index should not exceed dataset length') idx = len(self) + idx dataset_idx = bisect.bisect_right(self.cumulative_sizes, idx) if dataset_idx == 0: sample_idx = idx else: sample_idx = idx - self.cumulative_sizes[dataset_idx - 1] return self.datasets[dataset_idx].get_ann_info(sample_idx) def evaluate(self, results, logger=None, **kwargs): """Evaluate the results. Args: results (list[list | tuple]): Testing results of the dataset. logger (logging.Logger | str | None): Logger used for printing related information during evaluation. Default: None. Returns: dict[str: float]: AP results of the total dataset or each separate dataset if `self.separate_eval=True`. """ assert len(results) == self.cumulative_sizes[-1], \ ('Dataset and results have different sizes: ' f'{self.cumulative_sizes[-1]} v.s. 
{len(results)}') # Check whether all the datasets support evaluation for dataset in self.datasets: assert hasattr(dataset, 'evaluate'), \ f'{type(dataset)} does not implement evaluate function' if self.separate_eval: dataset_idx = -1 total_eval_results = dict() for size, dataset in zip(self.cumulative_sizes, self.datasets): start_idx = 0 if dataset_idx == -1 else \ self.cumulative_sizes[dataset_idx] end_idx = self.cumulative_sizes[dataset_idx + 1] results_per_dataset = results[start_idx:end_idx] print_log( f'\nEvaluating {dataset.ann_file} with ' f'{len(results_per_dataset)} images now', logger=logger) eval_results_per_dataset = dataset.evaluate( results_per_dataset, logger=logger, **kwargs) dataset_idx += 1 for k, v in eval_results_per_dataset.items(): total_eval_results.update({f'{dataset_idx}_{k}': v}) return total_eval_results elif any([isinstance(ds, CocoDataset) for ds in self.datasets]): raise NotImplementedError( 'Evaluating concatenated CocoDataset as a whole is not' ' supported! Please set "separate_eval=True"') elif len(set([type(ds) for ds in self.datasets])) != 1: raise NotImplementedError( 'All the datasets should have same types') else: original_data_infos = self.datasets[0].data_infos self.datasets[0].data_infos = sum( [dataset.data_infos for dataset in self.datasets], []) eval_results = self.datasets[0].evaluate( results, logger=logger, **kwargs) self.datasets[0].data_infos = original_data_infos return eval_results @DATASETS.register_module() class RepeatDataset: """A wrapper of repeated dataset. The length of repeated dataset will be `times` larger than the original dataset. This is useful when the data loading time is long but the dataset is small. Using RepeatDataset can reduce the data loading time between epochs. Args: dataset (:obj:`Dataset`): The dataset to be repeated. times (int): Repeat times. """ def __init__(self, dataset, times): self.dataset = dataset self.times = times self.CLASSES = dataset.CLASSES self.PALETTE = getattr(dataset, 'PALETTE', None) if hasattr(self.dataset, 'flag'): self.flag = np.tile(self.dataset.flag, times) self._ori_len = len(self.dataset) def __getitem__(self, idx): return self.dataset[idx % self._ori_len] def get_cat_ids(self, idx): """Get category ids of repeat dataset by index. Args: idx (int): Index of data. Returns: list[int]: All categories in the image of specified index. """ return self.dataset.get_cat_ids(idx % self._ori_len) def get_ann_info(self, idx): """Get annotation of repeat dataset by index. Args: idx (int): Index of data. Returns: dict: Annotation info of specified index. """ return self.dataset.get_ann_info(idx % self._ori_len) def __len__(self): """Length after repetition.""" return self.times * self._ori_len # Modified from https://github.com/facebookresearch/detectron2/blob/41d475b75a230221e21d9cac5d69655e3415e3a4/detectron2/data/samplers/distributed_sampler.py#L57 # noqa @DATASETS.register_module() class ClassBalancedDataset: """A wrapper of repeated dataset with repeat factor. Suitable for training on class imbalanced datasets like LVIS. Following the sampling strategy in the `paper <https://arxiv.org/abs/1908.03195>`_, in each epoch, an image may appear multiple times based on its "repeat factor". The repeat factor for an image is a function of the frequency the rarest category labeled in that image. The "frequency of category c" in [0, 1] is defined by the fraction of images in the training set (without repeats) in which category c appears. 
The dataset needs to instantiate :func:`self.get_cat_ids` to support ClassBalancedDataset. The repeat factor is computed as followed. 1. For each category c, compute the fraction # of images that contain it: :math:`f(c)` 2. For each category c, compute the category-level repeat factor: :math:`r(c) = max(1, sqrt(t/f(c)))` 3. For each image I, compute the image-level repeat factor: :math:`r(I) = max_{c in I} r(c)` Args: dataset (:obj:`CustomDataset`): The dataset to be repeated. oversample_thr (float): frequency threshold below which data is repeated. For categories with ``f_c >= oversample_thr``, there is no oversampling. For categories with ``f_c < oversample_thr``, the degree of oversampling following the square-root inverse frequency heuristic above. filter_empty_gt (bool, optional): If set true, images without bounding boxes will not be oversampled. Otherwise, they will be categorized as the pure background class and involved into the oversampling. Default: True. """ def __init__(self, dataset, oversample_thr, filter_empty_gt=True): self.dataset = dataset self.oversample_thr = oversample_thr self.filter_empty_gt = filter_empty_gt self.CLASSES = dataset.CLASSES self.PALETTE = getattr(dataset, 'PALETTE', None) repeat_factors = self._get_repeat_factors(dataset, oversample_thr) repeat_indices = [] for dataset_idx, repeat_factor in enumerate(repeat_factors): repeat_indices.extend([dataset_idx] * math.ceil(repeat_factor)) self.repeat_indices = repeat_indices flags = [] if hasattr(self.dataset, 'flag'): for flag, repeat_factor in zip(self.dataset.flag, repeat_factors): flags.extend([flag] * int(math.ceil(repeat_factor))) assert len(flags) == len(repeat_indices) self.flag = np.asarray(flags, dtype=np.uint8) def _get_repeat_factors(self, dataset, repeat_thr): """Get repeat factor for each images in the dataset. Args: dataset (:obj:`CustomDataset`): The dataset repeat_thr (float): The threshold of frequency. If an image contains the categories whose frequency below the threshold, it would be repeated. Returns: list[float]: The repeat factors for each images in the dataset. """ # 1. For each category c, compute the fraction # of images # that contain it: f(c) category_freq = defaultdict(int) num_images = len(dataset) for idx in range(num_images): cat_ids = set(self.dataset.get_cat_ids(idx)) if len(cat_ids) == 0 and not self.filter_empty_gt: cat_ids = set([len(self.CLASSES)]) for cat_id in cat_ids: category_freq[cat_id] += 1 for k, v in category_freq.items(): category_freq[k] = v / num_images # 2. For each category c, compute the category-level repeat factor: # r(c) = max(1, sqrt(t/f(c))) category_repeat = { cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq)) for cat_id, cat_freq in category_freq.items() } # 3. For each image I, compute the image-level repeat factor: # r(I) = max_{c in I} r(c) repeat_factors = [] for idx in range(num_images): cat_ids = set(self.dataset.get_cat_ids(idx)) if len(cat_ids) == 0 and not self.filter_empty_gt: cat_ids = set([len(self.CLASSES)]) repeat_factor = 1 if len(cat_ids) > 0: repeat_factor = max( {category_repeat[cat_id] for cat_id in cat_ids}) repeat_factors.append(repeat_factor) return repeat_factors def __getitem__(self, idx): ori_index = self.repeat_indices[idx] return self.dataset[ori_index] def get_ann_info(self, idx): """Get annotation of dataset by index. Args: idx (int): Index of data. Returns: dict: Annotation info of specified index. 
""" ori_index = self.repeat_indices[idx] return self.dataset.get_ann_info(ori_index) def __len__(self): """Length after repetition.""" return len(self.repeat_indices) @DATASETS.register_module() class MultiImageMixDataset: """A wrapper of multiple images mixed dataset. Suitable for training on multiple images mixed data augmentation like mosaic and mixup. For the augmentation pipeline of mixed image data, the `get_indexes` method needs to be provided to obtain the image indexes, and you can set `skip_flags` to change the pipeline running process. At the same time, we provide the `dynamic_scale` parameter to dynamically change the output image size. Args: dataset (:obj:`CustomDataset`): The dataset to be mixed. pipeline (Sequence[dict]): Sequence of transform object or config dict to be composed. dynamic_scale (tuple[int], optional): The image scale can be changed dynamically. Default to None. It is deprecated. skip_type_keys (list[str], optional): Sequence of type string to be skip pipeline. Default to None. max_refetch (int): The maximum number of retry iterations for getting valid results from the pipeline. If the number of iterations is greater than `max_refetch`, but results is still None, then the iteration is terminated and raise the error. Default: 15. """ def __init__(self, dataset, pipeline, dynamic_scale=None, skip_type_keys=None, max_refetch=15): if dynamic_scale is not None: raise RuntimeError( 'dynamic_scale is deprecated. Please use Resize pipeline ' 'to achieve similar functions') assert isinstance(pipeline, collections.abc.Sequence) if skip_type_keys is not None: assert all([ isinstance(skip_type_key, str) for skip_type_key in skip_type_keys ]) self._skip_type_keys = skip_type_keys self.pipeline = [] self.pipeline_types = [] for transform in pipeline: if isinstance(transform, dict): self.pipeline_types.append(transform['type']) transform = build_from_cfg(transform, PIPELINES) self.pipeline.append(transform) else: raise TypeError('pipeline must be a dict') self.dataset = dataset self.CLASSES = dataset.CLASSES self.PALETTE = getattr(dataset, 'PALETTE', None) if hasattr(self.dataset, 'flag'): self.flag = dataset.flag self.num_samples = len(dataset) self.max_refetch = max_refetch def __len__(self): return self.num_samples def __getitem__(self, idx): results = copy.deepcopy(self.dataset[idx]) for (transform, transform_type) in zip(self.pipeline, self.pipeline_types): if self._skip_type_keys is not None and \ transform_type in self._skip_type_keys: continue if hasattr(transform, 'get_indexes'): for i in range(self.max_refetch): # Make sure the results passed the loading pipeline # of the original dataset is not None. indexes = transform.get_indexes(self.dataset) if not isinstance(indexes, collections.abc.Sequence): indexes = [indexes] mix_results = [ copy.deepcopy(self.dataset[index]) for index in indexes ] if None not in mix_results: results['mix_results'] = mix_results break else: raise RuntimeError( 'The loading pipeline of the original dataset' ' always return None. Please check the correctness ' 'of the dataset and its pipeline.') for i in range(self.max_refetch): # To confirm the results passed the training pipeline # of the wrapper is not None. 
updated_results = transform(copy.deepcopy(results))
                if updated_results is not None:
                    results = updated_results
                    break
            else:
                raise RuntimeError(
                    'The training pipeline of the dataset wrapper'
                    ' always returns None. Please check the correctness '
                    'of the dataset and its pipeline.')

            if 'mix_results' in results:
                results.pop('mix_results')

        return results

    def update_skip_type_keys(self, skip_type_keys):
        """Update skip_type_keys.

        It is called by an external hook.

        Args:
            skip_type_keys (list[str], optional): Sequence of type
                string to be skip pipeline.
        """
        assert all([
            isinstance(skip_type_key, str) for skip_type_key in skip_type_keys
        ])
        self._skip_type_keys = skip_type_keys
17,774
37.894967
167
py
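The ClassBalancedDataset repeat factor above is r(c) = max(1, sqrt(t / f(c))). A quick standalone check of that arithmetic, mirroring step 2 of _get_repeat_factors (the frequencies are invented):

import math

oversample_thr = 0.001  # t
category_freq = {0: 0.9, 1: 0.0001}  # f(c) for a common and a rare class
category_repeat = {
    cat_id: max(1.0, math.sqrt(oversample_thr / cat_freq))
    for cat_id, cat_freq in category_freq.items()
}
print(category_repeat)  # {0: 1.0, 1: 3.162...}: the rare class repeats ~3x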
mmdetection
mmdetection-master/mmdet/datasets/deepfashion.py
# Copyright (c) OpenMMLab. All rights reserved. from .builder import DATASETS from .coco import CocoDataset @DATASETS.register_module() class DeepFashionDataset(CocoDataset): CLASSES = ('top', 'skirt', 'leggings', 'dress', 'outer', 'pants', 'bag', 'neckwear', 'headwear', 'eyeglass', 'belt', 'footwear', 'hair', 'skin', 'face') PALETTE = [(0, 192, 64), (0, 64, 96), (128, 192, 192), (0, 64, 64), (0, 192, 224), (0, 192, 192), (128, 192, 64), (0, 192, 96), (128, 32, 192), (0, 0, 224), (0, 0, 64), (0, 160, 192), (128, 0, 96), (128, 0, 192), (0, 32, 192)]
642
36.823529
78
py
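DeepFashionDataset above is the minimal pattern for adding a new COCO-format dataset: subclass CocoDataset, override CLASSES (and optionally PALETTE), and register the class. A sketch of the same pattern for a hypothetical two-class dataset (the class name and categories are placeholders, not part of mmdet):

from mmdet.datasets import DATASETS
from mmdet.datasets.coco import CocoDataset


@DATASETS.register_module()
class MyTwoClassDataset(CocoDataset):  # hypothetical example
    CLASSES = ('cat', 'dog')
    PALETTE = [(220, 20, 60), (0, 0, 142)]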
mmdetection
mmdetection-master/mmdet/datasets/lvis.py
# Copyright (c) OpenMMLab. All rights reserved. import itertools import logging import os.path as osp import tempfile import warnings from collections import OrderedDict import numpy as np from mmcv.utils import print_log from terminaltables import AsciiTable from .builder import DATASETS from .coco import CocoDataset @DATASETS.register_module() class LVISV05Dataset(CocoDataset): CLASSES = ( 'acorn', 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna', 'apple', 'apple_juice', 'applesauce', 'apricot', 'apron', 'aquarium', 'armband', 'armchair', 'armoire', 'armor', 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award', 'awning', 'ax', 'baby_buggy', 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap', 'baseball_glove', 'basket', 'basketball_hoop', 'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead', 'beaker', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed', 'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card', 'biscuit_(bread)', 'pirate_flag', 'black_sheep', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp', 'blinker', 'blueberry', 'boar', 'gameboard', 'boat', 'bobbin', 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'book_bag', 'bookcase', 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'bowling_pin', 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', 'bread-bin', 'breechcloth', 'bridal_gown', 'briefcase', 'bristle_brush', 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn', 'corned_beef', 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butcher_knife', 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf', 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', 'can', 'can_opener', 'candelabrum', 'candle', 'candle_holder', 'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'cannon', 'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower', 'caviar', 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue', 'champagne', 'chandelier', 'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard', 'chest_of_drawers_(furniture)', 'chicken_(animal)', 'chicken_wire', 'chickpea', 'Chihuahua', 
'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent', 'clementine', 'clip', 'clipboard', 'clock', 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat', 'coat_hanger', 'coatrack', 'cock', 'coconut', 'coffee_filter', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin', 'colander', 'coleslaw', 'coloring_material', 'combination_lock', 'pacifier', 'comic_book', 'computer_keyboard', 'concrete_mixer', 'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cookie', 'cookie_jar', 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset', 'romaine_lettuce', 'costume', 'cougar', 'coverall', 'cowbell', 'cowboy_hat', 'crab_(animal)', 'cracker', 'crape', 'crate', 'crayon', 'cream_pitcher', 'credit_card', 'crescent_roll', 'crib', 'crock_pot', 'crossbar', 'crouton', 'crow', 'crown', 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupcake', 'hair_curler', 'curling_iron', 'curtain', 'cushion', 'custard', 'cutting_tool', 'cylinder', 'cymbal', 'dachshund', 'dagger', 'dartboard', 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', 'dishwasher_detergent', 'diskette', 'dispenser', 'Dixie_cup', 'dog', 'dog_collar', 'doll', 'dollar', 'dolphin', 'domestic_ass', 'eye_mask', 'doorbell', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly', 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', 'dresser', 'drill', 'drinking_fountain', 'drone', 'dropper', 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', 'Dutch_oven', 'eagle', 'earphone', 'earplug', 'earring', 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk', 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)', 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace', 'fireplug', 'fish', 'fish_(food)', 'fishbowl', 'fishing_boat', 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair', 'food_processor', 'football_(American)', 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice', 'fruit_salad', 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic', 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture', 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles', 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose', 'gorilla', 'gourd', 'surgical_gown', 'grape', 'grasshopper', 'grater', 'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle', 'grillroom', 'grinder_(tool)', 
'grits', 'grizzly', 'grocery_bag', 'guacamole', 'guitar', 'gull', 'gun', 'hair_spray', 'hairbrush', 'hairnet', 'hairpin', 'ham', 'hamburger', 'hammer', 'hammock', 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw', 'hardback_book', 'harmonium', 'hat', 'hatbox', 'hatch', 'veil', 'headband', 'headboard', 'headlight', 'headscarf', 'headset', 'headstall_(for_horses)', 'hearing_aid', 'heart', 'heater', 'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 'fume_hood', 'hook', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce', 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear', 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate', 'ice_tea', 'igniter', 'incense', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jean', 'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewelry', 'joystick', 'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit', 'knee_pad', 'knife', 'knight_(chess_piece)', 'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)', 'Lego', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine', 'linen_paper', 'lion', 'lip_balm', 'lipstick', 'liquor', 'lizard', 'Loafer_(type_of_shoe)', 'log', 'lollipop', 'lotion', 'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallet', 'mammoth', 'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato', 'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone', 'milk', 'minivan', 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money', 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor', 'motor_scooter', 'motor_vehicle', 'motorboat', 'motorcycle', 'mound_(baseball)', 'mouse_(animal_rodent)', 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom', 'music_stool', 'musical_instrument', 'nailfile', 'nameplate', 'napkin', 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newsstand', 'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'oregano', 'ostrich', 'ottoman', 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle', 'padlock', 'paintbox', 'paintbrush', 'painting', 'pajamas', 'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose', 'papaya', 'paperclip', 'paper_plate', 'paper_towel', 'paperback_book', 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parchment', 'parka', 'parking_meter', 'parrot', 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport', 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter', 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener', 'pendulum', 'penguin', 
'pennant', 'penny_(coin)', 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'baby', 'pet', 'petfood', 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano', 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow', 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball', 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)', 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat', 'plate', 'platter', 'playing_card', 'playpen', 'pliers', 'plow_(farm_equipment)', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)', 'pole', 'police_van', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)', 'portrait', 'postbox_(public)', 'postcard', 'poster', 'pot', 'flowerpot', 'potato', 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat', 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt', 'recliner', 'record_player', 'red_cabbage', 'reflector', 'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map', 'robe', 'rocking_chair', 'roller_skate', 'Rollerblade', 'rolling_pin', 'root_beer', 'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)', 'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin', 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard', 'scrambled_eggs', 'scraper', 'scratcher', 'screwdriver', 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane', 'seashell', 'seedling', 'serving_dish', 'sewing_machine', 'shaker', 'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head', 'shower_curtain', 'shredder_(for_paper)', 'sieve', 'signboard', 'silo', 'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole', 'skirt', 'sled', 'sleeping_bag', 'sling_(bandage)', 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman', 'snowmobile', 'soap', 'soccer_ball', 'sock', 'soda_fountain', 'carbonated_water', 'sofa', 'softball', 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'sponge', 'spoon', 'sportswear', 'spotlight', 'squirrel', 'stapler_(stapling_machine)', 'starfish', 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steamer_(kitchen_appliance)', 'steering_wheel', 'stencil', 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer', 'stirrup', 'stockings_(leg_wear)', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses', 'sunhat', 'sunscreen', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband', 
'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)', 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure', 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup', 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth', 'telephone_pole', 'telephoto_lens', 'television_camera', 'television_set', 'tennis_ball', 'tennis_racket', 'tequila', 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread', 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 'tongs', 'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover', 'tortilla', 'tow_truck', 'towel', 'towel_rack', 'toy', 'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike', 'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', 'tray', 'tree_house', 'trench_coat', 'triangle_(musical_instrument)', 'tricycle', 'tripod', 'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', 'turban', 'turkey_(bird)', 'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)', 'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn', 'vacuum_cleaner', 'valve', 'vase', 'vending_machine', 'vent', 'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture', 'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick', 'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe', 'wasabi', 'automatic_washer', 'watch', 'water_bottle', 'water_cooler', 'water_faucet', 'water_filter', 'water_heater', 'water_jug', 'water_gun', 'water_scooter', 'water_ski', 'water_tower', 'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake', 'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream', 'whiskey', 'whistle', 'wick', 'wig', 'wind_chime', 'windmill', 'window_box_(for_plants)', 'windshield_wiper', 'windsock', 'wine_bottle', 'wine_bucket', 'wineglass', 'wing_chair', 'blinder_(for_horses)', 'wok', 'wolf', 'wooden_spoon', 'wreath', 'wrench', 'wristband', 'wristlet', 'yacht', 'yak', 'yogurt', 'yoke_(animal_equipment)', 'zebra', 'zucchini') PALETTE = None def load_annotations(self, ann_file): """Load annotation from lvis style annotation file. Args: ann_file (str): Path of annotation file. Returns: list[dict]: Annotation info from LVIS api. """ try: import lvis if getattr(lvis, '__version__', '0') >= '10.5.3': warnings.warn( 'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"', # noqa: E501 UserWarning) from lvis import LVIS except ImportError: raise ImportError( 'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".' 
# noqa: E501
            )

        self.coco = LVIS(ann_file)
        self.cat_ids = self.coco.get_cat_ids()
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            if info['file_name'].startswith('COCO'):
                # Convert from the COCO 2014 file naming convention of
                # COCO_[train/val/test]2014_000000000000.jpg to the 2017
                # naming convention of 000000000000.jpg
                # (LVIS v1 will fix this naming issue)
                info['filename'] = info['file_name'][-16:]
            else:
                info['filename'] = info['file_name']
            data_infos.append(info)
        return data_infos

    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in LVIS protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of the output json files,
                including the file path and the filename prefix, e.g.
                "a/b/prefix". If not specified, a temp file is created and
                removed after evaluation. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU thresholds used for evaluating
                recalls. If set to a list, the average recall of all IoUs will
                also be computed. Default: np.arange(0.5, 0.96, 0.05).

        Returns:
            dict[str, float]: LVIS style metrics.
        """
        try:
            import lvis
            if getattr(lvis, '__version__', '0') >= '10.5.3':
                warnings.warn(
                    'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"',  # noqa: E501
                    UserWarning)
            from lvis import LVISEval, LVISResults
        except ImportError:
            raise ImportError(
                'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".'  # noqa: E501
            )
        assert isinstance(results, list), 'results must be a list'
        assert len(results) == len(self), (
            'The length of results is not equal to the dataset len: {} != {}'.
            format(len(results), len(self)))

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError('metric {} is not supported'.format(metric))

        if jsonfile_prefix is None:
            tmp_dir = tempfile.TemporaryDirectory()
            jsonfile_prefix = osp.join(tmp_dir.name, 'results')
        else:
            tmp_dir = None
        result_files = self.results2json(results, jsonfile_prefix)

        eval_results = OrderedDict()
        # get original api
        lvis_gt = self.coco
        for metric in metrics:
            msg = 'Evaluating {}...'.format(metric)
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(
                    results, proposal_nums, iou_thrs, logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results['AR@{}'.format(num)] = ar[i]
                    log_msg.append('\nAR@{}\t{:.4f}'.format(num, ar[i]))
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric not in result_files:
                raise KeyError('{} is not in results'.format(metric))
            try:
                lvis_dt = LVISResults(lvis_gt, result_files[metric])
            except IndexError:
                print_log(
                    'The testing results of the whole dataset are empty.',
                    logger=logger,
                    level=logging.ERROR)
                break

            iou_type = 'bbox' if metric == 'proposal' else metric
            lvis_eval = LVISEval(lvis_gt, lvis_dt, iou_type)
            lvis_eval.params.imgIds = self.img_ids
            if metric == 'proposal':
                lvis_eval.params.useCats = 0
                lvis_eval.params.maxDets = list(proposal_nums)
                lvis_eval.evaluate()
                lvis_eval.accumulate()
                lvis_eval.summarize()
                for k, v in lvis_eval.get_results().items():
                    if k.startswith('AR'):
                        val = float('{:.4f}'.format(float(v)))
                        eval_results[k] = val
            else:
                lvis_eval.evaluate()
                lvis_eval.accumulate()
                lvis_eval.summarize()
                lvis_results = lvis_eval.get_results()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = lvis_eval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]
                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        # the dimensions of precisions are
                        # [num_thrs, num_recalls, num_cats, num_area_rngs]
                        nm = self.coco.load_cats([catId])[0]
                        precision = precisions[:, :, idx, 0]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))

                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)

                for k, v in lvis_results.items():
                    if k.startswith('AP'):
                        key = '{}_{}'.format(metric, k)
                        val = float('{:.4f}'.format(float(v)))
                        eval_results[key] = val
                ap_summary = ' '.join([
                    '{}:{:.4f}'.format(k, float(v))
                    for k, v in lvis_results.items() if k.startswith('AP')
                ])
                eval_results['{}_mAP_copypaste'.format(metric)] = ap_summary
            lvis_eval.print_results()
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results


LVISDataset = LVISV05Dataset
DATASETS.register_module(name='LVISDataset', module=LVISDataset)


@DATASETS.register_module()
class LVISV1Dataset(LVISDataset):
CLASSES = ( 'aerosol_can', 'air_conditioner', 'airplane', 'alarm_clock', 'alcohol', 'alligator', 'almond', 'ambulance', 'amplifier', 'anklet', 'antenna', 'apple', 'applesauce', 'apricot', 'apron', 'aquarium', 'arctic_(type_of_shoe)', 'armband', 'armchair', 'armoire', 'armor', 'artichoke', 'trash_can', 'ashtray', 'asparagus', 'atomizer', 'avocado', 'award', 'awning', 'ax', 'baboon', 'baby_buggy', 'basketball_backboard', 'backpack', 'handbag', 'suitcase', 'bagel', 'bagpipe', 'baguet', 'bait', 'ball', 'ballet_skirt', 'balloon', 'bamboo', 'banana', 'Band_Aid', 'bandage', 'bandanna', 'banjo', 'banner', 'barbell', 'barge', 'barrel', 'barrette', 'barrow', 'baseball_base', 'baseball', 'baseball_bat', 'baseball_cap', 'baseball_glove', 'basket', 'basketball', 'bass_horn', 'bat_(animal)', 'bath_mat', 'bath_towel', 'bathrobe', 'bathtub', 'batter_(food)', 'battery', 'beachball', 'bead', 'bean_curd', 'beanbag', 'beanie', 'bear', 'bed', 'bedpan', 'bedspread', 'cow', 'beef_(food)', 'beeper', 'beer_bottle', 'beer_can', 'beetle', 'bell', 'bell_pepper', 'belt', 'belt_buckle', 'bench', 'beret', 'bib', 'Bible', 'bicycle', 'visor', 'billboard', 'binder', 'binoculars', 'bird', 'birdfeeder', 'birdbath', 'birdcage', 'birdhouse', 'birthday_cake', 'birthday_card', 'pirate_flag', 'black_sheep', 'blackberry', 'blackboard', 'blanket', 'blazer', 'blender', 'blimp', 'blinker', 'blouse', 'blueberry', 'gameboard', 'boat', 'bob', 'bobbin', 'bobby_pin', 'boiled_egg', 'bolo_tie', 'deadbolt', 'bolt', 'bonnet', 'book', 'bookcase', 'booklet', 'bookmark', 'boom_microphone', 'boot', 'bottle', 'bottle_opener', 'bouquet', 'bow_(weapon)', 'bow_(decorative_ribbons)', 'bow-tie', 'bowl', 'pipe_bowl', 'bowler_hat', 'bowling_ball', 'box', 'boxing_glove', 'suspenders', 'bracelet', 'brass_plaque', 'brassiere', 'bread-bin', 'bread', 'breechcloth', 'bridal_gown', 'briefcase', 'broccoli', 'broach', 'broom', 'brownie', 'brussels_sprouts', 'bubble_gum', 'bucket', 'horse_buggy', 'bull', 'bulldog', 'bulldozer', 'bullet_train', 'bulletin_board', 'bulletproof_vest', 'bullhorn', 'bun', 'bunk_bed', 'buoy', 'burrito', 'bus_(vehicle)', 'business_card', 'butter', 'butterfly', 'button', 'cab_(taxi)', 'cabana', 'cabin_car', 'cabinet', 'locker', 'cake', 'calculator', 'calendar', 'calf', 'camcorder', 'camel', 'camera', 'camera_lens', 'camper_(vehicle)', 'can', 'can_opener', 'candle', 'candle_holder', 'candy_bar', 'candy_cane', 'walking_cane', 'canister', 'canoe', 'cantaloup', 'canteen', 'cap_(headwear)', 'bottle_cap', 'cape', 'cappuccino', 'car_(automobile)', 'railcar_(part_of_a_train)', 'elevator_car', 'car_battery', 'identity_card', 'card', 'cardigan', 'cargo_ship', 'carnation', 'horse_carriage', 'carrot', 'tote_bag', 'cart', 'carton', 'cash_register', 'casserole', 'cassette', 'cast', 'cat', 'cauliflower', 'cayenne_(spice)', 'CD_player', 'celery', 'cellular_telephone', 'chain_mail', 'chair', 'chaise_longue', 'chalice', 'chandelier', 'chap', 'checkbook', 'checkerboard', 'cherry', 'chessboard', 'chicken_(animal)', 'chickpea', 'chili_(vegetable)', 'chime', 'chinaware', 'crisp_(potato_chip)', 'poker_chip', 'chocolate_bar', 'chocolate_cake', 'chocolate_milk', 'chocolate_mousse', 'choker', 'chopping_board', 'chopstick', 'Christmas_tree', 'slide', 'cider', 'cigar_box', 'cigarette', 'cigarette_case', 'cistern', 'clarinet', 'clasp', 'cleansing_agent', 'cleat_(for_securing_rope)', 'clementine', 'clip', 'clipboard', 'clippers_(for_plants)', 'cloak', 'clock', 'clock_tower', 'clothes_hamper', 'clothespin', 'clutch_bag', 'coaster', 'coat', 'coat_hanger', 'coatrack', 
'cock', 'cockroach', 'cocoa_(beverage)', 'coconut', 'coffee_maker', 'coffee_table', 'coffeepot', 'coil', 'coin', 'colander', 'coleslaw', 'coloring_material', 'combination_lock', 'pacifier', 'comic_book', 'compass', 'computer_keyboard', 'condiment', 'cone', 'control', 'convertible_(automobile)', 'sofa_bed', 'cooker', 'cookie', 'cooking_utensil', 'cooler_(for_food)', 'cork_(bottle_plug)', 'corkboard', 'corkscrew', 'edible_corn', 'cornbread', 'cornet', 'cornice', 'cornmeal', 'corset', 'costume', 'cougar', 'coverall', 'cowbell', 'cowboy_hat', 'crab_(animal)', 'crabmeat', 'cracker', 'crape', 'crate', 'crayon', 'cream_pitcher', 'crescent_roll', 'crib', 'crock_pot', 'crossbar', 'crouton', 'crow', 'crowbar', 'crown', 'crucifix', 'cruise_ship', 'police_cruiser', 'crumb', 'crutch', 'cub_(animal)', 'cube', 'cucumber', 'cufflink', 'cup', 'trophy_cup', 'cupboard', 'cupcake', 'hair_curler', 'curling_iron', 'curtain', 'cushion', 'cylinder', 'cymbal', 'dagger', 'dalmatian', 'dartboard', 'date_(fruit)', 'deck_chair', 'deer', 'dental_floss', 'desk', 'detergent', 'diaper', 'diary', 'die', 'dinghy', 'dining_table', 'tux', 'dish', 'dish_antenna', 'dishrag', 'dishtowel', 'dishwasher', 'dishwasher_detergent', 'dispenser', 'diving_board', 'Dixie_cup', 'dog', 'dog_collar', 'doll', 'dollar', 'dollhouse', 'dolphin', 'domestic_ass', 'doorknob', 'doormat', 'doughnut', 'dove', 'dragonfly', 'drawer', 'underdrawers', 'dress', 'dress_hat', 'dress_suit', 'dresser', 'drill', 'drone', 'dropper', 'drum_(musical_instrument)', 'drumstick', 'duck', 'duckling', 'duct_tape', 'duffel_bag', 'dumbbell', 'dumpster', 'dustpan', 'eagle', 'earphone', 'earplug', 'earring', 'easel', 'eclair', 'eel', 'egg', 'egg_roll', 'egg_yolk', 'eggbeater', 'eggplant', 'electric_chair', 'refrigerator', 'elephant', 'elk', 'envelope', 'eraser', 'escargot', 'eyepatch', 'falcon', 'fan', 'faucet', 'fedora', 'ferret', 'Ferris_wheel', 'ferry', 'fig_(fruit)', 'fighter_jet', 'figurine', 'file_cabinet', 'file_(tool)', 'fire_alarm', 'fire_engine', 'fire_extinguisher', 'fire_hose', 'fireplace', 'fireplug', 'first-aid_kit', 'fish', 'fish_(food)', 'fishbowl', 'fishing_rod', 'flag', 'flagpole', 'flamingo', 'flannel', 'flap', 'flash', 'flashlight', 'fleece', 'flip-flop_(sandal)', 'flipper_(footwear)', 'flower_arrangement', 'flute_glass', 'foal', 'folding_chair', 'food_processor', 'football_(American)', 'football_helmet', 'footstool', 'fork', 'forklift', 'freight_car', 'French_toast', 'freshener', 'frisbee', 'frog', 'fruit_juice', 'frying_pan', 'fudge', 'funnel', 'futon', 'gag', 'garbage', 'garbage_truck', 'garden_hose', 'gargle', 'gargoyle', 'garlic', 'gasmask', 'gazelle', 'gelatin', 'gemstone', 'generator', 'giant_panda', 'gift_wrap', 'ginger', 'giraffe', 'cincture', 'glass_(drink_container)', 'globe', 'glove', 'goat', 'goggles', 'goldfish', 'golf_club', 'golfcart', 'gondola_(boat)', 'goose', 'gorilla', 'gourd', 'grape', 'grater', 'gravestone', 'gravy_boat', 'green_bean', 'green_onion', 'griddle', 'grill', 'grits', 'grizzly', 'grocery_bag', 'guitar', 'gull', 'gun', 'hairbrush', 'hairnet', 'hairpin', 'halter_top', 'ham', 'hamburger', 'hammer', 'hammock', 'hamper', 'hamster', 'hair_dryer', 'hand_glass', 'hand_towel', 'handcart', 'handcuff', 'handkerchief', 'handle', 'handsaw', 'hardback_book', 'harmonium', 'hat', 'hatbox', 'veil', 'headband', 'headboard', 'headlight', 'headscarf', 'headset', 'headstall_(for_horses)', 'heart', 'heater', 'helicopter', 'helmet', 'heron', 'highchair', 'hinge', 'hippopotamus', 'hockey_stick', 'hog', 'home_plate_(baseball)', 'honey', 
'fume_hood', 'hook', 'hookah', 'hornet', 'horse', 'hose', 'hot-air_balloon', 'hotplate', 'hot_sauce', 'hourglass', 'houseboat', 'hummingbird', 'hummus', 'polar_bear', 'icecream', 'popsicle', 'ice_maker', 'ice_pack', 'ice_skate', 'igniter', 'inhaler', 'iPod', 'iron_(for_clothing)', 'ironing_board', 'jacket', 'jam', 'jar', 'jean', 'jeep', 'jelly_bean', 'jersey', 'jet_plane', 'jewel', 'jewelry', 'joystick', 'jumpsuit', 'kayak', 'keg', 'kennel', 'kettle', 'key', 'keycard', 'kilt', 'kimono', 'kitchen_sink', 'kitchen_table', 'kite', 'kitten', 'kiwi_fruit', 'knee_pad', 'knife', 'knitting_needle', 'knob', 'knocker_(on_a_door)', 'koala', 'lab_coat', 'ladder', 'ladle', 'ladybug', 'lamb_(animal)', 'lamb-chop', 'lamp', 'lamppost', 'lampshade', 'lantern', 'lanyard', 'laptop_computer', 'lasagna', 'latch', 'lawn_mower', 'leather', 'legging_(clothing)', 'Lego', 'legume', 'lemon', 'lemonade', 'lettuce', 'license_plate', 'life_buoy', 'life_jacket', 'lightbulb', 'lightning_rod', 'lime', 'limousine', 'lion', 'lip_balm', 'liquor', 'lizard', 'log', 'lollipop', 'speaker_(stereo_equipment)', 'loveseat', 'machine_gun', 'magazine', 'magnet', 'mail_slot', 'mailbox_(at_home)', 'mallard', 'mallet', 'mammoth', 'manatee', 'mandarin_orange', 'manger', 'manhole', 'map', 'marker', 'martini', 'mascot', 'mashed_potato', 'masher', 'mask', 'mast', 'mat_(gym_equipment)', 'matchbox', 'mattress', 'measuring_cup', 'measuring_stick', 'meatball', 'medicine', 'melon', 'microphone', 'microscope', 'microwave_oven', 'milestone', 'milk', 'milk_can', 'milkshake', 'minivan', 'mint_candy', 'mirror', 'mitten', 'mixer_(kitchen_tool)', 'money', 'monitor_(computer_equipment) computer_monitor', 'monkey', 'motor', 'motor_scooter', 'motor_vehicle', 'motorcycle', 'mound_(baseball)', 'mouse_(computer_equipment)', 'mousepad', 'muffin', 'mug', 'mushroom', 'music_stool', 'musical_instrument', 'nailfile', 'napkin', 'neckerchief', 'necklace', 'necktie', 'needle', 'nest', 'newspaper', 'newsstand', 'nightshirt', 'nosebag_(for_animals)', 'noseband_(for_animals)', 'notebook', 'notepad', 'nut', 'nutcracker', 'oar', 'octopus_(food)', 'octopus_(animal)', 'oil_lamp', 'olive_oil', 'omelet', 'onion', 'orange_(fruit)', 'orange_juice', 'ostrich', 'ottoman', 'oven', 'overalls_(clothing)', 'owl', 'packet', 'inkpad', 'pad', 'paddle', 'padlock', 'paintbrush', 'painting', 'pajamas', 'palette', 'pan_(for_cooking)', 'pan_(metal_container)', 'pancake', 'pantyhose', 'papaya', 'paper_plate', 'paper_towel', 'paperback_book', 'paperweight', 'parachute', 'parakeet', 'parasail_(sports)', 'parasol', 'parchment', 'parka', 'parking_meter', 'parrot', 'passenger_car_(part_of_a_train)', 'passenger_ship', 'passport', 'pastry', 'patty_(food)', 'pea_(food)', 'peach', 'peanut_butter', 'pear', 'peeler_(tool_for_fruit_and_vegetables)', 'wooden_leg', 'pegboard', 'pelican', 'pen', 'pencil', 'pencil_box', 'pencil_sharpener', 'pendulum', 'penguin', 'pennant', 'penny_(coin)', 'pepper', 'pepper_mill', 'perfume', 'persimmon', 'person', 'pet', 'pew_(church_bench)', 'phonebook', 'phonograph_record', 'piano', 'pickle', 'pickup_truck', 'pie', 'pigeon', 'piggy_bank', 'pillow', 'pin_(non_jewelry)', 'pineapple', 'pinecone', 'ping-pong_ball', 'pinwheel', 'tobacco_pipe', 'pipe', 'pistol', 'pita_(bread)', 'pitcher_(vessel_for_liquid)', 'pitchfork', 'pizza', 'place_mat', 'plate', 'platter', 'playpen', 'pliers', 'plow_(farm_equipment)', 'plume', 'pocket_watch', 'pocketknife', 'poker_(fire_stirring_tool)', 'pole', 'polo_shirt', 'poncho', 'pony', 'pool_table', 'pop_(soda)', 'postbox_(public)', 'postcard', 
'poster', 'pot', 'flowerpot', 'potato', 'potholder', 'pottery', 'pouch', 'power_shovel', 'prawn', 'pretzel', 'printer', 'projectile_(weapon)', 'projector', 'propeller', 'prune', 'pudding', 'puffer_(fish)', 'puffin', 'pug-dog', 'pumpkin', 'puncher', 'puppet', 'puppy', 'quesadilla', 'quiche', 'quilt', 'rabbit', 'race_car', 'racket', 'radar', 'radiator', 'radio_receiver', 'radish', 'raft', 'rag_doll', 'raincoat', 'ram_(animal)', 'raspberry', 'rat', 'razorblade', 'reamer_(juicer)', 'rearview_mirror', 'receipt', 'recliner', 'record_player', 'reflector', 'remote_control', 'rhinoceros', 'rib_(food)', 'rifle', 'ring', 'river_boat', 'road_map', 'robe', 'rocking_chair', 'rodent', 'roller_skate', 'Rollerblade', 'rolling_pin', 'root_beer', 'router_(computer_equipment)', 'rubber_band', 'runner_(carpet)', 'plastic_bag', 'saddle_(on_an_animal)', 'saddle_blanket', 'saddlebag', 'safety_pin', 'sail', 'salad', 'salad_plate', 'salami', 'salmon_(fish)', 'salmon_(food)', 'salsa', 'saltshaker', 'sandal_(type_of_shoe)', 'sandwich', 'satchel', 'saucepan', 'saucer', 'sausage', 'sawhorse', 'saxophone', 'scale_(measuring_instrument)', 'scarecrow', 'scarf', 'school_bus', 'scissors', 'scoreboard', 'scraper', 'screwdriver', 'scrubbing_brush', 'sculpture', 'seabird', 'seahorse', 'seaplane', 'seashell', 'sewing_machine', 'shaker', 'shampoo', 'shark', 'sharpener', 'Sharpie', 'shaver_(electric)', 'shaving_cream', 'shawl', 'shears', 'sheep', 'shepherd_dog', 'sherbert', 'shield', 'shirt', 'shoe', 'shopping_bag', 'shopping_cart', 'short_pants', 'shot_glass', 'shoulder_bag', 'shovel', 'shower_head', 'shower_cap', 'shower_curtain', 'shredder_(for_paper)', 'signboard', 'silo', 'sink', 'skateboard', 'skewer', 'ski', 'ski_boot', 'ski_parka', 'ski_pole', 'skirt', 'skullcap', 'sled', 'sleeping_bag', 'sling_(bandage)', 'slipper_(footwear)', 'smoothie', 'snake', 'snowboard', 'snowman', 'snowmobile', 'soap', 'soccer_ball', 'sock', 'sofa', 'softball', 'solar_array', 'sombrero', 'soup', 'soup_bowl', 'soupspoon', 'sour_cream', 'soya_milk', 'space_shuttle', 'sparkler_(fireworks)', 'spatula', 'spear', 'spectacles', 'spice_rack', 'spider', 'crawfish', 'sponge', 'spoon', 'sportswear', 'spotlight', 'squid_(food)', 'squirrel', 'stagecoach', 'stapler_(stapling_machine)', 'starfish', 'statue_(sculpture)', 'steak_(food)', 'steak_knife', 'steering_wheel', 'stepladder', 'step_stool', 'stereo_(sound_system)', 'stew', 'stirrer', 'stirrup', 'stool', 'stop_sign', 'brake_light', 'stove', 'strainer', 'strap', 'straw_(for_drinking)', 'strawberry', 'street_sign', 'streetlight', 'string_cheese', 'stylus', 'subwoofer', 'sugar_bowl', 'sugarcane_(plant)', 'suit_(clothing)', 'sunflower', 'sunglasses', 'sunhat', 'surfboard', 'sushi', 'mop', 'sweat_pants', 'sweatband', 'sweater', 'sweatshirt', 'sweet_potato', 'swimsuit', 'sword', 'syringe', 'Tabasco_sauce', 'table-tennis_table', 'table', 'table_lamp', 'tablecloth', 'tachometer', 'taco', 'tag', 'taillight', 'tambourine', 'army_tank', 'tank_(storage_vessel)', 'tank_top_(clothing)', 'tape_(sticky_cloth_or_paper)', 'tape_measure', 'tapestry', 'tarp', 'tartan', 'tassel', 'tea_bag', 'teacup', 'teakettle', 'teapot', 'teddy_bear', 'telephone', 'telephone_booth', 'telephone_pole', 'telephoto_lens', 'television_camera', 'television_set', 'tennis_ball', 'tennis_racket', 'tequila', 'thermometer', 'thermos_bottle', 'thermostat', 'thimble', 'thread', 'thumbtack', 'tiara', 'tiger', 'tights_(clothing)', 'timer', 'tinfoil', 'tinsel', 'tissue_paper', 'toast_(food)', 'toaster', 'toaster_oven', 'toilet', 'toilet_tissue', 'tomato', 
'tongs', 'toolbox', 'toothbrush', 'toothpaste', 'toothpick', 'cover',
        'tortilla', 'tow_truck', 'towel', 'towel_rack', 'toy',
        'tractor_(farm_equipment)', 'traffic_light', 'dirt_bike',
        'trailer_truck', 'train_(railroad_vehicle)', 'trampoline', 'tray',
        'trench_coat', 'triangle_(musical_instrument)', 'tricycle', 'tripod',
        'trousers', 'truck', 'truffle_(chocolate)', 'trunk', 'vat', 'turban',
        'turkey_(food)', 'turnip', 'turtle', 'turtleneck_(clothing)',
        'typewriter', 'umbrella', 'underwear', 'unicycle', 'urinal', 'urn',
        'vacuum_cleaner', 'vase', 'vending_machine', 'vent', 'vest',
        'videotape', 'vinegar', 'violin', 'vodka', 'volleyball', 'vulture',
        'waffle', 'waffle_iron', 'wagon', 'wagon_wheel', 'walking_stick',
        'wall_clock', 'wall_socket', 'wallet', 'walrus', 'wardrobe',
        'washbasin', 'automatic_washer', 'watch', 'water_bottle',
        'water_cooler', 'water_faucet', 'water_heater', 'water_jug',
        'water_gun', 'water_scooter', 'water_ski', 'water_tower',
        'watering_can', 'watermelon', 'weathervane', 'webcam', 'wedding_cake',
        'wedding_ring', 'wet_suit', 'wheel', 'wheelchair', 'whipped_cream',
        'whistle', 'wig', 'wind_chime', 'windmill',
        'window_box_(for_plants)', 'windshield_wiper', 'windsock',
        'wine_bottle', 'wine_bucket', 'wineglass', 'blinder_(for_horses)',
        'wok', 'wolf', 'wooden_spoon', 'wreath', 'wrench', 'wristband',
        'wristlet', 'yacht', 'yogurt', 'yoke_(animal_equipment)', 'zebra',
        'zucchini')

    def load_annotations(self, ann_file):
        try:
            import lvis
            if getattr(lvis, '__version__', '0') >= '10.5.3':
                warnings.warn(
                    'mmlvis is deprecated, please install official lvis-api by "pip install git+https://github.com/lvis-dataset/lvis-api.git"',  # noqa: E501
                    UserWarning)
            from lvis import LVIS
        except ImportError:
            raise ImportError(
                'Package lvis is not installed. Please run "pip install git+https://github.com/lvis-dataset/lvis-api.git".'  # noqa: E501
            )
        self.coco = LVIS(ann_file)
        self.cat_ids = self.coco.get_cat_ids()
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            # coco_url is used in LVISv1 instead of file_name
            # e.g. http://images.cocodataset.org/train2017/000000391895.jpg
            # train/val split is specified in the url
            info['filename'] = info['coco_url'].replace(
                'http://images.cocodataset.org/', '')
            data_infos.append(info)
        return data_infos
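# --- Editor's illustration (an addition, not part of the original lvis.py):
# a minimal sketch of the two file-name conventions handled by the
# `load_annotations` implementations above. The image id is taken from the
# source comment; everything else is plain string manipulation.
name_2014 = 'COCO_val2014_000000391895.jpg'
# LVIS v0.5 keeps COCO-2014 style names; the last 16 characters are the
# 2017-style file name.
assert name_2014[-16:] == '000000391895.jpg'
coco_url = 'http://images.cocodataset.org/train2017/000000391895.jpg'
# LVIS v1 stores a coco_url instead; stripping the host keeps the
# train/val split directory as part of the filename.
assert coco_url.replace('http://images.cocodataset.org/',
                        '') == 'train2017/000000391895.jpg'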
46,336
61.364738
157
py
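# --- Editor's usage sketch (hypothetical, not part of the record above):
# how the LVIS dataset would typically be built and evaluated through
# MMDetection's registry. The annotation path, image prefix and the empty
# pipeline are placeholders, not values taken from the source.
from mmdet.datasets import build_dataset

lvis_cfg = dict(
    type='LVISV1Dataset',
    ann_file='data/lvis_v1/annotations/lvis_v1_val.json',  # placeholder path
    img_prefix='data/lvis_v1/',  # placeholder path
    pipeline=[])  # a real test pipeline is required in practice
dataset = build_dataset(lvis_cfg)
# `results` must hold one detection entry per image, e.g. produced by
# mmdet.apis.single_gpu_test; with it, per-class LVIS AP can be printed:
# metrics = dataset.evaluate(results, metric='bbox', classwise=True)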
mmdetection
mmdetection-master/mmdet/datasets/objects365.py
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp from .api_wrappers import COCO from .builder import DATASETS from .coco import CocoDataset # images exist in annotations but not in image folder. objv2_ignore_list = [ osp.join('patch16', 'objects365_v2_00908726.jpg'), osp.join('patch6', 'objects365_v1_00320532.jpg'), osp.join('patch6', 'objects365_v1_00320534.jpg'), ] @DATASETS.register_module() class Objects365V1Dataset(CocoDataset): """Objects365 v1 dataset for detection.""" CLASSES = ( 'person', 'sneakers', 'chair', 'hat', 'lamp', 'bottle', 'cabinet/shelf', 'cup', 'car', 'glasses', 'picture/frame', 'desk', 'handbag', 'street lights', 'book', 'plate', 'helmet', 'leather shoes', 'pillow', 'glove', 'potted plant', 'bracelet', 'flower', 'tv', 'storage box', 'vase', 'bench', 'wine glass', 'boots', 'bowl', 'dining table', 'umbrella', 'boat', 'flag', 'speaker', 'trash bin/can', 'stool', 'backpack', 'couch', 'belt', 'carpet', 'basket', 'towel/napkin', 'slippers', 'barrel/bucket', 'coffee table', 'suv', 'toy', 'tie', 'bed', 'traffic light', 'pen/pencil', 'microphone', 'sandals', 'canned', 'necklace', 'mirror', 'faucet', 'bicycle', 'bread', 'high heels', 'ring', 'van', 'watch', 'sink', 'horse', 'fish', 'apple', 'camera', 'candle', 'teddy bear', 'cake', 'motorcycle', 'wild bird', 'laptop', 'knife', 'traffic sign', 'cell phone', 'paddle', 'truck', 'cow', 'power outlet', 'clock', 'drum', 'fork', 'bus', 'hanger', 'nightstand', 'pot/pan', 'sheep', 'guitar', 'traffic cone', 'tea pot', 'keyboard', 'tripod', 'hockey', 'fan', 'dog', 'spoon', 'blackboard/whiteboard', 'balloon', 'air conditioner', 'cymbal', 'mouse', 'telephone', 'pickup truck', 'orange', 'banana', 'airplane', 'luggage', 'skis', 'soccer', 'trolley', 'oven', 'remote', 'baseball glove', 'paper towel', 'refrigerator', 'train', 'tomato', 'machinery vehicle', 'tent', 'shampoo/shower gel', 'head phone', 'lantern', 'donut', 'cleaning products', 'sailboat', 'tangerine', 'pizza', 'kite', 'computer box', 'elephant', 'toiletries', 'gas stove', 'broccoli', 'toilet', 'stroller', 'shovel', 'baseball bat', 'microwave', 'skateboard', 'surfboard', 'surveillance camera', 'gun', 'life saver', 'cat', 'lemon', 'liquid soap', 'zebra', 'duck', 'sports car', 'giraffe', 'pumpkin', 'piano', 'stop sign', 'radiator', 'converter', 'tissue ', 'carrot', 'washing machine', 'vent', 'cookies', 'cutting/chopping board', 'tennis racket', 'candy', 'skating and skiing shoes', 'scissors', 'folder', 'baseball', 'strawberry', 'bow tie', 'pigeon', 'pepper', 'coffee machine', 'bathtub', 'snowboard', 'suitcase', 'grapes', 'ladder', 'pear', 'american football', 'basketball', 'potato', 'paint brush', 'printer', 'billiards', 'fire hydrant', 'goose', 'projector', 'sausage', 'fire extinguisher', 'extension cord', 'facial mask', 'tennis ball', 'chopsticks', 'electronic stove and gas stove', 'pie', 'frisbee', 'kettle', 'hamburger', 'golf club', 'cucumber', 'clutch', 'blender', 'tong', 'slide', 'hot dog', 'toothbrush', 'facial cleanser', 'mango', 'deer', 'egg', 'violin', 'marker', 'ship', 'chicken', 'onion', 'ice cream', 'tape', 'wheelchair', 'plum', 'bar soap', 'scale', 'watermelon', 'cabbage', 'router/modem', 'golf ball', 'pine apple', 'crane', 'fire truck', 'peach', 'cello', 'notepaper', 'tricycle', 'toaster', 'helicopter', 'green beans', 'brush', 'carriage', 'cigar', 'earphone', 'penguin', 'hurdle', 'swing', 'radio', 'CD', 'parking meter', 'swan', 'garlic', 'french fries', 'horn', 'avocado', 'saxophone', 'trumpet', 'sandwich', 'cue', 'kiwi fruit', 'bear', 'fishing 
rod', 'cherry', 'tablet', 'green vegetables', 'nuts', 'corn', 'key',
        'screwdriver', 'globe', 'broom', 'pliers', 'volleyball', 'hammer',
        'eggplant', 'trophy', 'dates', 'board eraser', 'rice',
        'tape measure/ruler', 'dumbbell', 'hamimelon', 'stapler', 'camel',
        'lettuce', 'goldfish', 'meat balls', 'medal', 'toothpaste',
        'antelope', 'shrimp', 'rickshaw', 'trombone', 'pomegranate',
        'coconut', 'jellyfish', 'mushroom', 'calculator', 'treadmill',
        'butterfly', 'egg tart', 'cheese', 'pig', 'pomelo', 'race car',
        'rice cooker', 'tuba', 'crosswalk sign', 'papaya', 'hair drier',
        'green onion', 'chips', 'dolphin', 'sushi', 'urinal', 'donkey',
        'electric drill', 'spring rolls', 'tortoise/turtle', 'parrot',
        'flute', 'measuring cup', 'shark', 'steak', 'poker card',
        'binoculars', 'llama', 'radish', 'noodles', 'yak', 'mop', 'crab',
        'microscope', 'barbell', 'bread/bun', 'baozi', 'lion', 'red cabbage',
        'polar bear', 'lighter', 'seal', 'mangosteen', 'comb', 'eraser',
        'pitaya', 'scallop', 'pencil case', 'saw', 'table tennis paddle',
        'okra', 'starfish', 'eagle', 'monkey', 'durian', 'game board',
        'rabbit', 'french horn', 'ambulance', 'asparagus', 'hoverboard',
        'pasta', 'target', 'hotair balloon', 'chainsaw', 'lobster', 'iron',
        'flashlight')

    PALETTE = None

    def load_annotations(self, ann_file):
        """Load annotation from COCO style annotation file.

        Args:
            ann_file (str): Path of annotation file.

        Returns:
            list[dict]: Annotation info from COCO api.
        """

        self.coco = COCO(ann_file)
        # The 'categories' lists in objects365_train.json and
        # objects365_val.json are ordered inconsistently, so sort the cats
        # dict and the 'categories' list by id before calling get_cat_ids().
        cats = self.coco.cats
        sorted_cats = {i: cats[i] for i in sorted(cats)}
        self.coco.cats = sorted_cats
        categories = self.coco.dataset['categories']
        sorted_categories = sorted(categories, key=lambda i: i['id'])
        self.coco.dataset['categories'] = sorted_categories
        # The order of returned `cat_ids` will not
        # change with the order of the CLASSES
        self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES)
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids = self.coco.get_img_ids()
        data_infos = []
        total_ann_ids = []
        for i in self.img_ids:
            info = self.coco.load_imgs([i])[0]
            info['filename'] = info['file_name']
            data_infos.append(info)
            ann_ids = self.coco.get_ann_ids(img_ids=[i])
            total_ann_ids.extend(ann_ids)
        assert len(set(total_ann_ids)) == len(
            total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!"
return data_infos @DATASETS.register_module() class Objects365V2Dataset(CocoDataset): """Objects365 v2 dataset for detection.""" CLASSES = ( 'Person', 'Sneakers', 'Chair', 'Other Shoes', 'Hat', 'Car', 'Lamp', 'Glasses', 'Bottle', 'Desk', 'Cup', 'Street Lights', 'Cabinet/shelf', 'Handbag/Satchel', 'Bracelet', 'Plate', 'Picture/Frame', 'Helmet', 'Book', 'Gloves', 'Storage box', 'Boat', 'Leather Shoes', 'Flower', 'Bench', 'Potted Plant', 'Bowl/Basin', 'Flag', 'Pillow', 'Boots', 'Vase', 'Microphone', 'Necklace', 'Ring', 'SUV', 'Wine Glass', 'Belt', 'Moniter/TV', 'Backpack', 'Umbrella', 'Traffic Light', 'Speaker', 'Watch', 'Tie', 'Trash bin Can', 'Slippers', 'Bicycle', 'Stool', 'Barrel/bucket', 'Van', 'Couch', 'Sandals', 'Bakset', 'Drum', 'Pen/Pencil', 'Bus', 'Wild Bird', 'High Heels', 'Motorcycle', 'Guitar', 'Carpet', 'Cell Phone', 'Bread', 'Camera', 'Canned', 'Truck', 'Traffic cone', 'Cymbal', 'Lifesaver', 'Towel', 'Stuffed Toy', 'Candle', 'Sailboat', 'Laptop', 'Awning', 'Bed', 'Faucet', 'Tent', 'Horse', 'Mirror', 'Power outlet', 'Sink', 'Apple', 'Air Conditioner', 'Knife', 'Hockey Stick', 'Paddle', 'Pickup Truck', 'Fork', 'Traffic Sign', 'Ballon', 'Tripod', 'Dog', 'Spoon', 'Clock', 'Pot', 'Cow', 'Cake', 'Dinning Table', 'Sheep', 'Hanger', 'Blackboard/Whiteboard', 'Napkin', 'Other Fish', 'Orange/Tangerine', 'Toiletry', 'Keyboard', 'Tomato', 'Lantern', 'Machinery Vehicle', 'Fan', 'Green Vegetables', 'Banana', 'Baseball Glove', 'Airplane', 'Mouse', 'Train', 'Pumpkin', 'Soccer', 'Skiboard', 'Luggage', 'Nightstand', 'Tea pot', 'Telephone', 'Trolley', 'Head Phone', 'Sports Car', 'Stop Sign', 'Dessert', 'Scooter', 'Stroller', 'Crane', 'Remote', 'Refrigerator', 'Oven', 'Lemon', 'Duck', 'Baseball Bat', 'Surveillance Camera', 'Cat', 'Jug', 'Broccoli', 'Piano', 'Pizza', 'Elephant', 'Skateboard', 'Surfboard', 'Gun', 'Skating and Skiing shoes', 'Gas stove', 'Donut', 'Bow Tie', 'Carrot', 'Toilet', 'Kite', 'Strawberry', 'Other Balls', 'Shovel', 'Pepper', 'Computer Box', 'Toilet Paper', 'Cleaning Products', 'Chopsticks', 'Microwave', 'Pigeon', 'Baseball', 'Cutting/chopping Board', 'Coffee Table', 'Side Table', 'Scissors', 'Marker', 'Pie', 'Ladder', 'Snowboard', 'Cookies', 'Radiator', 'Fire Hydrant', 'Basketball', 'Zebra', 'Grape', 'Giraffe', 'Potato', 'Sausage', 'Tricycle', 'Violin', 'Egg', 'Fire Extinguisher', 'Candy', 'Fire Truck', 'Billards', 'Converter', 'Bathtub', 'Wheelchair', 'Golf Club', 'Briefcase', 'Cucumber', 'Cigar/Cigarette ', 'Paint Brush', 'Pear', 'Heavy Truck', 'Hamburger', 'Extractor', 'Extention Cord', 'Tong', 'Tennis Racket', 'Folder', 'American Football', 'earphone', 'Mask', 'Kettle', 'Tennis', 'Ship', 'Swing', 'Coffee Machine', 'Slide', 'Carriage', 'Onion', 'Green beans', 'Projector', 'Frisbee', 'Washing Machine/Drying Machine', 'Chicken', 'Printer', 'Watermelon', 'Saxophone', 'Tissue', 'Toothbrush', 'Ice cream', 'Hotair ballon', 'Cello', 'French Fries', 'Scale', 'Trophy', 'Cabbage', 'Hot dog', 'Blender', 'Peach', 'Rice', 'Wallet/Purse', 'Volleyball', 'Deer', 'Goose', 'Tape', 'Tablet', 'Cosmetics', 'Trumpet', 'Pineapple', 'Golf Ball', 'Ambulance', 'Parking meter', 'Mango', 'Key', 'Hurdle', 'Fishing Rod', 'Medal', 'Flute', 'Brush', 'Penguin', 'Megaphone', 'Corn', 'Lettuce', 'Garlic', 'Swan', 'Helicopter', 'Green Onion', 'Sandwich', 'Nuts', 'Speed Limit Sign', 'Induction Cooker', 'Broom', 'Trombone', 'Plum', 'Rickshaw', 'Goldfish', 'Kiwi fruit', 'Router/modem', 'Poker Card', 'Toaster', 'Shrimp', 'Sushi', 'Cheese', 'Notepaper', 'Cherry', 'Pliers', 'CD', 'Pasta', 'Hammer', 'Cue', 'Avocado', 
'Hamimelon', 'Flask', 'Mushroon', 'Screwdriver', 'Soap', 'Recorder', 'Bear', 'Eggplant', 'Board Eraser', 'Coconut', 'Tape Measur/ Ruler', 'Pig', 'Showerhead', 'Globe', 'Chips', 'Steak', 'Crosswalk Sign', 'Stapler', 'Campel', 'Formula 1 ', 'Pomegranate', 'Dishwasher', 'Crab', 'Hoverboard', 'Meat ball', 'Rice Cooker', 'Tuba', 'Calculator', 'Papaya', 'Antelope', 'Parrot', 'Seal', 'Buttefly', 'Dumbbell', 'Donkey', 'Lion', 'Urinal', 'Dolphin', 'Electric Drill', 'Hair Dryer', 'Egg tart', 'Jellyfish', 'Treadmill', 'Lighter', 'Grapefruit', 'Game board', 'Mop', 'Radish', 'Baozi', 'Target', 'French', 'Spring Rolls', 'Monkey', 'Rabbit', 'Pencil Case', 'Yak', 'Red Cabbage', 'Binoculars', 'Asparagus', 'Barbell', 'Scallop', 'Noddles', 'Comb', 'Dumpling', 'Oyster', 'Table Teniis paddle', 'Cosmetics Brush/Eyeliner Pencil', 'Chainsaw', 'Eraser', 'Lobster', 'Durian', 'Okra', 'Lipstick', 'Cosmetics Mirror', 'Curling', 'Table Tennis ') def load_annotations(self, ann_file): """Load annotation from COCO style annotation file. Args: ann_file (str): Path of annotation file. Returns: list[dict]: Annotation info from COCO api. """ self.coco = COCO(ann_file) # The order of returned `cat_ids` will not # change with the order of the CLASSES self.cat_ids = self.coco.get_cat_ids(cat_names=self.CLASSES) self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)} self.img_ids = self.coco.get_img_ids() data_infos = [] total_ann_ids = [] for i in self.img_ids: info = self.coco.load_imgs([i])[0] file_name = osp.join( osp.split(osp.split(info['file_name'])[0])[-1], osp.split(info['file_name'])[-1]) info['file_name'] = file_name if info['file_name'] in objv2_ignore_list: continue info['filename'] = info['file_name'] data_infos.append(info) ann_ids = self.coco.get_ann_ids(img_ids=[i]) total_ann_ids.extend(ann_ids) assert len(set(total_ann_ids)) == len( total_ann_ids), f"Annotation ids in '{ann_file}' are not unique!" return data_infos
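# --- Editor's illustration (an addition, not part of the original
# objects365.py): why Objects365V1Dataset sorts categories before calling
# get_cat_ids(). The train and val annotation files list 'categories' in
# different orders, so an unsorted mapping would yield inconsistent labels.
# A toy version of the same normalization, using made-up category entries:
cats = {3: {'id': 3, 'name': 'car'}, 1: {'id': 1, 'name': 'person'}}
sorted_cats = {i: cats[i] for i in sorted(cats)}  # dict keyed in id order
sorted_categories = sorted(cats.values(), key=lambda c: c['id'])
assert list(sorted_cats) == [1, 3]
assert [c['name'] for c in sorted_categories] == ['person', 'car']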
13,187
55.600858
79
py
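# --- Editor's illustration (an addition, not part of the original
# objects365.py): how Objects365V2Dataset normalizes file names before
# checking the ignore list. The 'images/v2/' prefix below is a hypothetical
# raw entry; only the last two path components are kept, which is exactly
# what the loader compares against objv2_ignore_list.
import os.path as osp

raw = 'images/v2/patch16/objects365_v2_00908726.jpg'  # hypothetical entry
file_name = osp.join(
    osp.split(osp.split(raw)[0])[-1],  # parent directory: 'patch16'
    osp.split(raw)[-1])  # base name
assert file_name == osp.join('patch16', 'objects365_v2_00908726.jpg')
# This normalized name appears in objv2_ignore_list, so load_annotations
# would skip the image entirely.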