mmdetection-master/configs/libra_rcnn/libra_fast_rcnn_r50_fpn_1x_coco.py

```python
_base_ = '../fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
    neck=[
        dict(
            type='FPN',
            in_channels=[256, 512, 1024, 2048],
            out_channels=256,
            num_outs=5),
        dict(
            type='BFP',
            in_channels=256,
            num_levels=5,
            refine_level=2,
            refine_type='non_local')
    ],
    roi_head=dict(
        bbox_head=dict(
            loss_bbox=dict(
                _delete_=True,
                type='BalancedL1Loss',
                alpha=0.5,
                gamma=1.5,
                beta=1.0,
                loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rcnn=dict(
            sampler=dict(
                _delete_=True,
                type='CombinedSampler',
                num=512,
                pos_fraction=0.25,
                add_gt_as_proposals=True,
                pos_sampler=dict(type='InstanceBalancedPosSampler'),
                neg_sampler=dict(
                    type='IoUBalancedNegSampler',
                    floor_thr=-1,
                    floor_fraction=0,
                    num_bins=3)))))
# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
data = dict(
    train=dict(proposal_file=data_root +
               'libra_proposals/rpn_r50_fpn_1x_train2017.pkl'),
    val=dict(proposal_file=data_root +
             'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'),
    test=dict(proposal_file=data_root +
              'libra_proposals/rpn_r50_fpn_1x_val2017.pkl'))
```
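The `_delete_=True` keys in configs like the one above tell MMCV's config loader to replace the inherited dict wholesale instead of merging into it. A minimal, self-contained sketch of that merge behavior (assuming only that `mmcv` is installed; the two file names are made up for illustration):

```python
# Sketch: how `_delete_=True` interacts with `_base_` inheritance in MMCV.
import os
import tempfile

from mmcv import Config

with tempfile.TemporaryDirectory() as tmp:
    with open(os.path.join(tmp, 'base.py'), 'w') as f:
        f.write("loss_bbox = dict(type='L1Loss', loss_weight=1.0)\n")
    with open(os.path.join(tmp, 'child.py'), 'w') as f:
        f.write("_base_ = './base.py'\n"
                "loss_bbox = dict(_delete_=True, type='BalancedL1Loss', "
                "alpha=0.5)\n")
    cfg = Config.fromfile(os.path.join(tmp, 'child.py'))
    # Without _delete_, `loss_weight` from base.py would survive the merge;
    # with it, only the keys written in child.py remain.
    print(cfg.loss_bbox)  # {'type': 'BalancedL1Loss', 'alpha': 0.5}
```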
mmdetection-master/configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py

```python
_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
```
mmdetection-master/configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py

```python
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# model settings
model = dict(
    neck=[
        dict(
            type='FPN',
            in_channels=[256, 512, 1024, 2048],
            out_channels=256,
            num_outs=5),
        dict(
            type='BFP',
            in_channels=256,
            num_levels=5,
            refine_level=2,
            refine_type='non_local')
    ],
    roi_head=dict(
        bbox_head=dict(
            loss_bbox=dict(
                _delete_=True,
                type='BalancedL1Loss',
                alpha=0.5,
                gamma=1.5,
                beta=1.0,
                loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rpn=dict(sampler=dict(neg_pos_ub=5), allowed_border=-1),
        rcnn=dict(
            sampler=dict(
                _delete_=True,
                type='CombinedSampler',
                num=512,
                pos_fraction=0.25,
                add_gt_as_proposals=True,
                pos_sampler=dict(type='InstanceBalancedPosSampler'),
                neg_sampler=dict(
                    type='IoUBalancedNegSampler',
                    floor_thr=-1,
                    floor_fraction=0,
                    num_bins=3)))))
```
mmdetection-master/configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py

```python
_base_ = './libra_faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
```
mmdetection-master/configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py

```python
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'
# model settings
model = dict(
    neck=[
        dict(
            type='FPN',
            in_channels=[256, 512, 1024, 2048],
            out_channels=256,
            start_level=1,
            add_extra_convs='on_input',
            num_outs=5),
        dict(
            type='BFP',
            in_channels=256,
            num_levels=5,
            refine_level=1,
            refine_type='non_local')
    ],
    bbox_head=dict(
        loss_bbox=dict(
            _delete_=True,
            type='BalancedL1Loss',
            alpha=0.5,
            gamma=1.5,
            beta=0.11,
            loss_weight=1.0)))
```
mmdetection-master/configs/libra_rcnn/metafile.yml

```yaml
Collections:
  - Name: Libra R-CNN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - IoU-Balanced Sampling
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Balanced Feature Pyramid
    Paper:
      URL: https://arxiv.org/abs/1904.02701
      Title: 'Libra R-CNN: Towards Balanced Learning for Object Detection'
    README: configs/libra_rcnn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/bfp.py#L10
      Version: v2.0.0

Models:
  - Name: libra_faster_rcnn_r50_fpn_1x_coco
    In Collection: Libra R-CNN
    Config: configs/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.6
      inference time (ms/im):
        - value: 52.63
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r50_fpn_1x_coco/libra_faster_rcnn_r50_fpn_1x_coco_20200130-3afee3a9.pth

  - Name: libra_faster_rcnn_r101_fpn_1x_coco
    In Collection: Libra R-CNN
    Config: configs/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.5
      inference time (ms/im):
        - value: 69.44
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_r101_fpn_1x_coco/libra_faster_rcnn_r101_fpn_1x_coco_20200203-8dba6a5a.pth

  - Name: libra_faster_rcnn_x101_64x4d_fpn_1x_coco
    In Collection: Libra R-CNN
    Config: configs/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 10.8
      inference time (ms/im):
        - value: 117.65
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_faster_rcnn_x101_64x4d_fpn_1x_coco/libra_faster_rcnn_x101_64x4d_fpn_1x_coco_20200315-3a7d0488.pth

  - Name: libra_retinanet_r50_fpn_1x_coco
    In Collection: Libra R-CNN
    Config: configs/libra_rcnn/libra_retinanet_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.2
      inference time (ms/im):
        - value: 56.5
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/libra_rcnn/libra_retinanet_r50_fpn_1x_coco/libra_retinanet_r50_fpn_1x_coco_20200205-804d94ce.pth
```
mmdetection-master/configs/lvis/README.md

# LVIS

> [LVIS: A Dataset for Large Vocabulary Instance Segmentation](https://arxiv.org/abs/1908.03195)

<!-- [DATASET] -->

## Abstract

Progress on object detection is enabled by datasets that focus the research community's attention on open challenges. This process led us from simple images to complex scenes and from bounding boxes to segmentation masks. In this work, we introduce LVIS (pronounced 'el-vis'): a new dataset for Large Vocabulary Instance Segmentation. We plan to collect ~2 million high-quality instance segmentation masks for over 1000 entry-level object categories in 164k images. Due to the Zipfian distribution of categories in natural images, LVIS naturally has a long tail of categories with few training samples. Given that state-of-the-art deep learning methods for object detection perform poorly in the low-sample regime, we believe that our dataset poses an important and exciting new scientific challenge.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143967423-85b9b705-05ea-4bbc-9a41-eccc14240c7a.png" height="300"/>
</div>

## Common Setting

- Please follow the [install guide](../../docs/get_started.md#install-mmdetection) to install the open-mmlab forked cocoapi first.
- Run the following script to install the forked lvis-api:

  ```shell
  pip install git+https://github.com/lvis-dataset/lvis-api.git
  ```

- All experiments use the oversampling strategy described [here](../../docs/tutorials/customize_dataset.md#class-balanced-dataset) with an oversample threshold of `1e-3`; see the sketch after this file.
- LVIS v0.5 is half the size of COCO, so a `2x` schedule on LVIS runs roughly the same number of iterations as `1x` on COCO.

## Results and models of LVIS v0.5

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R-50-FPN | pytorch | 2x | - | - | 26.1 | 25.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis-dbd06831.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_20200531_160435.log.json) |
| R-101-FPN | pytorch | 2x | - | - | 27.1 | 27.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis-54582ee2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_20200601_134748.log.json) |
| X-101-32x4d-FPN | pytorch | 2x | - | - | 26.7 | 26.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis-3cf55ea2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_20200531_221749.log.json) |
| X-101-64x4d-FPN | pytorch | 2x | - | - | 26.4 | 26.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis-1c99a5ad.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_20200601_194651.log.json) |

## Results and models of LVIS v1

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R-50-FPN | pytorch | 1x | 9.1 | - | 22.5 | 21.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-aa78ac3d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_061305.log.json) |
| R-101-FPN | pytorch | 1x | 10.8 | - | 24.6 | 23.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-ec55ce32.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_070959.log.json) |
| X-101-32x4d-FPN | pytorch | 1x | 11.8 | - | 26.7 | 25.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-ebbc5c81.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-20200829_071317.log.json) |
| X-101-64x4d-FPN | pytorch | 1x | 14.6 | - | 27.2 | 25.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-43d9edfe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1-20200830_060206.log.json) |

## Citation

```latex
@inproceedings{gupta2019lvis,
  title={{LVIS}: A Dataset for Large Vocabulary Instance Segmentation},
  author={Gupta, Agrim and Dollar, Piotr and Girshick, Ross},
  booktitle={Proceedings of the {IEEE} Conference on Computer Vision and Pattern Recognition},
  year={2019}
}
```
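The oversampling threshold mentioned under Common Setting is applied by wrapping the LVIS dataset in `ClassBalancedDataset` inside the `_base_` dataset config. A rough sketch of that wrapping (assumed shape, not copied verbatim from `lvis_v1_instance.py`; the annotation paths may differ):

```python
# Sketch of the class-balanced oversampling used by the LVIS configs above.
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
    train=dict(
        _delete_=True,
        type='ClassBalancedDataset',  # repeats images that contain rare classes
        oversample_thr=1e-3,          # categories rarer than this get oversampled
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/lvis_v1_train.json',
            img_prefix=data_root)))
```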
mmdetection-master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_1x_lvis_v1.py

```python
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
```
mmdetection-master/configs/lvis/mask_rcnn_r101_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py

```python
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
```
mmdetection-master/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py

```python
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/lvis_v1_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=1203), mask_head=dict(num_classes=1203)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
```
mmdetection-master/configs/lvis/mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py

```python
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/lvis_v0.5_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=1230), mask_head=dict(num_classes=1230)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
```
mmdetection-master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py

```python
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
```
mmdetection-master/configs/lvis/mask_rcnn_x101_32x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py

```python
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
```
mmdetection-master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_1x_lvis_v1.py

```python
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_1x_lvis_v1.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
```
mmdetection-master/configs/lvis/mask_rcnn_x101_64x4d_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py

```python
_base_ = './mask_rcnn_r50_fpn_sample1e-3_mstrain_2x_lvis_v0.5.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
```
mmdetection-master/configs/mask2former/README.md

# Mask2Former

> [Masked-attention Mask Transformer for Universal Image Segmentation](http://arxiv.org/abs/2112.01527)

<!-- [ALGORITHM] -->

## Abstract

Image segmentation is about grouping pixels with different semantics, e.g., category or instance membership, where each choice of semantics defines a task. While only the semantics of each task differ, current research focuses on designing specialized architectures for each task. We present Masked-attention Mask Transformer (Mask2Former), a new architecture capable of addressing any image segmentation task (panoptic, instance or semantic). Its key components include masked attention, which extracts localized features by constraining cross-attention within predicted mask regions. In addition to reducing the research effort by at least three times, it outperforms the best specialized architectures by a significant margin on four popular datasets. Most notably, Mask2Former sets a new state-of-the-art for panoptic segmentation (57.8 PQ on COCO), instance segmentation (50.1 AP on COCO) and semantic segmentation (57.7 mIoU on ADE20K).

<div align=center>
<img src="https://camo.githubusercontent.com/455d3116845b1d580b1f8a8542334b9752fdf39364deee2951cdd231524c7725/68747470733a2f2f626f77656e63303232312e6769746875622e696f2f696d616765732f6d61736b666f726d657276325f7465617365722e706e67" height="300"/>
</div>

## Introduction

Mask2Former requires the COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) datasets for training and evaluation. Download and extract them under the COCO dataset path, so that the directory looks like this:

```none
mmdetection
β”œβ”€β”€ mmdet
β”œβ”€β”€ tools
β”œβ”€β”€ configs
β”œβ”€β”€ data
β”‚   β”œβ”€β”€ coco
β”‚   β”‚   β”œβ”€β”€ annotations
β”‚   β”‚   β”‚   β”œβ”€β”€ instances_train2017.json
β”‚   β”‚   β”‚   β”œβ”€β”€ instances_val2017.json
β”‚   β”‚   β”‚   β”œβ”€β”€ panoptic_train2017.json
β”‚   β”‚   β”‚   β”œβ”€β”€ panoptic_train2017
β”‚   β”‚   β”‚   β”œβ”€β”€ panoptic_val2017.json
β”‚   β”‚   β”‚   β”œβ”€β”€ panoptic_val2017
β”‚   β”‚   β”œβ”€β”€ train2017
β”‚   β”‚   β”œβ”€β”€ val2017
β”‚   β”‚   β”œβ”€β”€ test2017
```

## Results and Models

### Panoptic segmentation

| Backbone | style | Pretrain | Lr schd | Mem (GB) | Inf time (fps) | PQ | box mAP | mask mAP | Config | Download |
| :------: | :---: | :------: | :-----: | :------: | :------------: | :--: | :-----: | :------: | :----: | :------: |
| R-50 | pytorch | ImageNet-1K | 50e | 13.9 | - | 51.9 | 44.8 | 41.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516.log.json) |
| R-101 | pytorch | ImageNet-1K | 50e | 16.1 | - | 52.4 | 45.3 | 42.4 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104-c54e64c9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104.log.json) |
| Swin-T | - | ImageNet-1K | 50e | 15.9 | - | 53.4 | 46.3 | 43.4 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553-fc567107.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553.log.json) |
| Swin-S | - | ImageNet-1K | 50e | 19.1 | - | 54.5 | 47.8 | 44.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200-c7b94355.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200.log.json) |
| Swin-B | - | ImageNet-1K | 50e | 26.0 | - | 55.1 | 48.2 | 44.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244-c149a9e9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244.log.json) |
| Swin-B | - | ImageNet-21K | 50e | 25.8 | - | 56.3 | 50.0 | 46.3 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021-3bb8b482.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021.log.json) |
| Swin-L | - | ImageNet-21K | 100e | 21.1 | - | 57.6 | 52.2 | 48.5 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949-d4919c44.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949.log.json) |

### Instance segmentation

| Backbone | style | Pretrain | Lr schd | Mem (GB) | Inf time (fps) | box mAP | mask mAP | Config | Download |
| -------- | ------- | ----------- | ------- | -------- | -------------- | ------- | -------- | ------ | -------- |
| R-50 | pytorch | ImageNet-1K | 50e | 13.7 | - | 45.7 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028-8e96e88b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028.log.json) |
| R-101 | pytorch | ImageNet-1K | 50e | 15.5 | - | 46.7 | 44.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250-c50b6fa6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250.log.json) |
| Swin-T | - | ImageNet-1K | 50e | 15.3 | - | 47.7 | 44.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649-4a943037.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649.log.json) |
| Swin-S | - | ImageNet-1K | 50e | 18.8 | - | 49.3 | 46.1 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756-743b7d99.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756.log.json) |

Note: We have trained the instance segmentation models many times (see more details in [PR 7571](https://github.com/open-mmlab/mmdetection/pull/7571)). The results of the trained models are relatively stable (Β±0.2 AP) but sit about 0.2 AP below the results reported in the [paper](http://arxiv.org/abs/2112.01527). However, the performance of the model trained with the official code is unstable and may also be slightly lower than the reported results, as mentioned in this [issue](https://github.com/facebookresearch/Mask2Former/issues/46).

## Citation

```latex
@article{cheng2021mask2former,
  title={Masked-attention Mask Transformer for Universal Image Segmentation},
  author={Bowen Cheng and Ishan Misra and Alexander G. Schwing and Alexander Kirillov and Rohit Girdhar},
  journal={arXiv},
  year={2021}
}
```
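For a quick sanity check of the released checkpoints, mmdetection's high-level inference API can be used. A minimal sketch (the checkpoint file and demo image paths below are placeholders to fill in with a downloaded weight file and a local image):

```python
# Minimal inference sketch for a released Mask2Former checkpoint.
# Assumes mmdet v2.23+ is installed; paths are placeholders.
from mmdet.apis import init_detector, inference_detector

config = 'configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py'
checkpoint = 'mask2former_r50_lsj_8x2_50e_coco_20220506_191028-8e96e88b.pth'

model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')
# For instance-segmentation configs, `result` is a (bbox_results, mask_results)
# pair with one entry per COCO class.
model.show_result('demo/demo.jpg', result, out_file='result.jpg')
```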
mmdetection-master/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py

```python
_base_ = './mask2former_r50_lsj_8x2_50e_coco-panoptic.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
```
mmdetection-master/configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py

```python
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
```
mmdetection-master/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py

```python
_base_ = [
    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'
]
num_things_classes = 80
num_stuff_classes = 53
num_classes = num_things_classes + num_stuff_classes
model = dict(
    type='Mask2Former',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=-1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    panoptic_head=dict(
        type='Mask2FormerHead',
        in_channels=[256, 512, 1024, 2048],  # pass to pixel_decoder inside
        strides=[4, 8, 16, 32],
        feat_channels=256,
        out_channels=256,
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        num_queries=100,
        num_transformer_feat_level=3,
        pixel_decoder=dict(
            type='MSDeformAttnPixelDecoder',
            num_outs=3,
            norm_cfg=dict(type='GN', num_groups=32),
            act_cfg=dict(type='ReLU'),
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiScaleDeformableAttention',
                        embed_dims=256,
                        num_heads=8,
                        num_levels=3,
                        num_points=4,
                        im2col_step=64,
                        dropout=0.0,
                        batch_first=False,
                        norm_cfg=None,
                        init_cfg=None),
                    ffn_cfgs=dict(
                        type='FFN',
                        embed_dims=256,
                        feedforward_channels=1024,
                        num_fcs=2,
                        ffn_drop=0.0,
                        act_cfg=dict(type='ReLU', inplace=True)),
                    operation_order=('self_attn', 'norm', 'ffn', 'norm')),
                init_cfg=None),
            positional_encoding=dict(
                type='SinePositionalEncoding', num_feats=128, normalize=True),
            init_cfg=None),
        enforce_decoder_input_project=False,
        positional_encoding=dict(
            type='SinePositionalEncoding', num_feats=128, normalize=True),
        transformer_decoder=dict(
            type='DetrTransformerDecoder',
            return_intermediate=True,
            num_layers=9,
            transformerlayers=dict(
                type='DetrTransformerDecoderLayer',
                attn_cfgs=dict(
                    type='MultiheadAttention',
                    embed_dims=256,
                    num_heads=8,
                    attn_drop=0.0,
                    proj_drop=0.0,
                    dropout_layer=None,
                    batch_first=False),
                ffn_cfgs=dict(
                    embed_dims=256,
                    feedforward_channels=2048,
                    num_fcs=2,
                    act_cfg=dict(type='ReLU', inplace=True),
                    ffn_drop=0.0,
                    dropout_layer=None,
                    add_identity=True),
                feedforward_channels=2048,
                operation_order=('cross_attn', 'norm', 'self_attn', 'norm',
                                 'ffn', 'norm')),
            init_cfg=None),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            loss_weight=2.0,
            reduction='mean',
            class_weight=[1.0] * num_classes + [0.1]),
        loss_mask=dict(
            type='CrossEntropyLoss',
            use_sigmoid=True,
            reduction='mean',
            loss_weight=5.0),
        loss_dice=dict(
            type='DiceLoss',
            use_sigmoid=True,
            activate=True,
            reduction='mean',
            naive_dice=True,
            eps=1.0,
            loss_weight=5.0)),
    panoptic_fusion_head=dict(
        type='MaskFormerFusionHead',
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        loss_panoptic=None,
        init_cfg=None),
    train_cfg=dict(
        num_points=12544,
        oversample_ratio=3.0,
        importance_sample_ratio=0.75,
        assigner=dict(
            type='MaskHungarianAssigner',
            cls_cost=dict(type='ClassificationCost', weight=2.0),
            mask_cost=dict(
                type='CrossEntropyLossCost', weight=5.0, use_sigmoid=True),
            dice_cost=dict(
                type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),
        sampler=dict(type='MaskPseudoSampler')),
    test_cfg=dict(
        panoptic_on=True,
        # For now, the dataset does not support
        # evaluating semantic segmentation metric.
        semantic_on=False,
        instance_on=True,
        # max_per_image is for instance segmentation.
        max_per_image=100,
        iou_thr=0.8,
        # In Mask2Former's panoptic postprocessing,
        # it will filter mask area where score is less than 0.5.
        filter_low_score=True),
    init_cfg=None)

# dataset settings
image_size = (1024, 1024)
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(
        type='LoadPanopticAnnotations',
        with_bbox=True,
        with_mask=True,
        with_seg=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    # large scale jittering
    dict(
        type='Resize',
        img_scale=image_size,
        ratio_range=(0.1, 2.0),
        multiscale_mode='range',
        keep_ratio=True),
    dict(
        type='RandomCrop',
        crop_size=image_size,
        crop_type='absolute',
        recompute_bbox=True,
        allow_negative_crop=True),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=image_size),
    dict(type='DefaultFormatBundle', img_to_float=True),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data_root = 'data/coco/'
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(
        pipeline=test_pipeline,
        ins_ann_file=data_root + 'annotations/instances_val2017.json',
    ),
    test=dict(
        pipeline=test_pipeline,
        ins_ann_file=data_root + 'annotations/instances_val2017.json',
    ))

embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
# optimizer
optimizer = dict(
    type='AdamW',
    lr=0.0001,
    weight_decay=0.05,
    eps=1e-8,
    betas=(0.9, 0.999),
    paramwise_cfg=dict(
        custom_keys={
            'backbone': dict(lr_mult=0.1, decay_mult=1.0),
            'query_embed': embed_multi,
            'query_feat': embed_multi,
            'level_embed': embed_multi,
        },
        norm_decay_mult=0.0))
optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2))

# learning policy
lr_config = dict(
    policy='step',
    gamma=0.1,
    by_epoch=False,
    step=[327778, 355092],
    warmup='linear',
    warmup_by_epoch=False,
    warmup_ratio=1.0,  # no warmup
    warmup_iters=10)
max_iters = 368750
runner = dict(type='IterBasedRunner', max_iters=max_iters)

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook', by_epoch=False),
        dict(type='TensorboardLoggerHook', by_epoch=False)
    ])
interval = 5000
workflow = [('train', interval)]
checkpoint_config = dict(
    by_epoch=False, interval=interval, save_last=True, max_keep_ckpts=3)

# Before 365001th iteration, we do evaluation every 5000 iterations.
# After 365000th iteration, we do evaluation every 368750 iterations,
# which means that we do evaluation at the end of training.
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
evaluation = dict(
    interval=interval,
    dynamic_intervals=dynamic_intervals,
    metric=['PQ', 'bbox', 'segm'])
```
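The iteration arithmetic baked into this schedule can be checked with plain Python; the following just restates the constants from the config above, with no mmdet dependency:

```python
# Sanity-check the hard-coded iteration schedule of the config above.
max_iters = 368750
interval = 5000
lr_steps = [327778, 355092]

for step in lr_steps:
    # Both LR drops land in the last ~12% of training.
    print(f'LR drops by 10x at iter {step} ({step / max_iters:.1%} of training)')

# dynamic_intervals switches to a single final evaluation after the last
# multiple of `interval` that fits before max_iters.
last_regular_eval = max_iters // interval * interval  # 365000
print('regular evals every', interval, 'iters up to', last_regular_eval)
print('final eval at', max_iters)
```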
mmdetection-master/configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py

```python
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
num_things_classes = 80
num_stuff_classes = 0
num_classes = num_things_classes + num_stuff_classes
model = dict(
    panoptic_head=dict(
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        loss_cls=dict(class_weight=[1.0] * num_classes + [0.1])),
    panoptic_fusion_head=dict(
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes),
    test_cfg=dict(panoptic_on=False))

# dataset settings
image_size = (1024, 1024)
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
pad_cfg = dict(img=(128, 128, 128), masks=0, seg=255)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    # large scale jittering
    dict(
        type='Resize',
        img_scale=image_size,
        ratio_range=(0.1, 2.0),
        multiscale_mode='range',
        keep_ratio=True),
    dict(
        type='RandomCrop',
        crop_size=image_size,
        crop_type='absolute',
        recompute_bbox=True,
        allow_negative_crop=True),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-5, 1e-5), by_mask=True),
    dict(type='Pad', size=image_size, pad_val=pad_cfg),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle', img_to_float=True),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Pad', size_divisor=32, pad_val=pad_cfg),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
data = dict(
    _delete_=True,
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(metric=['bbox', 'segm'])
```
mmdetection-master/configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py

```python
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth'  # noqa

model = dict(
    backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
```
mmdetection-master/configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py

```python
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth'  # noqa

depths = [2, 2, 18, 2]
model = dict(
    backbone=dict(
        pretrain_img_size=384,
        embed_dims=128,
        depths=depths,
        num_heads=[4, 8, 16, 32],
        window_size=12,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    panoptic_head=dict(in_channels=[128, 256, 512, 1024]))

# set all layers in backbone to lr_mult=0.1
# set all norm layers, the position embedding,
# query embedding and level embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
    'backbone': dict(lr_mult=0.1, decay_mult=1.0),
    'backbone.patch_embed.norm': backbone_norm_multi,
    'backbone.norm': backbone_norm_multi,
    'absolute_pos_embed': backbone_embed_multi,
    'relative_position_bias_table': backbone_embed_multi,
    'query_embed': embed_multi,
    'query_feat': embed_multi,
    'level_embed': embed_multi
}
custom_keys.update({
    f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
    for stage_id, num_blocks in enumerate(depths)
    for block_id in range(num_blocks)
})
custom_keys.update({
    f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
    for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
```
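The two dict comprehensions above enumerate every normalization layer in the Swin backbone so that each one is exempted from weight decay. A standalone snippet reproducing the keys they generate for `depths = [2, 2, 18, 2]`:

```python
# Standalone: reproduce the parameter names targeted by the comprehensions above.
depths = [2, 2, 18, 2]
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)

custom_keys = {
    f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
    for stage_id, num_blocks in enumerate(depths)
    for block_id in range(num_blocks)
}
custom_keys.update({
    f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
    for stage_id in range(len(depths) - 1)
})
print(len(custom_keys))         # 24 block norms + 3 downsample norms = 27
print(sorted(custom_keys)[:2])  # ['backbone.stages.0.blocks.0.norm', ...]
```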
mmdetection-master/configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py

```python
_base_ = ['./mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth'  # noqa

model = dict(
    backbone=dict(
        embed_dims=192,
        num_heads=[6, 12, 24, 48],
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    panoptic_head=dict(num_queries=200, in_channels=[192, 384, 768, 1536]))

data = dict(samples_per_gpu=1, workers_per_gpu=1)

lr_config = dict(step=[655556, 710184])
max_iters = 737500
runner = dict(type='IterBasedRunner', max_iters=max_iters)

# Before 735001th iteration, we do evaluation every 5000 iterations.
# After 735000th iteration, we do evaluation every 737500 iterations,
# which means that we do evaluation at the end of training.
interval = 5000
dynamic_intervals = [(max_iters // interval * interval + 1, max_iters)]
evaluation = dict(
    interval=interval,
    dynamic_intervals=dynamic_intervals,
    metric=['PQ', 'bbox', 'segm'])
```
mmdetection-master/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py

```python
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth'  # noqa

depths = [2, 2, 18, 2]
model = dict(
    backbone=dict(
        depths=depths,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)))

# set all layers in backbone to lr_mult=0.1
# set all norm layers, the position embedding,
# query embedding and level embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
    'backbone': dict(lr_mult=0.1, decay_mult=1.0),
    'backbone.patch_embed.norm': backbone_norm_multi,
    'backbone.norm': backbone_norm_multi,
    'absolute_pos_embed': backbone_embed_multi,
    'relative_position_bias_table': backbone_embed_multi,
    'query_embed': embed_multi,
    'query_feat': embed_multi,
    'level_embed': embed_multi
}
custom_keys.update({
    f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
    for stage_id, num_blocks in enumerate(depths)
    for block_id in range(num_blocks)
})
custom_keys.update({
    f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
    for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
```
mmdetection-master/configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py

```python
_base_ = ['./mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth'  # noqa

depths = [2, 2, 18, 2]
model = dict(
    backbone=dict(
        depths=depths,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)))

# set all layers in backbone to lr_mult=0.1
# set all norm layers, the position embedding,
# query embedding and level embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
    'backbone': dict(lr_mult=0.1, decay_mult=1.0),
    'backbone.patch_embed.norm': backbone_norm_multi,
    'backbone.norm': backbone_norm_multi,
    'absolute_pos_embed': backbone_embed_multi,
    'relative_position_bias_table': backbone_embed_multi,
    'query_embed': embed_multi,
    'query_feat': embed_multi,
    'level_embed': embed_multi
}
custom_keys.update({
    f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
    for stage_id, num_blocks in enumerate(depths)
    for block_id in range(num_blocks)
})
custom_keys.update({
    f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
    for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
```
mmdetection-master/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py

```python
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco-panoptic.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth'  # noqa

depths = [2, 2, 6, 2]
model = dict(
    type='Mask2Former',
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        embed_dims=96,
        depths=depths,
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        frozen_stages=-1,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    panoptic_head=dict(
        type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
    init_cfg=None)

# set all layers in backbone to lr_mult=0.1
# set all norm layers, the position embedding,
# query embedding and level embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
    'backbone': dict(lr_mult=0.1, decay_mult=1.0),
    'backbone.patch_embed.norm': backbone_norm_multi,
    'backbone.norm': backbone_norm_multi,
    'absolute_pos_embed': backbone_embed_multi,
    'relative_position_bias_table': backbone_embed_multi,
    'query_embed': embed_multi,
    'query_feat': embed_multi,
    'level_embed': embed_multi
}
custom_keys.update({
    f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
    for stage_id, num_blocks in enumerate(depths)
    for block_id in range(num_blocks)
})
custom_keys.update({
    f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
    for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
    type='AdamW',
    lr=0.0001,
    weight_decay=0.05,
    eps=1e-8,
    betas=(0.9, 0.999),
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
```
mmdetection-master/configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py

```python
_base_ = ['./mask2former_r50_lsj_8x2_50e_coco.py']
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth'  # noqa

depths = [2, 2, 6, 2]
model = dict(
    type='Mask2Former',
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        embed_dims=96,
        depths=depths,
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        frozen_stages=-1,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    panoptic_head=dict(
        type='Mask2FormerHead', in_channels=[96, 192, 384, 768]),
    init_cfg=None)

# set all layers in backbone to lr_mult=0.1
# set all norm layers, the position embedding,
# query embedding and level embedding to decay_mult=0.0
backbone_norm_multi = dict(lr_mult=0.1, decay_mult=0.0)
backbone_embed_multi = dict(lr_mult=0.1, decay_mult=0.0)
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
    'backbone': dict(lr_mult=0.1, decay_mult=1.0),
    'backbone.patch_embed.norm': backbone_norm_multi,
    'backbone.norm': backbone_norm_multi,
    'absolute_pos_embed': backbone_embed_multi,
    'relative_position_bias_table': backbone_embed_multi,
    'query_embed': embed_multi,
    'query_feat': embed_multi,
    'level_embed': embed_multi
}
custom_keys.update({
    f'backbone.stages.{stage_id}.blocks.{block_id}.norm': backbone_norm_multi
    for stage_id, num_blocks in enumerate(depths)
    for block_id in range(num_blocks)
})
custom_keys.update({
    f'backbone.stages.{stage_id}.downsample.norm': backbone_norm_multi
    for stage_id in range(len(depths) - 1)
})
# optimizer
optimizer = dict(
    type='AdamW',
    lr=0.0001,
    weight_decay=0.05,
    eps=1e-8,
    betas=(0.9, 0.999),
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
```
mmdetection-master/configs/mask2former/metafile.yml

```yaml
Collections:
  - Name: Mask2Former
    Metadata:
      Training Data: COCO
      Training Techniques:
        - AdamW
        - Weight Decay
      Training Resources: 8x A100 GPUs
      Architecture:
        - Mask2Former
    Paper:
      URL: https://arxiv.org/pdf/2112.01527
      Title: 'Masked-attention Mask Transformer for Universal Image Segmentation'
    README: configs/mask2former/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.23.0/mmdet/models/detectors/mask2former.py#L7
      Version: v2.23.0

Models:
  - Name: mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic.py
    Metadata:
      Training Memory (GB): 19.1
      Iterations: 368750
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 47.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 44.5
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 54.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220329_225200-c7b94355.pth

  - Name: mask2former_r101_lsj_8x2_50e_coco
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_r101_lsj_8x2_50e_coco.py
    Metadata:
      Training Memory (GB): 15.5
      Iterations: 368750
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 44.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco/mask2former_r101_lsj_8x2_50e_coco_20220426_100250-c50b6fa6.pth

  - Name: mask2former_r101_lsj_8x2_50e_coco-panoptic
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic.py
    Metadata:
      Training Memory (GB): 16.1
      Iterations: 368750
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 42.4
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 52.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r101_lsj_8x2_50e_coco-panoptic/mask2former_r101_lsj_8x2_50e_coco-panoptic_20220329_225104-c54e64c9.pth

  - Name: mask2former_r50_lsj_8x2_50e_coco-panoptic
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic.py
    Metadata:
      Training Memory (GB): 13.9
      Iterations: 368750
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 41.9
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 51.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco-panoptic/mask2former_r50_lsj_8x2_50e_coco-panoptic_20220326_224516-11a44721.pth

  - Name: mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic.py
    Metadata:
      Training Memory (GB): 15.9
      Iterations: 368750
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 43.4
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 53.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco-panoptic_20220326_224553-fc567107.pth

  - Name: mask2former_r50_lsj_8x2_50e_coco
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_r50_lsj_8x2_50e_coco.py
    Metadata:
      Training Memory (GB): 13.7
      Iterations: 368750
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 42.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_r50_lsj_8x2_50e_coco/mask2former_r50_lsj_8x2_50e_coco_20220506_191028-8e96e88b.pth

  - Name: mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic.py
    Metadata:
      Training Memory (GB): 21.1
      Iterations: 737500
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 52.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 48.5
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 57.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic/mask2former_swin-l-p4-w12-384-in21k_lsj_16x1_100e_coco-panoptic_20220407_104949-d4919c44.pth

  - Name: mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic.py
    Metadata:
      Training Memory (GB): 25.8
      Iterations: 368750
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 50.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 46.3
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 56.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384-in21k_lsj_8x2_50e_coco-panoptic_20220329_230021-3bb8b482.pth

  - Name: mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic.py
    Metadata:
      Training Memory (GB): 26.0
      Iterations: 368750
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 48.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 44.9
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 55.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic/mask2former_swin-b-p4-w12-384_lsj_8x2_50e_coco-panoptic_20220331_002244-c149a9e9.pth

  - Name: mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco.py
    Metadata:
      Training Memory (GB): 15.3
      Iterations: 368750
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 47.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 44.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-t-p4-w7-224_lsj_8x2_50e_coco_20220508_091649-4a943037.pth

  - Name: mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco
    In Collection: Mask2Former
    Config: configs/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco.py
    Metadata:
      Training Memory (GB): 18.8
      Iterations: 368750
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 49.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 46.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask2former/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco/mask2former_swin-s-p4-w7-224_lsj_8x2_50e_coco_20220504_001756-743b7d99.pth
```
mmdetection
mmdetection-master/configs/mask_rcnn/README.md
# Mask R-CNN

> [Mask R-CNN](https://arxiv.org/abs/1703.06870)

<!-- [ALGORITHM] -->

## Abstract

We present a conceptually simple, flexible, and general framework for object instance segmentation. Our approach efficiently detects objects in an image while simultaneously generating a high-quality segmentation mask for each instance. The method, called Mask R-CNN, extends Faster R-CNN by adding a branch for predicting an object mask in parallel with the existing branch for bounding box recognition. Mask R-CNN is simple to train and adds only a small overhead to Faster R-CNN, running at 5 fps. Moreover, Mask R-CNN is easy to generalize to other tasks, e.g., allowing us to estimate human poses in the same framework. We show top results in all three tracks of the COCO suite of challenges, including instance segmentation, bounding-box object detection, and person keypoint detection. Without bells and whistles, Mask R-CNN outperforms all existing, single-model entries on every task, including the COCO 2016 challenge winners. We hope our simple and effective approach will serve as a solid baseline and help ease future research in instance-level recognition.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143967081-c2552bed-9af2-46c4-ae44-5b3b74e5679f.png"/>
</div>

## Results and Models

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R-50-FPN | caffe | 1x | 4.3 | | 38.0 | 34.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.38__segm_mAP-0.344_20200504_231812-0ebd1859.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_20200504_231812.log.json) |
| R-50-FPN | pytorch | 1x | 4.4 | 16.1 | 38.2 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) |
| R-50-FPN (FP16) | pytorch | 1x | 3.6 | 24.1 | 38.1 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205_130539.log.json) |
| R-50-FPN | pytorch | 2x | - | - | 39.2 | 35.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_20200505_003907.log.json) |
| R-101-FPN | caffe | 1x | | | 40.4 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758-805e06c1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758.log.json) |
| R-101-FPN | pytorch | 1x | 6.4 | 13.5 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204_144809.log.json) |
| R-101-FPN | pytorch | 2x | - | - | 40.8 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_20200505_071027.log.json) |
| X-101-32x4d-FPN | pytorch | 1x | 7.6 | 11.3 | 41.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205_034906.log.json) |
| X-101-32x4d-FPN | pytorch | 2x | - | - | 42.2 | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_20200506_004702.log.json) |
| X-101-64x4d-FPN | pytorch | 1x | 10.7 | 8.0 | 42.8 | 38.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201_124310.log.json) |
| X-101-64x4d-FPN | pytorch | 2x | - | - | 42.7 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208.log.json) |
| X-101-32x8d-FPN | pytorch | 1x | 10.6 | - | 42.8 | 38.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841-0aaf329e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841.log.json) |

## Pre-trained Models

We also train some models with longer schedules and multi-scale training. Users can fine-tune them for downstream tasks.

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| [R-50-FPN](./mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py) | caffe | 2x | 4.3 | | 40.3 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_bbox_mAP-0.403__segm_mAP-0.365_20200504_231822-a75c98ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_20200504_231822.log.json) |
| [R-50-FPN](./mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py) | caffe | 3x | 4.3 | | 40.8 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_20200504_163245.log.json) |
| [R-50-FPN](./mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 4.1 | | 40.9 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154.log.json) |
| [R-101-FPN](./mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py) | caffe | 3x | 5.9 | | 42.9 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339-3c33ce02.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339.log.json) |
| [R-101-FPN](./mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 6.1 | | 42.7 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244-5675c317.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244.log.json) |
| [X-101-32x4d-FPN](./mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 7.3 | | 43.6 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410-abcd7859.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410.log.json) |
| [X-101-32x8d-FPN](./mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py) | pytorch | 1x | 10.4 | | 43.4 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco_20220630_170346-b4637974.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco_20220630_170346.log.json) |
| [X-101-32x8d-FPN](./mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 10.3 | | 44.3 | 39.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042-8bd2c639.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042.log.json) |
| [X-101-64x4d-FPN](./mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 10.4 | | 44.5 | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447-c376f129.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447.log.json) |

## Citation

```latex
@article{He_2017,
   title={Mask R-CNN},
   journal={2017 IEEE International Conference on Computer Vision (ICCV)},
   publisher={IEEE},
   author={He, Kaiming and Gkioxari, Georgia and Dollar, Piotr and Girshick, Ross},
   year={2017},
   month={Oct}
}
```
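Every row above pairs a config with a released checkpoint, so the quickest sanity check is to load one for inference. Below is a minimal sketch using the high-level `mmdet.apis` helpers from MMDetection 2.x; the local file paths and demo image are placeholders, not files referenced by this README:

```python
from mmdet.apis import init_detector, inference_detector

# Placeholder paths: any config from this folder plus the matching
# checkpoint downloaded from the table above.
config_file = 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
checkpoint_file = 'checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth'

# Build the model and load weights; use device='cpu' if no GPU is available.
model = init_detector(config_file, checkpoint_file, device='cuda:0')

# For Mask R-CNN the result is a (bbox_results, mask_results) pair with
# one list entry per COCO class.
bbox_results, mask_results = inference_detector(model, 'demo/demo.jpg')
```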
17,058
283.316667
1,070
md
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py
_base_ = './mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
222
26.875
67
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]

model = dict(
    backbone=dict(
        depth=101,
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))

# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,660
28.660714
77
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
197
27.285714
61
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py
_base_ = './mask_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
197
27.285714
61
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]

model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
263
23
61
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_caffe_c4_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_caffe_c4.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
1,413
34.35
77
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,412
33.463415
77
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
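# --- Illustration (not part of the original config) -----------------------
# `multiscale_mode='value'` above picks one of the six listed scales per
# image, while the 3x recipes use `multiscale_mode='range'`, which samples
# the short edge between two endpoints. A minimal sketch of the difference,
# written independently of MMDetection's internals:
import random

scales = [(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768),
          (1333, 800)]

def pick_scale_value(img_scales):
    # 'value' mode: choose one concrete (long_edge, short_edge) pair.
    return random.choice(img_scales)

def pick_scale_range(low=(1333, 640), high=(1333, 800)):
    # 'range' mode: sample the short edge anywhere in [640, 800].
    return (low[0], random.randint(low[1], high[1]))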
1,606
31.14
77
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py
_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
165
32.2
60
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py
_base_ = './mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
165
32.2
60
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,556
32.847826
77
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    rpn_head=dict(
        loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
    roi_head=dict(
        bbox_roi_extractor=dict(
            roi_layer=dict(
                type='RoIAlign',
                output_size=7,
                sampling_ratio=2,
                aligned=False)),
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        mask_roi_extractor=dict(
            roi_layer=dict(
                type='RoIAlign',
                output_size=14,
                sampling_ratio=2,
                aligned=False))))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
2,066
32.33871
78
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
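# --- Illustration (not part of the original config) -----------------------
# This file is nothing but the composition of the four `_base_` files. One
# way to inspect the fully merged result is `mmcv.Config`, which resolves
# the inheritance chain; the path below assumes the repository root as the
# working directory.
from mmcv import Config

cfg = Config.fromfile('configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
print(cfg.model.type)  # 'MaskRCNN', inherited from the model base file
print(cfg.optimizer)   # SGD settings, inherited from schedule_1x.py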
174
28.166667
72
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_wandb_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# Set evaluation interval
evaluation = dict(interval=2)
# Set checkpoint interval
checkpoint_config = dict(interval=4)

# yapf:disable
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='MMDetWandbHook',
             init_kwargs={
                 'project': 'mmdetection',
                 'group': 'maskrcnn-r50-fpn-1x-coco'
             },
             interval=50,
             log_checkpoint=True,
             log_checkpoint_metadata=True,
             num_eval_images=100)
    ])
716
25.555556
72
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
174
28.166667
72
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py
_base_ = './mask_rcnn_r50_fpn_1x_coco.py'
# fp16 settings
fp16 = dict(loss_scale=512.)
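# --- Illustration (not part of the original config) -----------------------
# The only change over the FP32 baseline is a static loss scale of 512.
# Sketched generically below (this is not MMCV's actual FP16 hook): the loss
# is scaled up before backward so small FP16 gradients do not underflow,
# and the gradients are unscaled again before the optimizer step.
def fp16_step(model, optimizer, loss, loss_scale=512.0):
    (loss * loss_scale).backward()
    for p in model.parameters():
        if p.grad is not None:
            p.grad.div_(loss_scale)  # undo the scaling
    optimizer.step()
    optimizer.zero_grad()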
87
21
41
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
107
20.6
49
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_r50_fpn_poly_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))
805
32.583333
77
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
420
27.066667
76
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py
_base_ = './mask_rcnn_r101_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
420
27.066667
76
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]

model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
485
24.578947
76
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=8,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnext101_32x8d')))

dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675],
    std=[57.375, 57.120, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_train2017.json',
        img_prefix=data_root + 'train2017/',
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
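# --- Illustration (not part of the original config) -----------------------
# The detectron2 ResNeXt weights expect BGR input (hence to_rgb=False) with
# per-channel standardization, unlike the caffe-style configs above whose
# std is [1, 1, 1]. What the Normalize step amounts to, assuming an
# OpenCV-loaded (BGR, uint8) image:
import numpy as np

def normalize_bgr(img, mean=(103.530, 116.280, 123.675),
                  std=(57.375, 57.120, 58.395)):
    # The image stays in BGR order; only standardization is applied.
    return (img.astype(np.float32) - np.array(mean)) / np.array(std)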
2,132
31.318182
77
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py
_base_ = './mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=8,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnext101_32x8d')))

dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675],
    std=[57.375, 57.120, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,838
29.147541
77
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]

model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=8,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnext101_32x8d')))

dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675],
    std=[57.375, 57.120, 58.395],
    to_rgb=False)

# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadAnnotations',
        with_bbox=True,
        with_mask=True,
        poly2mask=False),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

# Use RepeatDataset to speed up training
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/instances_train2017.json',
            img_prefix=data_root + 'train2017/',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
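# --- Illustration (not part of the original config) -----------------------
# RepeatDataset makes one runner "epoch" traverse the dataset `times` times,
# so LR steps and checkpoints stay on the short grid while the model
# effectively trains longer. Assuming the common 3x base keeps a 12-epoch
# runner (an assumption, not shown in this file), the arithmetic is:
times = 3            # RepeatDataset repetition factor (see above)
runner_epochs = 12   # epochs counted by the runner (assumed)
assert times * runner_epochs == 36  # i.e. a "3x" schedule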
2,537
28.511628
77
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py
_base_ = './mask_rcnn_x101_32x4d_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
426
27.466667
76
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py
_base_ = './mask_rcnn_x101_32x4d_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
426
27.466667
76
py
mmdetection
mmdetection-master/configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]

model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
485
24.578947
76
py
mmdetection
mmdetection-master/configs/mask_rcnn/metafile.yml
Collections:
  - Name: Mask R-CNN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Softmax
        - RPN
        - Convolution
        - Dense Connections
        - FPN
        - ResNet
        - RoIAlign
    Paper:
      URL: https://arxiv.org/abs/1703.06870v3
      Title: "Mask R-CNN"
    README: configs/mask_rcnn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_rcnn.py#L6
      Version: v2.0.0

Models:
  - Name: mask_rcnn_r50_caffe_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.3
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 34.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco/mask_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.38__segm_mAP-0.344_20200504_231812-0ebd1859.pth

  - Name: mask_rcnn_r50_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.4
      inference time (ms/im):
        - value: 62.11
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 34.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth

  - Name: mask_rcnn_r50_fpn_fp16_1x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py
    Metadata:
      Training Memory (GB): 3.6
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
        - Mixed Precision Training
      inference time (ms/im):
        - value: 41.49
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP16
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 34.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_1x_coco/mask_rcnn_r50_fpn_fp16_1x_coco_20200205-59faf7e4.pth

  - Name: mask_rcnn_r50_fpn_2x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 4.4
      inference time (ms/im):
        - value: 62.11
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 35.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_2x_coco/mask_rcnn_r50_fpn_2x_coco_bbox_mAP-0.392__segm_mAP-0.354_20200505_003907-3e542a40.pth

  - Name: mask_rcnn_r101_caffe_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_1x_coco/mask_rcnn_r101_caffe_fpn_1x_coco_20200601_095758-805e06c1.pth

  - Name: mask_rcnn_r101_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.4
      inference time (ms/im):
        - value: 74.07
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth

  - Name: mask_rcnn_r101_fpn_2x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r101_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 6.4
      inference time (ms/im):
        - value: 74.07
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_2x_coco/mask_rcnn_r101_fpn_2x_coco_bbox_mAP-0.408__segm_mAP-0.366_20200505_071027-14b391c7.pth

  - Name: mask_rcnn_x101_32x4d_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.6
      inference time (ms/im):
        - value: 88.5
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth

  - Name: mask_rcnn_x101_32x4d_fpn_2x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 7.6
      inference time (ms/im):
        - value: 88.5
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_2x_coco/mask_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.422__segm_mAP-0.378_20200506_004702-faef898c.pth

  - Name: mask_rcnn_x101_64x4d_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 10.7
      inference time (ms/im):
        - value: 125
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_1x_coco/mask_rcnn_x101_64x4d_fpn_1x_coco_20200201-9352eb0d.pth

  - Name: mask_rcnn_x101_64x4d_fpn_2x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 10.7
      inference time (ms/im):
        - value: 125
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_2x_coco/mask_rcnn_x101_64x4d_fpn_2x_coco_20200509_224208-39d6f70c.pth

  - Name: mask_rcnn_x101_32x8d_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 10.6
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_1x_coco/mask_rcnn_x101_32x8d_fpn_1x_coco_20220630_173841-0aaf329e.pth

  - Name: mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco.py
    Metadata:
      Training Memory (GB): 4.3
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_2x_coco_bbox_mAP-0.403__segm_mAP-0.365_20200504_231822-a75c98ce.pth

  - Name: mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Training Memory (GB): 4.3
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth

  - Name: mask_rcnn_r50_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Training Memory (GB): 4.1
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_fpn_mstrain-poly_3x_coco_20210524_201154-21b550bb.pth

  - Name: mask_rcnn_r101_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Training Memory (GB): 6.1
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_fpn_mstrain-poly_3x_coco_20210524_200244-5675c317.pth

  - Name: mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Training Memory (GB): 5.9
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r101_caffe_fpn_mstrain-poly_3x_coco_20210526_132339-3c33ce02.pth

  - Name: mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Training Memory (GB): 7.3
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x4d_fpn_mstrain-poly_3x_coco_20210524_201410-abcd7859.pth

  - Name: mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco.py
    Metadata:
      Training Memory (GB): 10.4
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_1x_coco_20220630_170346-b4637974.pth

  - Name: mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Training Memory (GB): 10.3
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_32x8d_fpn_mstrain-poly_3x_coco_20210607_161042-8bd2c639.pth

  - Name: mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Epochs: 36
      Training Memory (GB): 10.4
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco/mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco_20210526_120447-c376f129.pth
14,706
32.123874
220
yml
mmdetection
mmdetection-master/configs/maskformer/README.md
# MaskFormer

> [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278)

<!-- [ALGORITHM] -->

## Abstract

Modern approaches typically formulate semantic segmentation as a per-pixel classification task, while instance-level segmentation is handled with an alternative mask classification. Our key insight: mask classification is sufficiently general to solve both semantic- and instance-level segmentation tasks in a unified manner using the exact same model, loss, and training procedure. Following this observation, we propose MaskFormer, a simple mask classification model which predicts a set of binary masks, each associated with a single global class label prediction. Overall, the proposed mask classification-based method simplifies the landscape of effective approaches to semantic and panoptic segmentation tasks and shows excellent empirical results. In particular, we observe that MaskFormer outperforms per-pixel classification baselines when the number of classes is large. Our mask classification-based method outperforms both current state-of-the-art semantic (55.6 mIoU on ADE20K) and panoptic segmentation (52.7 PQ on COCO) models.

<div align=center>
<img src="https://camo.githubusercontent.com/29fb22298d506ce176caad3006a7b05ef2603ca12cece6c788b7e73c046e8bc9/68747470733a2f2f626f77656e63303232312e6769746875622e696f2f696d616765732f6d61736b666f726d65722e706e67" height="300"/>
</div>

## Introduction

MaskFormer requires the COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) datasets for training and evaluation. You need to download and extract them into the COCO dataset path. The directory structure should look like this:

```none
mmdetection
├── mmdet
├── tools
├── configs
├── data
│   ├── coco
│   │   ├── annotations
│   │   │   ├── panoptic_train2017.json
│   │   │   ├── panoptic_train2017
│   │   │   ├── panoptic_val2017.json
│   │   │   ├── panoptic_val2017
│   │   ├── train2017
│   │   ├── val2017
│   │   ├── test2017
```

## Results and Models

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | PQ | SQ | RQ | PQ_th | SQ_th | RQ_th | PQ_st | SQ_st | RQ_st | Config | Download | Detail |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :----: | :------: | :----: |
| R-50 | pytorch | 75e | 16.2 | - | 46.854 | 80.617 | 57.085 | 51.089 | 81.511 | 61.853 | 40.463 | 79.269 | 49.888 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956.log.json) | This version was mentioned in Table XI, in paper [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) |
| Swin-L | pytorch | 300e | 27.2 | - | 53.249 | 81.704 | 64.231 | 58.798 | 82.923 | 70.282 | 44.874 | 79.863 | 55.097 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612-061b4eb8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612.log.json) | - |

## Citation

```latex
@inproceedings{cheng2021maskformer,
  title={Per-Pixel Classification is Not All You Need for Semantic Segmentation},
  author={Bowen Cheng and Alexander G. Schwing and Alexander Kirillov},
  journal={NeurIPS},
  year={2021}
}
```
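The abstract's central idea, predicting a set of binary masks each paired with one global class label, can be made concrete with a few lines of tensor algebra. The sketch below (not MaskFormer's actual post-processing, and with made-up shapes) shows how N mask/label pairs collapse into a per-pixel semantic map:

```python
import torch

N, C, H, W = 100, 134, 64, 64  # queries, classes (things + stuff + no-object), spatial size
cls_logits = torch.randn(N, C)      # one class prediction per query
mask_logits = torch.randn(N, H, W)  # one binary mask per query

# Drop the trailing "no object" class, then combine: for every pixel,
# accumulate class evidence weighted by how strongly each query claims it.
cls_prob = cls_logits.softmax(-1)[:, :-1]  # (N, C-1)
mask_prob = mask_logits.sigmoid()          # (N, H, W)
semseg = torch.einsum('qc,qhw->chw', cls_prob, mask_prob)
pred = semseg.argmax(0)                    # (H, W) per-pixel class map
```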
5,593
102.592593
1,043
md
mmdetection
mmdetection-master/configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py
_base_ = [
    '../_base_/datasets/coco_panoptic.py', '../_base_/default_runtime.py'
]

num_things_classes = 80
num_stuff_classes = 53
num_classes = num_things_classes + num_stuff_classes
model = dict(
    type='MaskFormer',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=-1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    panoptic_head=dict(
        type='MaskFormerHead',
        in_channels=[256, 512, 1024, 2048],  # pass to pixel_decoder inside
        feat_channels=256,
        out_channels=256,
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        num_queries=100,
        pixel_decoder=dict(
            type='TransformerEncoderPixelDecoder',
            norm_cfg=dict(type='GN', num_groups=32),
            act_cfg=dict(type='ReLU'),
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention',
                        embed_dims=256,
                        num_heads=8,
                        attn_drop=0.1,
                        proj_drop=0.1,
                        dropout_layer=None,
                        batch_first=False),
                    ffn_cfgs=dict(
                        embed_dims=256,
                        feedforward_channels=2048,
                        num_fcs=2,
                        act_cfg=dict(type='ReLU', inplace=True),
                        ffn_drop=0.1,
                        dropout_layer=None,
                        add_identity=True),
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'),
                    norm_cfg=dict(type='LN'),
                    init_cfg=None,
                    batch_first=False),
                init_cfg=None),
            positional_encoding=dict(
                type='SinePositionalEncoding', num_feats=128, normalize=True)),
        enforce_decoder_input_project=False,
        positional_encoding=dict(
            type='SinePositionalEncoding', num_feats=128, normalize=True),
        transformer_decoder=dict(
            type='DetrTransformerDecoder',
            return_intermediate=True,
            num_layers=6,
            transformerlayers=dict(
                type='DetrTransformerDecoderLayer',
                attn_cfgs=dict(
                    type='MultiheadAttention',
                    embed_dims=256,
                    num_heads=8,
                    attn_drop=0.1,
                    proj_drop=0.1,
                    dropout_layer=None,
                    batch_first=False),
                ffn_cfgs=dict(
                    embed_dims=256,
                    feedforward_channels=2048,
                    num_fcs=2,
                    act_cfg=dict(type='ReLU', inplace=True),
                    ffn_drop=0.1,
                    dropout_layer=None,
                    add_identity=True),
                # the following parameter was not used,
                # just make current api happy
                feedforward_channels=2048,
                operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                 'ffn', 'norm')),
            init_cfg=None),
        loss_cls=dict(
            type='CrossEntropyLoss',
            use_sigmoid=False,
            loss_weight=1.0,
            reduction='mean',
            class_weight=[1.0] * num_classes + [0.1]),
        loss_mask=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            reduction='mean',
            loss_weight=20.0),
        loss_dice=dict(
            type='DiceLoss',
            use_sigmoid=True,
            activate=True,
            reduction='mean',
            naive_dice=True,
            eps=1.0,
            loss_weight=1.0)),
    panoptic_fusion_head=dict(
        type='MaskFormerFusionHead',
        num_things_classes=num_things_classes,
        num_stuff_classes=num_stuff_classes,
        loss_panoptic=None,
        init_cfg=None),
    train_cfg=dict(
        assigner=dict(
            type='MaskHungarianAssigner',
            cls_cost=dict(type='ClassificationCost', weight=1.0),
            mask_cost=dict(
                type='FocalLossCost', weight=20.0, binary_input=True),
            dice_cost=dict(
                type='DiceCost', weight=1.0, pred_act=True, eps=1.0)),
        sampler=dict(type='MaskPseudoSampler')),
    test_cfg=dict(
        panoptic_on=True,
        # For now, the dataset does not support
        # evaluating semantic segmentation metric.
        semantic_on=False,
        instance_on=False,
        # max_per_image is for instance segmentation.
        max_per_image=100,
        object_mask_thr=0.8,
        iou_thr=0.8,
        # In MaskFormer's panoptic postprocessing,
        # it will not filter masks whose score is smaller than 0.5 .
        filter_low_score=False),
    init_cfg=None)

# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadPanopticAnnotations',
        with_bbox=True,
        with_mask=True,
        with_seg=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    keep_ratio=True)
            ],
            [
                dict(
                    type='Resize',
                    img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                    multiscale_mode='value',
                    keep_ratio=True),
                dict(
                    type='RandomCrop',
                    crop_type='absolute_range',
                    crop_size=(384, 600),
                    allow_negative_crop=True),
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    override=True,
                    keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=1),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=1),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=1,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

# optimizer
optimizer = dict(
    type='AdamW',
    lr=0.0001,
    weight_decay=0.0001,
    eps=1e-8,
    betas=(0.9, 0.999),
    paramwise_cfg=dict(
        custom_keys={
            'backbone': dict(lr_mult=0.1, decay_mult=1.0),
            'query_embed': dict(lr_mult=1.0, decay_mult=0.0)
        },
        norm_decay_mult=0.0))
optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2))

# learning policy
lr_config = dict(
    policy='step',
    gamma=0.1,
    by_epoch=True,
    step=[50],
    warmup='linear',
    warmup_by_epoch=False,
    warmup_ratio=1.0,  # no warmup
    warmup_iters=10)
runner = dict(type='EpochBasedRunner', max_epochs=75)
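# --- Illustration (not part of the original config) -----------------------
# The paramwise_cfg above trains the backbone at a 10x smaller learning rate
# and removes weight decay from the query embedding (and, via
# norm_decay_mult, from all norm layers). A sketch of the multiplier logic,
# not MMCV's actual optimizer constructor:
base_lr, base_wd = 0.0001, 0.0001
custom = {
    'backbone': dict(lr_mult=0.1, decay_mult=1.0),
    'query_embed': dict(lr_mult=1.0, decay_mult=0.0)
}

def hyperparams(param_name):
    lr_mult = decay_mult = 1.0
    for key, mults in custom.items():
        if key in param_name:
            lr_mult, decay_mult = mults['lr_mult'], mults['decay_mult']
    return base_lr * lr_mult, base_wd * decay_mult

print(hyperparams('backbone.layer1.0.conv1.weight'))    # (1e-05, 0.0001)
print(hyperparams('panoptic_head.query_embed.weight'))  # (0.0001, 0.0)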
8,584
34.920502
79
py
mmdetection
mmdetection-master/configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py
_base_ = './maskformer_r50_mstrain_16x1_75e_coco.py'

pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth'  # noqa
depths = [2, 2, 18, 2]
model = dict(
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        pretrain_img_size=384,
        embed_dims=192,
        patch_size=4,
        window_size=12,
        mlp_ratio=4,
        depths=depths,
        num_heads=[6, 12, 24, 48],
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    panoptic_head=dict(
        in_channels=[192, 384, 768, 1536],  # pass to pixel_decoder inside
        pixel_decoder=dict(
            _delete_=True,
            type='PixelDecoder',
            norm_cfg=dict(type='GN', num_groups=32),
            act_cfg=dict(type='ReLU')),
        enforce_decoder_input_project=True))

# weight_decay = 0.01
# norm_weight_decay = 0.0
# embed_weight_decay = 0.0
embed_multi = dict(lr_mult=1.0, decay_mult=0.0)
norm_multi = dict(lr_mult=1.0, decay_mult=0.0)
custom_keys = {
    'norm': norm_multi,
    'absolute_pos_embed': embed_multi,
    'relative_position_bias_table': embed_multi,
    'query_embed': embed_multi
}

# optimizer
optimizer = dict(
    type='AdamW',
    lr=6e-5,
    weight_decay=0.01,
    eps=1e-8,
    betas=(0.9, 0.999),
    paramwise_cfg=dict(custom_keys=custom_keys, norm_decay_mult=0.0))
optimizer_config = dict(grad_clip=dict(max_norm=0.01, norm_type=2))

# learning policy
lr_config = dict(
    policy='step',
    gamma=0.1,
    by_epoch=True,
    step=[250],
    warmup='linear',
    warmup_by_epoch=False,
    warmup_ratio=1e-6,
    warmup_iters=1500)
runner = dict(type='EpochBasedRunner', max_epochs=300)
1,949
27.676471
129
py
mmdetection
mmdetection-master/configs/maskformer/metafile.yml
Collections:
  - Name: MaskFormer
    Metadata:
      Training Data: COCO
      Training Techniques:
        - AdamW
        - Weight Decay
      Training Resources: 16x V100 GPUs
      Architecture:
        - MaskFormer
    Paper:
      URL: https://arxiv.org/pdf/2107.06278
      Title: 'Per-Pixel Classification is Not All You Need for Semantic Segmentation'
    README: configs/maskformer/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/mmdet/models/detectors/maskformer.py#L7
      Version: v2.22.0

Models:
  - Name: maskformer_r50_mstrain_16x1_75e_coco
    In Collection: MaskFormer
    Config: configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py
    Metadata:
      Training Memory (GB): 16.2
      Epochs: 75
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 46.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_r50_mstrain_16x1_75e_coco/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth

  - Name: maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco
    In Collection: MaskFormer
    Config: configs/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco.py
    Metadata:
      Training Memory (GB): 27.2
      Epochs: 300
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 53.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/maskformer/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco/maskformer_swin-l-p4-w12_mstrain_64x1_300e_coco_20220326_221612-061b4eb8.pth
1,568
34.659091
196
yml
mmdetection
mmdetection-master/configs/ms_rcnn/README.md
# MS R-CNN

> [Mask Scoring R-CNN](https://arxiv.org/abs/1903.00241)

<!-- [ALGORITHM] -->

## Abstract

Letting a deep network be aware of the quality of its own predictions is an interesting yet important problem. In the task of instance segmentation, the confidence of instance classification is used as mask quality score in most instance segmentation frameworks. However, the mask quality, quantified as the IoU between the instance mask and its ground truth, is usually not well correlated with classification score. In this paper, we study this problem and propose Mask Scoring R-CNN which contains a network block to learn the quality of the predicted instance masks. The proposed network block takes the instance feature and the corresponding predicted mask together to regress the mask IoU. The mask scoring strategy calibrates the misalignment between mask quality and mask score, and improves instance segmentation performance by prioritizing more accurate mask predictions during COCO AP evaluation. By extensive evaluations on the COCO dataset, Mask Scoring R-CNN brings consistent and noticeable gain with different models, and outperforms the state-of-the-art Mask R-CNN. We hope our simple and effective approach will provide a new direction for improving instance segmentation.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143967239-3a95ae92-6443-4181-9cbc-dfe16e81b969.png"/>
</div>
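The calibration described in the abstract boils down to one line at test time: the final mask score is the classification confidence rescaled by the IoU that the MaskIoU head predicts for the mask. The snippet below is a conceptual sketch of that rescaling only, with made-up tensor names for illustration; it is not the MMDetection implementation.

```python
import torch


def calibrated_mask_scores(cls_scores: torch.Tensor,
                           pred_mask_ious: torch.Tensor) -> torch.Tensor:
    """Rescale per-instance classification confidence by predicted mask IoU.

    Both inputs are shaped (num_instances,); a confident class prediction
    with a poorly localized mask is down-weighted accordingly.
    """
    return cls_scores * pred_mask_ious


# e.g. instance 0 has a good class score but a bad mask -> low final score
scores = calibrated_mask_scores(
    torch.tensor([0.95, 0.90]), torch.tensor([0.40, 0.85]))
# tensor([0.3800, 0.7650])
```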
## Results and Models

| Backbone     | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :----------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R-50-FPN     | caffe   | 1x      | 4.5      |                | 38.2   | 36.0    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848.log.json) |
| R-50-FPN     | caffe   | 2x      | -        | -              | 38.8   | 36.3    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_bbox_mAP-0.388__segm_mAP-0.363_20200506_004738-ee87b137.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_20200506_004738.log.json) |
| R-101-FPN    | caffe   | 1x      | 6.5      |                | 40.4   | 37.6    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.404__segm_mAP-0.376_20200506_004755-b9b12a37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_20200506_004755.log.json) |
| R-101-FPN    | caffe   | 2x      | -        | -              | 41.1   | 38.1    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_bbox_mAP-0.411__segm_mAP-0.381_20200506_011134-5f3cc74f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_20200506_011134.log.json) |
| R-X101-32x4d | pytorch | 1x      | 7.9      | 11.0           | 41.8   | 38.7    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206_100113.log.json) |
| R-X101-64x4d | pytorch | 1x      | 11.0     | 8.0            | 43.0   | 39.5    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206_091744.log.json) |
| R-X101-64x4d | pytorch | 2x      | 11.0     | 8.0            | 42.6   | 39.5    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308_012247.log.json) |

## Citation

```latex
@inproceedings{huang2019msrcnn,
    title={Mask Scoring R-CNN},
    author={Zhaojin Huang and Lichao Huang and Yongchao Gong and Chang Huang and Xinggang Wang},
    booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
    year={2019},
}
```
6,623
178.027027
1,190
md
mmdetection
mmdetection-master/configs/ms_rcnn/metafile.yml
Collections:
  - Name: Mask Scoring R-CNN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RPN
        - FPN
        - ResNet
        - RoIAlign
    Paper:
      URL: https://arxiv.org/abs/1903.00241
      Title: 'Mask Scoring R-CNN'
    README: configs/ms_rcnn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/mask_scoring_rcnn.py#L6
      Version: v2.0.0

Models:
  - Name: ms_rcnn_r50_caffe_fpn_1x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.5
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco/ms_rcnn_r50_caffe_fpn_1x_coco_20200702_180848-61c9355e.pth
  - Name: ms_rcnn_r50_caffe_fpn_2x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco/ms_rcnn_r50_caffe_fpn_2x_coco_bbox_mAP-0.388__segm_mAP-0.363_20200506_004738-ee87b137.pth
  - Name: ms_rcnn_r101_caffe_fpn_1x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.5
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco/ms_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.404__segm_mAP-0.376_20200506_004755-b9b12a37.pth
  - Name: ms_rcnn_r101_caffe_fpn_2x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco/ms_rcnn_r101_caffe_fpn_2x_coco_bbox_mAP-0.411__segm_mAP-0.381_20200506_011134-5f3cc74f.pth
  - Name: ms_rcnn_x101_32x4d_fpn_1x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.9
      inference time (ms/im):
        - value: 90.91
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco/ms_rcnn_x101_32x4d_fpn_1x_coco_20200206-81fd1740.pth
  - Name: ms_rcnn_x101_64x4d_fpn_1x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 11.0
      inference time (ms/im):
        - value: 125
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco/ms_rcnn_x101_64x4d_fpn_1x_coco_20200206-86ba88d2.pth
  - Name: ms_rcnn_x101_64x4d_fpn_2x_coco
    In Collection: Mask Scoring R-CNN
    Config: configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 11.0
      inference time (ms/im):
        - value: 125
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco/ms_rcnn_x101_64x4d_fpn_2x_coco_20200308-02a445e2.pth
5,102
30.89375
190
yml
mmdetection
mmdetection-master/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_1x_coco.py
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
220
26.625
67
py
mmdetection
mmdetection-master/configs/ms_rcnn/ms_rcnn_r101_caffe_fpn_2x_coco.py
_base_ = './ms_rcnn_r101_caffe_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
151
29.4
53
py
mmdetection
mmdetection-master/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    type='MaskScoringRCNN',
    roi_head=dict(
        type='MaskScoringRoIHead',
        mask_iou_head=dict(
            type='MaskIoUHead',
            num_convs=4,
            num_fcs=2,
            roi_feat_size=14,
            in_channels=256,
            conv_out_channels=256,
            fc_out_channels=1024,
            num_classes=80)),
    # model training and testing settings
    train_cfg=dict(rcnn=dict(mask_thr_binary=0.5)))
515
29.352941
58
py
mmdetection
mmdetection-master/configs/ms_rcnn/ms_rcnn_r50_caffe_fpn_2x_coco.py
_base_ = './ms_rcnn_r50_caffe_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
150
29.2
53
py
mmdetection
mmdetection-master/configs/ms_rcnn/ms_rcnn_r50_fpn_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    type='MaskScoringRCNN',
    roi_head=dict(
        type='MaskScoringRoIHead',
        mask_iou_head=dict(
            type='MaskIoUHead',
            num_convs=4,
            num_fcs=2,
            roi_feat_size=14,
            in_channels=256,
            conv_out_channels=256,
            fc_out_channels=1024,
            num_classes=80)),
    # model training and testing settings
    train_cfg=dict(rcnn=dict(mask_thr_binary=0.5)))
509
29
52
py
mmdetection
mmdetection-master/configs/ms_rcnn/ms_rcnn_x101_32x4d_fpn_1x_coco.py
_base_ = './ms_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
417
26.866667
76
py
mmdetection
mmdetection-master/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_1x_coco.py
_base_ = './ms_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
417
26.866667
76
py
mmdetection
mmdetection-master/configs/ms_rcnn/ms_rcnn_x101_64x4d_fpn_2x_coco.py
_base_ = './ms_rcnn_x101_64x4d_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
151
29.4
53
py
mmdetection
mmdetection-master/configs/nas_fcos/README.md
# NAS-FCOS

> [NAS-FCOS: Fast Neural Architecture Search for Object Detection](https://arxiv.org/abs/1906.04423)

<!-- [ALGORITHM] -->

## Abstract

The success of deep neural networks relies on significant architecture engineering. Recently neural architecture search (NAS) has emerged as a promise to greatly reduce manual effort in network design by automatically searching for optimal architectures, although typically such algorithms need an excessive amount of computational resources, e.g., a few thousand GPU-days. To date, on challenging vision tasks such as object detection, NAS, especially fast versions of NAS, is less studied. Here we propose to search for the decoder structure of object detectors with search efficiency being taken into consideration. To be more specific, we aim to efficiently search for the feature pyramid network (FPN) as well as the prediction head of a simple anchor-free object detector, namely FCOS, using a tailored reinforcement learning paradigm. With carefully designed search space, search algorithms and strategies for evaluating network quality, we are able to efficiently search a top-performing detection architecture within 4 days using 8 V100 GPUs. The discovered architecture surpasses state-of-the-art object detection models (such as Faster R-CNN, RetinaNet and FCOS) by 1.5 to 3.5 points in AP on the COCO dataset, with comparable computation complexity and memory footprint, demonstrating the efficacy of the proposed NAS for object detection.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143967900-1c8a65b9-c58d-4b03-8900-96af8f9768e8.png"/>
</div>

## Results and Models

| Head         | Backbone | Style | GN-head | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :----------: | :------: | :---: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| NAS-FCOSHead | R-50     | caffe | Y       | 1x      |          |                | 39.4   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520.log.json) |
| FCOSHead     | R-50     | caffe | Y       | 1x      |          |                | 38.5   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521-7fdcbce0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521.log.json) |

**Notes:**

- To be consistent with the author's implementation, we use 4 GPUs with 4 images/GPU.

## Citation

```latex
@article{wang2019fcos,
  title={Nas-fcos: Fast neural architecture search for object detection},
  author={Wang, Ning and Gao, Yang and Chen, Hao and Wang, Peng and Tian, Zhi and Shen, Chunhua},
  journal={arXiv preprint arXiv:1906.04423},
  year={2019}
}
```
4,505
124.166667
1,351
md
mmdetection
mmdetection-master/configs/nas_fcos/metafile.yml
Collections:
  - Name: NAS-FCOS
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 4x V100 GPUs
      Architecture:
        - FPN
        - NAS-FCOS
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1906.04423
      Title: 'NAS-FCOS: Fast Neural Architecture Search for Object Detection'
    README: configs/nas_fcos/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/detectors/nasfcos.py#L6
      Version: v2.1.0

Models:
  - Name: nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco
    In Collection: NAS-FCOS
    Config: configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200520-1bdba3ce.pth
  - Name: nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco
    In Collection: NAS-FCOS
    Config: configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco_20200521-7fdcbce0.pth
1,585
34.244444
195
yml
mmdetection
mmdetection-master/configs/nas_fcos/nas_fcos_fcoshead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

model = dict(
    type='NASFCOS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False, eps=0),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=dict(
        type='NASFCOS_FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5,
        norm_cfg=dict(type='BN'),
        conv_cfg=dict(type='DCNv2', deform_groups=2)),
    bbox_head=dict(
        type='FCOSHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        norm_cfg=dict(type='GN', num_groups=32),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='IoULoss', loss_weight=1.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

optimizer = dict(
    lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
3,012
28.831683
73
py
mmdetection
mmdetection-master/configs/nas_fcos/nas_fcos_nashead_r50_caffe_fpn_gn-head_4x4_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

model = dict(
    type='NASFCOS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False, eps=0),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=dict(
        type='NASFCOS_FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs=True,
        num_outs=5,
        norm_cfg=dict(type='BN'),
        conv_cfg=dict(type='DCNv2', deform_groups=2)),
    bbox_head=dict(
        type='NASFCOSHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        norm_cfg=dict(type='GN', num_groups=32),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='IoULoss', loss_weight=1.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

data = dict(
    samples_per_gpu=4,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

optimizer = dict(
    lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
2,990
28.91
73
py
mmdetection
mmdetection-master/configs/nas_fpn/README.md
# NAS-FPN

> [NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection](https://arxiv.org/abs/1904.07392)

<!-- [ALGORITHM] -->

## Abstract

Current state-of-the-art convolutional architectures for object detection are manually designed. Here we aim to learn a better architecture of feature pyramid network for object detection. We adopt Neural Architecture Search and discover a new feature pyramid architecture in a novel scalable search space covering all cross-scale connections. The discovered architecture, named NAS-FPN, consists of a combination of top-down and bottom-up connections to fuse features across scales. NAS-FPN, combined with various backbone models in the RetinaNet framework, achieves better accuracy and latency tradeoff compared to state-of-the-art object detection models. NAS-FPN improves mobile detection accuracy by 2 AP compared to state-of-the-art SSDLite with MobileNetV2 model in \[32\] and achieves 48.3 AP which surpasses Mask R-CNN \[10\] detection accuracy with less computation time.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143968037-cedd76e9-1ae7-4869-bd34-c9d8611d630c.png"/>
</div>

## Results and Models

We benchmark the new training schedule (crop training, large batch, unfrozen BN, 50 epochs) introduced in NAS-FPN. RetinaNet is used in the paper.
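For orientation, the four ingredients of that schedule map onto the config settings sketched below. This is a condensed excerpt of fragments that appear in the two configs in this folder, not a standalone runnable config.

```python
# Condensed sketch of the NAS-FPN training schedule used here.
model = dict(backbone=dict(norm_eval=False))  # unfrozen BN during training
train_pipeline = [
    # resize jitter followed by fixed-size crops ("crop training")
    dict(
        type='Resize',
        img_scale=(640, 640),
        ratio_range=(0.8, 1.2),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
]
data = dict(samples_per_gpu=8)  # large batch: 8 GPUs x 8 samples per GPU
runner = dict(type='EpochBasedRunner', max_epochs=50)  # 50-epoch schedule
```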
| Backbone    | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :---------: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| R-50-FPN    | 50e     | 12.9     | 22.9           | 37.9   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco-9b953d76.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco_20200529_095329.log.json) |
| R-50-NASFPN | 50e     | 13.2     | 23.0           | 40.5   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco_20200528_230008.log.json) |

**Note**: We find that it is unstable to train NAS-FPN and there is a small chance that results can be 3% mAP lower.

## Citation

```latex
@inproceedings{ghiasi2019fpn,
  title={Nas-fpn: Learning scalable feature pyramid architecture for object detection},
  author={Ghiasi, Golnaz and Lin, Tsung-Yi and Le, Quoc V},
  booktitle={Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition},
  pages={7036--7045},
  year={2019}
}
```
3,848
103.027027
881
md
mmdetection
mmdetection-master/configs/nas_fpn/metafile.yml
Collections:
  - Name: NAS-FPN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - NAS-FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1904.07392
      Title: 'NAS-FPN: Learning Scalable Feature Pyramid Architecture for Object Detection'
    README: configs/nas_fpn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/nas_fpn.py#L67
      Version: v2.0.0

Models:
  - Name: retinanet_r50_fpn_crop640_50e_coco
    In Collection: NAS-FPN
    Config: configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py
    Metadata:
      Training Memory (GB): 12.9
      inference time (ms/im):
        - value: 43.67
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_fpn_crop640_50e_coco/retinanet_r50_fpn_crop640_50e_coco-9b953d76.pth
  - Name: retinanet_r50_nasfpn_crop640_50e_coco
    In Collection: NAS-FPN
    Config: configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py
    Metadata:
      Training Memory (GB): 13.2
      inference time (ms/im):
        - value: 43.48
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco/retinanet_r50_nasfpn_crop640_50e_coco-0ad1f644.pth
1,869
30.166667
157
yml
mmdetection
mmdetection-master/configs/nas_fpn/retinanet_r50_fpn_crop640_50e_coco.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        relu_before_extra_convs=True,
        no_norm_on_lateral=True,
        norm_cfg=norm_cfg),
    bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
    # training and testing settings
    train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=(640, 640),
        ratio_range=(0.8, 1.2),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=(640, 640)),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(640, 640),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=64),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.08,
    momentum=0.9,
    weight_decay=0.0001,
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.1,
    step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
2,675
30.116279
79
py
mmdetection
mmdetection-master/configs/nas_fpn/retinanet_r50_nasfpn_crop640_50e_coco.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
cudnn_benchmark = True
# model settings
norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='RetinaNet',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(type='NASFPN', stack_times=7, norm_cfg=norm_cfg),
    bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
    # training and testing settings
    train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=(640, 640),
        ratio_range=(0.8, 1.2),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=(640, 640)),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=(640, 640)),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(640, 640),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=128),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='SGD',
    lr=0.08,
    momentum=0.9,
    weight_decay=0.0001,
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.1,
    step=[30, 40])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=50)

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
2,665
30.364706
79
py
mmdetection
mmdetection-master/configs/objects365/README.md
# Objects365 Dataset

> [Objects365 Dataset](https://openaccess.thecvf.com/content_ICCV_2019/papers/Shao_Objects365_A_Large-Scale_High-Quality_Dataset_for_Object_Detection_ICCV_2019_paper.pdf)

<!-- [DATASET] -->

## Abstract

<!-- [ABSTRACT] -->

#### Objects365 Dataset V1

[Objects365 Dataset V1](http://www.objects365.org/overview.html) is a brand-new dataset, designed to spur object detection research with a focus on diverse objects in the wild. It has 365 object categories over 600K training images. More than 10 million high-quality bounding boxes are manually labeled through a three-step, carefully designed annotation pipeline. It is the largest object detection dataset (with full annotation) so far and establishes a more challenging benchmark for the community. Objects365 can serve as a better feature learning dataset for localization-sensitive tasks like object detection and semantic segmentation.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/48282753/208368046-b7573022-06c9-4a99-af17-a6ac7407e3d8.png" height="400"/>
</div>

#### Objects365 Dataset V2

[Objects365 Dataset V2](http://www.objects365.org/overview.html) is based on the V1 release of the Objects365 dataset. It annotates 365 object classes on more than 1800k images, with more than 29 million bounding boxes in the training set, surpassing the PASCAL VOC, ImageNet, and COCO datasets. Objects365 covers 11 super-categories (people, clothing, living room, bathroom, kitchen, office/medical, electrical appliances, transportation, food, animals, and sports/musical instruments), and each category has dozens of subcategories.

## Citation

```
@inproceedings{shao2019objects365,
  title={Objects365: A large-scale, high-quality dataset for object detection},
  author={Shao, Shuai and Li, Zeming and Zhang, Tianyuan and Peng, Chao and Yu, Gang and Zhang, Xiangyu and Li, Jing and Sun, Jian},
  booktitle={Proceedings of the IEEE/CVF international conference on computer vision},
  pages={8430--8439},
  year={2019}
}
```

## Prepare Dataset

1. You need to download and extract the Objects365 dataset. Users can download Objects365 V2 by using `tools/misc/download_dataset.py`.

   **Usage**

   ```shell
   python tools/misc/download_dataset.py --dataset-name objects365v2 \
       --save-dir ${SAVING PATH} \
       --unzip \
       --delete  # Optional, delete the download zip file
   ```

   **Note:** There is no download link for Objects365 V1 right now. If you would like to download Objects365 V1, please visit the [official website](http://www.objects365.org/) to contact the author.

2. The directory should be like this:

   ```none
   mmdetection
   β”œβ”€β”€ mmdet
   β”œβ”€β”€ tools
   β”œβ”€β”€ configs
   β”œβ”€β”€ data
   β”‚   β”œβ”€β”€ Objects365
   β”‚   β”‚   β”œβ”€β”€ Obj365_v1
   β”‚   β”‚   β”‚   β”œβ”€β”€ annotations
   β”‚   β”‚   β”‚   β”‚   β”œβ”€β”€ objects365_train.json
   β”‚   β”‚   β”‚   β”‚   β”œβ”€β”€ objects365_val.json
   β”‚   β”‚   β”‚   β”œβ”€β”€ train        # training images
   β”‚   β”‚   β”‚   β”œβ”€β”€ val          # validation images
   β”‚   β”‚   β”œβ”€β”€ Obj365_v2
   β”‚   β”‚   β”‚   β”œβ”€β”€ annotations
   β”‚   β”‚   β”‚   β”‚   β”œβ”€β”€ zhiyuan_objv2_train.json
   β”‚   β”‚   β”‚   β”‚   β”œβ”€β”€ zhiyuan_objv2_val.json
   β”‚   β”‚   β”‚   β”œβ”€β”€ train        # training images
   β”‚   β”‚   β”‚   β”‚   β”œβ”€β”€ patch0
   β”‚   β”‚   β”‚   β”‚   β”œβ”€β”€ patch1
   β”‚   β”‚   β”‚   β”‚   β”œβ”€β”€ ...
   β”‚   β”‚   β”‚   β”œβ”€β”€ val          # validation images
   β”‚   β”‚   β”‚   β”‚   β”œβ”€β”€ patch0
   β”‚   β”‚   β”‚   β”‚   β”œβ”€β”€ patch1
   β”‚   β”‚   β”‚   β”‚   β”œβ”€β”€ ...
   ```

## Results and Models

### Objects365 V1

| Architecture | Backbone | Style   | Lr schd | Mem (GB) | box AP | Config | Download |
| :----------: | :------: | :-----: | :-----: | :------: | :----: | :----: | :------: |
| Faster R-CNN | R-50     | pytorch | 1x      | -        | 19.6   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v1/faster_rcnn_r50_fpn_16x4_1x_obj365v1_20221219_181226-9ff10f95.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v1/faster_rcnn_r50_fpn_16x4_1x_obj365v1_20221219_181226.log.json) |
| Faster R-CNN | R-50     | pytorch | 1350K   | -        | 22.3   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/objects365/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1_20220510_142457-337d8965.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1_20220510_142457.log.json) |
| Retinanet    | R-50     | pytorch | 1x      | -        | 14.8   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/objects365/retinanet_r50_fpn_1x_obj365v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v1/retinanet_r50_fpn_1x_obj365v1_20221219_181859-ba3e3dd5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v1/retinanet_r50_fpn_1x_obj365v1_20221219_181859.log.json) |
| Retinanet    | R-50     | pytorch | 1350K   | -        | 18.0   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/objects365/retinanet_r50_fpn_syncbn_1350k_obj365v1.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_syncbn_1350k_obj365v1/retinanet_r50_fpn_syncbn_1350k_obj365v1_20220513_111237-7517c576.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_syncbn_1350k_obj365v1/retinanet_r50_fpn_syncbn_1350k_obj365v1_20220513_111237.log.json) |

### Objects365 V2

| Architecture | Backbone | Style   | Lr schd | Mem (GB) | box AP | Config | Download |
| :----------: | :------: | :-----: | :-----: | :------: | :----: | :----: | :------: |
| Faster R-CNN | R-50     | pytorch | 1x      | -        | 19.8   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v2.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v2/faster_rcnn_r50_fpn_16x4_1x_obj365v2_20221220_175040-5910b015.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v2/faster_rcnn_r50_fpn_16x4_1x_obj365v2_20221220_175040.log.json) |
| Retinanet    | R-50     | pytorch | 1x      | -        | 16.7   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/objects365/retinanet_r50_fpn_1x_obj365v2.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v2/retinanet_r50_fpn_1x_obj365v2_20221223_122105-d9b191f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v2/retinanet_r50_fpn_1x_obj365v2_20221223_122105.log.json) |
8,976
86.15534
558
md
mmdetection
mmdetection-master/configs/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v1.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/objects365v1_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=365)))

data = dict(samples_per_gpu=4)

# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=1.0 / 1000,
    step=[8, 11])

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
786
29.269231
72
py
mmdetection
mmdetection-master/configs/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v2.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/objects365v2_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=365)))

data = dict(samples_per_gpu=4)

# Using 32 GPUS while training
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=1.0 / 1000,
    step=[8, 11])

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
786
29.269231
72
py
mmdetection
mmdetection-master/configs/objects365/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/objects365v1_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(norm_cfg=norm_cfg),
    roi_head=dict(bbox_head=dict(num_classes=365)))

# Using 8 GPUS while training
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
runner = dict(
    _delete_=True, type='IterBasedRunner', max_iters=1350000)  # 36 epochs
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=1.0 / 1000,
    step=[900000, 1200000])
checkpoint_config = dict(interval=150000)
evaluation = dict(interval=150000, metric='bbox')

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
1,039
31.5
74
py
mmdetection
mmdetection-master/configs/objects365/metafile.yml
- Name: retinanet_r50_fpn_1x_obj365v1
  In Collection: RetinaNet
  Config: configs/objects365/retinanet_r50_fpn_1x_obj365v1.py
  Metadata:
    Training Memory (GB): 7.4
    Epochs: 12
    Training Data: Objects365 v1
    Training Techniques:
      - SGD with Momentum
      - Weight Decay
  Results:
    - Task: Object Detection
      Dataset: Objects365 v1
      Metrics:
        box AP: 14.8
  Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v1/retinanet_r50_fpn_1x_obj365v1_20221219_181859-ba3e3dd5.pth
- Name: retinanet_r50_fpn_syncbn_1350k_obj365v1
  In Collection: RetinaNet
  Config: configs/objects365/retinanet_r50_fpn_syncbn_1350k_obj365v1.py
  Metadata:
    Training Memory (GB): 7.6
    Iterations: 1350000
    Training Data: Objects365 v1
    Training Techniques:
      - SGD with Momentum
      - Weight Decay
  Results:
    - Task: Object Detection
      Dataset: Objects365 v1
      Metrics:
        box AP: 18.0
  Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_syncbn_1350k_obj365v1/retinanet_r50_fpn_syncbn_1350k_obj365v1_20220513_111237-7517c576.pth
- Name: retinanet_r50_fpn_1x_obj365v2
  In Collection: RetinaNet
  Config: configs/objects365/retinanet_r50_fpn_1x_obj365v2.py
  Metadata:
    Training Memory (GB): 7.2
    Epochs: 12
    Training Data: Objects365 v2
    Training Techniques:
      - SGD with Momentum
      - Weight Decay
  Results:
    - Task: Object Detection
      Dataset: Objects365 v2
      Metrics:
        box AP: 16.7
  Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/retinanet_r50_fpn_1x_obj365v2/retinanet_r50_fpn_1x_obj365v2_20221223_122105-d9b191f1.pth
- Name: faster_rcnn_r50_fpn_16x4_1x_obj365v1
  In Collection: Faster R-CNN
  Config: configs/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v1.py
  Metadata:
    Training Memory (GB): 11.4
    Epochs: 12
    Training Data: Objects365 v1
    Training Techniques:
      - SGD with Momentum
      - Weight Decay
  Results:
    - Task: Object Detection
      Dataset: Objects365 v1
      Metrics:
        box AP: 19.6
  Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v1/faster_rcnn_r50_fpn_16x4_1x_obj365v1_20221219_181226-9ff10f95.pth
- Name: faster_rcnn_r50_fpn_syncbn_1350k_obj365v1
  In Collection: Faster R-CNN
  Config: configs/objects365/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1.py
  Metadata:
    Training Memory (GB): 8.6
    Iterations: 1350000
    Training Data: Objects365 v1
    Training Techniques:
      - SGD with Momentum
      - Weight Decay
  Results:
    - Task: Object Detection
      Dataset: Objects365 v1
      Metrics:
        box AP: 22.3
  Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1/faster_rcnn_r50_fpn_syncbn_1350k_obj365v1_20220510_142457-337d8965.pth
- Name: faster_rcnn_r50_fpn_16x4_1x_obj365v2
  In Collection: Faster R-CNN
  Config: configs/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v2.py
  Metadata:
    Training Memory (GB): 10.8
    Epochs: 12
    Training Data: Objects365 v2
    Training Techniques:
      - SGD with Momentum
      - Weight Decay
  Results:
    - Task: Object Detection
      Dataset: Objects365 v2
      Metrics:
        box AP: 19.8
  Weights: https://download.openmmlab.com/mmdetection/v2.0/objects365/faster_rcnn_r50_fpn_16x4_1x_obj365v2/faster_rcnn_r50_fpn_16x4_1x_obj365v2_20221220_175040-5910b015.pth
3,448
32.813725
182
yml
mmdetection
mmdetection-master/configs/objects365/retinanet_r50_fpn_1x_obj365v1.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/objects365v1_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=365))

# Using 8 GPUS while training
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=10000,
    warmup_ratio=1.0 / 1000,
    step=[8, 11])

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
736
29.708333
72
py
mmdetection
mmdetection-master/configs/objects365/retinanet_r50_fpn_1x_obj365v2.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/objects365v2_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=365))

# Using 8 GPUS while training
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=10000,
    warmup_ratio=1.0 / 1000,
    step=[8, 11])

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
736
29.708333
72
py
mmdetection
mmdetection-master/configs/objects365/retinanet_r50_fpn_syncbn_1350k_obj365v1.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/objects365v1_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
# The top-level `bbox_head` only exists in single-stage models, so this
# RetinaNet config must inherit the retinanet model base (the dump listed
# the faster_rcnn base, which would not run with this override).
model = dict(backbone=dict(norm_cfg=norm_cfg), bbox_head=dict(num_classes=365))

# Using 8 GPUS while training
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
runner = dict(
    _delete_=True, type='IterBasedRunner', max_iters=1350000)  # 36 epochs
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=10000,
    warmup_ratio=1.0 / 1000,
    step=[900000, 1200000])
checkpoint_config = dict(interval=150000)
evaluation = dict(interval=150000, metric='bbox')

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=16)
1,016
32.9
79
py
mmdetection
mmdetection-master/configs/openimages/README.md
# Open Images Dataset

> [Open Images Dataset](https://arxiv.org/abs/1811.00982)

<!-- [DATASET] -->

## Abstract

<!-- [ABSTRACT] -->

#### Open Images v6

[Open Images](https://storage.googleapis.com/openimages/web/index.html) is a dataset of ~9M images annotated with image-level labels, object bounding boxes, object segmentation masks, visual relationships, and localized narratives:

- It contains a total of 16M bounding boxes for 600 object classes on 1.9M images, making it the largest existing dataset with object location annotations. The boxes have been largely manually drawn by professional annotators to ensure accuracy and consistency. The images are very diverse and often contain complex scenes with several objects (8.3 per image on average).
- Open Images also offers visual relationship annotations, indicating pairs of objects in particular relations (e.g. "woman playing guitar", "beer on table"), object properties (e.g. "table is wooden"), and human actions (e.g. "woman is jumping"). In total it has 3.3M annotations from 1,466 distinct relationship triplets.
- In V5 we added segmentation masks for 2.8M object instances in 350 classes. Segmentation masks mark the outline of objects, which characterizes their spatial extent to a much higher level of detail.
- In V6 we added 675k localized narratives: multimodal descriptions of images consisting of synchronized voice, text, and mouse traces over the objects being described. (Note we originally launched localized narratives only on train in V6, but since July 2020 we also have validation and test covered.)
- Finally, the dataset is annotated with 59.9M image-level labels spanning 19,957 classes.

We believe that having a single dataset with unified annotations for image classification, object detection, visual relationship detection, instance segmentation, and multimodal image descriptions will enable us to study these tasks jointly and stimulate progress towards genuine scene understanding.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/48282753/147199750-23e17230-c0cf-49a0-a13c-0d014d49107e.png" height="400"/>
</div>

#### Open Images Challenge 2019

[Open Images Challenge 2019](https://storage.googleapis.com/openimages/web/challenge2019.html) is based on the V5 release of the Open Images dataset. The images of the dataset are very varied and often contain complex scenes with several objects.

## Citation

```
@article{OpenImages,
  author = {Alina Kuznetsova and Hassan Rom and Neil Alldrin and Jasper Uijlings and Ivan Krasin and Jordi Pont-Tuset and Shahab Kamali and Stefan Popov and Matteo Malloci and Alexander Kolesnikov and Tom Duerig and Vittorio Ferrari},
  title = {The Open Images Dataset V4: Unified image classification, object detection, and visual relationship detection at scale},
  year = {2020},
  journal = {IJCV}
}
```

## Prepare Dataset

1. You need to download and extract the Open Images dataset.

2. The Open Images dataset does not have image metas (width and height of the image), which will be used during evaluation. We suggest getting the test image metas before training/testing by using `tools/misc/get_image_metas.py`.

   **Usage**

   ```shell
   python tools/misc/get_image_metas.py ${CONFIG} \
       --out ${OUTPUT FILE NAME}
   ```

3. The directory should be like this:

   ```none
   mmdetection
   β”œβ”€β”€ mmdet
   β”œβ”€β”€ tools
   β”œβ”€β”€ configs
   β”œβ”€β”€ data
   β”‚   β”œβ”€β”€ OpenImages
   β”‚   β”‚   β”œβ”€β”€ annotations
   β”‚   β”‚   β”‚   β”œβ”€β”€ bbox_labels_600_hierarchy.json
   β”‚   β”‚   β”‚   β”œβ”€β”€ class-descriptions-boxable.csv
   β”‚   β”‚   β”‚   β”œβ”€β”€ oidv6-train-annotations-bbox.csv
   β”‚   β”‚   β”‚   β”œβ”€β”€ validation-annotations-bbox.csv
   β”‚   β”‚   β”‚   β”œβ”€β”€ validation-annotations-human-imagelabels-boxable.csv
   β”‚   β”‚   β”‚   β”œβ”€β”€ validation-image-metas.pkl      # get from script
   β”‚   β”‚   β”œβ”€β”€ challenge2019
   β”‚   β”‚   β”‚   β”œβ”€β”€ challenge-2019-train-detection-bbox.txt
   β”‚   β”‚   β”‚   β”œβ”€β”€ challenge-2019-validation-detection-bbox.txt
   β”‚   β”‚   β”‚   β”œβ”€β”€ class_label_tree.np
   β”‚   β”‚   β”‚   β”œβ”€β”€ class_sample_train.pkl
   β”‚   β”‚   β”‚   β”œβ”€β”€ challenge-2019-validation-detection-human-imagelabels.csv  # download from official website
   β”‚   β”‚   β”‚   β”œβ”€β”€ challenge-2019-validation-metas.pkl  # get from script
   β”‚   β”‚   β”œβ”€β”€ OpenImages
   β”‚   β”‚   β”‚   β”œβ”€β”€ train        # training images
   β”‚   β”‚   β”‚   β”œβ”€β”€ test         # testing images
   β”‚   β”‚   β”‚   β”œβ”€β”€ validation   # validation images
   ```

**Note**:

1. The training and validation images of the Open Images Challenge dataset are based on Open Images v6, but the test images are different.
2. The Open Images Challenge annotations are obtained from [TSD](https://github.com/Sense-X/TSD). You can also download the annotations from the [official website](https://storage.googleapis.com/openimages/web/challenge2019_downloads.html), and set `data.train.type=OpenImagesDataset`, `data.val.type=OpenImagesDataset`, and `data.test.type=OpenImagesDataset` in the config.
3. If users do not want to use `validation-annotations-human-imagelabels-boxable.csv` and `challenge-2019-validation-detection-human-imagelabels.csv`, they can set `data.val.load_image_level_labels=False` and `data.test.load_image_level_labels=False` in the config. Please note that loading image-level labels is the default behavior of the Open Images evaluation metric. For more details, please refer to the [official website](https://storage.googleapis.com/openimages/web/evaluation.html).
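To make notes 2 and 3 concrete, such an override could look like the fragment below. This is a minimal sketch that only shows the fields mentioned in the notes; the remaining dataset settings (annotation files, image prefixes, pipelines) are omitted.

```python
# Minimal sketch for notes 2 and 3: use the plain OpenImagesDataset
# annotations and skip loading image-level labels during evaluation.
data = dict(
    train=dict(type='OpenImagesDataset'),
    val=dict(type='OpenImagesDataset', load_image_level_labels=False),
    test=dict(type='OpenImagesDataset', load_image_level_labels=False))
```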
For more details, please refer to the [official website](https://storage.googleapis.com/openimages/web/evaluation.html).

## Results and Models

| Architecture | Backbone | Style | Lr schd | Sampler | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :-----------------------------: | :------: | :-----: | :-----: | :-----------------: | :------: | :------------: | :----: | :----: | :------: |
| Faster R-CNN | R-50 | pytorch | 1x | Group Sampler | 7.7 | - | 51.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159-e87ab7ce.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159.log.json) |
| Faster R-CNN | R-50 | pytorch | 1x | Class Aware Sampler | 7.7 | - | 60.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424-98c630e5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424.log.json) |
| Faster R-CNN (Challenge 2019) | R-50 | pytorch | 1x | Group Sampler | 7.7 | - | 54.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100-0e79e5df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100.log.json) |
| Faster R-CNN (Challenge 2019) | R-50 | pytorch | 1x | Class Aware Sampler | 7.1 | - | 65.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021-34c402d9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021.log.json) |
| RetinaNet | R-50 | pytorch | 1x | Group Sampler | 6.6 | - | 61.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954-d2ae5462.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954.log.json) |
| SSD | VGG16 | pytorch | 36e | Group Sampler | 10.8 | - | 35.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/ssd300_32x8_36e_openimages.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232-dce93846.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232.log.json) |

**Notes:**

- 'cas' is short for 'Class Aware Sampler'

### Results of considering image-level labels

| Architecture | Sampler | Image-Level Labels Considered | box AP |
| :-------------------------------: | :-----------------: | :---------------------------: | :----: |
| Faster R-CNN r50 (Challenge 2019) | Group Sampler | w/o | 62.19 |
| Faster R-CNN r50 (Challenge 2019) | Group Sampler | w/ | 54.87 |
| Faster R-CNN r50 (Challenge 2019) | Class Aware Sampler | w/o | 71.77 |
| Faster R-CNN r50 (Challenge 2019) | Class Aware Sampler | w/ | 64.98 |
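For reference, class-aware sampling is enabled with a one-line dataloader override on top of the baseline config; the snippet below follows the `*_cas_*` configs in this directory:

```python
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py']
# Use ClassAwareSampler so that training images are drawn per class rather
# than uniformly, which helps with the long-tailed Open Images distribution.
data = dict(
    train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))
```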
11,578
76.711409
669
md
mmdetection
mmdetection-master/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/openimages_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

model = dict(roi_head=dict(bbox_head=dict(num_classes=601)))

# Using 32 GPUs while training
optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=26000,
    warmup_ratio=1.0 / 64,
    step=[8, 11])

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
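# Illustration (comments only, not part of the original config): with
# warmup_ratio=1.0 / 64, the LR ramps linearly from 0.08 / 64 = 0.00125 up
# to 0.08 over the first 26000 iterations, then is stepped down (by mmcv's
# default step factor of 0.1) after epochs 8 and 11.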
751
30.333333
72
py
mmdetection
mmdetection-master/configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py'] model = dict( roi_head=dict(bbox_head=dict(num_classes=500)), test_cfg=dict(rcnn=dict(score_thr=0.01))) # dataset settings dataset_type = 'OpenImagesChallengeDataset' data_root = 'data/OpenImages/' data = dict( train=dict( type=dataset_type, ann_file=data_root + 'challenge2019/challenge-2019-train-detection-bbox.txt', img_prefix=data_root + 'OpenImages/', label_file=data_root + 'challenge2019/cls-label-description.csv', hierarchy_file=data_root + 'challenge2019/class_label_tree.np'), val=dict( type=dataset_type, ann_file=data_root + 'challenge2019/challenge-2019-validation-detection-bbox.txt', img_prefix=data_root + 'OpenImages/', label_file=data_root + 'challenge2019/cls-label-description.csv', hierarchy_file=data_root + 'challenge2019/class_label_tree.np', meta_file=data_root + 'challenge2019/challenge-2019-validation-metas.pkl', image_level_ann_file=data_root + 'challenge2019/challenge-2019-validation-detection-' 'human-imagelabels.csv'), test=dict( type=dataset_type, ann_file=data_root + 'challenge2019/challenge-2019-validation-detection-bbox.txt', img_prefix=data_root + 'OpenImages/', label_file=data_root + 'challenge2019/cls-label-description.csv', hierarchy_file=data_root + 'challenge2019/class_label_tree.np', meta_file=data_root + 'challenge2019/challenge-2019-validation-metas.pkl', image_level_ann_file=data_root + 'challenge2019/challenge-2019-validation-detection-' 'human-imagelabels.csv')) evaluation = dict(interval=1, metric='mAP') # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (32 GPUs) x (2 samples per GPU) auto_scale_lr = dict(base_batch_size=64)
1,957
39.791667
73
py
mmdetection
mmdetection-master/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages.py'] # Use ClassAwareSampler data = dict( train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))
166
26.833333
72
py
mmdetection
mmdetection-master/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py
_base_ = ['faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py'] # Use ClassAwareSampler data = dict( train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))
176
28.5
72
py
mmdetection
mmdetection-master/configs/openimages/metafile.yml
Models:
  - Name: faster_rcnn_r50_fpn_32x2_1x_openimages
    In Collection: Faster R-CNN
    Config: configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages.py
    Metadata:
      Training Memory (GB): 7.7
      Epochs: 12
      Training Data: Open Images v6
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images v6
        Metrics:
          box AP: 51.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_20211130_231159-e87ab7ce.pth

  - Name: retinanet_r50_fpn_32x2_1x_openimages
    In Collection: RetinaNet
    Config: configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py
    Metadata:
      Training Memory (GB): 6.6
      Epochs: 12
      Training Data: Open Images v6
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images v6
        Metrics:
          box AP: 61.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/retinanet_r50_fpn_32x2_1x_openimages/retinanet_r50_fpn_32x2_1x_openimages_20211223_071954-d2ae5462.pth

  - Name: ssd300_32x8_36e_openimages
    In Collection: SSD
    Config: configs/openimages/ssd300_32x8_36e_openimages.py
    Metadata:
      Training Memory (GB): 10.8
      Epochs: 36
      Training Data: Open Images v6
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images v6
        Metrics:
          box AP: 35.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/ssd300_32x8_36e_openimages/ssd300_32x8_36e_openimages_20211224_000232-dce93846.pth

  - Name: faster_rcnn_r50_fpn_32x2_1x_openimages_challenge
    In Collection: Faster R-CNN
    Config: configs/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge.py
    Metadata:
      Training Memory (GB): 7.7
      Epochs: 12
      Training Data: Open Images Challenge 2019
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images Challenge 2019
        Metrics:
          box AP: 54.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_1x_openimages_challenge_20220114_045100-0e79e5df.pth

  - Name: faster_rcnn_r50_fpn_32x2_cas_1x_openimages
    In Collection: Faster R-CNN
    Config: configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py
    Metadata:
      Training Memory (GB): 7.7
      Epochs: 12
      Training Data: Open Images v6
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images v6
        Metrics:
          box AP: 60.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_20220306_202424-98c630e5.pth

  - Name: faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge
    In Collection: Faster R-CNN
    Config: configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge.py
    Metadata:
      Training Memory (GB): 7.1
      Epochs: 12
      Training Data: Open Images Challenge 2019
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
    Results:
      - Task: Object Detection
        Dataset: Open Images Challenge 2019
        Metrics:
          box AP: 65.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge/faster_rcnn_r50_fpn_32x2_cas_1x_openimages_challenge_20220221_192021-34c402d9.pth
3,886
36.737864
206
yml
mmdetection
mmdetection-master/configs/openimages/retinanet_r50_fpn_32x2_1x_openimages.py
_base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/openimages_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict(bbox_head=dict(num_classes=601)) optimizer = dict(type='SGD', lr=0.08, momentum=0.9, weight_decay=0.0001) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) lr_config = dict( policy='step', warmup='linear', warmup_iters=26000, warmup_ratio=1.0 / 64, step=[8, 11]) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (32 GPUs) x (2 samples per GPU) auto_scale_lr = dict(base_batch_size=64)
703
29.608696
72
py
mmdetection
mmdetection-master/configs/openimages/ssd300_32x8_36e_openimages.py
_base_ = [
    '../_base_/models/ssd300.py', '../_base_/datasets/openimages_detection.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_1x.py'
]
model = dict(
    bbox_head=dict(
        num_classes=601,
        anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_type = 'OpenImagesDataset'
data_root = 'data/OpenImages/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile', to_float32=True),
    dict(type='LoadAnnotations', with_bbox=True, normed_bbox=True),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(300, 300),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,  # using 32 GPUs while training
    workers_per_gpu=0,  # workers_per_gpu > 0 may cause out-of-memory errors
    train=dict(
        _delete_=True,
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/oidv6-train-annotations-bbox.csv',
            img_prefix=data_root + 'OpenImages/train/',
            label_file=data_root + 'annotations/class-descriptions-boxable.csv',
            hierarchy_file=data_root +
            'annotations/bbox_labels_600_hierarchy.json',
            pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.04, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=20000,
    warmup_ratio=0.001,
    step=[8, 11])
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (32 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=256)
2,838
32.797619
79
py
mmdetection
mmdetection-master/configs/paa/README.md
# PAA

> [Probabilistic Anchor Assignment with IoU Prediction for Object Detection](https://arxiv.org/abs/2007.08103)

<!-- [ALGORITHM] -->

## Abstract

In object detection, determining which anchors to assign as positive or negative samples, known as anchor assignment, has been revealed as a core procedure that can significantly affect a model's performance. In this paper we propose a novel anchor assignment strategy that adaptively separates anchors into positive and negative samples for a ground-truth bounding box according to the model's learning status, such that it is able to reason about the separation in a probabilistic manner. To do so, we first calculate the scores of anchors conditioned on the model and fit a probability distribution to these scores. The model is then trained with anchors separated into positive and negative samples according to their probabilities. Moreover, we investigate the gap between the training and testing objectives and propose to predict the Intersection-over-Union of detected boxes as a measure of localization quality to reduce the discrepancy. The combined score of classification and localization qualities, serving as a box selection metric in non-maximum suppression, aligns well with the proposed anchor assignment strategy and leads to significant performance improvements. The proposed method only adds a single convolutional layer to the RetinaNet baseline and does not require multiple anchors per location, so it is efficient. Experimental results verify the effectiveness of the proposed methods. In particular, our models set new records for single-stage detectors on the MS COCO test-dev dataset with various backbones.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143968195-519a116a-de29-437e-b4c8-30aef43dcb15.png"/>
</div>

## Results and Models

We provide config files to reproduce the object detection results in the ECCV 2020 paper for Probabilistic Anchor Assignment with IoU Prediction for Object Detection.
| Backbone | Lr schd | Mem (GB) | Score voting | box AP | Config | Download | | :-------: | :-----: | :------: | :----------: | :----: | :---------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | | R-50-FPN | 12e | 3.7 | True | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.log.json) | | R-50-FPN | 12e | 3.7 | False | 40.2 | - | | | R-50-FPN | 18e | 3.7 | True | 41.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_1.5x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.log.json) | | R-50-FPN | 18e | 3.7 | False | 41.2 | - | | | R-50-FPN | 24e | 3.7 | True | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.log.json) | | R-50-FPN | 36e | 3.7 | True | 43.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722-06a6880b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722.log.json) | | R-101-FPN | 12e | 6.2 | True | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.log.json) | | R-101-FPN | 12e | 6.2 | False | 42.4 | - | | | R-101-FPN | 24e | 6.2 | True | 43.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.log.json) | | R-101-FPN | 36e | 6.2 | True | 45.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/paa/paa_r101_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202-83250d22.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202.log.json) 
**Note**:

1. We find that the performance is unstable with the 1x setting and may fluctuate by about 0.2 mAP. We report the best results.

## Citation

```latex
@inproceedings{paa-eccv2020,
  title={Probabilistic Anchor Assignment with IoU Prediction for Object Detection},
  author={Kim, Kang and Lee, Hee Seok},
  booktitle = {ECCV},
  year={2020}
}
```
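For intuition, the following standalone sketch (not the MMDetection implementation; it assumes scikit-learn and NumPy are installed) fits a two-component Gaussian mixture to per-anchor scores for one ground-truth box and splits them into positive and negative samples, mirroring the probabilistic separation described in the abstract:

```python
import numpy as np
from sklearn.mixture import GaussianMixture


def paa_style_split(anchor_scores):
    """Split candidate anchors into positives/negatives for one GT box.

    `anchor_scores` are per-anchor joint losses (lower is better); we fit a
    1-D, 2-component GMM to them and treat the low-mean component as the
    positive set. This is a rough illustration only.
    """
    scores = np.sort(np.asarray(anchor_scores, dtype=np.float64))
    scores = scores.reshape(-1, 1)
    gmm = GaussianMixture(
        n_components=2,
        weights_init=[0.5, 0.5],
        means_init=[[scores.min()], [scores.max()]],
        precisions_init=[[[1.0]], [[1.0]]])
    gmm.fit(scores)
    # The component with the smaller mean models the low-loss anchors.
    pos_component = int(np.argmin(gmm.means_))
    assignments = gmm.predict(scores)
    pos = scores[assignments == pos_component].ravel()
    neg = scores[assignments != pos_component].ravel()
    return pos, neg


if __name__ == '__main__':
    rng = np.random.default_rng(0)
    # Fake scores: a small low-loss cluster and a large high-loss cluster.
    fake = np.concatenate(
        [rng.normal(0.2, 0.05, 10), rng.normal(1.0, 0.1, 40)])
    pos, neg = paa_style_split(fake)
    print(f'{len(pos)} positives, {len(neg)} negatives')
```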
7,926
164.145833
1,515
md
mmdetection
mmdetection-master/configs/paa/metafile.yml
Collections: - Name: PAA Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - Probabilistic Anchor Assignment - ResNet Paper: URL: https://arxiv.org/abs/2007.08103 Title: 'Probabilistic Anchor Assignment with IoU Prediction for Object Detection' README: configs/paa/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/detectors/paa.py#L6 Version: v2.4.0 Models: - Name: paa_r50_fpn_1x_coco In Collection: PAA Config: configs/paa/paa_r50_fpn_1x_coco.py Metadata: Training Memory (GB): 3.7 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1x_coco/paa_r50_fpn_1x_coco_20200821-936edec3.pth - Name: paa_r50_fpn_1.5x_coco In Collection: PAA Config: configs/paa/paa_r50_fpn_1.5x_coco.py Metadata: Training Memory (GB): 3.7 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_1.5x_coco/paa_r50_fpn_1.5x_coco_20200823-805d6078.pth - Name: paa_r50_fpn_2x_coco In Collection: PAA Config: configs/paa/paa_r50_fpn_2x_coco.py Metadata: Training Memory (GB): 3.7 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 41.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_2x_coco/paa_r50_fpn_2x_coco_20200821-c98bfc4e.pth - Name: paa_r50_fpn_mstrain_3x_coco In Collection: PAA Config: configs/paa/paa_r50_fpn_mstrain_3x_coco.py Metadata: Training Memory (GB): 3.7 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r50_fpn_mstrain_3x_coco/paa_r50_fpn_mstrain_3x_coco_20210121_145722-06a6880b.pth - Name: paa_r101_fpn_1x_coco In Collection: PAA Config: configs/paa/paa_r101_fpn_1x_coco.py Metadata: Training Memory (GB): 6.2 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.6 Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_1x_coco/paa_r101_fpn_1x_coco_20200821-0a1825a4.pth - Name: paa_r101_fpn_2x_coco In Collection: PAA Config: configs/paa/paa_r101_fpn_2x_coco.py Metadata: Training Memory (GB): 6.2 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 43.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_2x_coco/paa_r101_fpn_2x_coco_20200821-6829f96b.pth - Name: paa_r101_fpn_mstrain_3x_coco In Collection: PAA Config: configs/paa/paa_r101_fpn_mstrain_3x_coco.py Metadata: Training Memory (GB): 6.2 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.1 Weights: https://download.openmmlab.com/mmdetection/v2.0/paa/paa_r101_fpn_mstrain_3x_coco/paa_r101_fpn_mstrain_3x_coco_20210122_084202-83250d22.pth
3,350
30.914286
151
yml
mmdetection
mmdetection-master/configs/paa/paa_r101_fpn_1x_coco.py
_base_ = './paa_r50_fpn_1x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
191
26.428571
61
py
mmdetection
mmdetection-master/configs/paa/paa_r101_fpn_2x_coco.py
_base_ = './paa_r101_fpn_1x_coco.py' lr_config = dict(step=[16, 22]) runner = dict(type='EpochBasedRunner', max_epochs=24)
123
30
53
py
mmdetection
mmdetection-master/configs/paa/paa_r101_fpn_mstrain_3x_coco.py
_base_ = './paa_r50_fpn_mstrain_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
199
27.571429
61
py