mmdetection-master/configs/paa/paa_r50_fpn_1.5x_coco.py
_base_ = './paa_r50_fpn_1x_coco.py'
lr_config = dict(step=[12, 16])
runner = dict(type='EpochBasedRunner', max_epochs=18)
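This 1.5x schedule only overrides the LR decay steps and the epoch count; everything else is inherited from the 1x config through `_base_`. As a quick sanity check, the merged result can be inspected with mmcv (a minimal sketch, assuming it is run from the mmdetection-master root so the relative `configs/` path resolves):

```python
from mmcv import Config

# _base_ files are merged recursively; keys set here override inherited ones
cfg = Config.fromfile('configs/paa/paa_r50_fpn_1.5x_coco.py')
print(cfg.runner.max_epochs)  # 18, overridden in this file
print(cfg.lr_config.step)     # [12, 16], overridden in this file
print(cfg.model.type)         # 'PAA', inherited from the 1x config
```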
mmdetection-master/configs/paa/paa_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='PAA',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='PAAHead',
        reg_decoded_bbox=True,
        score_voting=True,
        topk=9,
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.1,
            neg_iou_thr=0.1,
            min_pos_iou=0,
            ignore_iof_thr=-1),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
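The config is a plain dict tree, so the detector object can be built from it directly. A minimal sketch with the standard mmdet 2.x builder (run from the repo root; building does not download the pretrained backbone, that only happens in `init_weights`):

```python
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/paa/paa_r50_fpn_1x_coco.py')
# train_cfg and test_cfg are nested inside the model dict in this config,
# so they travel along with cfg.model automatically
model = build_detector(cfg.model)
print(type(model).__name__)         # PAA
print(model.bbox_head.num_classes)  # 80
```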
mmdetection-master/configs/paa/paa_r50_fpn_2x_coco.py
_base_ = './paa_r50_fpn_1x_coco.py'
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
mmdetection-master/configs/paa/paa_r50_fpn_mstrain_3x_coco.py
_base_ = './paa_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
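Each pipeline entry is a transform config instantiated from mmdet's PIPELINES registry; `Resize` with `multiscale_mode='range'` samples the short side uniformly between 640 and 800 per image. A minimal sketch of how such a list becomes a callable pipeline (the trailing format/collect steps are omitted to keep it short):

```python
from mmdet.datasets.pipelines import Compose

# each dict is looked up in the registry by its 'type' name
pipeline = Compose([
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
])
print(pipeline)  # prints the instantiated transform sequence
```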
mmdetection-master/configs/pafpn/README.md
# PAFPN

> [Path Aggregation Network for Instance Segmentation](https://arxiv.org/abs/1803.01534)

<!-- [ALGORITHM] -->

## Abstract

The way that information propagates in neural networks is of great importance. In this paper, we propose Path Aggregation Network (PANet) aiming at boosting information flow in proposal-based instance segmentation framework. Specifically, we enhance the entire feature hierarchy with accurate localization signals in lower layers by bottom-up path augmentation, which shortens the information path between lower layers and topmost feature. We present adaptive feature pooling, which links feature grid and all feature levels to make useful information in each feature level propagate directly to following proposal subnetworks. A complementary branch capturing different views for each proposal is created to further improve mask prediction. These improvements are simple to implement, with subtle extra computational overhead. Our PANet reaches the 1st place in the COCO 2017 Challenge Instance Segmentation task and the 2nd place in Object Detection task without large-batch training. It is also state-of-the-art on MVD and Cityscapes.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143968947-5f2d7e8a-a236-4d59-8f2d-7fbb12764845.png"/>
</div>

## Results and Models

| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R-50-FPN | pytorch | 1x | 4.0 | 17.2 | 37.5 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_20200503_105836.log.json) |

## Citation

```latex
@inproceedings{liu2018path,
  author = {Shu Liu and Lu Qi and Haifang Qin and Jianping Shi and Jiaya Jia},
  title = {Path Aggregation Network for Instance Segmentation},
  booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  year = {2018}
}
```
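To try the released checkpoint from the table above, the standard high-level API applies (a sketch, assuming the checkpoint has been downloaded locally and a `demo.jpg` exists):

```python
from mmdet.apis import init_detector, inference_detector

config = 'configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py'
checkpoint = 'faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth'

model = init_detector(config, checkpoint, device='cuda:0')
# result is a per-class list of [x1, y1, x2, y2, score] arrays
result = inference_detector(model, 'demo.jpg')
model.show_result('demo.jpg', result, out_file='result.jpg')
```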
mmdetection-master/configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'

model = dict(
    neck=dict(
        type='PAFPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5))
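The PAFPN neck is a drop-in replacement for FPN: four backbone levels in, five pyramid levels out. A standalone sketch with dummy feature maps (the batch/spatial sizes are arbitrary choices for illustration):

```python
import torch
from mmdet.models.necks import PAFPN

neck = PAFPN(in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5)
feats = [
    torch.rand(1, c, s, s)
    for c, s in zip([256, 512, 1024, 2048], [64, 32, 16, 8])
]
outs = neck(feats)
print([tuple(o.shape) for o in outs])
# five 256-channel levels; the extra fifth level is pooled from the fourth
```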
mmdetection-master/configs/pafpn/metafile.yml
Collections:
  - Name: PAFPN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - PAFPN
    Paper:
      URL: https://arxiv.org/abs/1803.01534
      Title: 'Path Aggregation Network for Instance Segmentation'
    README: configs/pafpn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/necks/pafpn.py#L11
      Version: v2.0.0

Models:
  - Name: faster_rcnn_r50_pafpn_1x_coco
    In Collection: PAFPN
    Config: configs/pafpn/faster_rcnn_r50_pafpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.0
      inference time (ms/im):
        - value: 58.14
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pafpn/faster_rcnn_r50_pafpn_1x_coco/faster_rcnn_r50_pafpn_1x_coco_bbox_mAP-0.375_20200503_105836-b7b4b9bd.pth
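The metafile records inference time as 58.14 ms/im while the README table reports 17.2 fps; the two figures are consistent, as a one-line check shows:

```python
ms_per_im = 58.14                  # from the metafile (V100, FP32, batch 1)
print(round(1000 / ms_per_im, 1))  # 17.2 fps, matching the README table
```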
mmdetection-master/configs/panoptic_fpn/README.md
# Panoptic FPN

> [Panoptic feature pyramid networks](https://arxiv.org/abs/1901.02446)

<!-- [ALGORITHM] -->

## Abstract

The recently introduced panoptic segmentation task has renewed our community's interest in unifying the tasks of instance segmentation (for thing classes) and semantic segmentation (for stuff classes). However, current state-of-the-art methods for this joint task use separate and dissimilar networks for instance and semantic segmentation, without performing any shared computation. In this work, we aim to unify these methods at the architectural level, designing a single network for both tasks. Our approach is to endow Mask R-CNN, a popular instance segmentation method, with a semantic segmentation branch using a shared Feature Pyramid Network (FPN) backbone. Surprisingly, this simple baseline not only remains effective for instance segmentation, but also yields a lightweight, top-performing method for semantic segmentation. In this work, we perform a detailed study of this minimally extended version of Mask R-CNN with FPN, which we refer to as Panoptic FPN, and show it is a robust and accurate baseline for both tasks. Given its effectiveness and conceptual simplicity, we hope our method can serve as a strong baseline and aid future research in panoptic segmentation.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143968979-a1593758-c9d7-44a6-a3b8-d9686ef19ce8.png" height="300"/>
</div>

## Dataset

Panoptic FPN requires the COCO and [COCO-panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) datasets for training and evaluation. Download and extract the panoptic annotations into the COCO dataset path; the directory layout should look like this:

```none
mmdetection
├── mmdet
├── tools
├── configs
├── data
│   ├── coco
│   │   ├── annotations
│   │   │   ├── panoptic_train2017.json
│   │   │   ├── panoptic_train2017
│   │   │   ├── panoptic_val2017.json
│   │   │   ├── panoptic_val2017
│   │   ├── train2017
│   │   ├── val2017
│   │   ├── test2017
```

## Results and Models

| Backbone | style | Lr schd | Mem (GB) | Inf time (fps) | PQ | SQ | RQ | PQ_th | SQ_th | RQ_th | PQ_st | SQ_st | RQ_st | Config | Download |
| :-------: | :-----: | :-----: | :------: | :------------: | :--: | :--: | :--: | :---: | :---: | :---: | :---: | :---: | :---: | :----: | :------: |
| R-50-FPN | pytorch | 1x | 4.7 | | 40.2 | 77.8 | 49.3 | 47.8 | 80.9 | 57.5 | 28.9 | 73.1 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153.log.json) |
| R-50-FPN | pytorch | 3x | - | - | 42.5 | 78.1 | 51.7 | 50.3 | 81.5 | 60.3 | 30.7 | 73.0 | 38.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155-5650f98b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155.log.json) |
| R-101-FPN | pytorch | 1x | 6.7 | | 42.2 | 78.3 | 51.4 | 50.1 | 81.4 | 59.9 | 30.3 | 73.6 | 38.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950.log.json) |
| R-101-FPN | pytorch | 3x | - | - | 44.1 | 78.9 | 53.6 | 52.1 | 81.7 | 62.3 | 32.0 | 74.6 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712-9c99acc4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712.log.json) |
| R2-50-FPN | pytorch | 1x | - | - | 42.5 | 78.0 | 51.8 | 50.0 | 81.4 | 60.0 | 31.1 | 72.8 | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/dev/configs/panoptic_fpn/panoptic_fpn_r2_50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/rfnext/panoptic_fpn_r2_50_fpn_fp16_1x_coco/panoptic_fpn_r2_50_fpn_fp16_1x_coco-fa6c51f0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/rfnext/panoptic_fpn_r2_50_fpn_fp16_1x_coco/panoptic_fpn_r2_50_fpn_fp16_1x_coco_20221114_224729.log.json) |

## Citation

The base method for the panoptic segmentation task.

```latex
@inproceedings{kirillov2018panopticfpn,
  author = {Alexander Kirillov and Ross Girshick and Kaiming He and Piotr Dollar},
  title = {Panoptic Feature Pyramid Networks},
  booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  year = {2019}
}
```
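Before training, it is worth verifying the layout above; a small sketch (paths exactly as in the tree, assuming it runs from the repo root):

```python
import os.path as osp

root = 'data/coco'
required = [
    'annotations/panoptic_train2017.json',
    'annotations/panoptic_train2017',
    'annotations/panoptic_val2017.json',
    'annotations/panoptic_val2017',
    'train2017',
    'val2017',
]
for rel in required:
    status = 'ok' if osp.exists(osp.join(root, rel)) else 'MISSING'
    print(f'{status:8s} {rel}')
```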
mmdetection-master/configs/panoptic_fpn/metafile.yml
Collections:
  - Name: PanopticFPN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - PanopticFPN
    Paper:
      URL: https://arxiv.org/pdf/1901.02446
      Title: 'Panoptic feature pyramid networks'
    README: configs/panoptic_fpn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/detectors/panoptic_fpn.py#L7
      Version: v2.16.0

Models:
  - Name: panoptic_fpn_r50_fpn_1x_coco
    In Collection: PanopticFPN
    Config: configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.6
      Epochs: 12
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 40.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco/panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth
  - Name: panoptic_fpn_r50_fpn_mstrain_3x_coco
    In Collection: PanopticFPN
    Config: configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 4.6
      Epochs: 36
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 42.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco/panoptic_fpn_r50_fpn_mstrain_3x_coco_20210824_171155-5650f98b.pth
  - Name: panoptic_fpn_r101_fpn_1x_coco
    In Collection: PanopticFPN
    Config: configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.5
      Epochs: 12
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 42.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco/panoptic_fpn_r101_fpn_1x_coco_20210820_193950-ab9157a2.pth
  - Name: panoptic_fpn_r101_fpn_mstrain_3x_coco
    In Collection: PanopticFPN
    Config: configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 6.5
      Epochs: 36
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 44.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco/panoptic_fpn_r101_fpn_mstrain_3x_coco_20210823_114712-9c99acc4.pth
  - Name: panoptic_fpn_r2_50_fpn_fp16_1x_coco
    In Collection: PanopticFPN
    Config: configs/panoptic_fpn/panoptic_fpn_r2_50_fpn_fp16_1x_coco.py
    Metadata:
      Training Memory (GB): 3.5
      Epochs: 12
    Results:
      - Task: Panoptic Segmentation
        Dataset: COCO
        Metrics:
          PQ: 42.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/rfnext/panoptic_fpn_r2_50_fpn_fp16_1x_coco/panoptic_fpn_r2_50_fpn_fp16_1x_coco-fa6c51f0.pth
mmdetection-master/configs/panoptic_fpn/panoptic_fpn_r101_fpn_1x_coco.py
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
mmdetection-master/configs/panoptic_fpn/panoptic_fpn_r101_fpn_mstrain_3x_coco.py
_base_ = './panoptic_fpn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
mmdetection-master/configs/panoptic_fpn/panoptic_fpn_r2_50_fpn_fp16_1x_coco.py
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=50,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='res2net50_v1b_26w_4s-3cf99910.pth')))

fp16 = dict(loss_scale='dynamic')
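The trailing `fp16 = dict(loss_scale='dynamic')` enables mixed-precision training with dynamic loss scaling (handled by mmcv's Fp16OptimizerHook during training). For half-precision inference, the corresponding mmcv utility converts a built model while patching norm layers back to FP32; a sketch under the assumption that the Res2Net checkpoint is not needed just to construct the module:

```python
from mmcv import Config
from mmcv.runner import wrap_fp16_model
from mmdet.models import build_detector

cfg = Config.fromfile(
    'configs/panoptic_fpn/panoptic_fpn_r2_50_fpn_fp16_1x_coco.py')
model = build_detector(cfg.model)
wrap_fp16_model(model)  # FP16 weights, norm layers kept in FP32
```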
mmdetection-master/configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_panoptic.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='PanopticFPN',
    semantic_head=dict(
        type='PanopticFPNHead',
        num_things_classes=80,
        num_stuff_classes=53,
        in_channels=256,
        inner_channels=128,
        start_level=0,
        end_level=4,
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
        conv_cfg=None,
        loss_seg=dict(
            type='CrossEntropyLoss', ignore_index=255, loss_weight=0.5)),
    panoptic_fusion_head=dict(
        type='HeuristicFusionHead',
        num_things_classes=80,
        num_stuff_classes=53),
    test_cfg=dict(
        panoptic=dict(
            score_thr=0.6,
            max_per_img=100,
            mask_thr_binary=0.5,
            mask_overlap=0.5,
            nms=dict(type='nms', iou_threshold=0.5, class_agnostic=True),
            stuff_area_limit=4096)))

custom_hooks = []
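The same high-level API used for detection also drives panoptic inference. A sketch with the 1x checkpoint from the metafile above and a local `demo.jpg`; note that `'pan_results'` is how recent 2.x releases name the per-pixel panoptic map, so the key is an assumption about the installed version:

```python
from mmdet.apis import init_detector, inference_detector

model = init_detector(
    'configs/panoptic_fpn/panoptic_fpn_r50_fpn_1x_coco.py',
    'panoptic_fpn_r50_fpn_1x_coco_20210821_101153-9668fd13.pth',
    device='cuda:0')
result = inference_detector(model, 'demo.jpg')
print(result['pan_results'].shape)  # (H, W) map encoding segment id and class
model.show_result('demo.jpg', result, out_file='panoptic.jpg')
```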
mmdetection-master/configs/panoptic_fpn/panoptic_fpn_r50_fpn_mstrain_3x_coco.py
_base_ = './panoptic_fpn_r50_fpn_1x_coco.py'

# dataset settings
dataset_type = 'CocoPanopticDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='LoadPanopticAnnotations',
        with_bbox=True,
        with_mask=True,
        with_seg=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='SegRescale', scale_factor=1 / 4),
    dict(type='DefaultFormatBundle'),
    dict(
        type='Collect',
        keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

# Use RepeatDataset to speed up training
data = dict(
    train=dict(
        _delete_=True,
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/panoptic_train2017.json',
            img_prefix=data_root + 'train2017/',
            seg_prefix=data_root + 'annotations/panoptic_train2017/',
            pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
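RepeatDataset gives the 3x schedule without changing the runner: one runner "epoch" walks the underlying annotations `times` times, so the inherited 12-epoch runner effectively trains for 36 data epochs. A toy sketch of the wrapper's indexing (ToyDataset is a hypothetical stand-in; the wrapper only requires a CLASSES attribute):

```python
from mmdet.datasets import RepeatDataset

class ToyDataset:  # hypothetical stand-in for CocoPanopticDataset
    CLASSES = ('thing', 'stuff')

    def __len__(self):
        return 4

    def __getitem__(self, idx):
        return idx

wrapped = RepeatDataset(ToyDataset(), times=3)
print(len(wrapped))  # 12: each sample is seen 3 times per epoch
print(wrapped[5])    # 1: indices wrap around via idx % len(original)
```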
mmdetection-master/configs/pascal_voc/README.md
# Pascal VOC

> [The Pascal Visual Object Classes (VOC) Challenge](https://link.springer.com/article/10.1007/s11263-009-0275-4)

<!-- [DATASET] -->

## Abstract

The Pascal Visual Object Classes (VOC) challenge is a benchmark in visual object category recognition and detection, providing the vision and machine learning communities with a standard dataset of images and annotation, and standard evaluation procedures. Organised annually from 2005 to present, the challenge and its associated dataset has become accepted as the benchmark for object detection. This paper describes the dataset and evaluation procedure. We review the state-of-the-art in evaluated methods for both classification and detection, analyse whether the methods are statistically different, what they are learning from the images (e.g. the object or its context), and what the methods find easy or confuse. The paper concludes with lessons learnt in the three year history of the challenge, and proposes directions for future improvement and extension.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143969235-6bb4d665-0470-4bae-825c-492eb4582127.png" height="600"/>
</div>

## Results and Models

| Architecture | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :----------: | :------: | :---: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| Faster R-CNN C4 | R-50 | caffe | 18k | | - | 80.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712_20220314_234327-847a14d2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712_20220314_234327.log.json) |
| Faster R-CNN | R-50 | pytorch | 1x | 2.6 | - | 80.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20220320_192712-54bef0f3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712/faster_rcnn_r50_fpn_1x_voc0712_20220320_192712.log.json) |
| RetinaNet | R-50 | pytorch | 1x | 2.1 | - | 77.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200617-47cbdd0e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/retinanet_r50_fpn_1x_voc0712/retinanet_r50_fpn_1x_voc0712_20200616_014642.log.json) |
| SSD300 | VGG16 | - | 120e | - | - | 76.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/ssd300_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd300_voc0712/ssd300_voc0712_20220320_194658-17edda1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd300_voc0712/ssd300_voc0712_20220320_194658.log.json) |
| SSD512 | VGG16 | - | 120e | - | - | 79.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc/ssd512_voc0712.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd512_voc0712/ssd512_voc0712_20220320_194717-03cefefe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pascal_voc/ssd512_voc0712/ssd512_voc0712_20220320_194717.log.json) |

## Citation

```latex
@Article{Everingham10,
  author = "Everingham, M. and Van~Gool, L. and Williams, C. K. I. and Winn, J. and Zisserman, A.",
  title = "The Pascal Visual Object Classes (VOC) Challenge",
  journal = "International Journal of Computer Vision",
  volume = "88",
  year = "2010",
  number = "2",
  month = jun,
  pages = "303--338",
}
```
mmdetection-master/configs/pascal_voc/faster_rcnn_r50_caffe_c4_mstrain_18k_voc0712.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_caffe_c4.py',
    '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))

# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 480), (1333, 512), (1333, 544), (1333, 576),
                   (1333, 608), (1333, 640), (1333, 672), (1333, 704),
                   (1333, 736), (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=[
            data_root + 'VOC2007/ImageSets/Main/trainval.txt',
            data_root + 'VOC2012/ImageSets/Main/trainval.txt'
        ],
        img_prefix=[data_root + 'VOC2007/', data_root + 'VOC2012/'],
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'VOC2007/ImageSets/Main/test.txt',
        img_prefix=data_root + 'VOC2007/',
        pipeline=test_pipeline))

# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=100,
    warmup_ratio=0.001,
    step=[12000, 16000])
# Runner type
runner = dict(type='IterBasedRunner', max_iters=18000)
checkpoint_config = dict(interval=3000)
evaluation = dict(interval=3000, metric='mAP')
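This schedule is iteration-based rather than epoch-based. With VOC07+12 trainval (5011 + 11540 = 16551 images) and the configured 2 samples per GPU, 18k iterations work out to roughly 17 data epochs on 8 GPUs (a back-of-the-envelope sketch; the 8-GPU count is the usual mmdetection launch setup, not something this file states):

```python
num_images = 5011 + 11540      # VOC2007 + VOC2012 trainval
gpus, samples_per_gpu = 8, 2   # assumed launch setup; samples_per_gpu is from the config
iters_per_epoch = num_images / (gpus * samples_per_gpu)
print(round(18000 / iters_per_epoch, 1))  # ~17.4 effective epochs
```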
mmdetection-master/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
    '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[3])
# runtime settings
runner = dict(
    type='EpochBasedRunner', max_epochs=4)  # actual epoch = 4 * 3 = 12
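The "actual epoch" comments reflect that the voc0712.py base wraps the training set in a RepeatDataset with times=3, so 4 runner epochs cover the data 12 times. Training from Python mirrors what tools/train.py does: build the dataset and model from the config, then hand them to train_detector. A condensed sketch of that loop (work_dir is an arbitrary choice; cfg.device is needed by recent 2.x releases):

```python
from mmcv import Config
from mmdet.apis import train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector

cfg = Config.fromfile('configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712.py')
cfg.work_dir = 'work_dirs/faster_rcnn_voc'  # arbitrary output directory
cfg.gpu_ids = [0]
cfg.seed = None
cfg.device = 'cuda'

datasets = [build_dataset(cfg.data.train)]
model = build_detector(cfg.model)
model.CLASSES = datasets[0].CLASSES  # attach class names for checkpoints

train_detector(model, datasets, cfg, distributed=False, validate=True)
```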
mmdetection-master/configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc0712_cocofmt.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/voc0712.py',
    '../_base_/default_runtime.py'
]
model = dict(roi_head=dict(bbox_head=dict(num_classes=20)))

CLASSES = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car',
           'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike',
           'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1000, 600), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1000, 600),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=3,
        dataset=dict(
            type=dataset_type,
            ann_file='data/voc0712_trainval.json',
            img_prefix='data/VOCdevkit',
            pipeline=train_pipeline,
            classes=CLASSES)),
    val=dict(
        type=dataset_type,
        ann_file='data/voc07_test.json',
        img_prefix='data/VOCdevkit',
        pipeline=test_pipeline,
        classes=CLASSES),
    test=dict(
        type=dataset_type,
        ann_file='data/voc07_test.json',
        img_prefix='data/VOCdevkit',
        pipeline=test_pipeline,
        classes=CLASSES))
evaluation = dict(interval=1, metric='bbox')

# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[3])
# runtime settings
runner = dict(
    type='EpochBasedRunner', max_epochs=4)  # actual epoch = 4 * 3 = 12
mmdetection-master/configs/pascal_voc/retinanet_r50_fpn_1x_voc0712.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/voc0712.py',
    '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=20))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# learning policy
# actual epoch = 3 * 3 = 9
lr_config = dict(policy='step', step=[3])
# runtime settings
runner = dict(
    type='EpochBasedRunner', max_epochs=4)  # actual epoch = 4 * 3 = 12
mmdetection-master/configs/pascal_voc/ssd300_voc0712.py
_base_ = [
    '../_base_/models/ssd300.py', '../_base_/datasets/voc0712.py',
    '../_base_/default_runtime.py'
]
model = dict(
    bbox_head=dict(
        num_classes=20,
        anchor_generator=dict(basesize_ratio_range=(0.2, 0.9))))
# dataset settings
dataset_type = 'VOCDataset'
data_root = 'data/VOCdevkit/'
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(300, 300), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(300, 300),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=3,
    train=dict(
        type='RepeatDataset', times=10, dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict()
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[16, 20])
checkpoint_config = dict(interval=1)
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
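The trailing `auto_scale_lr` block records the reference total batch size (8 GPUs x 8 samples = 64). When training with a different world size and auto LR scaling enabled, the linear scaling rule applies, which is simple enough to check by hand:

```python
base_lr, base_batch_size = 1e-3, 64  # values from this config
actual_batch_size = 4 * 8            # e.g. 4 GPUs x 8 samples_per_gpu
scaled_lr = base_lr * actual_batch_size / base_batch_size
print(scaled_lr)  # 0.0005
```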
mmdetection-master/configs/pascal_voc/ssd512_voc0712.py
_base_ = 'ssd300_voc0712.py'
input_size = 512
model = dict(
    neck=dict(
        out_channels=(512, 1024, 512, 256, 256, 256, 256),
        level_strides=(2, 2, 2, 2, 1),
        level_paddings=(1, 1, 1, 1, 1),
        last_kernel_size=4),
    bbox_head=dict(
        in_channels=(512, 1024, 512, 256, 256, 256, 256),
        anchor_generator=dict(
            input_size=input_size,
            strides=[8, 16, 32, 64, 128, 256, 512],
            basesize_ratio_range=(0.15, 0.9),
            ratios=([2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]))))
img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(512, 512), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(512, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
mmdetection-master/configs/pisa/README.md
# PISA

> [Prime Sample Attention in Object Detection](https://arxiv.org/abs/1904.04821)

<!-- [ALGORITHM] -->

## Abstract

It is a common paradigm in object detection frameworks to treat all samples equally and target at maximizing the performance on average. In this work, we revisit this paradigm through a careful study on how different samples contribute to the overall performance measured in terms of mAP. Our study suggests that the samples in each mini-batch are neither independent nor equally important, and therefore a better classifier on average does not necessarily mean higher mAP. Motivated by this study, we propose the notion of Prime Samples, those that play a key role in driving the detection performance. We further develop a simple yet effective sampling and learning strategy called PrIme Sample Attention (PISA) that directs the focus of the training process towards such samples. Our experiments demonstrate that it is often more effective to focus on prime samples than hard samples when training a detector. Particularly, on the MSCOCO dataset, PISA outperforms the random sampling baseline and hard mining schemes, e.g., OHEM and Focal Loss, consistently by around 2% on both single-stage and two-stage detectors, even with a strong backbone ResNeXt-101.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143970710-5cfd5960-fcf9-4e32-860a-acd46ce5d274.png"/>
</div>

## Results and Models

| PISA | Network | Backbone | Lr schd | box AP | mask AP | Config | Download |
| :--: | :-----: | :------: | :-----: | :----: | :-----: | :----: | :------: |
| × | Faster R-CNN | R-50-FPN | 1x | 36.4 | | - | |
| √ | Faster R-CNN | R-50-FPN | 1x | 38.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco_20200506_185619.log.json) |
| × | Faster R-CNN | X101-32x4d-FPN | 1x | 40.1 | | - | |
| √ | Faster R-CNN | X101-32x4d-FPN | 1x | 41.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco_20200505_181503.log.json) |
| × | Mask R-CNN | R-50-FPN | 1x | 37.3 | 34.2 | - | |
| √ | Mask R-CNN | R-50-FPN | 1x | 39.1 | 35.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco_20200508_150500.log.json) |
| × | Mask R-CNN | X101-32x4d-FPN | 1x | 41.1 | 37.1 | - | |
| √ | Mask R-CNN | X101-32x4d-FPN | 1x | | | | |
| × | RetinaNet | R-50-FPN | 1x | 35.6 | | - | |
| √ | RetinaNet | R-50-FPN | 1x | 36.9 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco_20200504_014311.log.json) |
| × | RetinaNet | X101-32x4d-FPN | 1x | 39.0 | | - | |
| √ | RetinaNet | X101-32x4d-FPN | 1x | 40.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco_20200505_001404.log.json) |
| × | SSD300 | VGG16 | 1x | 25.6 | | - | |
| √ | SSD300 | VGG16 | 1x | 27.6 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd300_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco_20200504_144325.log.json) |
| × | SSD512 | VGG16 | 1x | 29.3 | | - | |
| √ | SSD512 | VGG16 | 1x | 31.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pisa/pisa_ssd512_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco_20200508_131030.log.json) |

**Notes:**

- In the original paper, all models were trained and tested on mmdet v1.x, so results may differ slightly from this v2.0 release.
- Note that PISA only modifies the training pipeline, so inference time is the same as the baseline's.

## Citation

```latex
@inproceedings{cao2019prime,
  title={Prime sample attention in object detection},
  author={Cao, Yuhang and Chen, Kai and Loy, Chen Change and Lin, Dahua},
  booktitle={IEEE Conference on Computer Vision and Pattern Recognition},
  year={2020}
}
```
mmdetection-master/configs/pisa/metafile.yml
Collections:
  - Name: PISA
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - FPN
        - PISA
        - RPN
        - ResNet
        - RoIPool
    Paper:
      URL: https://arxiv.org/abs/1904.04821
      Title: 'Prime Sample Attention in Object Detection'
    README: configs/pisa/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/roi_heads/pisa_roi_head.py#L8
      Version: v2.1.0

Models:
  - Name: pisa_faster_rcnn_r50_fpn_1x_coco
    In Collection: PISA
    Config: configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_r50_fpn_1x_coco/pisa_faster_rcnn_r50_fpn_1x_coco-dea93523.pth
  - Name: pisa_faster_rcnn_x101_32x4d_fpn_1x_coco
    In Collection: PISA
    Config: configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco-e4accec4.pth
  - Name: pisa_mask_rcnn_r50_fpn_1x_coco
    In Collection: PISA
    Config: configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 35.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_mask_rcnn_r50_fpn_1x_coco/pisa_mask_rcnn_r50_fpn_1x_coco-dfcedba6.pth
  - Name: pisa_retinanet_r50_fpn_1x_coco
    In Collection: PISA
    Config: configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 36.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_r50_fpn_1x_coco/pisa_retinanet_r50_fpn_1x_coco-76409952.pth
  - Name: pisa_retinanet_x101_32x4d_fpn_1x_coco
    In Collection: PISA
    Config: configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco/pisa_retinanet_x101_32x4d_fpn_1x_coco-a0c13c73.pth
  - Name: pisa_ssd300_coco
    In Collection: PISA
    Config: configs/pisa/pisa_ssd300_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 27.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd300_coco/pisa_ssd300_coco-710e3ac9.pth
  - Name: pisa_ssd512_coco
    In Collection: PISA
    Config: configs/pisa/pisa_ssd512_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 31.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pisa/pisa_ssd512_coco/pisa_ssd512_coco-247addee.pth
mmdetection-master/configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'

model = dict(
    roi_head=dict(
        type='PISARoIHead',
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            sampler=dict(
                type='ScoreHLRSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True,
                k=0.5,
                bias=0.),
            isr=dict(k=2, bias=0),
            carl=dict(k=1, bias=0.2))),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
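Relative to the plain Faster R-CNN baseline it inherits from, this config swaps in PISARoIHead, replaces the RCNN RandomSampler with ScoreHLRSampler, and adds the ISR and CARL terms. A quick diff over the merged configs makes that concrete (a sketch, run from the repo root; attribute paths assume the master-branch layout where train_cfg lives inside the model dict):

```python
from mmcv import Config

base = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py')
pisa = Config.fromfile('configs/pisa/pisa_faster_rcnn_r50_fpn_1x_coco.py')

print(base.model.roi_head.type, '->', pisa.model.roi_head.type)
# StandardRoIHead -> PISARoIHead
print(base.model.train_cfg.rcnn.sampler.type, '->',
      pisa.model.train_cfg.rcnn.sampler.type)
# RandomSampler -> ScoreHLRSampler
print('isr' in pisa.model.train_cfg.rcnn, 'carl' in pisa.model.train_cfg.rcnn)
# True True
```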
mmdetection-master/configs/pisa/pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py'

model = dict(
    roi_head=dict(
        type='PISARoIHead',
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            sampler=dict(
                type='ScoreHLRSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True,
                k=0.5,
                bias=0.),
            isr=dict(k=2, bias=0),
            carl=dict(k=1, bias=0.2))),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
mmdetection-master/configs/pisa/pisa_mask_rcnn_r50_fpn_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'

model = dict(
    roi_head=dict(
        type='PISARoIHead',
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            sampler=dict(
                type='ScoreHLRSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True,
                k=0.5,
                bias=0.),
            isr=dict(k=2, bias=0),
            carl=dict(k=1, bias=0.2))),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
mmdetection-master/configs/pisa/pisa_mask_rcnn_x101_32x4d_fpn_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py'

model = dict(
    roi_head=dict(
        type='PISARoIHead',
        bbox_head=dict(
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0),
        rcnn=dict(
            sampler=dict(
                type='ScoreHLRSampler',
                num=512,
                pos_fraction=0.25,
                neg_pos_ub=-1,
                add_gt_as_proposals=True,
                k=0.5,
                bias=0.),
            isr=dict(k=2, bias=0),
            carl=dict(k=1, bias=0.2))),
    test_cfg=dict(
        rpn=dict(
            nms_pre=2000,
            max_per_img=2000,
            nms=dict(type='nms', iou_threshold=0.7),
            min_bbox_size=0)))
mmdetection-master/configs/pisa/pisa_retinanet_r50_fpn_1x_coco.py
_base_ = '../retinanet/retinanet_r50_fpn_1x_coco.py'

model = dict(
    bbox_head=dict(
        type='PISARetinaHead',
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
mmdetection-master/configs/pisa/pisa_retinanet_x101_32x4d_fpn_1x_coco.py
_base_ = '../retinanet/retinanet_x101_32x4d_fpn_1x_coco.py'

model = dict(
    bbox_head=dict(
        type='PISARetinaHead',
        loss_bbox=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0)),
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))
mmdetection-master/configs/pisa/pisa_ssd300_coco.py
_base_ = '../ssd/ssd300_coco.py'

model = dict(
    bbox_head=dict(type='PISASSDHead'),
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))

optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
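`_delete_=True` discards the optimizer_config inherited from ssd300_coco.py before applying this one. The resulting grad_clip setting is applied by mmcv's OptimizerHook as a plain gradient-norm clip, roughly equivalent to the following (shown on a toy module standing in for the detector):

```python
import torch
import torch.nn as nn

# what grad_clip=dict(max_norm=35, norm_type=2) amounts to after backward()
model = nn.Linear(4, 4)
model(torch.randn(2, 4)).sum().backward()
params = [p for p in model.parameters() if p.requires_grad and p.grad is not None]
total_norm = torch.nn.utils.clip_grad_norm_(params, max_norm=35, norm_type=2)
print(float(total_norm))  # pre-clip gradient norm
```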
mmdetection-master/configs/pisa/pisa_ssd512_coco.py
_base_ = '../ssd/ssd512_coco.py'

model = dict(
    bbox_head=dict(type='PISASSDHead'),
    train_cfg=dict(isr=dict(k=2., bias=0.), carl=dict(k=1., bias=0.2)))

optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
mmdetection-master/configs/point_rend/README.md
# PointRend

> [PointRend: Image Segmentation as Rendering](https://arxiv.org/abs/1912.08193)

<!-- [ALGORITHM] -->

## Abstract

We present a new method for efficient high-quality image segmentation of objects and scenes. By analogizing classical computer graphics methods for efficient rendering with over- and undersampling challenges faced in pixel labeling tasks, we develop a unique perspective of image segmentation as a rendering problem. From this vantage, we present the PointRend (Point-based Rendering) neural network module: a module that performs point-based segmentation predictions at adaptively selected locations based on an iterative subdivision algorithm. PointRend can be flexibly applied to both instance and semantic segmentation tasks by building on top of existing state-of-the-art models. While many concrete implementations of the general idea are possible, we show that a simple design already achieves excellent results. Qualitatively, PointRend outputs crisp object boundaries in regions that are over-smoothed by previous methods. Quantitatively, PointRend yields significant gains on COCO and Cityscapes, for both instance and semantic segmentation. PointRend's efficiency enables output resolutions that are otherwise impractical in terms of memory or computation compared to existing approaches.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143970097-d38b6801-d3c8-468f-b8b0-639be3689907.png"/>
</div>

## Results and Models

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R-50-FPN | caffe | 1x | 4.6 | | 38.4 | 36.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco_20200612_161407.log.json) |
| R-50-FPN | caffe | 3x | 4.6 | | 41.0 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco_20200614_002632.log.json) |

Note: All models are trained with multi-scale augmentation; the input image's shorter side is randomly scaled to one of (640, 672, 704, 736, 768, 800).

## Citation

```latex
@InProceedings{kirillov2019pointrend,
  title={{PointRend}: Image Segmentation as Rendering},
  author={Alexander Kirillov and Yuxin Wu and Kaiming He and Ross Girshick},
  journal={ArXiv:1912.08193},
  year={2019}
}
```
mmdetection-master/configs/point_rend/metafile.yml
Collections:
  - Name: PointRend
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - PointRend
        - FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1912.08193
      Title: 'PointRend: Image Segmentation as Rendering'
    README: configs/point_rend/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/detectors/point_rend.py#L6
      Version: v2.2.0

Models:
  - Name: point_rend_r50_caffe_fpn_mstrain_1x_coco
    In Collection: PointRend
    Config: configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py
    Metadata:
      Training Memory (GB): 4.6
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco/point_rend_r50_caffe_fpn_mstrain_1x_coco-1bcb5fb4.pth
  - Name: point_rend_r50_caffe_fpn_mstrain_3x_coco
    In Collection: PointRend
    Config: configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 4.6
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco/point_rend_r50_caffe_fpn_mstrain_3x_coco-e0ebb6b7.pth
mmdetection-master/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
# model settings
model = dict(
    type='PointRend',
    roi_head=dict(
        type='PointRendRoIHead',
        mask_roi_extractor=dict(
            type='GenericRoIExtractor',
            aggregation='concat',
            roi_layer=dict(
                _delete_=True, type='SimpleRoIAlign', output_size=14),
            out_channels=256,
            featmap_strides=[4]),
        mask_head=dict(
            _delete_=True,
            type='CoarseMaskHead',
            num_fcs=2,
            in_channels=256,
            conv_out_channels=256,
            fc_out_channels=1024,
            num_classes=80,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
        point_head=dict(
            type='MaskPointHead',
            num_fcs=3,
            in_channels=256,
            fc_channels=256,
            num_classes=80,
            coarse_pred_each_layer=True,
            loss_point=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))),
    # model training and testing settings
    train_cfg=dict(
        rcnn=dict(
            mask_size=7,
            num_points=14 * 14,
            oversample_ratio=3,
            importance_sample_ratio=0.75)),
    test_cfg=dict(
        rcnn=dict(
            subdivision_steps=5,
            subdivision_num_points=28 * 28,
            scale_factor=2)))
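The test_cfg implements the paper's adaptive subdivision: starting from a coarse mask, each of the `subdivision_steps` rounds upsamples the mask by `scale_factor` and re-predicts only the `subdivision_num_points` most uncertain points. The resolution arithmetic is easy to check (a sketch; the 7x7 starting size is the paper's setting, matching mask_size=7 above):

```python
coarse_size = 7            # matches train_cfg.rcnn.mask_size
steps, scale = 5, 2        # subdivision_steps, scale_factor from test_cfg
points_per_step = 28 * 28  # subdivision_num_points

size = coarse_size
for _ in range(steps):
    size *= scale
print(size)             # 224: mask resolution after five x2 subdivisions
print(points_per_step)  # 784 uncertain points re-predicted per step
```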
mmdetection-master/configs/point_rend/point_rend_r50_caffe_fpn_mstrain_3x_coco.py
_base_ = './point_rend_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
mmdetection-master/configs/pvt/README.md
# PVT

> [Pyramid vision transformer: A versatile backbone for dense prediction without convolutions](https://arxiv.org/abs/2102.12122)

<!-- [BACKBONE] -->

## Abstract

Although using convolutional neural networks (CNNs) as backbones achieves great successes in computer vision, this work investigates a simple backbone network useful for many dense prediction tasks without convolutions. Unlike the recently-proposed Transformer model (e.g., ViT) that is specially designed for image classification, we propose Pyramid Vision Transformer (PVT), which overcomes the difficulties of porting Transformer to various dense prediction tasks. PVT has several merits compared to prior arts. (1) Different from ViT that typically has low-resolution outputs and high computational and memory cost, PVT can be not only trained on dense partitions of the image to achieve high output resolution, which is important for dense predictions, but also uses a progressive shrinking pyramid to reduce computations of large feature maps. (2) PVT inherits the advantages from both CNN and Transformer, making it a unified backbone in various vision tasks without convolutions by simply replacing CNN backbones. (3) We validate PVT by conducting extensive experiments, showing that it boosts the performance of many downstream tasks, e.g., object detection, semantic, and instance segmentation. For example, with a comparable number of parameters, RetinaNet+PVT achieves 40.4 AP on the COCO dataset, surpassing RetinaNet+ResNet50 (36.3 AP) by 4.1 absolute AP. We hope PVT could serve as an alternative and useful backbone for pixel-level predictions and facilitate future researches.

Transformer recently has shown encouraging progress in computer vision. In this work, we present new baselines by improving the original Pyramid Vision Transformer (abbreviated as PVTv1) by adding three designs, including (1) overlapping patch embedding, (2) convolutional feed-forward networks, and (3) linear complexity attention layers. With these modifications, our PVTv2 significantly improves PVTv1 on three tasks, e.g., classification, detection, and segmentation. Moreover, PVTv2 achieves comparable or better performance than recent works such as Swin Transformer. We hope this work will facilitate state-of-the-art Transformer research in computer vision.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143969989-6f94e695-23b1-4f8f-b406-d589fdc3cfb2.png"/>
</div>

## Results and Models

### RetinaNet (PVTv1)

| Backbone | Lr schd | Mem (GB) | box AP | Config | Download |
| :------: | :-----: | :------: | :----: | :----: | :------: |
| PVT-Tiny | 12e | 8.5 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_t_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110.log.json) |
| PVT-Small | 12e | 14.5 | 40.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_s_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921.log.json) |
| PVT-Medium | 12e | 20.9 | 41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_m_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243.log.json) |

### RetinaNet (PVTv2)

| Backbone | Lr schd | Mem (GB) | box AP | Config | Download |
| :------: | :-----: | :------: | :----: | :----: | :------: |
| PVTv2-B0 | 12e | 7.4 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b0_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157.log.json) |
| PVTv2-B1 | 12e | 9.5 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b1_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318.log.json) |
| PVTv2-B2 | 12e | 16.2 | 44.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b2_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843.log.json) |
| PVTv2-B3 | 12e | 23.0 | 46.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b3_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512.log.json) |
| PVTv2-B4 | 12e | 17.0 | 46.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b4_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151.log.json) |
| PVTv2-B5 | 12e | 18.7 | 46.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/pvt/retinanet_pvt_v2_b5_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800.log.json) |

## Citation

```latex
@article{wang2021pyramid,
  title={Pyramid vision transformer: A versatile backbone for dense prediction without convolutions},
  author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling},
  journal={arXiv preprint arXiv:2102.12122},
  year={2021}
}
```

```latex
@article{wang2021pvtv2,
  title={PVTv2: Improved Baselines with Pyramid Vision Transformer},
  author={Wang, Wenhai and Xie, Enze and Li, Xiang and Fan, Deng-Ping and Song, Kaitao and Liang, Ding and Lu, Tong and Luo, Ping and Shao, Ling},
  journal={arXiv preprint arXiv:2106.13797},
  year={2021}
}
```
9,123
156.310345
1,492
md
mmdetection
mmdetection-master/configs/pvt/metafile.yml
Models:
  - Name: retinanet_pvt-t_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/pvt/retinanet_pvt-t_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 8.5
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x NVIDIA V100 GPUs
      Architecture:
        - PyramidVisionTransformer
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 36.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-t_fpn_1x_coco/retinanet_pvt-t_fpn_1x_coco_20210831_103110-17b566bd.pth
    Paper:
      URL: https://arxiv.org/abs/2102.12122
      Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions"
    README: configs/pvt/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315
      Version: 2.17.0

  - Name: retinanet_pvt-s_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/pvt/retinanet_pvt-s_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 14.5
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x NVIDIA V100 GPUs
      Architecture:
        - PyramidVisionTransformer
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-s_fpn_1x_coco/retinanet_pvt-s_fpn_1x_coco_20210906_142921-b6c94a5b.pth
    Paper:
      URL: https://arxiv.org/abs/2102.12122
      Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions"
    README: configs/pvt/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315
      Version: 2.17.0

  - Name: retinanet_pvt-m_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/pvt/retinanet_pvt-m_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 20.9
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x NVIDIA V100 GPUs
      Architecture:
        - PyramidVisionTransformer
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvt-m_fpn_1x_coco/retinanet_pvt-m_fpn_1x_coco_20210831_103243-55effa1b.pth
    Paper:
      URL: https://arxiv.org/abs/2102.12122
      Title: "Pyramid Vision Transformer: A Versatile Backbone for Dense Prediction without Convolutions"
    README: configs/pvt/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L315
      Version: 2.17.0

  - Name: retinanet_pvtv2-b0_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.4
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x NVIDIA V100 GPUs
      Architecture:
        - PyramidVisionTransformerV2
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b0_fpn_1x_coco/retinanet_pvtv2-b0_fpn_1x_coco_20210831_103157-13e9aabe.pth
    Paper:
      URL: https://arxiv.org/abs/2106.13797
      Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer"
    README: configs/pvt/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543
      Version: 2.17.0

  - Name: retinanet_pvtv2-b1_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 9.5
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x NVIDIA V100 GPUs
      Architecture:
        - PyramidVisionTransformerV2
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b1_fpn_1x_coco/retinanet_pvtv2-b1_fpn_1x_coco_20210831_103318-7e169a7d.pth
    Paper:
      URL: https://arxiv.org/abs/2106.13797
      Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer"
    README: configs/pvt/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543
      Version: 2.17.0

  - Name: retinanet_pvtv2-b2_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 16.2
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x NVIDIA V100 GPUs
      Architecture:
        - PyramidVisionTransformerV2
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b2_fpn_1x_coco/retinanet_pvtv2-b2_fpn_1x_coco_20210901_174843-529f0b9a.pth
    Paper:
      URL: https://arxiv.org/abs/2106.13797
      Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer"
    README: configs/pvt/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543
      Version: 2.17.0

  - Name: retinanet_pvtv2-b3_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 23.0
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x NVIDIA V100 GPUs
      Architecture:
        - PyramidVisionTransformerV2
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b3_fpn_1x_coco/retinanet_pvtv2-b3_fpn_1x_coco_20210903_151512-8357deff.pth
    Paper:
      URL: https://arxiv.org/abs/2106.13797
      Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer"
    README: configs/pvt/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543
      Version: 2.17.0

  - Name: retinanet_pvtv2-b4_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 17.0
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x NVIDIA V100 GPUs
      Architecture:
        - PyramidVisionTransformerV2
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b4_fpn_1x_coco/retinanet_pvtv2-b4_fpn_1x_coco_20210901_170151-83795c86.pth
    Paper:
      URL: https://arxiv.org/abs/2106.13797
      Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer"
    README: configs/pvt/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543
      Version: 2.17.0

  - Name: retinanet_pvtv2-b5_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 18.7
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x NVIDIA V100 GPUs
      Architecture:
        - PyramidVisionTransformerV2
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/pvt/retinanet_pvtv2-b5_fpn_1x_coco/retinanet_pvtv2-b5_fpn_1x_coco_20210902_201800-3420eb57.pth
    Paper:
      URL: https://arxiv.org/abs/2106.13797
      Title: "PVTv2: Improved Baselines with Pyramid Vision Transformer"
    README: configs/pvt/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.17.0/mmdet/models/backbones/pvt.py#L543
      Version: 2.17.0
8,674
34.553279
155
yml
mmdetection
mmdetection-master/configs/pvt/retinanet_pvt-l_fpn_1x_coco.py
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        num_layers=[3, 8, 27, 3],
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                      'releases/download/v2/pvt_large.pth')))
# PVT-Large is memory-hungry: train in mixed precision (fp16)
# with a dynamic loss scaler starting from a scale of 512.
fp16 = dict(loss_scale=dict(init_scale=512))
283
34.5
66
py
mmdetection
mmdetection-master/configs/pvt/retinanet_pvt-m_fpn_1x_coco.py
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        num_layers=[3, 4, 18, 3],
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                      'releases/download/v2/pvt_medium.pth')))
239
33.285714
66
py
mmdetection
mmdetection-master/configs/pvt/retinanet_pvt-s_fpn_1x_coco.py
_base_ = 'retinanet_pvt-t_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        num_layers=[3, 4, 6, 3],
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                      'releases/download/v2/pvt_small.pth')))
237
33
66
py
mmdetection
mmdetection-master/configs/pvt/retinanet_pvt-t_fpn_1x_coco.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='RetinaNet',
    backbone=dict(
        _delete_=True,
        type='PyramidVisionTransformer',
        num_layers=[2, 2, 2, 2],
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                      'releases/download/v2/pvt_tiny.pth')),
    neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001)
593
33.941176
77
py
mmdetection
mmdetection-master/configs/pvt/retinanet_pvtv2-b0_fpn_1x_coco.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='RetinaNet',
    backbone=dict(
        _delete_=True,
        type='PyramidVisionTransformerV2',
        embed_dims=32,
        num_layers=[2, 2, 2, 2],
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                      'releases/download/v2/pvt_v2_b0.pth')),
    neck=dict(in_channels=[32, 64, 160, 256]))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001)
618
33.388889
77
py
mmdetection
mmdetection-master/configs/pvt/retinanet_pvtv2-b1_fpn_1x_coco.py
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        embed_dims=64,
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                      'releases/download/v2/pvt_v2_b1.pth')),
    neck=dict(in_channels=[64, 128, 320, 512]))
278
33.875
66
py
mmdetection
mmdetection-master/configs/pvt/retinanet_pvtv2-b2_fpn_1x_coco.py
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        embed_dims=64,
        num_layers=[3, 4, 6, 3],
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                      'releases/download/v2/pvt_v2_b2.pth')),
    neck=dict(in_channels=[64, 128, 320, 512]))
311
33.666667
66
py
mmdetection
mmdetection-master/configs/pvt/retinanet_pvtv2-b3_fpn_1x_coco.py
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        embed_dims=64,
        num_layers=[3, 4, 18, 3],
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                      'releases/download/v2/pvt_v2_b3.pth')),
    neck=dict(in_channels=[64, 128, 320, 512]))
312
33.777778
66
py
mmdetection
mmdetection-master/configs/pvt/retinanet_pvtv2-b4_fpn_1x_coco.py
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        embed_dims=64,
        num_layers=[3, 8, 27, 3],
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                      'releases/download/v2/pvt_v2_b4.pth')),
    neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)
# dataset settings
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
668
34.210526
70
py
mmdetection
mmdetection-master/configs/pvt/retinanet_pvtv2-b5_fpn_1x_coco.py
_base_ = 'retinanet_pvtv2-b0_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        embed_dims=64,
        num_layers=[3, 6, 40, 3],
        mlp_ratios=(4, 4, 4, 4),
        init_cfg=dict(checkpoint='https://github.com/whai362/PVT/'
                      'releases/download/v2/pvt_v2_b5.pth')),
    neck=dict(in_channels=[64, 128, 320, 512]))
# optimizer
optimizer = dict(
    _delete_=True, type='AdamW', lr=0.0001 / 1.4, weight_decay=0.0001)
# dataset settings
data = dict(samples_per_gpu=1, workers_per_gpu=1)
# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (1 samples per GPU)
auto_scale_lr = dict(base_batch_size=8)
701
34.1
70
py
mmdetection
mmdetection-master/configs/queryinst/README.md
# QueryInst

> [Instances as Queries](https://openaccess.thecvf.com/content/ICCV2021/html/Fang_Instances_As_Queries_ICCV_2021_paper.html)

<!-- [ALGORITHM] -->

## Abstract

We present QueryInst, a new perspective for instance segmentation. QueryInst is a multi-stage end-to-end system that treats instances of interest as learnable queries, enabling query based object detectors, e.g., Sparse R-CNN, to have strong instance segmentation performance. The attributes of instances such as categories, bounding boxes, instance masks, and instance association embeddings are represented by queries in a unified manner. In QueryInst, a query is shared by both detection and segmentation via dynamic convolutions and driven by parallelly-supervised multi-stage learning. We conduct extensive experiments on three challenging benchmarks, i.e., COCO, CityScapes, and YouTube-VIS to evaluate the effectiveness of QueryInst in object detection, instance segmentation, and video instance segmentation tasks. For the first time, we demonstrate that a simple end-to-end query based framework can achieve the state-of-the-art performance in various instance-level recognition tasks.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143971527-c1b7ff78-e95f-4edb-9d5e-3d6d7d902999.png"/>
</div>

## Results and Models

| Model     | Backbone  | Style   | Lr schd | Number of Proposals | Multi-Scale | RandomCrop | box AP | mask AP | Config | Download |
| :-------: | :-------: | :-----: | :-----: | :-----------------: | :---------: | :--------: | :----: | :-----: | :----: | :------: |
| QueryInst | R-50-FPN  | pytorch | 1x | 100 | False | False | 42.0 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916.log.json) |
| QueryInst | R-50-FPN  | pytorch | 3x | 100 | True  | False | 44.8 | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643.log.json) |
| QueryInst | R-50-FPN  | pytorch | 3x | 300 | True  | True  | 47.5 | 41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802.log.json) |
| QueryInst | R-101-FPN | pytorch | 3x | 100 | True  | False | 46.4 | 41.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048.log.json) |
| QueryInst | R-101-FPN | pytorch | 3x | 300 | True  | True  | 49.0 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621.log.json) |

## Citation

```latex
@InProceedings{Fang_2021_ICCV,
  author    = {Fang, Yuxin and Yang, Shusheng and Wang, Xinggang and Li, Yu and Fang, Chen and Shan, Ying and Feng, Bin and Liu, Wenyu},
  title     = {Instances As Queries},
  booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
  month     = {October},
  year      = {2021},
  pages     = {6910-6919}
}
```
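The 100- vs 300-proposal rows above differ mainly in the number of learnable proposal queries; the override below is a short sketch extracted from the 300-proposal config shipped in this folder, not an additional config:

```python
# Sketch: raise the number of proposal queries from 100 to 300
# (mirrors queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py).
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
    rpn_head=dict(num_proposals=num_proposals),
    test_cfg=dict(
        _delete_=True,
        rpn=None,
        # at test time, keep at most one detection per proposal query
        rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
```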
6,665
179.162162
994
md
mmdetection
mmdetection-master/configs/queryinst/metafile.yml
Collections:
  - Name: QueryInst
    Metadata:
      Training Data: COCO
      Training Techniques:
        - AdamW
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - FPN
        - ResNet
        - QueryInst
    Paper:
      URL: https://openaccess.thecvf.com/content/ICCV2021/papers/Fang_Instances_As_Queries_ICCV_2021_paper.pdf
      Title: 'Instances as Queries'
    README: configs/queryinst/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/detectors/queryinst.py
      Version: v2.18.0

Models:
  - Name: queryinst_r50_fpn_1x_coco
    In Collection: QueryInst
    Config: configs/queryinst/queryinst_r50_fpn_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_1x_coco/queryinst_r50_fpn_1x_coco_20210907_084916-5a8f1998.pth

  - Name: queryinst_r50_fpn_mstrain_480-800_3x_coco
    In Collection: QueryInst
    Config: configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py
    Metadata:
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco/queryinst_r50_fpn_mstrain_480-800_3x_coco_20210901_103643-7837af86.pth

  - Name: queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco
    In Collection: QueryInst
    Config: configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
    Metadata:
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 47.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 41.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_101802-85cffbd8.pth

  - Name: queryinst_r101_fpn_mstrain_480-800_3x_coco
    In Collection: QueryInst
    Config: configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py
    Metadata:
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 41.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco/queryinst_r101_fpn_mstrain_480-800_3x_coco_20210904_104048-91f9995b.pth

  - Name: queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco
    In Collection: QueryInst
    Config: configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
    Metadata:
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 49.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 42.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20210904_153621-76cce59f.pth
3,570
34.356436
223
yml
mmdetection
mmdetection-master/configs/queryinst/queryinst_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
_base_ = './queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
233
28.25
76
py
mmdetection
mmdetection-master/configs/queryinst/queryinst_r101_fpn_mstrain_480-800_3x_coco.py
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
214
25.875
61
py
mmdetection
mmdetection-master/configs/queryinst/queryinst_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
num_stages = 6
num_proposals = 100
model = dict(
    type='QueryInst',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=0,
        add_extra_convs='on_input',
        num_outs=4),
    rpn_head=dict(
        type='EmbeddingRPNHead',
        num_proposals=num_proposals,
        proposal_feature_channel=256),
    roi_head=dict(
        type='SparseRoIHead',
        num_stages=num_stages,
        stage_loss_weights=[1] * num_stages,
        proposal_feature_channel=256,
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        mask_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(type='RoIAlign', output_size=14, sampling_ratio=2),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32]),
        bbox_head=[
            dict(
                type='DIIHead',
                num_classes=80,
                num_ffn_fcs=2,
                num_heads=8,
                num_cls_fcs=1,
                num_reg_fcs=3,
                feedforward_channels=2048,
                in_channels=256,
                dropout=0.0,
                ffn_act_cfg=dict(type='ReLU', inplace=True),
                dynamic_conv_cfg=dict(
                    type='DynamicConv',
                    in_channels=256,
                    feat_channels=64,
                    out_channels=256,
                    input_feat_shape=7,
                    act_cfg=dict(type='ReLU', inplace=True),
                    norm_cfg=dict(type='LN')),
                loss_bbox=dict(type='L1Loss', loss_weight=5.0),
                loss_iou=dict(type='GIoULoss', loss_weight=2.0),
                loss_cls=dict(
                    type='FocalLoss',
                    use_sigmoid=True,
                    gamma=2.0,
                    alpha=0.25,
                    loss_weight=2.0),
                bbox_coder=dict(
                    type='DeltaXYWHBBoxCoder',
                    clip_border=False,
                    target_means=[0., 0., 0., 0.],
                    target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages)
        ],
        mask_head=[
            dict(
                type='DynamicMaskHead',
                dynamic_conv_cfg=dict(
                    type='DynamicConv',
                    in_channels=256,
                    feat_channels=64,
                    out_channels=256,
                    input_feat_shape=14,
                    with_proj=False,
                    act_cfg=dict(type='ReLU', inplace=True),
                    norm_cfg=dict(type='LN')),
                num_convs=4,
                num_classes=80,
                roi_feat_size=14,
                in_channels=256,
                conv_kernel_size=3,
                conv_out_channels=256,
                class_agnostic=False,
                norm_cfg=dict(type='BN'),
                upsample_cfg=dict(type='deconv', scale_factor=2),
                loss_mask=dict(
                    type='DiceLoss',
                    loss_weight=8.0,
                    use_sigmoid=True,
                    activate=False,
                    eps=1e-5)) for _ in range(num_stages)
        ]),
    # training and testing settings
    train_cfg=dict(
        rpn=None,
        rcnn=[
            dict(
                assigner=dict(
                    type='HungarianAssigner',
                    cls_cost=dict(type='FocalLossCost', weight=2.0),
                    reg_cost=dict(type='BBoxL1Cost', weight=5.0),
                    iou_cost=dict(type='IoUCost', iou_mode='giou',
                                  weight=2.0)),
                sampler=dict(type='PseudoSampler'),
                pos_weight=1,
                mask_size=28,
            ) for _ in range(num_stages)
        ]),
    test_cfg=dict(
        rpn=None, rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))

# optimizer
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0001,
    weight_decay=0.0001,
    paramwise_cfg=dict(
        custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=0.1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[8, 11], warmup_iters=1000)
runner = dict(type='EpochBasedRunner', max_epochs=12)
4,956
34.661871
79
py
mmdetection
mmdetection-master/configs/queryinst/queryinst_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
_base_ = './queryinst_r50_fpn_mstrain_480-800_3x_coco.py'
num_proposals = 300
model = dict(
    rpn_head=dict(num_proposals=num_proposals),
    test_cfg=dict(
        _delete_=True,
        rpn=None,
        rcnn=dict(max_per_img=num_proposals, mask_thr_binary=0.5)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

# augmentation strategy originates from DETR.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    keep_ratio=True)
            ],
            [
                dict(
                    type='Resize',
                    img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                    multiscale_mode='value',
                    keep_ratio=True),
                dict(
                    type='RandomCrop',
                    crop_type='absolute_range',
                    crop_size=(384, 600),
                    allow_negative_crop=True),
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    override=True,
                    keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
data = dict(train=dict(pipeline=train_pipeline))
2,254
40
78
py
mmdetection
mmdetection-master/configs/queryinst/queryinst_r50_fpn_mstrain_480-800_3x_coco.py
_base_ = './queryinst_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, value) for value in min_values],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
data = dict(train=dict(pipeline=train_pipeline))
lr_config = dict(policy='step', step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
879
35.666667
77
py
mmdetection
mmdetection-master/configs/regnet/README.md
# RegNet

> [Designing Network Design Spaces](https://arxiv.org/abs/2003.13678)

<!-- [BACKBONE] -->

## Abstract

In this work, we present a new network design paradigm. Our goal is to help advance the understanding of network design and discover design principles that generalize across settings. Instead of focusing on designing individual network instances, we design network design spaces that parametrize populations of networks. The overall process is analogous to classic manual design of networks, but elevated to the design space level. Using our methodology we explore the structure aspect of network design and arrive at a low-dimensional design space consisting of simple, regular networks that we call RegNet. The core insight of the RegNet parametrization is surprisingly simple: widths and depths of good networks can be explained by a quantized linear function. We analyze the RegNet design space and arrive at interesting findings that do not match the current practice of network design. The RegNet design space provides simple and fast networks that work well across a wide range of flop regimes. Under comparable training settings and flops, the RegNet models outperform the popular EfficientNet models while being up to 5x faster on GPUs.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143971942-da50f719-61e9-43bd-9468-0dbfbe80284e.png"/>
</div>

## Introduction

We implement RegNetX and RegNetY models in detection systems and provide their first results on Mask R-CNN, Faster R-CNN and RetinaNet. The pre-trained models are converted from the [model zoo of pycls](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md).

## Usage

To use a RegNet model, there are two steps:

1. Convert the model to the ResNet-style format supported by MMDetection
2. Modify the backbone and neck in the config accordingly

### Convert model

We already provide models from 400MF to 12GF in our model zoo. For more general usage, we also provide the script `regnet2mmdet.py` in the tools directory to convert the keys of models pretrained by [pycls](https://github.com/facebookresearch/pycls/) to the ResNet-style checkpoints used in MMDetection.

```bash
python -u tools/model_converters/regnet2mmdet.py ${PRETRAIN_PATH} ${STORE_PATH}
```

This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.

### Modify config

Users can modify the backbone's `depth` and the corresponding keys in `arch` according to the configs in the [pycls model zoo](https://github.com/facebookresearch/pycls/blob/master/MODEL_ZOO.md). The parameter `in_channels` in FPN can be found in Figures 15 & 16 of the paper (`wi` in the legend). This directory already provides some configs with their performance, using RegNetX from the 800MF to the 12GF level; see the sketch below for a concrete example. For other pre-trained models or self-implemented RegNet models, users are responsible for checking these parameters themselves.

**Note**: Although Figures 15 & 16 also provide `w0`, `wa`, `wm`, `group_w`, and `bot_mul` for `arch`, they are quantized and thus inaccurate; using them sometimes produces a different backbone whose keys do not match those in the pre-trained model.
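For example, adapting the RegNetX-3.2GF Faster R-CNN config in this folder to RegNetX-1.6GF only touches the backbone and the FPN input widths. This is a minimal sketch; the `arch` key and channel widths below match the RegNetX-1.6GF configs shipped in this directory:

```python
# Hedged sketch: point the backbone at a different RegNet and
# update the FPN input widths accordingly.
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        # `arch` selects the RegNet variant; names follow the pycls model zoo
        arch='regnetx_1.6gf',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
    neck=dict(
        type='FPN',
        # per-stage widths (`wi` in Figures 15 & 16) of RegNetX-1.6GF
        in_channels=[72, 168, 408, 912],
        out_channels=256,
        num_outs=5))
```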
## Results and Models

### Mask R-CNN

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| [R-50-FPN](../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | pytorch | 1x | 4.4 | 12.0 | 38.2 | 34.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205_050542.log.json) |
| [RegNetX-3.2GF-FPN](./mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 5.0 | | 40.3 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141.log.json) |
| [RegNetX-4.0GF-FPN](./mask_rcnn_regnetx-4GF_fpn_1x_coco.py) | pytorch | 1x | 5.5 | | 41.5 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217-32e9c92d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217.log.json) |
| [R-101-FPN](../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | pytorch | 1x | 6.4 | 10.3 | 40.0 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204-1efe0ed5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r101_fpn_1x_coco/mask_rcnn_r101_fpn_1x_coco_20200204_144809.log.json) |
| [RegNetX-6.4GF-FPN](./mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py) | pytorch | 1x | 6.1 | | 41.0 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439-3a7aae83.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439.log.json) |
| [X-101-32x4d-FPN](../mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | pytorch | 1x | 7.6 | 9.4 | 41.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205-478d0b67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_x101_32x4d_fpn_1x_coco/mask_rcnn_x101_32x4d_fpn_1x_coco_20200205_034906.log.json) |
| [RegNetX-8.0GF-FPN](./mask_rcnn_regnetx-8GF_fpn_1x_coco.py) | pytorch | 1x | 6.4 | | 41.7 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515-09daa87e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515.log.json) |
| [RegNetX-12GF-FPN](./mask_rcnn_regnetx-12GF_fpn_1x_coco.py) | pytorch | 1x | 7.4 | | 42.2 | 38.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552-b538bd8b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552.log.json) |
| [RegNetX-3.2GF-FPN-DCN-C3-C5](./mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py) | pytorch | 1x | 5.0 | | 40.3 | 36.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726-75f40794.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726.log.json) |

### Faster R-CNN

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| [R-50-FPN](../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | pytorch | 1x | 4.0 | 18.2 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) |
| [RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 4.5 | | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927.log.json) |
| [RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py) | pytorch | 2x | 4.5 | | 41.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955-e2081918.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955.log.json) |

### RetinaNet

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| [R-50-FPN](../retinanet/retinanet_r50_fpn_1x_coco.py) | pytorch | 1x | 3.8 | 16.6 | 36.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/retinanet/retinanet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130-c2398f9e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/retinanet/retinanet_r50_fpn_1x_coco/retinanet_r50_fpn_1x_coco_20200130_002941.log.json) |
| [RegNetX-800MF-FPN](./retinanet_regnetx-800MF_fpn_1x_coco.py) | pytorch | 1x | 2.5 | | 35.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403-f6f91d10.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403.log.json) |
| [RegNetX-1.6GF-FPN](./retinanet_regnetx-1.6GF_fpn_1x_coco.py) | pytorch | 1x | 3.3 | | 37.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403-37009a9d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403.log.json) |
| [RegNetX-3.2GF-FPN](./retinanet_regnetx-3.2GF_fpn_1x_coco.py) | pytorch | 1x | 4.2 | | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141-cb1509e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141.log.json) |

### Pre-trained models

We also train some models with longer schedules and multi-scale training. Users can finetune them for downstream tasks.

| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :----: | :------: | :---: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| Faster RCNN | [RegNetX-400MF-FPN](./faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 2.3 | | 37.1 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112-e1967c37.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112.log.json) |
| Faster RCNN | [RegNetX-800MF-FPN](./faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 2.8 | | 38.8 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118-a2c70b20.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118.log.json) |
| Faster RCNN | [RegNetX-1.6GF-FPN](./faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 3.4 | | 40.5 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325-94aa46cc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325.log.json) |
| Faster RCNN | [RegNetX-3.2GF-FPN](./faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.4 | | 42.3 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152-e16a5227.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152.log.json) |
| Faster RCNN | [RegNetX-4GF-FPN](./faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.9 | | 42.8 | - | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201-65eaf841.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201.log.json) |
| Mask RCNN | [RegNetX-400MF-FPN](./mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 2.5 | | 37.6 | 34.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443-8aac57a4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443.log.json) |
| Mask RCNN | [RegNetX-800MF-FPN](./mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 2.9 | | 39.5 | 36.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641-715d51f5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641.log.json) |
| Mask RCNN | [RegNetX-1.6GF-FPN](./mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 3.6 | | 40.9 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641-6764cff5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641.log.json) |
| Mask RCNN | [RegNetX-3.2GF-FPN](./mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 5.0 | | 43.1 | 38.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221.log.json) |
| Mask RCNN | [RegNetX-4GF-FPN](./mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py) | pytorch | 3x | 5.1 | | 43.4 | 39.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621-00f0331c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621.log.json) |
| Cascade Mask RCNN | [RegNetX-400MF-FPN](./cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.3 | | 41.6 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619-5142f449.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619.log.json) |
| Cascade Mask RCNN | [RegNetX-800MF-FPN](./cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 4.8 | | 42.8 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616-dcbd13f4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616.log.json) |
| Cascade Mask RCNN | [RegNetX-1.6GF-FPN](./cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 5.4 | | 44.5 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616-75f29a61.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616.log.json) |
| Cascade Mask RCNN | [RegNetX-3.2GF-FPN](./cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 6.4 | | 45.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616-b9c2c58b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616.log.json) |
| Cascade Mask RCNN | [RegNetX-4GF-FPN](./cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | pytorch | 3x | 6.9 | | 45.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034-cbb1be4c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034.log.json) |

### Notice

1. The models are trained with a different weight decay, i.e., `weight_decay=5e-5`, following the setting used in ImageNet training. This brings an improvement of at least 0.7 AP absolute, but does not improve models using ResNet-50.
2. RetinaNets using RegNets are trained with a learning rate of 0.02 and gradient clipping. We find that the 0.02 learning rate improves the results by at least 0.7 AP absolute, and that gradient clipping is necessary to stabilize training. However, this does not improve the performance of the ResNet-50-FPN RetinaNet. (A config sketch of these settings follows the citation below.)

## Citation

```latex
@article{radosavovic2020designing,
  title={Designing Network Design Spaces},
  author={Ilija Radosavovic and Raj Prateek Kosaraju and Ross Girshick and Kaiming He and Piotr DollΓ‘r},
  year={2020},
  eprint={2003.13678},
  archivePrefix={arXiv},
  primaryClass={cs.CV}
}
```
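For reference, the training settings described in the Notice translate into optimizer fields like the following. This is a hedged sketch: the weight decay value comes from the configs in this folder, but the gradient-clipping threshold shown is illustrative rather than copied from a shipped config.

```python
# 1. ImageNet-style weight decay used for the RegNet models above.
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
# 2. Gradient clipping to stabilize RetinaNet+RegNet training.
#    (The max_norm value here is an assumption, not taken from this repo.)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
```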
29,864
243.795082
1,145
md
mmdetection
mmdetection-master/configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_1.6gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
    neck=dict(
        type='FPN',
        in_channels=[72, 168, 408, 912],
        out_channels=256,
        num_outs=5))
534
28.722222
73
py
mmdetection
mmdetection-master/configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
_base_ = [
    '../common/mstrain_3x_coco_instance.py',
    '../_base_/models/cascade_mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    # Images are converted to float32 directly after loading in PyCls
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(weight_decay=0.00005)
2,005
30.34375
77
py
mmdetection
mmdetection-master/configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_400mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')),
    neck=dict(
        type='FPN',
        in_channels=[32, 64, 160, 384],
        out_channels=256,
        num_outs=5))
533
28.666667
73
py
mmdetection
mmdetection-master/configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_4.0gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')),
    neck=dict(
        type='FPN',
        in_channels=[80, 240, 560, 1360],
        out_channels=256,
        num_outs=5))
535
28.777778
73
py
mmdetection
mmdetection-master/configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py
_base_ = 'cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_800mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 288, 672],
        out_channels=256,
        num_outs=5))
534
28.722222
73
py
mmdetection
mmdetection-master/configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_1.6gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
    neck=dict(
        type='FPN',
        in_channels=[72, 168, 408, 912],
        out_channels=256,
        num_outs=5))
528
28.388889
73
py
mmdetection
mmdetection-master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
1,920
32.12069
73
py
mmdetection
mmdetection-master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py
_base_ = './faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
140
34.25
53
py
mmdetection
mmdetection-master/configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
_base_ = [
    '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(weight_decay=0.00005)
1,888
29.467742
77
py
mmdetection
mmdetection-master/configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_400mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')),
    neck=dict(
        type='FPN',
        in_channels=[32, 64, 160, 384],
        out_channels=256,
        num_outs=5))
527
28.333333
73
py
mmdetection
mmdetection-master/configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_4.0gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')),
    neck=dict(
        type='FPN',
        in_channels=[80, 240, 560, 1360],
        out_channels=256,
        num_outs=5))
529
28.444444
73
py
mmdetection
mmdetection-master/configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_800mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 288, 672],
        out_channels=256,
        num_outs=5))
528
28.388889
73
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_1.6gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
    neck=dict(
        type='FPN',
        in_channels=[72, 168, 408, 912],
        out_channels=256,
        num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
760
27.185185
73
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_12gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_12gf')),
    neck=dict(
        type='FPN',
        in_channels=[224, 448, 896, 2240],
        out_channels=256,
        num_outs=5))
520
27.944444
72
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    # Images are converted to float32 directly after loading in PyCls
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
2,015
33.169492
77
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py
_base_ = 'mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')))
305
37.25
74
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
2,261
32.761194
77
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_400mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_400mf')),
    neck=dict(
        type='FPN',
        in_channels=[32, 64, 160, 384],
        out_channels=256,
        num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
759
27.148148
73
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_4.0gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')),
    neck=dict(
        type='FPN',
        in_channels=[80, 240, 560, 1360],
        out_channels=256,
        num_outs=5))
521
28
73
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_4.0gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_4.0gf')),
    neck=dict(
        type='FPN',
        in_channels=[80, 240, 560, 1360],
        out_channels=256,
        num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
761
27.222222
73
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_6.4gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_6.4gf')),
    neck=dict(
        type='FPN',
        in_channels=[168, 392, 784, 1624],
        out_channels=256,
        num_outs=5))
522
28.055556
73
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py
_base_ = [
    '../common/mstrain-poly_3x_coco_instance.py',
    '../_base_/models/mask_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_800mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 288, 672],
        out_channels=256,
        num_outs=5))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
760
27.185185
73
py
mmdetection
mmdetection-master/configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py
_base_ = './mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_8.0gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_8.0gf')),
    neck=dict(
        type='FPN',
        in_channels=[80, 240, 720, 1920],
        out_channels=256,
        num_outs=5))
521
28
73
py
mmdetection
mmdetection-master/configs/regnet/metafile.yml
Models:
  - Name: mask_rcnn_regnetx-3.2GF_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.0
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_1x_coco_20200520_163141-2a9d1814.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-4GF_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.5
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_1x_coco/mask_rcnn_regnetx-4GF_fpn_1x_coco_20200517_180217-32e9c92d.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-6.4GF_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.1
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-6.4GF_fpn_1x_coco/mask_rcnn_regnetx-6.4GF_fpn_1x_coco_20200517_180439-3a7aae83.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-8GF_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.4
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-8GF_fpn_1x_coco/mask_rcnn_regnetx-8GF_fpn_1x_coco_20200517_180515-09daa87e.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-12GF_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.4
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-12GF_fpn_1x_coco/mask_rcnn_regnetx-12GF_fpn_1x_coco_20200517_180552-b538bd8b.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 5.0
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_regnetx-3.2GF_fpn_mdconv_c3-c5_1x_coco_20200520_172726-75f40794.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: faster_rcnn_regnetx-3.2GF_fpn_1x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.5
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_1x_coco/faster_rcnn_regnetx-3.2GF_fpn_1x_coco_20200517_175927-126fd9bf.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: faster_rcnn_regnetx-3.2GF_fpn_2x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco.py
    Metadata:
      Training Memory (GB): 4.5
      Epochs: 24
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_2x_coco/faster_rcnn_regnetx-3.2GF_fpn_2x_coco_20200520_223955-e2081918.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: retinanet_regnetx-800MF_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 2.5
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 35.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-800MF_fpn_1x_coco/retinanet_regnetx-800MF_fpn_1x_coco_20200517_191403-f6f91d10.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: retinanet_regnetx-1.6GF_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.3
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco/retinanet_regnetx-1.6GF_fpn_1x_coco_20200517_191403-37009a9d.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: retinanet_regnetx-3.2GF_fpn_1x_coco
    In Collection: RetinaNet
    Config: configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.2
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco/retinanet_regnetx-3.2GF_fpn_1x_coco_20200520_163141-cb1509e8.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 2.3
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210526_095112-e1967c37.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 2.8
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210526_095118-a2c70b20.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 3.4
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-1_20210526_095325-94aa46cc.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 4.4
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-3_20210526_095152-e16a5227.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco
    In Collection: Faster R-CNN
    Config: configs/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 4.9
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/faster_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210526_095201-65eaf841.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 5.0
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Training Memory (GB): 2.5
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 34.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-400MF_fpn_mstrain-poly_3x_coco_20210601_235443-8aac57a4.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Training Memory (GB): 2.9
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-800MF_fpn_mstrain-poly_3x_coco_20210602_210641-715d51f5.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Training Memory (GB): 3.6
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-1.6GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-1_20210602_210641-6764cff5.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 5.0
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco_20200521_202221-99879813.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco
    In Collection: Mask R-CNN
    Config: configs/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco.py
    Metadata:
      Training Memory (GB): 5.1
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco/mask_rcnn_regnetx-4GF_fpn_mstrain-poly_3x_coco_20210602_032621-00f0331c.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco
    In Collection: Cascade R-CNN
    Config: configs/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 4.3
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-400MF_fpn_mstrain_3x_coco_20210715_211619-5142f449.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco
    In Collection: Cascade R-CNN
    Config: configs/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 4.8
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-800MF_fpn_mstrain_3x_coco_20210715_211616-dcbd13f4.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco
    In Collection: Cascade R-CNN
    Config: configs/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 5.4
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-1.6GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-1_20210715_211616-75f29a61.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco
    In Collection: Cascade R-CNN
    Config: configs/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 6.4
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 40.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-3.2GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-3_20210715_211616-b9c2c58b.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0

  - Name: cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco
    In Collection: Cascade R-CNN
    Config: configs/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco.py
    Metadata:
      Training Memory (GB): 6.9
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - RegNet
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 40.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/regnet/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco/cascade_mask_rcnn_regnetx-4GF_fpn_mstrain_3x_coco_20210715_212034-cbb1be4c.pth
    Paper:
      URL: https://arxiv.org/abs/2003.13678
      Title: 'Designing Network Design Spaces'
    README: configs/regnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.1.0/mmdet/models/backbones/regnet.py#L11
      Version: v2.1.0
27,623
33.616541
200
yml
mmdetection
mmdetection-master/configs/regnet/retinanet_regnetx-1.6GF_fpn_1x_coco.py
_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_1.6gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_1.6gf')),
    neck=dict(
        type='FPN',
        in_channels=[72, 168, 408, 912],
        out_channels=256,
        num_outs=5))
520
27.944444
73
py
mmdetection
mmdetection-master/configs/regnet/retinanet_regnetx-3.2GF_fpn_1x_coco.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        _delete_=True,
        type='RegNet',
        arch='regnetx_3.2gf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_3.2gf')),
    neck=dict(
        type='FPN',
        in_channels=[96, 192, 432, 1008],
        out_channels=256,
        num_outs=5))
img_norm_cfg = dict(
    # The mean and std are used in PyCls when training RegNets
    mean=[103.53, 116.28, 123.675],
    std=[57.375, 57.12, 58.395],
    to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.00005)
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
2,004
32.416667
73
py
mmdetection
mmdetection-master/configs/regnet/retinanet_regnetx-800MF_fpn_1x_coco.py
_base_ = './retinanet_regnetx-3.2GF_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='RegNet',
        arch='regnetx_800mf',
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://regnetx_800mf')),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 288, 672],
        out_channels=256,
        num_outs=5))
520
27.944444
73
py
mmdetection
mmdetection-master/configs/reppoints/README.md
# RepPoints

> [RepPoints: Point Set Representation for Object Detection](https://arxiv.org/abs/1904.11490)

<!-- [ALGORITHM] -->

## Abstract

Modern object detectors rely heavily on rectangular bounding boxes, such as anchors, proposals and the final predictions, to represent objects at various recognition stages. The bounding box is convenient to use but provides only a coarse localization of objects and leads to a correspondingly coarse extraction of object features. In this paper, we present RepPoints (representative points), a new finer representation of objects as a set of sample points useful for both localization and recognition. Given ground truth localization and recognition targets for training, RepPoints learn to automatically arrange themselves in a manner that bounds the spatial extent of an object and indicates semantically significant local areas. They furthermore do not require the use of anchors to sample a space of bounding boxes. We show that an anchor-free object detector based on RepPoints can be as effective as the state-of-the-art anchor-based detection methods, with 46.5 AP and 67.4 AP50 on the COCO test-dev detection benchmark, using a ResNet-101 model.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143972514-93247220-4dad-4eb3-a51b-a1115dc7d449.png"/>
</div>

## Introduction

By [Ze Yang](https://yangze.tech/), [Shaohui Liu](http://b1ueber2y.me/), and [Han Hu](https://ancientmooner.github.io/).

We provide code support and configuration files to reproduce the results in the paper for ["RepPoints: Point Set Representation for Object Detection"](https://arxiv.org/abs/1904.11490) on COCO object detection.

**RepPoints**, initially described in [arXiv](https://arxiv.org/abs/1904.11490), is a new representation method for visual objects, on which visual understanding tasks are typically centered. Visual object representation, aiming at both geometric description and appearance feature extraction, is conventionally achieved by `bounding box + RoIPool (RoIAlign)`. The bounding box representation is convenient to use; however, it provides only a rectangular localization of objects that lacks geometric precision and may consequently degrade feature quality. Our new representation, RepPoints, models objects by a `point set` instead of a `bounding box`, which learns to adaptively position itself over an object in a manner that circumscribes the object’s `spatial extent` and enables `semantically aligned feature extraction`. This richer and more flexible representation maintains the convenience of bounding boxes while facilitating various visual understanding applications. This repo demonstrates the effectiveness of RepPoints for COCO object detection.

Another feature of this repo is the demonstration of an `anchor-free detector`, which can be as effective as state-of-the-art anchor-based detection methods. The anchor-free detector can utilize either `bounding box` or `RepPoints` as the basic object representation.

## Results and Models

The results on COCO 2017val are shown in the table below.

| Method | Backbone | GN | Anchor | convert func | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :-------: | :-----------: | :-: | :----: | :----------: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| BBox | R-50-FPN | Y | single | - | 1x | 3.9 | 15.9 | 36.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916.log.json) |
| BBox | R-50-FPN | Y | none | - | 1x | 3.9 | 15.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916.log.json) |
| RepPoints | R-50-FPN | N | none | moment | 1x | 3.3 | 18.5 | 37.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330_233609.log.json) |
| RepPoints | R-50-FPN | Y | none | moment | 1x | 3.9 | 17.5 | 38.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952-3e51b550.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952.log.json) |
| RepPoints | R-50-FPN | Y | none | moment | 2x | 3.9 | - | 38.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329-91babaa2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329_150020.log.json) |
| RepPoints | R-101-FPN | Y | none | moment | 2x | 5.8 | 13.7 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329-4fbc7310.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329_132205.log.json) |
| RepPoints | R-101-FPN-DCN | Y | none | moment | 2x | 5.9 | 12.1 | 42.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-3309fbf2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329_132134.log.json) |
| RepPoints | X-101-FPN-DCN | Y | none | moment | 2x | 7.1 | 9.3 | 44.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-f87da1ea.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329_132201.log.json) |

**Notes:**

- `R-xx`, `X-xx` denote the ResNet and ResNeXt architectures, respectively.
- `DCN` denotes replacing the 3x3 conv with a 3x3 deformable convolution in the `c3-c5` stages of the backbone.
- `none` in the `anchor` column means a 2-d `center point` (x,y) is used to represent the initial object hypothesis. `single` denotes that one 4-d anchor box (x,y,w,h) with an IoU-based label assignment criterion is adopted.
- `moment`, `partial MinMax`, `MinMax` in the `convert func` column are three functions to convert a point set to a pseudo box, as sketched below.
- Note that the results here are slightly different from those reported in the paper, due to the framework change. While the original paper uses an [MXNet](https://mxnet.apache.org/) implementation, we re-implement the method in [PyTorch](https://pytorch.org/) based on mmdetection.

## Citation

```latex
@inproceedings{yang2019reppoints,
  title={RepPoints: Point Set Representation for Object Detection},
  author={Yang, Ze and Liu, Shaohui and Hu, Han and Wang, Liwei and Lin, Stephen},
  booktitle={The IEEE International Conference on Computer Vision (ICCV)},
  month={Oct},
  year={2019}
}
```
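To make the `convert func` column concrete, below is a minimal sketch (for illustration only, not the repository's implementation) of the simplest conversion, `MinMax`, which turns each point set into a pseudo box by taking coordinate-wise extrema; `moment` instead derives the box center and size from the points' mean and standard deviation, with a learned scale.

```python
import torch


def minmax_pseudo_box(points: torch.Tensor) -> torch.Tensor:
    """Convert point sets to pseudo boxes via coordinate-wise min/max.

    `points` has shape (N, num_points, 2) holding (x, y) sample locations;
    the result has shape (N, 4) in (x1, y1, x2, y2) order.
    """
    x1y1 = points.min(dim=1).values  # coordinate-wise minimum over the set
    x2y2 = points.max(dim=1).values  # coordinate-wise maximum over the set
    return torch.cat([x1y1, x2y2], dim=1)


# Example: 9 points per object, matching num_points=9 in the configs below.
pts = torch.rand(1, 9, 2) * 100
print(minmax_pseudo_box(pts))  # tight axis-aligned box around the point set
```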
11,035
182.933333
1,061
md
mmdetection
mmdetection-master/configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(bbox_head=dict(transform_method='minmax', use_grid_points=True))
140
46
77
py
mmdetection
mmdetection-master/configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(
    bbox_head=dict(transform_method='minmax', use_grid_points=True),
    # training and testing settings
    train_cfg=dict(
        init=dict(
            assigner=dict(
                _delete_=True,
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0,
                ignore_iof_thr=-1))))
452
31.357143
68
py
mmdetection
mmdetection-master/configs/reppoints/metafile.yml
Collections:
  - Name: RepPoints
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Group Normalization
        - FPN
        - RepPoints
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/1904.11490
      Title: 'RepPoints: Point Set Representation for Object Detection'
    README: configs/reppoints/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/detectors/reppoints_detector.py#L9
      Version: v2.0.0

Models:
  - Name: bbox_r50_grid_fpn_gn-neck+head_1x_coco
    In Collection: RepPoints
    Config: configs/reppoints/bbox_r50_grid_fpn_gn-neck+head_1x_coco.py
    Metadata:
      Training Memory (GB): 3.9
      inference time (ms/im):
        - value: 62.89
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 36.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth

  - Name: bbox_r50_grid_center_fpn_gn-neck+head_1x_coco
    In Collection: RepPoints
    Config: configs/reppoints/bbox_r50_grid_center_fpn_gn-neck+head_1x_coco.py
    Metadata:
      Training Memory (GB): 3.9
      inference time (ms/im):
        - value: 64.94
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco/bbox_r50_grid_fpn_gn-neck%2Bhead_1x_coco_20200329_145916-0eedf8d1.pth

  - Name: reppoints_moment_r50_fpn_1x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.3
      inference time (ms/im):
        - value: 54.05
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_1x_coco/reppoints_moment_r50_fpn_1x_coco_20200330-b73db8d1.pth

  - Name: reppoints_moment_r50_fpn_gn-neck+head_1x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py
    Metadata:
      Training Memory (GB): 3.9
      inference time (ms/im):
        - value: 57.14
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_1x_coco_20200329_145952-3e51b550.pth

  - Name: reppoints_moment_r50_fpn_gn-neck+head_2x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py
    Metadata:
      Training Memory (GB): 3.9
      inference time (ms/im):
        - value: 57.14
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r50_fpn_gn-neck%2Bhead_2x_coco_20200329-91babaa2.pth

  - Name: reppoints_moment_r101_fpn_gn-neck+head_2x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py
    Metadata:
      Training Memory (GB): 5.8
      inference time (ms/im):
        - value: 72.99
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_gn-neck%2Bhead_2x_coco_20200329-4fbc7310.pth

  - Name: reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
    Metadata:
      Training Memory (GB): 5.9
      inference time (ms/im):
        - value: 82.64
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-3309fbf2.pth

  - Name: reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco
    In Collection: RepPoints
    Config: configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
    Metadata:
      Training Memory (GB): 7.1
      inference time (ms/im):
        - value: 107.53
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck%2Bhead_2x_coco_20200329-f87da1ea.pth
6,299
33.615385
214
yml
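Each entry in the metafile above pairs a config file with downloadable weights, and those pairs plug straight into MMDetection's high-level inference helpers. A minimal sketch, assuming an MMDetection v2.x install, a locally downloaded config/checkpoint pair from the list, and a hypothetical test image `demo.jpg`:

from mmdet.apis import inference_detector, init_detector

# config/checkpoint taken from the metafile; the local filenames are assumptions
config = 'configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
checkpoint = 'reppoints_moment_r50_fpn_gn-neck+head_2x_coco_20200329-91babaa2.pth'

model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo.jpg')  # per-class (x1, y1, x2, y2, score) arrays
model.show_result('demo.jpg', result, out_file='demo_result.jpg')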
mmdetection
mmdetection-master/configs/reppoints/reppoints_minmax_r50_fpn_gn-neck+head_1x_coco.py
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(bbox_head=dict(transform_method='minmax'))
118
38.666667
61
py
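The `transform_method='minmax'` override in the config above decides how the head's learned point set is collapsed into a pseudo bounding box for loss computation. Below is a rough sketch of the two reductions used across these configs, assuming points shaped (N, num_points, 2); the function names are ours, and the exact logic (including the learned moment-transfer parameter and its gradient trick) lives in `RepPointsHead`:

import torch


def points2bbox_minmax(pts):
    """(N, num_points, 2) point sets -> (N, 4) boxes as (x1, y1, x2, y2)."""
    return torch.cat([pts.min(dim=1).values, pts.max(dim=1).values], dim=1)


def points2bbox_moment(pts, transfer=None):
    """Box from point moments: center = mean of the points, half size =
    std of the points scaled by exp(transfer) (a learned nn.Parameter in
    the real head; zeros here as a placeholder)."""
    if transfer is None:
        transfer = torch.zeros(2)
    center = pts.mean(dim=1)
    half_wh = pts.std(dim=1) * torch.exp(transfer)
    return torch.cat([center - half_wh, center + half_wh], dim=1)


# 'partial_minmax' (used by a sibling config) is the same min/max
# reduction applied to only a subset of the points in each set.
pts = torch.rand(8, 9, 2) * 32  # 8 sets of num_points=9 learned points
assert points2bbox_minmax(pts).shape == (8, 4)
assert points2bbox_moment(pts).shape == (8, 4)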
mmdetection
mmdetection-master/configs/reppoints/reppoints_moment_r101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
340
36.888889
72
py
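The `dcn=dict(type='DCN', ...)` / `stage_with_dcn=(False, True, True, True)` pair in the config above swaps deformable convolutions into ResNet stages 2-4 (c3-c5). A minimal sketch of the underlying op, assuming mmcv-full with its compiled ops is installed; note that in the real backbone 'DCN' resolves to a packed variant whose offsets come from a small conv branch rather than the zeros used here:

import torch
from mmcv.ops import DeformConv2d

dcn = DeformConv2d(64, 64, kernel_size=3, padding=1)
x = torch.randn(1, 64, 16, 16)
# offset field: 2 values (dy, dx) per kernel location per deform group
offset = torch.zeros(1, 2 * 3 * 3, 16, 16)  # zero offsets == a plain 3x3 conv
out = dcn(x, offset)
assert out.shape == x.shape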
mmdetection
mmdetection-master/configs/reppoints/reppoints_moment_r101_fpn_gn-neck+head_2x_coco.py
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
217
30.142857
61
py
mmdetection
mmdetection-master/configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='RepPointsDetector',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_input',
        num_outs=5),
    bbox_head=dict(
        type='RepPointsHead',
        num_classes=80,
        in_channels=256,
        feat_channels=256,
        point_feat_channels=256,
        stacked_convs=3,
        num_points=9,
        gradient_mul=0.1,
        point_strides=[8, 16, 32, 64, 128],
        point_base_scale=4,
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox_init=dict(type='SmoothL1Loss', beta=0.11, loss_weight=0.5),
        loss_bbox_refine=dict(type='SmoothL1Loss', beta=0.11, loss_weight=1.0),
        transform_method='moment'),
    # training and testing settings
    train_cfg=dict(
        init=dict(
            assigner=dict(type='PointAssigner', scale=4, pos_num=1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False),
        refine=dict(
            assigner=dict(
                type='MaxIoUAssigner',
                pos_iou_thr=0.5,
                neg_iou_thr=0.4,
                min_pos_iou=0,
                ignore_iof_thr=-1),
            allowed_border=-1,
            pos_weight=-1,
            debug=False)),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.5),
        max_per_img=100))
optimizer = dict(lr=0.01)
2,065
29.382353
79
py
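This base file is the one every RepPoints variant in this directory inherits from via `_base_`, so it is also a natural smoke test for the whole family. A minimal sketch of building the detector from the config, assuming an MMDetection v2.x checkout as the working directory so the relative `_base_` paths resolve:

from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/reppoints/reppoints_moment_r50_fpn_1x_coco.py')
model = build_detector(cfg.model)  # train_cfg/test_cfg are embedded in cfg.model
print(type(model).__name__)  # -> RepPointsDetector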
mmdetection
mmdetection-master/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py
_base_ = './reppoints_moment_r50_fpn_1x_coco.py'
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(neck=dict(norm_cfg=norm_cfg), bbox_head=dict(norm_cfg=norm_cfg))
optimizer = dict(lr=0.01)
215
42.2
77
py
mmdetection
mmdetection-master/configs/reppoints/reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
148
36.25
61
py
mmdetection
mmdetection-master/configs/reppoints/reppoints_moment_x101_fpn_dconv_c3-c5_gn-neck+head_2x_coco.py
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
562
32.117647
76
py
mmdetection
mmdetection-master/configs/reppoints/reppoints_partial_minmax_r50_fpn_gn-neck+head_1x_coco.py
_base_ = './reppoints_moment_r50_fpn_gn-neck+head_1x_coco.py'
model = dict(bbox_head=dict(transform_method='partial_minmax'))
126
41.333333
63
py
mmdetection
mmdetection-master/configs/res2net/README.md
# Res2Net

> [Res2Net: A New Multi-scale Backbone Architecture](https://arxiv.org/abs/1904.01169)

<!-- [BACKBONE] -->

## Abstract

Representing features at multiple scales is of great importance for numerous vision tasks. Recent advances in backbone convolutional neural networks (CNNs) continually demonstrate stronger multi-scale representation ability, leading to consistent performance gains on a wide range of applications. However, most existing methods represent the multi-scale features in a layer-wise manner. In this paper, we propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer. The proposed Res2Net block can be plugged into the state-of-the-art backbone CNN models, e.g., ResNet, ResNeXt, and DLA. We evaluate the Res2Net block on all these models and demonstrate consistent performance gains over baseline models on widely-used datasets, e.g., CIFAR-100 and ImageNet. Further ablation studies and experimental results on representative computer vision tasks, i.e., object detection, class activation mapping, and salient object detection, further verify the superiority of the Res2Net over the state-of-the-art baseline methods.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143972411-8d08113f-9fce-4d24-a138-4fadf2c54f9a.png" height="300"/>
</div>

## Introduction

We propose a novel building block for CNNs, namely Res2Net, by constructing hierarchical residual-like connections within one single residual block. The Res2Net represents multi-scale features at a granular level and increases the range of receptive fields for each network layer.

| Backbone          | Params. | GFLOPs | top-1 err. | top-5 err. |
| :---------------: | :-----: | :----: | :--------: | :--------: |
| ResNet-101        | 44.6 M  | 7.8    | 22.63      | 6.44       |
| ResNeXt-101-64x4d | 83.5 M  | 15.5   | 20.40      | -          |
| HRNetV2p-W48      | 77.5 M  | 16.1   | 20.70      | 5.50       |
| Res2Net-101       | 45.2 M  | 8.3    | 18.77      | 4.64       |

Compared with other backbone networks, Res2Net requires fewer parameters and FLOPs.

**Note:**

- GFLOPs for classification are calculated with image size (224x224).
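The hierarchical residual-like connections described in the introduction are easiest to see in code. Below is a minimal, self-contained PyTorch sketch of a Res2Net-style bottleneck with `scales=4`; the class name `Res2NetBottleneck` and the width rule are our illustration choices, and the backbone actually used by the configs in this directory lives in `mmdet/models/backbones/res2net.py`, which additionally handles strides, downsampling and stage construction.

```python
import torch
import torch.nn as nn


class Res2NetBottleneck(nn.Module):
    """Illustrative only: the single 3x3 conv of a ResNet bottleneck is
    replaced by `scales` narrow 3x3 convs wired hierarchically, so later
    splits see increasingly large receptive fields."""

    def __init__(self, channels, scales=4, base_width=26):
        super().__init__()
        width = base_width * channels // 256  # assumed width rule for the demo
        self.scales = scales
        self.conv1 = nn.Conv2d(channels, width * scales, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(width * scales)
        self.convs = nn.ModuleList(
            nn.Conv2d(width, width, 3, padding=1, bias=False)
            for _ in range(scales - 1))
        self.bns = nn.ModuleList(
            nn.BatchNorm2d(width) for _ in range(scales - 1))
        self.conv3 = nn.Conv2d(width * scales, channels, 1, bias=False)
        self.bn3 = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        splits = torch.chunk(out, self.scales, dim=1)
        sp = splits[0]
        outs = [sp]  # y1 = x1: the first split is passed through untouched
        for i in range(1, self.scales):
            inp = splits[i] if i == 1 else splits[i] + sp
            sp = self.relu(self.bns[i - 1](self.convs[i - 1](inp)))
            outs.append(sp)  # y_i = K_i(x_i + y_{i-1})
        out = self.conv3(torch.cat(outs, dim=1))
        return self.relu(self.bn3(out) + identity)


# smoke test: shapes are preserved, as required for a residual block
block = Res2NetBottleneck(channels=256)
assert block(torch.randn(2, 256, 32, 32)).shape == (2, 256, 32, 32)
```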
## Results and Models

### Faster R-CNN

| Backbone   | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| R2-101-FPN | pytorch | 2x      | 7.4      | -              | 43.0   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco-175f1da6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/faster_rcnn_r2_101_fpn_2x_coco/faster_rcnn_r2_101_fpn_2x_coco_20200514_231734.log.json) |

### Mask R-CNN

| Backbone   | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R2-101-FPN | pytorch | 2x      | 7.9      | -              | 43.6   | 38.7    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco-17f061e8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/mask_rcnn_r2_101_fpn_2x_coco/mask_rcnn_r2_101_fpn_2x_coco_20200515_002413.log.json) |

### Cascade R-CNN

| Backbone   | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| R2-101-FPN | pytorch | 20e     | 7.8      | -              | 45.7   | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco-f4b7b7db.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_rcnn_r2_101_fpn_20e_coco/cascade_rcnn_r2_101_fpn_20e_coco_20200515_091644.log.json) |

### Cascade Mask R-CNN

| Backbone   | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R2-101-FPN | pytorch | 20e     | 9.5      | -              | 46.4   | 40.0    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco-8a7b41e1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco/cascade_mask_rcnn_r2_101_fpn_20e_coco_20200515_091645.log.json) |

### Hybrid Task Cascade (HTC)

| Backbone   | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :--------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R2-101-FPN | pytorch | 20e     | -        | -              | 47.5   | 41.6    | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/res2net/htc_r2_101_fpn_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco-3a8d2112.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/res2net/htc_r2_101_fpn_20e_coco/htc_r2_101_fpn_20e_coco_20200515_150029.log.json) |

- Res2Net ImageNet pretrained models are in [Res2Net-PretrainedModels](https://github.com/Res2Net/Res2Net-PretrainedModels).
- More applications of Res2Net are in [Res2Net-Github](https://github.com/Res2Net/).

## Citation

```latex
@article{gao2019res2net,
  title={Res2Net: A New Multi-scale Backbone Architecture},
  author={Gao, Shang-Hua and Cheng, Ming-Ming and Zhao, Kai and Zhang, Xin-Yu and Yang, Ming-Hsuan and Torr, Philip},
  journal={IEEE TPAMI},
  year={2020},
  doi={10.1109/TPAMI.2019.2938758},
}
```
10,383
132.128205
1,236
md
mmdetection
mmdetection-master/configs/res2net/cascade_mask_rcnn_r2_101_fpn_20e_coco.py
_base_ = '../cascade_rcnn/cascade_mask_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
299
26.272727
64
py
mmdetection
mmdetection-master/configs/res2net/cascade_rcnn_r2_101_fpn_20e_coco.py
_base_ = '../cascade_rcnn/cascade_rcnn_r50_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
294
25.818182
62
py
mmdetection
mmdetection-master/configs/res2net/faster_rcnn_r2_101_fpn_2x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
291
25.545455
62
py
mmdetection
mmdetection-master/configs/res2net/htc_r2_101_fpn_20e_coco.py
_base_ = '../htc/htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
# learning policy
lr_config = dict(step=[16, 19])
runner = dict(type='EpochBasedRunner', max_epochs=20)
379
26.142857
62
py
mmdetection
mmdetection-master/configs/res2net/mask_rcnn_r2_101_fpn_2x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
287
25.181818
62
py