Dataset columns:
- repo: string, length 2–152
- file: string, length 15–239
- code: string, length 0–58.4M
- file_length: int64, 0–58.4M
- avg_line_length: float64, 0–1.81M
- max_line_length: int64, 0–12.7M
- extension_type: string, 364 classes
mmdetection
mmdetection-master/configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
214
34.833333
72
py
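These dcn files are short deltas over a `_base_` config. A minimal sketch of how such a delta resolves, assuming an mmdetection v2.x checkout with mmcv installed and the repo root as the working directory:

```python
# Sketch: mmcv's Config machinery merges the `_base_` file with the delta
# above, so the five-line config expands into a full Faster R-CNN model
# with DCN enabled in stages c3-c5.
from mmcv import Config

cfg = Config.fromfile('configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py')
print(cfg.model.backbone.dcn)             # merged in from the delta
print(cfg.model.backbone.stage_with_dcn)  # (False, True, True, True)
```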
mmdetection
mmdetection-master/configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                _delete_=True,
                type='DeformRoIPoolPack',
                output_size=7,
                output_channels=256),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32])))
408
30.461538
56
py
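The `DeformRoIPoolPack` layer this config swaps in lives in mmcv. A small standalone sketch, assuming a CUDA build of mmcv-full (the deformable RoI pooling op is GPU-only); shapes are illustrative:

```python
# Sketch of the deformable RoI pooling layer referenced by the config
# (assumes a CUDA build of mmcv-full; shapes are illustrative).
import torch
from mmcv.ops import DeformRoIPoolPack

pool = DeformRoIPoolPack(output_size=7, output_channels=256).cuda()
feats = torch.randn(1, 256, 50, 50).cuda()            # one feature level
# RoIs are rows of (batch_idx, x1, y1, x2, y2) in feature-map coordinates.
rois = torch.tensor([[0., 4., 4., 40., 40.]]).cuda()
out = pool(feats, rois)
print(out.shape)  # torch.Size([1, 256, 7, 7])
```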
mmdetection
mmdetection-master/configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
557
31.823529
76
py
mmdetection
mmdetection-master/configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r101_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
211
34.333333
72
py
mmdetection
mmdetection-master/configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
210
34.166667
72
py
mmdetection
mmdetection-master/configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
240
29.125
72
py
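`fp16 = dict(loss_scale=512.)` turns on mmcv's mixed-precision training with a static loss scale. A toy PyTorch sketch of what static loss scaling does (an illustration only; mmcv's `Fp16OptimizerHook` additionally keeps fp32 master weights):

```python
# Toy illustration of static loss scaling, i.e. what loss_scale=512. means.
import torch

model = torch.nn.Linear(4, 1).half().cuda()  # fp16 weights (needs a GPU)
loss = model(torch.randn(2, 4).half().cuda()).sum()

scale = 512.0
(loss * scale).backward()  # scale the loss so tiny fp16 grads don't underflow
for p in model.parameters():
    p.grad /= scale        # unscale gradients before the optimizer step
```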
mmdetection
mmdetection-master/configs/dcn/metafile.yml
Collections:
  - Name: Deformable Convolutional Networks
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Deformable Convolution
    Paper:
      URL: https://arxiv.org/abs/1703.06211
      Title: "Deformable Convolutional Networks"
    README: configs/dcn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15
      Version: v2.0.0

Models:
  - Name: faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 4.0
      inference time (ms/im):
        - value: 56.18
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-d68aed1e.pth
  - Name: faster_rcnn_r50_fpn_dpool_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/faster_rcnn_r50_fpn_dpool_1x_coco.py
    Metadata:
      Training Memory (GB): 5.0
      inference time (ms/im):
        - value: 58.14
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_dpool_1x_coco/faster_rcnn_r50_fpn_dpool_1x_coco_20200307-90d3c01d.pth
  - Name: faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 6.0
      inference time (ms/im):
        - value: 80
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco/faster_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-1377f13d.pth
  - Name: faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 7.3
      inference time (ms/im):
        - value: 100
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/faster_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco_20200203-4f85c69c.pth
  - Name: mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 4.5
      inference time (ms/im):
        - value: 64.94
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200203-4d9ad43b.pth
  - Name: mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
        - Mixed Precision Training
      Training Memory (GB): 3.0
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.9
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_dconv_c3-c5_1x_coco_20210520_180247-c06429d2.pth
  - Name: mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 6.5
      inference time (ms/im):
        - value: 85.47
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200216-a71f5bce.pth
  - Name: cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 4.5
      inference time (ms/im):
        - value: 68.49
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200130-2f1fca44.pth
  - Name: cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 6.4
      inference time (ms/im):
        - value: 90.91
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200203-3b2f0594.pth
  - Name: cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 6.0
      inference time (ms/im):
        - value: 100
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 38.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r50_fpn_dconv_c3-c5_1x_coco_20200202-42e767a2.pth
  - Name: cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 8.0
      inference time (ms/im):
        - value: 116.28
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_r101_fpn_dconv_c3-c5_1x_coco_20200204-df0c5f10.pth
  - Name: cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks
    Config: configs/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 9.2
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 47.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 41.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco/cascade_mask_rcnn_x101_32x4d_fpn_dconv_c3-c5_1x_coco-e75f90c8.pth
9,291
33.03663
183
yml
mmdetection
mmdetection-master/configs/dcnv2/README.md
# DCNv2

> [Deformable ConvNets v2: More Deformable, Better Results](https://arxiv.org/abs/1811.11168)

<!-- [ALGORITHM] -->

## Abstract

The superior performance of Deformable Convolutional Networks arises from its ability to adapt to the geometric variations of objects. Through an examination of its adaptive behavior, we observe that while the spatial support for its neural features conforms more closely than regular ConvNets to object structure, this support may nevertheless extend well beyond the region of interest, causing features to be influenced by irrelevant image content. To address this problem, we present a reformulation of Deformable ConvNets that improves its ability to focus on pertinent image regions, through increased modeling power and stronger training. The modeling power is enhanced through a more comprehensive integration of deformable convolution within the network, and by introducing a modulation mechanism that expands the scope of deformation modeling. To effectively harness this enriched modeling capability, we guide network training via a proposed feature mimicking scheme that helps the network to learn features that reflect the object focus and classification power of RCNN features. With the proposed contributions, this new version of Deformable ConvNets yields significant performance gains over the original model and produces leading results on the COCO benchmark for object detection and instance segmentation.

## Results and Models

| Backbone | Model | Style | Conv | Pool | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :---------------: | :----: | :-----: | :-----------: | :----: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R-50-FPN | Faster | pytorch | mdconv(c3-c5) | - | 1x | 4.1 | 17.6 | 41.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130-d099253b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130_222144.log.json) |
| \*R-50-FPN (dg=4) | Faster | pytorch | mdconv(c3-c5) | - | 1x | 4.2 | 17.4 | 41.5 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130-01262257.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130_222058.log.json) |
| R-50-FPN | Faster | pytorch | - | mdpool | 1x | 5.8 | 16.6 | 38.7 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307_203304.log.json) |
| R-50-FPN | Mask | pytorch | mdconv(c3-c5) | - | 1x | 4.5 | 15.1 | 41.5 | 37.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203_063443.log.json) |
| R-50-FPN (FP16) | Mask | pytorch | mdconv(c3-c5) | - | 1x | 3.1 | | 42.0 | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434.log.json) |

**Notes:**

- `mdconv` denotes modulated deformable convolution; `c3-c5` means adding deformable convolution in ResNet stages 3 to 5. `mdpool` denotes modulated deformable RoI pooling.
- The dcn ops are modified from https://github.com/chengdazhi/Deformable-Convolution-V2-PyTorch, which should be more memory-efficient and slightly faster.
- (\*) For R-50-FPN (dg=4), `dg` is short for `deformable_group`. This model was trained and tested on an Amazon EC2 p3dn.24xlarge instance.
- **The memory and train/inference time figures are outdated.**

## Citation

```latex
@article{zhu2018deformable,
  title={Deformable ConvNets v2: More Deformable, Better Results},
  author={Zhu, Xizhou and Hu, Han and Lin, Stephen and Dai, Jifeng},
  journal={arXiv preprint arXiv:1811.11168},
  year={2018}
}
```
6,564
171.763158
1,323
md
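The `DCNv2` entries above map to mmcv's modulated deformable convolution. A minimal standalone sketch of that layer, assuming a CUDA build of mmcv-full:

```python
# Sketch of the mdconv layer that `dcn=dict(type='DCNv2', ...)` selects
# (assumes a CUDA build of mmcv-full; shapes are illustrative).
import torch
from mmcv.ops import ModulatedDeformConv2dPack

conv = ModulatedDeformConv2dPack(
    in_channels=64, out_channels=64, kernel_size=3, padding=1,
    deform_groups=1).cuda()
x = torch.randn(1, 64, 32, 32).cuda()
y = conv(x)  # offsets and modulation masks come from conv.conv_offset
print(y.shape)  # torch.Size([1, 64, 32, 32])
```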
mmdetection
mmdetection-master/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
216
35.166667
74
py
mmdetection
mmdetection-master/configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=4, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
216
35.166667
74
py
mmdetection
mmdetection-master/configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_roi_extractor=dict(
            type='SingleRoIExtractor',
            roi_layer=dict(
                _delete_=True,
                type='ModulatedDeformRoIPoolPack',
                output_size=7,
                output_channels=256),
            out_channels=256,
            featmap_strides=[4, 8, 16, 32])))
417
31.153846
56
py
mmdetection
mmdetection-master/configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
fp16 = dict(loss_scale=512.)
242
29.375
74
py
mmdetection
mmdetection-master/configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
212
34.5
74
py
mmdetection
mmdetection-master/configs/dcnv2/metafile.yml
Collections:
  - Name: Deformable Convolutional Networks v2
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Deformable Convolution
    Paper:
      URL: https://arxiv.org/abs/1811.11168
      Title: "Deformable ConvNets v2: More Deformable, Better Results"
    README: configs/dcnv2/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/dcn/deform_conv.py#L15
      Version: v2.0.0

Models:
  - Name: faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks v2
    Config: configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 4.1
      inference time (ms/im):
        - value: 56.82
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200130-d099253b.pth
  - Name: faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco
    In Collection: Deformable Convolutional Networks v2
    Config: configs/dcnv2/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco.py
    Metadata:
      Training Memory (GB): 4.2
      inference time (ms/im):
        - value: 57.47
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco/faster_rcnn_r50_fpn_mdconv_c3-c5_group4_1x_coco_20200130-01262257.pth
  - Name: faster_rcnn_r50_fpn_mdpool_1x_coco
    In Collection: Deformable Convolutional Networks v2
    Config: configs/dcnv2/faster_rcnn_r50_fpn_mdpool_1x_coco.py
    Metadata:
      Training Memory (GB): 5.8
      inference time (ms/im):
        - value: 60.24
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/faster_rcnn_r50_fpn_mdpool_1x_coco/faster_rcnn_r50_fpn_mdpool_1x_coco_20200307-c0df27ff.pth
  - Name: mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks v2
    Config: configs/dcnv2/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 4.5
      inference time (ms/im):
        - value: 66.23
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dcn/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_mdconv_c3-c5_1x_coco_20200203-ad97591f.pth
  - Name: mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco
    In Collection: Deformable Convolutional Networks v2
    Config: configs/dcnv2/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco.py
    Metadata:
      Training Memory (GB): 3.1
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
        - Mixed Precision Training
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/fp16/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco/mask_rcnn_r50_fpn_fp16_mdconv_c3-c5_1x_coco_20210520_180434-cf8fefa5.pth
4,213
32.983871
182
yml
mmdetection
mmdetection-master/configs/ddod/README.md
# DDOD

> [Disentangle Your Dense Object Detector](https://arxiv.org/pdf/2107.02963.pdf)

<!-- [ALGORITHM] -->

## Abstract

Deep learning-based dense object detectors have achieved great success in the past few years and have been applied to numerous multimedia applications such as video understanding. However, the current training pipeline for dense detectors is compromised to lots of conjunctions that may not hold. In this paper, we investigate three such important conjunctions: 1) only samples assigned as positive in classification head are used to train the regression head; 2) classification and regression share the same input feature and computational fields defined by the parallel head architecture; and 3) samples distributed in different feature pyramid layers are treated equally when computing the loss. We first carry out a series of pilot experiments to show disentangling such conjunctions can lead to persistent performance improvement. Then, based on these findings, we propose Disentangled Dense Object Detector (DDOD), in which simple and effective disentanglement mechanisms are designed and integrated into the current state-of-the-art dense object detectors. Extensive experiments on MS COCO benchmark show that our approach can lead to 2.0 mAP, 2.4 mAP and 2.2 mAP absolute improvements on RetinaNet, FCOS, and ATSS baselines with negligible extra overhead. Notably, our best model reaches 55.0 mAP on the COCO test-dev set and 93.5 AP on the hard subset of WIDER FACE, achieving new state-of-the-art performance on these two competitive benchmarks. Code is available at https://github.com/zehuichen123/DDOD.

<div align=center>
<img src="https://user-images.githubusercontent.com/17425982/159212920-2e99d433-82c9-46cf-8f3a-32fdf3c566f5.png"/>
</div>

## Results and Models

| Model | Backbone | Style | Lr schd | Mem (GB) | box AP | Config | Download |
| :-------: | :------: | :-----: | :-----: | :------: | :----: | :----: | :------: |
| DDOD-ATSS | R-50 | pytorch | 1x | 3.4 | 41.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ddod/ddod_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737.log.json) |

## Citation

```latex
@inproceedings{chen2021disentangle,
  title={Disentangle Your Dense Object Detector},
  author={Chen, Zehui and Yang, Chenhongyi and Li, Qiaofei and Zhao, Feng and Zha, Zheng-Jun and Wu, Feng},
  booktitle={Proceedings of the 29th ACM International Conference on Multimedia},
  pages={4939--4948},
  year={2021}
}
```
3,435
106.375
1,513
md
mmdetection
mmdetection-master/configs/ddod/ddod_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='DDOD',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='DDODHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_iou=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    train_cfg=dict(
        # `assigner` is the assigner for the classification branch;
        # `reg_assigner` is the separate one for the regression branch.
        assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
        reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# `persistent_workers` is only valid when PyTorch>=1.7.0
data = dict(persistent_workers=True)

# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
2,101
29.911765
79
py
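A sketch of instantiating this detector from its config through mmdet's registry (mmdet v2.x API; the path assumes an mmdetection checkout as the working directory):

```python
# Build the DDOD detector from the config via the mmdet v2.x registry API.
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/ddod/ddod_r50_fpn_1x_coco.py')
model = build_detector(
    cfg.model, train_cfg=cfg.get('train_cfg'), test_cfg=cfg.get('test_cfg'))
print(type(model).__name__)  # DDOD
```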
mmdetection
mmdetection-master/configs/ddod/metafile.yml
Collections:
  - Name: DDOD
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - DDOD
        - FPN
        - ResNet
    Paper:
      URL: https://arxiv.org/pdf/2107.02963.pdf
      Title: 'Disentangle Your Dense Object Detector'
    README: configs/ddod/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.25.0/mmdet/models/detectors/ddod.py#L6
      Version: v2.25.0

Models:
  - Name: ddod_r50_fpn_1x_coco
    In Collection: DDOD
    Config: configs/ddod/ddod_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.4
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/ddod/ddod_r50_fpn_1x_coco/ddod_r50_fpn_1x_coco_20220523_223737-29b2fc67.pth
951
27
136
yml
mmdetection
mmdetection-master/configs/deepfashion/README.md
# DeepFashion

> [DeepFashion: Powering Robust Clothes Recognition and Retrieval With Rich Annotations](https://openaccess.thecvf.com/content_cvpr_2016/html/Liu_DeepFashion_Powering_Robust_CVPR_2016_paper.html)

<!-- [DATASET] -->

## Abstract

Recent advances in clothes recognition have been driven by the construction of clothes datasets. Existing datasets are limited in the amount of annotations and are difficult to cope with the various challenges in real-world applications. In this work, we introduce DeepFashion, a large-scale clothes dataset with comprehensive annotations. It contains over 800,000 images, which are richly annotated with massive attributes, clothing landmarks, and correspondence of images taken under different scenarios including store, street snapshot, and consumer. Such rich annotations enable the development of powerful algorithms in clothes recognition and facilitating future researches. To demonstrate the advantages of DeepFashion, we propose a new deep model, namely FashionNet, which learns clothing features by jointly predicting clothing attributes and landmarks. The estimated landmarks are then employed to pool or gate the learned features. It is optimized in an iterative manner. Extensive experiments demonstrate the effectiveness of FashionNet and the usefulness of DeepFashion.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143876310-08470a6a-ea3a-4ec1-a6f2-8ec5df36a8a0.png"/>
</div>

## Introduction

[MMFashion](https://github.com/open-mmlab/mmfashion) develops a "fashion parsing and segmentation" module based on the dataset [DeepFashion-Inshop](https://drive.google.com/drive/folders/0B7EVK8r0v71pVDZFQXRsMDZCX1E?usp=sharing). Its annotations follow the COCO style. To use it, you need to first download the data. Note that we only use "img_highres" in this task. The file tree should look like this:

```sh
mmdetection
├── mmdet
├── tools
├── configs
├── data
│   ├── DeepFashion
│   │   ├── In-shop
│   │   ├── Anno
│   │   │   ├── segmentation
│   │   │   │   ├── DeepFashion_segmentation_train.json
│   │   │   │   ├── DeepFashion_segmentation_query.json
│   │   │   │   ├── DeepFashion_segmentation_gallery.json
│   │   │   ├── list_bbox_inshop.txt
│   │   │   ├── list_description_inshop.json
│   │   │   ├── list_item_inshop.txt
│   │   │   └── list_landmarks_inshop.txt
│   │   ├── Eval
│   │   │   └── list_eval_partition.txt
│   │   ├── Img
│   │   │   ├── img
│   │   │   │   └── XXX.jpg
│   │   │   ├── img_highres
│   │   │   │   └── XXX.jpg
```

After that, you can train Mask R-CNN R-50 on the DeepFashion In-shop dataset by launching training with the `mask_rcnn_r50_fpn_15e_deepfashion.py` config or by creating your own config file.

## Results and Models

| Backbone | Model type | Dataset | bbox detection Average Precision | segmentation Average Precision | Config | Download (Google) |
| :------: | :--------: | :-----------------: | :------------------------------: | :----------------------------: | :----: | :---------------: |
| ResNet50 | Mask RCNN | DeepFashion-In-shop | 0.599 | 0.584 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion/mask_rcnn_r50_fpn_15e_deepfashion_20200329_192752.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion/20200329_192752.log.json) |

## Citation

```latex
@inproceedings{liuLQWTcvpr16DeepFashion,
  author = {Liu, Ziwei and Luo, Ping and Qiu, Shi and Wang, Xiaogang and Tang, Xiaoou},
  title = {DeepFashion: Powering Robust Clothes Recognition and Retrieval with Rich Annotations},
  booktitle = {Proceedings of IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  month = {June},
  year = {2016}
}
```
4,713
65.394366
1,083
md
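After training (or downloading the checkpoint linked in the table above), a minimal inference sketch with mmdet's high-level API; the image path and local checkpoint filename are illustrative:

```python
# Minimal inference sketch with the mmdet v2.x high-level API.
from mmdet.apis import init_detector, inference_detector

config = 'configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py'
checkpoint = 'mask_rcnn_r50_fpn_15e_deepfashion_20200329_192752.pth'
model = init_detector(config, checkpoint, device='cuda:0')
# For a Mask R-CNN model, the result is (bbox_results, mask_results).
result = inference_detector(model, 'demo/clothes.jpg')
```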
mmdetection
mmdetection-master/configs/deepfashion/mask_rcnn_r50_fpn_15e_deepfashion.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/deepfashion.py', '../_base_/schedules/schedule_1x.py',
    '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=15), mask_head=dict(num_classes=15)))
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=15)
351
31
78
py
mmdetection
mmdetection-master/configs/deformable_detr/README.md
# Deformable DETR

> [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159)

<!-- [ALGORITHM] -->

## Abstract

DETR has been recently proposed to eliminate the need for many hand-designed components in object detection while demonstrating good performance. However, it suffers from slow convergence and limited feature spatial resolution, due to the limitation of Transformer attention modules in processing image feature maps. To mitigate these issues, we propose Deformable DETR, whose attention modules only attend to a small set of key sampling points around a reference. Deformable DETR can achieve better performance than DETR (especially on small objects) with 10× fewer training epochs. Extensive experiments on the COCO benchmark demonstrate the effectiveness of our approach.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143877617-ad9b24fd-77ce-46aa-9689-1a44b5594132.png"/>
</div>

## Results and Models

| Backbone | Model | Lr schd | box AP | Config | Download |
| :------: | :---------------------------------: | :-----: | :----: | :----: | :------: |
| R-50 | Deformable DETR | 50e | 44.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.log.json) |
| R-50 | + iterative bounding box refinement | 50e | 46.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco/deformable_detr_refine_r50_16x2_50e_coco_20210419_220503-5f5dff21.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco/deformable_detr_refine_r50_16x2_50e_coco_20210419_220503-5f5dff21.log.json) |
| R-50 | ++ two-stage Deformable DETR | 50e | 46.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.log.json) |

## Notes

1. All models are trained with batch size 32.
2. The performance is unstable: `Deformable DETR` and `iterative bounding box refinement` may fluctuate by about 0.3 mAP, and `two-stage Deformable DETR` by about 0.2 mAP.

## Citation

We provide the config files for Deformable DETR: [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159).

```latex
@inproceedings{
  zhu2021deformable,
  title={Deformable DETR: Deformable Transformers for End-to-End Object Detection},
  author={Xizhou Zhu and Weijie Su and Lewei Lu and Bin Li and Xiaogang Wang and Jifeng Dai},
  booktitle={International Conference on Learning Representations},
  year={2021},
  url={https://openreview.net/forum?id=gZ9hCDWe6ke}
}
```
4,895
115.571429
679
md
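The two stronger variants in the table are one-line deltas over the base config (see the refine and two-stage configs below). Equivalently, sketched as in-memory overrides with mmcv's Config:

```python
# The refine / two-stage variants just toggle flags on the bbox_head,
# equivalent to the one-line delta configs that follow in this dataset.
from mmcv import Config

cfg = Config.fromfile(
    'configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py')
cfg.model.bbox_head.with_box_refine = True  # iterative bbox refinement
cfg.model.bbox_head.as_two_stage = True     # two-stage Deformable DETR
```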
mmdetection
mmdetection-master/configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
    type='DeformableDETR',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='ChannelMapper',
        in_channels=[512, 1024, 2048],
        kernel_size=1,
        out_channels=256,
        act_cfg=None,
        norm_cfg=dict(type='GN', num_groups=32),
        num_outs=4),
    bbox_head=dict(
        type='DeformableDETRHead',
        num_query=300,
        num_classes=80,
        in_channels=2048,
        sync_cls_avg_factor=True,
        as_two_stage=False,
        transformer=dict(
            type='DeformableDetrTransformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=dict(
                        type='MultiScaleDeformableAttention', embed_dims=256),
                    feedforward_channels=1024,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DeformableDetrTransformerDecoder',
                num_layers=6,
                return_intermediate=True,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1),
                        dict(
                            type='MultiScaleDeformableAttention',
                            embed_dims=256)
                    ],
                    feedforward_channels=1024,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm')))),
        positional_encoding=dict(
            type='SinePositionalEncoding',
            num_feats=128,
            normalize=True,
            offset=-0.5),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=2.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='HungarianAssigner',
            cls_cost=dict(type='FocalLossCost', weight=2.0),
            reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
            iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
    test_cfg=dict(max_per_img=100))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# train_pipeline, NOTE the img_scale and the Pad's size_divisor are different
# from the default settings in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    keep_ratio=True)
            ],
            [
                dict(
                    type='Resize',
                    # The aspect ratio of every image in the train dataset
                    # is < 7, following the original implementation.
                    img_scale=[(400, 4200), (500, 4200), (600, 4200)],
                    multiscale_mode='value',
                    keep_ratio=True),
                dict(
                    type='RandomCrop',
                    crop_type='absolute_range',
                    crop_size=(384, 600),
                    allow_negative_crop=True),
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    override=True,
                    keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=1),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
# test_pipeline, NOTE the Pad's size_divisor is different from the default
# setting (size_divisor=32), though this has little effect on performance
# either way.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=1),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(filter_empty_gt=False, pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='AdamW',
    lr=2e-4,
    weight_decay=0.0001,
    paramwise_cfg=dict(
        custom_keys={
            'backbone': dict(lr_mult=0.1),
            'sampling_offsets': dict(lr_mult=0.1),
            'reference_points': dict(lr_mult=0.1)
        }))
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[40])
runner = dict(type='EpochBasedRunner', max_epochs=50)

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (16 GPUs) x (2 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)
6,666
36.455056
79
py
mmdetection
mmdetection-master/configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py
_base_ = 'deformable_detr_r50_16x2_50e_coco.py'
model = dict(bbox_head=dict(with_box_refine=True))
99
32.333333
50
py
mmdetection
mmdetection-master/configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py
_base_ = 'deformable_detr_refine_r50_16x2_50e_coco.py'
model = dict(bbox_head=dict(as_two_stage=True))
103
33.666667
54
py
mmdetection
mmdetection-master/configs/deformable_detr/metafile.yml
Collections:
  - Name: Deformable DETR
    Metadata:
      Training Data: COCO
      Training Techniques:
        - AdamW
        - Multi Scale Train
        - Gradient Clip
      Training Resources: 8x V100 GPUs
      Architecture:
        - ResNet
        - Transformer
    Paper:
      URL: https://openreview.net/forum?id=gZ9hCDWe6ke
      Title: 'Deformable DETR: Deformable Transformers for End-to-End Object Detection'
    README: configs/deformable_detr/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/deformable_detr.py#L6
      Version: v2.12.0

Models:
  - Name: deformable_detr_r50_16x2_50e_coco
    In Collection: Deformable DETR
    Config: configs/deformable_detr/deformable_detr_r50_16x2_50e_coco.py
    Metadata:
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_r50_16x2_50e_coco/deformable_detr_r50_16x2_50e_coco_20210419_220030-a12b9512.pth
  - Name: deformable_detr_refine_r50_16x2_50e_coco
    In Collection: Deformable DETR
    Config: configs/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco.py
    Metadata:
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_refine_r50_16x2_50e_coco/deformable_detr_refine_r50_16x2_50e_coco_20210419_220503-5f5dff21.pth
  - Name: deformable_detr_twostage_refine_r50_16x2_50e_coco
    In Collection: Deformable DETR
    Config: configs/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco.py
    Metadata:
      Epochs: 50
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/deformable_detr/deformable_detr_twostage_refine_r50_16x2_50e_coco/deformable_detr_twostage_refine_r50_16x2_50e_coco_20210419_220613-9d28ab72.pth
2,108
36
205
yml
mmdetection
mmdetection-master/configs/detectors/README.md
# DetectoRS

> [DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution](https://arxiv.org/abs/2006.02334)

<!-- [ALGORITHM] -->

## Abstract

Many modern object detectors demonstrate outstanding performances by using the mechanism of looking and thinking twice. In this paper, we explore this mechanism in the backbone design for object detection. At the macro level, we propose Recursive Feature Pyramid, which incorporates extra feedback connections from Feature Pyramid Networks into the bottom-up backbone layers. At the micro level, we propose Switchable Atrous Convolution, which convolves the features with different atrous rates and gathers the results using switch functions. Combining them results in DetectoRS, which significantly improves the performances of object detection. On COCO test-dev, DetectoRS achieves state-of-the-art 55.7% box AP for object detection, 48.5% mask AP for instance segmentation, and 50.0% PQ for panoptic segmentation.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143877901-24451581-2c50-4a54-b000-c4cb111e29ad.png"/>
</div>

## Introduction

DetectoRS requires the COCO and [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) datasets for training. You need to download and extract the latter into the COCO dataset path. The directory should look like this:

```none
mmdetection
├── mmdet
├── tools
├── configs
├── data
│   ├── coco
│   │   ├── annotations
│   │   ├── train2017
│   │   ├── val2017
│   │   ├── test2017
│   │   ├── stuffthingmaps
```

## Results and Models

DetectoRS includes two major components:

- Recursive Feature Pyramid (RFP).
- Switchable Atrous Convolution (SAC).

They can be used independently; combining them results in DetectoRS. The results on COCO 2017 val are shown in the table below.

| Method | Detector | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :-------: | :-----------------: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| RFP | Cascade + ResNet-50 | 1x | 7.5 | - | 44.8 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco-8cf51bfd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco_20200624_104126.log.json) |
| SAC | Cascade + ResNet-50 | 1x | 5.6 | - | 45.0 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco-24bfda62.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco_20200624_104402.log.json) |
| DetectoRS | Cascade + ResNet-50 | 1x | 9.9 | - | 47.4 | | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco-32a10ba0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco_20200706_001203.log.json) |
| RFP | HTC + ResNet-50 | 1x | 11.2 | - | 46.6 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/htc_r50_rfp_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco-8ff87c51.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco_20200624_103053.log.json) |
| SAC | HTC + ResNet-50 | 1x | 9.3 | - | 46.4 | 40.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/htc_r50_sac_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco-bfa60c54.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco_20200624_103111.log.json) |
| DetectoRS | HTC + ResNet-50 | 1x | 13.6 | - | 49.1 | 42.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/detectors_htc_r50_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco_20200624_103659.log.json) |
| DetectoRS | HTC + ResNet-101 | 20e | 19.6 | | 50.5 | 43.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detectors/detectors_htc_r101_20e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r101_20e_coco/detectors_htc_r101_20e_coco_20210419_203638-348d533b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r101_20e_coco/detectors_htc_r101_20e_coco_20210419_203638.log.json) |

*Note*: This is a re-implementation based on MMDetection-V2; the original implementation is based on MMDetection-V1.

## Citation

We provide the config files for [DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution](https://arxiv.org/pdf/2006.02334.pdf).

```latex
@article{qiao2020detectors,
  title={DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution},
  author={Qiao, Siyuan and Chen, Liang-Chieh and Yuille, Alan},
  journal={arXiv preprint arXiv:2006.02334},
  year={2020}
}
```
7,232
102.328571
816
md
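A toy PyTorch sketch of the Switchable Atrous Convolution idea the README describes: the same weights are applied at two atrous rates and blended by a learned switch. This is an illustration of the mechanism only, not mmdet's `SAC` implementation (which also adds weight averaging and global context):

```python
# Toy Switchable Atrous Convolution: one set of weights convolved at two
# dilation rates, blended by a per-pixel learned switch.
import torch
import torch.nn as nn
import torch.nn.functional as F

class ToySAC(nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.weight = nn.Parameter(torch.randn(channels, channels, 3, 3) * 0.01)
        self.switch = nn.Conv2d(channels, 1, kernel_size=1)  # switch function

    def forward(self, x):
        s = torch.sigmoid(self.switch(x))
        y1 = F.conv2d(x, self.weight, padding=1, dilation=1)  # atrous rate 1
        y3 = F.conv2d(x, self.weight, padding=3, dilation=3)  # atrous rate 3
        return s * y1 + (1 - s) * y3  # gather results with the switch

out = ToySAC(16)(torch.randn(1, 16, 32, 32))
print(out.shape)  # torch.Size([1, 16, 32, 32])
```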
mmdetection
mmdetection-master/configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        type='DetectoRS_ResNet',
        conv_cfg=dict(type='ConvAWS'),
        output_img=True),
    neck=dict(
        type='RFP',
        rfp_steps=2,
        aspp_out_channels=64,
        aspp_dilations=(1, 3, 6, 1),
        rfp_backbone=dict(
            rfp_inplanes=256,
            type='DetectoRS_ResNet',
            depth=50,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=True,
            conv_cfg=dict(type='ConvAWS'),
            pretrained='torchvision://resnet50',
            style='pytorch')))
851
28.37931
72
py
mmdetection
mmdetection-master/configs/detectors/cascade_rcnn_r50_sac_1x_coco.py
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        type='DetectoRS_ResNet',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True)))
382
28.461538
72
py
mmdetection
mmdetection-master/configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py
_base_ = [
    '../_base_/models/cascade_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    backbone=dict(
        type='DetectoRS_ResNet',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True),
        output_img=True),
    neck=dict(
        type='RFP',
        rfp_steps=2,
        aspp_out_channels=64,
        aspp_dilations=(1, 3, 6, 1),
        rfp_backbone=dict(
            rfp_inplanes=256,
            type='DetectoRS_ResNet',
            depth=50,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=True,
            conv_cfg=dict(type='ConvAWS'),
            sac=dict(type='SAC', use_deform=True),
            stage_with_sac=(False, True, True, True),
            pretrained='torchvision://resnet50',
            style='pytorch')))
1,053
30.939394
72
py
mmdetection
mmdetection-master/configs/detectors/detectors_htc_r101_20e_coco.py
_base_ = '../htc/htc_r101_fpn_20e_coco.py'
model = dict(
    backbone=dict(
        type='DetectoRS_ResNet',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True),
        output_img=True),
    neck=dict(
        type='RFP',
        rfp_steps=2,
        aspp_out_channels=64,
        aspp_dilations=(1, 3, 6, 1),
        rfp_backbone=dict(
            rfp_inplanes=256,
            type='DetectoRS_ResNet',
            depth=101,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=True,
            conv_cfg=dict(type='ConvAWS'),
            sac=dict(type='SAC', use_deform=True),
            stage_with_sac=(False, True, True, True),
            pretrained='torchvision://resnet101',
            style='pytorch')))
920
30.758621
57
py
mmdetection
mmdetection-master/configs/detectors/detectors_htc_r50_1x_coco.py
_base_ = '../htc/htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='DetectoRS_ResNet',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True),
        output_img=True),
    neck=dict(
        type='RFP',
        rfp_steps=2,
        aspp_out_channels=64,
        aspp_dilations=(1, 3, 6, 1),
        rfp_backbone=dict(
            rfp_inplanes=256,
            type='DetectoRS_ResNet',
            depth=50,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=True,
            conv_cfg=dict(type='ConvAWS'),
            sac=dict(type='SAC', use_deform=True),
            stage_with_sac=(False, True, True, True),
            pretrained='torchvision://resnet50',
            style='pytorch')))
916
30.62069
57
py
mmdetection
mmdetection-master/configs/detectors/htc_r50_rfp_1x_coco.py
_base_ = '../htc/htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='DetectoRS_ResNet',
        conv_cfg=dict(type='ConvAWS'),
        output_img=True),
    neck=dict(
        type='RFP',
        rfp_steps=2,
        aspp_out_channels=64,
        aspp_dilations=(1, 3, 6, 1),
        rfp_backbone=dict(
            rfp_inplanes=256,
            type='DetectoRS_ResNet',
            depth=50,
            num_stages=4,
            out_indices=(0, 1, 2, 3),
            frozen_stages=1,
            norm_cfg=dict(type='BN', requires_grad=True),
            norm_eval=True,
            conv_cfg=dict(type='ConvAWS'),
            pretrained='torchvision://resnet50',
            style='pytorch')))
714
27.6
57
py
mmdetection
mmdetection-master/configs/detectors/htc_r50_sac_1x_coco.py
_base_ = '../htc/htc_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='DetectoRS_ResNet',
        conv_cfg=dict(type='ConvAWS'),
        sac=dict(type='SAC', use_deform=True),
        stage_with_sac=(False, True, True, True)))
245
26.333333
50
py
mmdetection
mmdetection-master/configs/detectors/metafile.yml
Collections:
  - Name: DetectoRS
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - ASPP
        - FPN
        - RFP
        - RPN
        - ResNet
        - RoIAlign
        - SAC
    Paper:
      URL: https://arxiv.org/abs/2006.02334
      Title: 'DetectoRS: Detecting Objects with Recursive Feature Pyramid and Switchable Atrous Convolution'
    README: configs/detectors/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/backbones/detectors_resnet.py#L205
      Version: v2.2.0

Models:
  - Name: cascade_rcnn_r50_rfp_1x_coco
    In Collection: DetectoRS
    Config: configs/detectors/cascade_rcnn_r50_rfp_1x_coco.py
    Metadata:
      Training Memory (GB): 7.5
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_rfp_1x_coco/cascade_rcnn_r50_rfp_1x_coco-8cf51bfd.pth
  - Name: cascade_rcnn_r50_sac_1x_coco
    In Collection: DetectoRS
    Config: configs/detectors/cascade_rcnn_r50_sac_1x_coco.py
    Metadata:
      Training Memory (GB): 5.6
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/cascade_rcnn_r50_sac_1x_coco/cascade_rcnn_r50_sac_1x_coco-24bfda62.pth
  - Name: detectors_cascade_rcnn_r50_1x_coco
    In Collection: DetectoRS
    Config: configs/detectors/detectors_cascade_rcnn_r50_1x_coco.py
    Metadata:
      Training Memory (GB): 9.9
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 47.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_cascade_rcnn_r50_1x_coco/detectors_cascade_rcnn_r50_1x_coco-32a10ba0.pth
  - Name: htc_r50_rfp_1x_coco
    In Collection: DetectoRS
    Config: configs/detectors/htc_r50_rfp_1x_coco.py
    Metadata:
      Training Memory (GB): 11.2
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.6
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 40.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_rfp_1x_coco/htc_r50_rfp_1x_coco-8ff87c51.pth
  - Name: htc_r50_sac_1x_coco
    In Collection: DetectoRS
    Config: configs/detectors/htc_r50_sac_1x_coco.py
    Metadata:
      Training Memory (GB): 9.3
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.4
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 40.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/htc_r50_sac_1x_coco/htc_r50_sac_1x_coco-bfa60c54.pth
  - Name: detectors_htc_r50_1x_coco
    In Collection: DetectoRS
    Config: configs/detectors/detectors_htc_r50_1x_coco.py
    Metadata:
      Training Memory (GB): 13.6
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 49.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 42.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/detectors/detectors_htc_r50_1x_coco/detectors_htc_r50_1x_coco-329b1453.pth
3,568
30.034783
153
yml
mmdetection
mmdetection-master/configs/detr/README.md
# DETR

> [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872)

<!-- [ALGORITHM] -->

## Abstract

We present a new method that views object detection as a direct set prediction problem. Our approach streamlines the detection pipeline, effectively removing the need for many hand-designed components like a non-maximum suppression procedure or anchor generation that explicitly encode our prior knowledge about the task. The main ingredients of the new framework, called DEtection TRansformer or DETR, are a set-based global loss that forces unique predictions via bipartite matching, and a transformer encoder-decoder architecture. Given a fixed small set of learned object queries, DETR reasons about the relations of the objects and the global image context to directly output the final set of predictions in parallel. The new model is conceptually simple and does not require a specialized library, unlike many other modern detectors. DETR demonstrates accuracy and run-time performance on par with the well-established and highly-optimized Faster RCNN baseline on the challenging COCO object detection dataset. Moreover, DETR can be easily generalized to produce panoptic segmentation in a unified manner. We show that it significantly outperforms competitive baselines.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143878072-0a7434e4-416b-4315-aeea-a8297f4d6453.png"/>
</div>

## Results and Models

| Backbone | Model | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :------: | :---: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| R-50 | DETR | 150e | 7.9 | | 40.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/detr/detr_r50_8x2_150e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835.log.json) |

## Citation

We provide the config files for DETR: [End-to-End Object Detection with Transformers](https://arxiv.org/abs/2005.12872).

```latex
@inproceedings{detr,
  author    = {Nicolas Carion and Francisco Massa and Gabriel Synnaeve and Nicolas Usunier and Alexander Kirillov and Sergey Zagoruyko},
  title     = {End-to-End Object Detection with Transformers},
  booktitle = {ECCV},
  year      = {2020}
}
```
3,299
85.842105
1,176
md
mmdetection
mmdetection-master/configs/detr/detr_r50_8x2_150e_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]
model = dict(
    type='DETR',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(3, ),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    bbox_head=dict(
        type='DETRHead',
        num_classes=80,
        in_channels=2048,
        transformer=dict(
            type='Transformer',
            encoder=dict(
                type='DetrTransformerEncoder',
                num_layers=6,
                transformerlayers=dict(
                    type='BaseTransformerLayer',
                    attn_cfgs=[
                        dict(
                            type='MultiheadAttention',
                            embed_dims=256,
                            num_heads=8,
                            dropout=0.1)
                    ],
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
            decoder=dict(
                type='DetrTransformerDecoder',
                return_intermediate=True,
                num_layers=6,
                transformerlayers=dict(
                    type='DetrTransformerDecoderLayer',
                    attn_cfgs=dict(
                        type='MultiheadAttention',
                        embed_dims=256,
                        num_heads=8,
                        dropout=0.1),
                    feedforward_channels=2048,
                    ffn_dropout=0.1,
                    operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
                                     'ffn', 'norm')),
            )),
        positional_encoding=dict(
            type='SinePositionalEncoding', num_feats=128, normalize=True),
        loss_cls=dict(
            type='CrossEntropyLoss',
            bg_cls_weight=0.1,
            use_sigmoid=False,
            loss_weight=1.0,
            class_weight=1.0),
        loss_bbox=dict(type='L1Loss', loss_weight=5.0),
        loss_iou=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='HungarianAssigner',
            cls_cost=dict(type='ClassificationCost', weight=1.),
            reg_cost=dict(type='BBoxL1Cost', weight=5.0, box_format='xywh'),
            iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0))),
    test_cfg=dict(max_per_img=100))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# train_pipeline, NOTE: the img_scale and the Pad's size_divisor are
# different from the default settings in mmdet.
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='AutoAugment',
        policies=[
            [
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    keep_ratio=True)
            ],
            [
                dict(
                    type='Resize',
                    img_scale=[(400, 1333), (500, 1333), (600, 1333)],
                    multiscale_mode='value',
                    keep_ratio=True),
                dict(
                    type='RandomCrop',
                    crop_type='absolute_range',
                    crop_size=(384, 600),
                    allow_negative_crop=True),
                dict(
                    type='Resize',
                    img_scale=[(480, 1333), (512, 1333), (544, 1333),
                               (576, 1333), (608, 1333), (640, 1333),
                               (672, 1333), (704, 1333), (736, 1333),
                               (768, 1333), (800, 1333)],
                    multiscale_mode='value',
                    override=True,
                    keep_ratio=True)
            ]
        ]),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=1),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
# test_pipeline, NOTE: the Pad's size_divisor is different from the default
# setting (size_divisor=32). There is little effect on performance whether
# we use the default setting or size_divisor=1.
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=1),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(
    type='AdamW',
    lr=0.0001,
    weight_decay=0.0001,
    paramwise_cfg=dict(
        custom_keys={'backbone': dict(lr_mult=0.1, decay_mult=1.0)}))
optimizer_config = dict(grad_clip=dict(max_norm=0.1, norm_type=2))
# learning policy
lr_config = dict(policy='step', step=[100])
runner = dict(type='EpochBasedRunner', max_epochs=150)
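A quick way to exercise this config is single-image inference through mmdet's high-level API; the checkpoint path below is illustrative and should point at the file you actually downloaded from the model zoo.

```python
# Minimal inference sketch for the DETR config above (mmdet 2.x API).
from mmdet.apis import init_detector, inference_detector

config = 'configs/detr/detr_r50_8x2_150e_coco.py'
checkpoint = 'checkpoints/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth'

model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')  # per-class (N, 5) arrays
model.show_result('demo/demo.jpg', result, out_file='result.jpg')
```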
5,858
37.801325
79
py
mmdetection
mmdetection-master/configs/detr/metafile.yml
Collections:
  - Name: DETR
    Metadata:
      Training Data: COCO
      Training Techniques:
        - AdamW
        - Multi Scale Train
        - Gradient Clip
      Training Resources: 8x V100 GPUs
      Architecture:
        - ResNet
        - Transformer
    Paper:
      URL: https://arxiv.org/abs/2005.12872
      Title: 'End-to-End Object Detection with Transformers'
    README: configs/detr/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.7.0/mmdet/models/detectors/detr.py#L7
      Version: v2.7.0

Models:
  - Name: detr_r50_8x2_150e_coco
    In Collection: DETR
    Config: configs/detr/detr_r50_8x2_150e_coco.py
    Metadata:
      Training Memory (GB): 7.9
      Epochs: 150
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/detr/detr_r50_8x2_150e_coco/detr_r50_8x2_150e_coco_20201130_194835-2c4b8974.pth
971
27.588235
140
yml
mmdetection
mmdetection-master/configs/double_heads/README.md
# Double Heads

> [Rethinking Classification and Localization for Object Detection](https://arxiv.org/abs/1904.06493)

<!-- [ALGORITHM] -->

## Abstract

Two head structures (i.e. fully connected head and convolution head) have been widely used in R-CNN based detectors for classification and localization tasks. However, there is a lack of understanding of how these two head structures work for these two tasks. To address this issue, we perform a thorough analysis and find an interesting fact that the two head structures have opposite preferences towards the two tasks. Specifically, the fully connected head (fc-head) is more suitable for the classification task, while the convolution head (conv-head) is more suitable for the localization task. Furthermore, we examine the output feature maps of both heads and find that fc-head has more spatial sensitivity than conv-head. Thus, fc-head has more capability to distinguish a complete object from part of an object, but is not robust at regressing the whole object. Based upon these findings, we propose a Double-Head method, which has a fully connected head focusing on classification and a convolution head for bounding box regression. Without bells and whistles, our method gains +3.5 and +2.8 AP on the MS COCO dataset over Feature Pyramid Network (FPN) baselines with ResNet-50 and ResNet-101 backbones, respectively.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143879010-e30f654b-f93e-44b2-a186-c251fdca5bda.png"/>
</div>

## Results and Models

| Backbone | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| R-50-FPN | pytorch | 1x | 6.8 | 9.5 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130_220238.log.json) |

## Citation

```latex
@article{wu2019rethinking,
  title={Rethinking Classification and Localization for Object Detection},
  author={Yue Wu and Yinpeng Chen and Lu Yuan and Zicheng Liu and Lijuan Wang and Hongzhi Li and Yun Fu},
  year={2019},
  eprint={1904.06493},
  archivePrefix={arXiv},
  primaryClass={cs.CV}
}
```
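To make the fc-head/conv-head split concrete, here is a toy PyTorch sketch of the idea; it is deliberately simplified and is not mmdet's `DoubleConvFCBBoxHead` (the production config follows in the next file), and all layer sizes are illustrative.

```python
# Toy illustration of the Double-Head idea: an fc branch classifies,
# a conv branch regresses boxes, both reading the same 7x7 RoI feature.
import torch
import torch.nn as nn


class ToyDoubleHead(nn.Module):
    def __init__(self, in_ch=256, num_classes=80):
        super().__init__()
        self.fc_head = nn.Sequential(          # classification branch
            nn.Flatten(),
            nn.Linear(in_ch * 7 * 7, 1024), nn.ReLU(),
            nn.Linear(1024, num_classes + 1))  # +1 for background
        self.conv_head = nn.Sequential(        # localization branch
            nn.Conv2d(in_ch, in_ch, 3, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1), nn.Flatten(),
            nn.Linear(in_ch, 4 * num_classes))  # per-class box deltas

    def forward(self, roi_feat):               # roi_feat: (N, 256, 7, 7)
        return self.fc_head(roi_feat), self.conv_head(roi_feat)


scores, deltas = ToyDoubleHead()(torch.randn(8, 256, 7, 7))
```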
3,394
101.878788
1,223
md
mmdetection
mmdetection-master/configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        type='DoubleHeadRoIHead',
        reg_roi_scale_factor=1.3,
        bbox_head=dict(
            _delete_=True,
            type='DoubleConvFCBBoxHead',
            num_convs=4,
            num_fcs=2,
            in_channels=256,
            conv_out_channels=1024,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0))))
845
34.25
77
py
mmdetection
mmdetection-master/configs/double_heads/metafile.yml
Collections:
  - Name: Rethinking Classification and Localization for Object Detection
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - FPN
        - RPN
        - ResNet
        - RoIAlign
    Paper:
      URL: https://arxiv.org/pdf/1904.06493
      Title: 'Rethinking Classification and Localization for Object Detection'
    README: configs/double_heads/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/models/roi_heads/double_roi_head.py#L6
      Version: v2.0.0

Models:
  - Name: dh_faster_rcnn_r50_fpn_1x_coco
    In Collection: Rethinking Classification and Localization for Object Detection
    Config: configs/double_heads/dh_faster_rcnn_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 6.8
      inference time (ms/im):
        - value: 105.26
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/double_heads/dh_faster_rcnn_r50_fpn_1x_coco/dh_faster_rcnn_r50_fpn_1x_coco_20200130-586b67df.pth
1,359
31.380952
157
yml
mmdetection
mmdetection-master/configs/dyhead/README.md
# DyHead

> [Dynamic Head: Unifying Object Detection Heads with Attentions](https://arxiv.org/abs/2106.08322)

<!-- [ALGORITHM] -->

## Abstract

The complex nature of combining localization and classification in object detection has resulted in the flourished development of methods. Previous works tried to improve the performance in various object detection heads but failed to present a unified view. In this paper, we present a novel dynamic head framework to unify object detection heads with attentions. By coherently combining multiple self-attention mechanisms between feature levels for scale-awareness, among spatial locations for spatial-awareness, and within output channels for task-awareness, the proposed approach significantly improves the representation ability of object detection heads without any computational overhead. Further experiments demonstrate the effectiveness and efficiency of the proposed dynamic head on the COCO benchmark. With a standard ResNeXt-101-DCN backbone, we largely improve the performance over popular object detectors and achieve a new state-of-the-art at 54.0 AP. Furthermore, with the latest transformer backbone and extra data, we can push the current best COCO result to a new record at 60.6 AP.

<div align=center>
<img src="https://user-images.githubusercontent.com/42844407/149169448-fcafb6d0-b866-41cc-9422-94de9f1e1761.png" height="300"/>
</div>

## Results and Models

| Method | Backbone | Style   | Setting      | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :----: | :------: | :-----: | :----------: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| ATSS   | R-50     | caffe   | reproduction | 1x | 5.4 | 13.2 | 42.5 | [config](./atss_r50_caffe_fpn_dyhead_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939-162888e6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939.log.json) |
| ATSS   | R-50     | pytorch | simple       | 1x | 4.9 | 13.7 | 43.3 | [config](./atss_r50_fpn_dyhead_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314.log.json) |

- We trained the above models with 4 GPUs and 4 `samples_per_gpu`.
- The `reproduction` setting aims to reproduce the official implementation based on Detectron2.
- The `simple` setting serves as a minimum example of using DyHead in MMDetection. Specifically,
  - it adds `DyHead` to `neck` after `FPN`,
  - it sets `stacked_convs=0` in `bbox_head`
  (a minimal config sketch of this recipe is given after the citation below).
- The `simple` setting achieves higher AP than the original implementation. We have not conducted an ablation study between the two settings.
- `dict(type='Pad', size_divisor=128)` may further improve AP by preferring spatial alignment across pyramid levels, although large padding reduces efficiency.

We also trained the model with a Swin-L backbone. Results are as below.

| Method | Backbone | Style | Setting      | Lr schd | mstrain  | box AP | Config | Download |
| :----: | :------: | :---: | :----------: | :-----: | :------: | :----: | :----: | :------: |
| ATSS   | Swin-L   | caffe | reproduction | 2x | 480~1200 | 56.2 | [config](./atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315-bc5b6516.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315.log.json) |

## Relation to Other Methods

- DyHead can be regarded as an improved [SEPC](https://arxiv.org/abs/2005.03101) with [DyReLU modules](https://arxiv.org/abs/2003.10027) and simplified [SE blocks](https://arxiv.org/abs/1709.01507).
- Xiyang Dai et al., the author team of DyHead, adopt it for [Dynamic DETR](https://openaccess.thecvf.com/content/ICCV2021/html/Dai_Dynamic_DETR_End-to-End_Object_Detection_With_Dynamic_Attention_ICCV_2021_paper.html). The description of the Dynamic Encoder in Sec. 3.2 will help you understand DyHead.

## Citation

```latex
@inproceedings{DyHead_CVPR2021,
  author    = {Dai, Xiyang and Chen, Yinpeng and Xiao, Bin and Chen, Dongdong and Liu, Mengchen and Yuan, Lu and Zhang, Lei},
  title     = {Dynamic Head: Unifying Object Detection Heads With Attentions},
  booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
  year      = {2021}
}
```
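The `simple` recipe referenced above can be expressed as a short config delta. This sketch assumes it lives under `configs/dyhead/` and inherits the stock ATSS baseline; the standalone `atss_r50_fpn_dyhead_1x_coco.py` in this folder spells out the same model without inheritance.

```python
# 'simple' DyHead recipe as a config delta: make `neck` a list so DyHead
# runs after FPN, and remove the stacked convs from the ATSS bbox head.
_base_ = '../atss/atss_r50_fpn_1x_coco.py'
model = dict(
    neck=[
        dict(
            type='FPN',
            in_channels=[256, 512, 1024, 2048],
            out_channels=256,
            start_level=1,
            add_extra_convs='on_output',
            num_outs=5),
        dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6)
    ],
    bbox_head=dict(stacked_convs=0))
```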
6,655
124.584906
1,098
md
mmdetection
mmdetection-master/configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='ATSS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    neck=[
        dict(
            type='FPN',
            in_channels=[256, 512, 1024, 2048],
            out_channels=256,
            start_level=1,
            add_extra_convs='on_output',
            num_outs=5),
        dict(
            type='DyHead',
            in_channels=256,
            out_channels=256,
            num_blocks=6,
            # disable zero_init_offset to follow official implementation
            zero_init_offset=False)
    ],
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        pred_kernel_size=1,  # follow DyHead official implementation
        stacked_convs=0,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128],
            center_offset=0.5),  # follow DyHead official implementation
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
# use caffe img_norm, size_divisor=128, pillow resize
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=(1333, 800),
        keep_ratio=True,
        backend='pillow'),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=128),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True, backend='pillow'),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=128),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
3,608
30.938053
73
py
mmdetection
mmdetection-master/configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='ATSS',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=[
        dict(
            type='FPN',
            in_channels=[256, 512, 1024, 2048],
            out_channels=256,
            start_level=1,
            add_extra_convs='on_output',
            num_outs=5),
        dict(type='DyHead', in_channels=256, out_channels=256, num_blocks=6)
    ],
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=0,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))
# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
2,043
29.969697
79
py
mmdetection
mmdetection-master/configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py
_base_ = '../_base_/default_runtime.py'

pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth'  # noqa
model = dict(
    type='ATSS',
    backbone=dict(
        type='SwinTransformer',
        pretrain_img_size=384,
        embed_dims=192,
        depths=[2, 2, 18, 2],
        num_heads=[6, 12, 24, 48],
        window_size=12,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(1, 2, 3),
        # Please only add indices that would be used
        # in FPN, otherwise some parameter will not be used
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    neck=[
        dict(
            type='FPN',
            in_channels=[384, 768, 1536],
            out_channels=256,
            start_level=0,
            add_extra_convs='on_output',
            num_outs=5),
        dict(
            type='DyHead',
            in_channels=256,
            out_channels=256,
            num_blocks=6,
            # disable zero_init_offset to follow official implementation
            zero_init_offset=False)
    ],
    bbox_head=dict(
        type='ATSSHead',
        num_classes=80,
        in_channels=256,
        pred_kernel_size=1,  # follow DyHead official implementation
        stacked_convs=0,
        feat_channels=256,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128],
            center_offset=0.5),  # follow DyHead official implementation
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
        loss_centerness=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(2000, 480), (2000, 1200)],
        multiscale_mode='range',
        keep_ratio=True,
        backend='pillow'),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=128),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2000, 1200),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True, backend='pillow'),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=128),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# Use RepeatDataset to speed up training
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type='RepeatDataset',
        times=2,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/instances_train2017.json',
            img_prefix=data_root + 'train2017/',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/instances_val2017.json',
        img_prefix=data_root + 'val2017/',
        pipeline=test_pipeline))
evaluation = dict(interval=1, metric='bbox')

# optimizer
optimizer_config = dict(grad_clip=None)
optimizer = dict(
    type='AdamW',
    lr=0.00005,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
5,122
30.048485
129
py
mmdetection
mmdetection-master/configs/dyhead/metafile.yml
Collections:
  - Name: DyHead
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 4x T4 GPUs
      Architecture:
        - ATSS
        - DyHead
        - FPN
        - ResNet
        - Deformable Convolution
        - Pyramid Convolution
    Paper:
      URL: https://arxiv.org/abs/2106.08322
      Title: 'Dynamic Head: Unifying Object Detection Heads with Attentions'
    README: configs/dyhead/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.22.0/mmdet/models/necks/dyhead.py#L130
      Version: v2.22.0

Models:
  - Name: atss_r50_caffe_fpn_dyhead_1x_coco
    In Collection: DyHead
    Config: configs/dyhead/atss_r50_caffe_fpn_dyhead_1x_coco.py
    Metadata:
      Training Memory (GB): 5.4
      inference time (ms/im):
        - value: 75.7
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_for_reproduction_1x_coco/atss_r50_fpn_dyhead_for_reproduction_4x4_1x_coco_20220107_213939-162888e6.pth
  - Name: atss_r50_fpn_dyhead_1x_coco
    In Collection: DyHead
    Config: configs/dyhead/atss_r50_fpn_dyhead_1x_coco.py
    Metadata:
      Training Memory (GB): 4.9
      inference time (ms/im):
        - value: 73.1
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_r50_fpn_dyhead_4x4_1x_coco/atss_r50_fpn_dyhead_4x4_1x_coco_20211219_023314-eaa620c6.pth
  - Name: atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco
    In Collection: DyHead
    Config: configs/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco.py
    Metadata:
      Training Memory (GB): 58.4
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 56.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dyhead/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco/atss_swin-l-p4-w12_fpn_dyhead_mstrain_2x_coco_20220509_100315-bc5b6516.pth
2,473
31.12987
190
yml
mmdetection
mmdetection-master/configs/dynamic_rcnn/README.md
# Dynamic R-CNN

> [Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training](https://arxiv.org/abs/2004.06002)

<!-- [ALGORITHM] -->

## Abstract

Although two-stage object detectors have continuously advanced the state-of-the-art performance in recent years, the training process itself is far from crystal clear. In this work, we first point out the inconsistency problem between the fixed network settings and the dynamic training procedure, which greatly affects the performance. For example, the fixed label assignment strategy and regression loss function cannot fit the distribution change of proposals and thus are harmful to training high quality detectors. Consequently, we propose Dynamic R-CNN to adjust the label assignment criteria (IoU threshold) and the shape of the regression loss function (parameters of SmoothL1 Loss) automatically based on the statistics of proposals during training. This dynamic design makes better use of the training samples and pushes the detector to fit more high quality samples. Specifically, our method improves upon the ResNet-50-FPN baseline by 1.9% AP and 5.5% AP90 on the MS COCO dataset with no extra overhead.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143879518-842f5bec-9f65-4454-93a1-9b3b0c42ec3c.png"/>
</div>

## Results and Models

| Backbone | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| R-50     | pytorch | 1x | 3.8 | | 38.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x_20200618_095048.log.json) |

## Citation

```latex
@article{DynamicRCNN,
  author = {Hongkai Zhang and Hong Chang and Bingpeng Ma and Naiyan Wang and Xilin Chen},
  title = {Dynamic {R-CNN}: Towards High Quality Object Detection via Dynamic Training},
  journal = {arXiv preprint arXiv:2004.06002},
  year = {2020}
}
```
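The "adjust based on the statistics of proposals" idea can be sketched in a few lines. This is a rough, illustrative approximation of the update rule described in the paper (it is not mmdet's `DynamicRoIHead` code); the constants match the config in the next file (`initial_iou=0.4`, `initial_beta=1.0`, `iou_topk=75`, `beta_topk=10`, `update_iter_interval=100`), while the exact aggregation (mean of top-K IoUs, median of small regression errors) is simplified.

```python
# Rough sketch of Dynamic R-CNN's training-time updates: track proposal
# IoU and regression-error statistics each iteration, then periodically
# reset the assigner's positive-IoU threshold and the SmoothL1 beta.
import numpy as np

iou_history, beta_history = [], []
iou_thr, beta = 0.4, 1.0  # initial_iou, initial_beta


def after_train_iter(ious, abs_reg_errors, it,
                     iou_topk=75, beta_topk=10, interval=100):
    global iou_thr, beta
    # record the mean of the top-K proposal IoUs and the mean of the
    # K smallest absolute regression errors for this iteration
    iou_history.append(np.sort(ious)[-iou_topk:].mean())
    beta_history.append(np.sort(abs_reg_errors)[:beta_topk].mean())
    if (it + 1) % interval == 0:
        iou_thr = float(np.mean(iou_history))    # raise assignment IoU
        beta = float(np.median(beta_history))    # tighten SmoothL1 beta
        iou_history.clear()
        beta_history.clear()
```

As proposals improve during training, `iou_thr` drifts upward and `beta` shrinks, which is exactly the "fit more high quality samples" behavior the abstract describes.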
3,034
96.903226
1,003
md
mmdetection
mmdetection-master/configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        type='DynamicRoIHead',
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=80,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0))),
    train_cfg=dict(
        rpn_proposal=dict(nms=dict(iou_threshold=0.85)),
        rcnn=dict(
            dynamic_rcnn=dict(
                iou_topk=75,
                beta_topk=10,
                update_iter_interval=100,
                initial_iou=0.4,
                initial_beta=1.0))),
    test_cfg=dict(rpn=dict(nms=dict(iou_threshold=0.85))))
1,051
35.275862
77
py
mmdetection
mmdetection-master/configs/dynamic_rcnn/metafile.yml
Collections:
  - Name: Dynamic R-CNN
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Dynamic R-CNN
        - FPN
        - RPN
        - ResNet
        - RoIAlign
    Paper:
      URL: https://arxiv.org/pdf/2004.06002
      Title: 'Dynamic R-CNN: Towards High Quality Object Detection via Dynamic Training'
    README: configs/dynamic_rcnn/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.2.0/mmdet/models/roi_heads/dynamic_roi_head.py#L11
      Version: v2.2.0

Models:
  - Name: dynamic_rcnn_r50_fpn_1x_coco
    In Collection: Dynamic R-CNN
    Config: configs/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 3.8
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 38.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/dynamic_rcnn/dynamic_rcnn_r50_fpn_1x/dynamic_rcnn_r50_fpn_1x-62a3f276.pth
1,083
29.111111
134
yml
mmdetection
mmdetection-master/configs/efficientnet/README.md
# EfficientNet

> [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946v5)

<!-- [BACKBONE] -->

## Introduction

Convolutional Neural Networks (ConvNets) are commonly developed at a fixed resource budget, and then scaled up for better accuracy if more resources are available. In this paper, we systematically study model scaling and identify that carefully balancing network depth, width, and resolution can lead to better performance. Based on this observation, we propose a new scaling method that uniformly scales all dimensions of depth/width/resolution using a simple yet highly effective compound coefficient. We demonstrate the effectiveness of this method on scaling up MobileNets and ResNet. To go even further, we use neural architecture search to design a new baseline network and scale it up to obtain a family of models, called EfficientNets, which achieve much better accuracy and efficiency than previous ConvNets. In particular, our EfficientNet-B7 achieves state-of-the-art 84.3% top-1 accuracy on ImageNet, while being 8.4x smaller and 6.1x faster on inference than the best existing ConvNet. Our EfficientNets also transfer well and achieve state-of-the-art accuracy on CIFAR-100 (91.7%), Flowers (98.8%), and 3 other transfer learning datasets, with an order of magnitude fewer parameters.

## Results and Models

### RetinaNet

| Backbone        | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| Efficientnet-b3 | pytorch | 1x | - | - | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806.log.json) |

## Citation

```latex
@article{tan2019efficientnet,
  title={Efficientnet: Rethinking model scaling for convolutional neural networks},
  author={Tan, Mingxing and Le, Quoc V},
  journal={arXiv preprint arXiv:1905.11946},
  year={2019}
}
```
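The "compound coefficient" mentioned in the introduction has a simple closed form. The sketch below uses the paper's B0 constants (alpha=1.2, beta=1.1, gamma=1.15, chosen so alpha * beta^2 * gamma^2 ≈ 2, i.e. FLOPs roughly double per unit of phi); note the released B1-B7 models hand-round these values, so the printed numbers are only approximate.

```python
# Compound scaling from the EfficientNet paper: depth, width and input
# resolution all grow geometrically with one coefficient phi.
ALPHA, BETA, GAMMA = 1.2, 1.1, 1.15  # paper's grid-searched base values


def compound_scale(phi, base_depth=1.0, base_width=1.0, base_res=224):
    depth_mult = base_depth * ALPHA ** phi       # more layers per stage
    width_mult = base_width * BETA ** phi        # wider channels
    resolution = round(base_res * GAMMA ** phi)  # larger input images
    return depth_mult, width_mult, resolution


for phi in range(4):  # roughly the B0..B3 operating points
    print(phi, compound_scale(phi))
```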
3,341
106.806452
608
md
mmdetection
mmdetection-master/configs/efficientnet/metafile.yml
Models:
  - Name: retinanet_effb3_fpn_crop896_8x4_1x_coco
    In Collection: RetinaNet
    Config: configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco/retinanet_effb3_fpn_crop896_8x4_1x_coco_20220322_234806-615a0dda.pth
    Paper:
      URL: https://arxiv.org/abs/1905.11946v5
      Title: 'EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks'
    README: configs/efficientnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.23.0/mmdet/models/backbones/efficientnet.py#L159
      Version: v2.23.0
814
39.75
182
yml
mmdetection
mmdetection-master/configs/efficientnet/retinanet_effb3_fpn_crop896_8x4_1x_coco.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]

cudnn_benchmark = True
norm_cfg = dict(type='BN', requires_grad=True)
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/efficientnet/efficientnet-b3_3rdparty_8xb32-aa_in1k_20220119-5b4887a0.pth'  # noqa
model = dict(
    backbone=dict(
        _delete_=True,
        type='EfficientNet',
        arch='b3',
        drop_path_rate=0.2,
        out_indices=(3, 4, 5),
        frozen_stages=0,
        norm_cfg=dict(
            type='SyncBN', requires_grad=True, eps=1e-3, momentum=0.01),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained', prefix='backbone', checkpoint=checkpoint)),
    neck=dict(
        in_channels=[48, 136, 384],
        start_level=0,
        out_channels=256,
        relu_before_extra_convs=True,
        no_norm_on_lateral=True,
        norm_cfg=norm_cfg),
    bbox_head=dict(type='RetinaSepBNHead', num_ins=5, norm_cfg=norm_cfg),
    # training and testing settings
    train_cfg=dict(assigner=dict(neg_iou_thr=0.5)))

# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
img_size = (896, 896)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=img_size,
        ratio_range=(0.8, 1.2),
        keep_ratio=True),
    dict(type='RandomCrop', crop_size=img_size),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=img_size),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=img_size,
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size=img_size),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

# optimizer
optimizer_config = dict(grad_clip=None)
optimizer = dict(
    type='SGD',
    lr=0.04,
    momentum=0.9,
    weight_decay=0.0001,
    paramwise_cfg=dict(norm_decay_mult=0, bypass_duplicate=True))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.1,
    step=[8, 11])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=12)

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (4 samples per GPU)
auto_scale_lr = dict(base_batch_size=32)
3,016
30.757895
147
py
mmdetection
mmdetection-master/configs/empirical_attention/README.md
# Empirical Attention

> [An Empirical Study of Spatial Attention Mechanisms in Deep Networks](https://arxiv.org/abs/1904.05873)

<!-- [ALGORITHM] -->

## Abstract

Attention mechanisms have become a popular component in deep neural networks, yet there has been little examination of how different influencing factors and methods for computing attention from these factors affect performance. Toward a better general understanding of attention mechanisms, we present an empirical study that ablates various spatial attention elements within a generalized attention formulation, encompassing the dominant Transformer attention as well as the prevalent deformable convolution and dynamic convolution modules. Conducted on a variety of applications, the study yields significant findings about spatial attention in deep networks, some of which run counter to conventional understanding. For example, we find that the query and key content comparison in Transformer attention is negligible for self-attention, but vital for encoder-decoder attention. A proper combination of deformable convolution with key content only saliency achieves the best accuracy-efficiency tradeoff in self-attention. Our results suggest that there exists much room for improvement in the design of attention mechanisms.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143879619-f1817da9-1573-45c9-891d-cfe55ad54911.png"/>
</div>

## Results and Models

| Backbone | Attention Component | DCN | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :------: | :-----------------: | :-: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| R-50 | 1111 | N | 1x | 8.0 | 13.8 | 40.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130_210344.log.json) |
| R-50 | 0010 | N | 1x | 4.2 | 18.4 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130_210125.log.json) |
| R-50 | 1111 | Y | 1x | 8.0 | 12.7 | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130_204442.log.json) |
| R-50 | 0010 | Y | 1x | 4.2 | 17.1 | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130_210410.log.json) |

The attention component string is read digit by digit: each of the four binary digits enables one term of the paper's generalized attention formulation, namely query-and-key content, query content with relative position, key content only (saliency), and relative position only. Thus `1111` uses all four terms, while `0010` keeps only the key-content saliency term.

## Citation

```latex
@article{zhu2019empirical,
  title={An Empirical Study of Spatial Attention Mechanisms in Deep Networks},
  author={Zhu, Xizhou and Cheng, Dazhi and Zhang, Zheng and Lin, Stephen and Dai, Jifeng},
  journal={arXiv preprint arXiv:1904.05873},
  year={2019}
}
```
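In config form, the attention component is a backbone plugin; this annotated sketch mirrors the configs in the following files, with comments tying each field to the description above (the digit-to-term mapping follows my reading of the paper and the `GeneralizedAttention` op).

```python
# Attaching the generalized-attention plugin to a Faster R-CNN backbone;
# mirrors the faster_rcnn_r50_fpn_attention_*_1x_coco.py configs below.
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,        # unrestricted spatial extent
                num_heads=8,
                attention_type='1111',   # one binary digit per attention term
                kv_stride=2),            # subsample keys/values for speed
            stages=(False, False, True, True),  # only the last two stages
            position='after_conv2')
    ]))
```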
5,489
160.470588
1,128
md
mmdetection
mmdetection-master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, False, True, True),
            position='after_conv2')
    ]))
403
27.857143
56
py
mmdetection
mmdetection-master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        plugins=[
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                stages=(False, False, True, True),
                position='after_conv2')
        ],
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
575
32.882353
72
py
mmdetection
mmdetection-master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(plugins=[
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='1111',
                kv_stride=2),
            stages=(False, False, True, True),
            position='after_conv2')
    ]))
403
27.857143
56
py
mmdetection
mmdetection-master/configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py
_base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        plugins=[
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='1111',
                    kv_stride=2),
                stages=(False, False, True, True),
                position='after_conv2')
        ],
        dcn=dict(type='DCN', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)))
575
32.882353
72
py
mmdetection
mmdetection-master/configs/empirical_attention/metafile.yml
Collections:
  - Name: Empirical Attention
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Deformable Convolution
        - FPN
        - RPN
        - ResNet
        - RoIAlign
        - Spatial Attention
    Paper:
      URL: https://arxiv.org/pdf/1904.05873
      Title: 'An Empirical Study of Spatial Attention Mechanisms in Deep Networks'
    README: configs/empirical_attention/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.0.0/mmdet/ops/generalized_attention.py#L10
      Version: v2.0.0

Models:
  - Name: faster_rcnn_r50_fpn_attention_1111_1x_coco
    In Collection: Empirical Attention
    Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco.py
    Metadata:
      Training Memory (GB): 8.0
      inference time (ms/im):
        - value: 72.46
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_1x_coco/faster_rcnn_r50_fpn_attention_1111_1x_coco_20200130-403cccba.pth
  - Name: faster_rcnn_r50_fpn_attention_0010_1x_coco
    In Collection: Empirical Attention
    Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco.py
    Metadata:
      Training Memory (GB): 4.2
      inference time (ms/im):
        - value: 54.35
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 39.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_1x_coco/faster_rcnn_r50_fpn_attention_0010_1x_coco_20200130-7cb0c14d.pth
  - Name: faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco
    In Collection: Empirical Attention
    Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco.py
    Metadata:
      Training Memory (GB): 8.0
      inference time (ms/im):
        - value: 78.74
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco/faster_rcnn_r50_fpn_attention_1111_dcn_1x_coco_20200130-8b2523a6.pth
  - Name: faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco
    In Collection: Empirical Attention
    Config: configs/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.2
      inference time (ms/im):
        - value: 58.48
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (800, 1333)
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/empirical_attention/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco/faster_rcnn_r50_fpn_attention_0010_dcn_1x_coco_20200130-1a2e831d.pth
3,593
33.557692
196
yml
mmdetection
mmdetection-master/configs/fast_rcnn/README.md
# Fast R-CNN

> [Fast R-CNN](https://arxiv.org/abs/1504.08083)

<!-- [ALGORITHM] -->

## Abstract

This paper proposes a Fast Region-based Convolutional Network method (Fast R-CNN) for object detection. Fast R-CNN builds on previous work to efficiently classify object proposals using deep convolutional networks. Compared to previous work, Fast R-CNN employs several innovations to improve training and testing speed while also increasing detection accuracy. Fast R-CNN trains the very deep VGG16 network 9x faster than R-CNN, is 213x faster at test-time, and achieves a higher mAP on PASCAL VOC 2012. Compared to SPPnet, Fast R-CNN trains VGG16 3x faster, tests 10x faster, and is more accurate.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143882189-6258c05c-f2a1-4320-9282-7e2f2d502eb2.png"/>
</div>

## Introduction

Before training Fast R-CNN, users should first train an [RPN](../rpn/README.md) and use it to extract the region proposals.

- First, extract the region proposals of the val set with the command below:

  ```bash
  ./tools/dist_test.sh \
      configs/rpn/rpn_r50_fpn_1x_coco.py \
      checkpoints/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth \
      8 \
      --out proposals/rpn_r50_fpn_1x_val2017.pkl
  ```

- Then, change the `ann_file` and `img_prefix` of `data.test` in the RPN config to the train set, as below:

  ```python
  data = dict(
      test=dict(
          ann_file='data/coco/annotations/instances_train2017.json',
          img_prefix='data/coco/train2017/'))
  ```

- Extract the region proposals of the train set with the command below:

  ```bash
  ./tools/dist_test.sh \
      configs/rpn/rpn_r50_fpn_1x_coco.py \
      checkpoints/rpn_r50_fpn_1x_coco_20200218-5525fa2e.pth \
      8 \
      --out proposals/rpn_r50_fpn_1x_train2017.pkl
  ```

- Modify the path of `proposal_file` in the Fast R-CNN config, as below:

  ```python
  data = dict(
      train=dict(
          proposal_file='proposals/rpn_r50_fpn_1x_train2017.pkl'),
      val=dict(
          proposal_file='proposals/rpn_r50_fpn_1x_val2017.pkl'),
      test=dict(
          proposal_file='proposals/rpn_r50_fpn_1x_val2017.pkl'))
  ```

Finally, users can start training the Fast R-CNN.

## Results and Models

## Citation

```latex
@inproceedings{girshick2015fast,
  title={Fast r-cnn},
  author={Girshick, Ross},
  booktitle={Proceedings of the IEEE international conference on computer vision},
  year={2015}
}
```
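Before kicking off training, it can help to sanity-check the extracted proposal file. This sketch assumes the usual RPN output format from `--out` (a pickled list, in dataset order, with one `(N, 5)` float array of `[x1, y1, x2, y2, score]` per image); if your file differs, adjust accordingly.

```python
# Quick sanity check on an extracted proposal pickle (format assumed
# as described in the lead-in above).
import mmcv

proposals = mmcv.load('proposals/rpn_r50_fpn_1x_val2017.pkl')
print(len(proposals))       # number of images in the split
print(proposals[0].shape)   # e.g. (1000, 5) proposals for the first image
print(proposals[0][:3])     # a few [x1, y1, x2, y2, score] rows
```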
2,400
31.445946
598
md
mmdetection
mmdetection-master/configs/fast_rcnn/fast_rcnn_r101_caffe_fpn_1x_coco.py
_base_ = './fast_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
222
26.875
67
py
mmdetection
mmdetection-master/configs/fast_rcnn/fast_rcnn_r101_fpn_1x_coco.py
_base_ = './fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
197
27.285714
61
py
mmdetection
mmdetection-master/configs/fast_rcnn/fast_rcnn_r101_fpn_2x_coco.py
_base_ = './fast_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
197
27.285714
61
py
mmdetection
mmdetection-master/configs/fast_rcnn/fast_rcnn_r50_caffe_fpn_1x_coco.py
_base_ = './fast_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(type='BN', requires_grad=False),
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=2000),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=None),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='ToTensor', keys=['proposals']),
            dict(
                type='ToDataContainer',
                fields=[dict(key='proposals', stack=False)]),
            dict(type='Collect', keys=['img', 'proposals']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,710
33.918367
78
py
mmdetection
mmdetection-master/configs/fast_rcnn/fast_rcnn_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/models/fast_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=2000),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'proposals', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadProposals', num_max_proposals=None),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='ToTensor', keys=['proposals']),
            dict(
                type='ToDataContainer',
                fields=[dict(key='proposals', stack=False)]),
            dict(type='Collect', keys=['img', 'proposals']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_train2017.pkl',
        pipeline=train_pipeline),
    val=dict(
        proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
        pipeline=test_pipeline),
    test=dict(
        proposal_file=data_root + 'proposals/rpn_r50_fpn_1x_val2017.pkl',
        pipeline=test_pipeline))
1,944
35.698113
78
py
mmdetection
mmdetection-master/configs/fast_rcnn/fast_rcnn_r50_fpn_2x_coco.py
_base_ = './fast_rcnn_r50_fpn_1x_coco.py'

# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
147
23.666667
53
py
mmdetection
mmdetection-master/configs/faster_rcnn/README.md
# Faster R-CNN

> [Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks](https://arxiv.org/abs/1506.01497)

<!-- [ALGORITHM] -->

## Abstract

State-of-the-art object detection networks depend on region proposal algorithms to hypothesize object locations. Advances like SPPnet and Fast R-CNN have reduced the running time of these detection networks, exposing region proposal computation as a bottleneck. In this work, we introduce a Region Proposal Network (RPN) that shares full-image convolutional features with the detection network, thus enabling nearly cost-free region proposals. An RPN is a fully convolutional network that simultaneously predicts object bounds and objectness scores at each position. The RPN is trained end-to-end to generate high-quality region proposals, which are used by Fast R-CNN for detection. We further merge RPN and Fast R-CNN into a single network by sharing their convolutional features---using the recently popular terminology of neural networks with 'attention' mechanisms, the RPN component tells the unified network where to look. For the very deep VGG-16 model, our detection system has a frame rate of 5fps (including all steps) on a GPU, while achieving state-of-the-art object detection accuracy on PASCAL VOC 2007, 2012, and MS COCO datasets with only 300 proposals per image. In ILSVRC and COCO 2015 competitions, Faster R-CNN and RPN are the foundations of the 1st-place winning entries in several tracks.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143881188-ab87720f-5059-4b4e-a928-b540fb8fb84d.png" height="300"/>
</div>

## Results and Models

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| R-50-C4 | caffe | 1x | - | - | 35.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152-3f885b85.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco/faster_rcnn_r50_caffe_c4_1x_coco_20220316_150152.log.json) |
| R-50-DC5 | caffe | 1x | - | - | 37.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909-531f0f43.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco/faster_rcnn_r50_caffe_dc5_1x_coco_20201030_151909.log.json) |
| R-50-FPN | caffe | 1x | 3.8 | | 37.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_bbox_mAP-0.378_20200504_180032-c5925ee5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco/faster_rcnn_r50_caffe_fpn_1x_coco_20200504_180032.log.json) |
| R-50-FPN | pytorch | 1x | 4.0 | 21.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) |
| R-50-FPN (FP16) | pytorch | 1x | 3.4 | 28.8 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204-d4dc1471.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/fp16/faster_rcnn_r50_fpn_fp16_1x_coco/faster_rcnn_r50_fpn_fp16_1x_coco_20200204_143530.log.json) |
| R-50-FPN | pytorch | 2x | - | - | 38.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_bbox_mAP-0.384_20200504_210434-a5d8aa15.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_2x_coco/faster_rcnn_r50_fpn_2x_coco_20200504_210434.log.json) |
| R-101-FPN | caffe | 1x | 5.7 | | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_bbox_mAP-0.398_20200504_180057-b269e9dd.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco/faster_rcnn_r101_caffe_fpn_1x_coco_20200504_180057.log.json) |
| R-101-FPN | pytorch | 1x | 6.0 | 15.6 | 39.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130-f513f705.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_1x_coco/faster_rcnn_r101_fpn_1x_coco_20200130_204655.log.json) |
| R-101-FPN | pytorch | 2x | - | - | 39.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_bbox_mAP-0.398_20200504_210455-1d2dac9c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_2x_coco/faster_rcnn_r101_fpn_2x_coco_20200504_210455.log.json) |
| X-101-32x4d-FPN | pytorch | 1x | 7.2 | 13.8 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203-cff10310.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco/faster_rcnn_x101_32x4d_fpn_1x_coco_20200203_000520.log.json) |
| X-101-32x4d-FPN | pytorch | 2x | - | - | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_bbox_mAP-0.412_20200506_041400-64a12c0b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco/faster_rcnn_x101_32x4d_fpn_2x_coco_20200506_041400.log.json) |
| X-101-64x4d-FPN | pytorch | 1x | 10.3 | 9.4 | 42.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204-833ee192.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco/faster_rcnn_x101_64x4d_fpn_1x_coco_20200204_134340.log.json) |
| X-101-64x4d-FPN | pytorch | 2x | - | - | 41.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033-5961fa95.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco/faster_rcnn_x101_64x4d_fpn_2x_coco_20200512_161033.log.json) |

## Different regression loss

We trained Faster R-CNN with an R-50-FPN PyTorch-style backbone on the 1x schedule, varying only the bounding box regression loss.
| Backbone | Loss type | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :------: | :------------: | :------: | :------------: | :----: | :----: | :------: |
| R-50-FPN | L1Loss | 4.0 | 21.4 | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130_204655.log.json) |
| R-50-FPN | IoULoss | | | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco_20200506_095954-938e81f0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco/faster_rcnn_r50_fpn_iou_1x_coco_20200506_095954.log.json) |
| R-50-FPN | GIoULoss | | | 37.6 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco-0eada910.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_giou_1x_coco_20200505_161120.log.json) |
| R-50-FPN | BoundedIoULoss | | | 37.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco-98ad993b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_bounded_iou_1x_coco_20200505_160738.log.json) |

## Pre-trained Models

We also train some models with longer schedules and multi-scale training. Users can fine-tune them for downstream tasks.
| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| [R-50-C4](./faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py) | caffe | 1x | - | | 35.9 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527-db276fed.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco/faster_rcnn_r50_caffe_c4_mstrain_1x_coco_20220316_150527.log.json) |
| [R-50-DC5](./faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py) | caffe | 1x | - | | 37.4 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851-b33d21b9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco_20201028_233851.log.json) |
| [R-50-DC5](./faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py) | caffe | 3x | - | | 38.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107-34a53b2c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco_20201028_002107.log.json) |
| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py) | caffe | 2x | 3.7 | | 39.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_bbox_mAP-0.397_20200504_231813-10b2de58.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco_20200504_231813.log.json) |
| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | caffe | 3x | 3.7 | | 39.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054-1f77628b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_20210526_095054.log.json) |
| [R-50-FPN](./faster_rcnn_r50_fpn_mstrain_3x_coco.py) | pytorch | 3x | 3.9 | | 40.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822-e10bd31c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco/faster_rcnn_r50_fpn_mstrain_3x_coco_20210524_110822.log.json) |
| [R-101-FPN](./faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py) | caffe | 3x | 5.6 | | 42.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742-a7ae426d.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco_20210526_095742.log.json) |
| [R-101-FPN](./faster_rcnn_r101_fpn_mstrain_3x_coco.py) | pytorch | 3x | 5.8 | | 41.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822-4d4d2ca8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco/faster_rcnn_r101_fpn_mstrain_3x_coco_20210524_110822.log.json) |
| [X-101-32x4d-FPN](./faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py) | pytorch | 3x | 7.0 | | 42.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151-16b9b260.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco_20210524_124151.log.json) |
| [X-101-32x8d-FPN](./faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py) | pytorch | 3x | 10.1 | | 42.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954-002e082a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco_20210604_182954.log.json) |
| [X-101-64x4d-FPN](./faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py) | pytorch | 3x | 10.0 | | 43.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528-26c63de6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco_20210524_124528.log.json) |

We further fine-tune some pre-trained models on COCO subsets that contain only a few of the 80 categories.

| Backbone | Style | Class name | Pre-trained model | Mem (GB) | box AP | Config | Download |
| -------- | ----- | ---------- | ----------------- | -------- | ------ | ------ | -------- |
| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py) | caffe | person | [R-50-FPN-Caffe-3x](./faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | 3.7 | 55.8 | [config](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929-d022e227.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person/faster_rcnn_r50_fpn_1x_coco-person_20201216_175929.log.json) |
| [R-50-FPN](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py) | caffe | person-bicycle-car | [R-50-FPN-Caffe-3x](./faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py) | 3.7 | 44.1 | [config](./faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car_20201216_173117-6eda6d92.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car/faster_rcnn_r50_fpn_1x_coco-person-bicycle-car_20201216_173117.log.json) |

## Torchvision New Recipe (TNR)

Torchvision released its high-precision ResNet models. The training details can be found on the [Pytorch website](https://pytorch.org/blog/how-to-train-state-of-the-art-models-using-torchvision-latest-primitives/). Here, we grid-searched the learning rate and weight decay and found the optimal hyper-parameters for the detection task.
| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :-------------------------------------------------------: | :-----: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| [R-50-TNR](./faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py) | pytorch | 1x | - | | 40.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147-efedfda4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco_20220320_085147.log.json) |

## Citation

```latex
@article{Ren_2017,
   title={Faster R-CNN: Towards Real-Time Object Detection with Region Proposal Networks},
   journal={IEEE Transactions on Pattern Analysis and Machine Intelligence},
   publisher={Institute of Electrical and Electronics Engineers (IEEE)},
   author={Ren, Shaoqing and He, Kaiming and Girshick, Ross and Sun, Jian},
   year={2017},
   month={Jun},
}
```
26,948
301.797753
1,311
md
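The tables above pair every config with a released checkpoint. As a quick orientation, here is a minimal inference sketch, assuming mmdetection v2.x is installed and the R-50-FPN 1x weight file from the results table has been downloaded locally; the demo image path is a placeholder:

```python
# Minimal sketch, assuming mmdetection v2.x and a locally downloaded checkpoint.
from mmdet.apis import inference_detector, init_detector

config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
checkpoint_file = 'faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'

# Build the detector and load weights; pass device='cpu' if no GPU is available.
model = init_detector(config_file, checkpoint_file, device='cuda:0')

# For a pure detector, the result is a list with one (n, 5) array per class:
# x1, y1, x2, y2, score.
result = inference_detector(model, 'demo/demo.jpg')
model.show_result('demo/demo.jpg', result, out_file='result.jpg')
```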
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_1x_coco.py
_base_ = './faster_rcnn_r50_caffe_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))
224
27.125
67
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r101_caffe_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py'

model = dict(
    backbone=dict(
        depth=101,
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet101_caffe')))

# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,526
29.54
72
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r101_fpn_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
199
27.571429
61
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r101_fpn_2x_coco.py
_base_ = './faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
199
27.571429
61
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r101_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
206
24.875
61
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_caffe_c4.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# optimizer
optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001)
1,388
33.725
72
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_c4_mstrain_1x_coco.py
_base_ = './faster_rcnn_r50_caffe_c4_1x_coco.py'
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,314
32.717949
72
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_caffe_dc5.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,304
33.342105
72
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_caffe_dc5.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,448
32.697674
72
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_dc5_mstrain_3x_coco.py
_base_ = './faster_rcnn_r50_caffe_dc5_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
162
31.6
57
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,410
32.595238
72
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_90k_coco.py
_base_ = 'faster_rcnn_r50_caffe_fpn_1x_coco.py'

# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[60000, 80000])

# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)

checkpoint_config = dict(interval=10000)
evaluation = dict(interval=10000, metric='bbox')
372
22.3125
69
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person-bicycle-car.py
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=3)))
classes = ('person', 'bicycle', 'car')
data = dict(
    train=dict(classes=classes),
    val=dict(classes=classes),
    test=dict(classes=classes))

load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth'  # noqa
476
46.7
209
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-person.py
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
model = dict(roi_head=dict(bbox_head=dict(num_classes=1)))
classes = ('person', )
data = dict(
    train=dict(classes=classes),
    val=dict(classes=classes),
    test=dict(classes=classes))

load_from = 'https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco_bbox_mAP-0.398_20200504_163323-30042637.pth'  # noqa
460
45.1
209
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))
# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,554
32.085106
72
py
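The `multiscale_mode='value'` setting in the config above means each training image is resized to one of the six listed scales, picked uniformly at random (as opposed to `'range'`, which samples a size between two endpoints). A toy sketch of the selection logic, not mmdet's actual `Resize` implementation:

```python
import random

# The six candidate scales from the config above.
img_scales = [(1333, 640), (1333, 672), (1333, 704), (1333, 736),
              (1333, 768), (1333, 800)]

def sample_scale(scales):
    # 'value' mode: pick one listed scale per image, uniformly at random.
    return random.choice(scales)

print(sample_scale(img_scales))  # e.g. (1333, 736)
```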
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_2x_coco.py
_base_ = './faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 23])
runner = dict(type='EpochBasedRunner', max_epochs=24)
162
31.6
57
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_3x_coco.py
_base_ = 'faster_rcnn_r50_fpn_mstrain_3x_coco.py'
model = dict(
    backbone=dict(
        norm_cfg=dict(requires_grad=False),
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')))

# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,505
30.375
72
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_90k_coco.py
_base_ = 'faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py'

# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[60000, 80000])

# Runner type
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=90000)

checkpoint_config = dict(interval=10000)
evaluation = dict(interval=10000, metric='bbox')
380
22.8125
69
py
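For orientation: the two 90k-iteration configs above are roughly the iteration-based counterpart of the epoch-based 1x (12-epoch) recipe. COCO train2017 has about 118k images, so at the default batch size of 16 (2 images per GPU on 8 GPUs) one epoch is roughly 118287 / 16 ≈ 7.4k iterations, and 12 epochs come to about 89k iterations; the decay steps at 60k and 80k likewise approximate the usual 8- and 11-epoch steps (8 × 7.4k ≈ 59k, 11 × 7.4k ≈ 81k).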
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
177
28.666667
72
py
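This base config is pure composition: it lists four `_base_` files and inherits everything from them. A quick way to inspect the merged result is mmcv's `Config` (standard mmcv API; the printed values come from the base model file):

```python
from mmcv import Config

# Merge the four _base_ files into one flat config object.
cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py')
print(cfg.model.type)            # FasterRCNN
print(cfg.model.backbone.depth)  # 50
print(cfg.pretty_text)           # the fully merged config as a string
```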
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_2x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
177
28.666667
72
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_bounded_iou_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_head=dict(
            reg_decoded_bbox=True,
            loss_bbox=dict(type='BoundedIoULoss', loss_weight=10.0))))
207
28.714286
70
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_ciou_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_head=dict(
            reg_decoded_bbox=True,
            loss_bbox=dict(type='CIoULoss', loss_weight=12.0))))
201
27.857143
64
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_fp16_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
# fp16 settings
fp16 = dict(loss_scale=512.)
89
21.5
43
py
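`loss_scale=512.` enables static loss scaling in mmcv's FP16 training hook. A schematic of the idea in plain PyTorch (a sketch only, not mmcv's actual implementation, which also keeps fp32 master weights and checks for overflow): scale the loss up so small fp16 gradients do not underflow to zero, then unscale the gradients before the optimizer update.

```python
import torch

def fp16_step(model, loss, optimizer, loss_scale=512.0):
    # Schematic static loss scaling.
    optimizer.zero_grad()
    (loss * loss_scale).backward()   # gradients are scaled by loss_scale
    for p in model.parameters():
        if p.grad is not None:
            p.grad.div_(loss_scale)  # unscale before the update
    optimizer.step()
```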
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_giou_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_head=dict(
            reg_decoded_bbox=True,
            loss_bbox=dict(type='GIoULoss', loss_weight=10.0))))
201
27.857143
64
py
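`reg_decoded_bbox=True` makes the head decode its predictions into absolute boxes before computing the loss, so GIoU operates on real coordinates; the loss value is then 1 - GIoU. A standalone GIoU sketch for (x1, y1, x2, y2) boxes, independent of mmdet's implementation:

```python
import torch

def giou(b1, b2):
    # Intersection area.
    lt = torch.max(b1[..., :2], b2[..., :2])
    rb = torch.min(b1[..., 2:], b2[..., 2:])
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    # Union area.
    area1 = (b1[..., 2] - b1[..., 0]) * (b1[..., 3] - b1[..., 1])
    area2 = (b2[..., 2] - b2[..., 0]) * (b2[..., 3] - b2[..., 1])
    union = (area1 + area2 - inter).clamp(min=1e-6)
    iou = inter / union
    # Smallest enclosing box penalizes far-apart, non-overlapping pairs.
    lt_c = torch.min(b1[..., :2], b2[..., :2])
    rb_c = torch.max(b1[..., 2:], b2[..., 2:])
    wh_c = (rb_c - lt_c).clamp(min=0)
    area_c = (wh_c[..., 0] * wh_c[..., 1]).clamp(min=1e-6)
    return iou - (area_c - union) / area_c  # the loss would be 1 - giou
```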
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_iou_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    roi_head=dict(
        bbox_head=dict(
            reg_decoded_bbox=True,
            loss_bbox=dict(type='IoULoss', loss_weight=10.0))))
200
27.714286
63
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_mstrain_3x_coco.py
_base_ = [
    '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
91
22
77
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_ohem_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(train_cfg=dict(rcnn=dict(sampler=dict(type='OHEMSampler'))))
118
38.666667
73
py
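Swapping the RCNN sampler for `OHEMSampler` turns on online hard example mining: instead of sampling RoIs at random, candidates are ranked by their current loss and only the hardest are kept. A sketch of the selection step alone (mmdet's sampler additionally forwards the candidate RoIs through the bbox head to obtain these losses):

```python
import torch

def select_hard_examples(per_roi_loss, num_keep=512):
    # per_roi_loss: (num_rois,) loss of each candidate RoI under the
    # current model; keep the highest-loss (hardest) candidates.
    num_keep = min(num_keep, per_roi_loss.numel())
    _, hard_inds = per_roi_loss.topk(num_keep)
    return hard_inds
```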
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_soft_nms_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

model = dict(
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.05,
            nms=dict(type='soft_nms', iou_threshold=0.5),
            max_per_img=100)))
347
25.769231
72
py
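Soft-NMS decays the scores of overlapping boxes instead of discarding them outright, which tends to help in crowded scenes. The op configured above can also be used standalone via `mmcv.ops` (a sketch assuming mmcv-full 1.x is installed):

```python
import torch
from mmcv.ops import soft_nms

boxes = torch.tensor([[10., 10., 50., 50.],
                      [12., 12., 52., 52.],
                      [100., 100., 150., 150.]])
scores = torch.tensor([0.9, 0.8, 0.7])

# Overlapping boxes keep a decayed score rather than being suppressed.
dets, keep = soft_nms(boxes, scores, iou_threshold=0.5)
print(dets)  # (n, 5): x1, y1, x2, y2, decayed score
```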
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_r50_fpn_tnr-pretrain_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

checkpoint = 'https://download.pytorch.org/models/resnet50-11ad3fa6.pth'
model = dict(
    backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)))

# `lr` and `weight_decay` have been searched to be optimal.
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.0001,
    weight_decay=0.1,
    paramwise_cfg=dict(norm_decay_mult=0., bypass_duplicate=True))
561
30.222222
75
py
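`paramwise_cfg=dict(norm_decay_mult=0.)` zeroes weight decay on normalization layers, which matters when decay is as large as 0.1. In plain PyTorch terms the effect is two parameter groups, sketched below (a sketch only; mmcv's optimizer constructor does the real grouping and also honors `bypass_duplicate`):

```python
import torch
from torch import nn

def build_adamw(model, lr=1e-4, weight_decay=0.1):
    # Parameters owned by norm layers go into a no-decay group.
    norm_types = (nn.BatchNorm2d, nn.GroupNorm, nn.LayerNorm)
    decay, no_decay = [], []
    for module in model.modules():
        for p in module.parameters(recurse=False):
            if not p.requires_grad:
                continue
            (no_decay if isinstance(module, norm_types) else decay).append(p)
    return torch.optim.AdamW(
        [{'params': decay, 'weight_decay': weight_decay},
         {'params': no_decay, 'weight_decay': 0.}], lr=lr)
```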
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
421
27.133333
76
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_2x_coco.py
_base_ = './faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
421
27.133333
76
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_x101_32x4d_fpn_mstrain_3x_coco.py
_base_ = [
    '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
468
26.588235
77
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_x101_32x8d_fpn_mstrain_3x_coco.py
_base_ = [
    '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=8,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=False),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnext101_32x8d')))

# ResNeXt-101-32x8d model trained with Caffe2 at FB,
# so the mean and std need to be changed.
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675],
    std=[57.375, 57.120, 58.395],
    to_rgb=False)

# In mstrain 3x config, img_scale=[(1333, 640), (1333, 800)],
# multiscale_mode='range'
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]

# Use RepeatDataset to speed up training
data = dict(
    train=dict(dataset=dict(pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,923
29.539683
77
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_1x_coco.py
_base_ = './faster_rcnn_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
421
27.133333
76
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_2x_coco.py
_base_ = './faster_rcnn_r50_fpn_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
421
27.133333
76
py
mmdetection
mmdetection-master/configs/faster_rcnn/faster_rcnn_x101_64x4d_fpn_mstrain_3x_coco.py
_base_ = [
    '../common/mstrain_3x_coco.py', '../_base_/models/faster_rcnn_r50_fpn.py'
]
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
468
26.588235
77
py