| column | dtype | stats |
| :-------------- | :------------ | :------------------------ |
| repo | string | lengths 2 to 152, nullable |
| file | string | lengths 15 to 239 |
| code | string | lengths 0 to 58.4M |
| file_length | int64 | 0 to 58.4M |
| avg_line_length | float64 | 0 to 1.81M |
| max_line_length | int64 | 0 to 12.7M |
| extension_type | stringclasses | 364 values |
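Each row of the dataset maps to a plain dict with the columns above. As a minimal sketch of consuming it (the dataset identifier `your-org/code-dataset` is a placeholder, not the real name), rows can be streamed with the `datasets` library:

```python
from datasets import load_dataset  # pip install datasets

# Placeholder identifier; substitute the actual dataset path.
ds = load_dataset('your-org/code-dataset', split='train', streaming=True)

for row in ds:
    # Each row follows the schema above.
    print(row['repo'], row['file'], row['extension_type'],
          row['file_length'], f"{row['avg_line_length']:.1f}")
    break
```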
mmdetection
mmdetection-master/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py
_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
231
32.142857
75
py
mmdetection
mmdetection-master/configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py'  # noqa: E501
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
257
35.857143
101
py
mmdetection
mmdetection-master/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(
            num_classes=1203,
            cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
            loss_cls=dict(
                type='SeesawLoss',
                p=0.8,
                q=2.0,
                num_classes=1203,
                loss_weight=1.0)),
        mask_head=dict(num_classes=1203)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
dataset_type = 'LVISV1Dataset'
data_root = 'data/lvis_v1/'
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_train.json',
        img_prefix=data_root,
        pipeline=train_pipeline),
    val=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_val.json',
        img_prefix=data_root,
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        ann_file=data_root + 'annotations/lvis_v1_val.json',
        img_prefix=data_root,
        pipeline=test_pipeline))
evaluation = dict(interval=24, metric=['bbox', 'segm'])
2,510
32.039474
77
py
mmdetection
mmdetection-master/configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
_base_ = './mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py'
model = dict(
    roi_head=dict(
        mask_head=dict(
            predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
200
32.5
71
py
mmdetection
mmdetection-master/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/lvis_v1_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(
            num_classes=1203,
            cls_predictor_cfg=dict(type='NormedLinear', tempearture=20),
            loss_cls=dict(
                type='SeesawLoss',
                p=0.8,
                q=2.0,
                num_classes=1203,
                loss_weight=1.0)),
        mask_head=dict(num_classes=1203)),
    test_cfg=dict(
        rcnn=dict(
            score_thr=0.0001,
            # LVIS allows up to 300
            max_per_img=300)))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(dataset=dict(pipeline=train_pipeline)))
evaluation = dict(interval=12, metric=['bbox', 'segm'])
1,486
34.404762
77
py
mmdetection
mmdetection-master/configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
_base_ = './mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py'
model = dict(
    roi_head=dict(
        mask_head=dict(
            predictor_cfg=dict(type='NormedConv2d', tempearture=20))))
204
33.166667
75
py
mmdetection
mmdetection-master/configs/seesaw_loss/metafile.yml
Collections:
  - Name: Seesaw Loss
    Metadata:
      Training Data: LVIS
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Softmax
        - RPN
        - Convolution
        - Dense Connections
        - FPN
        - ResNet
        - RoIAlign
        - Seesaw Loss
    Paper:
      URL: https://arxiv.org/abs/2008.10032
      Title: 'Seesaw Loss for Long-Tailed Instance Segmentation'
    README: configs/seesaw_loss/README.md

Models:
  - Name: mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 25.6
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 25.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-a698dd3d.pth
  - Name: mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 25.6
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 25.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a1c11314.pth
  - Name: mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 27.4
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 26.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-8e6e6dd5.pth
  - Name: mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 27.2
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 27.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-a0b59c42.pth
  - Name: mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 27.6
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 26.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-392a804b.pth
  - Name: mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 27.6
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 26.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r50_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-cd0f6a12.pth
  - Name: mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 28.9
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 27.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-e68eb464.pth
  - Name: mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 28.9
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 28.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-1d817139.pth
  - Name: cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 33.1
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 29.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_mstrain_2x_lvis_v1-71e2215e.pth
  - Name: cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 33.0
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 30.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_random_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-8b5a6745.pth
  - Name: cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 30.0
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 29.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_mstrain_2x_lvis_v1-5d8ca2a4.pth
  - Name: cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1
    In Collection: Seesaw Loss
    Config: configs/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: LVIS v1
        Metrics:
          box AP: 32.8
      - Task: Instance Segmentation
        Dataset: LVIS v1
        Metrics:
          mask AP: 30.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/seesaw_loss/cascade_mask_rcnn_r101_fpn_sample1e-3_seesaw_loss_normed_mask_mstrain_2x_lvis_v1-c8551505.pth
7,809
37.284314
166
yml
mmdetection
mmdetection-master/configs/selfsup_pretrain/README.md
# Backbones Trained by Self-Supervised Algorithms

<!-- [OTHERS] -->

## Abstract

Unsupervised image representations have significantly reduced the gap with supervised pretraining, notably with the recent achievements of contrastive learning methods. These contrastive methods typically work online and rely on a large number of explicit pairwise feature comparisons, which is computationally challenging. In this paper, we propose an online algorithm, SwAV, that takes advantage of contrastive methods without requiring to compute pairwise comparisons. Specifically, our method simultaneously clusters the data while enforcing consistency between cluster assignments produced for different augmentations (or views) of the same image, instead of comparing features directly as in contrastive learning. Simply put, we use a swapped prediction mechanism where we predict the cluster assignment of a view from the representation of another view. Our method can be trained with large and small batches and can scale to unlimited amounts of data. Compared to previous contrastive methods, our method is more memory efficient since it does not require a large memory bank or a special momentum network. In addition, we also propose a new data augmentation strategy, multi-crop, that uses a mix of views with different resolutions in place of two full-resolution views, without increasing the memory or compute requirements much. We validate our findings by achieving 75.3% top-1 accuracy on ImageNet with ResNet-50, as well as surpassing supervised pretraining on all the considered transfer tasks.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143997246-ac40fd8a-9b48-4ff5-a0d9-ba10e1e333d2.png"/>
</div>

We present Momentum Contrast (MoCo) for unsupervised visual representation learning. From a perspective on contrastive learning as dictionary look-up, we build a dynamic dictionary with a queue and a moving-averaged encoder. This enables building a large and consistent dictionary on-the-fly that facilitates contrastive unsupervised learning. MoCo provides competitive results under the common linear protocol on ImageNet classification. More importantly, the representations learned by MoCo transfer well to downstream tasks. MoCo can outperform its supervised pre-training counterpart in 7 detection/segmentation tasks on PASCAL VOC, COCO, and other datasets, sometimes surpassing it by large margins. This suggests that the gap between unsupervised and supervised representation learning has been largely closed in many vision tasks.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143997315-5ff824d4-1007-4b59-8952-bc5a2c0bfd78.png" height="300"/>
</div>

## Usage

To use a backbone pretrained with a self-supervised method, there are two steps:

1. Download the model and convert it to a PyTorch-style checkpoint supported by MMDetection
2. Modify the config and adjust the training settings accordingly

### Convert model

For more general usage, we also provide the script `selfsup2mmdet.py` in the tools directory to convert the keys of models pretrained by different self-supervised methods into PyTorch-style checkpoints used in MMDetection.

```bash
python -u tools/model_converters/selfsup2mmdet.py ${PRETRAIN_PATH} ${STORE_PATH} --selfsup ${method}
```

This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
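To make the key mapping concrete, here is a minimal sketch of what such a conversion does for a MoCo checkpoint. The real logic lives in `tools/model_converters/selfsup2mmdet.py`; the `module.encoder_q.` prefix matches MoCo's released checkpoints, and the rest is an illustrative assumption:

```python
import torch

# Sketch only: keep the query-encoder backbone weights and strip the
# MoCo prefix so the keys match a torchvision-style ResNet.
ckpt = torch.load('./moco_v2_800ep_pretrain.pth.tar', map_location='cpu')

prefix = 'module.encoder_q.'
new_state_dict = {
    k[len(prefix):]: v
    for k, v in ckpt['state_dict'].items()
    # Drop the projection head (fc.*); detection only needs the backbone.
    if k.startswith(prefix) and not k.startswith(prefix + 'fc')
}
torch.save(dict(state_dict=new_state_dict), 'mocov2_r50_800ep_pretrain.pth')
```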
For example, to use a ResNet-50 backbone released by MoCo, you can download it from [here](https://dl.fbaipublicfiles.com/moco/moco_checkpoints/moco_v2_800ep/moco_v2_800ep_pretrain.pth.tar) and use the following command:

```bash
python -u tools/model_converters/selfsup2mmdet.py ./moco_v2_800ep_pretrain.pth.tar mocov2_r50_800ep_pretrain.pth --selfsup moco
```

To use the ResNet-50 backbone released by SwAV, you can download it from [here](https://dl.fbaipublicfiles.com/deepcluster/swav_800ep_pretrain.pth.tar).

### Modify config

The backbone requires SyncBN, and `frozen_stages` needs to be changed. A config that uses the MoCo backbone is shown below:

```python
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

model = dict(
    pretrained='./mocov2_r50_800ep_pretrain.pth',
    backbone=dict(
        frozen_stages=0,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False))
```

## Results and Models

| Method | Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :-------: | :-----------------------------------------------------------------: | :-----: | :------------: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| Mask RCNN | [R50 by MoCo v2](./mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py) | pytorch | 1x | | | 38.0 | 34.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco_20210604_114614-a8b63483.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco_20210604_114614.log.json) |
| Mask RCNN | [R50 by MoCo v2](./mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py) | pytorch | multi-scale 2x | | | 40.8 | 36.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco_20210605_163717-d95df20a.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco_20210605_163717.log.json) |
| Mask RCNN | [R50 by SwAV](./mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py) | pytorch | 1x | | | 39.1 | 35.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco/mask_rcnn_r50_fpn_swav-pretrain_1x_coco_20210604_114640-7b9baf28.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco/mask_rcnn_r50_fpn_swav-pretrain_1x_coco_20210604_114640.log.json) |
| Mask RCNN | [R50 by SwAV](./mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py) | pytorch | multi-scale 2x | | | 41.3 | 37.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco_20210605_163717-08e26fca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco_20210605_163717.log.json) |

### Notice

1. We only provide single-scale 1x and multi-scale 2x configs as examples to show how to use backbones trained by self-supervised algorithms. We will try to reproduce the results from the corresponding papers using the released backbones in the future. Please stay tuned.

## Citation

We support applying backbones pre-trained by different self-supervised methods in detection systems and provide their results on Mask R-CNN. The pre-trained models are converted from [MoCo](https://github.com/facebookresearch/moco) and downloaded from [SwAV](https://github.com/facebookresearch/swav).

For SwAV, please cite

```latex
@inproceedings{caron2020unsupervised,
  title={Unsupervised Learning of Visual Features by Contrasting Cluster Assignments},
  author={Caron, Mathilde and Misra, Ishan and Mairal, Julien and Goyal, Priya and Bojanowski, Piotr and Joulin, Armand},
  booktitle={Proceedings of Advances in Neural Information Processing Systems (NeurIPS)},
  year={2020}
}
```

For MoCo, please cite

```latex
@Article{he2019moco,
  author  = {Kaiming He and Haoqi Fan and Yuxin Wu and Saining Xie and Ross Girshick},
  title   = {Momentum Contrast for Unsupervised Visual Representation Learning},
  journal = {arXiv preprint arXiv:1911.05722},
  year    = {2019},
}

@Article{chen2020mocov2,
  author  = {Xinlei Chen and Haoqi Fan and Ross Girshick and Kaiming He},
  title   = {Improved Baselines with Momentum Contrastive Learning},
  journal = {arXiv preprint arXiv:2003.04297},
  year    = {2020},
}
```
10,082
90.663636
1,510
md
mmdetection
mmdetection-master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

model = dict(
    backbone=dict(
        frozen_stages=0,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))
418
28.928571
78
py
mmdetection
mmdetection-master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_mocov2-pretrain_ms-2x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]

model = dict(
    backbone=dict(
        frozen_stages=0,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained', checkpoint='./mocov2_r50_800ep_pretrain.pth')))

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
data = dict(train=dict(pipeline=train_pipeline))
1,072
31.515152
78
py
mmdetection
mmdetection-master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_1x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

model = dict(
    backbone=dict(
        frozen_stages=0,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))
416
28.785714
76
py
mmdetection
mmdetection-master/configs/selfsup_pretrain/mask_rcnn_r50_fpn_swav-pretrain_ms-2x_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py'
]

model = dict(
    backbone=dict(
        frozen_stages=0,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        norm_eval=False,
        init_cfg=dict(
            type='Pretrained', checkpoint='./swav_800ep_pretrain.pth.tar')))

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks'])
]
data = dict(train=dict(pipeline=train_pipeline))
1,070
31.454545
77
py
mmdetection
mmdetection-master/configs/simple_copy_paste/README.md
# SimpleCopyPaste

> [Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation](https://arxiv.org/abs/2012.07177)

<!-- [ALGORITHM] -->

## Abstract

Building instance segmentation models that are data-efficient and can handle rare object categories is an important challenge in computer vision. Leveraging data augmentations is a promising direction towards addressing this challenge. Here, we perform a systematic study of the Copy-Paste augmentation (\[13, 12\]) for instance segmentation where we randomly paste objects onto an image. Prior studies on Copy-Paste relied on modeling the surrounding visual context for pasting the objects. However, we find that the simple mechanism of pasting objects randomly is good enough and can provide solid gains on top of strong baselines. Furthermore, we show Copy-Paste is additive with semi-supervised methods that leverage extra data through pseudo labeling (e.g. self-training). On COCO instance segmentation, we achieve 49.1 mask AP and 57.3 box AP, an improvement of +0.6 mask AP and +1.5 box AP over the previous state-of-the-art. We further demonstrate that Copy-Paste can lead to significant improvements on the LVIS benchmark. Our baseline model outperforms the LVIS 2020 Challenge winning entry by +3.6 mask AP on rare categories.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/161843866-c5b769da-58b2-4c1f-8078-db4a4ded3881.png"/>
</div>

## Results and Models

### Mask R-CNN with Standard Scale Jittering (SSJ) and Simple Copy-Paste (SCP)

Standard Scale Jittering (SSJ) resizes and crops an image with a resize range of 0.8 to 1.25 of the original image size, and Simple Copy-Paste (SCP) selects a random subset of objects from one of the images and pastes them onto the other image; a minimal sketch of the pasting step follows the table below.

| Backbone | Training schedule | Augmentation | batch size | box AP | mask AP | Config | Download |
| :------: | :---------------: | :----------: | :--------: | :----: | :-----: | :----: | :------: |
| R-50 | 90k | SSJ | 64 | 43.3 | 39.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409-f79c84c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409.log.json) |
| R-50 | 90k | SSJ+SCP | 64 | 43.8 | 39.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307-6bc5726f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307.log.json) |
| R-50 | 270k | SSJ | 64 | 43.5 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940-33a100c5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940.log.json) |
| R-50 | 270k | SSJ+SCP | 64 | 45.1 | 40.3 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229-80ee90b7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229.log.json) |

## Citation

```latex
@inproceedings{ghiasi2021simple,
  title={Simple copy-paste is a strong data augmentation method for instance segmentation},
  author={Ghiasi, Golnaz and Cui, Yin and Srinivas, Aravind and Qian, Rui and Lin, Tsung-Yi and Cubuk, Ekin D and Le, Quoc V and Zoph, Barret},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={2918--2928},
  year={2021}
}
```
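The pasting mechanism itself is simple enough to sketch in a few lines. This is a toy illustration of the idea described above, not MMDetection's `CopyPaste` pipeline (which also remaps boxes, masks, and labels and handles occlusion of existing instances):

```python
import numpy as np

def simple_copy_paste(dst_img, src_img, src_masks, paste_prob=0.5, rng=None):
    """Paste a random subset of source instances onto the destination image.

    dst_img, src_img: HxWx3 uint8 arrays of the same shape (SSJ resizing is
    assumed to have happened already); src_masks: list of HxW bool arrays.
    """
    rng = rng or np.random.default_rng()
    out = dst_img.copy()
    pasted = [m for m in src_masks if rng.random() < paste_prob]
    for mask in pasted:
        out[mask] = src_img[mask]  # later masks occlude earlier ones
    return out, pasted
```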
6,383
162.692308
1,136
md
mmdetection
mmdetection-master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    # 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
    '../common/ssj_270k_coco_instance.py',
]

norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
    backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg),
    neck=dict(norm_cfg=norm_cfg),
    rpn_head=dict(num_convs=2),  # leads to 0.1+ mAP
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=head_norm_cfg),
        mask_head=dict(norm_cfg=head_norm_cfg)))
813
37.761905
76
py
mmdetection
mmdetection-master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py'

# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
    warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
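The step milestones in the comment are just those fractions of `max_iters`; a quick sanity check (plain Python, not part of the config):

```python
max_iters = 90000
steps = [round(max_iters * f) for f in (0.9, 0.95, 0.975)]
assert steps == [81000, 85500, 87750]
```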
346
42.375
71
py
mmdetection
mmdetection-master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    # 270k iterations with batch_size 64 is roughly equivalent to 144 epochs
    '../common/ssj_scp_270k_coco_instance.py'
]

norm_cfg = dict(type='SyncBN', requires_grad=True)
# Use MMSyncBN that handles empty tensor in head. It can be changed to
# SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed.
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)
model = dict(
    backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg),
    neck=dict(norm_cfg=norm_cfg),
    rpn_head=dict(num_convs=2),  # leads to 0.1+ mAP
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=head_norm_cfg),
        mask_head=dict(norm_cfg=head_norm_cfg)))
816
37.904762
76
py
mmdetection
mmdetection-master/configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py'

# lr steps at [0.9, 0.95, 0.975] of the maximum iterations
lr_config = dict(
    warmup_iters=500, warmup_ratio=0.067, step=[81000, 85500, 87750])
# 90k iterations with batch_size 64 is roughly equivalent to 48 epochs
runner = dict(type='IterBasedRunner', max_iters=90000)
350
42.875
75
py
mmdetection
mmdetection-master/configs/simple_copy_paste/metafile.yml
Collections:
  - Name: SimpleCopyPaste
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 32x A100 GPUs
      Architecture:
        - Softmax
        - RPN
        - Convolution
        - Dense Connections
        - FPN
        - ResNet
        - RoIAlign
    Paper:
      URL: https://arxiv.org/abs/2012.07177
      Title: "Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation"
    README: configs/simple_copy_paste/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.25.0/mmdet/datasets/pipelines/transforms.py#L2762
      Version: v2.25.0

Models:
  - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco
    In Collection: SimpleCopyPaste
    Config: configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco.py
    Metadata:
      Training Memory (GB): 7.2
      Iterations: 270000
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.5
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_270k_coco_20220324_182940-33a100c5.pth
  - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco
    In Collection: SimpleCopyPaste
    Config: configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco.py
    Metadata:
      Training Memory (GB): 7.2
      Iterations: 90000
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.3
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_32x2_90k_coco_20220316_181409-f79c84c5.pth
  - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco
    In Collection: SimpleCopyPaste
    Config: configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py
    Metadata:
      Training Memory (GB): 7.2
      Iterations: 270000
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 45.1
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 40.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco_20220324_201229-80ee90b7.pth
  - Name: mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco
    In Collection: SimpleCopyPaste
    Config: configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco.py
    Metadata:
      Training Memory (GB): 7.2
      Iterations: 90000
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.8
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_90k_coco_20220316_181307-6bc5726f.pth
3,526
36.924731
231
yml
mmdetection
mmdetection-master/configs/solo/README.md
# SOLO

> [SOLO: Segmenting Objects by Locations](https://arxiv.org/abs/1912.04488)

<!-- [ALGORITHM] -->

## Abstract

We present a new, embarrassingly simple approach to instance segmentation in images. Compared to many other dense prediction tasks, e.g., semantic segmentation, it is the arbitrary number of instances that have made instance segmentation much more challenging. In order to predict a mask for each instance, mainstream approaches either follow the 'detect-then-segment' strategy as used by Mask R-CNN, or predict category masks first then use clustering techniques to group pixels into individual instances. We view the task of instance segmentation from a completely new perspective by introducing the notion of "instance categories", which assigns categories to each pixel within an instance according to the instance's location and size, thus nicely converting instance mask segmentation into a classification-solvable problem. Now instance segmentation is decomposed into two classification tasks. We demonstrate a much simpler and more flexible instance segmentation framework with strong performance, achieving on-par accuracy with Mask R-CNN and outperforming recent single-shot instance segmenters in accuracy. We hope that this very simple and strong framework can serve as a baseline for many instance-level recognition tasks besides instance segmentation.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143998371-10e6f14b-4506-481d-91a7-5f8f58213307.png"/>
</div>

## Results and Models

### SOLO

| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download |
| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :------: |
| R-50 | pytorch | N | 1x | 8.0 | 14.0 | 33.1 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055.log.json) |
| R-50 | pytorch | Y | 3x | 7.4 | 14.0 | 35.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353.log.json) |

### Decoupled SOLO

| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download |
| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :------: |
| R-50 | pytorch | N | 1x | 7.8 | 12.5 | 33.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348.log.json) |
| R-50 | pytorch | Y | 3x | 7.9 | 12.5 | 36.7 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504.log.json) |

- Decoupled SOLO has a decoupled head, which is different from the SOLO head. It is an efficient variant of SOLO with equivalent accuracy. Please refer to the corresponding config files for details.

### Decoupled Light SOLO

| Backbone | Style | MS train | Lr schd | Mem (GB) | Inf time (fps) | mask AP | Download |
| :------: | :-----: | :------: | :-----: | :------: | :------------: | :-----: | :------: |
| R-50 | pytorch | Y | 3x | 2.2 | 31.2 | 32.9 | [model](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703.log.json) |

- Decoupled Light SOLO uses a decoupled structure similar to the Decoupled SOLO head, with a light-weight head and a smaller input size. Please refer to the corresponding config files for details.

## Citation

```latex
@inproceedings{wang2020solo,
  title     = {{SOLO}: Segmenting Objects by Locations},
  author    = {Wang, Xinlong and Kong, Tao and Shen, Chunhua and Jiang, Yuning and Li, Lei},
  booktitle = {Proc. Eur. Conf. Computer Vision (ECCV)},
  year      = {2020}
}
```
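The "instance categories" idea from the abstract reduces to indexing an S x S grid by the instance's center location. A minimal sketch of that assignment (illustrative only; SOLO's real target assignment also uses the center region and per-level scale ranges):

```python
def location_category(cx, cy, img_w, img_h, num_grid):
    """Map an instance center (cx, cy) in an img_w x img_h image to the
    channel index of its location category on an S x S grid."""
    i = min(int(cx / img_w * num_grid), num_grid - 1)  # grid column
    j = min(int(cy / img_h * num_grid), num_grid - 1)  # grid row
    return j * num_grid + i

# e.g. an instance centered at (400, 300) in a 1333x800 image, S = 40:
assert location_category(400, 300, 1333, 800, 40) == 15 * 40 + 12
```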
6,475
116.745455
1,258
md
mmdetection
mmdetection-master/configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py
_base_ = './decoupled_solo_r50_fpn_3x_coco.py'

# model settings
model = dict(
    mask_head=dict(
        type='DecoupledSOLOLightHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=4,
        feat_channels=256,
        strides=[8, 8, 16, 32, 32],
        scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)),
        pos_scale=0.2,
        num_grids=[40, 36, 24, 16, 12],
        cls_down_index=0,
        loss_mask=dict(
            type='DiceLoss', use_sigmoid=True, activate=False,
            loss_weight=3.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(852, 512), (852, 480), (852, 448), (852, 416),
                   (852, 384), (852, 352)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(852, 512),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
2,062
31.234375
78
py
mmdetection
mmdetection-master/configs/solo/decoupled_solo_r50_fpn_1x_coco.py
_base_ = [
    './solo_r50_fpn_1x_coco.py',
]
# model settings
model = dict(
    mask_head=dict(
        type='DecoupledSOLOHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=7,
        feat_channels=256,
        strides=[8, 8, 16, 32, 32],
        scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
        pos_scale=0.2,
        num_grids=[40, 36, 24, 16, 12],
        cls_down_index=0,
        loss_mask=dict(
            type='DiceLoss', use_sigmoid=True, activate=False,
            loss_weight=3.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
optimizer = dict(type='SGD', lr=0.01)
822
27.37931
78
py
mmdetection
mmdetection-master/configs/solo/decoupled_solo_r50_fpn_3x_coco.py
_base_ = './solo_r50_fpn_3x_coco.py'

# model settings
model = dict(
    mask_head=dict(
        type='DecoupledSOLOHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=7,
        feat_channels=256,
        strides=[8, 8, 16, 32, 32],
        scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
        pos_scale=0.2,
        num_grids=[40, 36, 24, 16, 12],
        cls_down_index=0,
        loss_mask=dict(
            type='DiceLoss', use_sigmoid=True, activate=False,
            loss_weight=3.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)))
775
28.846154
78
py
mmdetection
mmdetection-master/configs/solo/metafile.yml
Collections:
  - Name: SOLO
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - FPN
        - Convolution
        - ResNet
    Paper: https://arxiv.org/abs/1912.04488
    README: configs/solo/README.md

Models:
  - Name: decoupled_solo_r50_fpn_1x_coco
    In Collection: SOLO
    Config: configs/solo/decoupled_solo_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.8
      Epochs: 12
      inference time (ms/im):
        - value: 116.4
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (1333, 800)
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 33.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_1x_coco/decoupled_solo_r50_fpn_1x_coco_20210820_233348-6337c589.pth
  - Name: decoupled_solo_r50_fpn_3x_coco
    In Collection: SOLO
    Config: configs/solo/decoupled_solo_r50_fpn_3x_coco.py
    Metadata:
      Training Memory (GB): 7.9
      Epochs: 36
      inference time (ms/im):
        - value: 117.2
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (1333, 800)
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 36.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_r50_fpn_3x_coco/decoupled_solo_r50_fpn_3x_coco_20210821_042504-7b3301ec.pth
  - Name: decoupled_solo_light_r50_fpn_3x_coco
    In Collection: SOLO
    Config: configs/solo/decoupled_solo_light_r50_fpn_3x_coco.py
    Metadata:
      Training Memory (GB): 2.2
      Epochs: 36
      inference time (ms/im):
        - value: 35.0
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (852, 512)
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 32.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/decoupled_solo_light_r50_fpn_3x_coco/decoupled_solo_light_r50_fpn_3x_coco_20210906_142703-e70e226f.pth
  - Name: solo_r50_fpn_3x_coco
    In Collection: SOLO
    Config: configs/solo/solo_r50_fpn_3x_coco.py
    Metadata:
      Training Memory (GB): 7.4
      Epochs: 36
      inference time (ms/im):
        - value: 94.2
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (1333, 800)
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 35.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_3x_coco/solo_r50_fpn_3x_coco_20210901_012353-11d224d7.pth
  - Name: solo_r50_fpn_1x_coco
    In Collection: SOLO
    Config: configs/solo/solo_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 8.0
      Epochs: 12
      inference time (ms/im):
        - value: 95.1
          hardware: V100
          backend: PyTorch
          batch size: 1
          mode: FP32
          resolution: (1333, 800)
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 33.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solo/solo_r50_fpn_1x_coco/solo_r50_fpn_1x_coco_20210821_035055-2290a6b8.pth
3,420
28.491379
168
yml
mmdetection
mmdetection-master/configs/solo/solo_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_instance.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# model settings
model = dict(
    type='SOLO',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
        style='pytorch'),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=0,
        num_outs=5),
    mask_head=dict(
        type='SOLOHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=7,
        feat_channels=256,
        strides=[8, 8, 16, 32, 32],
        scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)),
        pos_scale=0.2,
        num_grids=[40, 36, 24, 16, 12],
        cls_down_index=0,
        loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
        loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)),
    # model training and testing settings
    test_cfg=dict(
        nms_pre=500,
        score_thr=0.1,
        mask_thr=0.5,
        filter_thr=0.05,
        kernel='gaussian',  # gaussian/linear
        sigma=2.0,
        max_per_img=100))

# optimizer
optimizer = dict(type='SGD', lr=0.01)
1,523
27.222222
78
py
mmdetection
mmdetection-master/configs/solo/solo_r50_fpn_3x_coco.py
_base_ = './solo_r50_fpn_1x_coco.py'

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    dict(
        type='Resize',
        img_scale=[(1333, 800), (1333, 768), (1333, 736), (1333, 704),
                   (1333, 672), (1333, 640)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
data = dict(train=dict(pipeline=train_pipeline))

lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=1.0 / 3,
    step=[27, 33])
runner = dict(type='EpochBasedRunner', max_epochs=36)
942
31.517241
77
py
mmdetection
mmdetection-master/configs/solov2/README.md
# SOLOv2

> [SOLOv2: Dynamic and Fast Instance Segmentation](https://arxiv.org/abs/2003.10152)

<!-- [ALGORITHM] -->

## Abstract

In this work, we aim at building a simple, direct, and fast instance segmentation framework with strong performance. We follow the principle of the SOLO method of Wang et al. "SOLO: segmenting objects by locations". Importantly, we take one step further by dynamically learning the mask head of the object segmenter such that the mask head is conditioned on the location. Specifically, the mask branch is decoupled into a mask kernel branch and mask feature branch, which are responsible for learning the convolution kernel and the convolved features respectively. Moreover, we propose Matrix NMS (non maximum suppression) to significantly reduce the inference time overhead due to NMS of masks. Our Matrix NMS performs NMS with parallel matrix operations in one shot, and yields better results. We demonstrate a simple direct instance segmentation system, outperforming a few state-of-the-art methods in both speed and accuracy. A light-weight version of SOLOv2 executes at 31.3 FPS and yields 37.1% AP. Moreover, our state-of-the-art results in object detection (from our mask byproduct) and panoptic segmentation show the potential to serve as a new strong baseline for many instance-level recognition tasks besides instance segmentation.

<div align=center>
<img src="https://user-images.githubusercontent.com/48282753/167235090-f20dab74-43a5-44ed-9f11-4e5f08866f45.png"/>
</div>

## Results and Models

### SOLOv2

| Backbone | Style | MS train | Lr schd | Mem (GB) | mask AP | Config | Download |
| :--------: | :-----: | :------: | :-----: | :------: | :-----: | :----: | :------: |
| R-50 | pytorch | N | 1x | 5.1 | 34.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858.log.json) |
| R-50 | pytorch | Y | 3x | 5.1 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r50_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856.log.json) |
| R-101 | pytorch | Y | 3x | 6.9 | 39.1 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r101_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119-c559a076.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119.log.json) |
| R-101(DCN) | pytorch | Y | 3x | 7.1 | 41.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734.log.json) |
| X-101(DCN) | pytorch | Y | 3x | 11.3 | 42.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337.log.json) |

### Light SOLOv2

| Backbone | Style | MS train | Lr schd | Mem (GB) | mask AP | Config | Download |
| :------: | :-----: | :------: | :-----: | :------: | :-----: | :----: | :------: |
| R-18 | pytorch | Y | 3x | 9.1 | 29.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_light_r18_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717.log.json) |
| R-34 | pytorch | Y | 3x | 9.3 | 31.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_light_r34_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839-e51659d3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839.log.json) |
| R-50 | pytorch | Y | 3x | 9.9 | 33.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/solov2/solov2_light_r50_fpn_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256.log.json) |

## Citation

```latex
@article{wang2020solov2,
  title={SOLOv2: Dynamic and Fast Instance Segmentation},
  author={Wang, Xinlong and Zhang, Rufeng and Kong, Tao and Li, Lei and Shen, Chunhua},
  journal={Proc. Advances in Neural Information Processing Systems (NeurIPS)},
  year={2020}
}
```
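Matrix NMS, described in the abstract above, replaces sequential suppression with one pass of matrix operations over a pairwise mask-IoU matrix. A minimal sketch of the Gaussian-kernel variant follows (MMDetection ships its own implementation as `mask_matrix_nms`; treat this as an illustration of the formula, not that code):

```python
import torch

def matrix_nms(masks, scores, sigma=2.0):
    """masks: (N, H, W) bool tensor; scores: (N,) tensor.
    Returns score-decayed copies, highest score first, plus the sort order."""
    order = scores.argsort(descending=True)
    scores, flat = scores[order], masks[order].flatten(1).float()
    inter = flat @ flat.t()                      # pairwise intersections
    areas = flat.sum(1)
    iou = (inter / (areas[:, None] + areas[None, :] - inter)).triu(diagonal=1)
    # For each mask, the largest IoU it has with any higher-scored mask.
    compensate = iou.max(0).values.expand_as(iou).t()
    # Gaussian decay: heavily overlapped masks get their scores pushed down.
    decay = (torch.exp(-sigma * iou**2) /
             torch.exp(-sigma * compensate**2)).min(0).values
    return scores * decay, order
```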
7,591
125.533333
478
md
mmdetection
mmdetection-master/configs/solov2/metafile.yml
Collections:
  - Name: SOLOv2
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x A100 GPUs
      Architecture:
        - FPN
        - Convolution
        - ResNet
    Paper: https://arxiv.org/abs/2003.10152
    README: configs/solov2/README.md

Models:
  - Name: solov2_r50_fpn_1x_coco
    In Collection: SOLOv2
    Config: configs/solov2/solov2_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 5.1
      Epochs: 12
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 34.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_1x_coco/solov2_r50_fpn_1x_coco_20220512_125858-a357fa23.pth
  - Name: solov2_r50_fpn_3x_coco
    In Collection: SOLOv2
    Config: configs/solov2/solov2_r50_fpn_3x_coco.py
    Metadata:
      Training Memory (GB): 5.1
      Epochs: 36
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 37.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r50_fpn_3x_coco/solov2_r50_fpn_3x_coco_20220512_125856-fed092d4.pth
  - Name: solov2_r101_fpn_3x_coco
    In Collection: SOLOv2
    Config: configs/solov2/solov2_r101_fpn_3x_coco.py
    Metadata:
      Training Memory (GB): 6.9
      Epochs: 36
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_fpn_3x_coco/solov2_r101_fpn_3x_coco_20220511_095119-c559a076.pth
  - Name: solov2_r101_dcn_fpn_3x_coco
    In Collection: SOLOv2
    Config: configs/solov2/solov2_r101_dcn_fpn_3x_coco.py
    Metadata:
      Training Memory (GB): 7.1
      Epochs: 36
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 41.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_r101_dcn_fpn_3x_coco/solov2_r101_dcn_fpn_3x_coco_20220513_214734-16c966cb.pth
  - Name: solov2_x101_dcn_fpn_3x_coco
    In Collection: SOLOv2
    Config: configs/solov2/solov2_x101_dcn_fpn_3x_coco.py
    Metadata:
      Training Memory (GB): 11.3
      Epochs: 36
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 42.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_x101_dcn_fpn_3x_coco/solov2_x101_dcn_fpn_3x_coco_20220513_214337-aef41095.pth
  - Name: solov2_light_r18_fpn_3x_coco
    In Collection: SOLOv2
    Config: configs/solov2/solov2_light_r18_fpn_3x_coco.py
    Metadata:
      Training Memory (GB): 9.1
      Epochs: 36
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 29.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r18_fpn_3x_coco/solov2_light_r18_fpn_3x_coco_20220511_083717-75fa355b.pth
  - Name: solov2_light_r34_fpn_3x_coco
    In Collection: SOLOv2
    Config: configs/solov2/solov2_light_r34_fpn_3x_coco.py
    Metadata:
      Training Memory (GB): 9.3
      Epochs: 36
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 31.9
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r34_fpn_3x_coco/solov2_light_r34_fpn_3x_coco_20220511_091839-e51659d3.pth
  - Name: solov2_light_r50_fpn_3x_coco
    In Collection: SOLOv2
    Config: configs/solov2/solov2_light_r50_fpn_3x_coco.py
    Metadata:
      Training Memory (GB): 9.9
      Epochs: 36
    Results:
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 33.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/solov2/solov2_light_r50_fpn_3x_coco/solov2_light_r50_fpn_3x_coco_20220512_165256-c93a6074.pth
3,914
31.625
154
yml
mmdetection
mmdetection-master/configs/solov2/solov2_light_r18_fpn_3x_coco.py
_base_ = 'solov2_light_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( depth=18, init_cfg=dict(checkpoint='torchvision://resnet18')), neck=dict(in_channels=[64, 128, 256, 512]))
213
25.75
70
py
mmdetection
mmdetection-master/configs/solov2/solov2_light_r34_fpn_3x_coco.py
_base_ = 'solov2_light_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( depth=34, init_cfg=dict(checkpoint='torchvision://resnet34')), neck=dict(in_channels=[64, 128, 256, 512]))
213
25.75
70
py
mmdetection
mmdetection-master/configs/solov2/solov2_light_r50_dcn_fpn_3x_coco.py
_base_ = 'solov2_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)), mask_head=dict( feat_channels=256, stacked_convs=3, scale_ranges=((1, 64), (32, 128), (64, 256), (128, 512), (256, 2048)), mask_feature_head=dict(out_channels=128), dcn_cfg=dict(type='DCNv2'), dcn_apply_to_all_conv=False)) # light solov2 head # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[27, 33]) runner = dict(type='EpochBasedRunner', max_epochs=36) # data img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384), (768, 352)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(448, 768), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,991
30.619048
78
py
mmdetection
mmdetection-master/configs/solov2/solov2_light_r50_fpn_3x_coco.py
_base_ = 'solov2_r50_fpn_1x_coco.py' # model settings model = dict( mask_head=dict( stacked_convs=2, feat_channels=256, scale_ranges=((1, 56), (28, 112), (56, 224), (112, 448), (224, 896)), mask_feature_head=dict(out_channels=128))) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[27, 33]) runner = dict(type='EpochBasedRunner', max_epochs=36) # data img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(768, 512), (768, 480), (768, 448), (768, 416), (768, 384), (768, 352)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(448, 768), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,747
29.137931
78
py
mmdetection
mmdetection-master/configs/solov2/solov2_r101_dcn_fpn_3x_coco.py
_base_ = 'solov2_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( depth=101, init_cfg=dict(checkpoint='torchvision://resnet101'), dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True)), mask_head=dict( mask_feature_head=dict(conv_cfg=dict(type='DCNv2')), dcn_cfg=dict(type='DCNv2'), dcn_apply_to_all_conv=True))
452
31.357143
78
py
mmdetection
mmdetection-master/configs/solov2/solov2_r101_fpn_3x_coco.py
_base_ = 'solov2_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( depth=101, init_cfg=dict(checkpoint='torchvision://resnet101')))
161
22.142857
72
py
mmdetection
mmdetection-master/configs/solov2/solov2_r50_fpn_1x_coco.py
_base_ = [ '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # model settings model = dict( type='SOLOv2', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), style='pytorch'), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=0, num_outs=5), mask_head=dict( type='SOLOV2Head', num_classes=80, in_channels=256, feat_channels=512, stacked_convs=4, strides=[8, 8, 16, 32, 32], scale_ranges=((1, 96), (48, 192), (96, 384), (192, 768), (384, 2048)), pos_scale=0.2, num_grids=[40, 36, 24, 16, 12], cls_down_index=0, mask_feature_head=dict( feat_channels=128, start_level=0, end_level=3, out_channels=256, mask_stride=4, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True)), loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0)), # model training and testing settings test_cfg=dict( nms_pre=500, score_thr=0.1, mask_thr=0.5, filter_thr=0.05, kernel='gaussian', # gaussian/linear sigma=2.0, max_per_img=100)) # optimizer optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
1,825
28.451613
78
py
mmdetection
mmdetection-master/configs/solov2/solov2_r50_fpn_3x_coco.py
_base_ = 'solov2_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 800), (1333, 768), (1333, 736), (1333, 704), (1333, 672), (1333, 640)], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline)) lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=1.0 / 3, step=[27, 33]) runner = dict(type='EpochBasedRunner', max_epochs=36)
942
31.517241
77
py
mmdetection
mmdetection-master/configs/solov2/solov2_x101_dcn_fpn_3x_coco.py
_base_ = 'solov2_r50_fpn_3x_coco.py' # model settings model = dict( backbone=dict( type='ResNeXt', depth=101, groups=64, base_width=4, dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False), stage_with_dcn=(False, True, True, True), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')), mask_head=dict( mask_feature_head=dict(conv_cfg=dict(type='DCNv2')), dcn_cfg=dict(type='DCNv2'), dcn_apply_to_all_conv=True))
555
29.888889
78
py
mmdetection
mmdetection-master/configs/sparse_rcnn/README.md
# Sparse R-CNN

> [Sparse R-CNN: End-to-End Object Detection with Learnable Proposals](https://arxiv.org/abs/2011.12450)

<!-- [ALGORITHM] -->

## Abstract

We present Sparse R-CNN, a purely sparse method for object detection in images. Existing works on object detection heavily rely on dense object candidates, such as k anchor boxes pre-defined on all grids of image feature map of size H×W. In our method, however, a fixed sparse set of learned object proposals, total length of N, are provided to object recognition head to perform classification and location. By eliminating HWk (up to hundreds of thousands) hand-designed object candidates to N (e.g. 100) learnable proposals, Sparse R-CNN completely avoids all efforts related to object candidates design and many-to-one label assignment. More importantly, final predictions are directly output without non-maximum suppression post-procedure. Sparse R-CNN demonstrates accuracy, run-time and training convergence performance on par with the well-established detector baselines on the challenging COCO dataset, e.g., achieving 45.0 AP in standard 3× training schedule and running at 22 fps using ResNet-50 FPN model. We hope our work could inspire re-thinking the convention of dense prior in object detectors.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143998489-8a5a687d-ceec-4590-8347-708e427e7dfe.png" height="300"/>
</div>

## Results and Models

| Model | Backbone | Style | Lr schd | Number of Proposals | Multi-Scale | RandomCrop | box AP | Config | Download |
| :----------: | :-------: | :-----: | :-----: | :-----------------: | :---------: | :--------: | :----: | :----: | :------: |
| Sparse R-CNN | R-50-FPN | pytorch | 1x | 100 | False | False | 37.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.log.json) |
| Sparse R-CNN | R-50-FPN | pytorch | 3x | 100 | True | False | 42.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.log.json) |
| Sparse R-CNN | R-50-FPN | pytorch | 3x | 300 | True | True | 45.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.log.json) |
| Sparse R-CNN | R-101-FPN | pytorch | 3x | 100 | True | False | 44.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.log.json) |
| Sparse R-CNN | R-101-FPN | pytorch | 3x | 300 | True | True | 46.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.log.json) |

### Notes

We observe about 0.3 AP noise, especially when using ResNet-101 as the backbone.

## Citation

```latex
@article{peize2020sparse,
  title   = {{SparseR-CNN}: End-to-End Object Detection with Learnable Proposals},
  author  = {Peize Sun and Rufeng Zhang and Yi Jiang and Tao Kong and Chenfeng Xu and Wei Zhan and Masayoshi Tomizuka and Lei Li and Zehuan Yuan and Changhu Wang and Ping Luo},
  journal = {arXiv preprint arXiv:2011.12450},
  year    = {2020}
}
```
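The one-to-one label assignment mentioned in the abstract is realized in these configs by a `HungarianAssigner` (see `sparse_rcnn_r50_fpn_1x_coco.py` further down). As a rough illustration of the idea only, not of MMDetection's internals, the sketch below matches a handful of proposals to ground-truth boxes by minimizing a cost matrix with the Hungarian algorithm; the cost values here are random placeholders.

```python
import numpy as np
from scipy.optimize import linear_sum_assignment

# Toy cost matrix: rows are 5 proposals, columns are 2 ground-truth boxes.
# In Sparse R-CNN the entries combine classification (FocalLossCost),
# L1 box (BBoxL1Cost) and GIoU (IoUCost) terms; here they are random.
rng = np.random.default_rng(0)
cost = rng.random((5, 2))

# Hungarian matching: each ground truth is assigned to exactly one
# proposal, so no NMS-style many-to-one deduplication is needed.
proposal_idx, gt_idx = linear_sum_assignment(cost)
for p, g in zip(proposal_idx, gt_idx):
    print(f'proposal {p} <- gt {g} (cost {cost[p, g]:.3f})')
```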
6,964
177.589744
1,110
md
mmdetection
mmdetection-master/configs/sparse_rcnn/metafile.yml
Collections: - Name: Sparse R-CNN Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - ResNet - Sparse R-CNN Paper: URL: https://arxiv.org/abs/2011.12450 Title: 'Sparse R-CNN: End-to-End Object Detection with Learnable Proposals' README: configs/sparse_rcnn/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.9.0/mmdet/models/detectors/sparse_rcnn.py#L6 Version: v2.9.0 Models: - Name: sparse_rcnn_r50_fpn_1x_coco In Collection: Sparse R-CNN Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py Metadata: Epochs: 12 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 37.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco/sparse_rcnn_r50_fpn_1x_coco_20201222_214453-dc79b137.pth - Name: sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco In Collection: Sparse R-CNN Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 42.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco_20201218_154234-7bc5c054.pth - Name: sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco In Collection: Sparse R-CNN Config: configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 45.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_024605-9fe92701.pth - Name: sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco In Collection: Sparse R-CNN Config: configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 44.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco_20201223_121552-6c46c9d6.pth - Name: sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco In Collection: Sparse R-CNN Config: configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py Metadata: Epochs: 36 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 46.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco_20201223_023452-c23c3564.pth
3,153
37.938272
229
yml
mmdetection
mmdetection-master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
_base_ = './sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
235
28.5
78
py
mmdetection
mmdetection-master/configs/sparse_rcnn/sparse_rcnn_r101_fpn_mstrain_480-800_3x_coco.py
_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' model = dict( backbone=dict( depth=101, init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet101')))
216
26.125
61
py
mmdetection
mmdetection-master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_1x_coco.py
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] num_stages = 6 num_proposals = 100 model = dict( type='SparseRCNN', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=0, add_extra_convs='on_input', num_outs=4), rpn_head=dict( type='EmbeddingRPNHead', num_proposals=num_proposals, proposal_feature_channel=256), roi_head=dict( type='SparseRoIHead', num_stages=num_stages, stage_loss_weights=[1] * num_stages, proposal_feature_channel=256, bbox_roi_extractor=dict( type='SingleRoIExtractor', roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2), out_channels=256, featmap_strides=[4, 8, 16, 32]), bbox_head=[ dict( type='DIIHead', num_classes=80, num_ffn_fcs=2, num_heads=8, num_cls_fcs=1, num_reg_fcs=3, feedforward_channels=2048, in_channels=256, dropout=0.0, ffn_act_cfg=dict(type='ReLU', inplace=True), dynamic_conv_cfg=dict( type='DynamicConv', in_channels=256, feat_channels=64, out_channels=256, input_feat_shape=7, act_cfg=dict(type='ReLU', inplace=True), norm_cfg=dict(type='LN')), loss_bbox=dict(type='L1Loss', loss_weight=5.0), loss_iou=dict(type='GIoULoss', loss_weight=2.0), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=2.0), bbox_coder=dict( type='DeltaXYWHBBoxCoder', clip_border=False, target_means=[0., 0., 0., 0.], target_stds=[0.5, 0.5, 1., 1.])) for _ in range(num_stages) ]), # training and testing settings train_cfg=dict( rpn=None, rcnn=[ dict( assigner=dict( type='HungarianAssigner', cls_cost=dict(type='FocalLossCost', weight=2.0), reg_cost=dict(type='BBoxL1Cost', weight=5.0), iou_cost=dict(type='IoUCost', iou_mode='giou', weight=2.0)), sampler=dict(type='PseudoSampler'), pos_weight=1) for _ in range(num_stages) ]), test_cfg=dict(rpn=None, rcnn=dict(max_per_img=num_proposals))) # optimizer optimizer = dict(_delete_=True, type='AdamW', lr=0.000025, weight_decay=0.0001) optimizer_config = dict(_delete_=True, grad_clip=dict(max_norm=1, norm_type=2)) # learning policy lr_config = dict(policy='step', step=[8, 11]) runner = dict(type='EpochBasedRunner', max_epochs=12)
3,469
35.145833
79
py
mmdetection
mmdetection-master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_300_proposals_crop_mstrain_480-800_3x_coco.py
_base_ = './sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py' num_proposals = 300 model = dict( rpn_head=dict(num_proposals=num_proposals), test_cfg=dict( _delete_=True, rpn=None, rcnn=dict(max_per_img=num_proposals))) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR. train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] data = dict(train=dict(pipeline=train_pipeline))
2,191
40.358491
78
py
mmdetection
mmdetection-master/configs/sparse_rcnn/sparse_rcnn_r50_fpn_mstrain_480-800_3x_coco.py
_base_ = './sparse_rcnn_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) min_values = (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Resize', img_scale=[(1333, value) for value in min_values], multiscale_mode='value', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] data = dict(train=dict(pipeline=train_pipeline)) lr_config = dict(policy='step', step=[27, 33]) runner = dict(type='EpochBasedRunner', max_epochs=36)
853
34.583333
77
py
mmdetection
mmdetection-master/configs/ssd/README.md
# SSD

> [SSD: Single Shot MultiBox Detector](https://arxiv.org/abs/1512.02325)

<!-- [ALGORITHM] -->

## Abstract

We present a method for detecting objects in images using a single deep neural network. Our approach, named SSD, discretizes the output space of bounding boxes into a set of default boxes over different aspect ratios and scales per feature map location. At prediction time, the network generates scores for the presence of each object category in each default box and produces adjustments to the box to better match the object shape. Additionally, the network combines predictions from multiple feature maps with different resolutions to naturally handle objects of various sizes. Our SSD model is simple relative to methods that require object proposals because it completely eliminates proposal generation and subsequent pixel or feature resampling stage and encapsulates all computation in a single network. This makes SSD easy to train and straightforward to integrate into systems that require a detection component. Experimental results on the PASCAL VOC, MS COCO, and ILSVRC datasets confirm that SSD has comparable accuracy to methods that utilize an additional object proposal step and is much faster, while providing a unified framework for both training and inference. Compared to other single stage methods, SSD has much better accuracy, even with a smaller input image size. For 300×300 input, SSD achieves 72.1% mAP on VOC2007 test at 58 FPS on a Nvidia Titan X and for 500×500 input, SSD achieves 75.1% mAP, outperforming a comparable state of the art Faster R-CNN model.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143998553-4e12f681-6025-46b4-8410-9e2e1e53a8ec.png"/>
</div>

## Results and models of SSD

| Backbone | Size | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :------: | :--: | :---: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| VGG16 | 300 | caffe | 120e | 9.9 | 43.7 | 25.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssd300_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428.log.json) |
| VGG16 | 512 | caffe | 120e | 19.4 | 30.7 | 29.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssd512_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849-0a47a1ca.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849.log.json) |

## Results and models of SSD-Lite

| Backbone | Size | Training from scratch | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :---------: | :--: | :-------------------: | :-----: | :------: | :------------: | :----: | :----: | :------: |
| MobileNetV2 | 320 | yes | 600e | 4.0 | 69.9 | 21.3 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627.log.json) |

## Notice

### Compatibility

In v2.14.0, [PR5291](https://github.com/open-mmlab/mmdetection/pull/5291) refactored SSD neck and head for more flexible usage. If users want to use the SSD checkpoint trained in the older versions, we provide a script `tools/model_converters/upgrade_ssd_version.py` to convert the model weights.

```bash
python tools/model_converters/upgrade_ssd_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH}
```

- OLD_MODEL_PATH: the path to load the old version SSD model.
- NEW_MODEL_PATH: the path to save the converted model weights.

### SSD-Lite training settings

There are some differences between our implementation of MobileNetV2 SSD-Lite and the one in [TensorFlow 1.x detection model zoo](https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md).

1. Use 320x320 as input size instead of 300x300.
2. The anchor sizes are different.
3. The C4 feature map is taken from the last layer of stage 4 instead of the middle of the block.
4. The model in TensorFlow 1.x is trained on coco 2014 and validated on coco minival2014, but we trained and validated the model on coco 2017. The mAP on val2017 is usually a little lower than minival2014 (refer to the results in TensorFlow Object Detection API, e.g., MobileNetV2 SSD gets 22 mAP on minival2014 but 20.2 mAP on val2017).

## Citation

```latex
@article{Liu_2016,
   title={SSD: Single Shot MultiBox Detector},
   journal={ECCV},
   author={Liu, Wei and Anguelov, Dragomir and Erhan, Dumitru and Szegedy, Christian and Reed, Scott and Fu, Cheng-Yang and Berg, Alexander C.},
   year={2016},
}
```
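Point 2 above ("the anchor sizes are different") refers to the `anchor_generator` settings: the COCO SSD configs derive per-level box sizes from `basesize_ratio_range`, while the SSD-Lite config in this directory sets `min_sizes`/`max_sizes` by hand. The sketch below only illustrates the standard SSD default-box geometry, where a box of scale s and aspect ratio r has width s*sqrt(r) and height s/sqrt(r); the base sizes loosely follow the SSD-Lite values and the exact boxes MMDetection generates may differ.

```python
import math

# Example per-level base sizes (pixels) and aspect ratios, loosely
# following the SSD-Lite config in this directory; illustrative only.
min_sizes = [48, 100, 150, 202, 253, 304]
ratios = [2, 3]

for level, s in enumerate(min_sizes):
    boxes = [(s, s)]  # the square box at the base scale
    for r in ratios:
        w, h = s * math.sqrt(r), s / math.sqrt(r)
        boxes.append((round(w, 1), round(h, 1)))
        boxes.append((round(h, 1), round(w, 1)))  # the reciprocal ratio 1/r
    print(f'level {level}: {boxes}')
```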
6,646
104.507937
1,486
md
mmdetection
mmdetection-master/configs/ssd/ascend_ssd300_coco.py
_base_ = [ '../_base_/models/ascend_ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(300, 300), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(300, 300), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=3, train=dict( _delete_=True, type='RepeatDataset', times=5, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict(_delete_=True) custom_hooks = [ dict(type='NumClassCheckHook'), dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') ] # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64)
2,371
31.493151
79
py
mmdetection
mmdetection-master/configs/ssd/metafile.yml
Collections: - Name: SSD Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - VGG Paper: URL: https://arxiv.org/abs/1512.02325 Title: 'SSD: Single Shot MultiBox Detector' README: configs/ssd/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.14.0/mmdet/models/dense_heads/ssd_head.py#L16 Version: v2.14.0 Models: - Name: ssd300_coco In Collection: SSD Config: configs/ssd/ssd300_coco.py Metadata: Training Memory (GB): 9.9 inference time (ms/im): - value: 22.88 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (300, 300) Epochs: 120 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 25.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd300_coco/ssd300_coco_20210803_015428-d231a06e.pth - Name: ssd512_coco In Collection: SSD Config: configs/ssd/ssd512_coco.py Metadata: Training Memory (GB): 19.4 inference time (ms/im): - value: 32.57 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512, 512) Epochs: 120 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 29.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssd512_coco/ssd512_coco_20210803_022849-0a47a1ca.pth - Name: ssdlite_mobilenetv2_scratch_600e_coco In Collection: SSD Config: configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py Metadata: Training Memory (GB): 4.0 inference time (ms/im): - value: 14.3 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (320, 320) Epochs: 600 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 21.3 Weights: https://download.openmmlab.com/mmdetection/v2.0/ssd/ssdlite_mobilenetv2_scratch_600e_coco/ssdlite_mobilenetv2_scratch_600e_coco_20210629_110627-974d9307.pth
2,277
27.835443
169
yml
mmdetection
mmdetection-master/configs/ssd/ssd300_coco.py
_base_ = [ '../_base_/models/ssd300.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_2x.py', '../_base_/default_runtime.py' ] # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(300, 300), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(300, 300), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=3, train=dict( _delete_=True, type='RepeatDataset', times=5, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict(_delete_=True) custom_hooks = [ dict(type='NumClassCheckHook'), dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') ] # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64)
2,360
31.791667
79
py
mmdetection
mmdetection-master/configs/ssd/ssd300_fp16_coco.py
_base_ = ['./ssd300_coco.py'] fp16 = dict(loss_scale='dynamic') # learning policy # In order to avoid non-convergence in the early stage of # mixed-precision training, the warmup in the lr_config is set to linear, # warmup_iters increases and warmup_ratio decreases. lr_config = dict(warmup='linear', warmup_iters=1000, warmup_ratio=1.0 / 10)
345
33.6
75
py
mmdetection
mmdetection-master/configs/ssd/ssd512_coco.py
_base_ = 'ssd300_coco.py' input_size = 512 model = dict( neck=dict( out_channels=(512, 1024, 512, 256, 256, 256, 256), level_strides=(2, 2, 2, 2, 1), level_paddings=(1, 1, 1, 1, 1), last_kernel_size=4), bbox_head=dict( in_channels=(512, 1024, 512, 256, 256, 256, 256), anchor_generator=dict( type='SSDAnchorGenerator', scale_major=False, input_size=input_size, basesize_ratio_range=(0.1, 0.9), strides=[8, 16, 32, 64, 128, 256, 512], ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]))) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(512, 512), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(512, 512), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=3, train=dict( _delete_=True, type='RepeatDataset', times=5, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict(_delete_=True) custom_hooks = [ dict(type='NumClassCheckHook'), dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW') ] # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64)
2,820
32.188235
79
py
mmdetection
mmdetection-master/configs/ssd/ssd512_fp16_coco.py
_base_ = ['./ssd512_coco.py'] # fp16 settings fp16 = dict(loss_scale='dynamic') # learning policy # In order to avoid non-convergence in the early stage of # mixed-precision training, the warmup in the lr_config is set to linear, # warmup_iters increases and warmup_ratio decreases. lr_config = dict(warmup='linear', warmup_iters=1000, warmup_ratio=1.0 / 10)
360
35.1
75
py
mmdetection
mmdetection-master/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py', '../_base_/default_runtime.py'
]

model = dict(
    type='SingleStageDetector',
    backbone=dict(
        type='MobileNetV2',
        out_indices=(4, 7),
        norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
        init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)),
    neck=dict(
        type='SSDNeck',
        in_channels=(96, 1280),
        out_channels=(96, 1280, 512, 256, 256, 128),
        level_strides=(2, 2, 2, 2),
        level_paddings=(1, 1, 1, 1),
        l2_norm_scale=None,
        use_depthwise=True,
        norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
        act_cfg=dict(type='ReLU6'),
        init_cfg=dict(type='TruncNormal', layer='Conv2d', std=0.03)),
    bbox_head=dict(
        type='SSDHead',
        in_channels=(96, 1280, 512, 256, 256, 128),
        num_classes=80,
        use_depthwise=True,
        norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
        act_cfg=dict(type='ReLU6'),
        init_cfg=dict(type='Normal', layer='Conv2d', std=0.001),

        # set anchor size manually instead of using the predefined
        # SSD300 setting.
        anchor_generator=dict(
            type='SSDAnchorGenerator',
            scale_major=False,
            strides=[16, 32, 64, 107, 160, 320],
            ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
            min_sizes=[48, 100, 150, 202, 253, 304],
            max_sizes=[100, 150, 202, 253, 304, 320]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2])),
    # model training and testing settings
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.5,
            min_pos_iou=0.,
            ignore_iof_thr=-1,
            gt_max_assign_all=False),
        smoothl1_beta=1.,
        allowed_border=-1,
        pos_weight=-1,
        neg_pos_ratio=3,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        nms=dict(type='nms', iou_threshold=0.45),
        min_bbox_size=0,
        score_thr=0.02,
        max_per_img=200))
cudnn_benchmark = True

# dataset settings
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Expand',
        mean=img_norm_cfg['mean'],
        to_rgb=img_norm_cfg['to_rgb'],
        ratio_range=(1, 4)),
    dict(
        type='MinIoURandomCrop',
        min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
        min_crop_size=0.3),
    dict(type='Resize', img_scale=(320, 320), keep_ratio=False),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(
        type='PhotoMetricDistortion',
        brightness_delta=32,
        contrast_range=(0.5, 1.5),
        saturation_range=(0.5, 1.5),
        hue_delta=18),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=320),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(320, 320),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=False),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=320),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=24,
    workers_per_gpu=4,
    train=dict(
        _delete_=True,
        type='RepeatDataset',  # use RepeatDataset to speed up training
        times=5,
        dataset=dict(
            type=dataset_type,
            ann_file=data_root + 'annotations/instances_train2017.json',
            img_prefix=data_root + 'train2017/',
            pipeline=train_pipeline)),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

# optimizer
optimizer = dict(type='SGD', lr=0.015, momentum=0.9, weight_decay=4.0e-5)
optimizer_config = dict(grad_clip=None)

# learning policy
lr_config = dict(
    policy='CosineAnnealing',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    min_lr=0)
runner = dict(type='EpochBasedRunner', max_epochs=120)

# Avoid evaluation and saving weights too frequently
evaluation = dict(interval=5, metric='bbox')
checkpoint_config = dict(interval=5)
custom_hooks = [
    dict(type='NumClassCheckHook'),
    dict(type='CheckInvalidLossHook', interval=50, priority='VERY_LOW')
]

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (24 samples per GPU)
auto_scale_lr = dict(base_batch_size=192)
4,928
31.642384
77
py
mmdetection
mmdetection-master/configs/strong_baselines/README.md
# Strong Baselines

<!-- [OTHERS] -->

We train Mask R-CNN with large-scale jittering and a longer schedule as strong baselines. The modifications follow those in [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/configs/new_baselines).

## Results and Models

| Backbone | Style | Lr schd | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :------: | :-----: | :-----: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| R-50-FPN | pytorch | 50e | | | | | [config](./mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py) | [model](<>) \| [log](<>) |
| R-50-FPN | pytorch | 100e | | | | | [config](./mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py) | [model](<>) \| [log](<>) |
| R-50-FPN | caffe | 100e | | | 44.7 | 40.4 | [config](./mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py) | [model](<>) \| [log](<>) |
| R-50-FPN | caffe | 400e | | | | | [config](./mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py) | [model](<>) \| [log](<>) |

## Notice

When using large-scale jittering, there are sometimes empty proposals in the box and mask heads during training. This requires MMSyncBN, which allows empty tensors. Therefore, please use mmcv-full>=1.3.14 to train the models in this directory.
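The configs in this directory realize the MMSyncBN requirement with a pair of norm overrides: the backbone and neck keep SyncBN, while the heads use MMSyncBN so that empty proposal tensors do not crash normalization. A condensed sketch of the pattern, extracted from `mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py` below:

```python
# Normalization overrides used by the strong baselines; see the full
# config in this directory for the remaining settings.
norm_cfg = dict(type='SyncBN', requires_grad=True)
# MMSyncBN tolerates empty tensors in the box/mask heads (mmcv-full>=1.3.14).
head_norm_cfg = dict(type='MMSyncBN', requires_grad=True)

model = dict(
    backbone=dict(frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg),
    neck=dict(norm_cfg=norm_cfg),
    rpn_head=dict(num_convs=2),
    roi_head=dict(
        bbox_head=dict(
            type='Shared4Conv1FCBBoxHead',
            conv_out_channels=256,
            norm_cfg=head_norm_cfg),
        mask_head=dict(norm_cfg=head_norm_cfg)))
```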
1,636
76.952381
182
md
mmdetection
mmdetection-master/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../common/lsj_100e_coco_instance.py' ] norm_cfg = dict(type='SyncBN', requires_grad=True) # Use MMSyncBN that handles empty tensor in head. It can be changed to # SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed # Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205. head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) model = dict( backbone=dict( frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None, style='caffe'), neck=dict(norm_cfg=norm_cfg), rpn_head=dict(num_convs=2), roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, norm_cfg=head_norm_cfg), mask_head=dict(norm_cfg=head_norm_cfg))) file_client_args = dict(backend='disk') # file_client_args = dict( # backend='petrel', # path_mapping=dict({ # './data/': 's3://openmmlab/datasets/detection/', # 'data/': 's3://openmmlab/datasets/detection/' # })) img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) image_size = (1024, 1024) train_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=image_size, ratio_range=(0.1, 2.0), multiscale_mode='range', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=image_size, recompute_bbox=True, allow_negative_crop=True), dict(type='FilterAnnotations', min_gt_bbox_wh=(1e-2, 1e-2)), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size=image_size), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile', file_client_args=file_client_args), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # Use RepeatDataset to speed up training data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
2,703
32.382716
77
py
mmdetection
mmdetection-master/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py
_base_ = 'mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' fp16 = dict(loss_scale=512.)
102
33.333333
72
py
mmdetection
mmdetection-master/configs/strong_baselines/mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_400e_coco.py
_base_ = './mask_rcnn_r50_caffe_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' # Use RepeatDataset to speed up training # change repeat time from 4 (for 100 epochs) to 16 (for 400 epochs) data = dict(train=dict(times=4 * 4)) lr_config = dict(warmup_iters=500 * 4)
261
36.428571
74
py
mmdetection
mmdetection-master/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../common/lsj_100e_coco_instance.py' ] norm_cfg = dict(type='SyncBN', requires_grad=True) # Use MMSyncBN that handles empty tensor in head. It can be changed to # SyncBN after https://github.com/pytorch/pytorch/issues/36530 is fixed # Requires MMCV-full after https://github.com/open-mmlab/mmcv/pull/1205. head_norm_cfg = dict(type='MMSyncBN', requires_grad=True) model = dict( # the model is trained from scratch, so init_cfg is None backbone=dict( frozen_stages=-1, norm_eval=False, norm_cfg=norm_cfg, init_cfg=None), neck=dict(norm_cfg=norm_cfg), rpn_head=dict(num_convs=2), # leads to 0.1+ mAP roi_head=dict( bbox_head=dict( type='Shared4Conv1FCBBoxHead', conv_out_channels=256, norm_cfg=head_norm_cfg), mask_head=dict(norm_cfg=head_norm_cfg)))
893
37.869565
77
py
mmdetection
mmdetection-master/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_fp16_coco.py
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' # use FP16 fp16 = dict(loss_scale=512.)
107
26
66
py
mmdetection
mmdetection-master/configs/strong_baselines/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_50e_coco.py
_base_ = 'mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_lsj_100e_coco.py' # Use RepeatDataset to speed up training # change repeat time from 4 (for 100 epochs) to 2 (for 50 epochs) data = dict(train=dict(times=2))
208
33.833333
66
py
mmdetection
mmdetection-master/configs/swin/README.md
# Swin

> [Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030)

<!-- [BACKBONE] -->

## Abstract

This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143999551-6a527048-de38-485c-a1b6-3133ffa5bfaa.png"/>
</div>

## Results and Models

### Mask R-CNN

| Backbone | Pretrain | Lr schd | Multi-scale crop | FP16 | Mem (GB) | Inf time (fps) | box AP | mask AP | Config | Download |
| :------: | :---------: | :-----: | :--------------: | :--: | :------: | :------------: | :----: | :-----: | :----: | :------: |
| Swin-T | ImageNet-1K | 1x | no | no | 7.6 | | 42.7 | 39.3 | [config](./mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937.log.json) |
| Swin-T | ImageNet-1K | 3x | yes | no | 10.2 | | 46.0 | 41.6 | [config](./mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725.log.json) |
| Swin-T | ImageNet-1K | 3x | yes | yes | 7.8 | | 46.0 | 41.7 | [config](./mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006.log.json) |
| Swin-S | ImageNet-1K | 3x | yes | yes | 11.9 | | 48.2 | 43.2 | [config](./mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808.log.json) |

### Notice

Please follow the example of `retinanet_swin-t-p4-w7_fpn_1x_coco.py` when you want to combine Swin Transformer with a one-stage detector. Because there is a layer norm at each output of Swin Transformer, you must set `start_level` to 0 in FPN and set the `out_indices` of the backbone to `[1, 2, 3]`.

## Citation

```latex
@article{liu2021Swin,
  title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows},
  author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining},
  journal={arXiv preprint arXiv:2103.14030},
  year={2021}
}
```
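Concretely, the one-stage combination described in the notice boils down to the following backbone/neck overrides, condensed from `retinanet_swin-t-p4-w7_fpn_1x_coco.py` in this directory (the channel numbers correspond to Swin-T's stages 1-3; this sketch omits the remaining backbone arguments):

```python
# Condensed from retinanet_swin-t-p4-w7_fpn_1x_coco.py: because each Swin
# output already passes through a layer norm, FPN starts at level 0 and the
# backbone only exposes the stages FPN actually consumes.
model = dict(
    backbone=dict(
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        out_indices=(1, 2, 3)),  # only the indices used by FPN
    neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))
```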
5,732
135.5
1,455
md
mmdetection
mmdetection-master/configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py' pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth' # noqa model = dict( backbone=dict( depths=[2, 2, 18, 2], init_cfg=dict(type='Pretrained', checkpoint=pretrained)))
318
44.571429
124
py
mmdetection
mmdetection-master/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type='MaskRCNN', backbone=dict( _delete_=True, type='SwinTransformer', embed_dims=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2, patch_norm=True, out_indices=(0, 1, 2, 3), with_cp=False, convert_weights=True, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels=[96, 192, 384, 768])) optimizer = dict( _delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg=dict( custom_keys={ 'absolute_pos_embed': dict(decay_mult=0.), 'relative_position_bias_table': dict(decay_mult=0.), 'norm': dict(decay_mult=0.) })) lr_config = dict(warmup_iters=1000, step=[8, 11]) runner = dict(max_epochs=12)
1,301
29.27907
123
py
mmdetection
mmdetection-master/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py
_base_ = './mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py' # you need to set mode='dynamic' if you are using pytorch<=1.5.0 fp16 = dict(loss_scale=dict(init_scale=512))
169
41.5
64
py
mmdetection
mmdetection-master/configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py
_base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/coco_instance.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth' # noqa model = dict( type='MaskRCNN', backbone=dict( _delete_=True, type='SwinTransformer', embed_dims=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4, qkv_bias=True, qk_scale=None, drop_rate=0., attn_drop_rate=0., drop_path_rate=0.2, patch_norm=True, out_indices=(0, 1, 2, 3), with_cp=False, convert_weights=True, init_cfg=dict(type='Pretrained', checkpoint=pretrained)), neck=dict(in_channels=[96, 192, 384, 768])) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) # augmentation strategy originates from DETR / Sparse RCNN train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='AutoAugment', policies=[[ dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', keep_ratio=True) ], [ dict( type='Resize', img_scale=[(400, 1333), (500, 1333), (600, 1333)], multiscale_mode='value', keep_ratio=True), dict( type='RandomCrop', crop_type='absolute_range', crop_size=(384, 600), allow_negative_crop=True), dict( type='Resize', img_scale=[(480, 1333), (512, 1333), (544, 1333), (576, 1333), (608, 1333), (640, 1333), (672, 1333), (704, 1333), (736, 1333), (768, 1333), (800, 1333)], multiscale_mode='value', override=True, keep_ratio=True) ]]), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] data = dict(train=dict(pipeline=train_pipeline)) optimizer = dict( _delete_=True, type='AdamW', lr=0.0001, betas=(0.9, 0.999), weight_decay=0.05, paramwise_cfg=dict( custom_keys={ 'absolute_pos_embed': dict(decay_mult=0.), 'relative_position_bias_table': dict(decay_mult=0.), 'norm': dict(decay_mult=0.) })) lr_config = dict(warmup_iters=1000, step=[27, 33]) runner = dict(max_epochs=36)
3,305
34.934783
123
py
mmdetection
mmdetection-master/configs/swin/metafile.yml
Models:
  - Name: mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco
    In Collection: Mask R-CNN
    Config: configs/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco.py
    Metadata:
      Training Memory (GB): 11.9
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - AdamW
      Training Resources: 8x V100 GPUs
      Architecture:
        - Swin Transformer
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 48.2
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 43.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-s-p4-w7_fpn_fp16_ms-crop-3x_coco_20210903_104808-b92c91f1.pth
    Paper:
      URL: https://arxiv.org/abs/2103.14030
      Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows'
    README: configs/swin/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465
      Version: v2.16.0
  - Name: mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco
    In Collection: Mask R-CNN
    Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco.py
    Metadata:
      Training Memory (GB): 10.2
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - AdamW
      Training Resources: 8x V100 GPUs
      Architecture:
        - Swin Transformer
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 41.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_ms-crop-3x_coco_20210906_131725-bacf6f7b.pth
    Paper:
      URL: https://arxiv.org/abs/2103.14030
      Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows'
    README: configs/swin/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465
      Version: v2.16.0
  - Name: mask_rcnn_swin-t-p4-w7_fpn_1x_coco
    In Collection: Mask R-CNN
    Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 7.6
      Epochs: 12
      Training Data: COCO
      Training Techniques:
        - AdamW
      Training Resources: 8x V100 GPUs
      Architecture:
        - Swin Transformer
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.7
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 39.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_1x_coco/mask_rcnn_swin-t-p4-w7_fpn_1x_coco_20210902_120937-9d6b7cfa.pth
    Paper:
      URL: https://arxiv.org/abs/2103.14030
      Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows'
    README: configs/swin/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465
      Version: v2.16.0
  - Name: mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco
    In Collection: Mask R-CNN
    Config: configs/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco.py
    Metadata:
      Training Memory (GB): 7.8
      Epochs: 36
      Training Data: COCO
      Training Techniques:
        - AdamW
      Training Resources: 8x V100 GPUs
      Architecture:
        - Swin Transformer
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.0
      - Task: Instance Segmentation
        Dataset: COCO
        Metrics:
          mask AP: 41.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/swin/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco/mask_rcnn_swin-t-p4-w7_fpn_fp16_ms-crop-3x_coco_20210908_165006-90a4008c.pth
    Paper:
      URL: https://arxiv.org/abs/2103.14030
      Title: 'Swin Transformer: Hierarchical Vision Transformer using Shifted Windows'
    README: configs/swin/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.16.0/mmdet/models/backbones/swin.py#L465
      Version: v2.16.0
mmdetection-master/configs/swin/retinanet_swin-t-p4-w7_fpn_1x_coco.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
pretrained = 'https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth'  # noqa
model = dict(
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.2,
        patch_norm=True,
        out_indices=(1, 2, 3),
        # Please only add indices that would be used
        # in FPN, otherwise some parameters will not be used
        with_cp=False,
        convert_weights=True,
        init_cfg=dict(type='Pretrained', checkpoint=pretrained)),
    neck=dict(in_channels=[192, 384, 768], start_level=0, num_outs=5))

optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
mmdetection-master/configs/timm_example/README.md
# Timm Example

> [PyTorch Image Models](https://github.com/rwightman/pytorch-image-models)

<!-- [OTHERS] -->

## Abstract

Py**T**orch **Im**age **M**odels (`timm`) is a collection of image models, layers, utilities, optimizers, schedulers, data-loaders / augmentations, and reference training / validation scripts that aim to pull together a wide variety of SOTA models with ability to reproduce ImageNet training results.

<!-- <div align=center>
<img src="" height="400" />
</div> -->

## Results and Models

### RetinaNet

| Backbone        | Style   | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config                                                    | Download |
| :-------------: | :-----: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------: | :------: |
| R-50            | pytorch | 1x      |          |                |        | [config](./retinanet_timm_tv_resnet50_fpn_1x_coco.py)     |          |
| EfficientNet-B1 | -       | 1x      |          |                |        | [config](./retinanet_timm_efficientnet_b1_fpn_1x_coco.py) |          |

## Usage

### Install additional requirements

MMDetection supports timm backbones via `TIMMBackbone`, a wrapper class in MMClassification. Thus, you need to install `mmcls` in addition to timm. If you have already installed requirements for mmdet, run

```shell
pip install 'dataclasses; python_version<"3.7"'
pip install timm
pip install 'mmcls>=0.20.0'
```

See [this document](https://mmclassification.readthedocs.io/en/latest/install.html) for the details of MMClassification installation.

### Edit config

- See example configs for basic usage.
- See the documents of [timm feature extraction](https://rwightman.github.io/pytorch-image-models/feature_extraction/#multi-scale-feature-maps-feature-pyramid) and [TIMMBackbone](https://mmclassification.readthedocs.io/en/latest/api.html#mmcls.models.backbones.TIMMBackbone) for details.
- Which feature map is output depends on the backbone. Please check `backbone out_channels` and `backbone out_strides` in your log, and modify `model.neck.in_channels` and `model.backbone.out_indices` if necessary (a sketch of this is given after the citation below).
- If you use Vision Transformer models that do not support `features_only=True`, add `custom_hooks = []` to your config to disable `NumClassCheckHook`.

## Citation

```latex
@misc{rw2019timm,
  author = {Ross Wightman},
  title = {PyTorch Image Models},
  year = {2019},
  publisher = {GitHub},
  journal = {GitHub repository},
  doi = {10.5281/zenodo.4414861},
  howpublished = {\url{https://github.com/rwightman/pytorch-image-models}}
}
```
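As referenced in the config-editing notes above, here is a minimal sketch (not a config shipped with the repo) of swapping in a different timm backbone. `resnet34` is used only as an example; the `in_channels` values must match the `backbone out_channels` printed in your training log for whichever model and `out_indices` you pick.

```python
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# import mmcls.models to trigger register_module in mmcls (mmcls>=0.20.0)
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
    backbone=dict(
        _delete_=True,
        type='mmcls.TIMMBackbone',
        model_name='resnet34',  # example: any timm model supporting features_only
        features_only=True,
        pretrained=True,
        out_indices=(1, 2, 3, 4)),  # the feature stages the FPN consumes
    # for timm's resnet34 these stages carry 64/128/256/512 channels;
    # always cross-check against the `backbone out_channels` log line
    neck=dict(in_channels=[64, 128, 256, 512]))
```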
mmdetection-master/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# please install mmcls>=0.20.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
    backbone=dict(
        _delete_=True,
        type='mmcls.TIMMBackbone',
        model_name='efficientnet_b1',
        features_only=True,
        pretrained=True,
        out_indices=(1, 2, 3, 4)),
    neck=dict(in_channels=[24, 40, 112, 320]))

optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
mmdetection-master/configs/timm_example/retinanet_timm_tv_resnet50_fpn_1x_coco.py
_base_ = [
    '../_base_/models/retinanet_r50_fpn.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

# please install mmcls>=0.20.0
# import mmcls.models to trigger register_module in mmcls
custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False)
model = dict(
    backbone=dict(
        _delete_=True,
        type='mmcls.TIMMBackbone',
        model_name='tv_resnet50',  # ResNet-50 with torchvision weights
        features_only=True,
        pretrained=True,
        out_indices=(1, 2, 3, 4)))

optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
mmdetection-master/configs/tood/README.md
# TOOD

> [TOOD: Task-aligned One-stage Object Detection](https://arxiv.org/abs/2108.07755)

<!-- [ALGORITHM] -->

## Abstract

One-stage object detection is commonly implemented by optimizing two sub-tasks: object classification and localization, using heads with two parallel branches, which might lead to a certain level of spatial misalignment in predictions between the two tasks. In this work, we propose a Task-aligned One-stage Object Detection (TOOD) that explicitly aligns the two tasks in a learning-based manner. First, we design a novel Task-aligned Head (T-Head) which offers a better balance between learning task-interactive and task-specific features, as well as a greater flexibility to learn the alignment via a task-aligned predictor. Second, we propose Task Alignment Learning (TAL) to explicitly pull closer (or even unify) the optimal anchors for the two tasks during training via a designed sample assignment scheme and a task-aligned loss. Extensive experiments are conducted on MS-COCO, where TOOD achieves a 51.1 AP at single-model single-scale testing. This surpasses the recent one-stage detectors by a large margin, such as ATSS (47.7 AP), GFL (48.2 AP), and PAA (49.0 AP), with fewer parameters and FLOPs. Qualitative results also demonstrate the effectiveness of TOOD for better aligning the tasks of object classification and localization.

<div align=center>
<img src="https://user-images.githubusercontent.com/12907710/145400075-e08191f5-8afa-4335-9b3b-27926fc9a26e.png"/>
</div>

## Results and Models

| Backbone | Style | Anchor Type | Lr schd | Multi-scale Training | Mem (GB) | Inf time (fps) | box AP | Config | Download |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| R-50 | pytorch | Anchor-free | 1x | N | 4.1 | | 42.4 | [config](./tood_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425.log) |
| R-50 | pytorch | Anchor-based | 1x | N | 4.1 | | 42.4 | [config](./tood_r50_fpn_anchor_based_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105.log) |
| R-50 | pytorch | Anchor-free | 2x | Y | 4.1 | | 44.5 | [config](./tood_r50_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231.log) |
| R-101 | pytorch | Anchor-free | 2x | Y | 6.0 | | 46.1 | [config](./tood_r101_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232.log) |
| R-101-dcnv2 | pytorch | Anchor-free | 2x | Y | 6.2 | | 49.3 | [config](./tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728.log) |
| X-101-64x4d | pytorch | Anchor-free | 2x | Y | 10.2 | | 47.6 | [config](./tood_x101_64x4d_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519.log) |
| X-101-64x4d-dcnv2 | pytorch | Anchor-free | 2x | Y | | | | [config](./tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py) | [model](<>) \| [log](<>) |

\[1\] *1x and 2x mean the model is trained for 90K and 180K iterations, respectively.* \
\[2\] *All results are obtained with a single model and without any test time data augmentation such as multi-scale, flipping, etc.* \
\[3\] *`dcnv2` denotes deformable convolutional networks v2.*

## Citation

```latex
@inproceedings{feng2021tood,
    title={TOOD: Task-aligned One-stage Object Detection},
    author={Feng, Chengjian and Zhong, Yujie and Gao, Yu and Scott, Matthew R and Huang, Weilin},
    booktitle={ICCV},
    year={2021}
}
```
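The TAL sample assignment mentioned in the abstract scores each candidate anchor with a task alignment metric `t = s**alpha * u**beta` (classification score `s`, IoU `u`) and keeps the top-k best-aligned candidates per ground truth. A small illustrative sketch, using the `alpha=1`, `beta=6`, `topk=13` values from `tood_r50_fpn_1x_coco.py` below; tensor names and shapes are ours, not mmdet internals:

```python
import torch

def task_alignment_metric(cls_scores, ious, alpha=1.0, beta=6.0):
    """t = s^alpha * u^beta for N candidate anchors of one ground-truth box.

    cls_scores: (N,) predicted score of the gt class per candidate anchor.
    ious: (N,) IoU between each candidate's predicted box and the gt box.
    """
    return cls_scores.pow(alpha) * ious.pow(beta)

t = task_alignment_metric(torch.rand(100), torch.rand(100))
positives = t.topk(13).indices  # TAL selects the 13 best-aligned anchors
```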
mmdetection-master/configs/tood/metafile.yml
Collections:
  - Name: TOOD
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD
      Training Resources: 8x V100 GPUs
      Architecture:
        - TOOD
    Paper:
      URL: https://arxiv.org/abs/2108.07755
      Title: 'TOOD: Task-aligned One-stage Object Detection'
    README: configs/tood/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.20.0/mmdet/models/detectors/tood.py#L7
      Version: v2.20.0

Models:
  - Name: tood_r101_fpn_mstrain_2x_coco
    In Collection: TOOD
    Config: configs/tood/tood_r101_fpn_mstrain_2x_coco.py
    Metadata:
      Training Memory (GB): 6.0
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.1
    Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_mstrain_2x_coco/tood_r101_fpn_mstrain_2x_coco_20211210_144232-a18f53c8.pth
  - Name: tood_x101_64x4d_fpn_mstrain_2x_coco
    In Collection: TOOD
    Config: configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py
    Metadata:
      Training Memory (GB): 10.2
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 47.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_x101_64x4d_fpn_mstrain_2x_coco/tood_x101_64x4d_fpn_mstrain_2x_coco_20211211_003519-a4f36113.pth
  - Name: tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco
    In Collection: TOOD
    Config: configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py
    Metadata:
      Training Memory (GB): 6.2
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 49.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco_20211210_213728-4a824142.pth
  - Name: tood_r50_fpn_anchor_based_1x_coco
    In Collection: TOOD
    Config: configs/tood/tood_r50_fpn_anchor_based_1x_coco.py
    Metadata:
      Training Memory (GB): 4.1
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_anchor_based_1x_coco/tood_r50_fpn_anchor_based_1x_coco_20211214_100105-b776c134.pth
  - Name: tood_r50_fpn_1x_coco
    In Collection: TOOD
    Config: configs/tood/tood_r50_fpn_1x_coco.py
    Metadata:
      Training Memory (GB): 4.1
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 42.4
    Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_1x_coco/tood_r50_fpn_1x_coco_20211210_103425-20e20746.pth
  - Name: tood_r50_fpn_mstrain_2x_coco
    In Collection: TOOD
    Config: configs/tood/tood_r50_fpn_mstrain_2x_coco.py
    Metadata:
      Training Memory (GB): 4.1
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/tood/tood_r50_fpn_mstrain_2x_coco/tood_r50_fpn_mstrain_2x_coco_20211210_144231-3b23174c.pth
mmdetection-master/configs/tood/tood_r101_fpn_dconv_c3-c5_mstrain_2x_coco.py
_base_ = './tood_r101_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)),
    bbox_head=dict(num_dcn=2))
mmdetection-master/configs/tood/tood_r101_fpn_mstrain_2x_coco.py
_base_ = './tood_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
mmdetection-master/configs/tood/tood_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
model = dict(
    type='TOOD',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',
        num_outs=5),
    bbox_head=dict(
        type='TOODHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=6,
        feat_channels=256,
        anchor_type='anchor_free',
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]),
        bbox_coder=dict(
            type='DeltaXYWHBBoxCoder',
            target_means=[.0, .0, .0, .0],
            target_stds=[0.1, 0.1, 0.2, 0.2]),
        initial_loss_cls=dict(
            type='FocalLoss',
            use_sigmoid=True,
            activated=True,  # use probability instead of logit as input
            gamma=2.0,
            alpha=0.25,
            loss_weight=1.0),
        loss_cls=dict(
            type='QualityFocalLoss',
            use_sigmoid=True,
            activated=True,  # use probability instead of logit as input
            beta=2.0,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=2.0)),
    train_cfg=dict(
        initial_epoch=4,
        initial_assigner=dict(type='ATSSAssigner', topk=9),
        assigner=dict(type='TaskAlignedAssigner', topk=13),
        alpha=1,
        beta=6,
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)

# custom hooks
custom_hooks = [dict(type='SetEpochInfoHook')]
mmdetection-master/configs/tood/tood_r50_fpn_anchor_based_1x_coco.py
_base_ = './tood_r50_fpn_1x_coco.py'
model = dict(bbox_head=dict(anchor_type='anchor_based'))
mmdetection-master/configs/tood/tood_r50_fpn_mstrain_2x_coco.py
_base_ = './tood_r50_fpn_1x_coco.py'
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)

# multi-scale training
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 480), (1333, 800)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
data = dict(train=dict(pipeline=train_pipeline))
mmdetection-master/configs/tood/tood_x101_64x4d_fpn_dconv_c4-c5_mstrain_2x_coco.py
_base_ = './tood_x101_64x4d_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deformable_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, False, True, True),
    ),
    bbox_head=dict(num_dcn=2))
mmdetection-master/configs/tood/tood_x101_64x4d_fpn_mstrain_2x_coco.py
_base_ = './tood_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
mmdetection-master/configs/tridentnet/README.md
# TridentNet

> [Scale-Aware Trident Networks for Object Detection](https://arxiv.org/abs/1901.01892)

<!-- [ALGORITHM] -->

## Abstract

Scale variation is one of the key challenges in object detection. In this work, we first present a controlled experiment to investigate the effect of receptive fields for scale variation in object detection. Based on the findings from the exploration experiments, we propose a novel Trident Network (TridentNet) aiming to generate scale-specific feature maps with a uniform representational power. We construct a parallel multi-branch architecture in which each branch shares the same transformation parameters but with different receptive fields. Then, we adopt a scale-aware training scheme to specialize each branch by sampling object instances of proper scales for training. As a bonus, a fast approximation version of TridentNet could achieve significant improvements without any additional parameters and computational cost compared with the vanilla detector. On the COCO dataset, our TridentNet with ResNet-101 backbone achieves state-of-the-art single-model results of 48.4 mAP.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/143999668-0927922e-efc2-45fa-8bfc-1e3df18720f5.png"/>
</div>

## Results and Models

We report the test results using only one branch for inference.

| Backbone | Style | mstrain | Lr schd | Mem (GB) | Inf time (fps) | box AP | Download |
| :------: | :---: | :-----: | :-----: | :------: | :------------: | :----: | :------: |
| R-50 | caffe | N | 1x | | | 37.7 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838.log.json) |
| R-50 | caffe | Y | 1x | | | 37.6 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839-6ce55ccb.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839.log.json) |
| R-50 | caffe | Y | 3x | | | 40.3 | [model](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539-46d227ba.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539.log.json) |

**Note**

Similar to [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/projects/TridentNet), we haven't implemented the Scale-aware Training Scheme in section 4.2 of the paper.

## Citation

```latex
@InProceedings{li2019scale,
  title={Scale-Aware Trident Networks for Object Detection},
  author={Li, Yanghao and Chen, Yuntao and Wang, Naiyan and Zhang, Zhaoxiang},
  journal={The International Conference on Computer Vision (ICCV)},
  year={2019}
}
```
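The weight-sharing idea from the abstract fits in a few lines: one 3x3 convolution weight applied with three dilation rates yields three branches with different receptive fields at zero extra parameter cost. The sketch below is only an illustration of this idea, not mmdet's `TridentResNet` implementation:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class TridentConv(nn.Module):
    """Shares one conv weight across branches with different dilations."""

    def __init__(self, in_ch, out_ch, dilations=(1, 2, 3)):
        super().__init__()
        self.dilations = dilations
        self.weight = nn.Parameter(torch.empty(out_ch, in_ch, 3, 3))
        nn.init.kaiming_normal_(self.weight)

    def forward(self, x):
        # Same weight, different dilation per branch; padding=d keeps the
        # spatial size, so the branches differ only in receptive field.
        return [
            F.conv2d(x, self.weight, padding=d, dilation=d)
            for d in self.dilations
        ]

outs = TridentConv(8, 16)(torch.randn(2, 8, 32, 32))
assert all(o.shape == (2, 16, 32, 32) for o in outs)
```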
mmdetection-master/configs/tridentnet/metafile.yml
Collections:
  - Name: TridentNet
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - ResNet
        - TridentNet Block
    Paper:
      URL: https://arxiv.org/abs/1901.01892
      Title: 'Scale-Aware Trident Networks for Object Detection'
    README: configs/tridentnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.8.0/mmdet/models/detectors/trident_faster_rcnn.py#L6
      Version: v2.8.0

Models:
  - Name: tridentnet_r50_caffe_1x_coco
    In Collection: TridentNet
    Config: configs/tridentnet/tridentnet_r50_caffe_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_1x_coco/tridentnet_r50_caffe_1x_coco_20201230_141838-2ec0b530.pth
  - Name: tridentnet_r50_caffe_mstrain_1x_coco
    In Collection: TridentNet
    Config: configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco/tridentnet_r50_caffe_mstrain_1x_coco_20201230_141839-6ce55ccb.pth
  - Name: tridentnet_r50_caffe_mstrain_3x_coco
    In Collection: TridentNet
    Config: configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py
    Metadata:
      Epochs: 36
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 40.3
    Weights: https://download.openmmlab.com/mmdetection/v2.0/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco/tridentnet_r50_caffe_mstrain_3x_coco_20201130_100539-46d227ba.pth
mmdetection-master/configs/tridentnet/tridentnet_r50_caffe_1x_coco.py
_base_ = [
    '../_base_/models/faster_rcnn_r50_caffe_c4.py',
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]

model = dict(
    type='TridentFasterRCNN',
    backbone=dict(
        type='TridentResNet',
        trident_dilations=(1, 2, 3),
        num_branch=3,
        test_branch_idx=1,
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://detectron2/resnet50_caffe')),
    roi_head=dict(type='TridentRoIHead', num_branch=3, test_branch_idx=1),
    train_cfg=dict(
        rpn_proposal=dict(max_per_img=500),
        rcnn=dict(
            sampler=dict(num=128, pos_fraction=0.5,
                         add_gt_as_proposals=False))))

# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
mmdetection-master/configs/tridentnet/tridentnet_r50_caffe_mstrain_1x_coco.py
_base_ = 'tridentnet_r50_caffe_1x_coco.py'

# use caffe img_norm
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736),
                   (1333, 768), (1333, 800)],
        multiscale_mode='value',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]
data = dict(train=dict(pipeline=train_pipeline))
mmdetection-master/configs/tridentnet/tridentnet_r50_caffe_mstrain_3x_coco.py
_base_ = 'tridentnet_r50_caffe_mstrain_1x_coco.py'

lr_config = dict(step=[28, 34])
runner = dict(type='EpochBasedRunner', max_epochs=36)
mmdetection-master/configs/vfnet/README.md
# VarifocalNet

> [VarifocalNet: An IoU-aware Dense Object Detector](https://arxiv.org/abs/2008.13367)

<!-- [ALGORITHM] -->

## Abstract

Accurately ranking the vast number of candidate detections is crucial for dense object detectors to achieve high performance. Prior work uses the classification score or a combination of classification and predicted localization scores to rank candidates. However, neither option results in a reliable ranking, thus degrading detection performance. In this paper, we propose to learn an IoU-aware Classification Score (IACS) as a joint representation of object presence confidence and localization accuracy. We show that dense object detectors can achieve a more accurate ranking of candidate detections based on the IACS. We design a new loss function, named Varifocal Loss, to train a dense object detector to predict the IACS, and propose a new star-shaped bounding box feature representation for IACS prediction and bounding box refinement. Combining these two new components and a bounding box refinement branch, we build an IoU-aware dense object detector based on the FCOS+ATSS architecture, that we call VarifocalNet or VFNet for short. Extensive experiments on MS COCO show that our VFNet consistently surpasses the strong baseline by ∼2.0 AP with different backbones. Our best model VFNet-X-1200 with Res2Net-101-DCN achieves a single-model single-scale AP of 55.1 on COCO test-dev, which is state-of-the-art among various object detectors.

<div align=center>
<img src="https://user-images.githubusercontent.com/9102141/97464778-4b9ab000-197c-11eb-9283-ab2907ee0252.png"/>
</div>

## Introduction

**VarifocalNet (VFNet)** learns to predict the IoU-aware classification score which mixes the object presence confidence and localization accuracy together as the detection score for a bounding box. The learning is supervised by the proposed Varifocal Loss (VFL), based on a new star-shaped bounding box feature representation (the features at nine yellow sampling points). Given the new representation, the object localization accuracy is further improved by refining the initially regressed bounding box. The full paper is available at: [https://arxiv.org/abs/2008.13367](https://arxiv.org/abs/2008.13367).

## Results and Models

| Backbone | Style | DCN | MS train | Lr schd | Inf time (fps) | box AP (val) | box AP (test-dev) | Config | Download |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| R-50 | pytorch | N | N | 1x | - | 41.6 | 41.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco.json) |
| R-50 | pytorch | N | Y | 2x | - | 44.5 | 44.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco.json) |
| R-50 | pytorch | Y | Y | 2x | - | 47.8 | 48.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.json) |
| R-101 | pytorch | N | N | 1x | - | 43.0 | 43.6 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco.json) |
| R-101 | pytorch | N | Y | 2x | - | 46.2 | 46.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco.json) |
| R-101 | pytorch | Y | Y | 2x | - | 49.0 | 49.2 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.json) |
| X-101-32x4d | pytorch | Y | Y | 2x | - | 49.7 | 50.0 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json) |
| X-101-64x4d | pytorch | Y | Y | 2x | - | 50.4 | 50.8 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.json) |

**Notes:**

- The MS-train scale range is 1333x\[480:960\] (`range` mode) and the inference scale keeps 1333x800.
- DCN means using `DCNv2` in both backbone and head.
- Inference time will be updated soon.
- More results and pre-trained models can be found in [VarifocalNet-Github](https://github.com/hyz-xmaster/VarifocalNet)

## Citation

```latex
@article{zhang2020varifocalnet,
  title={VarifocalNet: An IoU-aware Dense Object Detector},
  author={Zhang, Haoyang and Wang, Ying and Dayoub, Feras and S{\"u}nderhauf, Niko},
  journal={arXiv preprint arXiv:2008.13367},
  year={2020}
}
```
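To make the IACS idea from the introduction concrete, here is a compact sketch of Varifocal Loss as described in the paper. It is our own simplification, not mmdet's `VarifocalLoss` module; `alpha=0.75` and `gamma=2.0` match the `loss_cls` settings in `vfnet_r50_fpn_1x_coco.py` below.

```python
import torch
import torch.nn.functional as F

def varifocal_loss(pred_logits, target_iacs, alpha=0.75, gamma=2.0):
    """target_iacs is the IoU between prediction and gt for positives and 0
    for negatives, so positives are weighted by localization quality
    (iou_weighted) while negatives are focally down-weighted by p**gamma."""
    p = pred_logits.sigmoid()
    pos = (target_iacs > 0).float()
    weight = pos * target_iacs + (1 - pos) * alpha * p.detach().pow(gamma)
    bce = F.binary_cross_entropy_with_logits(
        pred_logits, target_iacs, reduction='none')
    return (weight * bce).sum()

# toy usage: 4 anchors x 80 classes, all negative targets
loss = varifocal_loss(torch.randn(4, 80), torch.zeros(4, 80))
```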
mmdetection-master/configs/vfnet/metafile.yml
Collections:
  - Name: VFNet
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - FPN
        - ResNet
        - Varifocal Loss
    Paper:
      URL: https://arxiv.org/abs/2008.13367
      Title: 'VarifocalNet: An IoU-aware Dense Object Detector'
    README: configs/vfnet/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.6.0/mmdet/models/detectors/vfnet.py#L6
      Version: v2.6.0

Models:
  - Name: vfnet_r50_fpn_1x_coco
    In Collection: VFNet
    Config: configs/vfnet/vfnet_r50_fpn_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 41.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_1x_coco/vfnet_r50_fpn_1x_coco_20201027-38db6f58.pth
  - Name: vfnet_r50_fpn_mstrain_2x_coco
    In Collection: VFNet
    Config: configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 44.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mstrain_2x_coco/vfnet_r50_fpn_mstrain_2x_coco_20201027-7cc75bd2.pth
  - Name: vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco
    In Collection: VFNet
    Config: configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 48.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-6879c318.pth
  - Name: vfnet_r101_fpn_1x_coco
    In Collection: VFNet
    Config: configs/vfnet/vfnet_r101_fpn_1x_coco.py
    Metadata:
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 43.6
    Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_1x_coco/vfnet_r101_fpn_1x_coco_20201027pth-c831ece7.pth
  - Name: vfnet_r101_fpn_mstrain_2x_coco
    In Collection: VFNet
    Config: configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 46.7
    Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mstrain_2x_coco/vfnet_r101_fpn_mstrain_2x_coco_20201027pth-4a5d53f1.pth
  - Name: vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco
    In Collection: VFNet
    Config: configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 49.2
    Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-7729adb5.pth
  - Name: vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco
    In Collection: VFNet
    Config: configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 50.0
    Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-d300a6fc.pth
  - Name: vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco
    In Collection: VFNet
    Config: configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
    Metadata:
      Epochs: 24
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 50.8
    Weights: https://download.openmmlab.com/mmdetection/v2.0/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco_20201027pth-b5f6da5e.pth
mmdetection-master/configs/vfnet/vfnet_r101_fpn_1x_coco.py
_base_ = './vfnet_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
mmdetection-master/configs/vfnet/vfnet_r101_fpn_2x_coco.py
_base_ = './vfnet_r50_fpn_1x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
mmdetection-master/configs/vfnet/vfnet_r101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNet',
        depth=101,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
mmdetection-master/configs/vfnet/vfnet_r101_fpn_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
mmdetection-master/configs/vfnet/vfnet_r2_101_fpn_mdconv_c3-c5_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
mmdetection-master/configs/vfnet/vfnet_r2_101_fpn_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='Res2Net',
        depth=101,
        scales=4,
        base_width=26,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://res2net101_v1d_26w_4s')))
mmdetection-master/configs/vfnet/vfnet_r50_fpn_1x_coco.py
_base_ = [
    '../_base_/datasets/coco_detection.py',
    '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'
]
# model settings
model = dict(
    type='VFNet',
    backbone=dict(
        type='ResNet',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        start_level=1,
        add_extra_convs='on_output',  # use P5
        num_outs=5,
        relu_before_extra_convs=True),
    bbox_head=dict(
        type='VFNetHead',
        num_classes=80,
        in_channels=256,
        stacked_convs=3,
        feat_channels=256,
        strides=[8, 16, 32, 64, 128],
        center_sampling=False,
        dcn_on_last_conv=False,
        use_atss=True,
        use_vfl=True,
        loss_cls=dict(
            type='VarifocalLoss',
            use_sigmoid=True,
            alpha=0.75,
            gamma=2.0,
            iou_weighted=True,
            loss_weight=1.0),
        loss_bbox=dict(type='GIoULoss', loss_weight=1.5),
        loss_bbox_refine=dict(type='GIoULoss', loss_weight=2.0)),
    # training and testing settings
    train_cfg=dict(
        assigner=dict(type='ATSSAssigner', topk=9),
        allowed_border=-1,
        pos_weight=-1,
        debug=False),
    test_cfg=dict(
        nms_pre=1000,
        min_bbox_size=0,
        score_thr=0.05,
        nms=dict(type='nms', iou_threshold=0.6),
        max_per_img=100))

# data setting
dataset_type = 'CocoDataset'
data_root = 'data/coco/'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(type='Resize', img_scale=(1333, 800), keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

# optimizer
optimizer = dict(
    lr=0.01, paramwise_cfg=dict(bias_lr_mult=2., bias_decay_mult=0.))
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.1,
    step=[8, 11])
runner = dict(type='EpochBasedRunner', max_epochs=12)
mmdetection-master/configs/vfnet/vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True)),
    bbox_head=dict(dcn_on_last_conv=True))
mmdetection-master/configs/vfnet/vfnet_r50_fpn_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_1x_coco.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True),
    dict(
        type='Resize',
        img_scale=[(1333, 480), (1333, 960)],
        multiscale_mode='range',
        keep_ratio=True),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size_divisor=32),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='Pad', size_divisor=32),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# learning policy
lr_config = dict(step=[16, 22])
runner = dict(type='EpochBasedRunner', max_epochs=24)
mmdetection-master/configs/vfnet/vfnet_x101_32x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
mmdetection-master/configs/vfnet/vfnet_x101_32x4d_fpn_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=32,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_32x4d')))
mmdetection-master/configs/vfnet/vfnet_x101_64x4d_fpn_mdconv_c3-c5_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mdconv_c3-c5_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        dcn=dict(type='DCNv2', deform_groups=1, fallback_on_stride=False),
        stage_with_dcn=(False, True, True, True),
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
mmdetection-master/configs/vfnet/vfnet_x101_64x4d_fpn_mstrain_2x_coco.py
_base_ = './vfnet_r50_fpn_mstrain_2x_coco.py'
model = dict(
    backbone=dict(
        type='ResNeXt',
        depth=101,
        groups=64,
        base_width=4,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        frozen_stages=1,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True,
        style='pytorch',
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnext101_64x4d')))
mmdetection-master/configs/wider_face/README.md
# WIDER FACE

> [WIDER FACE: A Face Detection Benchmark](https://arxiv.org/abs/1511.06523)

<!-- [DATASET] -->

## Abstract

Face detection is one of the most studied topics in the computer vision community. Much of the progresses have been made by the availability of face detection benchmark datasets. We show that there is a gap between current face detection performance and the real world requirements. To facilitate future face detection research, we introduce the WIDER FACE dataset, which is 10 times larger than existing datasets. The dataset contains rich annotations, including occlusions, poses, event categories, and face bounding boxes. Faces in the proposed dataset are extremely challenging due to large variations in scale, pose and occlusion, as shown in Fig. 1. Furthermore, we show that WIDER FACE dataset is an effective training source for face detection. We benchmark several representative detection systems, providing an overview of state-of-the-art performance and propose a solution to deal with large scale variation. Finally, we discuss common failure cases that worth to be further investigated.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/144000364-3320de79-34fc-40a6-938f-bb512f05a4bb.png" height="400"/>
</div>

## Introduction

To use the WIDER Face dataset you need to download it and extract to the `data/WIDERFace` folder. Annotation in the VOC format can be found in this [repo](https://github.com/sovrasov/wider-face-pascal-voc-annotations.git). You should move the annotation files from `WIDER_train_annotations` and `WIDER_val_annotations` folders to the `Annotation` folders inside the corresponding directories `WIDER_train` and `WIDER_val`. Also annotation lists `val.txt` and `train.txt` should be copied to `data/WIDERFace` from `WIDER_train_annotations` and `WIDER_val_annotations`. The directory should be like this:

```
mmdetection
β”œβ”€β”€ mmdet
β”œβ”€β”€ tools
β”œβ”€β”€ configs
β”œβ”€β”€ data
β”‚   β”œβ”€β”€ WIDERFace
β”‚   β”‚   β”œβ”€β”€ WIDER_train
β”‚   β”‚   β”‚   β”œβ”€β”€ 0--Parade
β”‚   β”‚   β”‚   β”œβ”€β”€ ...
β”‚   β”‚   β”‚   β”œβ”€β”€ Annotations
β”‚   β”‚   β”œβ”€β”€ WIDER_val
β”‚   β”‚   β”‚   β”œβ”€β”€ 0--Parade
β”‚   β”‚   β”‚   β”œβ”€β”€ ...
β”‚   β”‚   β”‚   β”œβ”€β”€ Annotations
β”‚   β”‚   β”œβ”€β”€ val.txt
β”‚   β”‚   β”œβ”€β”€ train.txt
```

After that you can train the SSD300 on WIDER by launching training with the `ssd300_wider_face.py` config or create your own config based on the presented one (a sketch follows the citation below).

## Citation

```latex
@inproceedings{yang2016wider,
  Author = {Yang, Shuo and Luo, Ping and Loy, Chen Change and Tang, Xiaoou},
  Booktitle = {IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  Title = {WIDER FACE: A Face Detection Benchmark},
  Year = {2016}
}
```
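As an illustration of creating your own config based on the presented one, a hypothetical derived file could look like the sketch below; the file name and the overridden values are ours, chosen purely to demonstrate the `_base_` inheritance mechanism.

```python
# my_ssd300_wider_face.py -- hypothetical example, not part of the repo
_base_ = './ssd300_wider_face.py'

# e.g. a shorter schedule and less frequent logging for a quick sanity run
runner = dict(type='EpochBasedRunner', max_epochs=4)
lr_config = dict(step=[3])
log_config = dict(interval=50)
```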
mmdetection-master/configs/wider_face/ssd300_wider_face.py
_base_ = [
    '../_base_/models/ssd300.py', '../_base_/datasets/wider_face.py',
    '../_base_/default_runtime.py'
]
model = dict(bbox_head=dict(num_classes=1))
# optimizer
optimizer = dict(type='SGD', lr=0.012, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[16, 20])
# runtime settings
runner = dict(type='EpochBasedRunner', max_epochs=24)
log_config = dict(interval=1)
mmdetection-master/configs/yolact/README.md
# YOLACT

> [YOLACT: Real-time Instance Segmentation](https://arxiv.org/abs/1904.02689)

<!-- [ALGORITHM] -->

## Abstract

We present a simple, fully-convolutional model for real-time instance segmentation that achieves 29.8 mAP on MS COCO at 33.5 fps evaluated on a single Titan Xp, which is significantly faster than any previous competitive approach. Moreover, we obtain this result after training on only one GPU. We accomplish this by breaking instance segmentation into two parallel subtasks: (1) generating a set of prototype masks and (2) predicting per-instance mask coefficients. Then we produce instance masks by linearly combining the prototypes with the mask coefficients. We find that because this process doesn't depend on repooling, this approach produces very high-quality masks and exhibits temporal stability for free. Furthermore, we analyze the emergent behavior of our prototypes and show they learn to localize instances on their own in a translation variant manner, despite being fully-convolutional. Finally, we also propose Fast NMS, a drop-in 12 ms faster replacement for standard NMS that only has a marginal performance penalty.

<div align=center>
<img src="https://user-images.githubusercontent.com/40661020/144001225-8c62cad7-a330-4f60-873f-61aa17e99223.png"/>
</div>

## Introduction

A simple, fully convolutional model for real-time instance segmentation. This is the code for our paper:

- [YOLACT: Real-time Instance Segmentation](https://arxiv.org/abs/1904.02689)

<!-- - [YOLACT++: Better Real-time Instance Segmentation](https://arxiv.org/abs/1912.06218) -->

For a real-time demo, check out our ICCV video:
[![IMAGE ALT TEXT HERE](https://img.youtube.com/vi/0pMfmo8qfpQ/0.jpg)](https://www.youtube.com/watch?v=0pMfmo8qfpQ)

## Evaluation

Here are our YOLACT models along with their FPS on a Titan Xp and mAP on COCO's `val`:

| Image Size | GPU x BS | Backbone | \*FPS | mAP | Weights | Configs | Download |
| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |
| 550 | 1x8 | Resnet50-FPN | 42.5 | 29.0 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_1x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth) |
| 550 | 8x8 | Resnet50-FPN | 42.5 | 28.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r50_8x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth) |
| 550 | 1x8 | Resnet101-FPN | 33.5 | 30.4 | | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolact/yolact_r101_1x8_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth) |

\*Note: The FPS is evaluated by the [original implementation](https://github.com/dbolya/yolact). When calculating FPS, only the model inference time is taken into account. Data loading and post-processing operations such as converting masks to RLE code, generating COCO JSON results, and image rendering are not included.

## Training

All the aforementioned models are trained with a single GPU. It typically takes ~12GB VRAM when using resnet-101 as the backbone. If you want to try multi-GPU training, you may have to modify the configuration files accordingly, such as adjusting the training schedule and freezing batch norm.

```Shell
# Trains using the resnet-101 backbone with a batch size of 8 on a single GPU.
./tools/dist_train.sh configs/yolact/yolact_r101_1x8_coco.py 1
```

## Testing

Please refer to [mmdetection/docs/getting_started.md](https://mmdetection.readthedocs.io/en/latest/1_exist_data_model.html#test-existing-models).

## Citation

If you use YOLACT or this code base in your work, please cite

```latex
@inproceedings{yolact-iccv2019,
  author    = {Daniel Bolya and Chong Zhou and Fanyi Xiao and Yong Jae Lee},
  title     = {YOLACT: {Real-time} Instance Segmentation},
  booktitle = {ICCV},
  year      = {2019},
}
```

<!-- For YOLACT++, please cite

```latex
@misc{yolact-plus-arxiv2019,
  title         = {YOLACT++: Better Real-time Instance Segmentation},
  author        = {Daniel Bolya and Chong Zhou and Fanyi Xiao and Yong Jae Lee},
  year          = {2019},
  eprint        = {1912.06218},
  archivePrefix = {arXiv},
  primaryClass  = {cs.CV}
}
``` -->