Columns:
  repo: string (lengths 2 to 152)
  file: string (lengths 15 to 239)
  code: string (lengths 0 to 58.4M)
  file_length: int64 (0 to 58.4M)
  avg_line_length: float64 (0 to 1.81M)
  max_line_length: int64 (0 to 12.7M)
  extension_type: string (364 classes)
mmdetection
mmdetection-master/configs/yolact/metafile.yml
Collections: - Name: YOLACT Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - FPN - ResNet Paper: URL: https://arxiv.org/abs/1904.02689 Title: 'YOLACT: Real-time Instance Segmentation' README: configs/yolact/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.5.0/mmdet/models/detectors/yolact.py#L9 Version: v2.5.0 Models: - Name: yolact_r50_1x8_coco In Collection: YOLACT Config: configs/yolact/yolact_r50_1x8_coco.py Metadata: Training Resources: 1x V100 GPU Batch Size: 8 inference time (ms/im): - value: 23.53 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (550, 550) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 29.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_1x8_coco/yolact_r50_1x8_coco_20200908-f38d58df.pth - Name: yolact_r50_8x8_coco In Collection: YOLACT Config: configs/yolact/yolact_r50_8x8_coco.py Metadata: Batch Size: 64 inference time (ms/im): - value: 23.53 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (550, 550) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 28.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r50_8x8_coco/yolact_r50_8x8_coco_20200908-ca34f5db.pth - Name: yolact_r101_1x8_coco In Collection: YOLACT Config: configs/yolact/yolact_r101_1x8_coco.py Metadata: Training Resources: 1x V100 GPU Batch Size: 8 inference time (ms/im): - value: 29.85 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (550, 550) Results: - Task: Instance Segmentation Dataset: COCO Metrics: mask AP: 30.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolact/yolact_r101_1x8_coco/yolact_r101_1x8_coco_20200908-4cbe9101.pth
2,305
28.189873
131
yml
mmdetection
mmdetection-master/configs/yolact/yolact_r101_1x8_coco.py
_base_ = './yolact_r50_1x8_coco.py'
model = dict(
    backbone=dict(
        depth=101,
        init_cfg=dict(type='Pretrained',
                      checkpoint='torchvision://resnet101')))
192
23.125
61
py
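The R-101 config above overrides only the backbone depth and the pretrained checkpoint; everything else is inherited from `yolact_r50_1x8_coco.py` through `_base_`. A minimal sketch (not part of the repo, assuming mmcv is installed and the snippet runs from the repository root) of inspecting the merged result with `mmcv.Config.fromfile`:

```python
# Sketch only: load the child config and check which fields were
# overridden vs. inherited from the base file.
from mmcv import Config

cfg = Config.fromfile('configs/yolact/yolact_r101_1x8_coco.py')
print(cfg.model.backbone.depth)         # 101, overridden in this file
print(cfg.model.bbox_head.num_classes)  # 80, inherited from the R-50 base
```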
mmdetection
mmdetection-master/configs/yolact/yolact_r50_1x8_coco.py
_base_ = '../_base_/default_runtime.py' # model settings img_size = 550 model = dict( type='YOLACT', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=-1, # do not freeze stem norm_cfg=dict(type='BN', requires_grad=True), norm_eval=False, # update the statistics of bn zero_init_residual=False, style='pytorch', init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), neck=dict( type='FPN', in_channels=[256, 512, 1024, 2048], out_channels=256, start_level=1, add_extra_convs='on_input', num_outs=5, upsample_cfg=dict(mode='bilinear')), bbox_head=dict( type='YOLACTHead', num_classes=80, in_channels=256, feat_channels=256, anchor_generator=dict( type='AnchorGenerator', octave_base_scale=3, scales_per_octave=1, base_sizes=[8, 16, 32, 64, 128], ratios=[0.5, 1.0, 2.0], strides=[550.0 / x for x in [69, 35, 18, 9, 5]], centers=[(550 * 0.5 / x, 550 * 0.5 / x) for x in [69, 35, 18, 9, 5]]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[0.1, 0.1, 0.2, 0.2]), loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, reduction='none', loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5), num_head_convs=1, num_protos=32, use_ohem=True), mask_head=dict( type='YOLACTProtonet', in_channels=256, num_protos=32, num_classes=80, max_masks_to_train=100, loss_mask_weight=6.125), segm_head=dict( type='YOLACTSegmHead', num_classes=80, in_channels=256, loss_segm=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0)), # training and testing settings train_cfg=dict( assigner=dict( type='MaxIoUAssigner', pos_iou_thr=0.5, neg_iou_thr=0.4, min_pos_iou=0., ignore_iof_thr=-1, gt_max_assign_all=False), # smoothl1_beta=1., allowed_border=-1, pos_weight=-1, neg_pos_ratio=3, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, iou_thr=0.5, top_k=200, max_per_img=100)) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.68, 116.78, 103.94], std=[58.40, 57.12, 57.38], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict(type='FilterAnnotations', min_gt_bbox_wh=(4.0, 4.0)), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 4)), dict( type='MinIoURandomCrop', min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(img_size, img_size), keep_ratio=False), dict(type='RandomFlip', flip_ratio=0.5), dict( type='PhotoMetricDistortion', brightness_delta=32, contrast_range=(0.5, 1.5), saturation_range=(0.5, 1.5), hue_delta=18), dict(type='Normalize', **img_norm_cfg), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(img_size, img_size), flip=False, transforms=[ dict(type='Resize', keep_ratio=False), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=4, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 
'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=1e-3, momentum=0.9, weight_decay=5e-4) optimizer_config = dict() # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.1, step=[20, 42, 49, 52]) runner = dict(type='EpochBasedRunner', max_epochs=55) cudnn_benchmark = True evaluation = dict(metric=['bbox', 'segm']) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (1 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=8)
5,272
30.76506
79
py
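The anchor generator in the config above derives its strides and centers from the 550x550 input and the per-level feature-map sizes [69, 35, 18, 9, 5]. A short sketch of what those list comprehensions evaluate to:

```python
# Evaluate the stride/center expressions used by the YOLACT anchor
# generator above (values rounded for readability).
img_size = 550
featmap_sizes = [69, 35, 18, 9, 5]
strides = [img_size / x for x in featmap_sizes]
centers = [(img_size * 0.5 / x, img_size * 0.5 / x) for x in featmap_sizes]
print([round(s, 2) for s in strides])
# [7.97, 15.71, 30.56, 61.11, 110.0]
print([round(cx, 2) for cx, _ in centers])
# [3.99, 7.86, 15.28, 30.56, 55.0]
```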
mmdetection
mmdetection-master/configs/yolact/yolact_r50_8x8_coco.py
_base_ = 'yolact_r50_1x8_coco.py'

optimizer = dict(type='SGD', lr=8e-3, momentum=0.9, weight_decay=5e-4)
optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2))
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.1,
    step=[20, 42, 49, 52])

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
507
28.882353
70
py
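The 8x8 variant above raises the learning rate from 1e-3 to 8e-3 in step with the total batch size (8 GPUs x 8 samples vs. 1 GPU x 8 samples), the same linear scaling rule that `auto_scale_lr` encodes through `base_batch_size`. A hedged sketch of that rule; the helper name is illustrative, not the repo's API:

```python
# Linear LR scaling: the configured lr corresponds to base_batch_size;
# scale it in proportion to the batch size actually used.
def scale_lr(base_lr, base_batch_size, num_gpus, samples_per_gpu):
    return base_lr * (num_gpus * samples_per_gpu) / base_batch_size

# yolact_r50_1x8_coco.py: lr=1e-3 at base_batch_size=8 (1 GPU x 8 samples).
# Moving to 8 GPUs x 8 samples reproduces the 8x8 config's lr=8e-3.
print(scale_lr(1e-3, 8, 8, 8))  # 0.008
```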
mmdetection
mmdetection-master/configs/yolo/README.md
# YOLOv3 > [YOLOv3: An Incremental Improvement](https://arxiv.org/abs/1804.02767) <!-- [ALGORITHM] --> ## Abstract We present some updates to YOLO! We made a bunch of little design changes to make it better. We also trained this new network that's pretty swell. It's a little bigger than last time but more accurate. It's still fast though, don't worry. At 320x320 YOLOv3 runs in 22 ms at 28.2 mAP, as accurate as SSD but three times faster. When we look at the old .5 IOU mAP detection metric YOLOv3 is quite good. It achieves 57.9 mAP@50 in 51 ms on a Titan X, compared to 57.5 mAP@50 in 198 ms by RetinaNet, similar performance but 3.8x faster. <div align=center> <img src="https://user-images.githubusercontent.com/40661020/144001433-b4f7fb5e-3b7a-414b-b949-93733213b670.png" height="300"/> </div> ## Results and Models | Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | | :--------: | :---: | :-----: | :------: | :------------: | :----: | :--------------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | | DarkNet-53 | 320 | 273e | 2.7 | 63.9 | 27.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_320_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-20200819_172101.log.json) | | DarkNet-53 | 416 | 273e | 3.8 | 61.2 | 30.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-2b60fcd9.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-20200819_173424.log.json) | | DarkNet-53 | 608 | 273e | 7.4 | 48.1 | 33.7 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020.log.json) | ## Mixed Precision Training We also train YOLOv3 with mixed precision training. 
| Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | | :--------: | :---: | :-----: | :------: | :------------: | :----: | :-------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | | DarkNet-53 | 608 | 273e | 4.7 | 48.1 | 33.8 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542-4bc34944.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542.log.json) | ## Lightweight models | Backbone | Scale | Lr schd | Mem (GB) | Inf time (fps) | box AP | Config | Download | | :---------: | :---: | :-----: | :------: | :------------: | :----: | :----------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | | MobileNetV2 | 416 | 300e | 5.3 | | 23.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823.log.json) | | MobileNetV2 | 320 | 300e | 3.2 | | 22.2 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349.log.json) | Notice: We reduce the number of channels to 96 in both head and neck. It can reduce the flops and parameters, which makes these models more suitable for edge devices. ## Credit This implementation originates from the project of Haoyu Wu(@wuhy08) at Western Digital. ## Citation ```latex @misc{redmon2018yolov3, title={YOLOv3: An Incremental Improvement}, author={Joseph Redmon and Ali Farhadi}, year={2018}, eprint={1804.02767}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
7,661
135.821429
538
md
mmdetection
mmdetection-master/configs/yolo/metafile.yml
Collections: - Name: YOLOv3 Metadata: Training Data: COCO Training Techniques: - SGD with Momentum - Weight Decay Training Resources: 8x V100 GPUs Architecture: - DarkNet Paper: URL: https://arxiv.org/abs/1804.02767 Title: 'YOLOv3: An Incremental Improvement' README: configs/yolo/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.4.0/mmdet/models/detectors/yolo.py#L8 Version: v2.4.0 Models: - Name: yolov3_d53_320_273e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_d53_320_273e_coco.py Metadata: Training Memory (GB): 2.7 inference time (ms/im): - value: 15.65 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (320, 320) Epochs: 273 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 27.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_320_273e_coco/yolov3_d53_320_273e_coco-421362b6.pth - Name: yolov3_d53_mstrain-416_273e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_d53_mstrain-416_273e_coco.py Metadata: Training Memory (GB): 3.8 inference time (ms/im): - value: 16.34 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (416, 416) Epochs: 273 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 30.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-416_273e_coco/yolov3_d53_mstrain-416_273e_coco-2b60fcd9.pth - Name: yolov3_d53_mstrain-608_273e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_d53_mstrain-608_273e_coco.py Metadata: Training Memory (GB): 7.4 inference time (ms/im): - value: 20.79 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (608, 608) Epochs: 273 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 33.7 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_mstrain-608_273e_coco/yolov3_d53_mstrain-608_273e_coco_20210518_115020-a2c3acb8.pth - Name: yolov3_d53_fp16_mstrain-608_273e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py Metadata: Training Memory (GB): 4.7 inference time (ms/im): - value: 20.79 hardware: V100 backend: PyTorch batch size: 1 mode: FP16 resolution: (608, 608) Epochs: 273 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 33.8 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_d53_fp16_mstrain-608_273e_coco/yolov3_d53_fp16_mstrain-608_273e_coco_20210517_213542-4bc34944.pth - Name: yolov3_mobilenetv2_320_300e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_mobilenetv2_320_300e_coco.py Metadata: Training Memory (GB): 3.2 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 22.2 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_320_300e_coco/yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth - Name: yolov3_mobilenetv2_mstrain-416_300e_coco In Collection: YOLOv3 Config: configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py Metadata: Training Memory (GB): 5.3 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 23.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco/yolov3_mobilenetv2_mstrain-416_300e_coco_20210718_010823-f68a07b3.pth
3,996
30.976
176
yml
mmdetection
mmdetection-master/configs/yolo/yolov3_d53_320_273e_coco.py
_base_ = './yolov3_d53_mstrain-608_273e_coco.py' # dataset settings img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(320, 320), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(320, 320), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,439
32.488372
72
py
mmdetection
mmdetection-master/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py
_base_ = './yolov3_d53_mstrain-608_273e_coco.py'
# fp16 settings
fp16 = dict(loss_scale='dynamic')
99
24
48
py
mmdetection
mmdetection-master/configs/yolo/yolov3_d53_mstrain-416_273e_coco.py
_base_ = './yolov3_d53_mstrain-608_273e_coco.py' # dataset settings img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=[(320, 320), (416, 416)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(416, 416), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,453
32.813953
77
py
mmdetection
mmdetection-master/configs/yolo/yolov3_d53_mstrain-608_273e_coco.py
_base_ = '../_base_/default_runtime.py' # model settings model = dict( type='YOLOV3', backbone=dict( type='Darknet', depth=53, out_indices=(3, 4, 5), init_cfg=dict(type='Pretrained', checkpoint='open-mmlab://darknet53')), neck=dict( type='YOLOV3Neck', num_scales=3, in_channels=[1024, 512, 256], out_channels=[512, 256, 128]), bbox_head=dict( type='YOLOV3Head', num_classes=80, in_channels=[512, 256, 128], out_channels=[1024, 512, 256], anchor_generator=dict( type='YOLOAnchorGenerator', base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]], strides=[32, 16, 8]), bbox_coder=dict(type='YOLOBBoxCoder'), featmap_strides=[32, 16, 8], loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_conf=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_xy=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=2.0, reduction='sum'), loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), # training and testing settings train_cfg=dict( assigner=dict( type='GridAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0)), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, conf_thr=0.005, nms=dict(type='nms', iou_threshold=0.45), max_per_img=100)) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile', to_float32=True), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=[(320, 320), (608, 608)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(608, 608), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=4, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.001, momentum=0.9, weight_decay=0.0005) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=2000, # same as burn-in in darknet warmup_ratio=0.1, step=[218, 246]) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=273) evaluation = dict(interval=1, metric=['bbox']) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64)
4,418
32.225564
79
py
mmdetection
mmdetection-master/configs/yolo/yolov3_mobilenetv2_320_300e_coco.py
_base_ = ['./yolov3_mobilenetv2_mstrain-416_300e_coco.py'] # yapf:disable model = dict( bbox_head=dict( anchor_generator=dict( base_sizes=[[(220, 125), (128, 222), (264, 266)], [(35, 87), (102, 96), (60, 170)], [(10, 15), (24, 36), (72, 42)]]))) # yapf:enable # dataset settings img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), dict(type='Resize', img_scale=(320, 320), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(320, 320), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) ] data = dict( train=dict(dataset=dict(pipeline=train_pipeline)), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
1,756
31.537037
77
py
mmdetection
mmdetection-master/configs/yolo/yolov3_mobilenetv2_mstrain-416_300e_coco.py
_base_ = '../_base_/default_runtime.py' # model settings model = dict( type='YOLOV3', backbone=dict( type='MobileNetV2', out_indices=(2, 4, 6), act_cfg=dict(type='LeakyReLU', negative_slope=0.1), init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://mmdet/mobilenet_v2')), neck=dict( type='YOLOV3Neck', num_scales=3, in_channels=[320, 96, 32], out_channels=[96, 96, 96]), bbox_head=dict( type='YOLOV3Head', num_classes=80, in_channels=[96, 96, 96], out_channels=[96, 96, 96], anchor_generator=dict( type='YOLOAnchorGenerator', base_sizes=[[(116, 90), (156, 198), (373, 326)], [(30, 61), (62, 45), (59, 119)], [(10, 13), (16, 30), (33, 23)]], strides=[32, 16, 8]), bbox_coder=dict(type='YOLOBBoxCoder'), featmap_strides=[32, 16, 8], loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_conf=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0, reduction='sum'), loss_xy=dict( type='CrossEntropyLoss', use_sigmoid=True, loss_weight=2.0, reduction='sum'), loss_wh=dict(type='MSELoss', loss_weight=2.0, reduction='sum')), # training and testing settings train_cfg=dict( assigner=dict( type='GridAssigner', pos_iou_thr=0.5, neg_iou_thr=0.5, min_pos_iou=0)), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, conf_thr=0.005, nms=dict(type='nms', iou_threshold=0.45), max_per_img=100)) # dataset settings dataset_type = 'CocoDataset' data_root = 'data/coco/' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict( type='Expand', mean=img_norm_cfg['mean'], to_rgb=img_norm_cfg['to_rgb'], ratio_range=(1, 2)), dict( type='MinIoURandomCrop', min_ious=(0.4, 0.5, 0.6, 0.7, 0.8, 0.9), min_crop_size=0.3), dict( type='Resize', img_scale=[(320, 320), (416, 416)], multiscale_mode='range', keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='PhotoMetricDistortion'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(416, 416), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) ] data = dict( samples_per_gpu=24, workers_per_gpu=4, train=dict( type='RepeatDataset', # use RepeatDataset to speed up training times=10, dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline)), val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer optimizer = dict(type='SGD', lr=0.003, momentum=0.9, weight_decay=0.0005) optimizer_config = dict(grad_clip=dict(max_norm=35, norm_type=2)) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=4000, warmup_ratio=0.0001, step=[24, 28]) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=30) evaluation = dict(interval=1, metric=['bbox']) find_unused_parameters = True # NOTE: `auto_scale_lr` is for 
automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (24 samples per GPU) auto_scale_lr = dict(base_batch_size=192)
4,664
31.622378
78
py
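The `300e` in the file name above comes from combining `RepeatDataset(times=10)` with `max_epochs=30`: each runner epoch iterates the repeated dataset, so the raw COCO train split is seen 300 times. A quick check of that arithmetic:

```python
# Effective passes over train2017 for yolov3_mobilenetv2_mstrain-416_300e_coco.py
times = 10        # RepeatDataset repetition factor
max_epochs = 30   # EpochBasedRunner epochs
print(times * max_epochs)  # 300
```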
mmdetection
mmdetection-master/configs/yolof/README.md
# YOLOF > [You Only Look One-level Feature](https://arxiv.org/abs/2103.09460) <!-- [ALGORITHM] --> ## Abstract This paper revisits feature pyramids networks (FPN) for one-stage detectors and points out that the success of FPN is due to its divide-and-conquer solution to the optimization problem in object detection rather than multi-scale feature fusion. From the perspective of optimization, we introduce an alternative way to address the problem instead of adopting the complex feature pyramids - {\\em utilizing only one-level feature for detection}. Based on the simple and efficient solution, we present You Only Look One-level Feature (YOLOF). In our method, two key components, Dilated Encoder and Uniform Matching, are proposed and bring considerable improvements. Extensive experiments on the COCO benchmark prove the effectiveness of the proposed model. Our YOLOF achieves comparable results with its feature pyramids counterpart RetinaNet while being 2.5× faster. Without transformer layers, YOLOF can match the performance of DETR in a single-level feature manner with 7× less training epochs. With an image size of 608×608, YOLOF achieves 44.3 mAP running at 60 fps on 2080Ti, which is 13% faster than YOLOv4. <div align=center> <img src="https://user-images.githubusercontent.com/40661020/144001639-257374ef-7d4f-412b-a783-88abdd22f277.png"/> </div> ## Results and Models | Backbone | Style | Epoch | Lr schd | Mem (GB) | box AP | Config | Download | | :------: | :---: | :---: | :-----: | :------: | :----: | :-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | | R-50-C5 | caffe | Y | 1x | 8.3 | 37.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolof/yolof_r50_c5_8x8_1x_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427.log.json) | **Note**: 1. We find that the performance is unstable and may fluctuate by about 0.3 mAP. mAP 37.4 ~ 37.7 is acceptable in YOLOF_R_50_C5_1x. Such fluctuation can also be found in the [original implementation](https://github.com/chensnathan/YOLOF). 2. In addition to instability issues, sometimes there are large loss fluctuations and NAN, so there may still be problems with this project, which will be improved subsequently. ## Citation ```latex @inproceedings{chen2021you, title={You Only Look One-level Feature}, author={Chen, Qiang and Wang, Yingming and Yang, Tong and Zhang, Xiangyu and Cheng, Jian and Sun, Jian}, booktitle={IEEE Conference on Computer Vision and Pattern Recognition}, year={2021} } ```
3,467
95.333333
1,112
md
mmdetection
mmdetection-master/configs/yolof/metafile.yml
Collections:
  - Name: YOLOF
    Metadata:
      Training Data: COCO
      Training Techniques:
        - SGD with Momentum
        - Weight Decay
      Training Resources: 8x V100 GPUs
      Architecture:
        - Dilated Encoder
        - ResNet
    Paper:
      URL: https://arxiv.org/abs/2103.09460
      Title: 'You Only Look One-level Feature'
    README: configs/yolof/README.md
    Code:
      URL: https://github.com/open-mmlab/mmdetection/blob/v2.12.0/mmdet/models/detectors/yolof.py#L6
      Version: v2.12.0

Models:
  - Name: yolof_r50_c5_8x8_1x_coco
    In Collection: YOLOF
    Config: configs/yolof/yolof_r50_c5_8x8_1x_coco.py
    Metadata:
      Training Memory (GB): 8.3
      Epochs: 12
    Results:
      - Task: Object Detection
        Dataset: COCO
        Metrics:
          box AP: 37.5
    Weights: https://download.openmmlab.com/mmdetection/v2.0/yolof/yolof_r50_c5_8x8_1x_coco/yolof_r50_c5_8x8_1x_coco_20210425_024427-8e864411.pth
959
28.090909
145
yml
mmdetection
mmdetection-master/configs/yolof/yolof_r50_c5_8x8_1x_coco.py
_base_ = [ '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( type='YOLOF', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(3, ), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=False), norm_eval=True, style='caffe', init_cfg=dict( type='Pretrained', checkpoint='open-mmlab://detectron/resnet50_caffe')), neck=dict( type='DilatedEncoder', in_channels=2048, out_channels=512, block_mid_channels=128, num_residual_blocks=4, block_dilations=[2, 4, 6, 8]), bbox_head=dict( type='YOLOFHead', num_classes=80, in_channels=512, reg_decoded_bbox=True, anchor_generator=dict( type='AnchorGenerator', ratios=[1.0], scales=[1, 2, 4, 8, 16], strides=[32]), bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[.0, .0, .0, .0], target_stds=[1., 1., 1., 1.], add_ctr_clamp=True, ctr_clamp=32), loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0), loss_bbox=dict(type='GIoULoss', loss_weight=1.0)), # training and testing settings train_cfg=dict( assigner=dict( type='UniformAssigner', pos_ignore_thr=0.15, neg_ignore_thr=0.7), allowed_border=-1, pos_weight=-1, debug=False), test_cfg=dict( nms_pre=1000, min_bbox_size=0, score_thr=0.05, nms=dict(type='nms', iou_threshold=0.6), max_per_img=100)) # optimizer optimizer = dict( type='SGD', lr=0.12, momentum=0.9, weight_decay=0.0001, paramwise_cfg=dict( norm_decay_mult=0., custom_keys={'backbone': dict(lr_mult=1. / 3)})) lr_config = dict(warmup_iters=1500, warmup_ratio=0.00066667) # use caffe img_norm img_norm_cfg = dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='RandomShift', shift_ratio=0.5, max_shift_px=32), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=8, train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64)
3,504
30.294643
77
py
mmdetection
mmdetection-master/configs/yolof/yolof_r50_c5_8x8_iter-1x_coco.py
_base_ = './yolof_r50_c5_8x8_1x_coco.py'

# We implemented the iter-based config according to the source code.
# The COCO dataset has 117266 images after filtering. Training uses
# 8 GPUs with a batch size of 8 per GPU, so 22500 iterations are
# equivalent to 22500/(117266/(8x8)) = 12.3 epochs, 15000 to 8.2 epochs
# and 20000 to 10.9 epochs. Because the lr (0.12) is large, the
# iter-based and epoch-based settings differ by about 0.2 in mAP.
lr_config = dict(step=[15000, 20000])
runner = dict(_delete_=True, type='IterBasedRunner', max_iters=22500)
checkpoint_config = dict(interval=2500)
evaluation = dict(interval=4500)
log_config = dict(interval=20)
671
43.8
69
py
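The epoch equivalences quoted in the comments above follow directly from the dataset size and the total batch size; re-deriving them:

```python
# 117266 filtered COCO train images, 8 GPUs x 8 images per GPU.
images = 117266
batch_size = 8 * 8
iters_per_epoch = images / batch_size          # ~1832.3
for iters in (22500, 15000, 20000):
    print(iters, round(iters / iters_per_epoch, 1))
# 22500 -> 12.3, 15000 -> 8.2, 20000 -> 10.9 epochs
```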
mmdetection
mmdetection-master/configs/yolox/README.md
# YOLOX > [YOLOX: Exceeding YOLO Series in 2021](https://arxiv.org/abs/2107.08430) <!-- [ALGORITHM] --> ## Abstract In this report, we present some experienced improvements to YOLO series, forming a new high-performance detector -- YOLOX. We switch the YOLO detector to an anchor-free manner and conduct other advanced detection techniques, i.e., a decoupled head and the leading label assignment strategy SimOTA to achieve state-of-the-art results across a large scale range of models: For YOLO-Nano with only 0.91M parameters and 1.08G FLOPs, we get 25.3% AP on COCO, surpassing NanoDet by 1.8% AP; for YOLOv3, one of the most widely used detectors in industry, we boost it to 47.3% AP on COCO, outperforming the current best practice by 3.0% AP; for YOLOX-L with roughly the same amount of parameters as YOLOv4-CSP, YOLOv5-L, we achieve 50.0% AP on COCO at a speed of 68.9 FPS on Tesla V100, exceeding YOLOv5-L by 1.8% AP. Further, we won the 1st Place on Streaming Perception Challenge (Workshop on Autonomous Driving at CVPR 2021) using a single YOLOX-L model. We hope this report can provide useful experience for developers and researchers in practical scenes, and we also provide deploy versions with ONNX, TensorRT, NCNN, and Openvino supported. <div align=center> <img src="https://user-images.githubusercontent.com/40661020/144001736-9fb303dd-eac7-46b0-ad45-214cfa51e928.png"/> </div> ## Results and Models | Backbone | size | Mem (GB) | box AP | Config | Download | | :--------: | :--: | :------: | :----: | :-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------: | | YOLOX-tiny | 416 | 3.5 | 32.0 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_tiny_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234.log.json) | | YOLOX-s | 640 | 7.6 | 40.5 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_s_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711.log.json) | | YOLOX-l | 640 | 19.9 | 49.4 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_l_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236.log.json) | | YOLOX-x | 640 | 28.1 | 50.9 | [config](https://github.com/open-mmlab/mmdetection/tree/master/configs/yolox/yolox_x_8x8_300e_coco.py) | [model](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth) \| 
[log](https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254.log.json) | **Note**: 1. The test score threshold is 0.001, and the box AP indicates the best AP. 2. Due to the need for pre-training weights, we cannot reproduce the performance of the `yolox-nano` model. Please refer to https://github.com/Megvii-BaseDetection/YOLOX/issues/674 for more information. 3. We also trained the model by the official release of YOLOX based on [Megvii-BaseDetection/YOLOX#735](https://github.com/Megvii-BaseDetection/YOLOX/issues/735) with commit ID [38c633](https://github.com/Megvii-BaseDetection/YOLOX/tree/38c633bf176462ee42b110c70e4ffe17b5753208). We found that the best AP of `YOLOX-tiny`, `YOLOX-s`, `YOLOX-l`, and `YOLOX-x` is 31.8, 40.3, 49.2, and 50.9, respectively. The performance is consistent with that of our re-implementation (see Table above) but still has a gap (0.3~0.8 AP) in comparison with the reported performance in their [README](https://github.com/Megvii-BaseDetection/YOLOX/blob/38c633bf176462ee42b110c70e4ffe17b5753208/README.md#benchmark). ## Citation ```latex @article{yolox2021, title={{YOLOX}: Exceeding YOLO Series in 2021}, author={Ge, Zheng and Liu, Songtao and Wang, Feng and Li, Zeming and Sun, Jian}, journal={arXiv preprint arXiv:2107.08430}, year={2021} } ```
5,251
130.3
1,138
md
mmdetection
mmdetection-master/configs/yolox/metafile.yml
Collections: - Name: YOLOX Metadata: Training Data: COCO Training Techniques: - SGD with Nesterov - Weight Decay - Cosine Annealing Lr Updater Training Resources: 8x TITANXp GPUs Architecture: - CSPDarkNet - PAFPN Paper: URL: https://arxiv.org/abs/2107.08430 Title: 'YOLOX: Exceeding YOLO Series in 2021' README: configs/yolox/README.md Code: URL: https://github.com/open-mmlab/mmdetection/blob/v2.15.1/mmdet/models/detectors/yolox.py#L6 Version: v2.15.1 Models: - Name: yolox_s_8x8_300e_coco In Collection: YOLOX Config: configs/yolox/yolox_s_8x8_300e_coco.py Metadata: Training Memory (GB): 7.6 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 40.5 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_s_8x8_300e_coco/yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth - Name: yolox_l_8x8_300e_coco In Collection: YOLOX Config: configs/yolox/yolox_l_8x8_300e_coco.py Metadata: Training Memory (GB): 19.9 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 49.4 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_l_8x8_300e_coco/yolox_l_8x8_300e_coco_20211126_140236-d3bd2b23.pth - Name: yolox_x_8x8_300e_coco In Collection: YOLOX Config: configs/yolox/yolox_x_8x8_300e_coco.py Metadata: Training Memory (GB): 28.1 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 50.9 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_x_8x8_300e_coco/yolox_x_8x8_300e_coco_20211126_140254-1ef88d67.pth - Name: yolox_tiny_8x8_300e_coco In Collection: YOLOX Config: configs/yolox/yolox_tiny_8x8_300e_coco.py Metadata: Training Memory (GB): 3.5 Epochs: 300 Results: - Task: Object Detection Dataset: COCO Metrics: box AP: 32.0 Weights: https://download.openmmlab.com/mmdetection/v2.0/yolox/yolox_tiny_8x8_300e_coco/yolox_tiny_8x8_300e_coco_20211124_171234-b4047906.pth
2,257
30.802817
145
yml
mmdetection
mmdetection-master/configs/yolox/yolox_l_8x8_300e_coco.py
_base_ = './yolox_s_8x8_300e_coco.py'

# model settings
model = dict(
    backbone=dict(deepen_factor=1.0, widen_factor=1.0),
    neck=dict(
        in_channels=[256, 512, 1024], out_channels=256, num_csp_blocks=3),
    bbox_head=dict(in_channels=256, feat_channels=256))
272
29.333333
74
py
mmdetection
mmdetection-master/configs/yolox/yolox_m_8x8_300e_coco.py
_base_ = './yolox_s_8x8_300e_coco.py'

# model settings
model = dict(
    backbone=dict(deepen_factor=0.67, widen_factor=0.75),
    neck=dict(in_channels=[192, 384, 768], out_channels=192, num_csp_blocks=2),
    bbox_head=dict(in_channels=192, feat_channels=192),
)
266
28.666667
79
py
mmdetection
mmdetection-master/configs/yolox/yolox_nano_8x8_300e_coco.py
_base_ = './yolox_tiny_8x8_300e_coco.py'

# model settings
model = dict(
    backbone=dict(deepen_factor=0.33, widen_factor=0.25, use_depthwise=True),
    neck=dict(
        in_channels=[64, 128, 256],
        out_channels=64,
        num_csp_blocks=1,
        use_depthwise=True),
    bbox_head=dict(in_channels=64, feat_channels=64, use_depthwise=True))
356
28.75
77
py
mmdetection
mmdetection-master/configs/yolox/yolox_s_8x8_300e_coco.py
_base_ = ['../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py'] img_scale = (640, 640) # height, width # model settings model = dict( type='YOLOX', input_size=img_scale, random_size_range=(15, 25), random_size_interval=10, backbone=dict(type='CSPDarknet', deepen_factor=0.33, widen_factor=0.5), neck=dict( type='YOLOXPAFPN', in_channels=[128, 256, 512], out_channels=128, num_csp_blocks=1), bbox_head=dict( type='YOLOXHead', num_classes=80, in_channels=128, feat_channels=128), train_cfg=dict(assigner=dict(type='SimOTAAssigner', center_radius=2.5)), # In order to align the source code, the threshold of the val phase is # 0.01, and the threshold of the test phase is 0.001. test_cfg=dict(score_thr=0.01, nms=dict(type='nms', iou_threshold=0.65))) # dataset settings data_root = 'data/coco/' dataset_type = 'CocoDataset' train_pipeline = [ dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), dict( type='RandomAffine', scaling_ratio_range=(0.1, 2), border=(-img_scale[0] // 2, -img_scale[1] // 2)), dict( type='MixUp', img_scale=img_scale, ratio_range=(0.8, 1.6), pad_val=114.0), dict(type='YOLOXHSVRandomAug'), dict(type='RandomFlip', flip_ratio=0.5), # According to the official implementation, multi-scale # training is not considered here but in the # 'mmdet/models/detectors/yolox.py'. dict(type='Resize', img_scale=img_scale, keep_ratio=True), dict( type='Pad', pad_to_square=True, # If the image is three-channel, the pad value needs # to be set separately for each channel. pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] train_dataset = dict( type='MultiImageMixDataset', dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=[ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True) ], filter_empty_gt=False, ), pipeline=train_pipeline) test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=img_scale, flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict( type='Pad', pad_to_square=True, pad_val=dict(img=(114.0, 114.0, 114.0))), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']) ]) ] data = dict( samples_per_gpu=8, workers_per_gpu=4, persistent_workers=True, train=train_dataset, val=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) # optimizer # default 8 gpu optimizer = dict( type='SGD', lr=0.01, momentum=0.9, weight_decay=5e-4, nesterov=True, paramwise_cfg=dict(norm_decay_mult=0., bias_decay_mult=0.)) optimizer_config = dict(grad_clip=None) max_epochs = 300 num_last_epochs = 15 resume_from = None interval = 10 # learning policy lr_config = dict( _delete_=True, policy='YOLOX', warmup='exp', by_epoch=False, warmup_by_epoch=True, warmup_ratio=1, warmup_iters=5, # 5 epoch num_last_epochs=num_last_epochs, min_lr_ratio=0.05) runner = dict(type='EpochBasedRunner', max_epochs=max_epochs) custom_hooks = [ dict( type='YOLOXModeSwitchHook', num_last_epochs=num_last_epochs, priority=48), dict( type='SyncNormHook', num_last_epochs=num_last_epochs, interval=interval, priority=48), dict( 
type='ExpMomentumEMAHook', resume_from=resume_from, momentum=0.0001, priority=49) ] checkpoint_config = dict(interval=interval) evaluation = dict( save_best='auto', # The evaluation interval is 'interval' when running epoch is # less than ‘max_epochs - num_last_epochs’. # The evaluation interval is 1 when running epoch is greater than # or equal to ‘max_epochs - num_last_epochs’. interval=interval, dynamic_intervals=[(max_epochs - num_last_epochs, 1)], metric='bbox') log_config = dict(interval=50) # NOTE: `auto_scale_lr` is for automatically scaling LR, # USER SHOULD NOT CHANGE ITS VALUES. # base_batch_size = (8 GPUs) x (8 samples per GPU) auto_scale_lr = dict(base_batch_size=64)
4,997
29.108434
79
py
mmdetection
mmdetection-master/configs/yolox/yolox_tiny_8x8_300e_coco.py
_base_ = './yolox_s_8x8_300e_coco.py'

# model settings
model = dict(
    random_size_range=(10, 20),
    backbone=dict(deepen_factor=0.33, widen_factor=0.375),
    neck=dict(in_channels=[96, 192, 384], out_channels=96),
    bbox_head=dict(in_channels=96, feat_channels=96))

img_scale = (640, 640)  # height, width

train_pipeline = [
    dict(type='Mosaic', img_scale=img_scale, pad_val=114.0),
    dict(
        type='RandomAffine',
        scaling_ratio_range=(0.5, 1.5),
        border=(-img_scale[0] // 2, -img_scale[1] // 2)),
    dict(type='YOLOXHSVRandomAug'),
    dict(type='RandomFlip', flip_ratio=0.5),
    dict(type='Resize', img_scale=img_scale, keep_ratio=True),
    dict(
        type='Pad',
        pad_to_square=True,
        pad_val=dict(img=(114.0, 114.0, 114.0))),
    dict(type='FilterAnnotations', min_gt_bbox_wh=(1, 1), keep_empty=False),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels'])
]

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(416, 416),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(
                type='Pad',
                pad_to_square=True,
                pad_val=dict(img=(114.0, 114.0, 114.0))),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img'])
        ])
]

train_dataset = dict(pipeline=train_pipeline)

data = dict(
    train=train_dataset,
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))

# NOTE: `auto_scale_lr` is for automatically scaling LR,
# USER SHOULD NOT CHANGE ITS VALUES.
# base_batch_size = (8 GPUs) x (8 samples per GPU)
auto_scale_lr = dict(base_batch_size=64)
1,821
29.881356
76
py
mmdetection
mmdetection-master/configs/yolox/yolox_x_8x8_300e_coco.py
_base_ = './yolox_s_8x8_300e_coco.py'

# model settings
model = dict(
    backbone=dict(deepen_factor=1.33, widen_factor=1.25),
    neck=dict(
        in_channels=[320, 640, 1280], out_channels=320, num_csp_blocks=4),
    bbox_head=dict(in_channels=320, feat_channels=320))
274
29.555556
74
py
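Across the YOLOX variants above, the neck/head widths follow from `widen_factor` applied to the full-width backbone outputs [256, 512, 1024] (the values seen at `widen_factor=1.0` in the YOLOX-l config). A small sketch of that scaling:

```python
# Channel widths implied by each variant's widen_factor, assuming the
# full-width CSPDarknet outputs are [256, 512, 1024].
full_width = [256, 512, 1024]
widen = {'nano': 0.25, 'tiny': 0.375, 's': 0.5, 'm': 0.75, 'l': 1.0, 'x': 1.25}
for name, w in widen.items():
    print(name, [int(c * w) for c in full_width])
# nano [64, 128, 256], tiny [96, 192, 384], s [128, 256, 512],
# m [192, 384, 768], l [256, 512, 1024], x [320, 640, 1280]
```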
mmdetection
mmdetection-master/demo/create_result_gif.py
# Copyright (c) OpenMMLab. All rights reserved. import argparse import os import os.path as osp import matplotlib.patches as mpatches import matplotlib.pyplot as plt import mmcv import numpy as np try: import imageio except ImportError: imageio = None def parse_args(): parser = argparse.ArgumentParser(description='Create GIF for demo') parser.add_argument( 'image_dir', help='directory where result ' 'images save path generated by ‘analyze_results.py’') parser.add_argument( '--out', type=str, default='result.gif', help='gif path where will be saved') args = parser.parse_args() return args def _generate_batch_data(sampler, batch_size): batch = [] for idx in sampler: batch.append(idx) if len(batch) == batch_size: yield batch batch = [] if len(batch) > 0: yield batch def create_gif(frames, gif_name, duration=2): """Create gif through imageio. Args: frames (list[ndarray]): Image frames gif_name (str): Saved gif name duration (int): Display interval (s), Default: 2 """ if imageio is None: raise RuntimeError('imageio is not installed,' 'Please use “pip install imageio” to install') imageio.mimsave(gif_name, frames, 'GIF', duration=duration) def create_frame_by_matplotlib(image_dir, nrows=1, fig_size=(300, 300), font_size=15): """Create gif frame image through matplotlib. Args: image_dir (str): Root directory of result images nrows (int): Number of rows displayed, Default: 1 fig_size (tuple): Figure size of the pyplot figure. Default: (300, 300) font_size (int): Font size of texts. Default: 15 Returns: list[ndarray]: image frames """ result_dir_names = os.listdir(image_dir) assert len(result_dir_names) == 2 # Longer length has higher priority result_dir_names.reverse() images_list = [] for dir_names in result_dir_names: images_list.append(mmcv.scandir(osp.join(image_dir, dir_names))) frames = [] for paths in _generate_batch_data(zip(*images_list), nrows): fig, axes = plt.subplots(nrows=nrows, ncols=2) fig.suptitle('Good/bad case selected according ' 'to the COCO mAP of the single image') det_patch = mpatches.Patch(color='salmon', label='prediction') gt_patch = mpatches.Patch(color='royalblue', label='ground truth') # bbox_to_anchor may need to be finetuned plt.legend( handles=[det_patch, gt_patch], bbox_to_anchor=(1, -0.18), loc='lower right', borderaxespad=0.) 
if nrows == 1: axes = [axes] dpi = fig.get_dpi() # set fig size and margin fig.set_size_inches( (fig_size[0] * 2 + fig_size[0] // 20) / dpi, (fig_size[1] * nrows + fig_size[1] // 3) / dpi, ) fig.tight_layout() # set subplot margin plt.subplots_adjust( hspace=.05, wspace=0.05, left=0.02, right=0.98, bottom=0.02, top=0.98) for i, (path_tuple, ax_tuple) in enumerate(zip(paths, axes)): image_path_left = osp.join( osp.join(image_dir, result_dir_names[0], path_tuple[0])) image_path_right = osp.join( osp.join(image_dir, result_dir_names[1], path_tuple[1])) image_left = mmcv.imread(image_path_left) image_left = mmcv.rgb2bgr(image_left) image_right = mmcv.imread(image_path_right) image_right = mmcv.rgb2bgr(image_right) if i == 0: ax_tuple[0].set_title( result_dir_names[0], fontdict={'size': font_size}) ax_tuple[1].set_title( result_dir_names[1], fontdict={'size': font_size}) ax_tuple[0].imshow( image_left, extent=(0, *fig_size, 0), interpolation='bilinear') ax_tuple[0].axis('off') ax_tuple[1].imshow( image_right, extent=(0, *fig_size, 0), interpolation='bilinear') ax_tuple[1].axis('off') canvas = fig.canvas s, (width, height) = canvas.print_to_buffer() buffer = np.frombuffer(s, dtype='uint8') img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) img = rgb.astype('uint8') frames.append(img) return frames def main(): args = parse_args() frames = create_frame_by_matplotlib(args.image_dir) create_gif(frames, args.out) if __name__ == '__main__': main()
4,930
29.067073
79
py
mmdetection
mmdetection-master/demo/image_demo.py
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
from argparse import ArgumentParser

from mmdet.apis import (async_inference_detector, inference_detector,
                        init_detector, show_result_pyplot)


def parse_args():
    parser = ArgumentParser()
    parser.add_argument('img', help='Image file')
    parser.add_argument('config', help='Config file')
    parser.add_argument('checkpoint', help='Checkpoint file')
    parser.add_argument('--out-file', default=None, help='Path to output file')
    parser.add_argument(
        '--device', default='cuda:0', help='Device used for inference')
    parser.add_argument(
        '--palette',
        default='coco',
        choices=['coco', 'voc', 'citys', 'random'],
        help='Color palette used for visualization')
    parser.add_argument(
        '--score-thr', type=float, default=0.3, help='bbox score threshold')
    parser.add_argument(
        '--async-test',
        action='store_true',
        help='whether to set async options for async inference.')
    args = parser.parse_args()
    return args


def main(args):
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # test a single image
    result = inference_detector(model, args.img)
    # show the results
    show_result_pyplot(
        model,
        args.img,
        result,
        palette=args.palette,
        score_thr=args.score_thr,
        out_file=args.out_file)


async def async_main(args):
    # build the model from a config file and a checkpoint file
    model = init_detector(args.config, args.checkpoint, device=args.device)
    # test a single image
    tasks = asyncio.create_task(async_inference_detector(model, args.img))
    result = await asyncio.gather(tasks)
    # show the results
    show_result_pyplot(
        model,
        args.img,
        result[0],
        palette=args.palette,
        score_thr=args.score_thr,
        out_file=args.out_file)


if __name__ == '__main__':
    args = parse_args()
    if args.async_test:
        asyncio.run(async_main(args))
    else:
        main(args)
2,164
30.376812
79
py
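The script above is a thin CLI wrapper around the `mmdet.apis` functions it imports, so the same inference can be driven programmatically. A minimal sketch, assuming a config/checkpoint pair such as the YOLOX-s files listed earlier in this dump has been downloaded locally and the paths below are placeholders:

```python
from mmdet.apis import inference_detector, init_detector, show_result_pyplot

# Placeholder paths; substitute any config/checkpoint pair and test image.
config = 'configs/yolox/yolox_s_8x8_300e_coco.py'
checkpoint = 'yolox_s_8x8_300e_coco_20211121_095711-4592a793.pth'

model = init_detector(config, checkpoint, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')
show_result_pyplot(model, 'demo/demo.jpg', result, score_thr=0.3)
```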
mmdetection
mmdetection-master/demo/video_demo.py
# Copyright (c) OpenMMLab. All rights reserved. import argparse import cv2 import mmcv from mmdet.apis import inference_detector, init_detector def parse_args(): parser = argparse.ArgumentParser(description='MMDetection video demo') parser.add_argument('video', help='Video file') parser.add_argument('config', help='Config file') parser.add_argument('checkpoint', help='Checkpoint file') parser.add_argument( '--device', default='cuda:0', help='Device used for inference') parser.add_argument( '--score-thr', type=float, default=0.3, help='Bbox score threshold') parser.add_argument('--out', type=str, help='Output video file') parser.add_argument('--show', action='store_true', help='Show video') parser.add_argument( '--wait-time', type=float, default=1, help='The interval of show (s), 0 is block') args = parser.parse_args() return args def main(): args = parse_args() assert args.out or args.show, \ ('Please specify at least one operation (save/show the ' 'video) with the argument "--out" or "--show"') model = init_detector(args.config, args.checkpoint, device=args.device) video_reader = mmcv.VideoReader(args.video) video_writer = None if args.out: fourcc = cv2.VideoWriter_fourcc(*'mp4v') video_writer = cv2.VideoWriter( args.out, fourcc, video_reader.fps, (video_reader.width, video_reader.height)) for frame in mmcv.track_iter_progress(video_reader): result = inference_detector(model, frame) frame = model.show_result(frame, result, score_thr=args.score_thr) if args.show: cv2.namedWindow('video', 0) mmcv.imshow(frame, 'video', args.wait_time) if args.out: video_writer.write(frame) if video_writer: video_writer.release() cv2.destroyAllWindows() if __name__ == '__main__': main()
1,974
30.854839
76
py
mmdetection
mmdetection-master/demo/video_gpuaccel_demo.py
# Copyright (c) OpenMMLab. All rights reserved. import argparse import cv2 import mmcv import numpy as np import torch from torchvision.transforms import functional as F from mmdet.apis import init_detector from mmdet.datasets.pipelines import Compose try: import ffmpegcv except ImportError: raise ImportError( 'Please install ffmpegcv with:\n\n pip install ffmpegcv') def parse_args(): parser = argparse.ArgumentParser( description='MMDetection video demo with GPU acceleration') parser.add_argument('video', help='Video file') parser.add_argument('config', help='Config file') parser.add_argument('checkpoint', help='Checkpoint file') parser.add_argument( '--device', default='cuda:0', help='Device used for inference') parser.add_argument( '--score-thr', type=float, default=0.3, help='Bbox score threshold') parser.add_argument('--out', type=str, help='Output video file') parser.add_argument('--show', action='store_true', help='Show video') parser.add_argument( '--nvdecode', action='store_true', help='Use NVIDIA decoder') parser.add_argument( '--wait-time', type=float, default=1, help='The interval of show (s), 0 is block') args = parser.parse_args() return args def prefetch_img_metas(cfg, ori_wh): w, h = ori_wh cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' test_pipeline = Compose(cfg.data.test.pipeline) data = {'img': np.zeros((h, w, 3), dtype=np.uint8)} data = test_pipeline(data) img_metas = data['img_metas'][0].data return img_metas def process_img(frame_resize, img_metas, device): assert frame_resize.shape == img_metas['pad_shape'] frame_cuda = torch.from_numpy(frame_resize).to(device).float() frame_cuda = frame_cuda.permute(2, 0, 1) # HWC to CHW mean = torch.from_numpy(img_metas['img_norm_cfg']['mean']).to(device) std = torch.from_numpy(img_metas['img_norm_cfg']['std']).to(device) frame_cuda = F.normalize(frame_cuda, mean=mean, std=std, inplace=True) frame_cuda = frame_cuda[None, :, :, :] # NCHW data = {'img': [frame_cuda], 'img_metas': [[img_metas]]} return data def main(): args = parse_args() assert args.out or args.show, \ ('Please specify at least one operation (save/show the ' 'video) with the argument "--out" or "--show"') model = init_detector(args.config, args.checkpoint, device=args.device) if args.nvdecode: VideoCapture = ffmpegcv.VideoCaptureNV else: VideoCapture = ffmpegcv.VideoCapture video_origin = VideoCapture(args.video) img_metas = prefetch_img_metas(model.cfg, (video_origin.width, video_origin.height)) resize_wh = img_metas['pad_shape'][1::-1] video_resize = VideoCapture( args.video, resize=resize_wh, resize_keepratio=True, resize_keepratioalign='topleft', pix_fmt='rgb24') video_writer = None if args.out: video_writer = ffmpegcv.VideoWriter(args.out, fps=video_origin.fps) with torch.no_grad(): for frame_resize, frame_origin in zip( mmcv.track_iter_progress(video_resize), video_origin): data = process_img(frame_resize, img_metas, args.device) result = model(return_loss=False, rescale=True, **data)[0] frame_mask = model.show_result( frame_origin, result, score_thr=args.score_thr) if args.show: cv2.namedWindow('video', 0) mmcv.imshow(frame_mask, 'video', args.wait_time) if args.out: video_writer.write(frame_mask) if video_writer: video_writer.release() video_origin.release() video_resize.release() cv2.destroyAllWindows() if __name__ == '__main__': main()
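# --- A separate, hedged sketch (not part of video_gpuaccel_demo.py) ---
# It illustrates the pattern above for a single frame: the test-pipeline
# metadata is computed once with prefetch_img_metas, and each frame only needs
# the cheap GPU-side normalization done by process_img. It assumes the two
# helper functions above are available in the current scope and that the
# config/checkpoint paths (placeholders here) exist locally.
import numpy as np
import torch

from mmdet.apis import init_detector

model = init_detector('some_config.py', 'some_checkpoint.pth', device='cuda:0')
img_metas = prefetch_img_metas(model.cfg, (1280, 720))  # original frame width/height
# A dummy frame that is already resized/padded to the pipeline's pad_shape.
frame = np.zeros(img_metas['pad_shape'], dtype=np.uint8)
data = process_img(frame, img_metas, 'cuda:0')
with torch.no_grad():
    result = model(return_loss=False, rescale=True, **data)[0]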
3,892
33.149123
77
py
mmdetection
mmdetection-master/demo/webcam_demo.py
# Copyright (c) OpenMMLab. All rights reserved. import argparse import cv2 import torch from mmdet.apis import inference_detector, init_detector def parse_args(): parser = argparse.ArgumentParser(description='MMDetection webcam demo') parser.add_argument('config', help='test config file path') parser.add_argument('checkpoint', help='checkpoint file') parser.add_argument( '--device', type=str, default='cuda:0', help='CPU/CUDA device option') parser.add_argument( '--camera-id', type=int, default=0, help='camera device id') parser.add_argument( '--score-thr', type=float, default=0.5, help='bbox score threshold') args = parser.parse_args() return args def main(): args = parse_args() device = torch.device(args.device) model = init_detector(args.config, args.checkpoint, device=device) camera = cv2.VideoCapture(args.camera_id) print('Press "Esc", "q" or "Q" to exit.') while True: ret_val, img = camera.read() result = inference_detector(model, img) ch = cv2.waitKey(1) if ch == 27 or ch == ord('q') or ch == ord('Q'): break model.show_result( img, result, score_thr=args.score_thr, wait_time=1, show=True) if __name__ == '__main__': main()
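# --- A separate, hedged sketch (not part of webcam_demo.py) ---
# A rough way to gauge per-frame latency of the same inference call used above;
# the config/checkpoint paths are placeholders.
import time

import cv2
from mmdet.apis import inference_detector, init_detector

model = init_detector('some_config.py', 'some_checkpoint.pth', device='cuda:0')
camera = cv2.VideoCapture(0)

ret_val, img = camera.read()
if ret_val:
    start = time.time()
    result = inference_detector(model, img)
    print(f'inference took {time.time() - start:.3f}s for one frame')
camera.release()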
1,308
26.270833
78
py
mmdetection
mmdetection-master/docker/serve/entrypoint.sh
#!/bin/bash set -e if [[ "$1" = "serve" ]]; then shift 1 torchserve --start --ts-config /home/model-server/config.properties else eval "$@" fi # prevent docker exit tail -f /dev/null
197
14.230769
71
sh
mmdetection
mmdetection-master/docs/en/1_exist_data_model.md
# 1: Inference and train with existing models and standard datasets

MMDetection provides hundreds of existing detection models in [Model Zoo](https://mmdetection.readthedocs.io/en/latest/model_zoo.html), and supports multiple standard datasets, including Pascal VOC, COCO, CityScapes, LVIS, etc. This note will show how to perform common tasks on these existing models and standard datasets, including:

- Use existing models to run inference on given images.
- Test existing models on standard datasets.
- Train predefined models on standard datasets.

## Inference with existing models

By inference, we mean using trained models to detect objects on images. In MMDetection, a model is defined by a configuration file and existing model parameters are saved in a checkpoint file.

To start with, we recommend [Faster RCNN](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) with this [configuration file](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) and this [checkpoint file](https://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth). It is recommended to download the checkpoint file to the `checkpoints` directory.

### High-level APIs for inference

MMDetection provides high-level Python APIs for inference on images. Here is an example of building the model and running inference on given images or videos.

```python
from mmdet.apis import init_detector, inference_detector
import mmcv

# Specify the path to model config and checkpoint file
config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'

# build the model from a config file and a checkpoint file
model = init_detector(config_file, checkpoint_file, device='cuda:0')

# test a single image and show the results
img = 'test.jpg'  # or img = mmcv.imread(img), which will only load it once
result = inference_detector(model, img)
# visualize the results in a new window
model.show_result(img, result)
# or save the visualization results to image files
model.show_result(img, result, out_file='result.jpg')

# test a video and show the results
video = mmcv.VideoReader('video.mp4')
for frame in video:
    result = inference_detector(model, frame)
    model.show_result(frame, result, wait_time=1)
```

A notebook demo can be found in [demo/inference_demo.ipynb](https://github.com/open-mmlab/mmdetection/blob/master/demo/inference_demo.ipynb).

Note: `inference_detector` only supports single-image inference for now.

### Asynchronous interface - supported for Python 3.7+

For Python 3.7+, MMDetection also supports async interfaces. By utilizing CUDA streams, it avoids blocking the CPU on GPU-bound inference code and enables better CPU/GPU utilization for single-threaded applications. Inference can be done concurrently either between different input data samples or between different models of an inference pipeline.

See `tests/async_benchmark.py` to compare the speed of synchronous and asynchronous interfaces.
```python import asyncio import torch from mmdet.apis import init_detector, async_inference_detector from mmdet.utils.contextmanagers import concurrent async def main(): config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' device = 'cuda:0' model = init_detector(config_file, checkpoint=checkpoint_file, device=device) # queue is used for concurrent inference of multiple images streamqueue = asyncio.Queue() # queue size defines concurrency level streamqueue_size = 3 for _ in range(streamqueue_size): streamqueue.put_nowait(torch.cuda.Stream(device=device)) # test a single image and show the results img = 'test.jpg' # or img = mmcv.imread(img), which will only load it once async with concurrent(streamqueue): result = await async_inference_detector(model, img) # visualize the results in a new window model.show_result(img, result) # or save the visualization results to image files model.show_result(img, result, out_file='result.jpg') asyncio.run(main()) ``` ### Demos We also provide three demo scripts, implemented with high-level APIs and supporting functionality codes. Source codes are available [here](https://github.com/open-mmlab/mmdetection/tree/master/demo). #### Image demo This script performs inference on a single image. ```shell python demo/image_demo.py \ ${IMAGE_FILE} \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--device ${GPU_ID}] \ [--score-thr ${SCORE_THR}] ``` Examples: ```shell python demo/image_demo.py demo/demo.jpg \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --device cpu ``` #### Webcam demo This is a live demo from a webcam. ```shell python demo/webcam_demo.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--device ${GPU_ID}] \ [--camera-id ${CAMERA-ID}] \ [--score-thr ${SCORE_THR}] ``` Examples: ```shell python demo/webcam_demo.py \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth ``` #### Video demo This script performs inference on a video. ```shell python demo/video_demo.py \ ${VIDEO_FILE} \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--device ${GPU_ID}] \ [--score-thr ${SCORE_THR}] \ [--out ${OUT_FILE}] \ [--show] \ [--wait-time ${WAIT_TIME}] ``` Examples: ```shell python demo/video_demo.py demo/demo.mp4 \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --out result.mp4 ``` #### Video demo with GPU acceleration This script performs inference on a video with GPU acceleration. ```shell python demo/video_gpuaccel_demo.py \ ${VIDEO_FILE} \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--device ${GPU_ID}] \ [--score-thr ${SCORE_THR}] \ [--nvdecode] \ [--out ${OUT_FILE}] \ [--show] \ [--wait-time ${WAIT_TIME}] ``` Examples: ```shell python demo/video_gpuaccel_demo.py demo/demo.mp4 \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --nvdecode --out result.mp4 ``` ## Test existing models on standard datasets To evaluate a model's accuracy, one usually tests the model on some standard datasets. MMDetection supports multiple public datasets including COCO, Pascal VOC, CityScapes, and [more](https://github.com/open-mmlab/mmdetection/tree/master/configs/_base_/datasets). This section will show how to test existing models on supported datasets. 
### Prepare datasets Public datasets like [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC/index.html) or mirror and [COCO](https://cocodataset.org/#download) are available from official websites or mirrors. Note: In the detection task, Pascal VOC 2012 is an extension of Pascal VOC 2007 without overlap, and we usually use them together. It is recommended to download and extract the dataset somewhere outside the project directory and symlink the dataset root to `$MMDETECTION/data` as below. If your folder structure is different, you may need to change the corresponding paths in config files. We provide a script to download datasets such as COCO , you can run `python tools/misc/download_dataset.py --dataset-name coco2017` to download COCO dataset. For more usage please refer to [dataset-download](https://github.com/open-mmlab/mmdetection/tree/master/docs/en/useful_tools.md#dataset-download) ```text mmdetection ├── mmdet ├── tools ├── configs ├── data │ ├── coco │ │ ├── annotations │ │ ├── train2017 │ │ ├── val2017 │ │ ├── test2017 │ ├── cityscapes │ │ ├── annotations │ │ ├── leftImg8bit │ │ │ ├── train │ │ │ ├── val │ │ ├── gtFine │ │ │ ├── train │ │ │ ├── val │ ├── VOCdevkit │ │ ├── VOC2007 │ │ ├── VOC2012 ``` Some models require additional [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) datasets, such as HTC, DetectoRS and SCNet, you can download and unzip then move to the coco folder. The directory should be like this. ```text mmdetection ├── data │ ├── coco │ │ ├── annotations │ │ ├── train2017 │ │ ├── val2017 │ │ ├── test2017 │ │ ├── stuffthingmaps ``` Panoptic segmentation models like PanopticFPN require additional [COCO Panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) datasets, you can download and unzip then move to the coco annotation folder. The directory should be like this. ```text mmdetection ├── data │ ├── coco │ │ ├── annotations │ │ │ ├── panoptic_train2017.json │ │ │ ├── panoptic_train2017 │ │ │ ├── panoptic_val2017.json │ │ │ ├── panoptic_val2017 │ │ ├── train2017 │ │ ├── val2017 │ │ ├── test2017 ``` The [cityscapes](https://www.cityscapes-dataset.com/) annotations need to be converted into the coco format using `tools/dataset_converters/cityscapes.py`: ```shell pip install cityscapesscripts python tools/dataset_converters/cityscapes.py \ ./data/cityscapes \ --nproc 8 \ --out-dir ./data/cityscapes/annotations ``` TODO: CHANGE TO THE NEW PATH ### Test existing models We provide testing scripts for evaluating an existing model on the whole dataset (COCO, PASCAL VOC, Cityscapes, etc.). The following testing environments are supported: - single GPU - CPU - single node multiple GPUs - multiple nodes Choose the proper script to perform testing depending on the testing environment. ```shell # single-gpu testing python tools/test.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--out ${RESULT_FILE}] \ [--eval ${EVAL_METRICS}] \ [--show] # CPU: disable GPUs and run single-gpu testing script export CUDA_VISIBLE_DEVICES=-1 python tools/test.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--out ${RESULT_FILE}] \ [--eval ${EVAL_METRICS}] \ [--show] # multi-gpu testing bash tools/dist_test.sh \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ ${GPU_NUM} \ [--out ${RESULT_FILE}] \ [--eval ${EVAL_METRICS}] ``` `tools/dist_test.sh` also supports multi-node testing, but relies on PyTorch's [launch utility](https://pytorch.org/docs/stable/distributed.html#launch-utility). 
Optional arguments: - `RESULT_FILE`: Filename of the output results in pickle format. If not specified, the results will not be saved to a file. - `EVAL_METRICS`: Items to be evaluated on the results. Allowed values depend on the dataset, e.g., `proposal_fast`, `proposal`, `bbox`, `segm` are available for COCO, `mAP`, `recall` for PASCAL VOC. Cityscapes could be evaluated by `cityscapes` as well as all COCO metrics. - `--show`: If specified, detection results will be plotted on the images and shown in a new window. It is only applicable to single GPU testing and used for debugging and visualization. Please make sure that GUI is available in your environment. Otherwise, you may encounter an error like `cannot connect to X server`. - `--show-dir`: If specified, detection results will be plotted on the images and saved to the specified directory. It is only applicable to single GPU testing and used for debugging and visualization. You do NOT need a GUI available in your environment for using this option. - `--show-score-thr`: If specified, detections with scores below this threshold will be removed. - `--cfg-options`: if specified, the key-value pair optional cfg will be merged into config file - `--eval-options`: if specified, the key-value pair optional eval cfg will be kwargs for dataset.evaluate() function, it's only for evaluation ### Examples Assuming that you have already downloaded the checkpoints to the directory `checkpoints/`. 1. Test Faster R-CNN and visualize the results. Press any key for the next image. Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn). ```shell python tools/test.py \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --show ``` 2. Test Faster R-CNN and save the painted images for future visualization. Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn). ```shell python tools/test.py \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --show-dir faster_rcnn_r50_fpn_1x_results ``` 3. Test Faster R-CNN on PASCAL VOC (without saving the test results) and evaluate the mAP. Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc). ```shell python tools/test.py \ configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc.py \ checkpoints/faster_rcnn_r50_fpn_1x_voc0712_20200624-c9895d40.pth \ --eval mAP ``` 4. Test Mask R-CNN with 8 GPUs, and evaluate the bbox and mask AP. Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn). ```shell ./tools/dist_test.sh \ configs/mask_rcnn_r50_fpn_1x_coco.py \ checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ 8 \ --out results.pkl \ --eval bbox segm ``` 5. Test Mask R-CNN with 8 GPUs, and evaluate the **classwise** bbox and mask AP. Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn). ```shell ./tools/dist_test.sh \ configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ 8 \ --out results.pkl \ --eval bbox segm \ --options "classwise=True" ``` 6. Test Mask R-CNN on COCO test-dev with 8 GPUs, and generate JSON files for submitting to the official evaluation server. 
Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn). ```shell ./tools/dist_test.sh \ configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ 8 \ --format-only \ --options "jsonfile_prefix=./mask_rcnn_test-dev_results" ``` This command generates two JSON files `mask_rcnn_test-dev_results.bbox.json` and `mask_rcnn_test-dev_results.segm.json`. 7. Test Mask R-CNN on Cityscapes test with 8 GPUs, and generate txt and png files for submitting to the official evaluation server. Config and checkpoint files are available [here](https://github.com/open-mmlab/mmdetection/tree/master/configs/cityscapes). ```shell ./tools/dist_test.sh \ configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py \ checkpoints/mask_rcnn_r50_fpn_1x_cityscapes_20200227-afe51d5a.pth \ 8 \ --format-only \ --options "txtfile_prefix=./mask_rcnn_cityscapes_test_results" ``` The generated png and txt would be under `./mask_rcnn_cityscapes_test_results` directory. ### Test without Ground Truth Annotations MMDetection supports to test models without ground-truth annotations using `CocoDataset`. If your dataset format is not in COCO format, please convert them to COCO format. For example, if your dataset format is VOC, you can directly convert it to COCO format by the [script in tools.](https://github.com/open-mmlab/mmdetection/tree/master/tools/dataset_converters/pascal_voc.py) If your dataset format is Cityscapes, you can directly convert it to COCO format by the [script in tools.](https://github.com/open-mmlab/mmdetection/tree/master/tools/dataset_converters/cityscapes.py) The rest of the formats can be converted using [this script](https://github.com/open-mmlab/mmdetection/tree/master/tools/dataset_converters/images2coco.py). ```shel python tools/dataset_converters/images2coco.py \ ${IMG_PATH} \ ${CLASSES} \ ${OUT} \ [--exclude-extensions] ``` arguments: - `IMG_PATH`: The root path of images. - `CLASSES`: The text file with a list of categories. - `OUT`: The output annotation json file name. The save dir is in the same directory as `IMG_PATH`. - `exclude-extensions`: The suffix of images to be excluded, such as 'png' and 'bmp'. After the conversion is complete, you can use the following command to test ```shell # single-gpu testing python tools/test.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ --format-only \ --options ${JSONFILE_PREFIX} \ [--show] # CPU: disable GPUs and run single-gpu testing script export CUDA_VISIBLE_DEVICES=-1 python tools/test.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--out ${RESULT_FILE}] \ [--eval ${EVAL_METRICS}] \ [--show] # multi-gpu testing bash tools/dist_test.sh \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ ${GPU_NUM} \ --format-only \ --options ${JSONFILE_PREFIX} \ [--show] ``` Assuming that the checkpoints in the [model zoo](https://mmdetection.readthedocs.io/en/latest/modelzoo_statistics.html) have been downloaded to the directory `checkpoints/`, we can test Mask R-CNN on COCO test-dev with 8 GPUs, and generate JSON files using the following command. ```sh ./tools/dist_test.sh \ configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ 8 \ --format-only \ --options "jsonfile_prefix=./mask_rcnn_test-dev_results" ``` This command generates two JSON files `mask_rcnn_test-dev_results.bbox.json` and `mask_rcnn_test-dev_results.segm.json`. 
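As an optional sanity check (not part of the original workflow), the generated result files can be loaded back with `mmcv` before submission; the file name below matches the `jsonfile_prefix` used in the command above.

```python
import mmcv

# Each entry follows the COCO result format.
bbox_results = mmcv.load('mask_rcnn_test-dev_results.bbox.json')
print(f'{len(bbox_results)} detections')
print(bbox_results[0].keys())  # typically: image_id, bbox, score, category_id
```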
### Batch Inference

MMDetection supports inference with a single image or batched images in test mode. By default, we use single-image inference; you can use batch inference by modifying `samples_per_gpu` in the config of test data. You can do that either by modifying the config as below.

```python
data = dict(train=dict(...), val=dict(...), test=dict(samples_per_gpu=2, ...))
```

Or you can set it through `--cfg-options` as `--cfg-options data.test.samples_per_gpu=2`.

### Deprecated ImageToTensor

In test mode, the `ImageToTensor` pipeline is deprecated and replaced by `DefaultFormatBundle`. It is recommended to manually replace it in the test data pipeline of your config file. For example:

```python
# use ImageToTensor (deprecated)
pipelines = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
            dict(type='Pad', size_divisor=32),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
    ]

# manually replace ImageToTensor with DefaultFormatBundle (recommended)
pipelines = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1333, 800),
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]),
            dict(type='Pad', size_divisor=32),
            dict(type='DefaultFormatBundle'),
            dict(type='Collect', keys=['img']),
        ])
    ]
```

## Train predefined models on standard datasets

MMDetection also provides out-of-the-box tools for training detection models. This section will show how to train _predefined_ models (under [configs](https://github.com/open-mmlab/mmdetection/tree/master/configs)) on standard datasets, i.e. COCO.

### Prepare datasets

Training requires preparing datasets too. See section [Prepare datasets](#prepare-datasets) above for details.

**Note**: Currently, the config files under `configs/cityscapes` use COCO pretrained weights to initialize. You could download the existing models in advance if the network connection is unavailable or slow. Otherwise, it would cause errors at the beginning of training.

### Learning rate automatically scale

**Important**: The default learning rate in config files is for 8 GPUs and 2 samples per GPU (batch size = 8 * 2 = 16). This value has been set to `auto_scale_lr.base_batch_size` in `config/_base_/default_runtime.py`. The learning rate will be automatically scaled based on this value when the actual batch size is not `16`. Meanwhile, in order not to affect other codebases that are based on mmdet, the flag `auto_scale_lr.enable` is set to `False` by default.

If you want to enable this feature, you need to add the argument `--auto-scale-lr`. You also need to check the name of the config you want to use before running the command, because the config name indicates the default batch size. By default, it is `8 x 2 = 16 batch size`, like `faster_rcnn_r50_caffe_fpn_90k_coco.py` or `pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py`. In other cases, the config file name contains `_NxM_` to indicate the batch size, like `cornernet_hourglass104_mstest_32x3_210e_coco.py` whose batch size is `32 x 3 = 96`, or `scnet_x101_64x4d_fpn_8x1_20e_coco.py` whose batch size is `8 x 1 = 8`.

**Please remember to check the bottom of the specific config file you want to use; it will have `auto_scale_lr.base_batch_size` if the batch size is not `16`.
If you can't find those values, check the config file which in `_base_=[xxx]` and you will find it. Please do not modify its values if you want to automatically scale the LR.** Learning rate automatically scale basic usage is as follows. ```shell python tools/train.py \ ${CONFIG_FILE} \ --auto-scale-lr \ [optional arguments] ``` If you enabled this feature, the learning rate will be automatically scaled according to the number of GPUs of the machine and the batch size of training. See [linear scaling rule](https://arxiv.org/abs/1706.02677) for details. For example, If there are 4 GPUs and 2 pictures on each GPU, `lr = 0.01`, then if there are 16 GPUs and 4 pictures on each GPU, it will automatically scale to `lr = 0.08`. If you don't want to use it, you need to calculate the learning rate according to the [linear scaling rule](https://arxiv.org/abs/1706.02677) manually then change `optimizer.lr` in specific config file. ### Training on a single GPU We provide `tools/train.py` to launch training jobs on a single GPU. The basic usage is as follows. ```shell python tools/train.py \ ${CONFIG_FILE} \ [optional arguments] ``` During training, log files and checkpoints will be saved to the working directory, which is specified by `work_dir` in the config file or via CLI argument `--work-dir`. By default, the model is evaluated on the validation set every epoch, the evaluation interval can be specified in the config file as shown below. ```python # evaluate the model every 12 epoch. evaluation = dict(interval=12) ``` This tool accepts several optional arguments, including: - `--no-validate` (**not suggested**): Disable evaluation during training. - `--work-dir ${WORK_DIR}`: Override the working directory. - `--resume-from ${CHECKPOINT_FILE}`: Resume from a previous checkpoint file. - `--options 'Key=value'`: Overrides other settings in the used config. **Note**: Difference between `resume-from` and `load-from`: `resume-from` loads both the model weights and optimizer status, and the epoch is also inherited from the specified checkpoint. It is usually used for resuming the training process that is interrupted accidentally. `load-from` only loads the model weights and the training epoch starts from 0. It is usually used for finetuning. ### Training on CPU The process of training on the CPU is consistent with single GPU training. We just need to disable GPUs before the training process. ```shell export CUDA_VISIBLE_DEVICES=-1 ``` And then run the script [above](#training-on-a-single-GPU). **Note**: We do not recommend users to use CPU for training because it is too slow. We support this feature to allow users to debug on machines without GPU for convenience. ### Training on multiple GPUs We provide `tools/dist_train.sh` to launch training on multiple GPUs. The basic usage is as follows. ```shell bash ./tools/dist_train.sh \ ${CONFIG_FILE} \ ${GPU_NUM} \ [optional arguments] ``` Optional arguments remain the same as stated [above](#training-on-a-single-GPU). #### Launch multiple jobs simultaneously If you would like to launch multiple jobs on a single machine, e.g., 2 jobs of 4-GPU training on a machine with 8 GPUs, you need to specify different ports (29500 by default) for each job to avoid communication conflict. If you use `dist_train.sh` to launch training jobs, you can set the port in commands. 
```shell CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 ``` ### Train with multiple machines If you launch with multiple machines simply connected with ethernet, you can simply run following commands: On the first machine: ```shell NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS ``` On the second machine: ```shell NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS ``` Usually it is slow if you do not have high speed networking like InfiniBand. ### Manage jobs with Slurm [Slurm](https://slurm.schedmd.com/) is a good job scheduling system for computing clusters. On a cluster managed by Slurm, you can use `slurm_train.sh` to spawn training jobs. It supports both single-node and multi-node training. The basic usage is as follows. ```shell [GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} ``` Below is an example of using 16 GPUs to train Mask R-CNN on a Slurm partition named _dev_, and set the work-dir to some shared file systems. ```shell GPUS=16 ./tools/slurm_train.sh dev mask_r50_1x configs/mask_rcnn_r50_fpn_1x_coco.py /nfs/xxxx/mask_rcnn_r50_fpn_1x ``` You can check [the source code](https://github.com/open-mmlab/mmdetection/blob/master/tools/slurm_train.sh) to review full arguments and environment variables. When using Slurm, the port option need to be set in one of the following ways: 1. Set the port through `--options`. This is more recommended since it does not change the original configs. ```shell CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} --options 'dist_params.port=29500' CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} --options 'dist_params.port=29501' ``` 2. Modify the config files to set different communication ports. In `config1.py`, set ```python dist_params = dict(backend='nccl', port=29500) ``` In `config2.py`, set ```python dist_params = dict(backend='nccl', port=29501) ``` Then you can launch two jobs with `config1.py` and `config2.py`. ```shell CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} ```
27,696
38.680516
736
md
mmdetection
mmdetection-master/docs/en/2_new_data_model.md
# 2: Train with customized datasets In this note, you will know how to inference, test, and train predefined models with customized datasets. We use the [balloon dataset](https://github.com/matterport/Mask_RCNN/tree/master/samples/balloon) as an example to describe the whole process. The basic steps are as below: 1. Prepare the customized dataset 2. Prepare a config 3. Train, test, inference models on the customized dataset. ## Prepare the customized dataset There are three ways to support a new dataset in MMDetection: 1. reorganize the dataset into COCO format. 2. reorganize the dataset into a middle format. 3. implement a new dataset. Usually we recommend to use the first two methods which are usually easier than the third. In this note, we give an example for converting the data into COCO format. **Note**: MMDetection only supports evaluating mask AP of dataset in COCO format for now. So for instance segmentation task users should convert the data into coco format. ### COCO annotation format The necessary keys of COCO format for instance segmentation is as below, for the complete details, please refer [here](https://cocodataset.org/#format-data). ```json { "images": [image], "annotations": [annotation], "categories": [category] } image = { "id": int, "width": int, "height": int, "file_name": str, } annotation = { "id": int, "image_id": int, "category_id": int, "segmentation": RLE or [polygon], "area": float, "bbox": [x,y,width,height], "iscrowd": 0 or 1, } categories = [{ "id": int, "name": str, "supercategory": str, }] ``` Assume we use the balloon dataset. After downloading the data, we need to implement a function to convert the annotation format into the COCO format. Then we can use implemented COCODataset to load the data and perform training and evaluation. If you take a look at the dataset, you will find the dataset format is as below: ```json {'base64_img_data': '', 'file_attributes': {}, 'filename': '34020010494_e5cb88e1c4_k.jpg', 'fileref': '', 'regions': {'0': {'region_attributes': {}, 'shape_attributes': {'all_points_x': [1020, 1000, 994, 1003, 1023, 1050, 1089, 1134, 1190, 1265, 1321, 1361, 1403, 1428, 1442, 1445, 1441, 1427, 1400, 1361, 1316, 1269, 1228, 1198, 1207, 1210, 1190, 1177, 1172, 1174, 1170, 1153, 1127, 1104, 1061, 1032, 1020], 'all_points_y': [963, 899, 841, 787, 738, 700, 663, 638, 621, 619, 643, 672, 720, 765, 800, 860, 896, 942, 990, 1035, 1079, 1112, 1129, 1134, 1144, 1153, 1166, 1166, 1150, 1136, 1129, 1122, 1112, 1084, 1037, 989, 963], 'name': 'polygon'}}}, 'size': 1115004} ``` The annotation is a JSON file where each key indicates an image's all annotations. The code to convert the balloon dataset into coco format is as below. 
```python import os.path as osp import mmcv def convert_balloon_to_coco(ann_file, out_file, image_prefix): data_infos = mmcv.load(ann_file) annotations = [] images = [] obj_count = 0 for idx, v in enumerate(mmcv.track_iter_progress(data_infos.values())): filename = v['filename'] img_path = osp.join(image_prefix, filename) height, width = mmcv.imread(img_path).shape[:2] images.append(dict( id=idx, file_name=filename, height=height, width=width)) bboxes = [] labels = [] masks = [] for _, obj in v['regions'].items(): assert not obj['region_attributes'] obj = obj['shape_attributes'] px = obj['all_points_x'] py = obj['all_points_y'] poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)] poly = [p for x in poly for p in x] x_min, y_min, x_max, y_max = ( min(px), min(py), max(px), max(py)) data_anno = dict( image_id=idx, id=obj_count, category_id=0, bbox=[x_min, y_min, x_max - x_min, y_max - y_min], area=(x_max - x_min) * (y_max - y_min), segmentation=[poly], iscrowd=0) annotations.append(data_anno) obj_count += 1 coco_format_json = dict( images=images, annotations=annotations, categories=[{'id':0, 'name': 'balloon'}]) mmcv.dump(coco_format_json, out_file) ``` Using the function above, users can successfully convert the annotation file into json format, then we can use `CocoDataset` to train and evaluate the model. ## Prepare a config The second step is to prepare a config thus the dataset could be successfully loaded. Assume that we want to use Mask R-CNN with FPN, the config to train the detector on balloon dataset is as below. Assume the config is under directory `configs/balloon/` and named as `mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py`, the config is as below. ```python # The new config inherits a base config to highlight the necessary modification _base_ = 'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' # We also need to change the num_classes in head to match the dataset's annotation model = dict( roi_head=dict( bbox_head=dict(num_classes=1), mask_head=dict(num_classes=1))) # Modify dataset related settings dataset_type = 'COCODataset' classes = ('balloon',) data = dict( train=dict( img_prefix='balloon/train/', classes=classes, ann_file='balloon/train/annotation_coco.json'), val=dict( img_prefix='balloon/val/', classes=classes, ann_file='balloon/val/annotation_coco.json'), test=dict( img_prefix='balloon/val/', classes=classes, ann_file='balloon/val/annotation_coco.json')) # We can use the pre-trained Mask RCNN model to obtain higher performance load_from = 'checkpoints/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth' ``` This checkpoint file can be downloaded [here](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth) ## Train a new model To train a model with the new config, you can simply run ```shell python tools/train.py configs/balloon/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py ``` For more detailed usages, please refer to the [Case 1](1_exist_data_model.md). ## Test and inference To test the trained model, you can simply run ```shell python tools/test.py configs/balloon/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py work_dirs/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon/latest.pth --eval bbox segm ``` For more detailed usages, please refer to the [Case 1](1_exist_data_model.md).
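As a hedged usage example (not part of the original note), the conversion function above could be run for both splits as below. The annotation file name `via_region_data.json` and the `balloon/` directory layout are assumptions about how the downloaded dataset is organized; adjust them to your local copy. The output paths match the `ann_file` entries used in the config above.

```python
# Convert both splits of the balloon dataset to COCO format.
# `convert_balloon_to_coco` is the function defined above; paste it into a
# script or an interactive session before running these calls.
convert_balloon_to_coco(
    ann_file='balloon/train/via_region_data.json',
    out_file='balloon/train/annotation_coco.json',
    image_prefix='balloon/train')
convert_balloon_to_coco(
    ann_file='balloon/val/via_region_data.json',
    out_file='balloon/val/annotation_coco.json',
    image_prefix='balloon/val')
```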
7,204
25.985019
345
md
mmdetection
mmdetection-master/docs/en/3_exist_data_new_model.md
# 3: Train with customized models and standard datasets

In this note, you will learn how to train, test, and run inference with your own customized models on standard datasets. We use the cityscapes dataset to train a customized Cascade Mask R-CNN R50 model as an example to demonstrate the whole process, which uses [`AugFPN`](https://github.com/Gus-Guo/AugFPN) to replace the default `FPN` neck and adds `Rotate` or `Translate` as training-time auto augmentation.

The basic steps are as below:

1. Prepare the standard dataset
2. Prepare your own customized model
3. Prepare a config
4. Train, test, and run inference with models on the standard dataset.

## Prepare the standard dataset

In this note, we use the standard cityscapes dataset as an example.

It is recommended to symlink the dataset root to `$MMDETECTION/data`. If your folder structure is different, you may need to change the corresponding paths in config files.

```none
mmdetection
├── mmdet
├── tools
├── configs
├── data
│   ├── coco
│   │   ├── annotations
│   │   ├── train2017
│   │   ├── val2017
│   │   ├── test2017
│   ├── cityscapes
│   │   ├── annotations
│   │   ├── leftImg8bit
│   │   │   ├── train
│   │   │   ├── val
│   │   ├── gtFine
│   │   │   ├── train
│   │   │   ├── val
│   ├── VOCdevkit
│   │   ├── VOC2007
│   │   ├── VOC2012
```

Or you can set your dataset root through

```bash
export MMDET_DATASETS=$data_root
```

The dataset root will then be replaced with `$MMDET_DATASETS`, so you don't have to modify the corresponding paths in config files.

The cityscapes annotations have to be converted into the coco format using `tools/dataset_converters/cityscapes.py`:

```shell
pip install cityscapesscripts
python tools/dataset_converters/cityscapes.py ./data/cityscapes --nproc 8 --out-dir ./data/cityscapes/annotations
```

Currently the config files in `cityscapes` use COCO pre-trained weights to initialize. You could download the pre-trained models in advance if the network is unavailable or slow; otherwise, it would cause errors at the beginning of training.

## Prepare your own customized model

The second step is to use your own module or training setting. Assume that we want to implement a new neck called `AugFPN` to replace the default `FPN` under the existing detector Cascade Mask R-CNN R50. The following implements `AugFPN` under MMDetection.

### 1. Define a new neck (e.g. AugFPN)

First, create a new file `mmdet/models/necks/augfpn.py`.

```python
import torch.nn as nn

from ..builder import NECKS


@NECKS.register_module()
class AugFPN(nn.Module):

    def __init__(self,
                 in_channels,
                 out_channels,
                 num_outs,
                 start_level=0,
                 end_level=-1,
                 add_extra_convs=False):
        pass

    def forward(self, inputs):
        # implementation is ignored
        pass
```

### 2. Import the module

You can either add the following line to `mmdet/models/necks/__init__.py`,

```python
from .augfpn import AugFPN
```

or alternatively add

```python
custom_imports = dict(
    imports=['mmdet.models.necks.augfpn'],
    allow_failed_imports=False)
```

to the config file and avoid modifying the original code.

### 3. Modify the config file

```python
neck=dict(
    type='AugFPN',
    in_channels=[256, 512, 1024, 2048],
    out_channels=256,
    num_outs=5)
```

For more detailed usages about customizing your own models (e.g. implement a new backbone, head, loss, etc) and runtime training settings (e.g. define a new optimizer, use gradient clip, customize training schedules and hooks, etc), please refer to the guideline [Customize Models](tutorials/customize_models.md) and [Customize Runtime Settings](tutorials/customize_runtime.md) respectively.
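As a quick, optional sanity check (not part of the original tutorial), the registration can be verified by building the new neck from a config dict, just like the built-in `FPN`; this assumes `AugFPN` has been implemented and imported as described above.

```python
# Build the custom neck from its config to confirm it is registered correctly.
from mmdet.models import build_neck

neck_cfg = dict(
    type='AugFPN',
    in_channels=[256, 512, 1024, 2048],
    out_channels=256,
    num_outs=5)
neck = build_neck(neck_cfg)  # raises KeyError if AugFPN is not registered/imported
print(type(neck))
```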
## Prepare a config The third step is to prepare a config for your own training setting. Assume that we want to add `AugFPN` and `Rotate` or `Translate` augmentation to existing Cascade Mask R-CNN R50 to train the cityscapes dataset, and assume the config is under directory `configs/cityscapes/` and named as `cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py`, the config is as below. ```python # The new config inherits the base configs to highlight the necessary modification _base_ = [ '../_base_/models/cascade_mask_rcnn_r50_fpn.py', '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py' ] model = dict( # set None to avoid loading ImageNet pretrained backbone, # instead here we set `load_from` to load from COCO pretrained detectors. backbone=dict(init_cfg=None), # replace neck from defaultly `FPN` to our new implemented module `AugFPN` neck=dict( type='AugFPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), # We also need to change the num_classes in head from 80 to 8, to match the # cityscapes dataset's annotation. This modification involves `bbox_head` and `mask_head`. roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, # change the number of classes from defaultly COCO to cityscapes num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, # change the number of classes from defaultly COCO to cityscapes num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, # change the number of classes from defaultly COCO to cityscapes num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) ], mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, # change the number of classes from defaultly COCO to cityscapes num_classes=8, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) # over-write `train_pipeline` for new added `AutoAugment` training setting img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='AutoAugment', policies=[ [dict( type='Rotate', level=5, img_fill_val=(124, 116, 104), prob=0.5, scale=1) ], [dict(type='Rotate', level=7, img_fill_val=(124, 116, 104)), dict( type='Translate', level=5, prob=0.5, img_fill_val=(124, 116, 104)) ], ]), dict( type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', 
         keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]

# set batch_size per gpu, and set new training pipeline
data = dict(
    samples_per_gpu=1,
    workers_per_gpu=3,
    # over-write `pipeline` with new training pipeline setting
    train=dict(dataset=dict(pipeline=train_pipeline)))

# Set optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)

# Set customized learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[8])
runner = dict(type='EpochBasedRunner', max_epochs=10)

# We can use the COCO pretrained Cascade Mask R-CNN R50 model for a more stable initialization
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth'
```

## Train a new model

To train a model with the new config, you can simply run

```shell
python tools/train.py configs/cityscapes/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py
```

For more detailed usages, please refer to the [Case 1](1_exist_data_model.md).

## Test and inference

To test the trained model, you can simply run

```shell
python tools/test.py configs/cityscapes/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py work_dirs/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes/latest.pth --eval bbox segm
```

For more detailed usages, please refer to the [Case 1](1_exist_data_model.md).
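As an optional, hedged follow-up (not part of the original tutorial), the trained checkpoint can also be used directly with the high-level inference APIs described in [Case 1](1_exist_data_model.md); the work directory, device, and image path below are placeholders.

```python
from mmdet.apis import inference_detector, init_detector

config_file = 'configs/cityscapes/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py'
checkpoint_file = 'work_dirs/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes/latest.pth'
model = init_detector(config_file, checkpoint_file, device='cuda:0')

result = inference_detector(model, 'demo/demo.jpg')  # any test image
model.show_result('demo/demo.jpg', result, out_file='result.jpg')
```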
10,219
34.985915
405
md
mmdetection
mmdetection-master/docs/en/changelog.md
## Changelog ### v2.28.2 (24/2/2023) #### New Features and Improvements - Add Twitter, Discord, Medium and YouTube link (#9774) - Update `customize_runtime.md` (#9797) #### Bug Fixes - Fix `WIDERFace SSD` loss for Nan problem (#9734) - Fix missing API documentation in Readthedoc (#9729) - Fix the configuration file and log path of CenterNet (#9791) #### Contributors A total of 4 developers contributed to this release. Thanks @co63oc, @Ginray, @vansin, @RangiLyu ### v2.28.1 (1/2/2023) #### Bug Fixes - Enable to set float mlp_ratio in SwinTransformer (#8670) - Fix import error that causes training failure (#9694) - Fix isort version in lint (#9685) - Fix init_cfg of YOLOF (#8243) #### Contributors A total of 4 developers contributed to this release. Thanks @triple-Mu, @i-aki-y, @twmht, @RangiLyu ### v2.28.0 (28/1/2023) #### Highlights - Support Objects365 Dataset and Separated and Occluded COCO metric - Support acceleration of RetinaNet and SSD on Ascend - Deprecate the support of Python 3.6 #### New Features and Improvements - Support Objects365 Dataset (#7525) - Support [Separated and Occluded COCO metric](https://arxiv.org/abs/2210.10046) (#9574) - Support acceleration of RetinaNet and SSD on Ascend with documentation (#9648, #9614) - Added missing `-` to `--format-only` in documentation. #### Deprecations - Upgrade the minimum Python version to 3.7, the support of Python 3.6 is no longer guaranteed (#9604) #### Bug Fixes - Fix validation loss logging by (#9663) - Fix inconsistent float precision between mmdet and mmcv (#9570) - Fix argument name for fp32 in `DeformableDETRHead` (#9607) - Fix typo of all config file path in Metafile.yml (#9627) #### Contributors A total of 11 developers contributed to this release. Thanks @eantono, @akstt, @@lpizzinidev, @RangiLyu, @kbumsik, @tianleiSHI, @nijkah, @BIGWangYuDong, @wangjiangben-hw, @@jamiechoi1995, @ZwwWayne ## New Contributors - @kbumsik made their first contribution in https://github.com/open-mmlab/mmdetection/pull/9627 - @akstt made their first contribution in https://github.com/open-mmlab/mmdetection/pull/9614 - @lpizzinidev made their first contribution in https://github.com/open-mmlab/mmdetection/pull/9649 - @eantono made their first contribution in https://github.com/open-mmlab/mmdetection/pull/9663 ### v2.27.0 (5/1/2023) #### Highlights - Support receptive field search of CNN models([TPAMI 2022: RF-Next](http://mftp.mmcheng.net/Papers/22TPAMI-ActionSeg.pdf)) (#8191) #### Bug Fixes - Fix deadlock issue related with MMDetWandbHook (#9476) #### Improvements - Add minimum GitHub token permissions for workflows (#8928) - Delete compatible code for parrots in roi extractor (#9503) - Deprecate np.bool Type Alias (#9498) - Replace numpy transpose with torch permute to speed-up data pre-processing (#9533) #### Documents - Fix typo in docs/zh_cn/tutorials/config.md (#9416) - Fix Faster RCNN FP16 config link in README (#9366) #### Contributors A total of 12 developers contributed to this release. 
Thanks @Min-Sheng, @gasvn, @lzyhha, @jbwang1997, @zachcoleman, @chenyuwang814, @MilkClouds, @Fizzez, @boahc077, @apatsekin, @zytx121, @DonggeunYu ### v2.26.0 (23/11/2022) #### Highlights - Support training on [NPU](docs/en/device/npu.md) (#9267) #### Bug Fixes - Fix RPN visualization (#9151) - Fix readthedocs by freezing the dependency versions (#9154) - Fix device argument error in MMDet_Tutorial.ipynb (#9112) - Fix solov2 cannot dealing with empty gt image (#9185) - Fix random flipping ratio comparison of mixup image (#9336) #### Improvements - Complement necessary argument of seg_suffix of cityscapes (#9330) - Support copy paste based on bbox when there is no gt mask (#8905) - Make scipy as a default dependency in runtime (#9186) #### Documents - Delete redundant Chinese characters in docs (#9175) - Add MMEval in README (#9217) #### Contributors A total of 11 developers contributed to this release. Thanks @wangjiangben-hw, @motokimura, @AdorableJiang, @BainOuO, @JarvisKevin, @wanghonglie, @zytx121, @BIGWangYuDong, @hhaAndroid, @RangiLyu, @ZwwWayne ### v2.25.3 (25/10/2022) #### Bug Fixes - Skip remote sync when wandb is offline (#8755) - Fix jpg to png bug when using seg maps (#9078) #### Improvements - Fix typo in warning (#8844) - Fix CI for timm, pycocotools, onnx (#9034) - Upgrade pre-commit hooks (#8964) #### Documents - Update BoundedIoULoss config in readme (#8808) - Fix Faster R-CNN Readme (#8803) - Update location of test_cfg and train_cfg (#8792) - Fix issue template (#8966) - Update random sampler docstring (#9033) - Fix wrong image link (#9054) - Fix FPG readme (#9041) #### Contributors A total of 13 developers contributed to this release. Thanks @Zheng-LinXiao, @i-aki-y, @fbagci, @sudoAimer, @Czm369, @DrRyanHuang, @RangiLyu, @wanghonglie, @shinya7y, @Ryoo72, @akshaygulabrao, @gy-7, @Neesky ### v2.25.2 (15/9/2022) #### Bug Fixes - Fix DyDCNv2 RuntimeError (#8485) - Fix repeated import of CascadeRPNHead (#8578) - Fix absolute positional embedding of swin backbone (#8127) - Fix get train_pipeline method of val workflow (#8575) #### Improvements - Upgrade onnxsim to at least 0.4.0 (#8383) - Support tuple format in analyze_results script (#8549) - Fix floordiv warning (#8648) #### Documents - Fix typo in HTC link (#8487) - Fix docstring of `BboxOverlaps2D` (#8512) - Added missed Chinese tutorial link (#8564) - Fix mistakes in gaussian radius formula (#8607) - Update config documentation about how to Add WandB Hook (#8663) - Add mmengine link in readme (#8799) - Update issue template (#8802) #### Contributors A total of 16 developers contributed to this release. 
Thanks @daquexian, @lyq10085, @ZwwWayne, @fbagci, @BubblyYi, @fathomson, @ShunchiZhang, @ceasona, @Happylkx, @normster, @chhluo, @Lehsuby, @JiayuXu0, @Nourollah, @hewanru-bit, @RangiLyu ### v2.25.1 (29/7/2022) #### Bug Fixes - Fix single GPU distributed training of cuda device specifying (#8176) - Fix PolygonMask bug in FilterAnnotations (#8136) - Fix mdformat version to support python3.6 (#8195) - Fix GPG key error in Dockerfile (#8215) - Fix `WandbLoggerHook` error (#8273) - Fix Pytorch 1.10 incompatibility issues (#8439) #### Improvements - Add `mim` to `extras_require` in setup.py (#8194) - Support get image shape on macOS (#8434) - Add test commands of `mim` in CI (#8230 & #8240) - Update `maskformer` to be compatible when cfg is a dictionary (#8263) - Clean `Pillow` version check in CI (#8229) #### Documents - Change example hook name in tutorials (#8118) - Update projects (#8120) - Update metafile and release new models (#8294) - Add download link in tutorials (#8391) #### Contributors A total of 15 developers contributed to this release. Thanks @ZwwWayne, @ayulockin, @Mxbonn, @p-mishra1, @Youth-Got, @MiXaiLL76, @chhluo, @jbwang1997, @atinfinity, @shinya7y, @duanzhihua, @STLAND-admin, @BIGWangYuDong, @grimoire, @xiaoyuan0203 ### v2.25.0 (31/5/2022) #### Highlights - Support dedicated `WandbLogger` hook - Support [ConvNeXt](configs/convnext), [DDOD](configs/ddod), [SOLOv2](configs/solov2) - Support [Mask2Former](configs/mask2former) for instance segmentation - Rename [config files of Mask2Former](configs/mask2former) #### Backwards incompatible changes - Rename [config files of Mask2Former](configs/mask2former) (#7571) <table align="center"> <thead> <tr align='center'> <td>before v2.25.0</td> <td>after v2.25.0</td> </tr> </thead> <tbody><tr valign='top'> <th> - `mask2former_xxx_coco.py` represents config files for **panoptic segmentation**. </th> <th> - `mask2former_xxx_coco.py` represents config files for **instance segmentation**. - `mask2former_xxx_coco-panoptic.py` represents config files for **panoptic segmentation**. </th></tr> </tbody></table> #### New Features - Support [ConvNeXt](https://arxiv.org/abs/2201.03545) (#7281) - Support [DDOD](https://arxiv.org/abs/2107.02963) (#7279) - Support [SOLOv2](https://arxiv.org/abs/2003.10152) (#7441) - Support [Mask2Former](https://arxiv.org/abs/2112.01527) for instance segmentation (#7571, #8032) #### Bug Fixes - Enable YOLOX training on different devices (#7912) - Fix the log plot error when evaluation with `interval != 1` (#7784) - Fix RuntimeError of HTC (#8083) #### Improvements - Support dedicated `WandbLogger` hook (#7459) Users can set ```python cfg.log_config.hooks = [ dict(type='MMDetWandbHook', init_kwargs={'project': 'MMDetection-tutorial'}, interval=10, log_checkpoint=True, log_checkpoint_metadata=True, num_eval_images=10)] ``` in the config to use `MMDetWandbHook`. Example can be found in this [colab tutorial](https://colab.research.google.com/drive/1RCSXHZwDZvakFh3eo9RuNrJbCGqD0dru?usp=sharing#scrollTo=WTEdPDRaBz2C) - Add `AvoidOOM` to avoid OOM (#7434, #8091) Try to use `AvoidCUDAOOM` to avoid GPU out of memory. It will first retry after calling `torch.cuda.empty_cache()`. If it still fails, it will then retry by converting the type of inputs to FP16 format. If it still fails, it will try to copy inputs from GPUs to CPUs to continue computing. 
Try AvoidOOM in code to make the code continue to run when GPU memory runs out: ```python from mmdet.utils import AvoidCUDAOOM output = AvoidCUDAOOM.retry_if_cuda_oom(some_function)(input1, input2) ``` Users can also try `AvoidCUDAOOM` as a decorator to make the code continue to run when GPU memory runs out: ```python from mmdet.utils import AvoidCUDAOOM @AvoidCUDAOOM.retry_if_cuda_oom def function(*args, **kwargs): ... return xxx ``` - Support reading `gpu_collect` from `cfg.evaluation.gpu_collect` (#7672) - Speedup the Video Inference by Accelerating data-loading Stage (#7832) - Support replacing the `${key}` with the value of `cfg.key` (#7492) - Accelerate result analysis in `analyze_result.py`. The evaluation time is speedup by 10 ~ 15 times and only tasks 10 ~ 15 minutes now. (#7891) - Support to set `block_dilations` in `DilatedEncoder` (#7812) - Support panoptic segmentation result analysis (#7922) - Release DyHead with Swin-Large backbone (#7733) - Documentations updating and adding - Fix wrong default type of `act_cfg` in `SwinTransformer` (#7794) - Fix text errors in the tutorials (#7959) - Rewrite the [installation guide](docs/en/get_started.md) (#7897) - [Useful hooks](docs/en/tutorials/useful_hooks.md) (#7810) - Fix heading anchor in documentation (#8006) - Replace `markdownlint` with `mdformat` for avoiding installing ruby (#8009) #### Contributors A total of 20 developers contributed to this release. Thanks @ZwwWayne, @DarthThomas, @solyaH, @LutingWang, @chenxinfeng4, @Czm369, @Chenastron, @chhluo, @austinmw, @Shanyaliux @hellock, @Y-M-Y, @jbwang1997, @hhaAndroid, @Irvingao, @zhanggefan, @BIGWangYuDong, @Keiku, @PeterVennerstrom, @ayulockin ### v2.24.0 (26/4/2022) #### Highlights - Support [Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation](https://arxiv.org/abs/2012.07177) - Support automatically scaling LR according to GPU number and samples per GPU - Support Class Aware Sampler that improves performance on OpenImages Dataset #### New Features - Support [Simple Copy-Paste is a Strong Data Augmentation Method for Instance Segmentation](https://arxiv.org/abs/2012.07177), see [example configs](configs/simple_copy_paste/mask_rcnn_r50_fpn_syncbn-all_rpn-2conv_ssj_scp_32x2_270k_coco.py) (#7501) - Support Class Aware Sampler, users can set ```python data=dict(train_dataloader=dict(class_aware_sampler=dict(num_sample_class=1)))) ``` in the config to use `ClassAwareSampler`. Examples can be found in [the configs of OpenImages Dataset](https://github.com/open-mmlab/mmdetection/tree/master/configs/openimages/faster_rcnn_r50_fpn_32x2_cas_1x_openimages.py). (#7436) - Support automatically scaling LR according to GPU number and samples per GPU. (#7482) In each config, there is a corresponding config of auto-scaling LR as below, ```python auto_scale_lr = dict(enable=True, base_batch_size=N) ``` where `N` is the batch size used for the current learning rate in the config (also equals to `samples_per_gpu` * gpu number to train this config). By default, we set `enable=False` so that the original usages will not be affected. Users can set `enable=True` in each config or add `--auto-scale-lr` after the command line to enable this feature and should check the correctness of `base_batch_size` in customized configs. - Support setting dataloader arguments in config and add functions to handle config compatibility. (#7668) The comparison between the old and new usages is as below. 
<table align="center"> <thead> <tr align='center'> <td>v2.23.0</td> <td>v2.24.0</td> </tr> </thead> <tbody><tr valign='top'> <th> ```python data = dict( samples_per_gpu=64, workers_per_gpu=4, train=dict(type='xxx', ...), val=dict(type='xxx', samples_per_gpu=4, ...), test=dict(type='xxx', ...), ) ``` </th> <th> ```python # A recommended config that is clear data = dict( train=dict(type='xxx', ...), val=dict(type='xxx', ...), test=dict(type='xxx', ...), # Use different batch size during inference. train_dataloader=dict(samples_per_gpu=64, workers_per_gpu=4), val_dataloader=dict(samples_per_gpu=8, workers_per_gpu=2), test_dataloader=dict(samples_per_gpu=8, workers_per_gpu=2), ) # Old style still works but allows to set more arguments about data loaders data = dict( samples_per_gpu=64, # only works for train_dataloader workers_per_gpu=4, # only works for train_dataloader train=dict(type='xxx', ...), val=dict(type='xxx', ...), test=dict(type='xxx', ...), # Use different batch size during inference. val_dataloader=dict(samples_per_gpu=8, workers_per_gpu=2), test_dataloader=dict(samples_per_gpu=8, workers_per_gpu=2), ) ``` </th></tr> </tbody></table> - Support memory profile hook. Users can use it to monitor the memory usages during training as below (#7560) ```python custom_hooks = [ dict(type='MemoryProfilerHook', interval=50) ] ``` - Support to run on PyTorch with MLU chip (#7578) - Support re-spliting data batch with tag (#7641) - Support the `DiceCost` used by [K-Net](https://arxiv.org/abs/2106.14855) in `MaskHungarianAssigner` (#7716) - Support splitting COCO data for Semi-supervised object detection (#7431) - Support Pathlib for Config.fromfile (#7685) - Support to use file client in OpenImages dataset (#7433) - Add a probability parameter to Mosaic transformation (#7371) - Support specifying interpolation mode in `Resize` pipeline (#7585) #### Bug Fixes - Avoid invalid bbox after deform_sampling (#7567) - Fix the issue that argument color_theme does not take effect when exporting confusion matrix (#7701) - Fix the `end_level` in Necks, which should be the index of the end input backbone level (#7502) - Fix the bug that `mix_results` may be None in `MultiImageMixDataset` (#7530) - Fix the bug in ResNet plugin when two plugins are used (#7797) #### Improvements - Enhance `load_json_logs` of analyze_logs.py for resumed training logs (#7732) - Add argument `out_file` in image_demo.py (#7676) - Allow mixed precision training with `SimOTAAssigner` (#7516) - Updated INF to 100000.0 to be the same as that in the official YOLOX (#7778) - Add documentations of: - how to get channels of a new backbone (#7642) - how to unfreeze the backbone network (#7570) - how to train fast_rcnn model (#7549) - proposals in Deformable DETR (#7690) - from-scratch install script in get_started.md (#7575) - Release pre-trained models of - [Mask2Former](configs/mask2former) (#7595, #7709) - RetinaNet with ResNet-18 and release models (#7387) - RetinaNet with EfficientNet backbone (#7646) #### Contributors A total of 27 developers contributed to this release. 
Thanks @jovialio, @zhangsanfeng2022, @HarryZJ, @jamiechoi1995, @nestiank, @PeterH0323, @RangeKing, @Y-M-Y, @mattcasey02, @weiji14, @Yulv-git, @xiefeifeihu, @FANG-MING, @meng976537406, @nijkah, @sudz123, @CCODING04, @SheffieldCao, @Czm369, @BIGWangYuDong, @zytx121, @jbwang1997, @chhluo, @jshilong, @RangiLyu, @hhaAndroid, @ZwwWayne ### v2.23.0 (28/3/2022) #### Highlights - Support Mask2Former: [Masked-attention Mask Transformer for Universal Image Segmentation](https://arxiv.org/abs/2112.01527) - Support EfficientNet: [EfficientNet: Rethinking Model Scaling for Convolutional Neural Networks](https://arxiv.org/abs/1905.11946) - Support setting the data root through the environment variable `MMDET_DATASETS`, so users don't have to modify the corresponding paths in config files anymore. - Find a good recipe for fine-tuning a high-precision ResNet backbone pre-trained by Torchvision. #### New Features - Support [Mask2Former](configs/mask2former) (#6938, #7466, #7471) - Support [EfficientNet](configs/efficientnet) (#7514) - Support setting the data root through the environment variable `MMDET_DATASETS`, so users don't have to modify the corresponding paths in config files anymore. (#7386) - Support setting different seeds for different ranks (#7432) - Update `dist_train.sh` so that the script can be used to launch multi-node training on machines without slurm (#7415) - Find a good recipe for fine-tuning a high-precision ResNet backbone pre-trained by Torchvision (#7489) #### Bug Fixes - Fix bug in VOC unit test which removes the data directory (#7270) - Adjust the order of `get_classes` and `FileClient` (#7276) - Force the inputs of `get_bboxes` in yolox_head to float32 (#7324) - Fix misplaced arguments in LoadPanopticAnnotations (#7388) - Fix reduction=mean in CELoss. (#7449) - Update unit test of CrossEntropyCost (#7537) - Fix memory leak in panoptic segmentation evaluation (#7538) - Fix the bug of shape broadcast in YOLOv3 (#7551) #### Improvements - Add Chinese version of onnx2tensorrt.md (#7219) - Update colab tutorials (#7310) - Update information about Localization Distillation (#7350) - Add Chinese version of `finetune.md` (#7178) - Update YOLOX log for non-square input (#7235) - Add `nproc` in `coco_panoptic.py` for panoptic quality computing (#7315) - Allow to set channel_order in LoadImageFromFile (#7258) - Take point sample related functions out of mask_point_head (#7353) - Add instance evaluation for coco_panoptic (#7313) - Enhance the robustness of analyze_logs.py (#7407) - Supplementary notes of sync_random_seed (#7440) - Update docstring of cross entropy loss (#7472) - Update pascal voc result (#7503) - We created How-to documentation to record answers to common "How to xxx" questions. In this version, we added - How to use Mosaic augmentation (#7507) - How to use backbone in mmcls (#7438) - How to produce and submit the prediction results of panoptic segmentation models on COCO test-dev set (#7430) #### Contributors A total of 27 developers contributed to this release.
Thanks @ZwwWayne, @haofanwang, @shinya7y, @chhluo, @yangrisheng, @triple-Mu, @jbwang1997, @HikariTJU, @imflash217, @274869388, @zytx121, @matrixgame2018, @jamiechoi1995, @BIGWangYuDong, @JingweiZhang12, @Xiangxu-0103, @hhaAndroid, @jshilong, @osbm, @ceroytres, @bunge-bedstraw-herb, @Youth-Got, @daavoo, @jiangyitong, @RangiLyu, @CCODING04, @yarkable ### v2.22.0 (24/2/2022) #### Highlights - Support MaskFormer: [Per-Pixel Classification is Not All You Need for Semantic Segmentation](https://arxiv.org/abs/2107.06278) (#7212) - Support DyHead: [Dynamic Head: Unifying Object Detection Heads with Attentions](https://arxiv.org/abs/2106.08322) (#6823) - Release a good recipe for using ResNet backbones pre-trained by [ResNet Strikes Back](https://arxiv.org/abs/2110.00476) in object detectors, which consistently brings about 3~4 mAP improvements to RetinaNet and Faster/Mask/Cascade Mask R-CNN (#7001) - Support [Open Images Dataset](https://storage.googleapis.com/openimages/web/index.html) (#6331) - Support TIMM backbone: [PyTorch Image Models](https://github.com/rwightman/pytorch-image-models) (#7020) #### New Features - Support [MaskFormer](configs/maskformer) (#7212) - Support [DyHead](configs/dyhead) (#6823) - Support [ResNet Strikes Back](configs/resnet_strikes_back) (#7001) - Support [OpenImages Dataset](configs/openimages) (#6331) - Support [TIMM backbone](configs/timm_example) (#7020) - Support visualization for Panoptic Segmentation (#7041) #### Breaking Changes In order to support the visualization for Panoptic Segmentation, `num_classes` cannot be `None` when using the `get_palette` function to determine whether to use the panoptic palette. #### Bug Fixes - Fix the bug that the best checkpoint cannot be saved when `key_score` is None (#7101) - Fix the failing case of box filtering in the MixUp transform (#7080) - Add missing properties in SABLHead (#7091) - Fix bug when NaNs exist in confusion matrix (#7147) - Fix PALETTE AttributeError in downstream task (#7230) #### Improvements - Speed up SimOTA matching (#7098) - Add Chinese translation of `docs_zh-CN/tutorials/init_cfg.md` (#7188) #### Contributors A total of 20 developers contributed to this release. Thanks @ZwwWayne, @hhaAndroid, @RangiLyu, @AronLin, @BIGWangYuDong, @jbwang1997, @zytx121, @chhluo, @shinya7y, @LuooChen, @dvansa, @siatwangmin, @del-zhenwu, @vikashranjan26, @haofanwang, @jamiechoi1995, @HJoonKwon, @yarkable, @zhijian-liu, @RangeKing ### v2.21.0 (8/2/2022) #### Breaking Changes To standardize the contents in config READMEs and meta files of OpenMMLab projects, the READMEs and meta files in each config directory have been significantly changed. The template will be released in the future; for now, you can refer to the examples of README for [algorithm](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/README.md), [dataset](https://github.com/open-mmlab/mmdetection/blob/master/configs/deepfashion/README.md) and [backbone](https://github.com/open-mmlab/mmdetection/blob/master/configs/regnet/README.md). To align with the standard, the configs in dcn are split into two directories named `dcn` and `dcnv2`.
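To make the v2.22.0 panoptic-visualization breaking change above concrete, here is a minimal sketch of the expected call; the import path and keyword follow recent 2.x releases and should be treated as an assumption rather than a guaranteed API:

```python
# Sketch only: `num_classes` must now be an integer so that `get_palette`
# can decide whether the panoptic palette is needed (passing None is no longer allowed).
from mmdet.core.visualization import get_palette

palette = get_palette('coco', num_classes=80)  # a list with one color tuple per class
print(len(palette), palette[0])
```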
#### New Features - Allow to customize colors of different classes during visualization (#6716) - Support CPU training (#7016) - Add download script of COCO, LVIS, and VOC dataset (#7015) #### Bug Fixes - Fix weight conversion issue of RetinaNet with Swin-S (#6973) - Update `__repr__` of `Compose` (#6951) - Fix BadZipFile Error when build docker (#6966) - Fix bug in non-distributed multi-gpu training/testing (#7019) - Fix bbox clamp in PyTorch 1.10 (#7074) - Relax the requirement of PALETTE in dataset wrappers (#7085) - Keep the same weights before reassign in the PAA head (#7032) - Update code demo in doc (#7092) #### Improvements - Speed-up training by allow to set variables of multi-processing (#6974, #7036) - Add links of Chinese tutorials in readme (#6897) - Disable cv2 multiprocessing by default for acceleration (#6867) - Deprecate the support for "python setup.py test" (#6998) - Re-organize metafiles and config readmes (#7051) - Fix None grad problem during training TOOD by adding `SigmoidGeometricMean` (#7090) #### Contributors A total of 26 developers contributed to this release. Thanks @del-zhenwu, @zimoqingfeng, @srishilesh, @imyhxy, @jenhaoyang, @jliu-ac, @kimnamu, @ShengliLiu, @garvan2021, @ciusji, @DIYer22, @kimnamu, @q3394101, @zhouzaida, @gaotongxiao, @topsy404, @AntoAndGar, @jbwang1997, @nijkah, @ZwwWayne, @Czm369, @jshilong, @RangiLyu, @BIGWangYuDong, @hhaAndroid, @AronLin ### v2.20.0 (27/12/2021) #### New Features - Support [TOOD](configs/tood/README.md): Task-aligned One-stage Object Detection (ICCV 2021 Oral) (#6746) - Support resuming from the latest checkpoint automatically (#6727) #### Bug Fixes - Fix wrong bbox `loss_weight` of the PAA head (#6744) - Fix the padding value of `gt_semantic_seg` in batch collating (#6837) - Fix test error of lvis when using `classwise` (#6845) - Avoid BC-breaking of `get_local_path` (#6719) - Fix bug in `sync_norm_hook` when the BN layer does not exist (#6852) - Use pycocotools directly no matter what platform it is (#6838) #### Improvements - Add unit test for SimOTA with no valid bbox (#6770) - Use precommit to check readme (#6802) - Support selecting GPU-ids in non-distributed testing time (#6781) #### Contributors A total of 16 developers contributed to this release. Thanks @ZwwWayne, @Czm369, @jshilong, @RangiLyu, @BIGWangYuDong, @hhaAndroid, @jamiechoi1995, @AronLin, @Keiku, @gkagkos, @fcakyon, @www516717402, @vansin, @zactodd, @kimnamu, @jenhaoyang ### v2.19.1 (14/12/2021) #### New Features - Release [YOLOX](configs/yolox/README.md) COCO pretrained models (#6698) #### Bug Fixes - Fix DCN initialization in DenseHead (#6625) - Fix initialization of ConvFCHead (#6624) - Fix PseudoSampler in RCNN (#6622) - Fix weight initialization in Swin and PVT (#6663) - Fix dtype bug in BaseDenseHead (#6767) - Fix SimOTA with no valid bbox (#6733) #### Improvements - Add an example of combining swin and one-stage models (#6621) - Add `get_ann_info` to dataset_wrappers (#6526) - Support keeping image ratio in the multi-scale training of YOLOX (#6732) - Support `bbox_clip_border` for the augmentations of YOLOX (#6730) #### Documents - Update metafile (#6717) - Add mmhuman3d in readme (#6699) - Update FAQ docs (#6587) - Add doc for `detect_anomalous_params` (#6697) #### Contributors A total of 11 developers contributed to this release. 
Thanks @ZwwWayne, @LJoson, @Czm369, @jshilong, @ZCMax, @RangiLyu, @BIGWangYuDong, @hhaAndroid, @zhaoxin111, @GT9505, @shinya7y ### v2.19.0 (29/11/2021) #### Highlights - Support [Label Assignment Distillation](https://arxiv.org/abs/2108.10520) - Support `persistent_workers` for Pytorch >= 1.7 - Align accuracy to the updated official YOLOX #### New Features - Support [Label Assignment Distillation](https://arxiv.org/abs/2108.10520) (#6342) - Support `persistent_workers` for Pytorch >= 1.7 (#6435) #### Bug Fixes - Fix repeatedly output warning message (#6584) - Avoid infinite GPU waiting in dist training (#6501) - Fix SSD512 config error (#6574) - Fix MMDetection model to ONNX command (#6558) #### Improvements - Refactor configs of FP16 models (#6592) - Align accuracy to the updated official YOLOX (#6443) - Speed up training and reduce memory cost when using PhotoMetricDistortion. (#6442) - Make OHEM work with seesaw loss (#6514) #### Documents - Update README.md (#6567) #### Contributors A total of 11 developers contributed to this release. Thanks @FloydHsiu, @RangiLyu, @ZwwWayne, @AndreaPi, @st9007a, @hachreak, @BIGWangYuDong, @hhaAndroid, @AronLin, @chhluo, @vealocia, @HarborYuan, @st9007a, @jshilong ### v2.18.1 (15/11/2021) #### Highlights - Release [QueryInst](http://arxiv.org/abs/2105.01928) pre-trained weights (#6460) - Support plot confusion matrix (#6344) #### New Features - Release [QueryInst](http://arxiv.org/abs/2105.01928) pre-trained weights (#6460) - Support plot confusion matrix (#6344) #### Bug Fixes - Fix aug test error when the number of prediction bboxes is 0 (#6398) - Fix SpatialReductionAttention in PVT (#6488) - Fix wrong use of `trunc_normal_init` in PVT and Swin-Transformer (#6432) #### Improvements - Save the printed AP information of COCO API to logger (#6505) - Always map location to cpu when load checkpoint (#6405) - Set a random seed when the user does not set a seed (#6457) #### Documents - Chinese version of [Corruption Benchmarking](robustness_benchmarking.md) (#6375) - Fix config path in docs (#6396) - Update GRoIE readme (#6401) #### Contributors A total of 11 developers contributed to this release. 
Thanks @st9007a, @hachreak, @HarborYuan, @vealocia, @chhluo, @AndreaPi, @AronLin, @BIGWangYuDong, @hhaAndroid, @RangiLyu, @ZwwWayne ### v2.18.0 (27/10/2021) #### Highlights - Support [QueryInst](http://arxiv.org/abs/2105.01928) (#6050) - Refactor dense heads to decouple onnx export logics from `get_bboxes` and speed up inference (#5317, #6003, #6369, #6268, #6315) #### New Features - Support [QueryInst](http://arxiv.org/abs/2105.01928) (#6050) - Support infinite sampler (#5996) #### Bug Fixes - Fix init_weight in fcn_mask_head (#6378) - Fix type error in imshow_bboxes of RPN (#6386) - Fix broken colab link in MMDetection Tutorial (#6382) - Make sure the device and dtype of scale_factor are the same as bboxes (#6374) - Remove sampling hardcode (#6317) - Fix RandomAffine bbox coordinate recorrection (#6293) - Fix init bug of final cls/reg layer in convfc head (#6279) - Fix img_shape broken in auto_augment (#6259) - Fix kwargs parameter missing error in two_stage (#6256) #### Improvements - Unify the interface of stuff head and panoptic head (#6308) - Polish readme (#6243) - Add code-spell pre-commit hook and fix a typo (#6306) - Fix typo (#6245, #6190) - Fix sampler unit test (#6284) - Fix `forward_dummy` of YOLACT to enable `get_flops` (#6079) - Fix link error in the config documentation (#6252) - Adjust the order to beautify the document (#6195) #### Refactors - Refactor one-stage get_bboxes logic (#5317) - Refactor ONNX export of One-Stage models (#6003, #6369) - Refactor dense_head and speedup (#6268) - Migrate to use prior_generator in training of dense heads (#6315) #### Contributors A total of 18 developers contributed to this release. Thanks @Boyden, @onnkeat, @st9007a, @vealocia, @yhcao6, @DapangpangX, @yellowdolphin, @cclauss, @kennymckormick, @pingguokiller, @collinzrj, @AndreaPi, @AronLin, @BIGWangYuDong, @hhaAndroid, @jshilong, @RangiLyu, @ZwwWayne ### v2.17.0 (28/9/2021) #### Highlights - Support [PVT](https://arxiv.org/abs/2102.12122) and [PVTv2](https://arxiv.org/abs/2106.13797) - Support [SOLO](https://arxiv.org/abs/1912.04488) - Support large scale jittering and New Mask R-CNN baselines - Speed up `YOLOv3` inference #### New Features - Support [PVT](https://arxiv.org/abs/2102.12122) and [PVTv2](https://arxiv.org/abs/2106.13797) (#5780) - Support [SOLO](https://arxiv.org/abs/1912.04488) (#5832) - Support large scale jittering and New Mask R-CNN baselines (#6132) - Add a general data structure for the results of models (#5508) - Added a base class for one-stage instance segmentation (#5904) - Speed up `YOLOv3` inference (#5991) - Release Swin Transformer pre-trained models (#6100) - Support mixed precision training in `YOLOX` (#5983) - Support `val` workflow in `YOLACT` (#5986) - Add script to test `torchserve` (#5936) - Support `onnxsim` with dynamic input shape (#6117) #### Bug Fixes - Fix the function naming errors in `model_wrappers` (#5975) - Fix regression loss bug when the input is an empty tensor (#5976) - Fix scores not contiguous error in `centernet_head` (#6016) - Fix missing parameters bug in `imshow_bboxes` (#6034) - Fix bug in `aug_test` of `HTC` when the length of `det_bboxes` is 0 (#6088) - Fix empty proposal errors in the training of some two-stage models (#5941) - Fix `dynamic_axes` parameter error in `ONNX` dynamic shape export (#6104) - Fix `dynamic_shape` bug of `SyncRandomSizeHook` (#6144) - Fix the Swin Transformer config link error in the configuration (#6172) #### Improvements - Add filter rules in `Mosaic` transform (#5897) - Add size divisor in get 
flops to avoid some potential bugs (#6076) - Add Chinese translation of `docs_zh-CN/tutorials/customize_dataset.md` (#5915) - Add Chinese translation of `conventions.md` (#5825) - Add description of the output of data pipeline (#5886) - Add dataset information in the README file for `PanopticFPN` (#5996) - Add `extra_repr` for `DropBlock` layer to get details in the model printing (#6140) - Fix CI out of memory and add PyTorch1.9 Python3.9 unit tests (#5862) - Fix download links error of some model (#6069) - Improve the generalization of XML dataset (#5943) - Polish assertion error messages (#6017) - Remove `opencv-python-headless` dependency by `albumentations` (#5868) - Check dtype in transform unit tests (#5969) - Replace the default theme of documentation with PyTorch Sphinx Theme (#6146) - Update the paper and code fields in the metafile (#6043) - Support to customize padding value of segmentation map (#6152) - Support to resize multiple segmentation maps (#5747) #### Contributors A total of 24 developers contributed to this release. Thanks @morkovka1337, @HarborYuan, @guillaumefrd, @guigarfr, @www516717402, @gaotongxiao, @ypwhs, @MartaYang, @shinya7y, @justiceeem, @zhaojinjian0000, @VVsssssk, @aravind-anantha, @wangbo-zhao, @czczup, @whai362, @czczup, @marijnl, @AronLin, @BIGWangYuDong, @hhaAndroid, @jshilong, @RangiLyu, @ZwwWayne ### v2.16.0 (30/8/2021) #### Highlights - Support [Panoptic FPN](https://arxiv.org/abs/1901.02446) and [Swin Transformer](https://arxiv.org/abs/2103.14030) #### New Features - Support [Panoptic FPN](https://arxiv.org/abs/1901.02446) and release models (#5577, #5902) - Support Swin Transformer backbone (#5748) - Release RetinaNet models pre-trained with multi-scale 3x schedule (#5636) - Add script to convert unlabeled image list to coco format (#5643) - Add hook to check whether the loss value is valid (#5674) - Add YOLO anchor optimizing tool (#5644) - Support export onnx models without post process. (#5851) - Support classwise evaluation in CocoPanopticDataset (#5896) - Adapt browse_dataset for concatenated datasets. (#5935) - Add `PatchEmbed` and `PatchMerging` with `AdaptivePadding` (#5952) #### Bug Fixes - Fix unit tests of YOLOX (#5859) - Fix lose randomness in `imshow_det_bboxes` (#5845) - Make output result of `ImageToTensor` contiguous (#5756) - Fix inference bug when calling `regress_by_class` in RoIHead in some cases (#5884) - Fix bug in CIoU loss where alpha should not have gradient. (#5835) - Fix the bug that `multiscale_output` is defined but not used in HRNet (#5887) - Set the priority of EvalHook to LOW. (#5882) - Fix a YOLOX bug when applying bbox rescaling in test mode (#5899) - Fix mosaic coordinate error (#5947) - Fix dtype of bbox in RandomAffine. (#5930) #### Improvements - Add Chinese version of `data_pipeline` and (#5662) - Support to remove state dicts of EMA when publishing models. (#5858) - Refactor the loss function in HTC and SCNet (#5881) - Use warnings instead of logger.warning (#5540) - Use legacy coordinate in metric of VOC (#5627) - Add Chinese version of customize_losses (#5826) - Add Chinese version of model_zoo (#5827) #### Contributors A total of 19 developers contributed to this release. 
Thanks @ypwhs, @zywvvd, @collinzrj, @OceanPang, @ddonatien, @@haotian-liu, @viibridges, @Muyun99, @guigarfr, @zhaojinjian0000, @jbwang1997,@wangbo-zhao, @xvjiarui, @RangiLyu, @jshilong, @AronLin, @BIGWangYuDong, @hhaAndroid, @ZwwWayne ### v2.15.1 (11/8/2021) #### Highlights - Support [YOLOX](https://arxiv.org/abs/2107.08430) #### New Features - Support [YOLOX](https://arxiv.org/abs/2107.08430)(#5756, #5758, #5760, #5767, #5770, #5774, #5777, #5808, #5828, #5848) #### Bug Fixes - Update correct SSD models. (#5789) - Fix casting error in mask structure (#5820) - Fix MMCV deployment documentation links. (#5790) #### Improvements - Use dynamic MMCV download link in TorchServe dockerfile (#5779) - Rename the function `upsample_like` to `interpolate_as` for more general usage (#5788) #### Contributors A total of 14 developers contributed to this release. Thanks @HAOCHENYE, @xiaohu2015, @HsLOL, @zhiqwang, @Adamdad, @shinya7y, @Johnson-Wang, @RangiLyu, @jshilong, @mmeendez8, @AronLin, @BIGWangYuDong, @hhaAndroid, @ZwwWayne ### v2.15.0 (02/8/2021) #### Highlights - Support adding [MIM](https://github.com/open-mmlab/mim) dependencies during pip installation - Support MobileNetV2 for SSD-Lite and YOLOv3 - Support Chinese Documentation #### New Features - Add function `upsample_like` (#5732) - Support to output pdf and epub format documentation (#5738) - Support and release Cascade Mask R-CNN 3x pre-trained models (#5645) - Add `ignore_index` to CrossEntropyLoss (#5646) - Support adding [MIM](https://github.com/open-mmlab/mim) dependencies during pip installation (#5676) - Add MobileNetV2 config and models for YOLOv3 (#5510) - Support COCO Panoptic Dataset (#5231) - Support ONNX export of cascade models (#5486) - Support DropBlock with RetinaNet (#5544) - Support MobileNetV2 SSD-Lite (#5526) #### Bug Fixes - Fix the device of label in multiclass_nms (#5673) - Fix error of backbone initialization from pre-trained checkpoint in config file (#5603, #5550) - Fix download links of RegNet pretrained weights (#5655) - Fix two-stage runtime error given empty proposal (#5559) - Fix flops count error in DETR (#5654) - Fix unittest for `NumClassCheckHook` when it is not used. (#5626) - Fix description bug of using custom dataset (#5546) - Fix bug of `multiclass_nms` that returns the global indices (#5592) - Fix `valid_mask` logic error in RPNHead (#5562) - Fix unit test error of pretrained configs (#5561) - Fix typo error in anchor_head.py (#5555) - Fix bug when using dataset wrappers (#5552) - Fix a typo error in demo/MMDet_Tutorial.ipynb (#5511) - Fixing crash in `get_root_logger` when `cfg.log_level` is not None (#5521) - Fix docker version (#5502) - Fix optimizer parameter error when using `IterBasedRunner` (#5490) #### Improvements - Add unit tests for MMTracking (#5620) - Add Chinese translation of documentation (#5718, #5618, #5558, #5423, #5593, #5421, #5408. #5369, #5419, #5530, #5531) - Update resource limit (#5697) - Update docstring for InstaBoost (#5640) - Support key `reduction_override` in all loss functions (#5515) - Use repeatdataset to accelerate CenterNet training (#5509) - Remove unnecessary code in autoassign (#5519) - Add documentation about `init_cfg` (#5273) #### Contributors A total of 18 developers contributed to this release. 
Thanks @OceanPang, @AronLin, @hellock, @Outsider565, @RangiLyu, @ElectronicElephant, @likyoo, @BIGWangYuDong, @hhaAndroid, @noobying, @yyz561, @likyoo, @zeakey, @ZwwWayne, @ChenyangLiu, @johnson-magic, @qingswu, @BuxianChen ### v2.14.0 (29/6/2021) #### Highlights - Add `simple_test` to dense heads to improve the consistency of single-stage and two-stage detectors - Revert the `test_mixins` to single image test to improve efficiency and readability - Add Faster R-CNN and Mask R-CNN config using multi-scale training with 3x schedule #### New Features - Support pretrained models from MoCo v2 and SwAV (#5286) - Add Faster R-CNN and Mask R-CNN config using multi-scale training with 3x schedule (#5179, #5233) - Add `reduction_override` in MSELoss (#5437) - Stable support of exporting DETR to ONNX with dynamic shapes and batch inference (#5168) - Stable support of exporting PointRend to ONNX with dynamic shapes and batch inference (#5440) #### Bug Fixes - Fix size mismatch bug in `multiclass_nms` (#4980) - Fix the import path of `MultiScaleDeformableAttention` (#5338) - Fix errors in config of GCNet ResNext101 models (#5360) - Fix Grid-RCNN error when there is no bbox result (#5357) - Fix errors in `onnx_export` of bbox_head when setting reg_class_agnostic (#5468) - Fix type error of AutoAssign in the document (#5478) - Fix web links ending with `.md` (#5315) #### Improvements - Add `simple_test` to dense heads to improve the consistency of single-stage and two-stage detectors (#5264) - Add support for mask diagonal flip in TTA (#5403) - Revert the `test_mixins` to single image test to improve efficiency and readability (#5249) - Make YOLOv3 Neck more flexible (#5218) - Refactor SSD to make it more general (#5291) - Refactor `anchor_generator` and `point_generator` (#5349) - Allow configuring HTC without the `mask_head` (#5389) - Delete deprecated warning in FPN (#5311) - Move `model.pretrained` to `model.backbone.init_cfg` (#5370) - Make deployment tools more friendly to use (#5280) - Clarify installation documentation (#5316) - Add ImageNet Pretrained Models docs (#5268) - Add FAQ about solutions for training loss = nan and COCO AP or AR = -1 (#5312, #5313) - Change all weight links from http to https (#5328) ### v2.13.0 (01/6/2021) #### Highlights - Support new methods: [CenterNet](https://arxiv.org/abs/1904.07850), [Seesaw Loss](https://arxiv.org/abs/2008.10032), [MobileNetV2](https://arxiv.org/abs/1801.04381) #### New Features - Support paper [Objects as Points](https://arxiv.org/abs/1904.07850) (#4602) - Support paper [Seesaw Loss for Long-Tailed Instance Segmentation (CVPR 2021)](https://arxiv.org/abs/2008.10032) (#5128) - Support [MobileNetV2](https://arxiv.org/abs/1801.04381) backbone and inverted residual block (#5122) - Support [MIM](https://github.com/open-mmlab/mim) (#5143) - ONNX export of CornerNet with dynamic shapes (#5136) - Add `mask_soft` config option to allow non-binary masks (#4615) - Add PWC metafile (#5135) #### Bug Fixes - Fix YOLOv3 FP16 training error (#5172) - Fix Cascade R-CNN TTA test error when `det_bboxes` length is 0 (#5221) - Fix `iou_thr` variable naming errors in VOC recall calculation function (#5195) - Fix Faster R-CNN performance drop in ONNX Runtime (#5197) - Fix DETR 'dict changed during iteration' error when using Python 3.8 (#5226) #### Improvements - Refactor ONNX export of two stage detector (#5205) - Replace MMDetection's EvalHook with MMCV's EvalHook for consistency (#4806) - Update RoI extractor for ONNX (#5194) - Use better parameter
initialization in YOLOv3 head for higher performance (#5181) - Release new DCN models of Mask R-CNN by mixed-precision training (#5201) - Update YOLOv3 model weights (#5229) - Add DetectoRS ResNet-101 model weights (#4960) - Discard bboxes with sizes equals to `min_bbox_size` (#5011) - Remove duplicated code in DETR head (#5129) - Remove unnecessary object in class definition (#5180) - Fix doc link (#5192) ### v2.12.0 (01/5/2021) #### Highlights - Support new methods: [AutoAssign](https://arxiv.org/abs/2007.03496), [YOLOF](https://arxiv.org/abs/2103.09460), and [Deformable DETR](https://arxiv.org/abs/2010.04159) - Stable support of exporting models to ONNX with batched images and dynamic shape (#5039) #### Backwards Incompatible Changes MMDetection is going through big refactoring for more general and convenient usages during the releases from v2.12.0 to v2.15.0 (maybe longer). In v2.12.0 MMDetection inevitably brings some BC-breakings, including the MMCV dependency, model initialization, model registry, and mask AP evaluation. - MMCV version. MMDetection v2.12.0 relies on the newest features in MMCV 1.3.3, including `BaseModule` for unified parameter initialization, model registry, and the CUDA operator `MultiScaleDeformableAttn` for [Deformable DETR](https://arxiv.org/abs/2010.04159). Note that MMCV 1.3.2 already contains all the features used by MMDet but has known issues. Therefore, we recommend users skip MMCV v1.3.2 and use v1.3.3, though v1.3.2 might work for most cases. - Unified model initialization (#4750). To unify the parameter initialization in OpenMMLab projects, MMCV supports `BaseModule` that accepts `init_cfg` to allow the modules' parameters initialized in a flexible and unified manner. Now the users need to explicitly call `model.init_weights()` in the training script to initialize the model (as in [here](https://github.com/open-mmlab/mmdetection/blob/master/tools/train.py#L162), previously this was handled by the detector. The models in MMDetection have been re-benchmarked to ensure accuracy based on PR #4750. __The downstream projects should update their code accordingly to use MMDetection v2.12.0__. - Unified model registry (#5059). To easily use backbones implemented in other OpenMMLab projects, MMDetection migrates to inherit the model registry created in MMCV (#760). In this way, as long as the backbone is supported in an OpenMMLab project and that project also uses the registry in MMCV, users can use that backbone in MMDetection by simply modifying the config without copying the code of that backbone into MMDetection. - Mask AP evaluation (#4898). Previous versions calculate the areas of masks through the bounding boxes when calculating the mask AP of small, medium, and large instances. To indeed use the areas of masks, we pop the key `bbox` during mask AP calculation. This change does not affect the overall mask AP evaluation and aligns the mask AP of similar models in other projects like Detectron2. 
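To make the unified-initialization change above concrete, here is a minimal sketch of what a training script is now expected to do; the config path is only a placeholder and the builder calls mirror `tools/train.py`:

```python
from mmcv import Config
from mmdet.models import build_detector

cfg = Config.fromfile('configs/retinanet/retinanet_r50_fpn_1x_coco.py')  # placeholder config
model = build_detector(cfg.model,
                       train_cfg=cfg.get('train_cfg'),
                       test_cfg=cfg.get('test_cfg'))
# Since v2.12.0 the caller, not the detector, triggers parameter initialization.
model.init_weights()
```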
#### New Features - Support paper [AutoAssign: Differentiable Label Assignment for Dense Object Detection](https://arxiv.org/abs/2007.03496) (#4295) - Support paper [You Only Look One-level Feature](https://arxiv.org/abs/2103.09460) (#4295) - Support paper [Deformable DETR: Deformable Transformers for End-to-End Object Detection](https://arxiv.org/abs/2010.04159) (#4778) - Support calculating IoU with FP16 tensor in `bbox_overlaps` to save memory and keep speed (#4889) - Add `__repr__` in custom dataset to count the number of instances (#4756) - Add Windows support by updating requirements.txt (#5052) - Stable support of exporting models to ONNX with batched images and dynamic shape, including SSD, FSAF, FCOS, YOLOv3, RetinaNet, Faster R-CNN, and Mask R-CNN (#5039) #### Improvements - Use MMCV `MODEL_REGISTRY` (#5059) - Unified parameter initialization for more flexible usage (#4750) - Rename variable names and fix docstring in anchor head (#4883) - Support training with empty GT in Cascade RPN (#4928) - Add more details of usage of `test_robustness` in documentation (#4917) - Change to use `pycocotools` instead of `mmpycocotools` to fully support Detectron2 and MMDetection in one environment (#4939) - Update torch serve dockerfile to support more docker versions (#4954) - Add check for training with single-class datasets (#4973) - Refactor transformer and DETR Head (#4763) - Update FPG model zoo (#5079) - More accurate mask AP of small/medium/large instances (#4898) #### Bug Fixes - Fix bug in mean_ap.py when calculating mAP by 11 points (#4875) - Fix error when key `meta` is not in old checkpoints (#4936) - Fix hanging bug when training with empty GT in VFNet, GFL, and FCOS by changing the place of `reduce_mean` (#4923, #4978, #5058) - Fix asynchronous inference error and provide a related demo (#4941) - Fix IoU losses dimensionality mismatch error (#4982) - Fix torch.randperm when using PyTorch 1.8 (#5014) - Fix empty bbox error in `mask_head` when using CARAFE (#5062) - Fix `supplement_mask` bug when there are zero-size RoIs (#5065) - Fix testing with empty rois in RoI Heads (#5081) ### v2.11.0 (01/4/2021) __Highlights__ - Support new method: [Localization Distillation for Object Detection](https://arxiv.org/pdf/2102.12252.pdf) - Support Pytorch2ONNX with batch inference and dynamic shape __New Features__ - Support [Localization Distillation for Object Detection](https://arxiv.org/pdf/2102.12252.pdf) (#4758) - Support Pytorch2ONNX with batch inference and dynamic shape for Faster-RCNN and mainstream one-stage detectors (#4796) __Improvements__ - Support batch inference in head of RetinaNet (#4699) - Add batch dimension in second stage of Faster-RCNN (#4785) - Support batch inference in bbox coder (#4721) - Add check for `ann_ids` in `COCODataset` to ensure it is unique (#4789) - Support showing the FPN results (#4716) - Support dynamic shape for grid_anchor (#4684) - Move pycocotools version check to when it is used (#4880) __Bug Fixes__ - Fix a bug of TridentNet when doing batch inference (#4717) - Fix a bug of Pytorch2ONNX in FSAF (#4735) - Fix a bug when showing images of float type (#4732) ### v2.10.0 (01/03/2021) #### Highlights - Support new methods: [FPG](https://arxiv.org/abs/2004.03580) - Support ONNX2TensorRT for SSD, FSAF, FCOS, YOLOv3, and Faster R-CNN.
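For the FP16 `bbox_overlaps` item listed under the v2.12.0 features above, a hedged usage sketch (random boxes, a CUDA device assumed for half precision; the import path follows the 2.x layout):

```python
import torch
from mmdet.core.bbox import bbox_overlaps

# Build valid boxes in (x1, y1, x2, y2) form.
xy = torch.rand(128, 2) * 100
wh = torch.rand(128, 2) * 50 + 1
boxes = torch.cat([xy, xy + wh], dim=1).cuda()

# Half-precision inputs are accepted to save memory, as noted above.
ious = bbox_overlaps(boxes.half(), boxes.half())
print(ious.shape, ious.dtype)
```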
#### New Features - Support ONNX2TensorRT for SSD, FSAF, FCOS, YOLOv3, and Faster R-CNN (#4569) - Support [Feature Pyramid Grids (FPG)](https://arxiv.org/abs/2004.03580) (#4645) - Support video demo (#4420) - Add seed option for sampler (#4665) - Support to customize type of runner (#4570, #4669) - Support synchronizing BN buffer in `EvalHook` (#4582) - Add script for GIF demo (#4573) #### Bug Fixes - Fix ConfigDict AttributeError and add Colab link (#4643) - Avoid crash in empty gt training of GFL head (#4631) - Fix `iou_thrs` bug in RPN evaluation (#4581) - Fix syntax error of config when upgrading model version (#4584) #### Improvements - Refactor unit test file structures (#4600) - Refactor nms config (#4636) - Get loading pipeline by checking the class directly rather than through config strings (#4619) - Add doctests for mask target generation and mask structures (#4614) - Use deep copy when copying pipeline arguments (#4621) - Update documentations (#4642, #4650, #4620, #4630) - Remove redundant code calling `import_modules_from_strings` (#4601) - Clean deprecated FP16 API (#4571) - Check whether `CLASSES` is correctly initialized in the initialization of `XMLDataset` (#4555) - Support batch inference in the inference API (#4462, #4526) - Clean deprecated warning and fix 'meta' error (#4695) ### v2.9.0 (01/02/2021) #### Highlights - Support new methods: [SCNet](https://arxiv.org/abs/2012.10150), [Sparse R-CNN](https://arxiv.org/abs/2011.12450) - Move `train_cfg` and `test_cfg` into model in configs - Support to visualize results based on prediction quality #### New Features - Support [SCNet](https://arxiv.org/abs/2012.10150) (#4356) - Support [Sparse R-CNN](https://arxiv.org/abs/2011.12450) (#4219) - Support evaluate mAP by multiple IoUs (#4398) - Support concatenate dataset for testing (#4452) - Support to visualize results based on prediction quality (#4441) - Add ONNX simplify option to Pytorch2ONNX script (#4468) - Add hook for checking compatibility of class numbers in heads and datasets (#4508) #### Bug Fixes - Fix CPU inference bug of Cascade RPN (#4410) - Fix NMS error of CornerNet when there is no prediction box (#4409) - Fix TypeError in CornerNet inference (#4411) - Fix bug of PAA when training with background images (#4391) - Fix the error that the window data is not destroyed when `out_file is not None` and `show==False` (#4442) - Fix order of NMS `score_factor` that will decrease the performance of YOLOv3 (#4473) - Fix bug in HTC TTA when the number of detection boxes is 0 (#4516) - Fix resize error in mask data structures (#4520) #### Improvements - Allow to customize classes in LVIS dataset (#4382) - Add tutorials for building new models with existing datasets (#4396) - Add CPU compatibility information in documentation (#4405) - Add documentation of deprecated `ImageToTensor` for batch inference (#4408) - Add more details in documentation for customizing dataset (#4430) - Switch `imshow_det_bboxes` visualization backend from OpenCV to Matplotlib (#4389) - Deprecate `ImageToTensor` in `image_demo.py` (#4400) - Move train_cfg/test_cfg into model (#4347, #4489) - Update docstring for `reg_decoded_bbox` option in bbox heads (#4467) - Update dataset information in documentation (#4525) - Release pre-trained R50 and R101 PAA detectors with multi-scale 3x training schedules (#4495) - Add guidance for speed benchmark (#4537) ### v2.8.0 (04/01/2021) #### Highlights - Support new methods: [Cascade RPN](https://arxiv.org/abs/1909.06720), 
[TridentNet](https://arxiv.org/abs/1901.01892) #### New Features - Support [Cascade RPN](https://arxiv.org/abs/1909.06720) (#1900) - Support [TridentNet](https://arxiv.org/abs/1901.01892) (#3313) #### Bug Fixes - Fix bug of showing results in async_benchmark (#4367) - Fix scale factor in MaskTestMixin (#4366) - Fix bug when returning indices in `multiclass_nms` (#4362) - Fix bug of empirical attention in ResNeXt backbone (#4300) - Fix bug of `img_norm_cfg` in FCOS-HRNet models with updated performance and models (#4250) - Fix invalid checkpoint and log in Mask R-CNN models on Cityscapes dataset (#4287) - Fix bug in distributed sampler when dataset is too small (#4257) - Fix bug of 'PAFPN has no attribute extra_convs_on_inputs' (#4235) #### Improvements - Update model url from aws to aliyun (#4349) - Update ATSS for PyTorch 1.6+ (#4359) - Update script to install ruby in pre-commit installation (#4360) - Delete deprecated `mmdet.ops` (#4325) - Refactor hungarian assigner for more general usage in Sparse R-CNN (#4259) - Handle scipy import in DETR to reduce package dependencies (#4339) - Update documentation of usages for config options after MMCV (1.2.3) supports overriding list in config (#4326) - Update pre-train models of faster rcnn trained on COCO subsets (#4307) - Avoid zero or too small value for beta in Dynamic R-CNN (#4303) - Add documentation for Pytorch2ONNX (#4271) - Add deprecation warning for FPN arguments (#4264) - Support returning indices of kept bboxes when using nms (#4251) - Update type and device requirements when creating tensors in `GFLHead` (#4210) - Update device requirements when creating tensors in `CrossEntropyLoss` (#4224) ### v2.7.0 (30/11/2020) - Support new methods: [DETR](https://arxiv.org/abs/2005.12872), [ResNeSt](https://arxiv.org/abs/2004.08955), Faster R-CNN DC5. - Support YOLO, Mask R-CNN, and Cascade R-CNN models exportable to ONNX. #### New Features - Support [DETR](https://arxiv.org/abs/2005.12872) (#4201, #4206) - Support to link the best checkpoint in training (#3773) - Support to override config through options in inference.py (#4175) - Support YOLO, Mask R-CNN, and Cascade R-CNN models exportable to ONNX (#4087, #4083) - Support [ResNeSt](https://arxiv.org/abs/2004.08955) backbone (#2959) - Support unclip border bbox regression (#4076) - Add tpfp func in evaluating AP (#4069) - Support mixed precision training of SSD detector with other backbones (#4081) - Add Faster R-CNN DC5 models (#4043) #### Bug Fixes - Fix bug of `gpu_id` in distributed training mode (#4163) - Support Albumentations with version higher than 0.5 (#4032) - Fix num_classes bug in faster rcnn config (#4088) - Update code in docs/2_new_data_model.md (#4041) #### Improvements - Ensure DCN offset to have similar type as features in VFNet (#4198) - Add config links in README files of models (#4190) - Add tutorials for loss conventions (#3818) - Add solution to installation issues in 30-series GPUs (#4176) - Update docker version in get_started.md (#4145) - Add model statistics and polish some titles in configs README (#4140) - Clamp neg probability in FreeAnchor (#4082) - Speed up expanding large images (#4089) - Fix Pytorch 1.7 incompatibility issues (#4103) - Update troubleshooting page to resolve segmentation fault (#4055) - Update aLRP-Loss in project page (#4078) - Clean duplicated `reduce_mean` function (#4056) - Refactor Q&A (#4045) ### v2.6.0 (1/11/2020) - Support new method: [VarifocalNet](https://arxiv.org/abs/2008.13367). - Refactored documentation with more tutorials.
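The detectors added in the releases above (e.g. DETR in v2.7.0, VarifocalNet in v2.6.0) are used through the regular high-level inference API; a small sketch with placeholder config and checkpoint paths:

```python
from mmdet.apis import init_detector, inference_detector

config_file = 'configs/detr/detr_r50_8x2_150e_coco.py'        # placeholder paths
checkpoint_file = 'checkpoints/detr_r50_8x2_150e_coco.pth'

model = init_detector(config_file, checkpoint_file, device='cuda:0')
result = inference_detector(model, 'demo/demo.jpg')  # per-class bbox results
```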
#### New Features - Support GIoU calculation in `BboxOverlaps2D`, and re-implement `giou_loss` using `bbox_overlaps` (#3936) - Support random sampling in CPU mode (#3948) - Support VarifocalNet (#3666, #4024) #### Bug Fixes - Fix SABL validating bug in Cascade R-CNN (#3913) - Avoid division by zero in PAA head when num_pos=0 (#3938) - Fix temporary directory bug of multi-node testing error (#4034, #4017) - Fix `--show-dir` option in test script (#4025) - Fix GA-RetinaNet r50 model url (#3983) - Update code in docs and fix broken urls (#3947) #### Improvements - Refactor pytorch2onnx API into `mmdet.core.export` and use `generate_inputs_and_wrap_model` for pytorch2onnx (#3857, #3912) - Update RPN upgrade scripts for v2.5.0 compatibility (#3986) - Use mmcv `tensor2imgs` (#4010) - Update test robustness (#4000) - Update trouble shooting page (#3994) - Accelerate PAA training speed (#3985) - Support batch_size > 1 in validation (#3966) - Use RoIAlign implemented in MMCV for inference in CPU mode (#3930) - Documentation refactoring (#4031) ### v2.5.0 (5/10/2020) #### Highlights - Support new methods: [YOLACT](https://arxiv.org/abs/1904.02689), [CentripetalNet](https://arxiv.org/abs/2003.09119). - Add more documentations for easier and more clear usage. #### Backwards Incompatible Changes __FP16 related methods are imported from mmcv instead of mmdet. (#3766, #3822)__ Mixed precision training utils in `mmdet.core.fp16` are moved to `mmcv.runner`, including `force_fp32`, `auto_fp16`, `wrap_fp16_model`, and `Fp16OptimizerHook`. A deprecation warning will be raised if users attempt to import those methods from `mmdet.core.fp16`, and will be finally removed in V2.10.0. __\[0, N-1\] represents foreground classes and N indicates background classes for all models. (#3221)__ Before v2.5.0, the background label for RPN is 0, and N for other heads. Now the behavior is consistent for all models. Thus `self.background_labels` in `dense_heads` is removed and all heads use `self.num_classes` to indicate the class index of background labels. This change has no effect on the pre-trained models in the v2.x model zoo, but will affect the training of all models with RPN heads. Two-stage detectors whose RPN head uses softmax will be affected because the order of categories is changed. **Only call `get_subset_by_classes` when `test_mode=True` and `self.filter_empty_gt=True` (#3695)** Function `get_subset_by_classes` in dataset is refactored and only filters out images when `test_mode=True` and `self.filter_empty_gt=True`. In the original implementation, `get_subset_by_classes` is not related to the flag `self.filter_empty_gt` and will only be called when the classes is set during initialization no matter `test_mode` is `True` or `False`. This brings ambiguous behavior and potential bugs in many cases. After v2.5.0, if `filter_empty_gt=False`, no matter whether the classes are specified in a dataset, the dataset will use all the images in the annotations. If `filter_empty_gt=True` and `test_mode=True`, no matter whether the classes are specified, the dataset will call \`\`get_subset_by_classes\` to check the images and filter out images containing no GT boxes. Therefore, the users should be responsible for the data filtering/cleaning process for the test dataset. 
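A hedged illustration of how the filtering semantics described above surface in a dataset config; the paths and classes are placeholders:

```python
# With filter_empty_gt=False, every annotated image is used even if `classes`
# restricts the categories; the `get_subset_by_classes` check described above
# only runs when filter_empty_gt=True and test_mode=True.
train_dataset = dict(
    type='CocoDataset',
    ann_file='data/coco/annotations/instances_train2017.json',
    img_prefix='data/coco/train2017/',
    classes=('person', 'car'),
    filter_empty_gt=False)
```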
#### New Features - Test time augmentation for single stage detectors (#3844, #3638) - Support to show the name of experiments during training (#3764) - Add `Shear`, `Rotate`, and `Translate` augmentations (#3656, #3619, #3687) - Add image-only transformations including `Contrast`, `Equalize`, `Color`, and `Brightness`. (#3643) - Support [YOLACT](https://arxiv.org/abs/1904.02689) (#3456) - Support [CentripetalNet](https://arxiv.org/abs/2003.09119) (#3390) - Support PyTorch 1.6 in docker (#3905) #### Bug Fixes - Fix the bug of training ATSS when there are no ground truth boxes (#3702) - Fix the bug of using Focal Loss when `num_pos` is 0 (#3702) - Fix the label index mapping in dataset browser (#3708) - Fix the problem of Mask R-CNN training getting stuck when there are no positive RoIs (#3713) - Fix the bug of `self.rpn_head.test_cfg` in `RPNTestMixin` by using `self.rpn_head` in rpn head (#3808) - Fix deprecated `Conv2d` from mmcv.ops (#3791) - Fix device bug in RepPoints (#3836) - Fix SABL validating bug (#3849) - Use `https://download.openmmlab.com/mmcv/dist/index.html` for installing MMCV (#3840) - Fix nonzero in NMS for PyTorch 1.6.0 (#3867) - Fix the API change bug of PAA (#3883) - Fix typo in bbox_flip (#3886) - Fix cv2 import error of libGL.so.1 in Dockerfile (#3891) #### Improvements - Change to use `mmcv.utils.collect_env` for collecting environment information to avoid duplicate code (#3779) - Update checkpoint file names to v2.0 models in documentation (#3795) - Update tutorials for changing runtime settings (#3778), modifying loss (#3777) - Improve the function of `simple_test_bboxes` in SABL (#3853) - Convert mask to bool before using it as img's index for robustness and speedup (#3870) - Improve documentation of modules and dataset customization (#3821) ### v2.4.0 (5/9/2020) __Highlights__ - Fix lots of issues/bugs and reorganize the troubleshooting page - Support new methods [SABL](https://arxiv.org/abs/1912.04260), [YOLOv3](https://arxiv.org/abs/1804.02767), and [PAA Assign](https://arxiv.org/abs/2007.08103) - Support Batch Inference - Start to publish `mmdet` package to PyPI since v2.3.0 - Switch model zoo to download.openmmlab.com __Backwards Incompatible Changes__ - Support Batch Inference (#3564, #3686, #3705): Since v2.4.0, MMDetection can run inference with multiple images on a single GPU. This change influences all the test APIs in MMDetection and downstream codebases. To help the users migrate their code, we use `replace_ImageToTensor` (#3686) to convert legacy test data pipelines during dataset initialization. - Support RandomFlip with horizontal/vertical/diagonal direction (#3608): Since v2.4.0, MMDetection supports horizontal/vertical/diagonal flip in the data augmentation. This influences bounding box, mask, and image transformations in the data augmentation process and the process that maps those data back to the original format. - Migrate to use `mmlvis` and `mmpycocotools` for the COCO and LVIS datasets (#3727). The APIs are fully compatible with the original `lvis` and `pycocotools`. Users need to uninstall the existing pycocotools and lvis packages in their environment first and then install `mmlvis` & `mmpycocotools`.
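For the RandomFlip change above, a minimal train-pipeline sketch; the flip ratios are illustrative only:

```python
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', with_bbox=True, with_mask=True),
    # Since v2.4.0 each direction can be drawn with its own probability.
    dict(type='RandomFlip',
         flip_ratio=[0.25, 0.25, 0.25],
         direction=['horizontal', 'vertical', 'diagonal']),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']),
]
```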
__Bug Fixes__ - Fix default mean/std for onnx (#3491) - Fix coco evaluation and add metric items (#3497) - Fix typo for install.md (#3516) - Fix atss when sampler per gpu is 1 (#3528) - Fix import of fuse_conv_bn (#3529) - Fix bug of gaussian_target, update unittest of heatmap (#3543) - Fixed VOC2012 evaluate (#3553) - Fix scale factor bug of rescale (#3566) - Fix with_xxx_attributes in base detector (#3567) - Fix boxes scaling when number is 0 (#3575) - Fix rfp check when neck config is a list (#3591) - Fix import of fuse conv bn in benchmark.py (#3606) - Fix webcam demo (#3634) - Fix typo and itemize issues in tutorial (#3658) - Fix error in distributed training when some levels of FPN are not assigned with bounding boxes (#3670) - Fix the width and height orders of stride in valid flag generation (#3685) - Fix weight initialization bug in Res2Net DCN (#3714) - Fix bug in OHEMSampler (#3677) __New Features__ - Support Cutout augmentation (#3521) - Support evaluation on multiple datasets through ConcatDataset (#3522) - Support [PAA assign](https://arxiv.org/abs/2007.08103) #(3547) - Support eval metric with pickle results (#3607) - Support [YOLOv3](https://arxiv.org/abs/1804.02767) (#3083) - Support [SABL](https://arxiv.org/abs/1912.04260) (#3603) - Support to publish to Pypi in github-action (#3510) - Support custom imports (#3641) __Improvements__ - Refactor common issues in documentation (#3530) - Add pytorch 1.6 to CI config (#3532) - Add config to runner meta (#3534) - Add eval-option flag for testing (#3537) - Add init_eval to evaluation hook (#3550) - Add include_bkg in ClassBalancedDataset (#3577) - Using config's loading in inference_detector (#3611) - Add ATSS ResNet-101 models in model zoo (#3639) - Update urls to download.openmmlab.com (#3665) - Support non-mask training for CocoDataset (#3711) ### v2.3.0 (5/8/2020) __Highlights__ - The CUDA/C++ operators have been moved to `mmcv.ops`. For backward compatibility `mmdet.ops` is kept as warppers of `mmcv.ops`. - Support new methods [CornerNet](https://arxiv.org/abs/1808.01244), [DIOU](https://arxiv.org/abs/1911.08287)/[CIOU](https://arxiv.org/abs/2005.03572) loss, and new dataset: [LVIS V1](https://arxiv.org/abs/1908.03195) - Provide more detailed colab training tutorials and more complete documentation. - Support to convert RetinaNet from Pytorch to ONNX. 
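For the operator migration highlighted above (CUDA/C++ ops now live in `mmcv.ops`, with `mmdet.ops` kept only as wrappers), a small usage sketch:

```python
import numpy as np
from mmcv.ops import nms  # preferred import since v2.3.0; mmdet.ops merely wraps this

boxes = np.array([[10, 10, 50, 50],
                  [12, 12, 52, 52],
                  [100, 100, 150, 150]], dtype=np.float32)
scores = np.array([0.9, 0.8, 0.7], dtype=np.float32)

# Returns the kept detections (boxes + scores) and the indices of the kept boxes.
dets, keep = nms(boxes, scores, iou_threshold=0.5)
print(keep)  # the heavily overlapping second box is suppressed
```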
__Bug Fixes__ - Fix the model initialization bug of DetectoRS (#3187) - Fix the bug of module names in NASFCOSHead (#3205) - Fix the filename bug in publish_model.py (#3237) - Fix the dimensionality bug when `inside_flags.any()` is `False` in dense heads (#3242) - Fix the bug of forgetting to pass flip directions in `MultiScaleFlipAug` (#3262) - Fix the bug caused by the default value of `stem_channels` (#3333) - Fix the bug of model checkpoint loading for CPU inference (#3318, #3316) - Fix topk bug when box number is smaller than the expected topk number in ATSSAssigner (#3361) - Fix the gt priority bug in center_region_assigner.py (#3208) - Fix NaN issue of iou calculation in iou_loss.py (#3394) - Fix the bug that `iou_thrs` is not actually used during evaluation in coco.py (#3407) - Fix test-time augmentation of RepPoints (#3435) - Fix RuntimeError caused by non-contiguous tensor in Res2Net+DCN (#3412) __New Features__ - Support [CornerNet](https://arxiv.org/abs/1808.01244) (#3036) - Support [DIOU](https://arxiv.org/abs/1911.08287)/[CIOU](https://arxiv.org/abs/2005.03572) loss (#3151) - Support [LVIS V1](https://arxiv.org/abs/1908.03195) dataset (#) - Support customized hooks in training (#3395) - Support fp16 training of generalized focal loss (#3410) - Support to convert RetinaNet from Pytorch to ONNX (#3075) __Improvements__ - Support to process ignore boxes in ATSS assigner (#3082) - Allow to crop images without ground truth in `RandomCrop` (#3153) - Enable the `Accuracy` module to set a threshold (#3155) - Refactor unit tests (#3206) - Unify the training settings of `to_float32` and `norm_cfg` in RegNets configs (#3210) - Add colab training tutorials for beginners (#3213, #3273) - Move CUDA/C++ operators into `mmcv.ops` and keep `mmdet.ops` as wrappers for backward compatibility (#3232, #3457) - Update installation scripts in documentation (#3290) and dockerfile (#3320) - Support to set image resize backend (#3392) - Remove git hash in version file (#3466) - Check mmcv version to force version compatibility (#3460) ### v2.2.0 (1/7/2020) __Highlights__ - Support new methods: [DetectoRS](https://arxiv.org/abs/2006.02334), [PointRend](https://arxiv.org/abs/1912.08193), [Generalized Focal Loss](https://arxiv.org/abs/2006.04388), [Dynamic R-CNN](https://arxiv.org/abs/2004.06002) __Bug Fixes__ - Fix FreeAnchor when there is no gt in the image (#3176) - Clean up deprecated usage of `register_module()` (#3092, #3161) - Fix pretrain bug in NAS FCOS (#3145) - Fix `num_classes` in SSD (#3142) - Fix FCOS warmup (#3119) - Fix `rstrip` in `tools/publish_model.py` - Fix `flip_ratio` default value in RandomFlip pipeline (#3106) - Fix cityscapes eval with ms_rcnn (#3112) - Fix RPN softmax (#3056) - Fix filename of [email protected] (#2998) - Fix nan loss by filtering out-of-frame gt_bboxes in COCO (#2999) - Fix bug in FSAF (#3018) - Add FocalLoss `num_classes` check (#2964) - Fix PISA Loss when there are no gts (#2992) - Avoid nan in `iou_calculator` (#2975) - Prevent possible bugs in loading and transforms caused by shallow copy (#2967) __New Features__ - Add DetectoRS (#3064) - Support Generalized Focal Loss (#3097) - Support PointRend (#2752) - Support Dynamic R-CNN (#3040) - Add DeepFashion dataset (#2968) - Implement FCOS training tricks (#2935) - Use BaseDenseHead as base class for anchor-based heads (#2963) - Add `with_cp` for BasicBlock (#2891) - Add `stem_channels` argument for ResNet (#2954) __Improvements__ - Add anchor-free base head (#2867) - Migrate to github action (#3137) - Add docstring for
datasets, pipelines, core modules and methods (#3130, #3125, #3120) - Add VOC benchmark (#3060) - Add `concat` mode in GRoI (#3098) - Remove cmd arg `autorescale-lr` (#3080) - Use `len(data['img_metas'])` to indicate `num_samples` (#3073, #3053) - Switch to EpochBasedRunner (#2976) ### v2.1.0 (8/6/2020) __Highlights__ - Support new backbones: [RegNetX](https://arxiv.org/abs/2003.13678), [Res2Net](https://arxiv.org/abs/1904.01169) - Support new methods: [NASFCOS](https://arxiv.org/abs/1906.04423), [PISA](https://arxiv.org/abs/1904.04821), [GRoIE](https://arxiv.org/abs/2004.13665) - Support new dataset: [LVIS](https://arxiv.org/abs/1908.03195) __Bug Fixes__ - Change the CLI argument `--validate` to `--no-validate` to enable validation after training epochs by default. (#2651) - Add missing cython to docker file (#2713) - Fix bug in nms cpu implementation (#2754) - Fix bug when showing mask results (#2763) - Fix gcc requirement (#2806) - Fix bug in async test (#2820) - Fix mask encoding-decoding bugs in test API (#2824) - Fix bug in test time augmentation (#2858, #2921, #2944) - Fix a typo in comment of apis/train (#2877) - Fix the bug of returning None when no gt bboxes are in the original image in `RandomCrop`. Fix the bug that misses to handle `gt_bboxes_ignore`, `gt_label_ignore`, and `gt_masks_ignore` in `RandomCrop`, `MinIoURandomCrop` and `Expand` modules. (#2810) - Fix bug of `base_channels` of regnet (#2917) - Fix the bug of logger when loading pre-trained weights in base detector (#2936) __New Features__ - Add IoU models (#2666) - Add colab demo for inference - Support class agnostic nms (#2553) - Add benchmark gathering scripts for development only (#2676) - Add mmdet-based project links (#2736, #2767, #2895) - Add config dump in training (#2779) - Add ClassBalancedDataset (#2721) - Add res2net backbone (#2237) - Support RegNetX models (#2710) - Use `mmcv.FileClient` to support different storage backends (#2712) - Add ClassBalancedDataset (#2721) - Code Release: Prime Sample Attention in Object Detection (CVPR 2020) (#2626) - Implement NASFCOS (#2682) - Add class weight in CrossEntropyLoss (#2797) - Support LVIS dataset (#2088) - Support GRoIE (#2584) __Improvements__ - Allow different x and y strides in anchor heads. (#2629) - Make FSAF loss more robust to no gt (#2680) - Compute pure inference time instead (#2657) and update inference speed (#2730) - Avoided the possibility that a patch with 0 area is cropped. (#2704) - Add warnings when deprecated `imgs_per_gpu` is used. (#2700) - Add a mask rcnn example for config (#2645) - Update model zoo (#2762, #2866, #2876, #2879, #2831) - Add `ori_filename` to img_metas and use it in test show-dir (#2612) - Use `img_fields` to handle multiple images during image transform (#2800) - Add upsample_cfg support in FPN (#2787) - Add `['img']` as default `img_fields` for back compatibility (#2809) - Rename the pretrained model from `open-mmlab://resnet50_caffe` and `open-mmlab://resnet50_caffe_bgr` to `open-mmlab://detectron/resnet50_caffe` and `open-mmlab://detectron2/resnet50_caffe`. (#2832) - Added sleep(2) in test.py to reduce hanging problem (#2847) - Support `c10::half` in CARAFE (#2890) - Improve documentations (#2918, #2714) - Use optimizer constructor in mmcv and clean the original implementation in `mmdet.core.optimizer` (#2947) ### v2.0.0 (6/5/2020) In this release, we made lots of major refactoring and modifications. 1. __Faster speed__. 
We optimize the training and inference speed for common models, achieving up to 30% speedup for training and 25% for inference. Please refer to [model zoo](model_zoo.md#comparison-with-detectron2) for details. 2. __Higher performance__. We change some default hyperparameters with no additional cost, which leads to a gain of performance for most models. Please refer to [compatibility](compatibility.md#training-hyperparameters) for details. 3. __More documentation and tutorials__. We add a bunch of documentation and tutorials to help users get started more smoothly. Read it [here](https://mmdetection.readthedocs.io/en/latest/). 4. __Support PyTorch 1.5__. The support for 1.1 and 1.2 is dropped, and we switch to some new APIs. 5. __Better configuration system__. Inheritance is supported to reduce the redundancy of configs. 6. __Better modular design__. Towards the goal of simplicity and flexibility, we simplify some encapsulation while add more other configurable modules like BBoxCoder, IoUCalculator, OptimizerConstructor, RoIHead. Target computation is also included in heads and the call hierarchy is simpler. 7. Support new methods: [FSAF](https://arxiv.org/abs/1903.00621) and PAFPN (part of [PAFPN](https://arxiv.org/abs/1803.01534)). __Breaking Changes__ Models training with MMDetection 1.x are not fully compatible with 2.0, please refer to the [compatibility doc](compatibility.md) for the details and how to migrate to the new version. __Improvements__ - Unify cuda and cpp API for custom ops. (#2277) - New config files with inheritance. (#2216) - Encapsulate the second stage into RoI heads. (#1999) - Refactor GCNet/EmpericalAttention into plugins. (#2345) - Set low quality match as an option in IoU-based bbox assigners. (#2375) - Change the codebase's coordinate system. (#2380) - Refactor the category order in heads. 0 means the first positive class instead of background now. (#2374) - Add bbox sampler and assigner registry. (#2419) - Speed up the inference of RPN. (#2420) - Add `train_cfg` and `test_cfg` as class members in all anchor heads. (#2422) - Merge target computation methods into heads. (#2429) - Add bbox coder to support different bbox encoding and losses. (#2480) - Unify the API for regression loss. (#2156) - Refactor Anchor Generator. (#2474) - Make `lr` an optional argument for optimizers. (#2509) - Migrate to modules and methods in MMCV. (#2502, #2511, #2569, #2572) - Support PyTorch 1.5. (#2524) - Drop the support for Python 3.5 and use F-string in the codebase. (#2531) __Bug Fixes__ - Fix the scale factors for resized images without keep the aspect ratio. (#2039) - Check if max_num > 0 before slicing in NMS. (#2486) - Fix Deformable RoIPool when there is no instance. (#2490) - Fix the default value of assigned labels. (#2536) - Fix the evaluation of Cityscapes. (#2578) __New Features__ - Add deep_stem and avg_down option to ResNet, i.e., support ResNetV1d. (#2252) - Add L1 loss. (#2376) - Support both polygon and bitmap for instance masks. (#2353, #2540) - Support CPU mode for inference. (#2385) - Add optimizer constructor for complicated configuration of optimizers. (#2397, #2488) - Implement PAFPN. (#2392) - Support empty tensor input for some modules. (#2280) - Support for custom dataset classes without overriding it. (#2408, #2443) - Support to train subsets of coco dataset. (#2340) - Add iou_calculator to potentially support more IoU calculation methods. (2405) - Support class wise mean AP (was removed in the last version). 
(#2459) - Add option to save the testing result images. (#2414) - Support MomentumUpdaterHook. (#2571) - Add a demo to inference a single image. (#2605) ### v1.1.0 (24/2/2020) __Highlights__ - Dataset evaluation is rewritten with a unified api, which is used by both evaluation hooks and test scripts. - Support new methods: [CARAFE](https://arxiv.org/abs/1905.02188). __Breaking Changes__ - The new MMDDP inherits from the official DDP, thus the `__init__` api is changed to be the same as official DDP. - The `mask_head` field in HTC config files is modified. - The evaluation and testing script is updated. - In all transforms, instance masks are stored as a numpy array shaped (n, h, w) instead of a list of (h, w) arrays, where n is the number of instances. __Bug Fixes__ - Fix IOU assigners when ignore_iof_thr > 0 and there is no pred boxes. (#2135) - Fix mAP evaluation when there are no ignored boxes. (#2116) - Fix the empty RoI input for Deformable RoI Pooling. (#2099) - Fix the dataset settings for multiple workflows. (#2103) - Fix the warning related to `torch.uint8` in PyTorch 1.4. (#2105) - Fix the inference demo on devices other than gpu:0. (#2098) - Fix Dockerfile. (#2097) - Fix the bug that `pad_val` is unused in Pad transform. (#2093) - Fix the albumentation transform when there is no ground truth bbox. (#2032) __Improvements__ - Use torch instead of numpy for random sampling. (#2094) - Migrate to the new MMDDP implementation in MMCV v0.3. (#2090) - Add meta information in logs. (#2086) - Rewrite Soft NMS with pytorch extension and remove cython as a dependency. (#2056) - Rewrite dataset evaluation. (#2042, #2087, #2114, #2128) - Use numpy array for masks in transforms. (#2030) __New Features__ - Implement "CARAFE: Content-Aware ReAssembly of FEatures". (#1583) - Add `worker_init_fn()` in data_loader when seed is set. (#2066, #2111) - Add logging utils. (#2035) ### v1.0.0 (30/1/2020) This release mainly improves the code quality and add more docstrings. __Highlights__ - Documentation is online now: <https://mmdetection.readthedocs.io>. - Support new models: [ATSS](https://arxiv.org/abs/1912.02424). - DCN is now available with the api `build_conv_layer` and `ConvModule` like the normal conv layer. - A tool to collect environment information is available for trouble shooting. __Bug Fixes__ - Fix the incompatibility of the latest numpy and pycocotools. (#2024) - Fix the case when distributed package is unavailable, e.g., on Windows. (#1985) - Fix the dimension issue for `refine_bboxes()`. (#1962) - Fix the typo when `seg_prefix` is a list. (#1906) - Add segmentation map cropping to RandomCrop. (#1880) - Fix the return value of `ga_shape_target_single()`. (#1853) - Fix the loaded shape of empty proposals. (#1819) - Fix the mask data type when using albumentation. (#1818) __Improvements__ - Enhance AssignResult and SamplingResult. (#1995) - Add ability to overwrite existing module in Registry. (#1982) - Reorganize requirements and make albumentations and imagecorruptions optional. (#1969) - Check NaN in `SSDHead`. (#1935) - Encapsulate the DCN in ResNe(X)t into a ConvModule & Conv_layers. (#1894) - Refactoring for mAP evaluation and support multiprocessing and logging. (#1889) - Init the root logger before constructing Runner to log more information. (#1865) - Split `SegResizeFlipPadRescale` into different existing transforms. (#1852) - Move `init_dist()` to MMCV. (#1851) - Documentation and docstring improvements. 
(#1971, #1938, #1869, #1838) - Fix the color of the same class for mask visualization. (#1834) - Remove the option `keep_all_stages` in HTC and Cascade R-CNN. (#1806) __New Features__ - Add two test-time options `crop_mask` and `rle_mask_encode` for mask heads. (#2013) - Support loading grayscale images as single channel. (#1975) - Implement "Bridging the Gap Between Anchor-based and Anchor-free Detection via Adaptive Training Sample Selection". (#1872) - Add sphinx generated docs. (#1859, #1864) - Add GN support for flops computation. (#1850) - Collect env info for trouble shooting. (#1812) ### v1.0rc1 (13/12/2019) The RC1 release mainly focuses on improving the user experience, and fixing bugs. __Highlights__ - Support new models: [FoveaBox](https://arxiv.org/abs/1904.03797), [RepPoints](https://arxiv.org/abs/1904.11490) and [FreeAnchor](https://arxiv.org/abs/1909.02466). - Add a Dockerfile. - Add a jupyter notebook demo and a webcam demo. - Setup the code style and CI. - Add lots of docstrings and unit tests. - Fix lots of bugs. __Breaking Changes__ - There was a bug for computing COCO-style mAP w.r.t different scales (AP_s, AP_m, AP_l), introduced by #621. (#1679) __Bug Fixes__ - Fix a sampling interval bug in Libra R-CNN. (#1800) - Fix the learning rate in SSD300 WIDER FACE. (#1781) - Fix the scaling issue when `keep_ratio=False`. (#1730) - Fix typos. (#1721, #1492, #1242, #1108, #1107) - Fix the shuffle argument in `build_dataloader`. (#1693) - Clip the proposal when computing mask targets. (#1688) - Fix the "index out of range" bug for samplers in some corner cases. (#1610, #1404) - Fix the NMS issue on devices other than GPU:0. (#1603) - Fix SSD Head and GHM Loss on CPU. (#1578) - Fix the OOM error when there are too many gt bboxes. (#1575) - Fix the wrong keyword argument `nms_cfg` in HTC. (#1573) - Process masks and semantic segmentation in Expand and MinIoUCrop transforms. (#1550, #1361) - Fix a scale bug in the Non Local op. (#1528) - Fix a bug in transforms when `gt_bboxes_ignore` is None. (#1498) - Fix a bug when `img_prefix` is None. (#1497) - Pass the device argument to `grid_anchors` and `valid_flags`. (#1478) - Fix the data pipeline for test_robustness. (#1476) - Fix the argument type of deformable pooling. (#1390) - Fix the coco_eval when there are only two classes. (#1376) - Fix a bug in Modulated DeformableConv when deformable_group>1. (#1359) - Fix the mask cropping in RandomCrop. (#1333) - Fix zero outputs in DeformConv when not running on cuda:0. (#1326) - Fix the type issue in Expand. (#1288) - Fix the inference API. (#1255) - Fix the inplace operation in Expand. (#1249) - Fix the from-scratch training config. (#1196) - Fix inplace add in RoIExtractor which cause an error in PyTorch 1.2. (#1160) - Fix FCOS when input images has no positive sample. (#1136) - Fix recursive imports. (#1099) __Improvements__ - Print the config file and mmdet version in the log. (#1721) - Lint the code before compiling in travis CI. (#1715) - Add a probability argument for the `Expand` transform. (#1651) - Update the PyTorch and CUDA version in the docker file. (#1615) - Raise a warning when specifying `--validate` in non-distributed training. (#1624, #1651) - Beautify the mAP printing. (#1614) - Add pre-commit hook. (#1536) - Add the argument `in_channels` to backbones. (#1475) - Add lots of docstrings and unit tests, thanks to [@Erotemic](https://github.com/Erotemic). 
(#1603, #1517, #1506, #1505, #1491, #1479, #1477, #1475, #1474) - Add support for multi-node distributed test when there is no shared storage. (#1399) - Optimize Dockerfile to reduce the image size. (#1306) - Update new results of HRNet. (#1284, #1182) - Add an argument `no_norm_on_lateral` in FPN. (#1240) - Test the compiling in CI. (#1235) - Move docs to a separate folder. (#1233) - Add a jupyter notebook demo. (#1158) - Support different type of dataset for training. (#1133) - Use int64_t instead of long in cuda kernels. (#1131) - Support unsquare RoIs for bbox and mask heads. (#1128) - Manually add type promotion to make compatible to PyTorch 1.2. (#1114) - Allowing validation dataset for computing validation loss. (#1093) - Use `.scalar_type()` instead of `.type()` to suppress some warnings. (#1070) __New Features__ - Add an option `--with_ap` to compute the AP for each class. (#1549) - Implement "FreeAnchor: Learning to Match Anchors for Visual Object Detection". (#1391) - Support [Albumentations](https://github.com/albumentations-team/albumentations) for augmentations in the data pipeline. (#1354) - Implement "FoveaBox: Beyond Anchor-based Object Detector". (#1339) - Support horizontal and vertical flipping. (#1273, #1115) - Implement "RepPoints: Point Set Representation for Object Detection". (#1265) - Add test-time augmentation to HTC and Cascade R-CNN. (#1251) - Add a COCO result analysis tool. (#1228) - Add Dockerfile. (#1168) - Add a webcam demo. (#1155, #1150) - Add FLOPs counter. (#1127) - Allow arbitrary layer order for ConvModule. (#1078) ### v1.0rc0 (27/07/2019) - Implement lots of new methods and components (Mixed Precision Training, HTC, Libra R-CNN, Guided Anchoring, Empirical Attention, Mask Scoring R-CNN, Grid R-CNN (Plus), GHM, GCNet, FCOS, HRNet, Weight Standardization, etc.). Thank all collaborators! - Support two additional datasets: WIDER FACE and Cityscapes. - Refactoring for loss APIs and make it more flexible to adopt different losses and related hyper-parameters. - Speed up multi-gpu testing. - Integrate all compiling and installing in a single script. ### v0.6.0 (14/04/2019) - Up to 30% speedup compared to the model zoo. - Support both PyTorch stable and nightly version. - Replace NMS and SigmoidFocalLoss with Pytorch CUDA extensions. ### v0.6rc0(06/02/2019) - Migrate to PyTorch 1.0. ### v0.5.7 (06/02/2019) - Add support for Deformable ConvNet v2. (Many thanks to the authors and [@chengdazhi](https://github.com/chengdazhi)) - This is the last release based on PyTorch 0.4.1. ### v0.5.6 (17/01/2019) - Add support for Group Normalization. - Unify RPNHead and single stage heads (RetinaHead, SSDHead) with AnchorHead. ### v0.5.5 (22/12/2018) - Add SSD for COCO and PASCAL VOC. - Add ResNeXt backbones and detection models. - Refactoring for Samplers/Assigners and add OHEM. - Add VOC dataset and evaluation scripts. ### v0.5.4 (27/11/2018) - Add SingleStageDetector and RetinaNet. ### v0.5.3 (26/11/2018) - Add Cascade R-CNN and Cascade Mask R-CNN. - Add support for Soft-NMS in config files. ### v0.5.2 (21/10/2018) - Add support for custom datasets. - Add a script to convert PASCAL VOC annotations to the expected format. ### v0.5.1 (20/10/2018) - Add BBoxAssigner and BBoxSampler, the `train_cfg` field in config files are restructured. - `ConvFCRoIHead` / `SharedFCRoIHead` are renamed to `ConvFCBBoxHead` / `SharedFCBBoxHead` for consistency.
84,827
43.693361
754
md
mmdetection
mmdetection-master/docs/en/compatibility.md
# Compatibility of MMDetection 2.x ## MMDetection 2.25.0 In order to support Mask2Former for instance segmentation, the original config files of Mask2Former for panoptic segmentation need to be renamed; see [PR #7571](https://github.com/open-mmlab/mmdetection/pull/7571). <table align="center"> <thead> <tr align='center'> <td>before v2.25.0</td> <td>after v2.25.0</td> </tr> </thead> <tbody><tr valign='top'> <th> ``` 'mask2former_xxx_coco.py' represents config files for **panoptic segmentation**. ``` </th> <th> ``` 'mask2former_xxx_coco.py' represents config files for **instance segmentation**. 'mask2former_xxx_coco-panoptic.py' represents config files for **panoptic segmentation**. ``` </th></tr> </tbody></table> ## MMDetection 2.21.0 In order to support CPU training, the logic of scatter in batch collating has been changed. We recommend using MMCV v1.4.4 or higher. For more details, please refer to [MMCV PR #1621](https://github.com/open-mmlab/mmcv/pull/1621). ## MMDetection 2.18.1 ### MMCV compatibility In order to fix the wrong weight reference bug in BaseTransformerLayer, the logic in batch first mode of MultiheadAttention has been changed. We recommend using MMCV v1.3.17 or higher. For more details, please refer to [MMCV PR #1418](https://github.com/open-mmlab/mmcv/pull/1418). ## MMDetection 2.18.0 ### DIIHead compatibility In order to support QueryInst, attn_feats is added into the returned tuple of DIIHead. ## MMDetection 2.14.0 ### MMCV Version In order to fix the problem that the priority of EvalHook is too low, all hook priorities have been re-adjusted in 1.3.8, so MMDetection 2.14.0 needs to rely on the latest MMCV 1.3.8 version. For related information, please refer to [#1120](https://github.com/open-mmlab/mmcv/pull/1120); for related issues, please refer to [#5343](https://github.com/open-mmlab/mmdetection/issues/5343). ### SSD compatibility In v2.14.0, to make SSD more flexible to use, [PR5291](https://github.com/open-mmlab/mmdetection/pull/5291) refactored its backbone, neck and head. The users can use the script `tools/model_converters/upgrade_ssd_version.py` to convert their models. ```bash python tools/model_converters/upgrade_ssd_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH} ``` - OLD_MODEL_PATH: the path to load the old version SSD model. - NEW_MODEL_PATH: the path to save the converted model weights. ## MMDetection 2.12.0 MMDetection is going through a big refactoring for more general and convenient usage during the releases from v2.12.0 to v2.18.0 (maybe longer). In v2.12.0 MMDetection inevitably brings some BC-breaking changes, including the MMCV dependency, model initialization, model registry, and mask AP evaluation. ### MMCV Version MMDetection v2.12.0 relies on the newest features in MMCV 1.3.3, including `BaseModule` for unified parameter initialization, model registry, and the CUDA operator `MultiScaleDeformableAttn` for [Deformable DETR](https://arxiv.org/abs/2010.04159). Note that MMCV 1.3.2 already contains all the features used by MMDet but has known issues. Therefore, we recommend that users skip MMCV v1.3.2 and use v1.3.3, though v1.3.2 might work for most cases. ### Unified model initialization To unify the parameter initialization in OpenMMLab projects, MMCV supports `BaseModule` that accepts `init_cfg` to allow the modules' parameters to be initialized in a flexible and unified manner.
Now the users need to explicitly call `model.init_weights()` in the training script to initialize the model (as in [here](https://github.com/open-mmlab/mmdetection/blob/master/tools/train.py#L162), previously this was handled by the detector. **The downstream projects must update their model initialization accordingly to use MMDetection v2.12.0**. Please refer to PR #4750 for details. ### Unified model registry To easily use backbones implemented in other OpenMMLab projects, MMDetection v2.12.0 inherits the model registry created in MMCV (#760). In this way, as long as the backbone is supported in an OpenMMLab project and that project also uses the registry in MMCV, users can use that backbone in MMDetection by simply modifying the config without copying the code of that backbone into MMDetection. Please refer to PR #5059 for more details. ### Mask AP evaluation Before [PR 4898](https://github.com/open-mmlab/mmdetection/pull/4898) and V2.12.0, the mask AP of small, medium, and large instances is calculated based on the bounding box area rather than the real mask area. This leads to higher `APs` and `APm` but lower `APl` but will not affect the overall mask AP. [PR 4898](https://github.com/open-mmlab/mmdetection/pull/4898) change it to use mask areas by deleting `bbox` in mask AP calculation. The new calculation does not affect the overall mask AP evaluation and is consistent with [Detectron2](https://github.com/facebookresearch/detectron2/). ## Compatibility with MMDetection 1.x MMDetection 2.0 goes through a big refactoring and addresses many legacy issues. It is not compatible with the 1.x version, i.e., running inference with the same model weights in these two versions will produce different results. Thus, MMDetection 2.0 re-benchmarks all the models and provides their links and logs in the model zoo. The major differences are in four folds: coordinate system, codebase conventions, training hyperparameters, and modular design. ### Coordinate System The new coordinate system is consistent with [Detectron2](https://github.com/facebookresearch/detectron2/) and treats the center of the most left-top pixel as (0, 0) rather than the left-top corner of that pixel. Accordingly, the system interprets the coordinates in COCO bounding box and segmentation annotations as coordinates in range `[0, width]` or `[0, height]`. This modification affects all the computation related to the bbox and pixel selection, which is more natural and accurate. - The height and width of a box with corners (x1, y1) and (x2, y2) in the new coordinate system is computed as `width = x2 - x1` and `height = y2 - y1`. In MMDetection 1.x and previous version, a "+ 1" was added both height and width. This modification are in three folds: 1. Box transformation and encoding/decoding in regression. 2. IoU calculation. This affects the matching process between ground truth and bounding box and the NMS process. The effect to compatibility is very negligible, though. 3. The corners of bounding box is in float type and no longer quantized. This should provide more accurate bounding box results. This also makes the bounding box and RoIs not required to have minimum size of 1, whose effect is small, though. - The anchors are center-aligned to feature grid points and in float type. In MMDetection 1.x and previous version, the anchors are in `int` type and not center-aligned. This affects the anchor generation in RPN and all the anchor-based methods. - ROIAlign is better aligned with the image coordinate system. 
The new implementation is adopted from [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/detectron2/layers/csrc/ROIAlign). The RoIs are shifted by half a pixel by default when they are used to cropping RoI features, compared to MMDetection 1.x. The old behavior is still available by setting `aligned=False` instead of `aligned=True`. - Mask cropping and pasting are more accurate. 1. We use the new RoIAlign to crop mask targets. In MMDetection 1.x, the bounding box is quantized before it is used to crop mask target, and the crop process is implemented by numpy. In new implementation, the bounding box for crop is not quantized and sent to RoIAlign. This implementation accelerates the training speed by a large margin (~0.1s per iter, ~2 hour when training Mask R50 for 1x schedule) and should be more accurate. 2. In MMDetection 2.0, the "`paste_mask()`" function is different and should be more accurate than those in previous versions. This change follows the modification in [Detectron2](https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/masks.py) and can improve mask AP on COCO by ~0.5% absolute. ### Codebase Conventions - MMDetection 2.0 changes the order of class labels to reduce unused parameters in regression and mask branch more naturally (without +1 and -1). This effect all the classification layers of the model to have a different ordering of class labels. The final layers of regression branch and mask head no longer keep K+1 channels for K categories, and their class orders are consistent with the classification branch. - In MMDetection 2.0, label "K" means background, and labels \[0, K-1\] correspond to the K = num_categories object categories. - In MMDetection 1.x and previous version, label "0" means background, and labels \[1, K\] correspond to the K categories. - **Note**: The class order of softmax RPN is still the same as that in 1.x in versions\<=2.4.0 while sigmoid RPN is not affected. The class orders in all heads are unified since MMDetection v2.5.0. - Low quality matching in R-CNN is not used. In MMDetection 1.x and previous versions, the `max_iou_assigner` will match low quality boxes for each ground truth box in both RPN and R-CNN training. We observe this sometimes does not assign the most perfect GT box to some bounding boxes, thus MMDetection 2.0 do not allow low quality matching by default in R-CNN training in the new system. This sometimes may slightly improve the box AP (~0.1% absolute). - Separate scale factors for width and height. In MMDetection 1.x and previous versions, the scale factor is a single float in mode `keep_ratio=True`. This is slightly inaccurate because the scale factors for width and height have slight difference. MMDetection 2.0 adopts separate scale factors for width and height, the improvement on AP ~0.1% absolute. - Configs name conventions are changed. MMDetection V2.0 adopts the new name convention to maintain the gradually growing model zoo as the following: ```shell [model]_(model setting)_[backbone]_[neck]_(norm setting)_(misc)_(gpu x batch)_[schedule]_[dataset].py, ``` where the (`misc`) includes DCN and GCBlock, etc. More details are illustrated in the [documentation for config](tutorials/config) - MMDetection V2.0 uses new ResNet Caffe backbones to reduce warnings when loading pre-trained models. Most of the new backbones' weights are the same as the former ones but do not have `conv.bias`, except that they use a different `img_norm_cfg`. 
Thus, the new backbone will not cause warnings about unexpected keys. ### Training Hyperparameters The change in training hyperparameters does not affect model-level compatibility but slightly improves the performance. The major ones are: - The number of proposals after nms is changed from 2000 to 1000 by setting `nms_post=1000` and `max_num=1000`. This slightly improves both mask AP and bbox AP by ~0.2% absolute. - The default box regression losses for Mask R-CNN, Faster R-CNN and RetinaNet are changed from smooth L1 Loss to L1 loss. This leads to an overall improvement in box AP (~0.6% absolute). However, using L1 loss for other methods such as Cascade R-CNN and HTC does not improve the performance, so we keep the original settings for these methods. - The sample num of the RoIAlign layer is set to 0 for simplicity. This leads to a slight improvement in mask AP (~0.2% absolute). - The default setting does not use gradient clipping anymore during training for faster training speed. This does not degrade the performance of most models. For some models such as RepPoints, we keep using gradient clipping to stabilize the training process and to obtain better performance. - The default warmup ratio is changed from 1/3 to 0.001 for a smoother warm-up process since gradient clipping is usually not used. The effect is found negligible during our re-benchmarking, though. ### Upgrade Models from 1.x to 2.0 To convert models trained by MMDetection V1.x to MMDetection V2.0, users can use the script `tools/model_converters/upgrade_model_version.py`. The converted models can be run in MMDetection V2.0 with slightly dropped performance (less than 1% AP absolute). Details can be found in `configs/legacy`. ## pycocotools compatibility `mmpycocotools` is OpenMMLab's fork of the official `pycocotools`, which works for both MMDetection and Detectron2. Before [PR 4939](https://github.com/open-mmlab/mmdetection/pull/4939), since `pycocotools` and `mmpycocotools` have the same package name, if users had already installed `pycocotools` (e.g., by installing Detectron2 first in the same environment), then the setup of MMDetection would skip installing `mmpycocotools`. Thus MMDetection failed due to the missing `mmpycocotools`. If MMDetection was installed before Detectron2, they could work in the same environment. [PR 4939](https://github.com/open-mmlab/mmdetection/pull/4939) deprecates mmpycocotools in favor of the official pycocotools. Users may install MMDetection and Detectron2 in the same environment after [PR 4939](https://github.com/open-mmlab/mmdetection/pull/4939), no matter what the installation order is.
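As a small supplement to the coordinate-system and label-order changes described above, the following snippet is an illustration only (plain Python, not an MMDetection API) of how box sizes and class labels map between the 1.x and 2.0 conventions; the helper names are made up for this sketch.

```python
# Illustrative only: these helpers are not part of MMDetection, they just
# restate the 1.x -> 2.0 conventions documented in this file.


def box_size_v2(x1, y1, x2, y2):
    """MMDetection 2.0: width/height without the legacy '+ 1'."""
    return x2 - x1, y2 - y1


def box_size_v1(x1, y1, x2, y2):
    """MMDetection 1.x: a '+ 1' was added to both width and height."""
    return x2 - x1 + 1, y2 - y1 + 1


def label_v1_to_v2(label_v1, num_classes):
    """1.x: 0 = background, 1..K = objects. 2.0: 0..K-1 = objects, K = background."""
    return num_classes if label_v1 == 0 else label_v1 - 1


print(box_size_v1(10.0, 10.0, 20.0, 20.0))  # (11.0, 11.0) under the old convention
print(box_size_v2(10.0, 10.0, 20.0, 20.0))  # (10.0, 10.0) under the new convention
print(label_v1_to_v2(1, num_classes=80))    # first object class: 1 -> 0
print(label_v1_to_v2(0, num_classes=80))    # background: 0 -> 80
```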
13,244
72.994413
578
md
mmdetection
mmdetection-master/docs/en/conf.py
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import subprocess import sys import pytorch_sphinx_theme sys.path.insert(0, os.path.abspath('../..')) # -- Project information ----------------------------------------------------- project = 'MMDetection' copyright = '2018-2021, OpenMMLab' author = 'MMDetection Authors' version_file = '../../mmdet/version.py' def get_version(): with open(version_file, 'r') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] # The full version, including alpha/beta/rc tags release = get_version() # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'myst_parser', 'sphinx_markdown_tables', 'sphinx_copybutton', ] myst_enable_extensions = ['colon_fence'] myst_heading_anchors = 3 autodoc_mock_imports = [ 'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = { '.rst': 'restructuredtext', '.md': 'markdown', } # The master toctree document. master_doc = 'index' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'sphinx_rtd_theme' html_theme = 'pytorch_sphinx_theme' html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] html_theme_options = { 'menu': [ { 'name': 'GitHub', 'url': 'https://github.com/open-mmlab/mmdetection' }, ], # Specify the language of shared menu 'menu_lang': 'en' } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_css_files = ['css/readthedocs.css'] # -- Extension configuration ------------------------------------------------- # Ignore >>> when copying code copybutton_prompt_text = r'>>> |\.\.\. ' copybutton_prompt_is_regexp = True def builder_inited_handler(app): subprocess.run(['./stat.py']) def setup(app): app.connect('builder-inited', builder_inited_handler)
3,439
28.401709
79
py
mmdetection
mmdetection-master/docs/en/conventions.md
# Conventions Please check the following conventions if you would like to modify MMDetection as your own project. ## Loss In MMDetection, a `dict` containing losses and metrics will be returned by `model(**data)`. For example, in bbox head, ```python class BBoxHead(nn.Module): ... def loss(self, ...): losses = dict() # classification loss losses['loss_cls'] = self.loss_cls(...) # classification accuracy losses['acc'] = accuracy(...) # bbox regression loss losses['loss_bbox'] = self.loss_bbox(...) return losses ``` `bbox_head.loss()` will be called during model forward. The returned dict contains `'loss_bbox'`, `'loss_cls'`, `'acc'` . Only `'loss_bbox'`, `'loss_cls'` will be used during back propagation, `'acc'` will only be used as a metric to monitor training process. By default, only values whose keys contain `'loss'` will be back propagated. This behavior could be changed by modifying `BaseDetector.train_step()`. ## Empty Proposals In MMDetection, We have added special handling and unit test for empty proposals of two-stage. We need to deal with the empty proposals of the entire batch and single image at the same time. For example, in CascadeRoIHead, ```python # simple_test method ... # There is no proposal in the whole batch if rois.shape[0] == 0: bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head[-1].num_classes) ]] * num_imgs if self.with_mask: mask_classes = self.mask_head[-1].num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] results = list(zip(bbox_results, segm_results)) else: results = bbox_results return results ... # There is no proposal in the single image for i in range(self.num_stages): ... if i < self.num_stages - 1: for j in range(num_imgs): # Handle empty proposal if rois[j].shape[0] > 0: bbox_label = cls_score[j][:, :-1].argmax(dim=1) refine_roi = self.bbox_head[i].regress_by_class( rois[j], bbox_label, bbox_pred[j], img_metas[j]) refine_roi_list.append(refine_roi) ``` If you have customized `RoIHead`, you can refer to the above method to deal with empty proposals. ## Coco Panoptic Dataset In MMDetection, we have supported COCO Panoptic dataset. We clarify a few conventions about the implementation of `CocoPanopticDataset` here. 1. For mmdet\<=2.16.0, the range of foreground and background labels in semantic segmentation are different from the default setting of MMDetection. The label `0` stands for `VOID` label and the category labels start from `1`. Since mmdet=2.17.0, the category labels of semantic segmentation start from `0` and label `255` stands for `VOID` for consistency with labels of bounding boxes. To achieve that, the `Pad` pipeline supports setting the padding value for `seg`. 2. In the evaluation, the panoptic result is a map with the same shape as the original image. Each value in the result map has the format of `instance_id * INSTANCE_OFFSET + category_id`.
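As a hedged sketch of the panoptic convention above, the snippet below decodes such a result map back into per-pixel category and instance ids. The arithmetic simply inverts `instance_id * INSTANCE_OFFSET + category_id`; the hard-coded `INSTANCE_OFFSET` value and any import path for it are assumptions that should be checked against your installed MMDetection version.

```python
import numpy as np

# Assumption: MMDetection exposes INSTANCE_OFFSET (commonly 1000 in 2.x);
# verify the value / import location in your installed version instead of
# relying on this placeholder.
INSTANCE_OFFSET = 1000


def decode_panoptic(pan_map: np.ndarray):
    """Split a panoptic result map into per-pixel category and instance ids."""
    category_ids = pan_map % INSTANCE_OFFSET
    instance_ids = pan_map // INSTANCE_OFFSET
    return category_ids, instance_ids


# Toy map: one pixel of instance 3 / category 17, one VOID pixel (label 255).
toy = np.array([[3 * INSTANCE_OFFSET + 17, 255]])
cats, insts = decode_panoptic(toy)
print(cats)   # [[ 17 255]]
print(insts)  # [[3 0]]
```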
3,252
40.177215
226
md
mmdetection
mmdetection-master/docs/en/faq.md
# Frequently Asked Questions We list some common troubles faced by many users and their corresponding solutions here. Feel free to enrich the list if you find any frequent issues and have ways to help others to solve them. If the contents here do not cover your issue, please create an issue using the [provided templates](https://github.com/open-mmlab/mmdetection/blob/master/.github/ISSUE_TEMPLATE/error-report.md/) and make sure you fill in all required information in the template. ## Installation - Compatibility issue between MMCV and MMDetection; "ConvWS is already registered in conv layer"; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." Compatible MMDetection and MMCV versions are shown as below. Please choose the correct version of MMCV to avoid installation issues. | MMDetection version | MMCV version | | :-----------------: | :------------------------: | | master | mmcv-full>=1.3.17, \<1.8.0 | | 2.28.2 | mmcv-full>=1.3.17, \<1.8.0 | | 2.28.1 | mmcv-full>=1.3.17, \<1.8.0 | | 2.28.0 | mmcv-full>=1.3.17, \<1.8.0 | | 2.27.0 | mmcv-full>=1.3.17, \<1.8.0 | | 2.26.0 | mmcv-full>=1.3.17, \<1.8.0 | | 2.25.3 | mmcv-full>=1.3.17, \<1.7.0 | | 2.25.2 | mmcv-full>=1.3.17, \<1.7.0 | | 2.25.1 | mmcv-full>=1.3.17, \<1.6.0 | | 2.25.0 | mmcv-full>=1.3.17, \<1.6.0 | | 2.24.1 | mmcv-full>=1.3.17, \<1.6.0 | | 2.24.0 | mmcv-full>=1.3.17, \<1.6.0 | | 2.23.0 | mmcv-full>=1.3.17, \<1.5.0 | | 2.22.0 | mmcv-full>=1.3.17, \<1.5.0 | | 2.21.0 | mmcv-full>=1.3.17, \<1.5.0 | | 2.20.0 | mmcv-full>=1.3.17, \<1.5.0 | | 2.19.1 | mmcv-full>=1.3.17, \<1.5.0 | | 2.19.0 | mmcv-full>=1.3.17, \<1.5.0 | | 2.18.0 | mmcv-full>=1.3.17, \<1.4.0 | | 2.17.0 | mmcv-full>=1.3.14, \<1.4.0 | | 2.16.0 | mmcv-full>=1.3.8, \<1.4.0 | | 2.15.1 | mmcv-full>=1.3.8, \<1.4.0 | | 2.15.0 | mmcv-full>=1.3.8, \<1.4.0 | | 2.14.0 | mmcv-full>=1.3.8, \<1.4.0 | | 2.13.0 | mmcv-full>=1.3.3, \<1.4.0 | | 2.12.0 | mmcv-full>=1.3.3, \<1.4.0 | | 2.11.0 | mmcv-full>=1.2.4, \<1.4.0 | | 2.10.0 | mmcv-full>=1.2.4, \<1.4.0 | | 2.9.0 | mmcv-full>=1.2.4, \<1.4.0 | | 2.8.0 | mmcv-full>=1.2.4, \<1.4.0 | | 2.7.0 | mmcv-full>=1.1.5, \<1.4.0 | | 2.6.0 | mmcv-full>=1.1.5, \<1.4.0 | | 2.5.0 | mmcv-full>=1.1.5, \<1.4.0 | | 2.4.0 | mmcv-full>=1.1.1, \<1.4.0 | | 2.3.0 | mmcv-full==1.0.5 | | 2.3.0rc0 | mmcv-full>=1.0.2 | | 2.2.1 | mmcv==0.6.2 | | 2.2.0 | mmcv==0.6.2 | | 2.1.0 | mmcv>=0.5.9, \<=0.6.1 | | 2.0.0 | mmcv>=0.5.1, \<=0.5.8 | - "No module named 'mmcv.ops'"; "No module named 'mmcv.\_ext'". 1. Uninstall existing mmcv in the environment using `pip uninstall mmcv`. 2. Install mmcv-full following the [installation instruction](get_started#best-practices). - Using albumentations If you would like to use `albumentations`, we suggest using `pip install -r requirements/albu.txt` or `pip install -U albumentations --no-binary qudida,albumentations`. If you simply use `pip install albumentations>=0.3.2`, it will install `opencv-python-headless` simultaneously (even though you have already installed `opencv-python`). Please refer to the [official documentation](https://albumentations.ai/docs/getting_started/installation/#note-on-opencv-dependencies) for details. - ModuleNotFoundError is raised when using some algorithms Some extra dependencies are required for Instaboost, Panoptic Segmentation, LVIS dataset, etc. 
Please note the error message and install corresponding packages, e.g., ```shell # for instaboost pip install instaboostfast # for panoptic segmentation pip install git+https://github.com/cocodataset/panopticapi.git # for LVIS dataset pip install git+https://github.com/lvis-dataset/lvis-api.git ``` ## Coding - Do I need to reinstall mmdet after some code modifications If you follow the best practice and install mmdet with `pip install -e .`, any local modifications made to the code will take effect without reinstallation. - How to develop with multiple MMDetection versions You can have multiple folders like mmdet-2.21, mmdet-2.22. When you run the train or test script, it will adopt the mmdet package in the current folder. To use the default MMDetection installed in the environment rather than the one you are working with, you can remove the following line in those scripts: ```shell PYTHONPATH="$(dirname $0)/..":$PYTHONPATH ``` ## PyTorch/CUDA Environment - "RTX 30 series card fails when building MMCV or MMDet" 1. Temporary work-around: do `MMCV_WITH_OPS=1 MMCV_CUDA_ARGS='-gencode=arch=compute_80,code=sm_80' pip install -e .`. The common issue is `nvcc fatal : Unsupported gpu architecture 'compute_86'`. This means that the compiler should optimize for sm_86, i.e., nvidia 30 series card, but such optimizations have not been supported by CUDA toolkit 11.0. This work-around modifies the compile flag by adding `MMCV_CUDA_ARGS='-gencode=arch=compute_80,code=sm_80'`, which tells `nvcc` to optimize for **sm_80**, i.e., Nvidia A100. Although A100 is different from the 30 series card, they use similar ampere architecture. This may hurt the performance but it works. 2. PyTorch developers have updated that the default compiler flags should be fixed by [pytorch/pytorch#47585](https://github.com/pytorch/pytorch/pull/47585). So using PyTorch-nightly may also be able to solve the problem, though we have not tested it yet. - "invalid device function" or "no kernel image is available for execution". 1. Check if your cuda runtime version (under `/usr/local/`), `nvcc --version` and `conda list cudatoolkit` version match. 2. Run `python mmdet/utils/collect_env.py` to check whether PyTorch, torchvision, and MMCV are built for the correct GPU architecture. You may need to set `TORCH_CUDA_ARCH_LIST` to reinstall MMCV. The GPU arch table could be found [here](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list), i.e. run `TORCH_CUDA_ARCH_LIST=7.0 pip install mmcv-full` to build MMCV for Volta GPUs. The compatibility issue could happen when using old GPUS, e.g., Tesla K80 (3.7) on colab. 3. Check whether the running environment is the same as that when mmcv/mmdet has compiled. For example, you may compile mmcv using CUDA 10.0 but run it on CUDA 9.0 environments. - "undefined symbol" or "cannot open xxx.so". 1. If those symbols are CUDA/C++ symbols (e.g., libcudart.so or GLIBCXX), check whether the CUDA/GCC runtimes are the same as those used for compiling mmcv, i.e. run `python mmdet/utils/collect_env.py` to see if `"MMCV Compiler"`/`"MMCV CUDA Compiler"` is the same as `"GCC"`/`"CUDA_HOME"`. 2. If those symbols are PyTorch symbols (e.g., symbols containing caffe, aten, and TH), check whether the PyTorch version is the same as that used for compiling mmcv. 3. Run `python mmdet/utils/collect_env.py` to check whether PyTorch, torchvision, and MMCV are built by and running on the same environment. 
- setuptools.sandbox.UnpickleableException: DistutilsSetupError("each element of 'ext_modules' option must be an Extension instance or 2-tuple") 1. If you are using miniconda rather than anaconda, check whether Cython is installed as indicated in [#3379](https://github.com/open-mmlab/mmdetection/issues/3379). You need to manually install Cython first and then run the command `pip install -r requirements.txt`. 2. You may also need to check the compatibility between `setuptools`, `Cython`, and `PyTorch` in your environment. - "Segmentation fault". 1. Check your GCC version and use GCC 5.4. This is usually caused by an incompatibility between PyTorch and the environment (e.g., GCC \< 4.9 for PyTorch). We also recommend users avoid GCC 5.5 because many reports indicate that GCC 5.5 will cause a "segmentation fault" and simply changing it to GCC 5.4 could solve the problem. 2. Check whether PyTorch is correctly installed and can use CUDA ops, e.g., type the following command in your terminal. ```shell python -c 'import torch; print(torch.cuda.is_available())' ``` and see whether it outputs results correctly. 3. If PyTorch is correctly installed, check whether MMCV is correctly installed. ```shell python -c 'import mmcv; import mmcv.ops' ``` If MMCV is correctly installed, then the above two commands will raise no errors. 4. If MMCV and PyTorch are correctly installed, you may use `ipdb` or `pdb` to set breakpoints, or directly add 'print' statements in the mmdetection code, to see which part leads to the segmentation fault. ## Training - "Loss goes NaN" 1. Check if the dataset annotations are valid: zero-size bounding boxes will cause the regression loss to be NaN due to the commonly used transformation for box regression. Some small (width or height smaller than 1) boxes will also cause this problem after data augmentation (e.g., instaboost). So check the data, try to filter out those zero-size boxes, and skip some risky augmentations on the small-size boxes when you face the problem. 2. Reduce the learning rate: the learning rate might be too large due to some reason, e.g., a change of batch size. You can rescale it to a value that allows the model to train stably. 3. Extend the warmup iterations: some models are sensitive to the learning rate at the start of the training. You can extend the warmup iterations, e.g., change `warmup_iters` from 500 to 1000 or 2000. 4. Add gradient clipping: some models require gradient clipping to stabilize the training process. The default of `grad_clip` is `None`; you can add gradient clipping to avoid gradients that are too large, i.e., set `optimizer_config=dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))` in your config file. If your config does not inherit from any basic config that contains `optimizer_config=dict(grad_clip=None)`, you can simply add `optimizer_config=dict(grad_clip=dict(max_norm=35, norm_type=2))`. - "GPU out of memory" 1. There are some scenarios with a large number of ground truth boxes, which may cause OOM during target assignment. You can set `gpu_assign_thr=N` in the config of the assigner so that the assigner will calculate box overlaps on CPU when there are more than N GT boxes. 2. Set `with_cp=True` in the backbone. This uses the sublinear strategy in PyTorch to reduce GPU memory cost in the backbone. 3. Try mixed precision training following the examples in `configs/fp16`. The `loss_scale` might need further tuning for different models. 4.
Try to use `AvoidCUDAOOM` to avoid GPU out of memory errors. It will first retry after calling `torch.cuda.empty_cache()`. If it still fails, it will then retry by converting the type of inputs to FP16 format. If it still fails, it will try to copy inputs from GPUs to CPUs to continue computing. Try `AvoidCUDAOOM` in your code to make the code continue to run when GPU memory runs out: ```python from mmdet.utils import AvoidCUDAOOM output = AvoidCUDAOOM.retry_if_cuda_oom(some_function)(input1, input2) ``` You can also try `AvoidCUDAOOM` as a decorator to make the code continue to run when GPU memory runs out: ```python from mmdet.utils import AvoidCUDAOOM @AvoidCUDAOOM.retry_if_cuda_oom def function(*args, **kwargs): ... return xxx ``` - "RuntimeError: Expected to have finished reduction in the prior iteration before starting a new one" 1. This error indicates that your module has parameters that were not used in producing loss. This phenomenon may be caused by running different branches in your code in DDP mode. 2. You can set `find_unused_parameters = True` in the config to solve the above problem (but this will slow down the training speed). 3. If your MMCV version is >= 1.4.1, you can get the names of those unused parameters by setting `detect_anomalous_params=True` in the `optimizer_config` of the config. - Save the best model It can be turned on by configuring `evaluation = dict(save_best='auto')`. In the case of the `auto` parameter, the first key in the returned evaluation result will be used as the basis for selecting the best model. You can also directly set the key in the evaluation result to manually set it, for example, `evaluation = dict(save_best='mAP')`. - Resume training with `ExpMomentumEMAHook` If you use `ExpMomentumEMAHook` in training, you can't just use the command line parameters `--resume-from` or `--cfg-options resume_from` to restore model parameters during resume, i.e., the command `python tools/train.py configs/yolox/yolox_s_8x8_300e_coco.py --resume-from ./work_dir/yolox_s_8x8_300e_coco/epoch_x.pth` will not work. Since `ExpMomentumEMAHook` needs to reload the weights, taking the `yolox_s` algorithm as an example, you should modify the values of `resume_from` in two places of the config as below: ```python # Open configs/yolox/yolox_s_8x8_300e_coco.py directly and modify all resume_from fields resume_from = './work_dir/yolox_s_8x8_300e_coco/epoch_x.pth' custom_hooks = [... dict( type='ExpMomentumEMAHook', resume_from='./work_dir/yolox_s_8x8_300e_coco/epoch_x.pth', momentum=0.0001, priority=49) ] ``` ## Evaluation - COCO Dataset, AP or AR = -1 1. According to the definition of the COCO dataset, the small and medium areas in an image are less than 1024 (32\*32) and 9216 (96\*96), respectively. 2. If the corresponding area has no object, the result of AP and AR will be set to -1. ## Model - `style` in ResNet The `style` parameter in ResNet allows either `pytorch` or `caffe` style. It indicates the difference in the Bottleneck module. Bottleneck is a stacking structure of `1x1-3x3-1x1` convolutional layers. In the case of `caffe` mode, the convolution layer with `stride=2` is the first `1x1` convolution, while in `pytorch` mode, it is the second `3x3` convolution that has `stride=2`. A code sample is shown below: ```python if self.style == 'pytorch': self.conv1_stride = 1 self.conv2_stride = stride else: self.conv1_stride = stride self.conv2_stride = 1 ``` - ResNeXt parameter description ResNeXt comes from the paper [`Aggregated Residual Transformations for Deep Neural Networks`](https://arxiv.org/abs/1611.05431).
It introduces groups and uses "cardinality" to control the number of groups, achieving a balance between accuracy and complexity. It controls the basic width and grouping parameters of the internal Bottleneck module through two hyperparameters, `baseWidth` and `cardinality`. An example configuration name in MMDetection is `mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py`, where `mask_rcnn` represents the algorithm using Mask R-CNN, `x101` represents the backbone network using ResNeXt-101, and `64x4d` represents that the bottleneck block has 64 groups and each group has a basic width of 4. - `norm_eval` in backbone Since the detection model is usually large and the input image resolution is high, this results in a small batch size for the detection model, which makes the variance of the statistics calculated by BatchNorm during training very large and not as stable as the statistics obtained during the pre-training of the backbone network. Therefore, the `norm_eval=True` mode is generally used in training, and the BatchNorm statistics of the pre-trained backbone network are directly used. The few algorithms that use large batch sizes, such as NAS-FPN, use the `norm_eval=False` mode. For a backbone network without ImageNet pre-training and with a relatively small batch size, you can consider using `SyncBN`.
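To make the `gpu_assign_thr` and gradient-clipping tips from the Training section above concrete, here is a minimal, hedged config excerpt. It is a sketch rather than a complete config: the assigner type and thresholds are placeholders, and only the fields relevant to those tips are shown.

```python
# Hedged config excerpt (not a full, runnable config): only the fields
# relevant to the OOM / NaN tips above are shown; thresholds are placeholders.
model = dict(
    train_cfg=dict(
        assigner=dict(
            type='MaxIoUAssigner',
            pos_iou_thr=0.5,
            neg_iou_thr=0.4,
            min_pos_iou=0.0,
            # Assign on CPU when an image has more than 100 GT boxes,
            # trading speed for GPU memory.
            gpu_assign_thr=100)))

# Gradient clipping to stabilize training; use _delete_=True when the base
# config already defines optimizer_config=dict(grad_clip=None).
optimizer_config = dict(
    _delete_=True, grad_clip=dict(max_norm=35, norm_type=2))
```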
16,218
66.298755
723
md
mmdetection
mmdetection-master/docs/en/get_started.md
# Prerequisites In this section we demonstrate how to prepare an environment with PyTorch. MMDetection works on Linux, Windows and macOS. It requires Python 3.7+, CUDA 9.2+ and PyTorch 1.5+. ```{note} If you are experienced with PyTorch and have already installed it, just skip this part and jump to the [next section](#installation). Otherwise, you can follow these steps for the preparation. ``` **Step 0.** Download and install Miniconda from the [official website](https://docs.conda.io/en/latest/miniconda.html). **Step 1.** Create a conda environment and activate it. ```shell conda create --name openmmlab python=3.8 -y conda activate openmmlab ``` **Step 2.** Install PyTorch following [official instructions](https://pytorch.org/get-started/locally/), e.g. On GPU platforms: ```shell conda install pytorch torchvision -c pytorch ``` On CPU platforms: ```shell conda install pytorch torchvision cpuonly -c pytorch ``` # Installation We recommend that users follow our best practices to install MMDetection. However, the whole process is highly customizable. See [Customize Installation](#customize-installation) section for more information. ## Best Practices **Step 0.** Install [MMCV](https://github.com/open-mmlab/mmcv) using [MIM](https://github.com/open-mmlab/mim). ```shell pip install -U openmim mim install mmcv-full ``` **Step 1.** Install MMDetection. Case a: If you develop and run mmdet directly, install it from source: ```shell git clone https://github.com/open-mmlab/mmdetection.git cd mmdetection pip install -v -e . # "-v" means verbose, or more output # "-e" means installing a project in editable mode, # thus any local modifications made to the code will take effect without reinstallation. ``` Case b: If you use mmdet as a dependency or third-party package, install it with pip: ```shell pip install mmdet ``` ## Verify the installation To verify whether MMDetection is installed correctly, we provide some sample codes to run an inference demo. **Step 1.** We need to download config and checkpoint files. ```shell mim download mmdet --config yolov3_mobilenetv2_320_300e_coco --dest . ``` The downloading will take several seconds or more, depending on your network environment. When it is done, you will find two files `yolov3_mobilenetv2_320_300e_coco.py` and `yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth` in your current folder. **Step 2.** Verify the inference demo. Option (a). If you install mmdetection from source, just run the following command. ```shell python demo/image_demo.py demo/demo.jpg yolov3_mobilenetv2_320_300e_coco.py yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth --device cpu --out-file result.jpg ``` You will see a new image `result.jpg` on your current folder, where bounding boxes are plotted on cars, benches, etc. Option (b). If you install mmdetection with pip, open you python interpreter and copy&paste the following codes. ```python from mmdet.apis import init_detector, inference_detector config_file = 'yolov3_mobilenetv2_320_300e_coco.py' checkpoint_file = 'yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth' model = init_detector(config_file, checkpoint_file, device='cpu') # or device='cuda:0' inference_detector(model, 'demo/demo.jpg') ``` You will see a list of arrays printed, indicating the detected bounding boxes. ## Customize Installation ### CUDA versions When installing PyTorch, you need to specify the version of CUDA. 
If you are not clear on which to choose, follow our recommendations: - For Ampere-based NVIDIA GPUs, such as GeForce 30 series and NVIDIA A100, CUDA 11 is a must. - For older NVIDIA GPUs, CUDA 11 is backward compatible, but CUDA 10.2 offers better compatibility and is more lightweight. Please make sure the GPU driver satisfies the minimum version requirements. See [this table](https://docs.nvidia.com/cuda/cuda-toolkit-release-notes/index.html#cuda-major-component-versions__table-cuda-toolkit-driver-versions) for more information. ```{note} Installing CUDA runtime libraries is enough if you follow our best practices, because no CUDA code will be compiled locally. However if you hope to compile MMCV from source or develop other CUDA operators, you need to install the complete CUDA toolkit from NVIDIA's [website](https://developer.nvidia.com/cuda-downloads), and its version should match the CUDA version of PyTorch. i.e., the specified version of cudatoolkit in `conda install` command. ``` ### Install MMCV without MIM MMCV contains C++ and CUDA extensions, thus depending on PyTorch in a complex way. MIM solves such dependencies automatically and makes the installation easier. However, it is not a must. To install MMCV with pip instead of MIM, please follow [MMCV installation guides](https://mmcv.readthedocs.io/en/latest/get_started/installation.html). This requires manually specifying a find-url based on PyTorch version and its CUDA version. For example, the following command install mmcv-full built for PyTorch 1.10.x and CUDA 11.3. ```shell pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu113/torch1.10/index.html ``` ### Install on CPU-only platforms MMDetection can be built for CPU only environment. In CPU mode you can train (requires MMCV version >= 1.4.4), test or inference a model. However some functionalities are gone in this mode: - Deformable Convolution - Modulated Deformable Convolution - ROI pooling - Deformable ROI pooling - CARAFE - SyncBatchNorm - CrissCrossAttention - MaskedConv2d - Temporal Interlace Shift - nms_cuda - sigmoid_focal_loss_cuda - bbox_overlaps If you try to train/test/inference a model containing above ops, an error will be raised. The following table lists affected algorithms. | Operator | Model | | :-----------------------------------------------------: | :--------------------------------------------------------------------------------------: | | Deformable Convolution/Modulated Deformable Convolution | DCN、Guided Anchoring、RepPoints、CentripetalNet、VFNet、CascadeRPN、NAS-FCOS、DetectoRS | | MaskedConv2d | Guided Anchoring | | CARAFE | CARAFE | | SyncBatchNorm | ResNeSt | ### Install on Google Colab [Google Colab](https://research.google.com/) usually has PyTorch installed, thus we only need to install MMCV and MMDetection with the following commands. **Step 1.** Install [MMCV](https://github.com/open-mmlab/mmcv) using [MIM](https://github.com/open-mmlab/mim). ```shell !pip3 install openmim !mim install mmcv-full ``` **Step 2.** Install MMDetection from the source. ```shell !git clone https://github.com/open-mmlab/mmdetection.git %cd mmdetection !pip install -e . ``` **Step 3.** Verification. ```python import mmdet print(mmdet.__version__) # Example output: 2.23.0 ``` ```{note} Within Jupyter, the exclamation mark `!` is used to call external executables and `%cd` is a [magic command](https://ipython.readthedocs.io/en/stable/interactive/magics.html#magic-cd) to change the current working directory of Python. 
``` ### Using MMDetection with Docker We provide a [Dockerfile](https://github.com/open-mmlab/mmdetection/blob/master/docker/Dockerfile) to build an image. Ensure that your [docker version](https://docs.docker.com/engine/install/) is >=19.03. ```shell # build an image with PyTorch 1.6, CUDA 10.1 # If you prefer other versions, just modify the Dockerfile docker build -t mmdetection docker/ ``` Run it with ```shell docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmdetection/data mmdetection ``` ## Troubleshooting If you have issues during installation, please first view the [FAQ](faq.md) page. You may [open an issue](https://github.com/open-mmlab/mmdetection/issues/new/choose) on GitHub if no solution is found.
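As a small follow-up to the pip-based verification above (which only prints raw arrays), the sketch below also renders the detections to an image file. It assumes the same config and checkpoint downloaded in Step 1 and uses the detector's `show_result` helper; adjust the score threshold and output path as needed.

```python
from mmdet.apis import init_detector, inference_detector

config_file = 'yolov3_mobilenetv2_320_300e_coco.py'
checkpoint_file = 'yolov3_mobilenetv2_320_300e_coco_20210719_215349-d18dff72.pth'

model = init_detector(config_file, checkpoint_file, device='cpu')  # or 'cuda:0'
result = inference_detector(model, 'demo/demo.jpg')

# Draw boxes above a score threshold and save them next to the input image.
model.show_result(
    'demo/demo.jpg', result, score_thr=0.3, out_file='result_pip.jpg')
```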
8,263
38.54067
450
md
mmdetection
mmdetection-master/docs/en/model_zoo.md
# Benchmark and Model Zoo ## Mirror sites We only use aliyun to maintain the model zoo since MMDetection V2.0. The model zoo of V1.x has been deprecated. ## Common settings - All models were trained on `coco_2017_train`, and tested on the `coco_2017_val`. - We use distributed training. - All pytorch-style pretrained backbones on ImageNet are from PyTorch model zoo, caffe-style pretrained backbones are converted from the newly released model from detectron2. - For fair comparison with other codebases, we report the GPU memory as the maximum value of `torch.cuda.max_memory_allocated()` for all 8 GPUs. Note that this value is usually less than what `nvidia-smi` shows. - We report the inference time as the total time of network forwarding and post-processing, excluding the data loading time. Results are obtained with the script [benchmark.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/benchmark.py) which computes the average time on 2000 images. ## ImageNet Pretrained Models It is common to initialize from backbone models pre-trained on ImageNet classification task. All pre-trained model links can be found at [open_mmlab](https://github.com/open-mmlab/mmcv/blob/master/mmcv/model_zoo/open_mmlab.json). According to `img_norm_cfg` and source of weight, we can divide all the ImageNet pre-trained model weights into some cases: - TorchVision: Corresponding to torchvision weight, including ResNet50, ResNet101. The `img_norm_cfg` is `dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)`. - Pycls: Corresponding to [pycls](https://github.com/facebookresearch/pycls) weight, including RegNetX. The `img_norm_cfg` is `dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)`. - MSRA styles: Corresponding to [MSRA](https://github.com/KaimingHe/deep-residual-networks) weights, including ResNet50_Caffe and ResNet101_Caffe. The `img_norm_cfg` is `dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)`. - Caffe2 styles: Currently only contains ResNext101_32x8d. The `img_norm_cfg` is `dict(mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False)`. - Other styles: E.g SSD which corresponds to `img_norm_cfg` is `dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)` and YOLOv3 which corresponds to `img_norm_cfg` is `dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)`. The detailed table of the commonly used backbone models in MMDetection is listed below : | model | source | link | description | | ---------------- | ----------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | ResNet50 | TorchVision | [torchvision's ResNet-50](https://download.pytorch.org/models/resnet50-19c8e357.pth) | From [torchvision's ResNet-50](https://download.pytorch.org/models/resnet50-19c8e357.pth). | | ResNet101 | TorchVision | [torchvision's ResNet-101](https://download.pytorch.org/models/resnet101-5d3b4d8f.pth) | From [torchvision's ResNet-101](https://download.pytorch.org/models/resnet101-5d3b4d8f.pth). 
| | RegNetX | Pycls | [RegNetX_3.2gf](https://download.openmmlab.com/pretrain/third_party/regnetx_3.2gf-c2599b0f.pth), [RegNetX_800mf](https://download.openmmlab.com/pretrain/third_party/regnetx_800mf-1f4be4c7.pth). etc. | From [pycls](https://github.com/facebookresearch/pycls). | | ResNet50_Caffe | MSRA | [MSRA's ResNet-50](https://download.openmmlab.com/pretrain/third_party/resnet50_caffe-788b5fa3.pth) | Converted copy of [Detectron2's R-50.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl) model. The original weight comes from [MSRA's original ResNet-50](https://github.com/KaimingHe/deep-residual-networks). | | ResNet101_Caffe | MSRA | [MSRA's ResNet-101](https://download.openmmlab.com/pretrain/third_party/resnet101_caffe-3ad79236.pth) | Converted copy of [Detectron2's R-101.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-101.pkl) model. The original weight comes from [MSRA's original ResNet-101](https://github.com/KaimingHe/deep-residual-networks). | | ResNext101_32x8d | Caffe2 | [Caffe2 ResNext101_32x8d](https://download.openmmlab.com/pretrain/third_party/resnext101_32x8d-1516f1aa.pth) | Converted copy of [Detectron2's X-101-32x8d.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/FAIR/X-101-32x8d.pkl) model. The ResNeXt-101-32x8d model trained with Caffe2 at FB. | ## Baselines ### RPN Please refer to [RPN](https://github.com/open-mmlab/mmdetection/blob/master/configs/rpn) for details. ### Faster R-CNN Please refer to [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn) for details. ### Mask R-CNN Please refer to [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn) for details. ### Fast R-CNN (with pre-computed proposals) Please refer to [Fast R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/fast_rcnn) for details. ### RetinaNet Please refer to [RetinaNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet) for details. ### Cascade R-CNN and Cascade Mask R-CNN Please refer to [Cascade R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/cascade_rcnn) for details. ### Hybrid Task Cascade (HTC) Please refer to [HTC](https://github.com/open-mmlab/mmdetection/blob/master/configs/htc) for details. ### SSD Please refer to [SSD](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd) for details. ### Group Normalization (GN) Please refer to [Group Normalization](https://github.com/open-mmlab/mmdetection/blob/master/configs/gn) for details. ### Weight Standardization Please refer to [Weight Standardization](https://github.com/open-mmlab/mmdetection/blob/master/configs/gn+ws) for details. ### Deformable Convolution v2 Please refer to [Deformable Convolutional Networks](https://github.com/open-mmlab/mmdetection/blob/master/configs/dcn) for details. ### CARAFE: Content-Aware ReAssembly of FEatures Please refer to [CARAFE](https://github.com/open-mmlab/mmdetection/blob/master/configs/carafe) for details. ### Instaboost Please refer to [Instaboost](https://github.com/open-mmlab/mmdetection/blob/master/configs/instaboost) for details. ### Libra R-CNN Please refer to [Libra R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/libra_rcnn) for details. ### Guided Anchoring Please refer to [Guided Anchoring](https://github.com/open-mmlab/mmdetection/blob/master/configs/guided_anchoring) for details. 
### FCOS Please refer to [FCOS](https://github.com/open-mmlab/mmdetection/blob/master/configs/fcos) for details. ### FoveaBox Please refer to [FoveaBox](https://github.com/open-mmlab/mmdetection/blob/master/configs/foveabox) for details. ### RepPoints Please refer to [RepPoints](https://github.com/open-mmlab/mmdetection/blob/master/configs/reppoints) for details. ### FreeAnchor Please refer to [FreeAnchor](https://github.com/open-mmlab/mmdetection/blob/master/configs/free_anchor) for details. ### Grid R-CNN (plus) Please refer to [Grid R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/grid_rcnn) for details. ### GHM Please refer to [GHM](https://github.com/open-mmlab/mmdetection/blob/master/configs/ghm) for details. ### GCNet Please refer to [GCNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/gcnet) for details. ### HRNet Please refer to [HRNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet) for details. ### Mask Scoring R-CNN Please refer to [Mask Scoring R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/ms_rcnn) for details. ### Train from Scratch Please refer to [Rethinking ImageNet Pre-training](https://github.com/open-mmlab/mmdetection/blob/master/configs/scratch) for details. ### NAS-FPN Please refer to [NAS-FPN](https://github.com/open-mmlab/mmdetection/blob/master/configs/nas_fpn) for details. ### ATSS Please refer to [ATSS](https://github.com/open-mmlab/mmdetection/blob/master/configs/atss) for details. ### FSAF Please refer to [FSAF](https://github.com/open-mmlab/mmdetection/blob/master/configs/fsaf) for details. ### RegNetX Please refer to [RegNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/regnet) for details. ### Res2Net Please refer to [Res2Net](https://github.com/open-mmlab/mmdetection/blob/master/configs/res2net) for details. ### GRoIE Please refer to [GRoIE](https://github.com/open-mmlab/mmdetection/blob/master/configs/groie) for details. ### Dynamic R-CNN Please refer to [Dynamic R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/dynamic_rcnn) for details. ### PointRend Please refer to [PointRend](https://github.com/open-mmlab/mmdetection/blob/master/configs/point_rend) for details. ### DetectoRS Please refer to [DetectoRS](https://github.com/open-mmlab/mmdetection/blob/master/configs/detectors) for details. ### Generalized Focal Loss Please refer to [Generalized Focal Loss](https://github.com/open-mmlab/mmdetection/blob/master/configs/gfl) for details. ### CornerNet Please refer to [CornerNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/cornernet) for details. ### YOLOv3 Please refer to [YOLOv3](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolo) for details. ### PAA Please refer to [PAA](https://github.com/open-mmlab/mmdetection/blob/master/configs/paa) for details. ### SABL Please refer to [SABL](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl) for details. ### CentripetalNet Please refer to [CentripetalNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/centripetalnet) for details. ### ResNeSt Please refer to [ResNeSt](https://github.com/open-mmlab/mmdetection/blob/master/configs/resnest) for details. ### DETR Please refer to [DETR](https://github.com/open-mmlab/mmdetection/blob/master/configs/detr) for details. ### Deformable DETR Please refer to [Deformable DETR](https://github.com/open-mmlab/mmdetection/blob/master/configs/deformable_detr) for details. 
### AutoAssign Please refer to [AutoAssign](https://github.com/open-mmlab/mmdetection/blob/master/configs/autoassign) for details. ### YOLOF Please refer to [YOLOF](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolof) for details. ### Seesaw Loss Please refer to [Seesaw Loss](https://github.com/open-mmlab/mmdetection/blob/master/configs/seesaw_loss) for details. ### CenterNet Please refer to [CenterNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/centernet) for details. ### YOLOX Please refer to [YOLOX](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolox) for details. ### PVT Please refer to [PVT](https://github.com/open-mmlab/mmdetection/blob/master/configs/pvt) for details. ### SOLO Please refer to [SOLO](https://github.com/open-mmlab/mmdetection/blob/master/configs/solo) for details. ### QueryInst Please refer to [QueryInst](https://github.com/open-mmlab/mmdetection/blob/master/configs/queryinst) for details. ### PanopticFPN Please refer to [PanopticFPN](https://github.com/open-mmlab/mmdetection/blob/master/configs/panoptic_fpn) for details. ### MaskFormer Please refer to [MaskFormer](https://github.com/open-mmlab/mmdetection/blob/master/configs/maskformer) for details. ### DyHead Please refer to [DyHead](https://github.com/open-mmlab/mmdetection/blob/master/configs/dyhead) for details. ### Mask2Former Please refer to [Mask2Former](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask2former) for details. ### Efficientnet Please refer to [Efficientnet](https://github.com/open-mmlab/mmdetection/blob/master/configs/efficientnet) for details. ### RF-Next Please refer to [RF-Next](https://github.com/open-mmlab/mmdetection/blob/master/configs/rfnext) for details. ### Other datasets We also benchmark some methods on [PASCAL VOC](https://github.com/open-mmlab/mmdetection/blob/master/configs/pascal_voc), [Cityscapes](https://github.com/open-mmlab/mmdetection/blob/master/configs/cityscapes), [OpenImages](https://github.com/open-mmlab/mmdetection/blob/master/configs/openimages) and [WIDER FACE](https://github.com/open-mmlab/mmdetection/blob/master/configs/wider_face). ### Pre-trained Models We also train [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn) and [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn) using ResNet-50 and [RegNetX-3.2G](https://github.com/open-mmlab/mmdetection/blob/master/configs/regnet) with multi-scale training and longer schedules. These models serve as strong pre-trained models for downstream tasks for convenience. ## Speed benchmark ### Training Speed benchmark We provide [analyze_logs.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/analyze_logs.py) to get average time of iteration in training. You can find examples in [Log Analysis](https://mmdetection.readthedocs.io/en/latest/useful_tools.html#log-analysis). We compare the training speed of Mask R-CNN with some other popular frameworks (The data is copied from [detectron2](https://github.com/facebookresearch/detectron2/blob/master/docs/notes/benchmarks.md/)). 
For mmdetection, we benchmark with [mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py), which should have the same setting as [mask_rcnn_R_50_FPN_noaug_1x.yaml](https://github.com/facebookresearch/detectron2/blob/master/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml) of Detectron2.
We also provide the [checkpoint](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug_compare_20200518-10127928.pth) and [training log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug_20200518_105755.log.json) for reference. The throughput is computed as the average throughput in iterations 100-500 to skip GPU warmup time.

| Implementation                                                                          | Throughput (img/s) |
| --------------------------------------------------------------------------------------- | ------------------ |
| [Detectron2](https://github.com/facebookresearch/detectron2)                             | 62                 |
| [MMDetection](https://github.com/open-mmlab/mmdetection)                                 | 61                 |
| [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/)            | 53                 |
| [tensorpack](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN)   | 50                 |
| [simpledet](https://github.com/TuSimple/simpledet/)                                      | 39                 |
| [Detectron](https://github.com/facebookresearch/Detectron)                               | 19                 |
| [matterport/Mask_RCNN](https://github.com/matterport/Mask_RCNN/)                         | 14                 |

### Inference Speed Benchmark

We provide [benchmark.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/benchmark.py) to benchmark the inference latency.
The script benchmarks the model with 2000 images and calculates the average time, ignoring the first 5 iterations. You can change the output log interval (default: 50) by setting `LOG-INTERVAL`.

```shell
python tools/analysis_tools/benchmark.py ${CONFIG} ${CHECKPOINT} [--log-interval ${LOG-INTERVAL}] [--fuse-conv-bn]
```

The latency of all models in our model zoo is benchmarked without setting `fuse-conv-bn`; you can get a lower latency by setting it.

## Comparison with Detectron2

We compare mmdetection with [Detectron2](https://github.com/facebookresearch/detectron2.git) in terms of speed and performance.
We use the Detectron2 commit id [185c27e](https://github.com/facebookresearch/detectron2/tree/185c27e4b4d2d4c68b5627b3765420c6d7f5a659) (30/4/2020).
For fair comparison, we install and run both frameworks on the same machine.
### Hardware - 8 NVIDIA Tesla V100 (32G) GPUs - Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz ### Software environment - Python 3.7 - PyTorch 1.4 - CUDA 10.1 - CUDNN 7.6.03 - NCCL 2.4.08 ### Performance | Type | Lr schd | Detectron2 | mmdetection | Download | | -------------------------------------------------------------------------------------------------------------------------------------- | ------- | -------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py) | 1x | [37.9](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml) | 38.0 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-5324cff8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco_20200429_234554.log.json) | | [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py) | 1x | [38.6 & 35.2](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml) | 38.8 & 35.4 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco-dbecf295.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco_20200430_054239.log.json) | | [Retinanet](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py) | 1x | [36.5](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml) | 37.0 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/retinanet_r50_caffe_fpn_mstrain_1x_coco/retinanet_r50_caffe_fpn_mstrain_1x_coco-586977a0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/retinanet_r50_caffe_fpn_mstrain_1x_coco/retinanet_r50_caffe_fpn_mstrain_1x_coco_20200430_014748.log.json) | ### Training Speed The training speed is measure with s/iter. The lower, the better. | Type | Detectron2 | mmdetection | | ------------ | ---------- | ----------- | | Faster R-CNN | 0.210 | 0.216 | | Mask R-CNN | 0.261 | 0.265 | | Retinanet | 0.200 | 0.205 | ### Inference Speed The inference speed is measured with fps (img/s) on a single GPU, the higher, the better. To be consistent with Detectron2, we report the pure inference speed (without the time of data loading). For Mask R-CNN, we exclude the time of RLE encoding in post-processing. We also include the officially reported speed in the parentheses, which is slightly higher than the results tested on our server due to differences of hardwares. 
| Type         | Detectron2  | mmdetection |
| ------------ | ----------- | ----------- |
| Faster R-CNN | 25.6 (26.3) | 22.2        |
| Mask R-CNN   | 22.5 (23.3) | 19.6        |
| Retinanet    | 17.8 (18.2) | 20.6        |

### Training memory (GB)

| Type         | Detectron2 | mmdetection |
| ------------ | ---------- | ----------- |
| Faster R-CNN | 3.0        | 3.8         |
| Mask R-CNN   | 3.4        | 3.9         |
| Retinanet    | 3.9        | 3.4         |
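As a worked example of the `img_norm_cfg` conventions described in the ImageNet pre-trained models section above, the fragment below sketches how a config would pair the MSRA (caffe-style) ResNet-50 weights from the backbone table with their matching normalization values. It is an illustrative fragment rather than a complete, tested config: the checkpoint URL and the mean/std values are taken from the tables above, while the frozen-BN settings are only the typical choice for caffe-style backbones.

```python
# Illustrative config fragment (not a complete config): a caffe-style (MSRA)
# ResNet-50 backbone together with the img_norm_cfg it expects.
# The checkpoint URL and mean/std come from the tables above.
img_norm_cfg = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)

model = dict(
    backbone=dict(
        type='ResNet',
        depth=50,
        norm_cfg=dict(type='BN', requires_grad=False),  # typical for caffe-style weights
        norm_eval=True,
        style='caffe',
        init_cfg=dict(
            type='Pretrained',
            checkpoint='https://download.openmmlab.com/pretrain/third_party/resnet50_caffe-788b5fa3.pth')))
```

The same `img_norm_cfg` dict is what the `Normalize` step of the data pipeline should use, so that the inputs match the statistics the backbone was pre-trained with.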
22,922
62.14876
654
md
mmdetection
mmdetection-master/docs/en/projects.md
# Projects based on MMDetection There are many projects built upon MMDetection. We list some of them as examples of how to extend MMDetection for your own projects. As the page might not be completed, please feel free to create a PR to update this page. ## Projects as an extension Some projects extend the boundary of MMDetection for deployment or other research fields. They reveal the potential of what MMDetection can do. We list several of them as below. - [OTEDetection](https://github.com/opencv/mmdetection): OpenVINO training extensions for object detection. - [MMDetection3d](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. ## Projects of papers There are also projects released with papers. Some of the papers are published in top-tier conferences (CVPR, ICCV, and ECCV), the others are also highly influential. To make this list also a reference for the community to develop and compare new object detection algorithms, we list them following the time order of top-tier conferences. Methods already supported and maintained by MMDetection are not listed. - Anchor Pruning for Object Detection, CVIU 2022. [\[paper\]](https://doi.org/10.1016/j.cviu.2022.103445)[\[github\]](https://github.com/Mxbonn/anchor_pruning) - Involution: Inverting the Inherence of Convolution for Visual Recognition, CVPR21. [\[paper\]](https://arxiv.org/abs/2103.06255)[\[github\]](https://github.com/d-li14/involution) - Multiple Instance Active Learning for Object Detection, CVPR 2021. [\[paper\]](https://openaccess.thecvf.com/content/CVPR2021/papers/Yuan_Multiple_Instance_Active_Learning_for_Object_Detection_CVPR_2021_paper.pdf)[\[github\]](https://github.com/yuantn/MI-AOD) - Adaptive Class Suppression Loss for Long-Tail Object Detection, CVPR 2021. [\[paper\]](https://arxiv.org/abs/2104.00885)[\[github\]](https://github.com/CASIA-IVA-Lab/ACSL) - Generalizable Pedestrian Detection: The Elephant In The Room, CVPR2021. [\[paper\]](https://arxiv.org/abs/2003.08799)[\[github\]](https://github.com/hasanirtiza/Pedestron) - Group Fisher Pruning for Practical Network Compression, ICML2021. [\[paper\]](https://github.com/jshilong/FisherPruning/blob/main/resources/paper.pdf)[\[github\]](https://github.com/jshilong/FisherPruning) - Overcoming Classifier Imbalance for Long-tail Object Detection with Balanced Group Softmax, CVPR2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/papers/Li_Overcoming_Classifier_Imbalance_for_Long-Tail_Object_Detection_With_Balanced_Group_CVPR_2020_paper.pdf)[\[github\]](https://github.com/FishYuLi/BalancedGroupSoftmax) - Coherent Reconstruction of Multiple Humans from a Single Image, CVPR2020. [\[paper\]](https://jiangwenpl.github.io/multiperson/)[\[github\]](https://github.com/JiangWenPL/multiperson) - Look-into-Object: Self-supervised Structure Modeling for Object Recognition, CVPR 2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/papers/Zhou_Look-Into-Object_Self-Supervised_Structure_Modeling_for_Object_Recognition_CVPR_2020_paper.pdf)[\[github\]](https://github.com/JDAI-CV/LIO) - Video Panoptic Segmentation, CVPR2020. [\[paper\]](https://arxiv.org/abs/2006.11339)[\[github\]](https://github.com/mcahny/vps) - D2Det: Towards High Quality Object Detection and Instance Segmentation, CVPR2020. 
[\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/html/Cao_D2Det_Towards_High_Quality_Object_Detection_and_Instance_Segmentation_CVPR_2020_paper.html)[\[github\]](https://github.com/JialeCao001/D2Det) - CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.09119)[\[github\]](https://github.com/KiveeDong/CentripetalNet) - Learning a Unified Sample Weighting Network for Object Detection, CVPR 2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/html/Cai_Learning_a_Unified_Sample_Weighting_Network_for_Object_Detection_CVPR_2020_paper.html)[\[github\]](https://github.com/caiqi/sample-weighting-network) - Scale-equalizing Pyramid Convolution for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2005.03101) [\[github\]](https://github.com/jshilong/SEPC) - Revisiting the Sibling Head in Object Detector, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.07540)[\[github\]](https://github.com/Sense-X/TSD) - PolarMask: Single Shot Instance Segmentation with Polar Representation, CVPR2020. [\[paper\]](https://arxiv.org/abs/1909.13226)[\[github\]](https://github.com/xieenze/PolarMask) - Hit-Detector: Hierarchical Trinity Architecture Search for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.11818)[\[github\]](https://github.com/ggjy/HitDet.pytorch) - ZeroQ: A Novel Zero Shot Quantization Framework, CVPR2020. [\[paper\]](https://arxiv.org/abs/2001.00281)[\[github\]](https://github.com/amirgholami/ZeroQ) - CBNet: A Novel Composite Backbone Network Architecture for Object Detection, AAAI2020. [\[paper\]](https://aaai.org/Papers/AAAI/2020GB/AAAI-LiuY.1833.pdf)[\[github\]](https://github.com/VDIGPKU/CBNet) - RDSNet: A New Deep Architecture for Reciprocal Object Detection and Instance Segmentation, AAAI2020. [\[paper\]](https://arxiv.org/abs/1912.05070)[\[github\]](https://github.com/wangsr126/RDSNet) - Training-Time-Friendly Network for Real-Time Object Detection, AAAI2020. [\[paper\]](https://arxiv.org/abs/1909.00700)[\[github\]](https://github.com/ZJULearning/ttfnet) - Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution, NeurIPS 2019. [\[paper\]](https://arxiv.org/abs/1909.06720)[\[github\]](https://github.com/thangvubk/Cascade-RPN) - Reasoning R-CNN: Unifying Adaptive Global Reasoning into Large-scale Object Detection, CVPR2019. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2019/papers/Xu_Reasoning-RCNN_Unifying_Adaptive_Global_Reasoning_Into_Large-Scale_Object_Detection_CVPR_2019_paper.pdf)[\[github\]](https://github.com/chanyn/Reasoning-RCNN) - Learning RoI Transformer for Oriented Object Detection in Aerial Images, CVPR2019. [\[paper\]](https://arxiv.org/abs/1812.00155)[\[github\]](https://github.com/dingjiansw101/AerialDetection) - SOLO: Segmenting Objects by Locations. [\[paper\]](https://arxiv.org/abs/1912.04488)[\[github\]](https://github.com/WXinlong/SOLO) - SOLOv2: Dynamic, Faster and Stronger. [\[paper\]](https://arxiv.org/abs/2003.10152)[\[github\]](https://github.com/WXinlong/SOLO) - Dense Peppoints: Representing Visual Objects with Dense Point Sets. [\[paper\]](https://arxiv.org/abs/1912.11473)[\[github\]](https://github.com/justimyhxu/Dense-RepPoints) - IterDet: Iterative Scheme for Object Detection in Crowded Environments. [\[paper\]](https://arxiv.org/abs/2005.05708)[\[github\]](https://github.com/saic-vul/iterdet) - Cross-Iteration Batch Normalization. 
[\[paper\]](https://arxiv.org/abs/2002.05712)[\[github\]](https://github.com/Howal/Cross-iterationBatchNorm) - A Ranking-based, Balanced Loss Function Unifying Classification and Localisation in Object Detection, NeurIPS2020 [\[paper\]](https://arxiv.org/abs/2009.13592)[\[github\]](https://github.com/kemaloksuz/aLRPLoss) - RelationNet++: Bridging Visual Representations for Object Detection via Transformer Decoder, NeurIPS2020 [\[paper\]](https://arxiv.org/abs/2010.15831)[\[github\]](https://github.com/microsoft/RelationNet2) - Generalized Focal Loss V2: Learning Reliable Localization Quality Estimation for Dense Object Detection, CVPR2021[\[paper\]](https://arxiv.org/abs/2011.12885)[\[github\]](https://github.com/implus/GFocalV2) - Swin Transformer: Hierarchical Vision Transformer using Shifted Windows, ICCV2021[\[paper\]](https://arxiv.org/abs/2103.14030)[\[github\]](https://github.com/SwinTransformer/) - Focal Transformer: Focal Self-attention for Local-Global Interactions in Vision Transformers, NeurIPS2021[\[paper\]](https://arxiv.org/abs/2107.00641)[\[github\]](https://github.com/microsoft/Focal-Transformer) - End-to-End Semi-Supervised Object Detection with Soft Teacher, ICCV2021[\[paper\]](https://arxiv.org/abs/2106.09018)[\[github\]](https://github.com/microsoft/SoftTeacher) - CBNetV2: A Novel Composite Backbone Network Architecture for Object Detection [\[paper\]](http://arxiv.org/abs/2107.00420)[\[github\]](https://github.com/VDIGPKU/CBNetV2) - Instances as Queries, ICCV2021 [\[paper\]](https://openaccess.thecvf.com/content/ICCV2021/papers/Fang_Instances_As_Queries_ICCV_2021_paper.pdf)[\[github\]](https://github.com/hustvl/QueryInst)
8,495
143
338
md
mmdetection
mmdetection-master/docs/en/robustness_benchmarking.md
# Corruption Benchmarking

## Introduction

We provide tools to test object detection and instance segmentation models on the image corruption benchmark defined in [Benchmarking Robustness in Object Detection: Autonomous Driving when Winter is Coming](https://arxiv.org/abs/1907.07484).
This page provides basic tutorials on how to use the benchmark.

```latex
@article{michaelis2019winter,
  title={Benchmarking Robustness in Object Detection: Autonomous Driving when Winter is Coming},
  author={Michaelis, Claudio and Mitzkus, Benjamin and Geirhos, Robert and Rusak, Evgenia and Bringmann, Oliver and Ecker, Alexander S. and Bethge, Matthias and Brendel, Wieland},
  journal={arXiv:1907.07484},
  year={2019}
}
```

![image corruption example](../resources/corruptions_sev_3.png)

## About the benchmark

To submit results to the benchmark, please visit the [benchmark homepage](https://github.com/bethgelab/robust-detection-benchmark).

The benchmark is modelled after the [imagenet-c benchmark](https://github.com/hendrycks/robustness), which was originally published in [Benchmarking Neural Network Robustness to Common Corruptions and Perturbations](https://arxiv.org/abs/1903.12261) (ICLR 2019) by Dan Hendrycks and Thomas Dietterich.

The image corruption functions are included in this library but can be installed separately using:

```shell
pip install imagecorruptions
```

Compared to imagenet-c, a few changes had to be made to handle images of arbitrary size and greyscale images.
We also modified the 'motion blur' and 'snow' corruptions to remove the dependency on a Linux-specific library, which would otherwise have to be installed separately.
For details, please refer to the [imagecorruptions repository](https://github.com/bethgelab/imagecorruptions).

## Inference with pretrained models

We provide a testing script to evaluate a model's performance on any combination of the corruptions provided in the benchmark.

### Test a dataset

- [x] single GPU testing
- [ ] multiple GPU testing
- [ ] visualize detection results

You can use the following commands to test a model's performance under the 15 corruptions used in the benchmark.

```shell
# single-gpu testing
python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}]
```

Alternatively, different groups of corruptions can be selected.

```shell
# noise
python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions noise

# blur
python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions blur

# weather
python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions weather

# digital
python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions digital
```

Or a custom set of corruptions, e.g.:

```shell
# gaussian noise, zoom blur and snow
python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions gaussian_noise zoom_blur snow
```

Finally, the corruption severities to evaluate can be chosen.
Severity 0 corresponds to clean data and the effect increases from 1 to 5.
```shell # severity 1 python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --severities 1 # severities 0,2,4 python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --severities 0 2 4 ``` ## Results for modelzoo models The results on COCO 2017val are shown in the below table. | Model | Backbone | Style | Lr schd | box AP clean | box AP corr. | box % | mask AP clean | mask AP corr. | mask % | | :-----------------: | :-----------------: | :-----: | :-----: | :----------: | :----------: | :---: | :-----------: | :-----------: | :----: | | Faster R-CNN | R-50-FPN | pytorch | 1x | 36.3 | 18.2 | 50.2 | - | - | - | | Faster R-CNN | R-101-FPN | pytorch | 1x | 38.5 | 20.9 | 54.2 | - | - | - | | Faster R-CNN | X-101-32x4d-FPN | pytorch | 1x | 40.1 | 22.3 | 55.5 | - | - | - | | Faster R-CNN | X-101-64x4d-FPN | pytorch | 1x | 41.3 | 23.4 | 56.6 | - | - | - | | Faster R-CNN | R-50-FPN-DCN | pytorch | 1x | 40.0 | 22.4 | 56.1 | - | - | - | | Faster R-CNN | X-101-32x4d-FPN-DCN | pytorch | 1x | 43.4 | 26.7 | 61.6 | - | - | - | | Mask R-CNN | R-50-FPN | pytorch | 1x | 37.3 | 18.7 | 50.1 | 34.2 | 16.8 | 49.1 | | Mask R-CNN | R-50-FPN-DCN | pytorch | 1x | 41.1 | 23.3 | 56.7 | 37.2 | 20.7 | 55.7 | | Cascade R-CNN | R-50-FPN | pytorch | 1x | 40.4 | 20.1 | 49.7 | - | - | - | | Cascade Mask R-CNN | R-50-FPN | pytorch | 1x | 41.2 | 20.7 | 50.2 | 35.7 | 17.6 | 49.3 | | RetinaNet | R-50-FPN | pytorch | 1x | 35.6 | 17.8 | 50.1 | - | - | - | | Hybrid Task Cascade | X-101-64x4d-FPN-DCN | pytorch | 1x | 50.6 | 32.7 | 64.7 | 43.8 | 28.1 | 64.0 | Results may vary slightly due to the stochastic application of the corruptions.
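The corruption functions come from the `imagecorruptions` package installed above, and they can also be applied to images directly, which is handy for checking what a given corruption/severity pair actually looks like before running a full evaluation. A minimal sketch, assuming the package's `corrupt(image, corruption_name=..., severity=...)` helper operating on a uint8 RGB array:

```python
# Preview a corruption at several severities using the imagecorruptions package.
# `corrupt` is assumed to take an HxWx3 uint8 RGB array, a corruption name and
# a severity in the range 1-5.
import mmcv
from imagecorruptions import corrupt

img = mmcv.imread('demo/demo.jpg', channel_order='rgb')

for severity in (1, 3, 5):
    corrupted = corrupt(img, corruption_name='gaussian_noise', severity=severity)
    mmcv.imwrite(mmcv.rgb2bgr(corrupted), f'gaussian_noise_sev{severity}.jpg')
```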
5,984
52.918919
242
md
mmdetection
mmdetection-master/docs/en/stat.py
#!/usr/bin/env python import functools as func import glob import os.path as osp import re import numpy as np url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/configs' files = sorted(glob.glob('../../configs/*/README.md')) stats = [] titles = [] num_ckpts = 0 for f in files: url = osp.dirname(f.replace('../../configs', url_prefix)) with open(f, 'r') as content_file: content = content_file.read() title = content.split('\n')[0].replace('# ', '').strip() ckpts = set(x.lower().strip() for x in re.findall(r'\[model\]\((https?.*)\)', content)) if len(ckpts) == 0: continue _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)] assert len(_papertype) > 0 papertype = _papertype[0] paper = set([(papertype, title)]) titles.append(title) num_ckpts += len(ckpts) statsmsg = f""" \t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts) """ stats.append((paper, ckpts, statsmsg)) allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats]) msglist = '\n'.join(x for _, _, x in stats) papertypes, papercounts = np.unique([t for t, _ in allpapers], return_counts=True) countstr = '\n'.join( [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)]) modelzoo = f""" # Model Zoo Statistics * Number of papers: {len(set(titles))} {countstr} * Number of checkpoints: {num_ckpts} {msglist} """ with open('modelzoo_statistics.md', 'w') as f: f.write(modelzoo)
1,539
22.692308
76
py
mmdetection
mmdetection-master/docs/en/switch_language.md
## <a href='https://mmdetection.readthedocs.io/en/latest/'>English</a> ## <a href='https://mmdetection.readthedocs.io/zh_CN/latest/'>简体中文</a>
143
35
70
md
mmdetection
mmdetection-master/docs/en/useful_tools.md
Apart from training/testing scripts, we provide lots of useful tools under the `tools/` directory.

## Log Analysis

`tools/analysis_tools/analyze_logs.py` plots loss/mAP curves given a training log file. Run `pip install seaborn` first to install the dependency.

```shell
python tools/analysis_tools/analyze_logs.py plot_curve [--keys ${KEYS}] [--eval-interval ${EVALUATION_INTERVAL}] [--title ${TITLE}] [--legend ${LEGEND}] [--backend ${BACKEND}] [--style ${STYLE}] [--out ${OUT_FILE}]
```

![loss curve image](../../resources/loss_curve.png)

Examples:

- Plot the classification loss of some run.

  ```shell
  python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_cls --legend loss_cls
  ```

- Plot the classification and regression loss of some run, and save the figure to a pdf.

  ```shell
  python tools/analysis_tools/analyze_logs.py plot_curve log.json --keys loss_cls loss_bbox --out losses.pdf
  ```

- Compare the bbox mAP of two runs in the same figure.

  ```shell
  python tools/analysis_tools/analyze_logs.py plot_curve log1.json log2.json --keys bbox_mAP --legend run1 run2
  ```

- Compute the average training speed.

  ```shell
  python tools/analysis_tools/analyze_logs.py cal_train_time log.json [--include-outliers]
  ```

  The output is expected to be like the following.

  ```text
  -----Analyze train time of work_dirs/some_exp/20190611_192040.log.json-----
  slowest epoch 11, average time is 1.2024
  fastest epoch 1, average time is 1.1909
  time std over epochs is 0.0028
  average iter time: 1.1959 s/iter
  ```

## Result Analysis

`tools/analysis_tools/analyze_results.py` calculates single image mAP and saves or shows the topk images with the highest and lowest scores based on prediction results.

**Usage**

```shell
python tools/analysis_tools/analyze_results.py \
    ${CONFIG} \
    ${PREDICTION_PATH} \
    ${SHOW_DIR} \
    [--show] \
    [--wait-time ${WAIT_TIME}] \
    [--topk ${TOPK}] \
    [--show-score-thr ${SHOW_SCORE_THR}] \
    [--cfg-options ${CFG_OPTIONS}]
```

Description of all arguments:

- `config`: The path of a model config file.
- `prediction_path`: Output result file in pickle format from `tools/test.py`.
- `show_dir`: Directory where painted GT and detection images will be saved.
- `--show`: Determines whether to show painted images. If not specified, it will be set to `False`.
- `--wait-time`: The interval of show (s); 0 means blocking.
- `--topk`: The number of saved images that have the highest and lowest `topk` scores after sorting. If not specified, it will be set to `20`.
- `--show-score-thr`: Show score threshold. If not specified, it will be set to `0`.
- `--cfg-options`: If specified, the key-value pair optional cfg will be merged into the config file.

**Examples**:

Assume that you have got a result file in pickle format from `tools/test.py` at the path './result.pkl'.

1. Test Faster R-CNN and visualize the results, saving images to the directory `results/`

   ```shell
   python tools/analysis_tools/analyze_results.py \
       configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
       result.pkl \
       results \
       --show
   ```

2. Test Faster R-CNN with `topk` set to 50, saving images to the directory `results/`

   ```shell
   python tools/analysis_tools/analyze_results.py \
       configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \
       result.pkl \
       results \
       --topk 50
   ```

3.
If you want to filter the low score prediction results, you can specify the `show-score-thr` parameter ```shell python tools/analysis_tools/analyze_results.py \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ result.pkl \ results \ --show-score-thr 0.3 ``` ## Visualization ### Visualize Datasets `tools/misc/browse_dataset.py` helps the user to browse a detection dataset (both images and bounding box annotations) visually, or save the image to a designated directory. ```shell python tools/misc/browse_dataset.py ${CONFIG} [-h] [--skip-type ${SKIP_TYPE[SKIP_TYPE...]}] [--output-dir ${OUTPUT_DIR}] [--not-show] [--show-interval ${SHOW_INTERVAL}] ``` ### Visualize Models First, convert the model to ONNX as described [here](#convert-mmdetection-model-to-onnx-experimental). Note that currently only RetinaNet is supported, support for other models will be coming in later versions. The converted model could be visualized by tools like [Netron](https://github.com/lutzroeder/netron). ### Visualize Predictions If you need a lightweight GUI for visualizing the detection results, you can refer [DetVisGUI project](https://github.com/Chien-Hung/DetVisGUI/tree/mmdetection). ## Error Analysis `tools/analysis_tools/coco_error_analysis.py` analyzes COCO results per category and by different criterion. It can also make a plot to provide useful information. ```shell python tools/analysis_tools/coco_error_analysis.py ${RESULT} ${OUT_DIR} [-h] [--ann ${ANN}] [--types ${TYPES[TYPES...]}] ``` Example: Assume that you have got [Mask R-CNN checkpoint file](https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_fpn_1x_coco/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth) in the path 'checkpoint'. For other checkpoints, please refer to our [model zoo](./model_zoo.md). You can use the following command to get the results bbox and segmentation json file. ```shell # out: results.bbox.json and results.segm.json python tools/test.py \ configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ checkpoint/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ --format-only \ --options "jsonfile_prefix=./results" ``` 1. Get COCO bbox error results per category , save analyze result images to the directory `results/` ```shell python tools/analysis_tools/coco_error_analysis.py \ results.bbox.json \ results \ --ann=data/coco/annotations/instances_val2017.json \ ``` 2. Get COCO segmentation error results per category , save analyze result images to the directory `results/` ```shell python tools/analysis_tools/coco_error_analysis.py \ results.segm.json \ results \ --ann=data/coco/annotations/instances_val2017.json \ --types='segm' ``` ## Model Serving In order to serve an `MMDetection` model with [`TorchServe`](https://pytorch.org/serve/), you can follow the steps: ### 1. Convert model from MMDetection to TorchServe ```shell python tools/deployment/mmdet2torchserve.py ${CONFIG_FILE} ${CHECKPOINT_FILE} \ --output-folder ${MODEL_STORE} \ --model-name ${MODEL_NAME} ``` **Note**: ${MODEL_STORE} needs to be an absolute path to a folder. ### 2. Build `mmdet-serve` docker image ```shell docker build -t mmdet-serve:latest docker/serve/ ``` ### 3. Run `mmdet-serve` Check the official docs for [running TorchServe with docker](https://github.com/pytorch/serve/blob/master/docker/README.md#running-torchserve-in-a-production-docker-environment). In order to run in GPU, you need to install [nvidia-docker](https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html). 
You can omit the `--gpus` argument in order to run on the CPU.

Example:

```shell
docker run --rm \
--cpus 8 \
--gpus device=0 \
-p8080:8080 -p8081:8081 -p8082:8082 \
--mount type=bind,source=$MODEL_STORE,target=/home/model-server/model-store \
mmdet-serve:latest
```

[Read the docs](https://github.com/pytorch/serve/blob/072f5d088cce9bb64b2a18af065886c9b01b317b/docs/rest_api.md/) about the Inference (8080), Management (8081) and Metrics (8082) APIs.

### 4. Test deployment

```shell
curl -O https://raw.githubusercontent.com/pytorch/serve/master/docs/images/3dogs.jpg
curl http://127.0.0.1:8080/predictions/${MODEL_NAME} -T 3dogs.jpg
```

You should obtain a response similar to:

```json
[
  {
    "class_name": "dog",
    "bbox": [
      294.63409423828125,
      203.99111938476562,
      417.048583984375,
      281.62744140625
    ],
    "score": 0.9987992644309998
  },
  {
    "class_name": "dog",
    "bbox": [
      404.26019287109375,
      126.0080795288086,
      574.5091552734375,
      293.6662292480469
    ],
    "score": 0.9979367256164551
  },
  {
    "class_name": "dog",
    "bbox": [
      197.2144775390625,
      93.3067855834961,
      307.8505554199219,
      276.7560119628906
    ],
    "score": 0.993338406085968
  }
]
```

And you can use `test_torchserver.py` to compare the results of TorchServe and PyTorch, and visualize them.

```shell
python tools/deployment/test_torchserver.py ${IMAGE_FILE} ${CONFIG_FILE} ${CHECKPOINT_FILE} ${MODEL_NAME} [--inference-addr ${INFERENCE_ADDR}] [--device ${DEVICE}] [--score-thr ${SCORE_THR}]
```

Example:

```shell
python tools/deployment/test_torchserver.py \
demo/demo.jpg \
configs/yolo/yolov3_d53_320_273e_coco.py \
checkpoint/yolov3_d53_320_273e_coco-421362b6.pth \
yolov3
```

## Model Complexity

`tools/analysis_tools/get_flops.py` is a script adapted from [flops-counter.pytorch](https://github.com/sovrasov/flops-counter.pytorch) to compute the FLOPs and params of a given model.

```shell
python tools/analysis_tools/get_flops.py ${CONFIG_FILE} [--shape ${INPUT_SHAPE}]
```

You will get results like this.

```text
==============================
Input shape: (3, 1280, 800)
Flops: 239.32 GFLOPs
Params: 37.74 M
==============================
```

**Note**: This tool is still experimental and we do not guarantee that the number is absolutely correct. You may well use the result for simple comparisons, but double check it before you adopt it in technical reports or papers.

1. FLOPs are related to the input shape while parameters are not. The default input shape is (1, 3, 1280, 800).
2. Some operators are not counted in FLOPs, such as GN and custom operators. Refer to [`mmcv.cnn.get_model_complexity_info()`](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/flops_counter.py) for details.
3. The FLOPs of two-stage detectors are dependent on the number of proposals.

## Model conversion

### MMDetection model to ONNX (experimental)

We provide a script to convert models to [ONNX](https://github.com/onnx/onnx) format. We also support comparing the output results between the PyTorch and ONNX models for verification.

```shell
python tools/deployment/pytorch2onnx.py ${CONFIG_FILE} ${CHECKPOINT_FILE} --output-file ${ONNX_FILE} [--shape ${INPUT_SHAPE} --verify]
```

**Note**: This tool is still experimental. Some customized operators are not supported for now. For a detailed description of the usage and the list of supported models, please refer to [pytorch2onnx](tutorials/pytorch2onnx.md).

### MMDetection 1.x model to MMDetection 2.x

`tools/model_converters/upgrade_model_version.py` upgrades a previous MMDetection checkpoint to the new version.
Note that this script is not guaranteed to work as some breaking changes are introduced in the new version. It is recommended to directly use the new checkpoints. ```shell python tools/model_converters/upgrade_model_version.py ${IN_FILE} ${OUT_FILE} [-h] [--num-classes NUM_CLASSES] ``` ### RegNet model to MMDetection `tools/model_converters/regnet2mmdet.py` convert keys in pycls pretrained RegNet models to MMDetection style. ```shell python tools/model_converters/regnet2mmdet.py ${SRC} ${DST} [-h] ``` ### Detectron ResNet to Pytorch `tools/model_converters/detectron2pytorch.py` converts keys in the original detectron pretrained ResNet models to PyTorch style. ```shell python tools/model_converters/detectron2pytorch.py ${SRC} ${DST} ${DEPTH} [-h] ``` ### Prepare a model for publishing `tools/model_converters/publish_model.py` helps users to prepare their model for publishing. Before you upload a model to AWS, you may want to 1. convert model weights to CPU tensors 2. delete the optimizer states and 3. compute the hash of the checkpoint file and append the hash id to the filename. ```shell python tools/model_converters/publish_model.py ${INPUT_FILENAME} ${OUTPUT_FILENAME} ``` E.g., ```shell python tools/model_converters/publish_model.py work_dirs/faster_rcnn/latest.pth faster_rcnn_r50_fpn_1x_20190801.pth ``` The final output filename will be `faster_rcnn_r50_fpn_1x_20190801-{hash id}.pth`. ## Dataset Conversion `tools/data_converters/` contains tools to convert the Cityscapes dataset and Pascal VOC dataset to the COCO format. ```shell python tools/dataset_converters/cityscapes.py ${CITYSCAPES_PATH} [-h] [--img-dir ${IMG_DIR}] [--gt-dir ${GT_DIR}] [-o ${OUT_DIR}] [--nproc ${NPROC}] python tools/dataset_converters/pascal_voc.py ${DEVKIT_PATH} [-h] [-o ${OUT_DIR}] ``` ## Dataset Download `tools/misc/download_dataset.py` supports downloading datasets such as COCO, VOC, and LVIS. ```shell python tools/misc/download_dataset.py --dataset-name coco2017 python tools/misc/download_dataset.py --dataset-name voc2007 python tools/misc/download_dataset.py --dataset-name lvis ``` ## Benchmark ### Robust Detection Benchmark `tools/analysis_tools/test_robustness.py` and`tools/analysis_tools/robustness_eval.py` helps users to evaluate model robustness. The core idea comes from [Benchmarking Robustness in Object Detection: Autonomous Driving when Winter is Coming](https://arxiv.org/abs/1907.07484). For more information how to evaluate models on corrupted images and results for a set of standard models please refer to [robustness_benchmarking.md](robustness_benchmarking.md). ### FPS Benchmark `tools/analysis_tools/benchmark.py` helps users to calculate FPS. The FPS value includes model forward and post-processing. In order to get a more accurate value, currently only supports single GPU distributed startup mode. ```shell python -m torch.distributed.launch --nproc_per_node=1 --master_port=${PORT} tools/analysis_tools/benchmark.py \ ${CONFIG} \ ${CHECKPOINT} \ [--repeat-num ${REPEAT_NUM}] \ [--max-iter ${MAX_ITER}] \ [--log-interval ${LOG_INTERVAL}] \ --launcher pytorch ``` Examples: Assuming that you have already downloaded the `Faster R-CNN` model checkpoint to the directory `checkpoints/`. 
```shell python -m torch.distributed.launch --nproc_per_node=1 --master_port=29500 tools/analysis_tools/benchmark.py \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --launcher pytorch ``` ## Miscellaneous ### Evaluating a metric `tools/analysis_tools/eval_metric.py` evaluates certain metrics of a pkl result file according to a config file. ```shell python tools/analysis_tools/eval_metric.py ${CONFIG} ${PKL_RESULTS} [-h] [--format-only] [--eval ${EVAL[EVAL ...]}] [--cfg-options ${CFG_OPTIONS [CFG_OPTIONS ...]}] [--eval-options ${EVAL_OPTIONS [EVAL_OPTIONS ...]}] ``` ### Print the entire config `tools/misc/print_config.py` prints the whole config verbatim, expanding all its imports. ```shell python tools/misc/print_config.py ${CONFIG} [-h] [--options ${OPTIONS [OPTIONS...]}] ``` ## Hyper-parameter Optimization ### YOLO Anchor Optimization `tools/analysis_tools/optimize_anchors.py` provides two method to optimize YOLO anchors. One is k-means anchor cluster which refers from [darknet](https://github.com/AlexeyAB/darknet/blob/master/src/detector.c#L1421). ```shell python tools/analysis_tools/optimize_anchors.py ${CONFIG} --algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} --output-dir ${OUTPUT_DIR} ``` Another is using differential evolution to optimize anchors. ```shell python tools/analysis_tools/optimize_anchors.py ${CONFIG} --algorithm differential_evolution --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} --output-dir ${OUTPUT_DIR} ``` E.g., ```shell python tools/analysis_tools/optimize_anchors.py configs/yolo/yolov3_d53_320_273e_coco.py --algorithm differential_evolution --input-shape 608 608 --device cuda --output-dir work_dirs ``` You will get: ``` loading annotations into memory... Done (t=9.70s) creating index... index created! 2021-07-19 19:37:20,951 - mmdet - INFO - Collecting bboxes from annotation... [>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 117266/117266, 15874.5 task/s, elapsed: 7s, ETA: 0s 2021-07-19 19:37:28,753 - mmdet - INFO - Collected 849902 bboxes. differential_evolution step 1: f(x)= 0.506055 differential_evolution step 2: f(x)= 0.506055 ...... differential_evolution step 489: f(x)= 0.386625 2021-07-19 19:46:40,775 - mmdet - INFO Anchor evolution finish. Average IOU: 0.6133754253387451 2021-07-19 19:46:40,776 - mmdet - INFO Anchor differential evolution result:[[10, 12], [15, 30], [32, 22], [29, 59], [61, 46], [57, 116], [112, 89], [154, 198], [349, 336]] 2021-07-19 19:46:40,798 - mmdet - INFO Result saved in work_dirs/anchor_optimize_result.json ``` ## Confusion Matrix A confusion matrix is a summary of prediction results. `tools/analysis_tools/confusion_matrix.py` can analyze the prediction results and plot a confusion matrix table. First, run `tools/test.py` to save the `.pkl` detection results. Then, run ``` python tools/analysis_tools/confusion_matrix.py ${CONFIG} ${DETECTION_RESULTS} ${SAVE_DIR} --show ``` And you will get a confusion matrix like this: ![confusion_matrix_example](https://user-images.githubusercontent.com/12907710/140513068-994cdbf4-3a4a-48f0-8fd8-2830d93fd963.png) ## COCO Separated & Occluded Mask Metric Detecting occluded objects still remains a challenge for state-of-the-art object detectors. We implemented the metric presented in paper [A Tri-Layer Plugin to Improve Occluded Detection](https://arxiv.org/abs/2210.10046) to calculate the recall of separated and occluded masks. 
There are two ways to use this metric: ### Offline evaluation We provide a script to calculate the metric with a dumped prediction file. First, use the `tools/test.py` script to dump the detection results: ```shell python tools/test.py ${CONFIG} ${MODEL_PATH} --out results.pkl ``` Then, run the `tools/analysis_tools/coco_occluded_separated_recall.py` script to get the recall of separated and occluded masks: ```shell python tools/analysis_tools/coco_occluded_separated_recall.py results.pkl --out occluded_separated_recall.json ``` The output should be like this: ``` loading annotations into memory... Done (t=0.51s) creating index... index created! processing detection results... [>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 5000/5000, 109.3 task/s, elapsed: 46s, ETA: 0s computing occluded mask recall... [>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 5550/5550, 780.5 task/s, elapsed: 7s, ETA: 0s COCO occluded mask recall: 58.79% COCO occluded mask success num: 3263 computing separated mask recall... [>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>] 3522/3522, 778.3 task/s, elapsed: 5s, ETA: 0s COCO separated mask recall: 31.94% COCO separated mask success num: 1125 +-----------+--------+-------------+ | mask type | recall | num correct | +-----------+--------+-------------+ | occluded | 58.79% | 3263 | | separated | 31.94% | 1125 | +-----------+--------+-------------+ Evaluation results have been saved to occluded_separated_recall.json. ``` ### Online evaluation We implement `OccludedSeparatedCocoDataset` which inherited from the `CocoDataset`. To evaluate the recall of separated and occluded masks during training, just replace the validation dataset type with `'OccludedSeparatedCocoDataset'` in your config: ```python data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=train_pipeline), val=dict( type='OccludedSeparatedCocoDataset', # modify this ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline), test=dict( type='OccludedSeparatedCocoDataset', # modify this ann_file=data_root + 'annotations/instances_val2017.json', img_prefix=data_root + 'val2017/', pipeline=test_pipeline)) ``` Please cite the paper if you use this metric: ```latex @article{zhan2022triocc, title={A Tri-Layer Plugin to Improve Occluded Detection}, author={Zhan, Guanqi and Xie, Weidi and Zisserman, Andrew}, journal={British Machine Vision Conference}, year={2022} } ```
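Several of the tools above (result analysis, metric evaluation, the confusion matrix, and the occluded/separated recall script) consume the pickle file produced by `tools/test.py --out results.pkl`. If you want to inspect that file directly, the sketch below shows one way to load and summarize it; it assumes the common MMDetection 2.x result layout, i.e. a list with one entry per image, where each entry is a per-class list of `(N, 5)` bbox arrays, or a `(bbox_results, mask_results)` tuple for models that also predict masks.

```python
# Quick inspection of a dumped result file from `tools/test.py --out results.pkl`.
# Assumes the usual MMDetection 2.x layout described above.
import mmcv
import numpy as np

results = mmcv.load('results.pkl')
score_thr = 0.3
num_dets = 0

for per_image in results:
    # mask models return (bbox_results, mask_results); keep only the bboxes
    bbox_results = per_image[0] if isinstance(per_image, tuple) else per_image
    for cls_bboxes in bbox_results:
        if len(cls_bboxes):
            # last column of each (N, 5) array is the detection score
            num_dets += int((np.asarray(cls_bboxes)[:, -1] > score_thr).sum())

print(f'{len(results)} images, {num_dets} detections above score {score_thr}')
```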
20,350
33.49322
456
md
mmdetection
mmdetection-master/docs/en/_static/css/readthedocs.css
.header-logo { background-image: url("../image/mmdet-logo.png"); background-size: 156px 40px; height: 40px; width: 156px; }
140
19.142857
53
css
mmdetection
mmdetection-master/docs/en/device/npu.md
# NPU (HUAWEI Ascend) ## Usage Please refer to the [building documentation of MMCV](https://mmcv.readthedocs.io/en/latest/get_started/build.html#build-mmcv-full-on-ascend-npu-machine) to install MMCV on NPU devices Here we use 8 NPUs on your computer to train the model with the following command: ```shell bash tools/dist_train.sh configs/ssd/ssd300_coco.py 8 ``` Also, you can use only one NPU to train the model with the following command: ```shell python tools/train.py configs/ssd/ssd300_coco.py ``` ## Models Results | Model | box AP | mask AP | Config | Download | | :------------------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------------------------------------------------------- | | [ssd300](<>) | 25.6 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd/ssd300_fp16_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/ssd300_coco.log.json) | | [ssd512](<>) | 29.4 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd/ssd512_fp16_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/ssd512_coco.log.json) | | [ssdlite-mbv2\*](<>) | 20.2 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/ssdlite_mobilenetv2_scratch_600e_coco.log.json) | | [retinanet-r18](<>) | 31.8 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/retinanet_r18_fpn_1x8_1x_coco.log.json) | | [retinanet-r50](<>) | 36.6 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/retinanet_r50_fpn_1x_coco.log.json) | | [yolov3-608](<>) | 34.7 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/yolov3_d53_fp16_mstrain-608_273e_coco.log.json) | | [yolox-s\*\*](<>) | 39.9 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolox/yolox_s_8x8_300e_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/yolox_s_8x8_300e_coco.log.json) | | [centernet-r18](<>) | 26.1 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/centernet/centernet_resnet18_140e_cocoo.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/centernet_resnet18_140e_coco.log.jsonn) | | [fcos-r50\*](<>) | 36.1 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_fp16_1x_bs8x8_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/fcos_r50_caffe_fpn_gn-head_1x_coco_bs8x8.log.json) | | [solov2-r50](<>) | --- | 34.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/solov2/solov2_r50_fpn_1x_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/solov2_r50_fpn_1x_coco.log.json) | **Notes:** - If not specially marked, the results on NPU are the same as those on the GPU with FP32. 
- (\*) The NPU results for these models are aligned with the results of mixed-precision training on the GPU, but are lower than the FP32 results. This is mainly related to how the model itself behaves under mixed-precision training; users may need to adjust the hyperparameters to achieve better results.
- (\*\*) The accuracy of yolox-s on the GPU in mixed precision is 40.1, with `persistent_workers=True` set in the data loader config by default. There are currently some bugs on NPUs that prevent the last few epochs from running, but the effect on accuracy is small and the difference can be ignored.

## High-performance Model Results on the Ascend Device

Optimizations applied (an illustrative sketch follows the table below):

1. Rewrite per-element loop calculations as whole-batch calculations to reduce the number of instructions issued.
2. Replace index-based calculations with mask-based calculations, because the SIMD architecture is good at processing contiguous data.

| Model                      | Config                                                                                                                      | v100 iter time | 910A iter time             |
| :------------------------: | :-------------------------------------------------------------------------------------------------------------------------: | :------------: | :------------------------: |
| [ascend-ssd300](<>)        | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd/ascend_ssd300_fp16_coco.py)                      | 0.165s/iter    | 0.383s/iter -> 0.13s/iter  |
| [ascend-retinanet-r18](<>) | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet/ascend_retinanet_r18_fpn_1x8_1x_coco.py)   | 0.567s/iter    | 0.780s/iter -> 0.420s/iter |

**All of the above models are provided by the Huawei Ascend group.**
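To illustrate what the two optimizations above mean in practice, here is a small plain-PyTorch sketch (not the actual Ascend kernel code): a per-element Python loop with data-dependent indexing is replaced by a single whole-batch computation over a boolean mask, which keeps the work in large contiguous tensor operations.

```python
# Illustrative sketch (plain PyTorch, not the actual Ascend implementation) of
# the two optimizations described above: replace a per-element loop and
# data-dependent indexing with one whole-batch computation over a boolean mask.
import torch


def positive_loss_loop(losses: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    # Before: loop + index selection, many tiny ops issued one by one.
    total = losses.new_zeros(())
    for i in range(labels.numel()):
        if labels[i] > 0:
            total = total + losses[i]
    return total


def positive_loss_masked(losses: torch.Tensor, labels: torch.Tensor) -> torch.Tensor:
    # After: one batched comparison and one reduction over contiguous data;
    # masked-out elements simply contribute zero.
    mask = (labels > 0).to(losses.dtype)
    return (losses * mask).sum()


losses = torch.rand(4096)
labels = torch.randint(0, 3, (4096,))
assert torch.allclose(positive_loss_loop(losses, labels), positive_loss_masked(losses, labels))
```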
5,844
103.375
282
md
mmdetection
mmdetection-master/docs/en/tutorials/config.md
# Tutorial 1: Learn about Configs

We incorporate modular and inheritance design into our config system, which makes it convenient to conduct various experiments. If you wish to inspect the config file, you may run `python tools/misc/print_config.py /PATH/TO/CONFIG` to see the complete config.

## Modify config through script arguments

When submitting jobs using "tools/train.py" or "tools/test.py", you may specify `--cfg-options` to modify the config in place.

- Update config keys of dict chains.

  The config options can be specified following the order of the dict keys in the original config. For example, `--cfg-options model.backbone.norm_eval=False` changes all the BN modules in the model backbone to `train` mode.

- Update keys inside a list of configs.

  Some config dicts are composed as a list in your config. For example, the training pipeline `data.train.pipeline` is normally a list, e.g. `[dict(type='LoadImageFromFile'), ...]`. If you want to change `'LoadImageFromFile'` to `'LoadImageFromWebcam'` in the pipeline, you may specify `--cfg-options data.train.pipeline.0.type=LoadImageFromWebcam`.

- Update values of lists/tuples.

  Sometimes the value to be updated is a list or a tuple. For example, the config file normally sets `workflow=[('train', 1)]`. If you want to change this key, you may specify `--cfg-options workflow="[(train,1),(val,1)]"`. Note that the quotation mark " is necessary to support list/tuple data types, and that **NO** white space is allowed inside the quotation marks in the specified value.

## Config File Structure

There are 4 basic component types under `config/_base_`: dataset, model, schedule, and default_runtime. Many methods, such as Faster R-CNN, Mask R-CNN, Cascade R-CNN, RPN, and SSD, can be easily constructed with one component of each type. The configs that are composed of components from `_base_` are called _primitive_.

For all configs under the same folder, it is recommended to have only **one** _primitive_ config. All other configs should inherit from the _primitive_ config. In this way, the maximum inheritance level is 3.

For easy understanding, we recommend that contributors inherit from existing methods. For example, if some modification is made based on Faster R-CNN, users may first inherit the basic Faster R-CNN structure by specifying `_base_ = ../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py`, then modify the necessary fields in the config files.

If you are building an entirely new method that does not share its structure with any of the existing methods, you may create a folder `xxx_rcnn` under `configs`. Please refer to [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html) for detailed documentation.

## Config Name Style

We follow the style below to name config files. Contributors are advised to follow the same style.

```
{model}_[model setting]_{backbone}_{neck}_[norm setting]_[misc]_[gpu x batch_per_gpu]_{schedule}_{dataset}
```

`{xxx}` is a required field and `[yyy]` is optional.

- `{model}`: model type like `faster_rcnn`, `mask_rcnn`, etc.
- `[model setting]`: specific setting for some models, like `without_semantic` for `htc`, `moment` for `reppoints`, etc.
- `{backbone}`: backbone type like `r50` (ResNet-50), `x101` (ResNeXt-101).
- `{neck}`: neck type like `fpn`, `pafpn`, `nasfpn`, `c4`.
- `[norm_setting]`: `bn` (Batch Normalization) is used unless specified; other norm layer types could be `gn` (Group Normalization) or `syncbn` (Synchronized Batch Normalization).
`gn-head`/`gn-neck` indicates GN is applied in head/neck only, while `gn-all` means GN is applied in the entire model, e.g. backbone, neck, head. - `[misc]`: miscellaneous setting/plugins of model, e.g. `dconv`, `gcb`, `attention`, `albu`, `mstrain`. - `[gpu x batch_per_gpu]`: GPUs and samples per GPU, `8x2` is used by default. - `{schedule}`: training schedule, options are `1x`, `2x`, `20e`, etc. `1x` and `2x` means 12 epochs and 24 epochs respectively. `20e` is adopted in cascade models, which denotes 20 epochs. For `1x`/`2x`, initial learning rate decays by a factor of 10 at the 8/16th and 11/22th epochs. For `20e`, initial learning rate decays by a factor of 10 at the 16th and 19th epochs. - `{dataset}`: dataset like `coco`, `cityscapes`, `voc_0712`, `wider_face`. ## Deprecated train_cfg/test_cfg The `train_cfg` and `test_cfg` are deprecated in config file, please specify them in the model config. The original config structure is as below. ```python # deprecated model = dict( type=..., ... ) train_cfg=dict(...) test_cfg=dict(...) ``` The migration example is as below. ```python # recommended model = dict( type=..., ... train_cfg=dict(...), test_cfg=dict(...), ) ``` ## An Example of Mask R-CNN To help the users have a basic idea of a complete config and the modules in a modern detection system, we make brief comments on the config of Mask R-CNN using ResNet50 and FPN as the following. For more detailed usage and the corresponding alternative for each modules, please refer to the API documentation. ```python model = dict( type='MaskRCNN', # The name of detector backbone=dict( # The config of backbone type='ResNet', # The type of the backbone, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py#L308 for more details. depth=50, # The depth of backbone, usually it is 50 or 101 for ResNet and ResNext backbones. num_stages=4, # Number of stages of the backbone. out_indices=(0, 1, 2, 3), # The index of output feature maps produced in each stages frozen_stages=1, # The weights in the first 1 stage are frozen norm_cfg=dict( # The config of normalization layers. type='BN', # Type of norm layer, usually it is BN or GN requires_grad=True), # Whether to train the gamma and beta in BN norm_eval=True, # Whether to freeze the statistics in BN style='pytorch', # The style of backbone, 'pytorch' means that stride 2 layers are in 3x3 conv, 'caffe' means stride 2 layers are in 1x1 convs. init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), # The ImageNet pretrained backbone to be loaded neck=dict( type='FPN', # The neck of detector is FPN. We also support 'NASFPN', 'PAFPN', etc. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/fpn.py#L10 for more details. in_channels=[256, 512, 1024, 2048], # The input channels, this is consistent with the output channels of backbone out_channels=256, # The output channels of each level of the pyramid feature map num_outs=5), # The number of output scales rpn_head=dict( type='RPNHead', # The type of RPN head is 'RPNHead', we also support 'GARPNHead', etc. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/rpn_head.py#L12 for more details. in_channels=256, # The input channels of each input feature map, this is consistent with the output channels of neck feat_channels=256, # Feature channels of convolutional layers in the head. 
anchor_generator=dict( # The config of anchor generator type='AnchorGenerator', # Most of methods use AnchorGenerator, SSD Detectors uses `SSDAnchorGenerator`. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/anchor/anchor_generator.py#L10 for more details scales=[8], # Basic scale of the anchor, the area of the anchor in one position of a feature map will be scale * base_sizes ratios=[0.5, 1.0, 2.0], # The ratio between height and width. strides=[4, 8, 16, 32, 64]), # The strides of the anchor generator. This is consistent with the FPN feature strides. The strides will be taken as base_sizes if base_sizes is not set. bbox_coder=dict( # Config of box coder to encode and decode the boxes during training and testing type='DeltaXYWHBBoxCoder', # Type of box coder. 'DeltaXYWHBBoxCoder' is applied for most of methods. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py#L9 for more details. target_means=[0.0, 0.0, 0.0, 0.0], # The target means used to encode and decode boxes target_stds=[1.0, 1.0, 1.0, 1.0]), # The standard variance used to encode and decode boxes loss_cls=dict( # Config of loss function for the classification branch type='CrossEntropyLoss', # Type of loss for classification branch, we also support FocalLoss etc. use_sigmoid=True, # RPN usually perform two-class classification, so it usually uses sigmoid function. loss_weight=1.0), # Loss weight of the classification branch. loss_bbox=dict( # Config of loss function for the regression branch. type='L1Loss', # Type of loss, we also support many IoU Losses and smooth L1-loss, etc. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/smooth_l1_loss.py#L56 for implementation. loss_weight=1.0)), # Loss weight of the regression branch. roi_head=dict( # RoIHead encapsulates the second stage of two-stage/cascade detectors. type='StandardRoIHead', # Type of the RoI head. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/standard_roi_head.py#L10 for implementation. bbox_roi_extractor=dict( # RoI feature extractor for bbox regression. type='SingleRoIExtractor', # Type of the RoI feature extractor, most of methods uses SingleRoIExtractor. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/roi_extractors/single_level.py#L10 for details. roi_layer=dict( # Config of RoI Layer type='RoIAlign', # Type of RoI Layer, DeformRoIPoolingPack and ModulatedDeformRoIPoolingPack are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/roi_align/roi_align.py#L79 for details. output_size=7, # The output size of feature maps. sampling_ratio=0), # Sampling ratio when extracting the RoI features. 0 means adaptive ratio. out_channels=256, # output channels of the extracted feature. featmap_strides=[4, 8, 16, 32]), # Strides of multi-scale feature maps. It should be consistent to the architecture of the backbone. bbox_head=dict( # Config of box head in the RoIHead. type='Shared2FCBBoxHead', # Type of the bbox head, Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py#L177 for implementation details. in_channels=256, # Input channels for bbox head. This is consistent with the out_channels in roi_extractor fc_out_channels=1024, # Output feature channels of FC layers. 
roi_feat_size=7, # Size of RoI features num_classes=80, # Number of classes for classification bbox_coder=dict( # Box coder used in the second stage. type='DeltaXYWHBBoxCoder', # Type of box coder. 'DeltaXYWHBBoxCoder' is applied for most of methods. target_means=[0.0, 0.0, 0.0, 0.0], # Means used to encode and decode box target_stds=[0.1, 0.1, 0.2, 0.2]), # Standard variance for encoding and decoding. It is smaller since the boxes are more accurate. [0.1, 0.1, 0.2, 0.2] is a conventional setting. reg_class_agnostic=False, # Whether the regression is class agnostic. loss_cls=dict( # Config of loss function for the classification branch type='CrossEntropyLoss', # Type of loss for classification branch, we also support FocalLoss etc. use_sigmoid=False, # Whether to use sigmoid. loss_weight=1.0), # Loss weight of the classification branch. loss_bbox=dict( # Config of loss function for the regression branch. type='L1Loss', # Type of loss, we also support many IoU Losses and smooth L1-loss, etc. loss_weight=1.0)), # Loss weight of the regression branch. mask_roi_extractor=dict( # RoI feature extractor for mask generation. type='SingleRoIExtractor', # Type of the RoI feature extractor, most of methods uses SingleRoIExtractor. roi_layer=dict( # Config of RoI Layer that extracts features for instance segmentation type='RoIAlign', # Type of RoI Layer, DeformRoIPoolingPack and ModulatedDeformRoIPoolingPack are also supported output_size=14, # The output size of feature maps. sampling_ratio=0), # Sampling ratio when extracting the RoI features. out_channels=256, # Output channels of the extracted feature. featmap_strides=[4, 8, 16, 32]), # Strides of multi-scale feature maps. mask_head=dict( # Mask prediction head type='FCNMaskHead', # Type of mask head, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py#L21 for implementation details. num_convs=4, # Number of convolutional layers in mask head. in_channels=256, # Input channels, should be consistent with the output channels of mask roi extractor. conv_out_channels=256, # Output channels of the convolutional layer. num_classes=80, # Number of class to be segmented. loss_mask=dict( # Config of loss function for the mask branch. type='CrossEntropyLoss', # Type of loss used for segmentation use_mask=True, # Whether to only train the mask in the correct class. loss_weight=1.0))), # Loss weight of mask branch. train_cfg = dict( # Config of training hyperparameters for rpn and rcnn rpn=dict( # Training config of rpn assigner=dict( # Config of assigner type='MaxIoUAssigner', # Type of assigner, MaxIoUAssigner is used for many common detectors. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10 for more details. pos_iou_thr=0.7, # IoU >= threshold 0.7 will be taken as positive samples neg_iou_thr=0.3, # IoU < threshold 0.3 will be taken as negative samples min_pos_iou=0.3, # The minimal IoU threshold to take boxes as positive samples match_low_quality=True, # Whether to match the boxes under low quality (see API doc for more details). ignore_iof_thr=-1), # IoF threshold for ignoring bboxes sampler=dict( # Config of positive/negative sampler type='RandomSampler', # Type of sampler, PseudoSampler and other samplers are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8 for implementation details. 
num=256, # Number of samples pos_fraction=0.5, # The ratio of positive samples in the total samples. neg_pos_ub=-1, # The upper bound of negative samples based on the number of positive samples. add_gt_as_proposals=False), # Whether add GT as proposals after sampling. allowed_border=-1, # The border allowed after padding for valid anchors. pos_weight=-1, # The weight of positive samples during training. debug=False), # Whether to set the debug mode rpn_proposal=dict( # The config to generate proposals during training nms_across_levels=False, # Whether to do NMS for boxes across levels. Only work in `GARPNHead`, naive rpn does not support do nms cross levels. nms_pre=2000, # The number of boxes before NMS nms_post=1000, # The number of boxes to be kept by NMS, Only work in `GARPNHead`. max_per_img=1000, # The number of boxes to be kept after NMS. nms=dict( # Config of NMS type='nms', # Type of NMS iou_threshold=0.7 # NMS threshold ), min_bbox_size=0), # The allowed minimal box size rcnn=dict( # The config for the roi heads. assigner=dict( # Config of assigner for second stage, this is different for that in rpn type='MaxIoUAssigner', # Type of assigner, MaxIoUAssigner is used for all roi_heads for now. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10 for more details. pos_iou_thr=0.5, # IoU >= threshold 0.5 will be taken as positive samples neg_iou_thr=0.5, # IoU < threshold 0.5 will be taken as negative samples min_pos_iou=0.5, # The minimal IoU threshold to take boxes as positive samples match_low_quality=False, # Whether to match the boxes under low quality (see API doc for more details). ignore_iof_thr=-1), # IoF threshold for ignoring bboxes sampler=dict( type='RandomSampler', # Type of sampler, PseudoSampler and other samplers are also supported. Refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8 for implementation details. num=512, # Number of samples pos_fraction=0.25, # The ratio of positive samples in the total samples. neg_pos_ub=-1, # The upper bound of negative samples based on the number of positive samples. add_gt_as_proposals=True ), # Whether add GT as proposals after sampling. mask_size=28, # Size of mask pos_weight=-1, # The weight of positive samples during training. debug=False)), # Whether to set the debug mode test_cfg = dict( # Config for testing hyperparameters for rpn and rcnn rpn=dict( # The config to generate proposals during testing nms_across_levels=False, # Whether to do NMS for boxes across levels. Only work in `GARPNHead`, naive rpn does not support do nms cross levels. nms_pre=1000, # The number of boxes before NMS nms_post=1000, # The number of boxes to be kept by NMS, Only work in `GARPNHead`. max_per_img=1000, # The number of boxes to be kept after NMS. nms=dict( # Config of NMS type='nms', #Type of NMS iou_threshold=0.7 # NMS threshold ), min_bbox_size=0), # The allowed minimal box size rcnn=dict( # The config for the roi heads. 
score_thr=0.05, # Threshold to filter out boxes nms=dict( # Config of NMS in the second stage type='nms', # Type of NMS iou_thr=0.5), # NMS threshold max_per_img=100, # Max number of detections of each image mask_thr_binary=0.5))) # Threshold of mask prediction dataset_type = 'CocoDataset' # Dataset type, this will be used to define the dataset data_root = 'data/coco/' # Root path of data img_norm_cfg = dict( # Image normalization config to normalize the input images mean=[123.675, 116.28, 103.53], # Mean values used to pre-training the pre-trained backbone models std=[58.395, 57.12, 57.375], # Standard variance used to pre-training the pre-trained backbone models to_rgb=True ) # The channel orders of image used to pre-training the pre-trained backbone models train_pipeline = [ # Training pipeline dict(type='LoadImageFromFile'), # First pipeline to load images from file path dict( type='LoadAnnotations', # Second pipeline to load annotations for current image with_bbox=True, # Whether to use bounding box, True for detection with_mask=True, # Whether to use instance mask, True for instance segmentation poly2mask=False), # Whether to convert the polygon mask to instance mask, set False for acceleration and to save memory dict( type='Resize', # Augmentation pipeline that resize the images and their annotations img_scale=(1333, 800), # The largest scale of image keep_ratio=True ), # whether to keep the ratio between height and width. dict( type='RandomFlip', # Augmentation pipeline that flip the images and their annotations flip_ratio=0.5), # The ratio or probability to flip dict( type='Normalize', # Augmentation pipeline that normalize the input images mean=[123.675, 116.28, 103.53], # These keys are the same of img_norm_cfg since the std=[58.395, 57.12, 57.375], # keys of img_norm_cfg are used here as arguments to_rgb=True), dict( type='Pad', # Padding config size_divisor=32), # The number the padded images should be divisible dict(type='DefaultFormatBundle'), # Default format bundle to gather data in the pipeline dict( type='Collect', # Pipeline that decides which keys in the data should be passed to the detector keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) ] test_pipeline = [ dict(type='LoadImageFromFile'), # First pipeline to load images from file path dict( type='MultiScaleFlipAug', # An encapsulation that encapsulates the testing augmentations img_scale=(1333, 800), # Decides the largest scale for testing, used for the Resize pipeline flip=False, # Whether to flip images during testing transforms=[ dict(type='Resize', # Use resize augmentation keep_ratio=True), # Whether to keep the ratio between height and width, the img_scale set here will be suppressed by the img_scale set above. dict(type='RandomFlip'), # Thought RandomFlip is added in pipeline, it is not used because flip=False dict( type='Normalize', # Normalization config, the values are from img_norm_cfg mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True), dict( type='Pad', # Padding config to pad images divisible by 32. size_divisor=32), dict( type='ImageToTensor', # convert image to tensor keys=['img']), dict( type='Collect', # Collect pipeline that collect necessary keys for testing. 
keys=['img']) ]) ] data = dict( samples_per_gpu=2, # Batch size of a single GPU workers_per_gpu=2, # Worker to pre-fetch data for each single GPU train=dict( # Train dataset config type='CocoDataset', # Type of dataset, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/coco.py#L19 for details. ann_file='data/coco/annotations/instances_train2017.json', # Path of annotation file img_prefix='data/coco/train2017/', # Prefix of image path pipeline=[ # pipeline, this is passed by the train_pipeline created before. dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict( type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) ]), val=dict( # Validation dataset config type='CocoDataset', ann_file='data/coco/annotations/instances_val2017.json', img_prefix='data/coco/val2017/', pipeline=[ # Pipeline is passed by test_pipeline created before dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict( type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ]), test=dict( # Test dataset config, modify the ann_file for test-dev/test submission type='CocoDataset', ann_file='data/coco/annotations/instances_val2017.json', img_prefix='data/coco/val2017/', pipeline=[ # Pipeline is passed by test_pipeline created before dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict( type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ], samples_per_gpu=2 # Batch size of a single GPU used in testing )) evaluation = dict( # The config to build the evaluation hook, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7 for more details. interval=1, # Evaluation interval metric=['bbox', 'segm']) # Metrics used during evaluation optimizer = dict( # Config used to build optimizer, support all the optimizers in PyTorch whose arguments are also the same as those in PyTorch type='SGD', # Type of optimizers, refer to https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/optimizer/default_constructor.py#L13 for more details lr=0.02, # Learning rate of optimizers, see detail usages of the parameters in the documentation of PyTorch momentum=0.9, # Momentum weight_decay=0.0001) # Weight decay of SGD optimizer_config = dict( # Config used to build the optimizer hook, refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8 for implementation details. grad_clip=None) # Most of the methods do not use gradient clip lr_config = dict( # Learning rate scheduler config used to register LrUpdater hook policy='step', # The policy of scheduler, also support CosineAnnealing, Cyclic, etc. 
Refer to details of supported LrUpdater from https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9. warmup='linear', # The warmup policy, also support `exp` and `constant`. warmup_iters=500, # The number of iterations for warmup warmup_ratio= 0.001, # The ratio of the starting learning rate used for warmup step=[8, 11]) # Steps to decay the learning rate runner = dict( type='EpochBasedRunner', # Type of runner to use (i.e. IterBasedRunner or EpochBasedRunner) max_epochs=12) # Runner that runs the workflow in total max_epochs. For IterBasedRunner use `max_iters` checkpoint_config = dict( # Config to set the checkpoint hook, Refer to https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py for implementation. interval=1) # The save interval is 1 log_config = dict( # config to register logger hook interval=50, # Interval to print the log hooks=[ dict(type='TextLoggerHook', by_epoch=False), dict(type='TensorboardLoggerHook', by_epoch=False), dict(type='MMDetWandbHook', by_epoch=False, # The Wandb logger is also supported, It requires `wandb` to be installed. init_kwargs={'entity': "OpenMMLab", # The entity used to log on Wandb 'project': "MMDet", # Project name in WandB 'config': cfg_dict}), # Check https://docs.wandb.ai/ref/python/init for more init arguments. # MMDetWandbHook is mmdet implementation of WandbLoggerHook. ClearMLLoggerHook, DvcliveLoggerHook, MlflowLoggerHook, NeptuneLoggerHook, PaviLoggerHook, SegmindLoggerHook are also supported based on MMCV implementation. ]) # The logger used to record the training process. dist_params = dict(backend='nccl') # Parameters to setup distributed training, the port can also be set. log_level = 'INFO' # The level of logging. load_from = None # load models as a pre-trained model from a given path. This will not resume training. resume_from = None # Resume checkpoints from a given path, the training will be resumed from the epoch when the checkpoint's is saved. workflow = [('train', 1)] # Workflow for runner. [('train', 1)] means there is only one workflow and the workflow named 'train' is executed once. The workflow trains the model by 12 epochs according to the total_epochs. work_dir = 'work_dir' # Directory to save the model checkpoints and logs for the current experiments. ``` ## FAQ ### Ignore some fields in the base configs Sometimes, you may set `_delete_=True` to ignore some of fields in base configs. You may refer to [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html#inherit-from-base-config-with-ignored-fields) for simple illustration. In MMDetection, for example, to change the backbone of Mask R-CNN with the following config. ```python model = dict( type='MaskRCNN', pretrained='torchvision://resnet50', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch'), neck=dict(...), rpn_head=dict(...), roi_head=dict(...)) ``` `ResNet` and `HRNet` use different keywords to construct. 
```python _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' model = dict( pretrained='open-mmlab://msra/hrnetv2_w32', backbone=dict( _delete_=True, type='HRNet', extra=dict( stage1=dict( num_modules=1, num_branches=1, block='BOTTLENECK', num_blocks=(4, ), num_channels=(64, )), stage2=dict( num_modules=1, num_branches=2, block='BASIC', num_blocks=(4, 4), num_channels=(32, 64)), stage3=dict( num_modules=4, num_branches=3, block='BASIC', num_blocks=(4, 4, 4), num_channels=(32, 64, 128)), stage4=dict( num_modules=3, num_branches=4, block='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256)))), neck=dict(...)) ``` The `_delete_=True` would replace all old keys in `backbone` field with new keys. ### Use intermediate variables in configs Some intermediate variables are used in the configs files, like `train_pipeline`/`test_pipeline` in datasets. It's worth noting that when modifying intermediate variables in the children configs, user need to pass the intermediate variables into corresponding fields again. For example, we would like to use multi scale strategy to train a Mask R-CNN. `train_pipeline`/`test_pipeline` are intermediate variable we would like modify. ```python _base_ = './mask_rcnn_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode="value", keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ``` We first define the new `train_pipeline`/`test_pipeline` and pass them into `data`. Similarly, if we would like to switch from `SyncBN` to `BN` or `MMSyncBN`, we need to substitute every `norm_cfg` in the config. ```python _base_ = './mask_rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='BN', requires_grad=True) model = dict( backbone=dict(norm_cfg=norm_cfg), neck=dict(norm_cfg=norm_cfg), ...) ```
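As a closing example for this tutorial, the overrides described in the "Modify config through script arguments" section can also be applied programmatically, which is handy in notebooks or custom scripts. The sketch below assumes MMCV's `Config` API (`fromfile`, `merge_from_dict`, `pretty_text`); the config path is only a placeholder:

```python
from mmcv import Config

# Load a config and apply the same kind of overrides that --cfg-options
# performs on the command line.
cfg = Config.fromfile('configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')

# Equivalent to: --cfg-options model.backbone.norm_eval=False data.samples_per_gpu=4
cfg.merge_from_dict({
    'model.backbone.norm_eval': False,
    'data.samples_per_gpu': 4,
})

# Inspect the fully merged config, similar to tools/misc/print_config.py
print(cfg.pretty_text)
```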
33,858
60.338768
251
md
mmdetection
mmdetection-master/docs/en/tutorials/customize_dataset.md
# Tutorial 2: Customize Datasets ## Support new data format To support a new data format, you can either convert them to existing formats (COCO format or PASCAL format) or directly convert them to the middle format. You could also choose to convert them offline (before training by a script) or online (implement a new dataset and do the conversion at training). In MMDetection, we recommend to convert the data into COCO formats and do the conversion offline, thus you only need to modify the config's data annotation paths and classes after the conversion of your data. ### Reorganize new data formats to existing format The simplest way is to convert your dataset to existing dataset formats (COCO or PASCAL VOC). The annotation json files in COCO format has the following necessary keys: ```python 'images': [ { 'file_name': 'COCO_val2014_000000001268.jpg', 'height': 427, 'width': 640, 'id': 1268 }, ... ], 'annotations': [ { 'segmentation': [[192.81, 247.09, ... 219.03, 249.06]], # if you have mask labels 'area': 1035.749, 'iscrowd': 0, 'image_id': 1268, 'bbox': [192.81, 224.8, 74.73, 33.43], 'category_id': 16, 'id': 42986 }, ... ], 'categories': [ {'id': 0, 'name': 'car'}, ] ``` There are three necessary keys in the json file: - `images`: contains a list of images with their information like `file_name`, `height`, `width`, and `id`. - `annotations`: contains the list of instance annotations. - `categories`: contains the list of categories names and their ID. After the data pre-processing, there are two steps for users to train the customized new dataset with existing format (e.g. COCO format): 1. Modify the config file for using the customized dataset. 2. Check the annotations of the customized dataset. Here we give an example to show the above two steps, which uses a customized dataset of 5 classes with COCO format to train an existing Cascade Mask R-CNN R50-FPN detector. #### 1. Modify the config file for using the customized dataset There are two aspects involved in the modification of config file: 1. The `data` field. Specifically, you need to explicitly add the `classes` fields in `data.train`, `data.val` and `data.test`. 2. The `num_classes` field in the `model` part. Explicitly over-write all the `num_classes` from default value (e.g. 80 in COCO) to your classes number. In `configs/my_custom_config.py`: ```python # the new config inherits the base configs to highlight the necessary modification _base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' # 1. dataset settings dataset_type = 'CocoDataset' classes = ('a', 'b', 'c', 'd', 'e') data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, # explicitly add your class names to the field `classes` classes=classes, ann_file='path/to/your/train/annotation_data', img_prefix='path/to/your/train/image_data'), val=dict( type=dataset_type, # explicitly add your class names to the field `classes` classes=classes, ann_file='path/to/your/val/annotation_data', img_prefix='path/to/your/val/image_data'), test=dict( type=dataset_type, # explicitly add your class names to the field `classes` classes=classes, ann_file='path/to/your/test/annotation_data', img_prefix='path/to/your/test/image_data')) # 2. model settings # explicitly over-write all the `num_classes` field from default 80 to 5. model = dict( roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', # explicitly over-write all the `num_classes` field from default 80 to 5. 
num_classes=5), dict( type='Shared2FCBBoxHead', # explicitly over-write all the `num_classes` field from default 80 to 5. num_classes=5), dict( type='Shared2FCBBoxHead', # explicitly over-write all the `num_classes` field from default 80 to 5. num_classes=5)], # explicitly over-write all the `num_classes` field from default 80 to 5. mask_head=dict(num_classes=5))) ``` #### 2. Check the annotations of the customized dataset Assuming your customized dataset is COCO format, make sure you have the correct annotations in the customized dataset: 1. The length for `categories` field in annotations should exactly equal the tuple length of `classes` fields in your config, meaning the number of classes (e.g. 5 in this example). 2. The `classes` fields in your config file should have exactly the same elements and the same order with the `name` in `categories` of annotations. MMDetection automatically maps the uncontinuous `id` in `categories` to the continuous label indices, so the string order of `name` in `categories` field affects the order of label indices. Meanwhile, the string order of `classes` in config affects the label text during visualization of predicted bounding boxes. 3. The `category_id` in `annotations` field should be valid, i.e., all values in `category_id` should belong to `id` in `categories`. Here is a valid example of annotations: ```python 'annotations': [ { 'segmentation': [[192.81, 247.09, ... 219.03, 249.06]], # if you have mask labels 'area': 1035.749, 'iscrowd': 0, 'image_id': 1268, 'bbox': [192.81, 224.8, 74.73, 33.43], 'category_id': 16, 'id': 42986 }, ... ], # MMDetection automatically maps the uncontinuous `id` to the continuous label indices. 'categories': [ {'id': 1, 'name': 'a'}, {'id': 3, 'name': 'b'}, {'id': 4, 'name': 'c'}, {'id': 16, 'name': 'd'}, {'id': 17, 'name': 'e'}, ] ``` We use this way to support CityScapes dataset. The script is in [cityscapes.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/dataset_converters/cityscapes.py) and we also provide the finetuning [configs](https://github.com/open-mmlab/mmdetection/blob/master/configs/cityscapes). **Note** 1. For instance segmentation datasets, **MMDetection only supports evaluating mask AP of dataset in COCO format for now**. 2. It is recommended to convert the data offline before training, thus you can still use `CocoDataset` and only need to modify the path of annotations and the training classes. ### Reorganize new data format to middle format It is also fine if you do not want to convert the annotation format to COCO or PASCAL format. Actually, we define a simple annotation format and all existing datasets are processed to be compatible with it, either online or offline. The annotation of a dataset is a list of dict, each dict corresponds to an image. There are 3 field `filename` (relative path), `width`, `height` for testing, and an additional field `ann` for training. `ann` is also a dict containing at least 2 fields: `bboxes` and `labels`, both of which are numpy arrays. Some datasets may provide annotations like crowd/difficult/ignored bboxes, we use `bboxes_ignore` and `labels_ignore` to cover them. Here is an example. ```python [ { 'filename': 'a.jpg', 'width': 1280, 'height': 720, 'ann': { 'bboxes': <np.ndarray, float32> (n, 4), 'labels': <np.ndarray, int64> (n, ), 'bboxes_ignore': <np.ndarray, float32> (k, 4), 'labels_ignore': <np.ndarray, int64> (k, ) (optional field) } }, ... ] ``` There are two ways to work with custom datasets. 
- online conversion You can write a new Dataset class inherited from `CustomDataset`, and overwrite two methods `load_annotations(self, ann_file)` and `get_ann_info(self, idx)`, like [CocoDataset](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/coco.py) and [VOCDataset](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/voc.py). - offline conversion You can convert the annotation format to the expected format above and save it to a pickle or json file, like [pascal_voc.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/dataset_converters/pascal_voc.py). Then you can simply use `CustomDataset`. ### An example of customized dataset Assume the annotation is in a new format in text files. The bounding boxes annotations are stored in text file `annotation.txt` as the following ``` # 000001.jpg 1280 720 2 10 20 40 60 1 20 40 50 60 2 # 000002.jpg 1280 720 3 50 20 40 60 2 20 40 30 45 2 30 40 50 60 3 ``` We can create a new dataset in `mmdet/datasets/my_dataset.py` to load the data. ```python import mmcv import numpy as np from .builder import DATASETS from .custom import CustomDataset @DATASETS.register_module() class MyDataset(CustomDataset): CLASSES = ('person', 'bicycle', 'car', 'motorcycle') def load_annotations(self, ann_file): ann_list = mmcv.list_from_file(ann_file) data_infos = [] for i, ann_line in enumerate(ann_list): if ann_line != '#': continue img_shape = ann_list[i + 2].split(' ') width = int(img_shape[0]) height = int(img_shape[1]) bbox_number = int(ann_list[i + 3]) anns = ann_line.split(' ') bboxes = [] labels = [] for anns in ann_list[i + 4:i + 4 + bbox_number]: bboxes.append([float(ann) for ann in anns[:4]]) labels.append(int(anns[4])) data_infos.append( dict( filename=ann_list[i + 1], width=width, height=height, ann=dict( bboxes=np.array(bboxes).astype(np.float32), labels=np.array(labels).astype(np.int64)) )) return data_infos def get_ann_info(self, idx): return self.data_infos[idx]['ann'] ``` Then in the config, to use `MyDataset` you can modify the config as the following ```python dataset_A_train = dict( type='MyDataset', ann_file = 'image_list.txt', pipeline=train_pipeline ) ``` ## Customize datasets by dataset wrappers MMDetection also supports many dataset wrappers to mix the dataset or modify the dataset distribution for training. Currently it supports to three dataset wrappers as below: - `RepeatDataset`: simply repeat the whole dataset. - `ClassBalancedDataset`: repeat dataset in a class balanced manner. - `ConcatDataset`: concat datasets. ### Repeat dataset We use `RepeatDataset` as wrapper to repeat the dataset. For example, suppose the original dataset is `Dataset_A`, to repeat it, the config looks like the following ```python dataset_A_train = dict( type='RepeatDataset', times=N, dataset=dict( # This is the original config of Dataset_A type='Dataset_A', ... pipeline=train_pipeline ) ) ``` ### Class balanced dataset We use `ClassBalancedDataset` as wrapper to repeat the dataset based on category frequency. The dataset to repeat needs to instantiate function `self.get_cat_ids(idx)` to support `ClassBalancedDataset`. For example, to repeat `Dataset_A` with `oversample_thr=1e-3`, the config looks like the following ```python dataset_A_train = dict( type='ClassBalancedDataset', oversample_thr=1e-3, dataset=dict( # This is the original config of Dataset_A type='Dataset_A', ... 
pipeline=train_pipeline ) ) ``` You may refer to [source code](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/dataset_wrappers.py#L211) for details. ### Concatenate dataset There are three ways to concatenate the dataset. 1. If the datasets you want to concatenate are in the same type with different annotation files, you can concatenate the dataset configs like the following. ```python dataset_A_train = dict( type='Dataset_A', ann_file = ['anno_file_1', 'anno_file_2'], pipeline=train_pipeline ) ``` If the concatenated dataset is used for test or evaluation, this manner supports to evaluate each dataset separately. To test the concatenated datasets as a whole, you can set `separate_eval=False` as below. ```python dataset_A_train = dict( type='Dataset_A', ann_file = ['anno_file_1', 'anno_file_2'], separate_eval=False, pipeline=train_pipeline ) ``` 2. In case the dataset you want to concatenate is different, you can concatenate the dataset configs like the following. ```python dataset_A_train = dict() dataset_B_train = dict() data = dict( imgs_per_gpu=2, workers_per_gpu=2, train = [ dataset_A_train, dataset_B_train ], val = dataset_A_val, test = dataset_A_test ) ``` If the concatenated dataset is used for test or evaluation, this manner also supports to evaluate each dataset separately. 3. We also support to define `ConcatDataset` explicitly as the following. ```python dataset_A_val = dict() dataset_B_val = dict() data = dict( imgs_per_gpu=2, workers_per_gpu=2, train=dataset_A_train, val=dict( type='ConcatDataset', datasets=[dataset_A_val, dataset_B_val], separate_eval=False)) ``` This manner allows users to evaluate all the datasets as a single one by setting `separate_eval=False`. **Note:** 1. The option `separate_eval=False` assumes the datasets use `self.data_infos` during evaluation. Therefore, COCO datasets do not support this behavior since COCO datasets do not fully rely on `self.data_infos` for evaluation. Combining different types of datasets and evaluating them as a whole is not tested thus is not suggested. 2. Evaluating `ClassBalancedDataset` and `RepeatDataset` is not supported thus evaluating concatenated datasets of these types is also not supported. A more complex example that repeats `Dataset_A` and `Dataset_B` by N and M times, respectively, and then concatenates the repeated datasets is as the following. ```python dataset_A_train = dict( type='RepeatDataset', times=N, dataset=dict( type='Dataset_A', ... pipeline=train_pipeline ) ) dataset_A_val = dict( ... pipeline=test_pipeline ) dataset_A_test = dict( ... pipeline=test_pipeline ) dataset_B_train = dict( type='RepeatDataset', times=M, dataset=dict( type='Dataset_B', ... pipeline=train_pipeline ) ) data = dict( imgs_per_gpu=2, workers_per_gpu=2, train = [ dataset_A_train, dataset_B_train ], val = dataset_A_val, test = dataset_A_test ) ``` ## Modify Dataset Classes With existing dataset types, we can modify the class names of them to train subset of the annotations. For example, if you want to train only three classes of the current dataset, you can modify the classes of dataset. The dataset will filter out the ground truth boxes of other classes automatically. ```python classes = ('person', 'bicycle', 'car') data = dict( train=dict(classes=classes), val=dict(classes=classes), test=dict(classes=classes)) ``` MMDetection V2.0 also supports to read the classes from a file, which is common in real applications. For example, assume the `classes.txt` contains the name of classes as the following. 
``` person bicycle car ``` Users can set the classes as a file path, the dataset will load it and convert it to a list automatically. ```python classes = 'path/to/classes.txt' data = dict( train=dict(classes=classes), val=dict(classes=classes), test=dict(classes=classes)) ``` **Note**: - Before MMDetection v2.5.0, the dataset will filter out the empty GT images automatically if the classes are set and there is no way to disable that through config. This is an undesirable behavior and introduces confusion because if the classes are not set, the dataset only filter the empty GT images when `filter_empty_gt=True` and `test_mode=False`. After MMDetection v2.5.0, we decouple the image filtering process and the classes modification, i.e., the dataset will only filter empty GT images when `filter_empty_gt=True` and `test_mode=False`, no matter whether the classes are set. Thus, setting the classes only influences the annotations of classes used for training and users could decide whether to filter empty GT images by themselves. - Since the middle format only has box labels and does not contain the class names, when using `CustomDataset`, users cannot filter out the empty GT images through configs but only do this offline. - Please remember to modify the `num_classes` in the head when specifying `classes` in dataset. We implemented [NumClassCheckHook](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/utils.py) to check whether the numbers are consistent since v2.9.0(after PR#4508). - The features for setting dataset classes and dataset filtering will be refactored to be more user-friendly in the future (depends on the progress). ## COCO Panoptic Dataset Now we support COCO Panoptic Dataset, the format of panoptic annotations is different from COCO format. Both the foreground and the background will exist in the annotation file. The annotation json files in COCO Panoptic format has the following necessary keys: ```python 'images': [ { 'file_name': '000000001268.jpg', 'height': 427, 'width': 640, 'id': 1268 }, ... ] 'annotations': [ { 'filename': '000000001268.jpg', 'image_id': 1268, 'segments_info': [ { 'id':8345037, # One-to-one correspondence with the id in the annotation map. 'category_id': 51, 'iscrowd': 0, 'bbox': (x1, y1, w, h), # The bbox of the background is the outer rectangle of its mask. 'area': 24315 }, ... ] }, ... ] 'categories': [ # including both foreground categories and background categories {'id': 0, 'name': 'person'}, ... ] ``` Moreover, the `seg_prefix` must be set to the path of the panoptic annotation images. ```python data = dict( type='CocoPanopticDataset', train=dict( seg_prefix = 'path/to/your/train/panoptic/image_annotation_data' ), val=dict( seg_prefix = 'path/to/your/train/panoptic/image_annotation_data' ) ) ```
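Since the tutorial recommends converting custom data to COCO format offline, here is a minimal sketch of such a converter. It is not part of MMDetection; the function name and inputs are placeholders for your own data. It takes a list of middle-format records, as described above, and writes a COCO-style json with the three required keys:

```python
import json

def middle_to_coco(middle_format, class_names, out_file):
    """Convert middle-format records to a minimal COCO-style json (sketch)."""
    images, annotations = [], []
    ann_id = 1
    for img_id, info in enumerate(middle_format):
        images.append(dict(id=img_id, file_name=info['filename'],
                           width=info['width'], height=info['height']))
        for bbox, label in zip(info['ann']['bboxes'], info['ann']['labels']):
            x1, y1, x2, y2 = map(float, bbox)
            annotations.append(dict(
                id=ann_id, image_id=img_id, category_id=int(label),
                bbox=[x1, y1, x2 - x1, y2 - y1],  # COCO uses xywh boxes
                area=(x2 - x1) * (y2 - y1), iscrowd=0))
            ann_id += 1
    categories = [dict(id=i, name=name) for i, name in enumerate(class_names)]
    with open(out_file, 'w') as f:
        json.dump(dict(images=images, annotations=annotations,
                       categories=categories), f)
```

After such a conversion, you can keep using `CocoDataset` and only change `ann_file`, `img_prefix` and `classes` in the config.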
18,750
33.532228
749
md
mmdetection
mmdetection-master/docs/en/tutorials/customize_losses.md
# Tutorial 6: Customize Losses

MMDetection provides users with different loss functions. But the default configuration may not be applicable to every dataset or model, so users may want to modify a specific loss to adapt it to a new situation.

This tutorial first elaborates the computation pipeline of losses, then gives some instructions on how to modify each step. The modifications can be categorized as tweaking and weighting.

## Computation pipeline of a loss

Given the input prediction and target, as well as the weights, a loss function maps the input tensor to the final loss scalar. The mapping can be divided into five steps:

1. Set the sampling method to sample positive and negative samples.
2. Get **element-wise** or **sample-wise** loss by the loss kernel function.
3. Weight the loss with a weight tensor **element-wise**.
4. Reduce the loss tensor to a **scalar**.
5. Weight the loss with a **scalar**.

## Set sampling method (step 1)

For some loss functions, sampling strategies are needed to avoid imbalance between positive and negative samples.

For example, when using `CrossEntropyLoss` in the RPN head, we need to set `RandomSampler` in `train_cfg`:

```python
train_cfg=dict(
    rpn=dict(
        sampler=dict(
            type='RandomSampler',
            num=256,
            pos_fraction=0.5,
            neg_pos_ub=-1,
            add_gt_as_proposals=False)))
```

For some other losses which have a built-in positive/negative sample balance mechanism, such as Focal Loss, GHMC, and QualityFocalLoss, the sampler is no longer necessary.

## Tweaking loss

Tweaking a loss is more related to steps 2, 4 and 5, and most modifications can be specified in the config. Here we take [Focal Loss (FL)](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/focal_loss.py) as an example. The following code snippets show the constructor and the config of FL respectively; they are in one-to-one correspondence.

```python
@LOSSES.register_module()
class FocalLoss(nn.Module):

    def __init__(self,
                 use_sigmoid=True,
                 gamma=2.0,
                 alpha=0.25,
                 reduction='mean',
                 loss_weight=1.0):
```

```python
loss_cls=dict(
    type='FocalLoss',
    use_sigmoid=True,
    gamma=2.0,
    alpha=0.25,
    loss_weight=1.0)
```

### Tweaking hyper-parameters (step 2)

`gamma` and `alpha` are two hyper-parameters of the Focal Loss. Say we want to change `gamma` to 1.5 and `alpha` to 0.5; then we can specify them in the config as follows:

```python
loss_cls=dict(
    type='FocalLoss',
    use_sigmoid=True,
    gamma=1.5,
    alpha=0.5,
    loss_weight=1.0)
```

### Tweaking the way of reduction (step 4)

The default reduction is `mean` for FL. Say we want to change the reduction from `mean` to `sum`; we can specify it in the config as follows:

```python
loss_cls=dict(
    type='FocalLoss',
    use_sigmoid=True,
    gamma=2.0,
    alpha=0.25,
    loss_weight=1.0,
    reduction='sum')
```

### Tweaking loss weight (step 5)

The loss weight here is a scalar which controls the relative weight of different losses in multi-task learning, e.g. classification loss and regression loss. Say we want to change the loss weight of the classification loss to 0.5; we can specify it in the config as follows:

```python
loss_cls=dict(
    type='FocalLoss',
    use_sigmoid=True,
    gamma=2.0,
    alpha=0.25,
    loss_weight=0.5)
```

## Weighting loss (step 3)

Weighting a loss means re-weighting it element-wise. To be more specific, we multiply the loss tensor by a weight tensor of the same shape, so different entries of the loss can be scaled differently, hence "element-wise".
The loss weight varies across models and is highly context-dependent, but overall there are two kinds of loss weights: `label_weights` for the classification loss and `bbox_weights` for the bbox regression loss. You can find them in the `get_targets` method of the corresponding head. Here we take [ATSSHead](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/atss_head.py#L530) as an example, which inherits from [AnchorHead](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/anchor_head.py) but overwrites its `get_targets` method to yield different `label_weights` and `bbox_weights`.

```python
class ATSSHead(AnchorHead):

    ...

    def get_targets(self,
                    anchor_list,
                    valid_flag_list,
                    gt_bboxes_list,
                    img_metas,
                    gt_bboxes_ignore_list=None,
                    gt_labels_list=None,
                    label_channels=1,
                    unmap_outputs=True):
```
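To make the element-wise weighting concrete, the following standalone PyTorch sketch (illustrative only, not MMDetection code) walks through steps 2-4 of the pipeline with a per-anchor weight tensor: ignored anchors get weight 0 and therefore contribute nothing to the reduced loss.

```python
import torch
import torch.nn.functional as F

logits = torch.randn(6, 4)               # 6 anchors, 4 classes
labels = torch.tensor([0, 1, 3, 2, 1, 0])
label_weights = torch.tensor([1., 1., 0., 1., 0.5, 1.])  # 0 = ignored anchor

per_anchor_loss = F.cross_entropy(logits, labels, reduction='none')  # step 2: element-wise loss
weighted = per_anchor_loss * label_weights                           # step 3: element-wise weighting
loss = weighted.sum() / label_weights.sum().clamp(min=1)             # step 4: reduce to a scalar
```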
4,777
36.622047
637
md
mmdetection
mmdetection-master/docs/en/tutorials/customize_models.md
# Tutorial 4: Customize Models We basically categorize model components into 5 types. - backbone: usually an FCN network to extract feature maps, e.g., ResNet, MobileNet. - neck: the component between backbones and heads, e.g., FPN, PAFPN. - head: the component for specific tasks, e.g., bbox prediction and mask prediction. - roi extractor: the part for extracting RoI features from feature maps, e.g., RoI Align. - loss: the component in head for calculating losses, e.g., FocalLoss, L1Loss, and GHMLoss. ## Develop new components ### Add a new backbone Here we show how to develop new components with an example of MobileNet. #### 1. Define a new backbone (e.g. MobileNet) Create a new file `mmdet/models/backbones/mobilenet.py`. ```python import torch.nn as nn from ..builder import BACKBONES @BACKBONES.register_module() class MobileNet(nn.Module): def __init__(self, arg1, arg2): pass def forward(self, x): # should return a tuple pass ``` #### 2. Import the module You can either add the following line to `mmdet/models/backbones/__init__.py` ```python from .mobilenet import MobileNet ``` or alternatively add ```python custom_imports = dict( imports=['mmdet.models.backbones.mobilenet'], allow_failed_imports=False) ``` to the config file to avoid modifying the original code. #### 3. Use the backbone in your config file ```python model = dict( ... backbone=dict( type='MobileNet', arg1=xxx, arg2=xxx), ... ``` ### Add new necks #### 1. Define a neck (e.g. PAFPN) Create a new file `mmdet/models/necks/pafpn.py`. ```python from ..builder import NECKS @NECKS.register_module() class PAFPN(nn.Module): def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False): pass def forward(self, inputs): # implementation is ignored pass ``` #### 2. Import the module You can either add the following line to `mmdet/models/necks/__init__.py`, ```python from .pafpn import PAFPN ``` or alternatively add ```python custom_imports = dict( imports=['mmdet.models.necks.pafpn.py'], allow_failed_imports=False) ``` to the config file and avoid modifying the original code. #### 3. Modify the config file ```python neck=dict( type='PAFPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5) ``` ### Add new heads Here we show how to develop a new head with the example of [Double Head R-CNN](https://arxiv.org/abs/1904.06493) as the following. First, add a new bbox head in `mmdet/models/roi_heads/bbox_heads/double_bbox_head.py`. Double Head R-CNN implements a new bbox head for object detection. To implement a bbox head, basically we need to implement three functions of the new module as the following. ```python from mmdet.models.builder import HEADS from .bbox_head import BBoxHead @HEADS.register_module() class DoubleConvFCBBoxHead(BBoxHead): r"""Bbox head used in Double-Head R-CNN /-> cls /-> shared convs -> \-> reg roi features /-> cls \-> shared fc -> \-> reg """ # noqa: W605 def __init__(self, num_convs=0, num_fcs=0, conv_out_channels=1024, fc_out_channels=1024, conv_cfg=None, norm_cfg=dict(type='BN'), **kwargs): kwargs.setdefault('with_avg_pool', True) super(DoubleConvFCBBoxHead, self).__init__(**kwargs) def forward(self, x_cls, x_reg): ``` Second, implement a new RoI Head if it is necessary. We plan to inherit the new `DoubleHeadRoIHead` from `StandardRoIHead`. We can find that a `StandardRoIHead` already implements the following functions. 
```python import torch from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler from ..builder import HEADS, build_head, build_roi_extractor from .base_roi_head import BaseRoIHead from .test_mixins import BBoxTestMixin, MaskTestMixin @HEADS.register_module() class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): """Simplest base roi head including one bbox head and one mask head. """ def init_assigner_sampler(self): def init_bbox_head(self, bbox_roi_extractor, bbox_head): def init_mask_head(self, mask_roi_extractor, mask_head): def forward_dummy(self, x, proposals): def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): def _bbox_forward(self, x, rois): def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas): def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, img_metas): def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): def simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Test without augmentation.""" ``` Double Head's modification is mainly in the bbox_forward logic, and it inherits other logics from the `StandardRoIHead`. In the `mmdet/models/roi_heads/double_roi_head.py`, we implement the new RoI Head as the following: ```python from ..builder import HEADS from .standard_roi_head import StandardRoIHead @HEADS.register_module() class DoubleHeadRoIHead(StandardRoIHead): """RoI head for Double Head RCNN https://arxiv.org/abs/1904.06493 """ def __init__(self, reg_roi_scale_factor, **kwargs): super(DoubleHeadRoIHead, self).__init__(**kwargs) self.reg_roi_scale_factor = reg_roi_scale_factor def _bbox_forward(self, x, rois): bbox_cls_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) bbox_reg_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor) if self.with_shared_head: bbox_cls_feats = self.shared_head(bbox_cls_feats) bbox_reg_feats = self.shared_head(bbox_reg_feats) cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_cls_feats) return bbox_results ``` Last, the users need to add the module in `mmdet/models/bbox_heads/__init__.py` and `mmdet/models/roi_heads/__init__.py` thus the corresponding registry could find and load them. Alternatively, the users can add ```python custom_imports=dict( imports=['mmdet.models.roi_heads.double_roi_head', 'mmdet.models.bbox_heads.double_bbox_head']) ``` to the config file and achieve the same goal. The config file of Double Head R-CNN is as the following ```python _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( type='DoubleHeadRoIHead', reg_roi_scale_factor=1.3, bbox_head=dict( _delete_=True, type='DoubleConvFCBBoxHead', num_convs=4, num_fcs=2, in_channels=256, conv_out_channels=1024, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0)))) ``` Since MMDetection 2.0, the config system supports to inherit configs such that the users can focus on the modification. 
The Double Head R-CNN mainly uses a new `DoubleHeadRoIHead` and a new `DoubleConvFCBBoxHead`; the arguments are set according to the `__init__` function of each module.

### Add new loss

Assume you want to add a new loss called `MyLoss` for bounding box regression.
To add a new loss function, the users need to implement it in `mmdet/models/losses/my_loss.py`.
The decorator `weighted_loss` enables the loss to be weighted for each element.

```python
import torch
import torch.nn as nn

from ..builder import LOSSES
from .utils import weighted_loss


@weighted_loss
def my_loss(pred, target):
    assert pred.size() == target.size() and target.numel() > 0
    loss = torch.abs(pred - target)
    return loss


@LOSSES.register_module()
class MyLoss(nn.Module):

    def __init__(self, reduction='mean', loss_weight=1.0):
        super(MyLoss, self).__init__()
        self.reduction = reduction
        self.loss_weight = loss_weight

    def forward(self,
                pred,
                target,
                weight=None,
                avg_factor=None,
                reduction_override=None):
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction)
        loss_bbox = self.loss_weight * my_loss(
            pred, target, weight, reduction=reduction, avg_factor=avg_factor)
        return loss_bbox
```

Then the users need to add it to `mmdet/models/losses/__init__.py`.

```python
from .my_loss import MyLoss, my_loss
```

Alternatively, you can add

```python
custom_imports=dict(
    imports=['mmdet.models.losses.my_loss'])
```

to the config file and achieve the same goal.

To use it, modify the `loss_xxx` field.
Since MyLoss is for regression, you need to modify the `loss_bbox` field in the head.

```python
loss_bbox=dict(type='MyLoss', loss_weight=1.0)
```
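As a quick sanity check, you can build the registered loss from the same config dict and call it on random tensors. This is a minimal sketch and assumes `MyLoss` has already been registered as described above (via the `__init__.py` import or `custom_imports`):

```python
import torch

from mmdet.models import build_loss

# build the loss from the same config dict used in the model config above
loss_bbox = build_loss(dict(type='MyLoss', loss_weight=1.0))

pred = torch.rand(4, 4)
target = torch.rand(4, 4)
# with the default 'mean' reduction this is the mean absolute error
print(loss_bbox(pred, target))
```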
10,216
27.068681
204
md
mmdetection
mmdetection-master/docs/en/tutorials/customize_runtime.md
# Tutorial 5: Customize Runtime Settings ## Customize optimization settings ### Customize optimizer supported by Pytorch We already support to use all the optimizers implemented by PyTorch, and the only modification is to change the `optimizer` field of config files. For example, if you want to use `ADAM` (note that the performance could drop a lot), the modification could be as the following. ```python optimizer = dict(type='Adam', lr=0.0003, weight_decay=0.0001) ``` To modify the learning rate of the model, the users only need to modify the `lr` in the config of optimizer. The users can directly set arguments following the [API doc](https://pytorch.org/docs/stable/optim.html?highlight=optim#module-torch.optim) of PyTorch. ### Customize self-implemented optimizer #### 1. Define a new optimizer A customized optimizer could be defined as following. Assume you want to add a optimizer named `MyOptimizer`, which has arguments `a`, `b`, and `c`. You need to create a new directory named `mmdet/core/optimizer`. And then implement the new optimizer in a file, e.g., in `mmdet/core/optimizer/my_optimizer.py`: ```python from .registry import OPTIMIZERS from torch.optim import Optimizer @OPTIMIZERS.register_module() class MyOptimizer(Optimizer): def __init__(self, a, b, c) ``` #### 2. Add the optimizer to registry To find the above module defined above, this module should be imported into the main namespace at first. There are two options to achieve it. - Modify `mmdet/core/optimizer/__init__.py` to import it. The newly defined module should be imported in `mmdet/core/optimizer/__init__.py` so that the registry will find the new module and add it: ```python from .my_optimizer import MyOptimizer ``` - Use `custom_imports` in the config to manually import it ```python custom_imports = dict(imports=['mmdet.core.optimizer.my_optimizer'], allow_failed_imports=False) ``` The module `mmdet.core.optimizer.my_optimizer` will be imported at the beginning of the program and the class `MyOptimizer` is then automatically registered. Note that only the package containing the class `MyOptimizer` should be imported. `mmdet.core.optimizer.my_optimizer.MyOptimizer` **cannot** be imported directly. Actually users can use a totally different file directory structure using this importing method, as long as the module root can be located in `PYTHONPATH`. #### 3. Specify the optimizer in the config file Then you can use `MyOptimizer` in `optimizer` field of config files. In the configs, the optimizers are defined by the field `optimizer` like the following: ```python optimizer = dict(type='SGD', lr=0.02, momentum=0.9, weight_decay=0.0001) ``` To use your own optimizer, the field can be changed to ```python optimizer = dict(type='MyOptimizer', a=a_value, b=b_value, c=c_value) ``` ### Customize optimizer constructor Some models may have some parameter-specific settings for optimization, e.g. weight decay for BatchNorm layers. The users can do those fine-grained parameter tuning through customizing optimizer constructor. 
```python from mmcv.utils import build_from_cfg from mmcv.runner.optimizer import OPTIMIZER_BUILDERS, OPTIMIZERS from mmdet.utils import get_root_logger from .my_optimizer import MyOptimizer @OPTIMIZER_BUILDERS.register_module() class MyOptimizerConstructor(object): def __init__(self, optimizer_cfg, paramwise_cfg=None): def __call__(self, model): return my_optimizer ``` The default optimizer constructor is implemented [here](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/optimizer/default_constructor.py#L11), which could also serve as a template for new optimizer constructor. ### Additional settings Tricks not implemented by the optimizer should be implemented through optimizer constructor (e.g., set parameter-wise learning rates) or hooks. We list some common settings that could stabilize the training or accelerate the training. Feel free to create PR, issue for more settings. - __Use gradient clip to stabilize training__: Some models need gradient clip to clip the gradients to stabilize the training process. An example is as below: ```python optimizer_config = dict( _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) ``` If your config inherits the base config which already sets the `optimizer_config`, you might need `_delete_=True` to override the unnecessary settings. See the [config documentation](https://mmdetection.readthedocs.io/en/latest/tutorials/config.html) for more details. - __Use momentum schedule to accelerate model convergence__: We support momentum scheduler to modify model's momentum according to learning rate, which could make the model converge in a faster way. Momentum scheduler is usually used with LR scheduler, for example, the following config is used in 3D detection to accelerate convergence. For more details, please refer to the implementation of [CyclicLrUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L327) and [CyclicMomentumUpdater](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/momentum_updater.py#L130). ```python lr_config = dict( policy='cyclic', target_ratio=(10, 1e-4), cyclic_times=1, step_ratio_up=0.4, ) momentum_config = dict( policy='cyclic', target_ratio=(0.85 / 0.95, 1), cyclic_times=1, step_ratio_up=0.4, ) ``` ## Customize training schedules By default we use step learning rate with 1x schedule, this calls [`StepLRHook`](https://github.com/open-mmlab/mmcv/blob/f48241a65aebfe07db122e9db320c31b685dc674/mmcv/runner/hooks/lr_updater.py#L153) in MMCV. We support many other learning rate schedule [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py), such as `CosineAnnealing` and `Poly` schedule. Here are some examples - Poly schedule: ```python lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False) ``` - ConsineAnnealing schedule: ```python lr_config = dict( policy='CosineAnnealing', warmup='linear', warmup_iters=1000, warmup_ratio=1.0 / 10, min_lr_ratio=1e-5) ``` ## Customize workflow Workflow is a list of (phase, epochs) to specify the running order and epochs. By default it is set to be ```python workflow = [('train', 1)] ``` which means running 1 epoch for training. Sometimes user may want to check some metrics (e.g. loss, accuracy) about the model on the validate set. 
In such case, we can set the workflow as ```python [('train', 1), ('val', 1)] ``` so that 1 epoch for training and 1 epoch for validation will be run iteratively. **Note**: 1. The parameters of model will not be updated during val epoch. 2. Keyword `total_epochs` in the config only controls the number of training epochs and will not affect the validation workflow. 3. Workflows `[('train', 1), ('val', 1)]` and `[('train', 1)]` will not change the behavior of `EvalHook` because `EvalHook` is called by `after_train_epoch` and validation workflow only affect hooks that are called through `after_val_epoch`. Therefore, the only difference between `[('train', 1), ('val', 1)]` and `[('train', 1)]` is that the runner will calculate losses on validation set after each training epoch. ## Customize hooks ### Customize self-implemented hooks #### 1. Implement a new hook There are some occasions when the users might need to implement a new hook. MMDetection supports customized hooks in training (#3395) since v2.3.0. Thus the users could implement a hook directly in mmdet or their mmdet-based codebases and use the hook by only modifying the config in training. Before v2.3.0, the users need to modify the code to get the hook registered before training starts. Here we give an example of creating a new hook in mmdet and using it in training. ```python from mmcv.runner import HOOKS, Hook @HOOKS.register_module() class MyHook(Hook): def __init__(self, a, b): pass def before_run(self, runner): pass def after_run(self, runner): pass def before_epoch(self, runner): pass def after_epoch(self, runner): pass def before_iter(self, runner): pass def after_iter(self, runner): pass ``` Depending on the functionality of the hook, the users need to specify what the hook will do at each stage of the training in `before_run`, `after_run`, `before_epoch`, `after_epoch`, `before_iter`, and `after_iter`. #### 2. Register the new hook Then we need to make `MyHook` imported. Assuming the file is in `mmdet/core/utils/my_hook.py` there are two ways to do that: - Modify `mmdet/core/utils/__init__.py` to import it. The newly defined module should be imported in `mmdet/core/utils/__init__.py` so that the registry will find the new module and add it: ```python from .my_hook import MyHook ``` - Use `custom_imports` in the config to manually import it ```python custom_imports = dict(imports=['mmdet.core.utils.my_hook'], allow_failed_imports=False) ``` #### 3. Modify the config ```python custom_hooks = [ dict(type='MyHook', a=a_value, b=b_value) ] ``` You can also set the priority of the hook by adding key `priority` to `'NORMAL'` or `'HIGHEST'` as below ```python custom_hooks = [ dict(type='MyHook', a=a_value, b=b_value, priority='NORMAL') ] ``` By default the hook's priority is set as `NORMAL` during registration. ### Use hooks implemented in MMCV If the hook is already implemented in MMCV, you can directly modify the config to use the hook as below #### 4. Example: `NumClassCheckHook` We implement a customized hook named [NumClassCheckHook](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/utils.py) to check whether the `num_classes` in head matches the length of `CLASSES` in `dataset`. We set it in [default_runtime.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/_base_/default_runtime.py). 
```python
custom_hooks = [dict(type='NumClassCheckHook')]
```

### Modify default runtime hooks

There are some common hooks that are not registered through `custom_hooks`, they are:

- log_config
- checkpoint_config
- evaluation
- lr_config
- optimizer_config
- momentum_config

In those hooks, only the logger hook has the `VERY_LOW` priority; the others' priority is `NORMAL`.
The above-mentioned tutorials already cover how to modify `optimizer_config`, `momentum_config`, and `lr_config`.
Here we describe what we can do with `log_config`, `checkpoint_config`, and `evaluation`.

#### Checkpoint config

The MMCV runner will use `checkpoint_config` to initialize [`CheckpointHook`](https://github.com/open-mmlab/mmcv/blob/9ecd6b0d5ff9d2172c49a182eaa669e9f27bb8e7/mmcv/runner/hooks/checkpoint.py#L9).

```python
checkpoint_config = dict(interval=1)
```

The users could set `max_keep_ckpts` to save only a small number of checkpoints, or decide whether to store the state dict of the optimizer by `save_optimizer`. More details of the arguments are [here](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.CheckpointHook).

#### Log config

The `log_config` wraps multiple logger hooks and enables setting intervals. Now MMCV supports `WandbLoggerHook`, `MlflowLoggerHook`, and `TensorboardLoggerHook`.
The detailed usages can be found in the [doc](https://mmcv.readthedocs.io/en/latest/api.html#mmcv.runner.LoggerHook).

```python
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
```

#### Evaluation config

The config of `evaluation` will be used to initialize the [`EvalHook`](https://github.com/open-mmlab/mmdetection/blob/7a404a2c000620d52156774a5025070d9e00d918/mmdet/core/evaluation/eval_hooks.py#L8).
Except for the key `interval`, other arguments such as `metric` will be passed to `dataset.evaluate()`.

```python
evaluation = dict(interval=1, metric='bbox')
```
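Combining the options above, a runtime configuration that keeps only the three latest checkpoints, logs to both the terminal and TensorBoard, and evaluates box and mask AP every epoch could look like the following sketch (intervals and metrics should be adapted to your task and dataset):

```python
# keep only the 3 latest checkpoints and skip saving the optimizer state
checkpoint_config = dict(interval=1, max_keep_ckpts=3, save_optimizer=False)
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook')
    ])
# evaluate both bbox and mask AP every epoch (for instance segmentation models)
evaluation = dict(interval=1, metric=['bbox', 'segm'])
```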
12,070
36.256173
417
md
mmdetection
mmdetection-master/docs/en/tutorials/data_pipeline.md
# Tutorial 3: Customize Data Pipelines ## Design of Data pipelines Following typical conventions, we use `Dataset` and `DataLoader` for data loading with multiple workers. `Dataset` returns a dict of data items corresponding the arguments of models' forward method. Since the data in object detection may not be the same size (image size, gt bbox size, etc.), we introduce a new `DataContainer` type in MMCV to help collect and distribute data of different size. See [here](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py) for more details. The data preparation pipeline and the dataset is decomposed. Usually a dataset defines how to process the annotations and a data pipeline defines all the steps to prepare a data dict. A pipeline consists of a sequence of operations. Each operation takes a dict as input and also output a dict for the next transform. We present a classical pipeline in the following figure. The blue blocks are pipeline operations. With the pipeline going on, each operator can add new keys (marked as green) to the result dict or update the existing keys (marked as orange). ![pipeline figure](../../../resources/data_pipeline.png) The operations are categorized into data loading, pre-processing, formatting and test-time augmentation. Here is a pipeline example for Faster R-CNN. ```python img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] ``` For each operation, we list the related dict fields that are added/updated/removed. ### Data loading `LoadImageFromFile` - add: img, img_shape, ori_shape `LoadAnnotations` - add: gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg, bbox_fields, mask_fields `LoadProposals` - add: proposals ### Pre-processing `Resize` - add: scale, scale_idx, pad_shape, scale_factor, keep_ratio - update: img, img_shape, \*bbox_fields, \*mask_fields, \*seg_fields `RandomFlip` - add: flip - update: img, \*bbox_fields, \*mask_fields, \*seg_fields `Pad` - add: pad_fixed_size, pad_size_divisor - update: img, pad_shape, \*mask_fields, \*seg_fields `RandomCrop` - update: img, pad_shape, gt_bboxes, gt_labels, gt_masks, \*bbox_fields `Normalize` - add: img_norm_cfg - update: img `SegRescale` - update: gt_semantic_seg `PhotoMetricDistortion` - update: img `Expand` - update: img, gt_bboxes `MinIoURandomCrop` - update: img, gt_bboxes, gt_labels `Corrupt` - update: img ### Formatting `ToTensor` - update: specified by `keys`. `ImageToTensor` - update: specified by `keys`. `Transpose` - update: specified by `keys`. `ToDataContainer` - update: specified by `fields`. 
`DefaultFormatBundle` - update: img, proposals, gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg `Collect` - add: img_meta (the keys of img_meta is specified by `meta_keys`) - remove: all other keys except for those specified by `keys` ### Test time augmentation `MultiScaleFlipAug` ## Extend and use custom pipelines 1. Write a new pipeline in a file, e.g., in `my_pipeline.py`. It takes a dict as input and returns a dict. ```python import random from mmdet.datasets import PIPELINES @PIPELINES.register_module() class MyTransform: """Add your transform Args: p (float): Probability of shifts. Default 0.5. """ def __init__(self, p=0.5): self.p = p def __call__(self, results): if random.random() > self.p: results['dummy'] = True return results ``` 2. Import and use the pipeline in your config file. Make sure the import is relative to where your train script is located. ```python custom_imports = dict(imports=['path.to.my_pipeline'], allow_failed_imports=False) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='MyTransform', p=0.2), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] ``` 3. Visualize the output of your augmentation pipeline To visualize the output of your augmentation pipeline, `tools/misc/browse_dataset.py` can help the user to browse a detection dataset (both images and bounding box annotations) visually, or save the image to a designated directory. More details can refer to [useful_tools](../useful_tools.md)
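If you prefer to inspect the pipeline output programmatically rather than visually, you can build the training dataset and look at one processed sample. This is a minimal sketch that assumes the COCO data layout expected by the config; the exact keys depend on your `Collect` step:

```python
from mmcv import Config

from mmdet.datasets import build_dataset

cfg = Config.fromfile('configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py')
dataset = build_dataset(cfg.data.train)
sample = dataset[0]  # runs the whole train_pipeline on one image
# e.g. dict_keys(['img_metas', 'img', 'gt_bboxes', 'gt_labels'])
print(sample.keys())
```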
5,606
27.035
241
md
mmdetection
mmdetection-master/docs/en/tutorials/finetune.md
# Tutorial 7: Finetuning Models Detectors pre-trained on the COCO dataset can serve as a good pre-trained model for other datasets, e.g., CityScapes and KITTI Dataset. This tutorial provides instruction for users to use the models provided in the [Model Zoo](../model_zoo.md) for other datasets to obtain better performance. There are two steps to finetune a model on a new dataset. - Add support for the new dataset following [Tutorial 2: Customize Datasets](customize_dataset.md). - Modify the configs as will be discussed in this tutorial. Take the finetuning process on Cityscapes Dataset as an example, the users need to modify five parts in the config. ## Inherit base configs To release the burden and reduce bugs in writing the whole configs, MMDetection V2.0 support inheriting configs from multiple existing configs. To finetune a Mask RCNN model, the new config needs to inherit `_base_/models/mask_rcnn_r50_fpn.py` to build the basic structure of the model. To use the Cityscapes Dataset, the new config can also simply inherit `_base_/datasets/cityscapes_instance.py`. For runtime settings such as training schedules, the new config needs to inherit `_base_/default_runtime.py`. This configs are in the `configs` directory and the users can also choose to write the whole contents rather than use inheritance. ```python _base_ = [ '../_base_/models/mask_rcnn_r50_fpn.py', '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py' ] ``` ## Modify head Then the new config needs to modify the head according to the class numbers of the new datasets. By only changing `num_classes` in the roi_head, the weights of the pre-trained models are mostly reused except the final prediction head. ```python model = dict( pretrained=None, roi_head=dict( bbox_head=dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, num_classes=8, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) ``` ## Modify dataset The users may also need to prepare the dataset and write the configs about dataset. MMDetection V2.0 already support VOC, WIDER FACE, COCO and Cityscapes Dataset. ## Modify training schedule The finetuning hyperparameters vary from the default schedule. It usually requires smaller learning rate and less training epochs ```python # optimizer # lr is set for a batch size of 8 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # learning policy lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[7]) # the max_epochs and step in lr_config need specifically tuned for the customized dataset runner = dict(max_epochs=8) log_config = dict(interval=100) ``` ## Use pre-trained model To use the pre-trained model, the new config add the link of pre-trained models in the `load_from`. The users might need to download the model weights before training to avoid the download time during training. 
```python
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth'  # noqa
```
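Putting the pieces of this tutorial together, a minimal finetuning config could look like the sketch below. The file name is hypothetical, and only `num_classes` is overridden in the heads because config inheritance merges the remaining keys from the base files:

```python
# e.g. configs/cityscapes/mask_rcnn_r50_fpn_finetune_cityscapes.py (hypothetical)
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/cityscapes_instance.py',
    '../_base_/default_runtime.py'
]
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=8),
        mask_head=dict(num_classes=8)))
# smaller learning rate and shorter schedule than the COCO defaults
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[7])
runner = dict(type='EpochBasedRunner', max_epochs=8)
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth'  # noqa
```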
3,936
42.744444
432
md
mmdetection
mmdetection-master/docs/en/tutorials/how_to.md
# Tutorial 11: How to xxx This tutorial collects answers to any `How to xxx with MMDetection`. Feel free to update this doc if you meet new questions about `How to` and find the answers! ## Use backbone network through MMClassification The model registry in MMDet, MMCls, MMSeg all inherit from the root registry in MMCV. This allows these repositories to directly use the modules already implemented by each other. Therefore, users can use backbone networks from MMClassification in MMDetection without implementing a network that already exists in MMClassification. ### Use backbone network implemented in MMClassification Suppose you want to use `MobileNetV3-small` as the backbone network of `RetinaNet`, the example config is as the following. ```python _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # please install mmcls>=0.20.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) pretrained = 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth' model = dict( backbone=dict( _delete_=True, # Delete the backbone field in _base_ type='mmcls.MobileNetV3', # Using MobileNetV3 from mmcls arch='small', out_indices=(3, 8, 11), # Modify out_indices init_cfg=dict( type='Pretrained', checkpoint=pretrained, prefix='backbone.')), # The pre-trained weights of backbone network in MMCls have prefix='backbone.'. The prefix in the keys will be removed so that these weights can be normally loaded. # Modify in_channels neck=dict(in_channels=[24, 48, 96], start_level=0)) ``` ### Use backbone network in TIMM through MMClassification MMClassification also provides a wrapper for the PyTorch Image Models (timm) backbone network, users can directly use the backbone network in timm through MMClassification. Suppose you want to use EfficientNet-B1 as the backbone network of RetinaNet, the example config is as the following. ```python # https://github.com/open-mmlab/mmdetection/blob/master/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # please install mmcls>=0.20.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) model = dict( backbone=dict( _delete_=True, # Delete the backbone field in _base_ type='mmcls.TIMMBackbone', # Using timm from mmcls model_name='efficientnet_b1', features_only=True, pretrained=True, out_indices=(1, 2, 3, 4)), # Modify out_indices neck=dict(in_channels=[24, 40, 112, 320])) # Modify in_channels optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ``` `type='mmcls.TIMMBackbone'` means use the `TIMMBackbone` class from MMClassification in MMDetection, and the model used is `EfficientNet-B1`, where `mmcls` means the MMClassification repo and `TIMMBackbone` means the TIMMBackbone wrapper implemented in MMClassification. For the principle of the Hierarchy Registry, please refer to the [MMCV document](https://github.com/open-mmlab/mmcv/blob/master/docs/en/understand_mmcv/registry.md#hierarchy-registry). 
For how to use other backbones in MMClassification, you can refer to the [MMClassification document](https://github.com/open-mmlab/mmclassification/blob/master/docs/en/tutorials/config.md). ## Use Mosaic augmentation If you want to use `Mosaic` in training, please make sure that you use `MultiImageMixDataset` at the same time. Taking the 'Faster R-CNN' algorithm as an example, you should modify the values of `train_pipeline` and `train_dataset` in the config as below: ```python # Open configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py directly and add the following fields data_root = 'data/coco/' dataset_type = 'CocoDataset' img_scale=(1333, 800)​ img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), dict( type='RandomAffine', scaling_ratio_range=(0.1, 2), border=(-img_scale[0] // 2, -img_scale[1] // 2)), # The image will be enlarged by 4 times after Mosaic processing,so we use affine transformation to restore the image size. dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] train_dataset = dict( _delete_ = True, # remove unnecessary Settings type='MultiImageMixDataset', dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=[ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True) ], filter_empty_gt=False, ), pipeline=train_pipeline ) ​ data = dict( train=train_dataset ) ``` ## Unfreeze backbone network after freezing the backbone in the config If you have freezed the backbone network in the config and want to unfreeze it after some epoches, you can write a hook function to do it. Taking the Faster R-CNN with the resnet backbone as an example, you can freeze one stage of the backbone network and add a `custom_hooks` in the config as below: ```python _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( # freeze one stage of the backbone network. backbone=dict(frozen_stages=1), ) custom_hooks = [dict(type="UnfreezeBackboneEpochBasedHook", unfreeze_epoch=1)] ``` Meanwhile write the hook class `UnfreezeBackboneEpochBasedHook` in `mmdet/core/hook/unfreeze_backbone_epoch_based_hook.py` ```python from mmcv.parallel import is_module_wrapper from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class UnfreezeBackboneEpochBasedHook(Hook): """Unfreeze backbone network Hook. Args: unfreeze_epoch (int): The epoch unfreezing the backbone network. """ def __init__(self, unfreeze_epoch=1): self.unfreeze_epoch = unfreeze_epoch def before_train_epoch(self, runner): # Unfreeze the backbone network. # Only valid for resnet. 
if runner.epoch == self.unfreeze_epoch: model = runner.model if is_module_wrapper(model): model = model.module backbone = model.backbone if backbone.frozen_stages >= 0: if backbone.deep_stem: backbone.stem.train() for param in backbone.stem.parameters(): param.requires_grad = True else: backbone.norm1.train() for m in [backbone.conv1, backbone.norm1]: for param in m.parameters(): param.requires_grad = True for i in range(1, backbone.frozen_stages + 1): m = getattr(backbone, f'layer{i}') m.train() for param in m.parameters(): param.requires_grad = True ``` ## Get the channels of a new backbone If you want to get the channels of a new backbone, you can build this backbone alone and input a pseudo image to get each stage output. Take `ResNet` as an example: ```python from mmdet.models import ResNet import torch self = ResNet(depth=18) self.eval() inputs = torch.rand(1, 3, 32, 32) level_outputs = self.forward(inputs) for level_out in level_outputs: print(tuple(level_out.shape)) ``` Output of the above script is as below: ```python (1, 64, 8, 8) (1, 128, 4, 4) (1, 256, 2, 2) (1, 512, 1, 1) ``` Users can get the channels of the new backbone by Replacing the `ResNet(depth=18)` in this script with their customized backbone.
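Once the per-stage channels are known, they are typically what you pass to the neck's `in_channels`. Below is a sketch of wiring the ResNet-18 outputs printed above into an FPN; values other than `in_channels` are illustrative:

```python
model = dict(
    backbone=dict(
        type='ResNet',
        depth=18,
        out_indices=(0, 1, 2, 3),
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet18')),
    neck=dict(
        type='FPN',
        # the channels printed by the snippet above
        in_channels=[64, 128, 256, 512],
        out_channels=256,
        num_outs=5))
```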
8,374
39.853659
374
md
mmdetection
mmdetection-master/docs/en/tutorials/init_cfg.md
# Tutorial 10: Weight initialization During training, a proper initialization strategy is beneficial to speeding up the training or obtaining a higher performance. [MMCV](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/weight_init.py) provide some commonly used methods for initializing modules like `nn.Conv2d`. Model initialization in MMdetection mainly uses `init_cfg`. Users can initialize models with following two steps: 1. Define `init_cfg` for a model or its components in `model_cfg`, but `init_cfg` of children components have higher priority and will override `init_cfg` of parents modules. 2. Build model as usual, but call `model.init_weights()` method explicitly, and model parameters will be initialized as configuration. The high-level workflow of initialization in MMdetection is : model_cfg(init_cfg) -> build_from_cfg -> model -> init_weight() -> initialize(self, self.init_cfg) -> children's init_weight() ### Description It is dict or list\[dict\], and contains the following keys and values: - `type` (str), containing the initializer name in `INTIALIZERS`, and followed by arguments of the initializer. - `layer` (str or list\[str\]), containing the names of basiclayers in Pytorch or MMCV with learnable parameters that will be initialized, e.g. `'Conv2d'`,`'DeformConv2d'`. - `override` (dict or list\[dict\]), containing the sub-modules that not inherit from BaseModule and whose initialization configuration is different from other layers' which are in `'layer'` key. Initializer defined in `type` will work for all layers defined in `layer`, so if sub-modules are not derived Classes of `BaseModule` but can be initialized as same ways of layers in `layer`, it does not need to use `override`. `override` contains: - `type` followed by arguments of initializer; - `name` to indicate sub-module which will be initialized. ### Initialize parameters Inherit a new model from `mmcv.runner.BaseModule` or `mmdet.models` Here we show an example of FooModel. ```python import torch.nn as nn from mmcv.runner import BaseModule class FooModel(BaseModule) def __init__(self, arg1, arg2, init_cfg=None): super(FooModel, self).__init__(init_cfg) ... ``` - Initialize model by using `init_cfg` directly in code ```python import torch.nn as nn from mmcv.runner import BaseModule # or directly inherit mmdet models class FooModel(BaseModule) def __init__(self, arg1, arg2, init_cfg=XXX): super(FooModel, self).__init__(init_cfg) ... ``` - Initialize model by using `init_cfg` directly in `mmcv.Sequential` or `mmcv.ModuleList` code ```python from mmcv.runner import BaseModule, ModuleList class FooModel(BaseModule) def __init__(self, arg1, arg2, init_cfg=None): super(FooModel, self).__init__(init_cfg) ... self.conv1 = ModuleList(init_cfg=XXX) ``` - Initialize model by using `init_cfg` in config file ```python model = dict( ... model = dict( type='FooModel', arg1=XXX, arg2=XXX, init_cfg=XXX), ... ``` ### Usage of init_cfg 1. Initialize model by `layer` key If we only define `layer`, it just initialize the layer in `layer` key. NOTE: Value of `layer` key is the class name with attributes weights and bias of Pytorch, (so such as `MultiheadAttention layer` is not supported). - Define `layer` key for initializing module with same configuration. ```python init_cfg = dict(type='Constant', layer=['Conv1d', 'Conv2d', 'Linear'], val=1) # initialize whole module with same configuration ``` - Define `layer` key for initializing layer with different configurations. 
```python
init_cfg = [dict(type='Constant', layer='Conv1d', val=1),
            dict(type='Constant', layer='Conv2d', val=2),
            dict(type='Constant', layer='Linear', val=3)]
# nn.Conv1d will be initialized with dict(type='Constant', val=1)
# nn.Conv2d will be initialized with dict(type='Constant', val=2)
# nn.Linear will be initialized with dict(type='Constant', val=3)
```

2. Initialize model by `override` key

- When initializing some specific part by its attribute name, we can use the `override` key, and the value in `override` will override the value in `init_cfg`.

```python
# layers:
# self.feat = nn.Conv1d(3, 1, 3)
# self.reg = nn.Conv2d(3, 3, 3)
# self.cls = nn.Linear(1, 2)
init_cfg = dict(type='Constant',
                layer=['Conv1d', 'Conv2d'], val=1, bias=2,
                override=dict(type='Constant', name='reg', val=3, bias=4))
# self.feat and self.cls will be initialized with dict(type='Constant', val=1, bias=2)
# The module called 'reg' will be initialized with dict(type='Constant', val=3, bias=4)
```

- If `layer` is None in `init_cfg`, only the sub-module with the name in `override` will be initialized, and the type and other args in `override` can be omitted.

```python
# layers:
# self.feat = nn.Conv1d(3, 1, 3)
# self.reg = nn.Conv2d(3, 3, 3)
# self.cls = nn.Linear(1, 2)
init_cfg = dict(type='Constant', val=1, bias=2, override=dict(name='reg'))
# self.feat and self.cls will be initialized by PyTorch
# The module called 'reg' will be initialized with dict(type='Constant', val=1, bias=2)
```

- If we define neither the `layer` key nor the `override` key, nothing will be initialized.

- Invalid usage

```python
# It is invalid if override does not have the name key
init_cfg = dict(type='Constant',
                layer=['Conv1d', 'Conv2d'], val=1, bias=2,
                override=dict(type='Constant', val=3, bias=4))

# It is also invalid if override has the name key and other args but no type
init_cfg = dict(type='Constant',
                layer=['Conv1d', 'Conv2d'], val=1, bias=2,
                override=dict(name='reg', val=3, bias=4))
```

3. Initialize model with the pretrained model

```python
init_cfg = dict(type='Pretrained', checkpoint='torchvision://resnet50')
```

More details can be found in the documentation of [MMCV](https://mmcv.readthedocs.io/en/latest/cnn.html#weight-initialization) and MMCV [PR #780](https://github.com/open-mmlab/mmcv/pull/780)
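As a concrete illustration of how these snippets appear in a detector config, the sketch below combines a pretrained backbone with a layer-wise initializer for the neck; the values are illustrative, and the `Xavier` initializer and its arguments come from MMCV's weight-initialization module:

```python
model = dict(
    backbone=dict(
        type='ResNet',
        depth=50,
        # load ImageNet-pretrained weights for the backbone
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=5,
        # initialize every Conv2d layer of the neck with Xavier initialization
        init_cfg=dict(type='Xavier', layer='Conv2d', distribution='uniform')))
```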
6,251
37.592593
444
md
mmdetection
mmdetection-master/docs/en/tutorials/onnx2tensorrt.md
# Tutorial 9: ONNX to TensorRT (Experimental) > ## [Try the new MMDeploy to deploy your model](https://mmdeploy.readthedocs.io/) <!-- TOC --> - [Tutorial 9: ONNX to TensorRT (Experimental)](#tutorial-9-onnx-to-tensorrt-experimental) - [How to convert models from ONNX to TensorRT](#how-to-convert-models-from-onnx-to-tensorrt) - [Prerequisite](#prerequisite) - [Usage](#usage) - [How to evaluate the exported models](#how-to-evaluate-the-exported-models) - [List of supported models convertible to TensorRT](#list-of-supported-models-convertible-to-tensorrt) - [Reminders](#reminders) - [FAQs](#faqs) <!-- TOC --> ## How to convert models from ONNX to TensorRT ### Prerequisite 1. Please refer to [get_started.md](https://mmdetection.readthedocs.io/en/latest/get_started.html) for installation of MMCV and MMDetection from source. 2. Please refer to [ONNXRuntime in mmcv](https://mmcv.readthedocs.io/en/latest/deployment/onnxruntime_op.html) and [TensorRT plugin in mmcv](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/tensorrt_plugin.md/) to install `mmcv-full` with ONNXRuntime custom ops and TensorRT plugins. 3. Use our tool [pytorch2onnx](https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html) to convert the model from PyTorch to ONNX. ### Usage ```bash python tools/deployment/onnx2tensorrt.py \ ${CONFIG} \ ${MODEL} \ --trt-file ${TRT_FILE} \ --input-img ${INPUT_IMAGE_PATH} \ --shape ${INPUT_IMAGE_SHAPE} \ --min-shape ${MIN_IMAGE_SHAPE} \ --max-shape ${MAX_IMAGE_SHAPE} \ --workspace-size {WORKSPACE_SIZE} \ --show \ --verify \ ``` Description of all arguments: - `config` : The path of a model config file. - `model` : The path of an ONNX model file. - `--trt-file`: The Path of output TensorRT engine file. If not specified, it will be set to `tmp.trt`. - `--input-img` : The path of an input image for tracing and conversion. By default, it will be set to `demo/demo.jpg`. - `--shape`: The height and width of model input. If not specified, it will be set to `400 600`. - `--min-shape`: The minimum height and width of model input. If not specified, it will be set to the same as `--shape`. - `--max-shape`: The maximum height and width of model input. If not specified, it will be set to the same as `--shape`. - `--workspace-size` : The required GPU workspace size in GiB to build TensorRT engine. If not specified, it will be set to `1` GiB. - `--show`: Determines whether to show the outputs of the model. If not specified, it will be set to `False`. - `--verify`: Determines whether to verify the correctness of models between ONNXRuntime and TensorRT. If not specified, it will be set to `False`. - `--verbose`: Determines whether to print logging messages. It's useful for debugging. If not specified, it will be set to `False`. Example: ```bash python tools/deployment/onnx2tensorrt.py \ configs/retinanet/retinanet_r50_fpn_1x_coco.py \ checkpoints/retinanet_r50_fpn_1x_coco.onnx \ --trt-file checkpoints/retinanet_r50_fpn_1x_coco.trt \ --input-img demo/demo.jpg \ --shape 400 600 \ --show \ --verify \ ``` ## How to evaluate the exported models We prepare a tool `tools/deplopyment/test.py` to evaluate TensorRT models. Please refer to following links for more information. - [how-to-evaluate-the-exported-models](pytorch2onnx.md#how-to-evaluate-the-exported-models) - [results-and-models](pytorch2onnx.md#results-and-models) ## List of supported models convertible to TensorRT The table below lists the models that are guaranteed to be convertible to TensorRT. 
| Model              | Config                                                            | Dynamic Shape | Batch Inference | Note |
| :----------------: | :---------------------------------------------------------------: | :-----------: | :-------------: | :--: |
| SSD                | `configs/ssd/ssd300_coco.py`                                      | Y             | Y               |      |
| FSAF               | `configs/fsaf/fsaf_r50_fpn_1x_coco.py`                            | Y             | Y               |      |
| FCOS               | `configs/fcos/fcos_r50_caffe_fpn_4x4_1x_coco.py`                  | Y             | Y               |      |
| YOLOv3             | `configs/yolo/yolov3_d53_mstrain-608_273e_coco.py`                | Y             | Y               |      |
| RetinaNet          | `configs/retinanet/retinanet_r50_fpn_1x_coco.py`                  | Y             | Y               |      |
| Faster R-CNN       | `configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py`              | Y             | Y               |      |
| Cascade R-CNN      | `configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py`            | Y             | Y               |      |
| Mask R-CNN         | `configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py`                  | Y             | Y               |      |
| Cascade Mask R-CNN | `configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py`       | Y             | Y               |      |
| PointRend          | `configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py`  | Y             | Y               |      |

Notes:

- *All models above are tested with PyTorch==1.6.0, onnx==1.7.0 and TensorRT-7.2.1.6.Ubuntu-16.04.x86_64-gnu.cuda-10.2.cudnn8.0*

## Reminders

- If you meet any problem with the listed models above, please create an issue and it will be taken care of soon. For models not included in the list, we may not provide much help here due to the limited resources. Please try to dig a little deeper and debug by yourself.
- Because this feature is experimental and may change quickly, please always try with the latest `mmcv` and `mmdetection`.

## FAQs

- None
5,762
52.859813
300
md
mmdetection
mmdetection-master/docs/en/tutorials/pytorch2onnx.md
# Tutorial 8: Pytorch to ONNX (Experimental) > ## [Try the new MMDeploy to deploy your model](https://mmdeploy.readthedocs.io/) <!-- TOC --> - [Tutorial 8: Pytorch to ONNX (Experimental)](#tutorial-8-pytorch-to-onnx-experimental) - [How to convert models from Pytorch to ONNX](#how-to-convert-models-from-pytorch-to-onnx) - [Prerequisite](#prerequisite) - [Usage](#usage) - [Description of all arguments](#description-of-all-arguments) - [How to evaluate the exported models](#how-to-evaluate-the-exported-models) - [Prerequisite](#prerequisite-1) - [Usage](#usage-1) - [Description of all arguments](#description-of-all-arguments-1) - [Results and Models](#results-and-models) - [List of supported models exportable to ONNX](#list-of-supported-models-exportable-to-onnx) - [The Parameters of Non-Maximum Suppression in ONNX Export](#the-parameters-of-non-maximum-suppression-in-onnx-export) - [Reminders](#reminders) - [FAQs](#faqs) <!-- TOC --> ## How to convert models from Pytorch to ONNX ### Prerequisite 1. Install the prerequisites following [get_started.md/Prepare environment](../get_started.md). 2. Build custom operators for ONNX Runtime and install MMCV manually following [How to build custom operators for ONNX Runtime](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/onnxruntime_op.md/#how-to-build-custom-operators-for-onnx-runtime) 3. Install MMdetection manually following steps 2-3 in [get_started.md/Install MMdetection](../get_started.md). ### Usage ```bash python tools/deployment/pytorch2onnx.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ --output-file ${OUTPUT_FILE} \ --input-img ${INPUT_IMAGE_PATH} \ --shape ${IMAGE_SHAPE} \ --test-img ${TEST_IMAGE_PATH} \ --opset-version ${OPSET_VERSION} \ --cfg-options ${CFG_OPTIONS} --dynamic-export \ --show \ --verify \ --simplify \ ``` ### Description of all arguments - `config` : The path of a model config file. - `checkpoint` : The path of a model checkpoint file. - `--output-file`: The path of output ONNX model. If not specified, it will be set to `tmp.onnx`. - `--input-img`: The path of an input image for tracing and conversion. By default, it will be set to `tests/data/color.jpg`. - `--shape`: The height and width of input tensor to the model. If not specified, it will be set to `800 1216`. - `--test-img` : The path of an image to verify the exported ONNX model. By default, it will be set to `None`, meaning it will use `--input-img` for verification. - `--opset-version` : The opset version of ONNX. If not specified, it will be set to `11`. - `--dynamic-export`: Determines whether to export ONNX model with dynamic input and output shapes. If not specified, it will be set to `False`. - `--show`: Determines whether to print the architecture of the exported model and whether to show detection outputs when `--verify` is set to `True`. If not specified, it will be set to `False`. - `--verify`: Determines whether to verify the correctness of an exported model. If not specified, it will be set to `False`. - `--simplify`: Determines whether to simplify the exported ONNX model. If not specified, it will be set to `False`. - `--cfg-options`: Override some settings in the used config file, the key-value pair in `xxx=yyy` format will be merged into config file. - `--skip-postprocess`: Determines whether export model without post process. If not specified, it will be set to `False`. Notice: This is an experimental option. Only work for some single stage models. Users need to implement the post-process by themselves. 
We do not guarantee the correctness of the exported model. Example: ```bash python tools/deployment/pytorch2onnx.py \ configs/yolo/yolov3_d53_mstrain-608_273e_coco.py \ checkpoints/yolo/yolov3_d53_mstrain-608_273e_coco.pth \ --output-file checkpoints/yolo/yolov3_d53_mstrain-608_273e_coco.onnx \ --input-img demo/demo.jpg \ --test-img tests/data/color.jpg \ --shape 608 608 \ --show \ --verify \ --dynamic-export \ --cfg-options \ model.test_cfg.deploy_nms_pre=-1 \ ``` ## How to evaluate the exported models We prepare a tool `tools/deplopyment/test.py` to evaluate ONNX models with ONNXRuntime and TensorRT. ### Prerequisite - Install onnx and onnxruntime (CPU version) ```shell pip install onnx onnxruntime==1.5.1 ``` - If you want to run the model on GPU, please remove the CPU version before using the GPU version. ```shell pip uninstall onnxruntime pip install onnxruntime-gpu ``` Note: onnxruntime-gpu is version-dependent on CUDA and CUDNN, please ensure that your environment meets the requirements. - Build custom operators for ONNX Runtime following [How to build custom operators for ONNX Runtime](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/onnxruntime_op.md/#how-to-build-custom-operators-for-onnx-runtime) - Install TensorRT by referring to [How to build TensorRT plugins in MMCV](https://mmcv.readthedocs.io/en/latest/deployment/tensorrt_plugin.html#how-to-build-tensorrt-plugins-in-mmcv) (optional) ### Usage ```bash python tools/deployment/test.py \ ${CONFIG_FILE} \ ${MODEL_FILE} \ --out ${OUTPUT_FILE} \ --backend ${BACKEND} \ --format-only ${FORMAT_ONLY} \ --eval ${EVALUATION_METRICS} \ --show-dir ${SHOW_DIRECTORY} \ ----show-score-thr ${SHOW_SCORE_THRESHOLD} \ ----cfg-options ${CFG_OPTIONS} \ ----eval-options ${EVALUATION_OPTIONS} \ ``` ### Description of all arguments - `config`: The path of a model config file. - `model`: The path of an input model file. - `--out`: The path of output result file in pickle format. - `--backend`: Backend for input model to run and should be `onnxruntime` or `tensorrt`. - `--format-only` : Format the output results without perform evaluation. It is useful when you want to format the result to a specific format and submit it to the test server. If not specified, it will be set to `False`. - `--eval`: Evaluation metrics, which depends on the dataset, e.g., "bbox", "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC. - `--show-dir`: Directory where painted images will be saved - `--show-score-thr`: Score threshold. Default is set to `0.3`. - `--cfg-options`: Override some settings in the used config file, the key-value pair in `xxx=yyy` format will be merged into config file. - `--eval-options`: Custom options for evaluation, the key-value pair in `xxx=yyy` format will be kwargs for `dataset.evaluate()` function Notes: - If the deployed backend platform is TensorRT, please add environment variables before running the file: ```bash export ONNX_BACKEND=MMCVTensorRT ``` - If you want to use the `--dynamic-export` parameter in the TensorRT backend to export ONNX, please remove the `--simplify` parameter, and vice versa. 
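For instance, evaluating the YOLOv3 model exported in the previous section with the ONNX Runtime backend could look like the sketch below; it reuses the file paths from the export example above, and the output path and metric should be adapted to your setup and dataset:

```bash
python tools/deployment/test.py \
    configs/yolo/yolov3_d53_mstrain-608_273e_coco.py \
    checkpoints/yolo/yolov3_d53_mstrain-608_273e_coco.onnx \
    --backend onnxruntime \
    --out work_dirs/yolov3_onnx_results.pkl \
    --eval bbox
```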
### Results and Models <table border="1" class="docutils"> <tr> <th align="center">Model</th> <th align="center">Config</th> <th align="center">Metric</th> <th align="center">PyTorch</th> <th align="center">ONNX Runtime</th> <th align="center">TensorRT</th> </tr > <tr > <td align="center">FCOS</td> <td align="center"><code>configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py</code></td> <td align="center">Box AP</td> <td align="center">36.6</td> <td align="center">36.5</td> <td align="center">36.3</td> </tr> <tr > <td align="center">FSAF</td> <td align="center"><code>configs/fsaf/fsaf_r50_fpn_1x_coco.py</code></td> <td align="center">Box AP</td> <td align="center">36.0</td> <td align="center">36.0</td> <td align="center">35.9</td> </tr> <tr > <td align="center">RetinaNet</td> <td align="center"><code>configs/retinanet/retinanet_r50_fpn_1x_coco.py</code></td> <td align="center">Box AP</td> <td align="center">36.5</td> <td align="center">36.4</td> <td align="center">36.3</td> </tr> <tr > <td align="center" align="center" >SSD</td> <td align="center" align="center"><code>configs/ssd/ssd300_coco.py</code></td> <td align="center" align="center">Box AP</td> <td align="center" align="center">25.6</td> <td align="center" align="center">25.6</td> <td align="center" align="center">25.6</td> </tr> <tr > <td align="center">YOLOv3</td> <td align="center"><code>configs/yolo/yolov3_d53_mstrain-608_273e_coco.py</code></td> <td align="center">Box AP</td> <td align="center">33.5</td> <td align="center">33.5</td> <td align="center">33.5</td> </tr> <tr > <td align="center">Faster R-CNN</td> <td align="center"><code>configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py</code></td> <td align="center">Box AP</td> <td align="center">37.4</td> <td align="center">37.4</td> <td align="center">37.0</td> </tr> <tr > <td align="center">Cascade R-CNN</td> <td align="center"><code>configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py</code></td> <td align="center">Box AP</td> <td align="center">40.3</td> <td align="center">40.3</td> <td align="center">40.1</td> </tr> <tr > <td align="center" rowspan="2">Mask R-CNN</td> <td align="center" rowspan="2"><code>configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py</code></td> <td align="center">Box AP</td> <td align="center">38.2</td> <td align="center">38.1</td> <td align="center">37.7</td> </tr> <tr> <td align="center">Mask AP</td> <td align="center">34.7</td> <td align="center">33.7</td> <td align="center">33.3</td> </tr> <tr > <td align="center" rowspan="2">Cascade Mask R-CNN</td> <td align="center" rowspan="2"><code>configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py</code></td> <td align="center">Box AP</td> <td align="center">41.2</td> <td align="center">41.2</td> <td align="center">40.9</td> </tr> <tr> <td align="center">Mask AP</td> <td align="center">35.9</td> <td align="center">34.8</td> <td align="center">34.5</td> </tr> <tr > <td align="center">CornerNet</td> <td align="center"><code>configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py</code></td> <td align="center">Box AP</td> <td align="center">40.6</td> <td align="center">40.4</td> <td align="center">-</td> </tr> <tr > <td align="center">DETR</td> <td align="center"><code>configs/detr/detr_r50_8x2_150e_coco.py</code></td> <td align="center">Box AP</td> <td align="center">40.1</td> <td align="center">40.1</td> <td align="center">-</td> </tr> <tr > <td align="center" rowspan="2">PointRend</td> <td align="center" rowspan="2"><code>configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py</code></td> 
<td align="center">Box AP</td> <td align="center">38.4</td> <td align="center">38.4</td> <td align="center">-</td> </tr> <tr> <td align="center">Mask AP</td> <td align="center">36.3</td> <td align="center">35.2</td> <td align="center">-</td> </tr> </table> Notes: - All ONNX models are evaluated with dynamic shape on coco dataset and images are preprocessed according to the original config file. Note that CornerNet is evaluated without test-time flip, since currently only single-scale evaluation is supported with ONNX Runtime. - Mask AP of Mask R-CNN drops by 1% for ONNXRuntime. The main reason is that the predicted masks are directly interpolated to original image in PyTorch, while they are at first interpolated to the preprocessed input image of the model and then to original image in other backend. ## List of supported models exportable to ONNX The table below lists the models that are guaranteed to be exportable to ONNX and runnable in ONNX Runtime. | Model | Config | Dynamic Shape | Batch Inference | Note | | :----------------: | :-----------------------------------------------------------------: | :-----------: | :-------------: | :---------------------------------------------------------------------------: | | FCOS | `configs/fcos/fcos_r50_caffe_fpn_gn-head_4x4_1x_coco.py` | Y | Y | | | FSAF | `configs/fsaf/fsaf_r50_fpn_1x_coco.py` | Y | Y | | | RetinaNet | `configs/retinanet/retinanet_r50_fpn_1x_coco.py` | Y | Y | | | SSD | `configs/ssd/ssd300_coco.py` | Y | Y | | | YOLOv3 | `configs/yolo/yolov3_d53_mstrain-608_273e_coco.py` | Y | Y | | | Faster R-CNN | `configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py` | Y | Y | | | Cascade R-CNN | `configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py` | Y | Y | | | Mask R-CNN | `configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py` | Y | Y | | | Cascade Mask R-CNN | `configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py` | Y | Y | | | CornerNet | `configs/cornernet/cornernet_hourglass104_mstest_10x5_210e_coco.py` | Y | N | no flip, no batch inference, tested with torch==1.7.0 and onnxruntime==1.5.1. | | DETR | `configs/detr/detr_r50_8x2_150e_coco.py` | Y | Y | batch inference is *not recommended* | | PointRend | `configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py` | Y | Y | | Notes: - Minimum required version of MMCV is `1.3.5` - *All models above are tested with Pytorch==1.6.0 and onnxruntime==1.5.1*, except for CornerNet. For more details about the torch version when exporting CornerNet to ONNX, which involves `mmcv::cummax`, please refer to the [Known Issues](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/onnxruntime_op.md#known-issues) in mmcv. - Though supported, it is *not recommended* to use batch inference in onnxruntime for `DETR`, because there is huge performance gap between ONNX and torch model (e.g. 33.5 vs 39.9 mAP on COCO for onnxruntime and torch respectively, with a batch size 2). The main reason for the gap is that these is non-negligible effect on the predicted regressions during batch inference for ONNX, since the predicted coordinates is normalized by `img_shape` (without padding) and should be converted to absolute format, but `img_shape` is not dynamically traceable thus the padded `img_shape_for_onnx` is used. - Currently only single-scale evaluation is supported with ONNX Runtime, also `mmcv::SoftNonMaxSuppression` is only supported for single image by now. 
## The Parameters of Non-Maximum Suppression in ONNX Export

In the process of exporting the ONNX model, we set some parameters for the NMS op to control the number of output bounding boxes. The following introduces the parameter settings of the NMS op in the supported models. You can set these parameters through `--cfg-options`.

- `nms_pre`: The number of boxes before NMS. The default setting is `1000`.
- `deploy_nms_pre`: The number of boxes before NMS when exporting to an ONNX model. The default setting is `0`.
- `max_per_img`: The number of boxes to be kept after NMS. The default setting is `100`.
- `max_output_boxes_per_class`: Maximum number of output boxes per class of NMS. The default setting is `200`.

## Reminders

- When the input model has a custom op such as `RoIAlign` and you want to verify the exported ONNX model, you may have to build `mmcv` with [ONNXRuntime](https://mmcv.readthedocs.io/en/latest/deployment/onnxruntime_op.html) from source.
- The `mmcv.onnx.simplify` feature is based on [onnx-simplifier](https://github.com/daquexian/onnx-simplifier). If you want to try it, please refer to [onnx in `mmcv`](https://mmcv.readthedocs.io/en/latest/deployment/onnx.html) and [onnxruntime op in `mmcv`](https://mmcv.readthedocs.io/en/latest/deployment/onnxruntime_op.html) for more information.
- If you meet any problem with the listed models above, please create an issue and it will be taken care of soon. For models not included in the list, please try to dig a little deeper, debug a little bit more, and hopefully solve the issues by yourself.
- Because this feature is experimental and may change quickly, please always try with the latest `mmcv` and `mmdetection`.

## FAQs

- None
17,669
51.746269
596
md
mmdetection
mmdetection-master/docs/en/tutorials/test_results_submission.md
# Tutorial 12: Test Results Submission ## Panoptic segmentation test results submission The following sections introduce how to produce the prediction results of panoptic segmentation models on the COCO test-dev set and submit the predictions to [COCO evaluation server](https://competitions.codalab.org/competitions/19507). ### Prerequisites - Download [COCO test dataset images](http://images.cocodataset.org/zips/test2017.zip), [testing image info](http://images.cocodataset.org/annotations/image_info_test2017.zip), and [panoptic train/val annotations](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip), then unzip them, put 'test2017' to `data/coco/`, put json files and annotation files to `data/coco/annotations/`. ```shell # suppose data/coco/ does not exist mkdir -pv data/coco/ # download test2017 wget -P data/coco/ http://images.cocodataset.org/zips/test2017.zip wget -P data/coco/ http://images.cocodataset.org/annotations/image_info_test2017.zip wget -P data/coco/ http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip # unzip them unzip data/coco/test2017.zip -d data/coco/ unzip data/coco/image_info_test2017.zip -d data/coco/ unzip data/coco/panoptic_annotations_trainval2017.zip -d data/coco/ # remove zip files (optional) rm -rf data/coco/test2017.zip data/coco/image_info_test2017.zip data/coco/panoptic_annotations_trainval2017.zip ``` - Run the following code to update category information in testing image info. Since the attribute `isthing` is missing in category information of 'image_info_test-dev2017.json', we need to update it with the category information in 'panoptic_val2017.json'. ```shell python tools/misc/gen_coco_panoptic_test_info.py data/coco/annotations ``` After completing the above preparations, your directory structure of `data` should be like this: ```text data `-- coco |-- annotations | |-- image_info_test-dev2017.json | |-- image_info_test2017.json | |-- panoptic_image_info_test-dev2017.json | |-- panoptic_train2017.json | |-- panoptic_train2017.zip | |-- panoptic_val2017.json | `-- panoptic_val2017.zip `-- test2017 ``` ### Inference on coco test-dev The commands to perform inference on test2017 are as below: ```shell # test with single gpu CUDA_VISIBLE_DEVICES=0 python tools/test.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ --format-only \ --cfg-options data.test.ann_file=data/coco/annotations/panoptic_image_info_test-dev2017.json data.test.img_prefix=data/coco/test2017 \ --eval-options jsonfile_prefix=${WORK_DIR}/results # test with four gpus CUDA_VISIBLE_DEVICES=0,1,3,4 bash tools/dist_test.sh \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ 4 \ # four gpus --format-only \ --cfg-options data.test.ann_file=data/coco/annotations/panoptic_image_info_test-dev2017.json data.test.img_prefix=data/coco/test2017 \ --eval-options jsonfile_prefix=${WORK_DIR}/results # test with slurm GPUS=8 tools/slurm_test.sh \ ${Partition} \ ${JOB_NAME} \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ --format-only \ --cfg-options data.test.ann_file=data/coco/annotations/panoptic_image_info_test-dev2017.json data.test.img_prefix=data/coco/test2017 \ --eval-options jsonfile_prefix=${WORK_DIR}/results ``` Example Suppose we perform inference on `test2017` using pretrained MaskFormer with ResNet-50 backbone. 
```shell
# test with single gpu
CUDA_VISIBLE_DEVICES=0 python tools/test.py \
    configs/maskformer/maskformer_r50_mstrain_16x1_75e_coco.py \
    checkpoints/maskformer_r50_mstrain_16x1_75e_coco_20220221_141956-bc2699cb.pth \
    --format-only \
    --cfg-options data.test.ann_file=data/coco/annotations/panoptic_image_info_test-dev2017.json data.test.img_prefix=data/coco/test2017 \
    --eval-options jsonfile_prefix=work_dirs/maskformer/results
```

### Rename files and zip results

After inference, the panoptic segmentation results (a json file and a directory where the masks are stored) will be in `WORK_DIR`. We should rename them according to the naming convention described on [COCO's Website](https://cocodataset.org/#upload). Finally, we need to compress the json file and the directory where the masks are stored into a zip file, and rename the zip file according to the same naming convention. Note that the zip file should **directly** contain the above two files.

The commands to rename files and zip results:

```shell
# In WORK_DIR, we have panoptic segmentation results: 'panoptic' and 'results.panoptic.json'.
cd ${WORK_DIR}
# replace '[algorithm_name]' with the name of the algorithm you used.
mv ./panoptic ./panoptic_test-dev2017_[algorithm_name]_results
mv ./results.panoptic.json ./panoptic_test-dev2017_[algorithm_name]_results.json
zip panoptic_test-dev2017_[algorithm_name]_results.zip -ur panoptic_test-dev2017_[algorithm_name]_results panoptic_test-dev2017_[algorithm_name]_results.json
```
4,953
42.840708
484
md
mmdetection
mmdetection-master/docs/en/tutorials/useful_hooks.md
# Tutorial 13: Useful Hooks MMDetection and MMCV provide users with various useful hooks including log hooks, evaluation hooks, NumClassCheckHook, etc. This tutorial introduces the functionalities and usages of hooks implemented in MMDetection. For using hooks in MMCV, please read the [API documentation in MMCV](https://github.com/open-mmlab/mmcv/blob/master/docs/en/understand_mmcv/runner.md). ## CheckInvalidLossHook ## EvalHook and DistEvalHook ## ExpMomentumEMAHook and LinearMomentumEMAHook ## NumClassCheckHook ## [MemoryProfilerHook](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/hook/memory_profiler_hook.py) Memory profiler hook records memory information including virtual memory, swap memory, and the memory of the current process. This hook helps grasp the memory usage of the system and discover potential memory leak bugs. To use this hook, users should install `memory_profiler` and `psutil` by `pip install memory_profiler psutil` first. ### Usage To use this hook, users should add the following code to the config file. ```python custom_hooks = [ dict(type='MemoryProfilerHook', interval=50) ] ``` ### Result During training, you can see the messages in the log recorded by `MemoryProfilerHook` as below. The system has 250 GB (246360 MB + 9407 MB) of memory and 8 GB (5740 MB + 2452 MB) of swap memory in total. Currently 9407 MB (4.4%) of memory and 5740 MB (29.9%) of swap memory were consumed. And the current training process consumed 5434 MB of memory. ```text 2022-04-21 08:49:56,881 - mmdet - INFO - Memory information available_memory: 246360 MB, used_memory: 9407 MB, memory_utilization: 4.4 %, available_swap_memory: 5740 MB, used_swap_memory: 2452 MB, swap_memory_utilization: 29.9 %, current_process_memory: 5434 MB ``` ## SetEpochInfoHook ## SyncNormHook ## SyncRandomSizeHook ## YOLOXLrUpdaterHook ## YOLOXModeSwitchHook ## How to implement a custom hook In general, there are 10 points where hooks can be inserted from the beginning to the end of model training. The users can implement custom hooks and insert them at different points in the process of training to do what they want. - global points: `before_run`, `after_run` - points in training: `before_train_epoch`, `before_train_iter`, `after_train_iter`, `after_train_epoch` - points in validation: `before_val_epoch`, `before_val_iter`, `after_val_iter`, `after_val_epoch` For example, users can implement a hook to check loss and terminate training when loss goes NaN. To achieve that, there are three steps to go: 1. Implement a new hook that inherits the `Hook` class in MMCV, and implement `after_train_iter` method which checks whether loss goes NaN after every `n` training iterations. 2. The implemented hook should be registered in `HOOKS` by `@HOOKS.register_module()` as shown in the code below. 3. Add `custom_hooks = [dict(type='CheckInvalidLossHook', interval=50)]` in the config file. ```python import torch from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class CheckInvalidLossHook(Hook): """Check invalid loss hook. This hook will regularly check whether the loss is valid during training. Args: interval (int): Checking interval (every k iterations). Default: 50. 
""" def __init__(self, interval=50): self.interval = interval def after_train_iter(self, runner): if self.every_n_iters(runner, self.interval): assert torch.isfinite(runner.outputs['loss']), \ runner.logger.info('loss become infinite or NaN!') ``` Please read [customize_runtime](https://mmdetection.readthedocs.io/en/latest/tutorials/customize_runtime.html#customize-self-implemented-hooks) for more about implementing a custom hook.
3,774
43.940476
368
md
mmdetection
mmdetection-master/docs/zh_cn/1_exist_data_model.md
# 1: 使用已有模型在标准数据集上进行推理 MMDetection 在 [Model Zoo](https://mmdetection.readthedocs.io/en/latest/model_zoo.html) 中提供了数以百计的检测模型,并支持多种标准数据集,包括 Pascal VOC,COCO,Cityscapes,LVIS 等。这份文档将会讲述如何使用这些模型和标准数据集来运行一些常见的任务,包括: - 使用现有模型在给定图片上进行推理 - 在标准数据集上测试现有模型 - 在标准数据集上训练预定义的模型 ## 使用现有模型进行推理 推理是指使用训练好的模型来检测图像上的目标。在 MMDetection 中,一个模型被定义为一个配置文件和对应的存储在 checkpoint 文件内的模型参数的集合。 首先,我们建议从 [Faster RCNN](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) 开始,其 [配置](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py) 文件和 [checkpoint](http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth) 文件在此。 我们建议将 checkpoint 文件下载到 `checkpoints` 文件夹内。 ### 推理的高层编程接口 MMDetection 为在图片上推理提供了 Python 的高层编程接口。下面是建立模型和在图像或视频上进行推理的例子。 ```python from mmdet.apis import init_detector, inference_detector import mmcv # 指定模型的配置文件和 checkpoint 文件路径 config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' # 根据配置文件和 checkpoint 文件构建模型 model = init_detector(config_file, checkpoint_file, device='cuda:0') # 测试单张图片并展示结果 img = 'test.jpg' # 或者 img = mmcv.imread(img),这样图片仅会被读一次 result = inference_detector(model, img) # 在一个新的窗口中将结果可视化 model.show_result(img, result) # 或者将可视化结果保存为图片 model.show_result(img, result, out_file='result.jpg') # 测试视频并展示结果 video = mmcv.VideoReader('video.mp4') for frame in video: result = inference_detector(model, frame) model.show_result(frame, result, wait_time=1) ``` jupyter notebook 上的演示样例在 [demo/inference_demo.ipynb](https://github.com/open-mmlab/mmdetection/blob/master/demo/inference_demo.ipynb) 。 ### 异步接口-支持 Python 3.7+ 对于 Python 3.7+,MMDetection 也有异步接口。利用 CUDA 流,绑定 GPU 的推理代码不会阻塞 CPU,从而使得 CPU/GPU 在单线程应用中能达到更高的利用率。在推理流程中,不同数据样本的推理和不同模型的推理都能并发地运行。 您可以参考 `tests/async_benchmark.py` 来对比同步接口和异步接口的运行速度。 ```python import asyncio import torch from mmdet.apis import init_detector, async_inference_detector from mmdet.utils.contextmanagers import concurrent async def main(): config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth' device = 'cuda:0' model = init_detector(config_file, checkpoint=checkpoint_file, device=device) # 此队列用于并行推理多张图像 streamqueue = asyncio.Queue() # 队列大小定义了并行的数量 streamqueue_size = 3 for _ in range(streamqueue_size): streamqueue.put_nowait(torch.cuda.Stream(device=device)) # 测试单张图片并展示结果 img = 'test.jpg' # or 或者 img = mmcv.imread(img),这样图片仅会被读一次 async with concurrent(streamqueue): result = await async_inference_detector(model, img) # 在一个新的窗口中将结果可视化 model.show_result(img, result) # 或者将可视化结果保存为图片 model.show_result(img, result, out_file='result.jpg') asyncio.run(main()) ``` ### 演示样例 我们还提供了三个演示脚本,它们是使用高层编程接口实现的。 [源码在此](https://github.com/open-mmlab/mmdetection/tree/master/demo) 。 #### 图片样例 这是在单张图片上进行推理的脚本,可以开启 `--async-test` 来进行异步推理。 ```shell python demo/image_demo.py \ ${IMAGE_FILE} \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--device ${GPU_ID}] \ [--score-thr ${SCORE_THR}] \ [--async-test] ``` 运行样例: ```shell python demo/image_demo.py demo/demo.jpg \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --device cpu ``` #### 摄像头样例 这是使用摄像头实时图片的推理脚本。 ```shell python demo/webcam_demo.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--device ${GPU_ID}] \ [--camera-id ${CAMERA-ID}] \ [--score-thr ${SCORE_THR}] ``` 运行样例: ```shell python 
demo/webcam_demo.py \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth ``` #### 视频样例 这是在视频样例上进行推理的脚本。 ```shell python demo/video_demo.py \ ${VIDEO_FILE} \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--device ${GPU_ID}] \ [--score-thr ${SCORE_THR}] \ [--out ${OUT_FILE}] \ [--show] \ [--wait-time ${WAIT_TIME}] ``` 运行样例: ```shell python demo/video_demo.py demo/demo.mp4 \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --out result.mp4 ``` #### 视频样例,显卡加速版本 这是在视频样例上进行推理的脚本,使用显卡加速。 ```shell python demo/video_gpuaccel_demo.py \ ${VIDEO_FILE} \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--device ${GPU_ID}] \ [--score-thr ${SCORE_THR}] \ [--nvdecode] \ [--out ${OUT_FILE}] \ [--show] \ [--wait-time ${WAIT_TIME}] ``` 运行样例: ```shell python demo/video_gpuaccel_demo.py demo/demo.mp4 \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --nvdecode --out result.mp4 ``` ## 在标准数据集上测试现有模型 为了测试一个模型的精度,我们通常会在标准数据集上对其进行测试。MMDetection 支持多个公共数据集,包括 [COCO](https://cocodataset.org/) , [Pascal VOC](http://host.robots.ox.ac.uk/pascal/VOC) ,[Cityscapes](https://www.cityscapes-dataset.com/) 等等。 这一部分将会介绍如何在支持的数据集上测试现有模型。 ### 数据集准备 一些公共数据集,比如 Pascal VOC 及其镜像数据集,或者 COCO 等数据集都可以从官方网站或者镜像网站获取。 注意:在检测任务中,Pascal VOC 2012 是 Pascal VOC 2007 的无交集扩展,我们通常将两者一起使用。 我们建议将数据集下载,然后解压到项目外部的某个文件夹内,然后通过符号链接的方式,将数据集根目录链接到 `$MMDETECTION/data` 文件夹下,格式如下所示。 如果你的文件夹结构和下方不同的话,你需要在配置文件中改变对应的路径。 我们提供了下载 COCO 等数据集的脚本,你可以运行 `python tools/misc/download_dataset.py --dataset-name coco2017` 下载 COCO 数据集。 ```plain mmdetection ├── mmdet ├── tools ├── configs ├── data │ ├── coco │ │ ├── annotations │ │ ├── train2017 │ │ ├── val2017 │ │ ├── test2017 │ ├── cityscapes │ │ ├── annotations │ │ ├── leftImg8bit │ │ │ ├── train │ │ │ ├── val │ │ ├── gtFine │ │ │ ├── train │ │ │ ├── val │ ├── VOCdevkit │ │ ├── VOC2007 │ │ ├── VOC2012 ``` 有些模型需要额外的 [COCO-stuff](http://calvin.inf.ed.ac.uk/wp-content/uploads/data/cocostuffdataset/stuffthingmaps_trainval2017.zip) 数据集,比如 HTC,DetectoRS 和 SCNet,你可以下载并解压它们到 `coco` 文件夹下。文件夹会是如下结构: ```plain mmdetection ├── data │ ├── coco │ │ ├── annotations │ │ ├── train2017 │ │ ├── val2017 │ │ ├── test2017 │ │ ├── stuffthingmaps ``` PanopticFPN 等全景分割模型需要额外的 [COCO Panoptic](http://images.cocodataset.org/annotations/panoptic_annotations_trainval2017.zip) 数据集,你可以下载并解压它们到 `coco/annotations` 文件夹下。文件夹会是如下结构: ```text mmdetection ├── data │ ├── coco │ │ ├── annotations │ │ │ ├── panoptic_train2017.json │ │ │ ├── panoptic_train2017 │ │ │ ├── panoptic_val2017.json │ │ │ ├── panoptic_val2017 │ │ ├── train2017 │ │ ├── val2017 │ │ ├── test2017 ``` Cityscape 数据集的标注格式需要转换,以与 COCO 数据集标注格式保持一致,使用 `tools/dataset_converters/cityscapes.py` 来完成转换: ```shell pip install cityscapesscripts python tools/dataset_converters/cityscapes.py \ ./data/cityscapes \ --nproc 8 \ --out-dir ./data/cityscapes/annotations ``` ### 测试现有模型 我们提供了测试脚本,能够测试一个现有模型在所有数据集(COCO,Pascal VOC,Cityscapes 等)上的性能。我们支持在如下环境下测试: - 单 GPU 测试 - CPU 测试 - 单节点多 GPU 测试 - 多节点测试 根据以上测试环境,选择合适的脚本来执行测试过程。 ```shell # 单 GPU 测试 python tools/test.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--out ${RESULT_FILE}] \ [--eval ${EVAL_METRICS}] \ [--show] # CPU 测试:禁用 GPU 并运行单 GPU 测试脚本 export CUDA_VISIBLE_DEVICES=-1 python tools/test.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--out ${RESULT_FILE}] \ [--eval ${EVAL_METRICS}] \ [--show] # 单节点多 GPU 测试 bash tools/dist_test.sh \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ ${GPU_NUM} \ [--out 
${RESULT_FILE}] \ [--eval ${EVAL_METRICS}] ``` `tools/dist_test.sh` 也支持多节点测试,不过需要依赖 PyTorch 的 [启动工具](https://pytorch.org/docs/stable/distributed.html#launch-utility) 。 可选参数: - `RESULT_FILE`: 结果文件名称,需以 .pkl 形式存储。如果没有声明,则不将结果存储到文件。 - `EVAL_METRICS`: 需要测试的度量指标。可选值是取决于数据集的,比如 `proposal_fast`,`proposal`,`bbox`,`segm` 是 COCO 数据集的可选值,`mAP`,`recall` 是 Pascal VOC 数据集的可选值。Cityscapes 数据集可以测试 `cityscapes` 和所有 COCO 数据集支持的度量指标。 - `--show`: 如果开启,检测结果将被绘制在图像上,以一个新窗口的形式展示。它只适用于单 GPU 的测试,是用于调试和可视化的。请确保使用此功能时,你的 GUI 可以在环境中打开。否则,你可能会遇到这么一个错误 `cannot connect to X server`。 - `--show-dir`: 如果指明,检测结果将会被绘制在图像上并保存到指定目录。它只适用于单 GPU 的测试,是用于调试和可视化的。即使你的环境中没有 GUI,这个选项也可使用。 - `--show-score-thr`: 如果指明,得分低于此阈值的检测结果将会被移除。 - `--cfg-options`: 如果指明,这里的键值对将会被合并到配置文件中。 - `--eval-options`: 如果指明,这里的键值对将会作为字典参数被传入 `dataset.evaluation()` 函数中,仅在测试阶段使用。 ### 样例 假设你已经下载了 checkpoint 文件到 `checkpoints/` 文件下了。 1. 测试 Faster R-CNN 并可视化其结果。按任意键继续下张图片的测试。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) 。 ```shell python tools/test.py \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --show ``` 2. 测试 Faster R-CNN,并为了之后的可视化保存绘制的图像。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/faster_rcnn) 。 ```shell python tools/test.py \ configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py \ checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth \ --show-dir faster_rcnn_r50_fpn_1x_results ``` 3. 在 Pascal VOC 数据集上测试 Faster R-CNN,不保存测试结果,测试 `mAP`。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/pascal_voc) 。 ```shell python tools/test.py \ configs/pascal_voc/faster_rcnn_r50_fpn_1x_voc.py \ checkpoints/faster_rcnn_r50_fpn_1x_voc0712_20200624-c9895d40.pth \ --eval mAP ``` 4. 使用 8 块 GPU 测试 Mask R-CNN,测试 `bbox` 和 `mAP` 。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) 。 ```shell ./tools/dist_test.sh \ configs/mask_rcnn_r50_fpn_1x_coco.py \ checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ 8 \ --out results.pkl \ --eval bbox segm ``` 5. 使用 8 块 GPU 测试 Mask R-CNN,测试**每类**的 `bbox` 和 `mAP`。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) 。 ```shell ./tools/dist_test.sh \ configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ 8 \ --out results.pkl \ --eval bbox segm \ --options "classwise=True" ``` 6. 在 COCO test-dev 数据集上,使用 8 块 GPU 测试 Mask R-CNN,并生成 JSON 文件提交到官方评测服务器。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/mask_rcnn) 。 ```shell ./tools/dist_test.sh \ configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ 8 \ --format-only \ --options "jsonfile_prefix=./mask_rcnn_test-dev_results" ``` 这行命令生成两个 JSON 文件 `mask_rcnn_test-dev_results.bbox.json` 和 `mask_rcnn_test-dev_results.segm.json`。 7. 
在 Cityscapes 数据集上,使用 8 块 GPU 测试 Mask R-CNN,生成 txt 和 png 文件,并上传到官方评测服务器。配置文件和 checkpoint 文件 [在此](https://github.com/open-mmlab/mmdetection/tree/master/configs/cityscapes) 。 ```shell ./tools/dist_test.sh \ configs/cityscapes/mask_rcnn_r50_fpn_1x_cityscapes.py \ checkpoints/mask_rcnn_r50_fpn_1x_cityscapes_20200227-afe51d5a.pth \ 8 \ --format-only \ --options "txtfile_prefix=./mask_rcnn_cityscapes_test_results" ``` 生成的 png 和 txt 文件在 `./mask_rcnn_cityscapes_test_results` 文件夹下。 ### 不使用 Ground Truth 标注进行测试 MMDetection 支持在不使用 ground-truth 标注的情况下对模型进行测试,这需要用到 `CocoDataset`。如果你的数据集格式不是 COCO 格式的,请将其转化成 COCO 格式。如果你的数据集格式是 VOC 或者 Cityscapes,你可以使用 [tools/dataset_converters](https://github.com/open-mmlab/mmdetection/tree/master/tools/dataset_converters) 内的脚本直接将其转化成 COCO 格式。如果是其他格式,可以使用 [images2coco 脚本](https://github.com/open-mmlab/mmdetection/tree/master/tools/dataset_converters/images2coco.py) 进行转换。 ```shell python tools/dataset_converters/images2coco.py \ ${IMG_PATH} \ ${CLASSES} \ ${OUT} \ [--exclude-extensions] ``` 参数: - `IMG_PATH`: 图片根路径。 - `CLASSES`: 类列表文本文件名。文本中每一行存储一个类别。 - `OUT`: 输出 json 文件名。 默认保存目录和 `IMG_PATH` 在同一级。 - `exclude-extensions`: 待排除的文件后缀名。 在转换完成后,使用如下命令进行测试 ```shell # 单 GPU 测试 python tools/test.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ --format-only \ --options ${JSONFILE_PREFIX} \ [--show] # CPU 测试:禁用 GPU 并运行单 GPU 测试脚本 export CUDA_VISIBLE_DEVICES=-1 python tools/test.py \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ [--out ${RESULT_FILE}] \ [--eval ${EVAL_METRICS}] \ [--show] # 单节点多 GPU 测试 bash tools/dist_test.sh \ ${CONFIG_FILE} \ ${CHECKPOINT_FILE} \ ${GPU_NUM} \ --format-only \ --options ${JSONFILE_PREFIX} \ [--show] ``` 假设 [model zoo](https://mmdetection.readthedocs.io/en/latest/modelzoo_statistics.html) 中的 checkpoint 文件被下载到了 `checkpoints/` 文件夹下, 我们可以使用以下命令,用 8 块 GPU 在 COCO test-dev 数据集上测试 Mask R-CNN,并且生成 JSON 文件。 ```sh ./tools/dist_test.sh \ configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py \ checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth \ 8 \ -format-only \ --options "jsonfile_prefix=./mask_rcnn_test-dev_results" ``` 这行命令生成两个 JSON 文件 `mask_rcnn_test-dev_results.bbox.json` 和 `mask_rcnn_test-dev_results.segm.json`。 ### 批量推理 MMDetection 在测试模式下,既支持单张图片的推理,也支持对图像进行批量推理。默认情况下,我们使用单张图片的测试,你可以通过修改测试数据配置文件中的 `samples_per_gpu` 来开启批量测试。 开启批量推理的配置文件修改方法为: ```shell data = dict(train=dict(...), val=dict(...), test=dict(samples_per_gpu=2, ...)) ``` 或者你可以通过将 `--cfg-options` 设置为 `--cfg-options data.test.samples_per_gpu=2` 来开启它。 ### 弃用 ImageToTensor 在测试模式下,弃用 `ImageToTensor` 流程,取而代之的是 `DefaultFormatBundle`。建议在你的测试数据流程的配置文件中手动替换它,如: ```python # (已弃用)使用 ImageToTensor pipelines = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] # (建议使用)手动将 ImageToTensor 替换为 DefaultFormatBundle pipelines = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', mean=[0, 0, 0], std=[1, 1, 1]), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img']), ]) ] ``` ## 在标准数据集上训练预定义的模型 MMDetection 也为训练检测模型提供了开盖即食的工具。本节将展示在标准数据集(比如 COCO)上如何训练一个预定义的模型。 ### 数据集 训练需要准备好数据集,细节请参考 [数据集准备](#%E6%95%B0%E6%8D%AE%E9%9B%86%E5%87%86%E5%A4%87) 。 
**注意**: 目前,`configs/cityscapes` 文件夹下的配置文件都是使用 COCO 预训练权值进行初始化的。如果网络连接不可用或者速度很慢,你可以提前下载现存的模型。否则可能在训练的开始会有错误发生。 ### 学习率自动缩放 **注意**:在配置文件中的学习率是在 8 块 GPU,每块 GPU 有 2 张图像(批大小为 8\*2=16)的情况下设置的。其已经设置在`config/_base_/default_runtime.py` 中的 `auto_scale_lr.base_batch_size`。当配置文件的批次大小为`16`时,学习率会基于该值进行自动缩放。同时,为了不影响其他基于 mmdet 的 codebase,启用自动缩放标志 `auto_scale_lr.enable` 默认设置为 `False`。 如果要启用此功能,需在命令添加参数 `--auto-scale-lr`。并且在启动命令之前,请检查下即将使用的配置文件的名称,因为配置名称指示默认的批处理大小。 在默认情况下,批次大小是 `8 x 2 = 16`,例如:`faster_rcnn_r50_caffe_fpn_90k_coco.py` 或者 `pisa_faster_rcnn_x101_32x4d_fpn_1x_coco.py`;若不是默认批次,你可以在配置文件看到像 `_NxM_` 字样的,例如:`cornernet_hourglass104_mstest_32x3_210e_coco.py` 的批次大小是 `32 x 3 = 96`, 或者 `scnet_x101_64x4d_fpn_8x1_20e_coco.py` 的批次大小是 `8 x 1 = 8`。 **请记住:如果使用不是默认批次大小为`16`的配置文件,请检查配置文件中的底部,会有 `auto_scale_lr.base_batch_size`。如果找不到,可以在其继承的 `_base_=[xxx]` 文件中找到。另外,如果想使用自动缩放学习率的功能,请不要修改这些值。** 学习率自动缩放基本用法如下: ```shell python tools/train.py \ ${CONFIG_FILE} \ --auto-scale-lr \ [optional arguments] ``` 执行命令之后,会根据机器的GPU数量和训练的批次大小对学习率进行自动缩放,缩放方式详见 [线性扩展规则](https://arxiv.org/abs/1706.02677) ,比如:在 4 块 GPU 并且每张 GPU 上有 2 张图片的情况下 `lr=0.01`,那么在 16 块 GPU 并且每张 GPU 上有 4 张图片的情况下, LR 会自动缩放至`lr=0.08`。 如果不启用该功能,则需要根据 [线性扩展规则](https://arxiv.org/abs/1706.02677) 来手动计算并修改配置文件里面 `optimizer.lr` 的值。 ### 使用单 GPU 训练 我们提供了 `tools/train.py` 来开启在单张 GPU 上的训练任务。基本使用如下: ```shell python tools/train.py \ ${CONFIG_FILE} \ [optional arguments] ``` 在训练期间,日志文件和 checkpoint 文件将会被保存在工作目录下,它需要通过配置文件中的 `work_dir` 或者 CLI 参数中的 `--work-dir` 来指定。 默认情况下,模型将在每轮训练之后在 validation 集上进行测试,测试的频率可以通过设置配置文件来指定: ```python # 每 12 轮迭代进行一次测试评估 evaluation = dict(interval=12) ``` 这个工具接受以下参数: - `--no-validate` (**不建议**): 在训练期间关闭测试. - `--work-dir ${WORK_DIR}`: 覆盖工作目录. - `--resume-from ${CHECKPOINT_FILE}`: 从某个 checkpoint 文件继续训练. - `--options 'Key=value'`: 覆盖使用的配置文件中的其他设置. **注意**: `resume-from` 和 `load-from` 的区别: `resume-from` 既加载了模型的权重和优化器的状态,也会继承指定 checkpoint 的迭代次数,不会重新开始训练。`load-from` 则是只加载模型的权重,它的训练是从头开始的,经常被用于微调模型。 ### 使用 CPU 训练 使用 CPU 训练的流程和使用单 GPU 训练的流程一致,我们仅需要在训练流程开始前禁用 GPU。 ```shell export CUDA_VISIBLE_DEVICES=-1 ``` 之后运行单 GPU 训练脚本即可。 **注意**: 我们不推荐用户使用 CPU 进行训练,这太过缓慢。我们支持这个功能是为了方便用户在没有 GPU 的机器上进行调试。 ### 在多 GPU 上训练 我们提供了 `tools/dist_train.sh` 来开启在多 GPU 上的训练。基本使用如下: ```shell bash ./tools/dist_train.sh \ ${CONFIG_FILE} \ ${GPU_NUM} \ [optional arguments] ``` 可选参数和单 GPU 训练的可选参数一致。 #### 同时启动多个任务 如果你想在一台机器上启动多个任务的话,比如在一个有 8 块 GPU 的机器上启动 2 个需要 4 块GPU的任务,你需要给不同的训练任务指定不同的端口(默认为 29500)来避免冲突。 如果你使用 `dist_train.sh` 来启动训练任务,你可以使用命令来设置端口。 ```shell CUDA_VISIBLE_DEVICES=0,1,2,3 PORT=29500 ./tools/dist_train.sh ${CONFIG_FILE} 4 CUDA_VISIBLE_DEVICES=4,5,6,7 PORT=29501 ./tools/dist_train.sh ${CONFIG_FILE} 4 ``` ### 使用多台机器训练 如果您想使用由 ethernet 连接起来的多台机器, 您可以使用以下命令: 在第一台机器上: ```shell NNODES=2 NODE_RANK=0 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS ``` 在第二台机器上: ```shell NNODES=2 NODE_RANK=1 PORT=$MASTER_PORT MASTER_ADDR=$MASTER_ADDR sh tools/dist_train.sh $CONFIG $GPUS ``` 但是,如果您不使用高速网路连接这几台机器的话,训练将会非常慢。 ### 使用 Slurm 来管理任务 Slurm 是一个常见的计算集群调度系统。在 Slurm 管理的集群上,你可以使用 `slurm.sh` 来开启训练任务。它既支持单节点训练也支持多节点训练。 基本使用如下: ```shell [GPUS=${GPUS}] ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} ${CONFIG_FILE} ${WORK_DIR} ``` 以下是在一个名称为 _dev_ 的 Slurm 分区上,使用 16 块 GPU 来训练 Mask R-CNN 的例子,并且将 `work-dir` 设置在了某些共享文件系统下。 ```shell GPUS=16 ./tools/slurm_train.sh dev mask_r50_1x configs/mask_rcnn_r50_fpn_1x_coco.py /nfs/xxxx/mask_rcnn_r50_fpn_1x ``` 你可以查看 [源码](https://github.com/open-mmlab/mmdetection/blob/master/tools/slurm_train.sh) 来检查全部的参数和环境变量. 在使用 Slurm 时,端口需要以下方的某个方法之一来设置。 1. 
通过 `--options` 来设置端口。我们非常建议用这种方法,因为它无需改变原始的配置文件。 ```shell CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} --options 'dist_params.port=29500' CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} --options 'dist_params.port=29501' ``` 2. 修改配置文件来设置不同的交流端口。 在 `config1.py` 中,设置: ```python dist_params = dict(backend='nccl', port=29500) ``` 在 `config2.py` 中,设置: ```python dist_params = dict(backend='nccl', port=29501) ``` 然后你可以使用 `config1.py` 和 `config2.py` 来启动两个任务了。 ```shell CUDA_VISIBLE_DEVICES=0,1,2,3 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config1.py ${WORK_DIR} CUDA_VISIBLE_DEVICES=4,5,6,7 GPUS=4 ./tools/slurm_train.sh ${PARTITION} ${JOB_NAME} config2.py ${WORK_DIR} ```
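Returning to the "学习率自动缩放" (automatic learning-rate scaling) section above: the linear scaling rule it cites boils down to simple arithmetic. The sketch below only reproduces the figures used in that section and does not call any MMDetection API.

```python
# Linear scaling rule, using the example numbers quoted in the section above.
tuned_lr = 0.01
tuned_batch_size = 4 * 2    # 4 GPUs x 2 images per GPU
new_batch_size = 16 * 4     # 16 GPUs x 4 images per GPU

scaled_lr = tuned_lr * new_batch_size / tuned_batch_size
assert abs(scaled_lr - 0.08) < 1e-9   # matches the lr=0.08 mentioned in the text
```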
19,333
27.474227
394
md
mmdetection
mmdetection-master/docs/zh_cn/2_new_data_model.md
# 2: 在自定义数据集上进行训练 通过本文档,你将会知道如何使用自定义数据集对预先定义好的模型进行推理,测试以及训练。我们使用 [balloon dataset](https://github.com/matterport/Mask_RCNN/tree/master/samples/balloon) 作为例子来描述整个过程。 基本步骤如下: 1. 准备自定义数据集 2. 准备配置文件 3. 在自定义数据集上进行训练,测试和推理。 ## 准备自定义数据集 MMDetection 一共支持三种形式应用新数据集: 1. 将数据集重新组织为 COCO 格式。 2. 将数据集重新组织为一个中间格式。 3. 实现一个新的数据集。 我们通常建议使用前面两种方法,因为它们通常来说比第三种方法要简单。 在本文档中,我们展示一个例子来说明如何将数据转化为 COCO 格式。 **注意**:MMDetection 现只支持对 COCO 格式的数据集进行 mask AP 的评测。 所以用户如果要进行实例分割,只能将数据转成 COCO 格式。 ### COCO标注格式 用于实例分割的 COCO 数据集格式如下所示,其中的键(key)都是必要的,参考[这里](https://cocodataset.org/#format-data)来获取更多细节。 ```json { "images": [image], "annotations": [annotation], "categories": [category] } image = { "id": int, "width": int, "height": int, "file_name": str, } annotation = { "id": int, "image_id": int, "category_id": int, "segmentation": RLE or [polygon], "area": float, "bbox": [x,y,width,height], "iscrowd": 0 or 1, } categories = [{ "id": int, "name": str, "supercategory": str, }] ``` 现在假设我们使用 balloon dataset。 下载了数据集之后,我们需要实现一个函数将标注格式转化为 COCO 格式。然后我们就可以使用已经实现的 `COCODataset` 类来加载数据并进行训练以及评测。 如果你浏览过新数据集,你会发现格式如下: ```json {'base64_img_data': '', 'file_attributes': {}, 'filename': '34020010494_e5cb88e1c4_k.jpg', 'fileref': '', 'regions': {'0': {'region_attributes': {}, 'shape_attributes': {'all_points_x': [1020, 1000, 994, 1003, 1023, 1050, 1089, 1134, 1190, 1265, 1321, 1361, 1403, 1428, 1442, 1445, 1441, 1427, 1400, 1361, 1316, 1269, 1228, 1198, 1207, 1210, 1190, 1177, 1172, 1174, 1170, 1153, 1127, 1104, 1061, 1032, 1020], 'all_points_y': [963, 899, 841, 787, 738, 700, 663, 638, 621, 619, 643, 672, 720, 765, 800, 860, 896, 942, 990, 1035, 1079, 1112, 1129, 1134, 1144, 1153, 1166, 1166, 1150, 1136, 1129, 1122, 1112, 1084, 1037, 989, 963], 'name': 'polygon'}}}, 'size': 1115004} ``` 标注文件时是 JSON 格式的,其中所有键(key)组成了一张图片的所有标注。 其中将 balloon dataset 转化为 COCO 格式的代码如下所示。 ```python import os.path as osp import mmcv def convert_balloon_to_coco(ann_file, out_file, image_prefix): data_infos = mmcv.load(ann_file) annotations = [] images = [] obj_count = 0 for idx, v in enumerate(mmcv.track_iter_progress(data_infos.values())): filename = v['filename'] img_path = osp.join(image_prefix, filename) height, width = mmcv.imread(img_path).shape[:2] images.append(dict( id=idx, file_name=filename, height=height, width=width)) bboxes = [] labels = [] masks = [] for _, obj in v['regions'].items(): assert not obj['region_attributes'] obj = obj['shape_attributes'] px = obj['all_points_x'] py = obj['all_points_y'] poly = [(x + 0.5, y + 0.5) for x, y in zip(px, py)] poly = [p for x in poly for p in x] x_min, y_min, x_max, y_max = ( min(px), min(py), max(px), max(py)) data_anno = dict( image_id=idx, id=obj_count, category_id=0, bbox=[x_min, y_min, x_max - x_min, y_max - y_min], area=(x_max - x_min) * (y_max - y_min), segmentation=[poly], iscrowd=0) annotations.append(data_anno) obj_count += 1 coco_format_json = dict( images=images, annotations=annotations, categories=[{'id':0, 'name': 'balloon'}]) mmcv.dump(coco_format_json, out_file) ``` 使用如上的函数,用户可以成功将标注文件转化为 JSON 格式,之后可以使用 `CocoDataset` 对模型进行训练和评测。 ## 准备配置文件 第二步需要准备一个配置文件来成功加载数据集。假设我们想要用 balloon dataset 来训练配备了 FPN 的 Mask R-CNN ,如下是我们的配置文件。假设配置文件命名为 `mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py`,相应保存路径为 `configs/balloon/`,配置文件内容如下所示。 ```python # 这个新的配置文件继承自一个原始配置文件,只需要突出必要的修改部分即可 _base_ = 'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py' # 我们需要对头中的类别数量进行修改来匹配数据集的标注 model = dict( roi_head=dict( bbox_head=dict(num_classes=1), mask_head=dict(num_classes=1))) # 修改数据集相关设置 dataset_type = 'CocoDataset' classes 
= ('balloon',) data = dict( train=dict( img_prefix='balloon/train/', classes=classes, ann_file='balloon/train/annotation_coco.json'), val=dict( img_prefix='balloon/val/', classes=classes, ann_file='balloon/val/annotation_coco.json'), test=dict( img_prefix='balloon/val/', classes=classes, ann_file='balloon/val/annotation_coco.json')) # 我们可以使用预训练的 Mask R-CNN 来获取更好的性能 load_from = 'checkpoints/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth' ``` ## 训练一个新的模型 为了使用新的配置方法来对模型进行训练,你只需要运行如下命令。 ```shell python tools/train.py configs/balloon/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py ``` 参考[情况 1](./1_exist_data_model.md)来获取更多详细的使用方法。 ## 测试以及推理 为了测试训练完毕的模型,你只需要运行如下命令。 ```shell python tools/test.py configs/balloon/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py work_dirs/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_balloon.py/latest.pth --eval bbox segm ``` 参考[情况 1](./1_exist_data_model.md)来获取更多详细的使用方法。
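One small addition to this tutorial: the `convert_balloon_to_coco` function defined earlier is never actually invoked in the text. A minimal invocation could look like the following — the input annotation file name (`via_region_data.json`) and the directory layout are assumptions about how the downloaded balloon dataset is organized, while the output paths match the config above.

```python
# Hypothetical paths; adjust them to wherever the balloon dataset was unpacked.
convert_balloon_to_coco(
    ann_file='balloon/train/via_region_data.json',
    out_file='balloon/train/annotation_coco.json',
    image_prefix='balloon/train')
convert_balloon_to_coco(
    ann_file='balloon/val/via_region_data.json',
    out_file='balloon/val/annotation_coco.json',
    image_prefix='balloon/val')
```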
5,528
19.630597
183
md
mmdetection
mmdetection-master/docs/zh_cn/3_exist_data_new_model.md
# 3: 在标准数据集上训练自定义模型 在本文中,你将知道如何在标准数据集上训练、测试和推理自定义模型。我们将在 cityscapes 数据集上以自定义 Cascade Mask R-CNN R50 模型为例演示整个过程,为了方便说明,我们将 neck 模块中的 `FPN` 替换为 `AugFPN`,并且在训练中的自动增强类中增加 `Rotate` 或 `Translate`。 基本步骤如下所示: 1. 准备标准数据集 2. 准备你的自定义模型 3. 准备配置文件 4. 在标准数据集上对模型进行训练、测试和推理 ## 准备标准数据集 在本文中,我们使用 cityscapes 标准数据集为例进行说明。 推荐将数据集根路径采用符号链接方式链接到 `$MMDETECTION/data`。 如果你的文件结构不同,你可能需要在配置文件中进行相应的路径更改。标准的文件组织格式如下所示: ```none mmdetection ├── mmdet ├── tools ├── configs ├── data │ ├── coco │ │ ├── annotations │ │ ├── train2017 │ │ ├── val2017 │ │ ├── test2017 │ ├── cityscapes │ │ ├── annotations │ │ ├── leftImg8bit │ │ │ ├── train │ │ │ ├── val │ │ ├── gtFine │ │ │ ├── train │ │ │ ├── val │ ├── VOCdevkit │ │ ├── VOC2007 │ │ ├── VOC2012 ``` 你也可以通过如下方式设定数据集根路径 ```bash export MMDET_DATASETS=$data_root ``` 我们将会使用环境便变量 `$MMDET_DATASETS` 作为数据集的根目录,因此你无需再修改相应配置文件的路径信息。 你需要使用脚本 `tools/dataset_converters/cityscapes.py` 将 cityscapes 标注转化为 coco 标注格式。 ```shell pip install cityscapesscripts python tools/dataset_converters/cityscapes.py ./data/cityscapes --nproc 8 --out-dir ./data/cityscapes/annotations ``` 目前在 `cityscapes `文件夹中的配置文件所对应模型是采用 COCO 预训练权重进行初始化的。 如果你的网络不可用或者比较慢,建议你先手动下载对应的预训练权重,否则可能在训练开始时候出现错误。 ## 准备你的自定义模型 第二步是准备你的自定义模型或者训练相关配置。假设你想在已有的 Cascade Mask R-CNN R50 检测模型基础上,新增一个新的 neck 模块 `AugFPN` 去代替默认的 `FPN`,以下是具体实现: ### 1 定义新的 neck (例如 AugFPN) 首先创建新文件 `mmdet/models/necks/augfpn.py`. ```python from ..builder import NECKS @NECKS.register_module() class AugFPN(nn.Module): def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False): pass def forward(self, inputs): # implementation is ignored pass ``` ### 2 导入模块 你可以采用两种方式导入模块,第一种是在 `mmdet/models/necks/__init__.py` 中添加如下内容 ```python from .augfpn import AugFPN ``` 第二种是增加如下代码到对应配置中,这种方式的好处是不需要改动代码 ```python custom_imports = dict( imports=['mmdet.models.necks.augfpn.py'], allow_failed_imports=False) ``` ### 3 修改配置 ```python neck=dict( type='AugFPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5) ``` 关于自定义模型其余相关细节例如实现新的骨架网络,头部网络、损失函数,以及运行时训练配置例如定义新的优化器、使用梯度裁剪、定制训练调度策略和钩子等,请参考文档 [自定义模型](tutorials/customize_models.md) 和 [自定义运行时训练配置](tutorials/customize_runtime.md)。 ## 准备配置文件 第三步是准备训练配置所需要的配置文件。假设你打算基于 cityscapes 数据集,在 Cascade Mask R-CNN R50 中新增 `AugFPN` 模块,同时增加 `Rotate` 或者 `Translate` 数据增强策略,假设你的配置文件位于 `configs/cityscapes/` 目录下,并且取名为 `cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py`,则配置信息如下: ```python # 继承 base 配置,然后进行针对性修改 _base_ = [ '../_base_/models/cascade_mask_rcnn_r50_fpn.py', '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py' ] model = dict( # 设置为 None,表示不加载 ImageNet 预训练权重, # 后续可以设置 `load_from` 参数用来加载 COCO 预训练权重 backbone=dict(init_cfg=None), pretrained=None, # 使用新增的 `AugFPN` 模块代替默认的 `FPN` neck=dict( type='AugFPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5), # 我们也需要将 num_classes 从 80 修改为 8 来匹配 cityscapes 数据集标注 # 这个修改包括 `bbox_head` 和 `mask_head`. 
roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, # 将 COCO 类别修改为 cityscapes 类别 num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, # 将 COCO 类别修改为 cityscapes 类别 num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.05, 0.05, 0.1, 0.1]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), dict( type='Shared2FCBBoxHead', in_channels=256, fc_out_channels=1024, roi_feat_size=7, # 将 COCO 类别修改为 cityscapes 类别 num_classes=8, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.033, 0.033, 0.067, 0.067]), reg_class_agnostic=True, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)) ], mask_head=dict( type='FCNMaskHead', num_convs=4, in_channels=256, conv_out_channels=256, # 将 COCO 类别修改为 cityscapes 类别 num_classes=8, loss_mask=dict( type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)))) # 覆写 `train_pipeline`,然后新增 `AutoAugment` 训练配置 img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='AutoAugment', policies=[ [dict( type='Rotate', level=5, img_fill_val=(124, 116, 104), prob=0.5, scale=1) ], [dict(type='Rotate', level=7, img_fill_val=(124, 116, 104)), dict( type='Translate', level=5, prob=0.5, img_fill_val=(124, 116, 104)) ], ]), dict( type='Resize', img_scale=[(2048, 800), (2048, 1024)], keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] # 设置每张显卡的批处理大小,同时设置新的训练 pipeline data = dict( samples_per_gpu=1, workers_per_gpu=3, # 用新的训练 pipeline 配置覆写 pipeline train=dict(dataset=dict(pipeline=train_pipeline))) # 设置优化器 optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) optimizer_config = dict(grad_clip=None) # 设置定制的学习率策略 lr_config = dict( policy='step', warmup='linear', warmup_iters=500, warmup_ratio=0.001, step=[8]) runner = dict(type='EpochBasedRunner', max_epochs=10) # 我们采用 COCO 预训练过的 Cascade Mask R-CNN R50 模型权重作为初始化权重,可以得到更加稳定的性能 load_from = 'http://download.openmmlab.com/mmdetection/v2.0/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco/cascade_mask_rcnn_r50_fpn_1x_coco_20200203-9d4dcb24.pth' ``` ## 训练新模型 为了能够使用新增配置来训练模型,你可以运行如下命令: ```shell python tools/train.py configs/cityscapes/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py ``` 如果想了解更多用法,可以参考 [例子1](1_exist_data_model.md)。 ## 测试和推理 为了能够测试训练好的模型,你可以运行如下命令: ```shell python tools/test.py configs/cityscapes/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py work_dirs/cascade_mask_rcnn_r50_augfpn_autoaug_10e_cityscapes.py/latest.pth --eval bbox segm ``` 如果想了解更多用法,可以参考 [例子1](1_exist_data_model.md)。
8,105
27.542254
227
md
mmdetection
mmdetection-master/docs/zh_cn/article.md
## 中文解读文案汇总 ### 1 官方解读文案 #### 1.1 框架解读 - **[轻松掌握 MMDetection 整体构建流程(一)](https://zhuanlan.zhihu.com/p/337375549)** - **[轻松掌握 MMDetection 整体构建流程(二)](https://zhuanlan.zhihu.com/p/341954021)** - **[轻松掌握 MMDetection 中 Head 流程](https://zhuanlan.zhihu.com/p/343433169)** #### 1.2 算法解读 - **[轻松掌握 MMDetection 中常用算法(一):RetinaNet 及配置详解](https://zhuanlan.zhihu.com/p/346198300)** - **[轻松掌握 MMDetection 中常用算法(二):Faster R-CNN|Mask R-CNN](https://zhuanlan.zhihu.com/p/349807581)** - [轻松掌握 MMDetection 中常用算法(三):FCOS](https://zhuanlan.zhihu.com/p/358056615) - [轻松掌握 MMDetection 中常用算法(四):ATSS](https://zhuanlan.zhihu.com/p/358125611) - [轻松掌握 MMDetection 中常用算法(五):Cascade R-CNN](https://zhuanlan.zhihu.com/p/360952172) - [轻松掌握 MMDetection 中常用算法(六):YOLOF](https://zhuanlan.zhihu.com/p/370758213) - [轻松掌握 MMDetection 中常用算法(七):CenterNet](https://zhuanlan.zhihu.com/p/374891478) - [轻松掌握 MMDetection 中常用算法(八):YOLACT](https://zhuanlan.zhihu.com/p/376347955) - [轻松掌握 MMDetection 中常用算法(九):AutoAssign](https://zhuanlan.zhihu.com/p/378581552) - [YOLOX 在 MMDetection 中复现全流程解析](https://zhuanlan.zhihu.com/p/398545304) - [喂喂喂!你可以减重了!小模型 - MMDetection 新增SSDLite 、 MobileNetV2YOLOV3 两大经典算法](https://zhuanlan.zhihu.com/p/402781143) #### 1.3 工具解读 - [OpenMMLab 中混合精度训练 AMP 的正确打开方式](https://zhuanlan.zhihu.com/p/375224982) - [小白都能看懂!手把手教你使用混淆矩阵分析目标检测](https://zhuanlan.zhihu.com/p/443499860) - [MMDetection 图像缩放 Resize 详细说明 OpenMMLab](https://zhuanlan.zhihu.com/p/381117525) - [拿什么拯救我的 4G 显卡](https://zhuanlan.zhihu.com/p/430123077) - [MMDet居然能用MMCls的Backbone?论配置文件的打开方式](https://zhuanlan.zhihu.com/p/436865195) #### 1.4 知乎问答 - [COCO数据集上1x模式下为什么不采用多尺度训练?](https://www.zhihu.com/question/462170786/answer/1915119662) - [MMDetection中SOTA论文源码中将训练过程中BN层的eval打开?](https://www.zhihu.com/question/471189603/answer/2195540892) - [基于PyTorch的MMDetection中训练的随机性来自何处?](https://www.zhihu.com/question/453511684/answer/1839683634) - [单阶段、双阶段、anchor-based、anchor-free 这四者之间有什么联系吗?](https://www.zhihu.com/question/428972054/answer/1619925296) - [目标检测的深度学习方法,有推荐的书籍或资料吗?](https://www.zhihu.com/question/391577080/answer/1612593817) - [大佬们,刚入学研究生,想入门目标检测,有什么学习路线可以入门的?](https://www.zhihu.com/question/343768934/answer/1612580715) - [目标检测领域还有什么可以做的?](https://www.zhihu.com/question/280703314/answer/1627885518) - [如何看待Transformer在CV上的应用前景,未来有可能替代CNN吗?](https://www.zhihu.com/question/437495132/answer/1686380553) - [MMDetection如何学习源码?](https://www.zhihu.com/question/451585041/answer/1832498963) - [如何具体上手实现目标检测呢?](https://www.zhihu.com/question/341401981/answer/1848561187) #### 1.5 其他 - **[不得不知的 MMDetection 学习路线(个人经验版)](https://zhuanlan.zhihu.com/p/369826931)** - [OpenMMLab 社区专访之 YOLOX 复现篇](https://zhuanlan.zhihu.com/p/405913343) ### 2 社区解读文案 - [手把手带你实现经典检测网络 Mask R-CNN 的推理](https://zhuanlan.zhihu.com/p/414082071)
2,777
50.444444
109
md
mmdetection
mmdetection-master/docs/zh_cn/compatibility.md
# MMDetection v2.x 兼容性说明 ## MMDetection 2.25.0 为了加入 Mask2Former 实例分割模型,对 Mask2Former 的配置文件进行了重命名 [PR #7571](https://github.com/open-mmlab/mmdetection/pull/7571): <table align="center"> <thead> <tr align='center'> <td>在 v2.25.0 之前</td> <td>v2.25.0 及之后</td> </tr> </thead> <tbody><tr valign='top'> <th> ``` 'mask2former_xxx_coco.py' 代表全景分割的配置文件 ``` </th> <th> ``` 'mask2former_xxx_coco.py' 代表实例分割的配置文件 'mask2former_xxx_coco-panoptic.py' 代表全景分割的配置文件 ``` </th></tr> </tbody></table> ## MMDetection 2.21.0 为了支持 CPU 训练,MMCV 中进行批处理的 scatter 的代码逻辑已经被修改。我们推荐使用 MMCV v1.4.4 或更高版本, 更多信息请参考 [MMCV PR #1621](https://github.com/open-mmlab/mmcv/pull/1621). ## MMDetection 2.18.1 ### MMCV compatibility 为了修复 BaseTransformerLayer 中的权重引用问题, MultiheadAttention 中 batch first 的逻辑有所改变。 我们推荐使用 MMCV v1.3.17 或更高版本。 更多信息请参考 [MMCV PR #1418](https://github.com/open-mmlab/mmcv/pull/1418) 。 ## MMDetection 2.18.0 ### DIIHead 兼容性 为了支持 QueryInst,在 DIIHead 的返回元组中加入了 attn_feats。 ## MMDetection v2.14.0 ### MMCV 版本 为了修复 EvalHook 优先级过低的问题,MMCV v1.3.8 中所有 hook 的优先级都重新进行了调整,因此 MMDetection v2.14.0 需要依赖最新的 MMCV v1.3.8 版本。 相关信息请参考[PR #1120](https://github.com/open-mmlab/mmcv/pull/1120) ,相关问题请参考[#5343](https://github.com/open-mmlab/mmdetection/issues/5343) 。 ### SSD 兼容性 在 v2.14.0 中,为了使 SSD 能够被更灵活地使用,[PR #5291](https://github.com/open-mmlab/mmdetection/pull/5291) 重构了 SSD 的 backbone、neck 和 head。用户可以使用 tools/model_converters/upgrade_ssd_version.py 转换旧版本训练的模型。 ```shell python tools/model_converters/upgrade_ssd_version.py ${OLD_MODEL_PATH} ${NEW_MODEL_PATH} ``` - OLD_MODEL_PATH:旧版 SSD 模型的路径。 - NEW_MODEL_PATH:保存转换后模型权重的路径。 ## MMDetection v2.12.0 在 v2.12.0 到 v2.18.0(或以上)版本的这段时间,为了提升通用性和便捷性,MMDetection 正在进行大规模重构。在升级到 v2.12.0 后 MMDetection 不可避免地带来了一些 BC Breaking,包括 MMCV 的版本依赖、模型初始化方式、模型 registry 和 mask AP 的评估。 ### MMCV 版本 MMDetection v2.12.0 依赖 MMCV v1.3.3 中新增加的功能,包括:使用 `BaseModule` 统一参数初始化,模型 registry,以及[Deformable DETR](https://arxiv.org/abs/2010.04159) 中的 `MultiScaleDeformableAttn` CUDA 算子。 注意,尽管 MMCV v1.3.2 已经包含了 MMDet 所需的功能,但是存在一些已知的问题。我们建议用户跳过 MMCV v1.3.2 使用 v1.3.3 版本。 ### 统一模型初始化 为了统一 OpenMMLab 项目中的参数初始化方式,MMCV 新增加了 `BaseModule` 类,使用 `init_cfg` 参数对模块进行统一且灵活的初始化配置管理。 现在用户需要在训练脚本中显式调用 `model.init_weights()` 来初始化模型(例如 [这行代码](https://github.com/open-mmlab/mmdetection/blob/master/tools/train.py#L162) ,在这之前则是在 detector 中进行处理的。 **下游项目必须相应地更新模型初始化方式才能使用 MMDetection v2.12.0**。请参阅 [PR #4750](https://github.com/open-mmlab/mmdetection/pull/4750) 了解详情。 ### 统一模型 registry 为了能够使用在其他 OpenMMLab 项目中实现的 backbone,MMDetection v2.12.0 继承了在 MMCV (#760) 中创建的模型 registry。 这样,只要 OpenMMLab 项目实现了某个 backbone,并且该项目也使用 MMCV 中的 registry,那么用户只需修改配置即可在 MMDetection 中使用该 backbone,不再需要将代码复制到 MMDetection 中。 更多详细信息,请参阅 [PR #5059](https://github.com/open-mmlab/mmdetection/pull/5059) 。 ### Mask AP 评估 在 [PR #4898](https://github.com/open-mmlab/mmdetection/pull/4898) 和 v2.12.0 之前,对小、中、大目标的 mask AP 的评估是基于其边界框区域而不是真正的 mask 区域。 这导致 `APs` 和 `APm` 变得更高但 `APl` 变得更低,但是不会影响整体的 mask AP。 [PR #4898](https://github.com/open-mmlab/mmdetection/pull/4898) 删除了 mask AP 计算中的 `bbox` ,改为使用 mask 区域。 新的计算方式不会影响整体的 mask AP 评估,与 [Detectron2](https://github.com/facebookresearch/detectron2/)一致。 ## 与 MMDetection v1.x 的兼容性 MMDetection v2.0 经过了大规模重构并解决了许多遗留问题。 MMDetection v2.0 不兼容 v1.x 版本,在这两个版本中使用相同的模型权重运行推理会产生不同的结果。 因此,MMDetection v2.0 重新对所有模型进行了 benchmark,并在 model zoo 中提供了新模型的权重和训练记录。 新旧版本的主要的区别有四方面:坐标系、代码库约定、训练超参和模块设计。 ### 坐标系 新坐标系与 [Detectron2](https://github.com/facebookresearch/detectron2/) 一致, 将最左上角的像素的中心视为坐标原点 (0, 0) 而不是最左上角像素的左上角。 因此 COCO 边界框和分割标注中的坐标被解析为范围 `[0,width]` 和 `[0,height]` 中的坐标。 这个修改影响了所有与 bbox 
及像素选择相关的计算,变得更加自然且更加准确。 - 在新坐标系中,左上角和右下角为 (x1, y1) (x2, y2) 的框的宽度及高度计算公式为 `width = x2 - x1` 和 `height = y2 - y1`。 在 MMDetection v1.x 和之前的版本中,高度和宽度都多了 `+ 1` 的操作。 本次修改包括三部分: 1. box 回归中的检测框变换以及编码/解码。 2. IoU 计算。这会影响 ground truth 和检测框之间的匹配以及 NMS 。但对兼容性的影响可以忽略不计。 3. Box 的角点坐标为浮点型,不再取整。这能使得检测结果更为准确,也使得检测框和 RoI 的最小尺寸不再为 1,但影响很小。 - Anchor 的中心与特征图的网格点对齐,类型变为 float。 在 MMDetection v1.x 和之前的版本中,anchors 是 `int` 类型且没有居中对齐。 这会影响 RPN 中的 Anchor 生成和所有基于 Anchor 的方法。 - ROIAlign 更好地与图像坐标系对齐。新的实现来自 [Detectron2](https://github.com/facebookresearch/detectron2/tree/master/detectron2/layers/csrc/ROIAlign) 。 当 RoI 用于提取 RoI 特征时,与 MMDetection v1.x 相比默认情况下相差半个像素。 能够通过设置 `aligned=False` 而不是 `aligned=True` 来维持旧版本的设置。 - Mask 的裁剪和粘贴更准确。 1. 我们使用新的 RoIAlign 来提取 mask 目标。 在 MMDetection v1.x 中,bounding box 在提取 mask 目标之前被取整,裁剪过程是 numpy 实现的。 而在新版本中,裁剪的边界框不经过取整直接输入 RoIAlign。 此实现大大加快了训练速度(每次迭代约加速 0.1 秒,1x schedule 训练 Mask R50 时加速约 2 小时)并且理论上会更准确。 2. 在 MMDetection v2.0 中,修改后的 `paste_mask()` 函数应该比之前版本更准确。 此更改参考了 [Detectron2](https://github.com/facebookresearch/detectron2/blob/master/detectron2/structures/masks.py) 中的修改,可以将 COCO 上的 mask AP 提高约 0.5%。 ### 代码库约定 - MMDetection v2.0 更改了类别标签的顺序,减少了回归和 mask 分支里的无用参数并使得顺序更加自然(没有 +1 和 -1)。 这会影响模型的所有分类层,使其输出的类别标签顺序发生改变。回归分支和 mask head 的最后一层不再为 K 个类别保留 K+1 个通道,类别顺序与分类分支一致。 - 在 MMDetection v2.0 中,标签 “K” 表示背景,标签 \[0, K-1\] 对应于 K = num_categories 个对象类别。 - 在 MMDetection v1.x 及之前的版本中,标签 “0” 表示背景,标签 \[1, K\] 对应 K 个类别。 - **注意**:softmax RPN 的类顺序在 version\<=2.4.0 中仍然和 1.x 中的一样,而 sigmoid RPN 不受影响。从 MMDetection v2.5.0 开始,所有 head 中的类顺序是统一的。 - 不使用 R-CNN 中的低质量匹配。在 MMDetection v1.x 和之前的版本中,`max_iou_assigner` 会在 RPN 和 R-CNN 训练时给每个 ground truth 匹配低质量框。我们发现这会导致最佳的 GT 框不会被分配给某些边界框, 因此,在MMDetection v2.0 的 R-CNN 训练中默认不允许低质量匹配。这有时可能会稍微改善 box AP(约为 0.1%)。 - 单独的宽高比例系数。在 MMDetection v1.x 和以前的版本中,`keep_ratio=True` 时比例系数是单个浮点数,这并不准确,因为宽度和高度的比例系数会有一定的差异。 MMDetection v2.0 对宽度和高度使用单独的比例系数,对 AP 的提升约为 0.1%。 - 修改了 config 文件名称的规范。 由于 model zoo 中模型不断增多, MMDetection v2.0 采用新的命名规则: ```shell [model]_(model setting)_[backbone]_[neck]_(norm setting)_(misc)_(gpu x batch)_[schedule]_[dataset].py ``` 其中 (`misc`) 包括 DCN 和 GCBlock 等。更多详细信息在 [配置文件说明文档](config.md) 中说明 - MMDetection v2.0 使用新的 ResNet Caffe backbone 来减少加载预训练模型时的警告。新 backbone 中的大部分权重与以前的相同,但没有 `conv.bias`,且它们使用不同的 `img_norm_cfg`。因此,新的 backbone 不会报 `unexpected keys` 的警告。 ### 训练超参 训练超参的调整不会影响模型的兼容性,但会略微提高性能。主要有: - 通过设置 `nms_post=1000` 和 `max_num=1000`,将 nms 之后的 proposal 数量从 2000 更改为 1000。使 mask AP 和 bbox AP 提高了约 0.2%。 - Mask R-CNN、Faster R-CNN 和 RetinaNet 的默认回归损失从 smooth L1 损失更改为 L1 损失,使得 box AP 整体上都有所提升(约 0.6%)。但是,将 L1-loss 用在 Cascade R-CNN 和 HTC 等其他方法上并不能提高性能,因此我们保留这些方法的原始设置。 - 为简单起见,RoIAlign 层的 `sampling_ratio` 设置为 0。略微提升了 AP(约 0.2% 绝对值)。 - 为了提升训练速度,默认设置在训练过程中不再使用梯度裁剪。大多数模型的性能不会受到影响。对于某些模型(例如 RepPoints),我们依旧使用梯度裁剪来稳定训练过程从而获得更好的性能。 - 因为不再默认使用梯度裁剪,默认 warmup 比率从 1/3 更改为 0.001,以使模型训练预热更加平缓。不过我们重新进行基准测试时发现这种影响可以忽略不计。 ### 将模型从 v1.x 升级至 v2.0 用户可以使用脚本 `tools/model_converters/upgrade_model_version.py` 来将 MMDetection 1.x 训练的模型转换为 MMDetection v2.0。转换后的模型可以在 MMDetection v2.0 中运行,但性能略有下降(小于 1% AP)。 详细信息可以在 `configs/legacy` 中找到。 ## pycocotools 兼容性 `mmpycocotools` 是 OpenMMLab 维护的 `pycocotools` 的复刻版,适用于 MMDetection 和 Detectron2。 在 [PR #4939](https://github.com/open-mmlab/mmdetection/pull/4939) 之前,由于 `pycocotools` 和 `mmpycocotool` 具有相同的包名,如果用户已经安装了 `pyccocotools`(在相同环境下先安装了 Detectron2 ),那么 MMDetection 的安装过程会跳过安装 `mmpycocotool`。 导致 MMDetection 缺少 `mmpycocotools` 而报错。 但如果在 Detectron2 之前安装 MMDetection,则可以在相同的环境下工作。 [PR #4939](https://github.com/open-mmlab/mmdetection/pull/4939) 弃用 
mmpycocotools,使用官方 pycocotools。 在 [PR #4939](https://github.com/open-mmlab/mmdetection/pull/4939) 之后,用户能够在相同环境下安装 MMDetection 和 Detectron2,不再需要关注安装顺序。
7,409
40.629213
240
md
mmdetection
mmdetection-master/docs/zh_cn/conf.py
# Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # import os import subprocess import sys import pytorch_sphinx_theme sys.path.insert(0, os.path.abspath('../../')) # -- Project information ----------------------------------------------------- project = 'MMDetection' copyright = '2018-2021, OpenMMLab' author = 'MMDetection Authors' version_file = '../../mmdet/version.py' def get_version(): with open(version_file, 'r') as f: exec(compile(f.read(), version_file, 'exec')) return locals()['__version__'] # The full version, including alpha/beta/rc tags release = get_version() # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.napoleon', 'sphinx.ext.viewcode', 'myst_parser', 'sphinx_markdown_tables', 'sphinx_copybutton', ] myst_enable_extensions = ['colon_fence'] myst_heading_anchors = 3 autodoc_mock_imports = [ 'matplotlib', 'pycocotools', 'terminaltables', 'mmdet.version', 'mmcv.ops' ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # source_suffix = { '.rst': 'restructuredtext', '.md': 'markdown', } # The master toctree document. master_doc = 'index' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # html_theme = 'sphinx_rtd_theme' html_theme = 'pytorch_sphinx_theme' html_theme_path = [pytorch_sphinx_theme.get_html_theme_path()] html_theme_options = { 'menu': [ { 'name': 'GitHub', 'url': 'https://github.com/open-mmlab/mmdetection' }, ], # Specify the language of shared menu 'menu_lang': 'cn', } # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] html_css_files = ['css/readthedocs.css'] language = 'zh_CN' # -- Extension configuration ------------------------------------------------- # Ignore >>> when copying code copybutton_prompt_text = r'>>> |\.\.\. ' copybutton_prompt_is_regexp = True def builder_inited_handler(app): subprocess.run(['./stat.py']) def setup(app): app.connect('builder-inited', builder_inited_handler)
3,461
28.092437
79
py
mmdetection
mmdetection-master/docs/zh_cn/conventions.md
# 默认约定 如果你想把 MMDetection 修改为自己的项目,请遵循下面的约定。 ## 损失 在 MMDetection 中,`model(**data)` 的返回值是一个字典,包含着所有的损失和评价指标,他们将会由 `model(**data)` 返回。 例如,在 bbox head 中, ```python class BBoxHead(nn.Module): ... def loss(self, ...): losses = dict() # 分类损失 losses['loss_cls'] = self.loss_cls(...) # 分类准确率 losses['acc'] = accuracy(...) # 边界框损失 losses['loss_bbox'] = self.loss_bbox(...) return losses ``` `'bbox_head.loss()'` 在模型 forward 阶段会被调用。返回的字典中包含了 `'loss_bbox'`,`'loss_cls'`,`'acc'`。只有 `'loss_bbox'`, `'loss_cls'` 会被用于反向传播,`'acc'` 只会被作为评价指标来监控训练过程。 我们默认,只有那些键的名称中包含 `'loss'` 的值会被用于反向传播。这个行为可以通过修改 `BaseDetector.train_step()` 来改变。 ## 空 proposals 在 MMDetection 中,我们为两阶段方法中空 proposals 的情况增加了特殊处理和单元测试。我们同时需要处理整个 batch 和单一图片中空 proposals 的情况。例如,在 CascadeRoIHead 中, ```python # 简单的测试 ... # 在整个 batch中 都没有 proposals if rois.shape[0] == 0: bbox_results = [[ np.zeros((0, 5), dtype=np.float32) for _ in range(self.bbox_head[-1].num_classes) ]] * num_imgs if self.with_mask: mask_classes = self.mask_head[-1].num_classes segm_results = [[[] for _ in range(mask_classes)] for _ in range(num_imgs)] results = list(zip(bbox_results, segm_results)) else: results = bbox_results return results ... # 在单张图片中没有 proposals for i in range(self.num_stages): ... if i < self.num_stages - 1: for j in range(num_imgs): # 处理空 proposals if rois[j].shape[0] > 0: bbox_label = cls_score[j][:, :-1].argmax(dim=1) refine_roi = self.bbox_head[i].regress_by_class( rois[j], bbox_label[j], bbox_pred[j], img_metas[j]) refine_roi_list.append(refine_roi) ``` 如果你有自定义的 `RoIHead`, 你可以参考上面的方法来处理空 proposals 的情况。 ## 全景分割数据集 在 MMDetection 中,我们支持了 COCO 全景分割数据集 `CocoPanopticDataset`。对于它的实现,我们在这里声明一些默认约定。 1. 在 mmdet\<=2.16.0 时,语义分割标注中的前景和背景标签范围与 MMDetection 中的默认规定有所不同。标签 `0` 代表 `VOID` 标签。 从 mmdet=2.17.0 开始,为了和框的类别标注保持一致,语义分割标注的类别标签也改为从 `0` 开始,标签 `255` 代表 `VOID` 类。 为了达成这一目标,我们在流程 `Pad` 里支持了设置 `seg` 的填充值的功能。 2. 在评估中,全景分割结果必须是一个与原图大小相同的图。结果图中每个像素的值有如此形式:`instance_id * INSTANCE_OFFSET + category_id`。
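To make the last convention concrete, here is a minimal sketch of how a panoptic pixel value is composed and decomposed. The value `1000` for `INSTANCE_OFFSET` is an assumption (it matches the constant MMDetection defines in `mmdet.core.evaluation`); the category and instance ids are made up for illustration.

```python
INSTANCE_OFFSET = 1000  # assumed value of the MMDetection constant

category_id = 17   # hypothetical 'thing' class label
instance_id = 3    # hypothetical instance index of that class in the image

# encode one pixel of the panoptic result
pixel_value = instance_id * INSTANCE_OFFSET + category_id   # -> 3017

# decode it back
assert pixel_value % INSTANCE_OFFSET == category_id
assert pixel_value // INSTANCE_OFFSET == instance_id
```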
2,283
29.052632
150
md
mmdetection
mmdetection-master/docs/zh_cn/faq.md
# 常见问题解答 我们在这里列出了使用时的一些常见问题及其相应的解决方案。 如果您发现有一些问题被遗漏,请随时提 PR 丰富这个列表。 如果您无法在此获得帮助,请使用 [issue模板](https://github.com/open-mmlab/mmdetection/blob/master/.github/ISSUE_TEMPLATE/error-report.md/)创建问题,但是请在模板中填写所有必填信息,这有助于我们更快定位问题。 ## MMCV 安装相关 - MMCV 与 MMDetection 的兼容问题: "ConvWS is already registered in conv layer"; "AssertionError: MMCV==xxx is used but incompatible. Please install mmcv>=xxx, \<=xxx." 请按 [安装说明](https://mmdetection.readthedocs.io/zh_CN/latest/get_started.html#installation) 为你的 MMDetection 安装正确版本的 MMCV 。 - "No module named 'mmcv.ops'"; "No module named 'mmcv.\_ext'". 原因是安装了 `mmcv` 而不是 `mmcv-full`。 1. `pip uninstall mmcv` 卸载安装的 `mmcv` 2. 安装 `mmcv-full` 根据 [安装说明](https://mmcv.readthedocs.io/zh/latest/#installation)。 ## PyTorch/CUDA 环境相关 - "RTX 30 series card fails when building MMCV or MMDet" 1. 临时解决方案为使用命令 `MMCV_WITH_OPS=1 MMCV_CUDA_ARGS='-gencode=arch=compute_80,code=sm_80' pip install -e .` 进行编译。 常见报错信息为 `nvcc fatal : Unsupported gpu architecture 'compute_86'` 意思是你的编译器不支持 sm_86 架构(包括英伟达 30 系列的显卡)的优化,至 CUDA toolkit 11.0 依旧未支持. 这个命令是通过增加宏 `MMCV_CUDA_ARGS='-gencode=arch=compute_80,code=sm_80` 让 nvcc 编译器为英伟达 30 系列显卡进行 `sm_80` 的优化,虽然这有可能会无法发挥出显卡所有性能。 2. 有开发者已经在 [pytorch/pytorch#47585](https://github.com/pytorch/pytorch/pull/47585) 更新了 PyTorch 默认的编译 flag, 但是我们对此并没有进行测试。 - "invalid device function" or "no kernel image is available for execution". 1. 检查您正常安装了 CUDA runtime (一般在`/usr/local/`),或者使用 `nvcc --version` 检查本地版本,有时安装 PyTorch 会顺带安装一个 CUDA runtime,并且实际优先使用 conda 环境中的版本,你可以使用 `conda list cudatoolkit` 查看其版本。 2. 编译 extension 的 CUDA Toolkit 版本与运行时的 CUDA Toolkit 版本是否相符, - 如果您从源码自己编译的,使用 `python mmdet/utils/collect_env.py` 检查编译编译 extension 的 CUDA Toolkit 版本,然后使用 `conda list cudatoolkit` 检查当前 conda 环境是否有 CUDA Toolkit,若有检查版本是否匹配, 如不匹配,更换 conda 环境的 CUDA Toolkit,或者使用匹配的 CUDA Toolkit 中的 nvcc 编译即可,如环境中无 CUDA Toolkit,可以使用 `nvcc -V`。 等命令查看当前使用的 CUDA runtime。 - 如果您是通过 pip 下载的预编译好的版本,请确保与当前 CUDA runtime 一致。 3. 运行 `python mmdet/utils/collect_env.py` 检查是否为正确的 GPU 架构编译的 PyTorch, torchvision, 与 MMCV。 你或许需要设置 `TORCH_CUDA_ARCH_LIST` 来重新安装 MMCV,可以参考 [GPU 架构表](https://docs.nvidia.com/cuda/cuda-compiler-driver-nvcc/index.html#gpu-feature-list), 例如, 运行 `TORCH_CUDA_ARCH_LIST=7.0 pip install mmcv-full` 为 Volta GPU 编译 MMCV。这种架构不匹配的问题一般会出现在使用一些旧型号的 GPU 时候出现, 例如, Tesla K80。 - "undefined symbol" or "cannot open xxx.so". 1. 如果这些 symbol 属于 CUDA/C++ (如 libcudart.so 或者 GLIBCXX),使用 `python mmdet/utils/collect_env.py`检查 CUDA/GCC runtime 与编译 MMCV 的 CUDA 版本是否相同。 2. 如果这些 symbols 属于 PyTorch,(例如, symbols containing caffe, aten, and TH), 检查当前 Pytorch 版本是否与编译 MMCV 的版本一致。 3. 运行 `python mmdet/utils/collect_env.py` 检查 PyTorch, torchvision, MMCV 等的编译环境与运行环境一致。 - setuptools.sandbox.UnpickleableException: DistutilsSetupError("each element of 'ext_modules' option must be an Extension instance or 2-tuple") 1. 如果你在使用 miniconda 而不是 anaconda,检查是否正确的安装了 Cython 如 [#3379](https://github.com/open-mmlab/mmdetection/issues/3379). 2. 检查环境中的 `setuptools`, `Cython`, and `PyTorch` 相互之间版本是否匹配。 - "Segmentation fault". 1. 检查 GCC 的版本,通常是因为 PyTorch 版本与 GCC 版本不匹配 (例如 GCC \< 4.9 ),我们推荐用户使用 GCC 5.4,我们也不推荐使用 GCC 5.5, 因为有反馈 GCC 5.5 会导致 "segmentation fault" 并且切换到 GCC 5.4 就可以解决问题。 2. 检查是否正确安装了 CUDA 版本的 PyTorch 。 ```shell python -c 'import torch; print(torch.cuda.is_available())' ``` 是否返回True。 3. 如果 `torch` 的安装是正确的,检查是否正确编译了 MMCV。 ```shell python -c 'import mmcv; import mmcv.ops' ``` 4. 如果 MMCV 与 PyTorch 都被正确安装了,则使用 `ipdb`, `pdb` 设置断点,直接查找哪一部分的代码导致了 `segmentation fault`。 ## Training 相关 - "Loss goes Nan" 1. 
检查数据的标注是否正常, 长或宽为 0 的框可能会导致回归 loss 变为 nan,一些小尺寸(宽度或高度小于 1)的框在数据增强(例如,instaboost)后也会导致此问题。 因此,可以检查标注并过滤掉那些特别小甚至面积为 0 的框,并关闭一些可能会导致 0 面积框出现数据增强。 2. 降低学习率:由于某些原因,例如 batch size 大小的变化, 导致当前学习率可能太大。 您可以降低为可以稳定训练模型的值。 3. 延长 warm up 的时间:一些模型在训练初始时对学习率很敏感,您可以把 `warmup_iters` 从 500 更改为 1000 或 2000。 4. 添加 gradient clipping: 一些模型需要梯度裁剪来稳定训练过程。 默认的 `grad_clip` 是 `None`, 你可以在 config 设置 `optimizer_config=dict(_delete_=True, grad_clip=dict(max_norm=35, norm_type=2))` 如果你的 config 没有继承任何包含 `optimizer_config=dict(grad_clip=None)`, 你可以直接设置`optimizer_config=dict(grad_clip=dict(max_norm=35, norm_type=2))`. - "GPU out of memory" 1. 存在大量 ground truth boxes 或者大量 anchor 的场景,可能在 assigner 会 OOM。 您可以在 assigner 的配置中设置 `gpu_assign_thr=N`,这样当超过 N 个 GT boxes 时,assigner 会通过 CPU 计算 IOU。 2. 在 backbone 中设置 `with_cp=True`。 这使用 PyTorch 中的 `sublinear strategy` 来降低 backbone 占用的 GPU 显存。 3. 使用 `config/fp16` 中的示例尝试混合精度训练。`loss_scale` 可能需要针对不同模型进行调整。 4. 你也可以尝试使用 `AvoidCUDAOOM` 来避免该问题。首先它将尝试调用 `torch.cuda.empty_cache()`。如果失败,将会尝试把输入类型转换到 FP16。如果仍然失败,将会把输入从 GPUs 转换到 CPUs 进行计算。这里提供了两个使用的例子: ```python from mmdet.utils import AvoidCUDAOOM output = AvoidCUDAOOM.retry_if_cuda_oom(some_function)(input1, input2) ``` 你也可也使用 `AvoidCUDAOOM` 作为装饰器让代码遇到 OOM 的时候继续运行: ```python from mmdet.utils import AvoidCUDAOOM @AvoidCUDAOOM.retry_if_cuda_oom def function(*args, **kwargs): ... return xxx ``` - "RuntimeError: Expected to have finished reduction in the prior iteration before starting a new one" 1. 这个错误出现在存在参数没有在 forward 中使用,容易在 DDP 中运行不同分支时发生。 2. 你可以在 config 设置 `find_unused_parameters = True` 进行训练 (会降低训练速度)。 3. 你也可以通过在 config 中的 `optimizer_config` 里设置 `detect_anomalous_params=True` 查找哪些参数没有用到,但是需要 MMCV 的版本 >= 1.4.1。 - 训练中保存最好模型 可以通过配置 `evaluation = dict(save_best=‘auto’)`开启。在 auto 参数情况下会根据返回的验证结果中的第一个 key 作为选择最优模型的依据,你也可以直接设置评估结果中的 key 来手动设置,例如 `evaluation = dict(save_best=‘mAP’)`。 - 在 Resume 训练中使用 `ExpMomentumEMAHook` 如果在训练中使用了 `ExpMomentumEMAHook`,那么 resume 时候不能仅仅通过命令行参数 `--resume-from` 或 `--cfg-options resume_from` 实现恢复模型参数功能例如 `python tools/train.py configs/yolox/yolox_s_8x8_300e_coco.py --resume-from ./work_dir/yolox_s_8x8_300e_coco/epoch_x.pth`。以 `yolox_s` 算法为例,由于 `ExpMomentumEMAHook` 需要重新加载权重,你可以通过如下做法实现: ```python # 直接打开 configs/yolox/yolox_s_8x8_300e_coco.py 修改所有 resume_from 字段 resume_from=./work_dir/yolox_s_8x8_300e_coco/epoch_x.pth custom_hooks=[... dict( type='ExpMomentumEMAHook', resume_from=./work_dir/yolox_s_8x8_300e_coco/epoch_x.pth, momentum=0.0001, priority=49) ] ``` ## Evaluation 相关 - 使用 COCO Dataset 的测评接口时, 测评结果中 AP 或者 AR = -1 1. 根据COCO数据集的定义,一张图像中的中等物体与小物体面积的阈值分别为 9216(96\*96)与 1024(32\*32)。 2. 如果在某个区间没有检测框 AP 与 AR 认定为 -1. ## Model 相关 - **ResNet style 参数说明** ResNet style 可选参数允许 `pytorch` 和 `caffe`,其差别在于 Bottleneck 模块。Bottleneck 是 `1x1-3x3-1x1` 堆叠结构,在 `caffe` 模式模式下 stride=2 参数放置在第一个 `1x1` 卷积处,而 `pyorch` 模式下 stride=2 放在第二个 `3x3` 卷积处。一个简单示例如下: ```python if self.style == 'pytorch': self.conv1_stride = 1 self.conv2_stride = stride else: self.conv1_stride = stride self.conv2_stride = 1 ``` - **ResNeXt 参数说明** ResNeXt 来自论文 [`Aggregated Residual Transformations for Deep Neural Networks`](https://arxiv.org/abs/1611.05431). 
其引入分组卷积,并且通过变量基数来控制组的数量达到精度和复杂度的平衡,其有两个超参 `baseWidth` 和 `cardinality `来控制内部 Bottleneck 模块的基本宽度和分组数参数。以 MMDetection 中配置名为 `mask_rcnn_x101_64x4d_fpn_mstrain-poly_3x_coco.py` 为例,其中 `mask_rcnn` 代表算法采用 Mask R-CNN,`x101` 代表骨架网络采用 ResNeXt-101,`64x4d`代表 Bottleneck 一共分成 64 组,每组的基本宽度是 4。 - **骨架网络 eval 模式说明** 因为检测模型通常比较大且输入图片分辨率很高,这会导致检测模型的 batch 很小,通常是 2,这会使得 BatchNorm 在训练过程计算的统计量方差非常大,不如主干网络预训练时得到的统计量稳定,因此在训练是一般都会使用 `norm_eval=True` 模式,直接使用预训练主干网络中的 BatchNorm 统计量,少数使用大 batch 的算法是 `norm_eval=False` 模式,例如 NASFPN。对于没有 ImageNet 预训练的骨架网络,如果 batch 比较小,可以考虑使用 `SyncBN`。
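As a small illustration of the last FAQ entry, this is roughly how the `norm_eval` choice shows up in a backbone config. It is only a sketch assuming a standard ResNet-50 backbone and is not tied to any particular detector.

```python
# Keep BatchNorm layers in eval mode during training (the common small-batch setting),
# i.e. reuse the BN statistics of the ImageNet-pretrained backbone.
model = dict(
    backbone=dict(
        type='ResNet',
        depth=50,
        norm_cfg=dict(type='BN', requires_grad=True),
        norm_eval=True))

# For a backbone without ImageNet pretraining and a small batch size, SyncBN can be
# considered instead:
#   norm_cfg=dict(type='SyncBN', requires_grad=True), norm_eval=False
```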
7,554
45.349693
393
md
mmdetection
mmdetection-master/docs/zh_cn/get_started.md
## 依赖 - Linux 和 macOS (Windows 理论上支持) - Python 3.7 + - PyTorch 1.3+ - CUDA 9.2+ (如果基于 PyTorch 源码安装,也能够支持 CUDA 9.0) - GCC 5+ - [MMCV](https://mmcv.readthedocs.io/en/latest/#installation) MMDetection 和 MMCV 版本兼容性如下所示,需要安装正确的 MMCV 版本以避免安装出现问题。 | MMDetection 版本 | MMCV 版本 | | :--------------: | :------------------------: | | master | mmcv-full>=1.3.17, \<1.8.0 | | 2.28.2 | mmcv-full>=1.3.17, \<1.8.0 | | 2.28.1 | mmcv-full>=1.3.17, \<1.8.0 | | 2.28.0 | mmcv-full>=1.3.17, \<1.8.0 | | 2.27.0 | mmcv-full>=1.3.17, \<1.8.0 | | 2.26.0 | mmcv-full>=1.3.17, \<1.8.0 | | 2.25.3 | mmcv-full>=1.3.17, \<1.7.0 | | 2.25.2 | mmcv-full>=1.3.17, \<1.7.0 | | 2.25.1 | mmcv-full>=1.3.17, \<1.6.0 | | 2.25.0 | mmcv-full>=1.3.17, \<1.6.0 | | 2.24.1 | mmcv-full>=1.3.17, \<1.6.0 | | 2.24.0 | mmcv-full>=1.3.17, \<1.6.0 | | 2.23.0 | mmcv-full>=1.3.17, \<1.5.0 | | 2.22.0 | mmcv-full>=1.3.17, \<1.5.0 | | 2.21.0 | mmcv-full>=1.3.17, \<1.5.0 | | 2.20.0 | mmcv-full>=1.3.17, \<1.5.0 | | 2.19.1 | mmcv-full>=1.3.17, \<1.5.0 | | 2.19.0 | mmcv-full>=1.3.17, \<1.5.0 | | 2.18.1 | mmcv-full>=1.3.17, \<1.4.0 | | 2.18.0 | mmcv-full>=1.3.14, \<1.4.0 | | 2.17.0 | mmcv-full>=1.3.14, \<1.4.0 | | 2.16.0 | mmcv-full>=1.3.8, \<1.4.0 | | 2.15.1 | mmcv-full>=1.3.8, \<1.4.0 | | 2.15.0 | mmcv-full>=1.3.8, \<1.4.0 | | 2.14.0 | mmcv-full>=1.3.8, \<1.4.0 | | 2.13.0 | mmcv-full>=1.3.3, \<1.4.0 | | 2.12.0 | mmcv-full>=1.3.3, \<1.4.0 | | 2.11.0 | mmcv-full>=1.2.4, \<1.4.0 | | 2.10.0 | mmcv-full>=1.2.4, \<1.4.0 | | 2.9.0 | mmcv-full>=1.2.4, \<1.4.0 | | 2.8.0 | mmcv-full>=1.2.4, \<1.4.0 | | 2.7.0 | mmcv-full>=1.1.5, \<1.4.0 | | 2.6.0 | mmcv-full>=1.1.5, \<1.4.0 | | 2.5.0 | mmcv-full>=1.1.5, \<1.4.0 | | 2.4.0 | mmcv-full>=1.1.1, \<1.4.0 | | 2.3.0 | mmcv-full==1.0.5 | | 2.3.0rc0 | mmcv-full>=1.0.2 | | 2.2.1 | mmcv==0.6.2 | | 2.2.0 | mmcv==0.6.2 | | 2.1.0 | mmcv>=0.5.9, \<=0.6.1 | | 2.0.0 | mmcv>=0.5.1, \<=0.5.8 | \*\*注意:\*\*如果已经安装了 mmcv,首先需要使用 `pip uninstall mmcv` 卸载已安装的 mmcv,如果同时安装了 mmcv 和 mmcv-full,将会报 `ModuleNotFoundError` 错误。 ## 安装流程 ### 从零开始设置脚本 假设当前已经成功安装 CUDA 10.1,这里提供了一个完整的基于 conda 安装 MMDetection 的脚本。您可以参考下一节中的分步安装说明。 ```shell conda create -n openmmlab python=3.7 pytorch==1.6.0 cudatoolkit=10.1 torchvision -c pytorch -y conda activate openmmlab pip install openmim mim install mmcv-full git clone https://github.com/open-mmlab/mmdetection.git cd mmdetection pip install -r requirements/build.txt pip install -v -e . ``` ### 准备环境 1. 使用 conda 新建虚拟环境,并进入该虚拟环境; ```shell conda create -n open-mmlab python=3.7 -y conda activate open-mmlab ``` 2. 基于 [PyTorch 官网](https://pytorch.org/)安装 PyTorch 和 torchvision,例如: ```shell conda install pytorch torchvision -c pytorch ``` **注意**:需要确保 CUDA 的编译版本和运行版本匹配。可以在 [PyTorch 官网](https://pytorch.org/)查看预编译包所支持的 CUDA 版本。 `例 1` 例如在 `/usr/local/cuda` 下安装了 CUDA 10.1, 并想安装 PyTorch 1.5,则需要安装支持 CUDA 10.1 的预构建 PyTorch: ```shell conda install pytorch cudatoolkit=10.1 torchvision -c pytorch ``` `例 2` 例如在 `/usr/local/cuda` 下安装了 CUDA 9.2, 并想安装 PyTorch 1.3.1,则需要安装支持 CUDA 9.2 的预构建 PyTorch: ```shell conda install pytorch=1.3.1 cudatoolkit=9.2 torchvision=0.4.2 -c pytorch ``` 如果不是安装预构建的包,而是从源码中构建 PyTorch,则可以使用更多的 CUDA 版本,例如 CUDA 9.0。 ### 安装 MMDetection 我们建议使用 [MIM](https://github.com/open-mmlab/mim) 来安装 MMDetection: ```shell pip install openmim mim install mmdet ``` MIM 能够自动地安装 OpenMMLab 的项目以及对应的依赖包。 或者,可以手动安装 MMDetection: 1. 
安装 mmcv-full,我们建议使用预构建包来安装: ```shell pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/{cu_version}/{torch_version}/index.html ``` 需要把命令行中的 `{cu_version}` 和 `{torch_version}` 替换成对应的版本。例如:在 CUDA 11 和 PyTorch 1.7.0 的环境下,可以使用下面命令安装最新版本的 MMCV: ```shell pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu110/torch1.7.0/index.html ``` 请参考 [MMCV](https://mmcv.readthedocs.io/en/latest/#installation) 获取不同版本的 MMCV 所兼容的的不同的 PyTorch 和 CUDA 版本。同时,也可以通过以下命令行从源码编译 MMCV: ```shell git clone https://github.com/open-mmlab/mmcv.git cd mmcv MMCV_WITH_OPS=1 pip install -e . # 安装好 mmcv-full cd .. ``` 或者,可以直接使用命令行安装: ```shell pip install mmcv-full ``` PyTorch 在 1.x.0 和 1.x.1 之间通常是兼容的,故 mmcv-full 只提供 1.x.0 的编译包。如果你的 PyTorch 版本是 1.x.1,你可以放心地安装在 1.x.0 版本编译的 mmcv-full。 ``` # 我们可以忽略 PyTorch 的小版本号 pip install mmcv-full -f https://download.openmmlab.com/mmcv/dist/cu110/torch1.7/index.html ``` 2. 安装 MMDetection: 你可以直接通过如下命令从 pip 安装使用 mmdetection: ```shell pip install mmdet ``` 或者从 git 仓库编译源码 ```shell git clone https://github.com/open-mmlab/mmdetection.git cd mmdetection pip install -r requirements/build.txt pip install -v -e . # or "python setup.py develop" ``` 3. 安装额外的依赖以使用 Instaboost, 全景分割, 或者 LVIS 数据集 ```shell # 安装 instaboost 依赖 pip install instaboostfast # 安装全景分割依赖 pip install git+https://github.com/cocodataset/panopticapi.git # 安装 LVIS 数据集依赖 pip install git+https://github.com/lvis-dataset/lvis-api.git # 安装 albumentations 依赖 pip install -r requirements/albu.txt ``` **注意:** (1) 按照上述说明,MMDetection 安装在 `dev` 模式下,因此在本地对代码做的任何修改都会生效,无需重新安装; (2) 如果希望使用 `opencv-python-headless` 而不是 `opencv-python`, 可以在安装 MMCV 之前安装; (3) 一些安装依赖是可以选择的。例如只需要安装最低运行要求的版本,则可以使用 `pip install -v -e .` 命令。如果希望使用可选择的像 `albumentations` 和 `imagecorruptions` 这种依赖项,可以使用 `pip install -r requirements/optional.txt` 进行手动安装,或者在使用 `pip` 时指定所需的附加功能(例如 `pip install -v -e .[optional]`),支持附加功能的有效键值包括 `all`、`tests`、`build` 以及 `optional` 。 (4) 如果希望使用 `albumentations`,我们建议使用 `pip install -r requirements/albu.txt` 或者 `pip install -U albumentations --no-binary qudida,albumentations` 进行安装。 如果简单地使用 `pip install albumentations>=0.3.2` 进行安装,则会同时安装 `opencv-python-headless`(即便已经安装了 `opencv-python` 也会再次安装)。我们建议在安装 `albumentations` 后检查环境,以确保没有同时安装 `opencv-python` 和 `opencv-python-headless`,因为同时安装可能会导致一些问题。更多细节请参考[官方文档](https://albumentations.ai/docs/getting_started/installation/#note-on-opencv-dependencies)。 ### 只在 CPU 安装 我们的代码能够建立在只使用 CPU 的环境(CUDA 不可用)。 在 CPU 模式下,可以进行模型训练(需要 MMCV 版本 >= 1.4.4)、测试或者推理,然而以下功能将在 CPU 模式下不能使用: - Deformable Convolution - Modulated Deformable Convolution - ROI pooling - Deformable ROI pooling - CARAFE: Content-Aware ReAssembly of FEatures - SyncBatchNorm - CrissCrossAttention: Criss-Cross Attention - MaskedConv2d - Temporal Interlace Shift - nms_cuda - sigmoid_focal_loss_cuda - bbox_overlaps 因此,如果尝试使用包含上述操作的模型进行训练/测试/推理,将会报错。下表列出了由于依赖上述算子而无法在 CPU 上运行的相关模型: | 操作 | 模型 | | :-----------------------------------------------------: | :--------------------------------------------------------------------------------------: | | Deformable Convolution/Modulated Deformable Convolution | DCN、Guided Anchoring、RepPoints、CentripetalNet、VFNet、CascadeRPN、NAS-FCOS、DetectoRS | | MaskedConv2d | Guided Anchoring | | CARAFE | CARAFE | | SyncBatchNorm | ResNeSt | ### 另一种选择: Docker 镜像 我们提供了 [Dockerfile](https://github.com/open-mmlab/mmdetection/blob/master/docker/Dockerfile) 来生成镜像,请确保 [docker](https://docs.docker.com/engine/install/) 的版本 >= 19.03。 ```shell # 基于 PyTorch 1.6, CUDA 10.1 生成镜像 docker build -t mmdetection docker/ ``` 运行命令: ```shell 
docker run --gpus all --shm-size=8g -it -v {DATA_DIR}:/mmdetection/data mmdetection
```

### 使用多个 MMDetection 版本进行开发

训练和测试的脚本已经在 PYTHONPATH 中进行了修改,以确保脚本使用当前目录中的 MMDetection。

如果想使用环境中已安装的默认 MMDetection,而不是当前目录下正在使用的版本,可以删除相关脚本中出现的以下代码:

```shell
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH
```

## 验证

为了验证是否正确安装了 MMDetection 和所需的环境,我们可以运行示例的 Python 代码来初始化检测器并推理一个演示图像:

```python
from mmdet.apis import init_detector, inference_detector

config_file = 'configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
# 从 model zoo 下载 checkpoint 并放在 `checkpoints/` 文件夹下
# 网址为: http://download.openmmlab.com/mmdetection/v2.0/faster_rcnn/faster_rcnn_r50_fpn_1x_coco/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth
checkpoint_file = 'checkpoints/faster_rcnn_r50_fpn_1x_coco_20200130-047c8118.pth'
device = 'cuda:0'
# 初始化检测器
model = init_detector(config_file, checkpoint_file, device=device)
# 推理演示图像
inference_detector(model, 'demo/demo.jpg')
```

如果成功安装 MMDetection,则上面的代码可以完整地运行。
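除了上面的推理示例,也可以用一小段脚本快速核对当前环境中的版本是否落在前文兼容性表格的范围内(以下只是一个示意脚本,打印内容以实际环境为准):

```python
# 打印 mmcv-full、mmdet、PyTorch 的版本以及 CUDA 是否可用,
# 便于与上文的版本兼容表对照,仅作安装检查用途。
import torch
import mmcv
import mmdet

print('mmcv:', mmcv.__version__)
print('mmdet:', mmdet.__version__)
print('torch:', torch.__version__, 'cuda available:', torch.cuda.is_available())
```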
9,100
33.343396
466
md
mmdetection
mmdetection-master/docs/zh_cn/model_zoo.md
# 模型库 ## 镜像地址 从 MMDetection V2.0 起,我们只通过阿里云维护模型库。V1.x 版本的模型已经弃用。 ## 共同设置 - 所有模型都是在 `coco_2017_train` 上训练,在 `coco_2017_val` 上测试。 - 我们使用分布式训练。 - 所有 pytorch-style 的 ImageNet 预训练主干网络来自 PyTorch 的模型库,caffe-style 的预训练主干网络来自 detectron2 最新开源的模型。 - 为了与其他代码库公平比较,文档中所写的 GPU 内存是8个 GPU 的 `torch.cuda.max_memory_allocated()` 的最大值,此值通常小于 nvidia-smi 显示的值。 - 我们以网络 forward 和后处理的时间加和作为推理时间,不包含数据加载时间。所有结果通过 [benchmark.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/benchmark.py) 脚本计算所得。该脚本会计算推理 2000 张图像的平均时间。 ## ImageNet 预训练模型 通过 ImageNet 分类任务预训练的主干网络进行初始化是很常见的操作。所有预训练模型的链接都可以在 [open_mmlab](https://github.com/open-mmlab/mmcv/blob/master/mmcv/model_zoo/open_mmlab.json) 中找到。根据 `img_norm_cfg` 和原始权重,我们可以将所有 ImageNet 预训练模型分为以下几种情况: - TorchVision:torchvision 模型权重,包含 ResNet50, ResNet101。`img_norm_cfg` 为 `dict(mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)`。 - Pycls:[pycls](https://github.com/facebookresearch/pycls) 模型权重,包含 RegNetX。`img_norm_cfg` 为 `dict( mean=[103.530, 116.280, 123.675], std=[57.375, 57.12, 58.395], to_rgb=False)`。 - MSRA styles:[MSRA](https://github.com/KaimingHe/deep-residual-networks) 模型权重,包含 ResNet50_Caffe,ResNet101_Caffe。`img_norm_cfg` 为 `dict( mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)`。 - Caffe2 styles:现阶段只包含 ResNext101_32x8d。`img_norm_cfg` 为 `dict(mean=[103.530, 116.280, 123.675], std=[57.375, 57.120, 58.395], to_rgb=False)`。 - Other styles: SSD 的 `img_norm_cfg` 为 `dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True)`,YOLOv3 的 `img_norm_cfg` 为 `dict(mean=[0, 0, 0], std=[255., 255., 255.], to_rgb=True)`。 MMdetection 常用到的主干网络细节如下表所示: | 模型 | 来源 | 链接 | 描述 | | ---------------- | ----------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | ResNet50 | TorchVision | [torchvision 中的 ResNet-50](https://download.pytorch.org/models/resnet50-19c8e357.pth) | 来自 [torchvision 中的 ResNet-50](https://download.pytorch.org/models/resnet50-19c8e357.pth)。 | | ResNet101 | TorchVision | [torchvision 中的 ResNet-101](https://download.pytorch.org/models/resnet101-5d3b4d8f.pth) | 来自 [torchvision 中的 ResNet-101](https://download.pytorch.org/models/resnet101-5d3b4d8f.pth)。 | | RegNetX | Pycls | [RegNetX_3.2gf](https://download.openmmlab.com/pretrain/third_party/regnetx_3.2gf-c2599b0f.pth),[RegNetX_800mf](https://download.openmmlab.com/pretrain/third_party/regnetx_800mf-1f4be4c7.pth) 等 | 来自 [pycls](https://github.com/facebookresearch/pycls)。 | | ResNet50_Caffe | MSRA | [MSRA 中的 ResNet-50](https://download.openmmlab.com/pretrain/third_party/resnet50_caffe-788b5fa3.pth) | 由 [Detectron2 中的 R-50.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-50.pkl) 转化的副本。原始权重文件来自 [MSRA 中的原始 ResNet-50](https://github.com/KaimingHe/deep-residual-networks)。 | | ResNet101_Caffe | MSRA | [MSRA 中的 ResNet-101](https://download.openmmlab.com/pretrain/third_party/resnet101_caffe-3ad79236.pth) | 由 [Detectron2 中的 R-101.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/MSRA/R-101.pkl) 转化的副本。原始权重文件来自 [MSRA 中的原始 ResNet-101](https://github.com/KaimingHe/deep-residual-networks)。 | | ResNext101_32x8d | Caffe2 | [Caffe2 
ResNext101_32x8d](https://download.openmmlab.com/pretrain/third_party/resnext101_32x8d-1516f1aa.pth) | 由 [Detectron2 中的 X-101-32x8d.pkl](https://dl.fbaipublicfiles.com/detectron2/ImageNetPretrained/FAIR/X-101-32x8d.pkl) 转化的副本。原始 ResNeXt-101-32x8d 由 FB 使用 Caffe2 训练。 | ## Baselines ### RPN 请参考 [RPN](https://github.com/open-mmlab/mmdetection/blob/master/configs/rpn)。 ### Faster R-CNN 请参考 [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn)。 ### Mask R-CNN 请参考 [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn)。 ### Fast R-CNN (使用提前计算的 proposals) 请参考 [Fast R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/fast_rcnn)。 ### RetinaNet 请参考 [RetinaNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet)。 ### Cascade R-CNN and Cascade Mask R-CNN 请参考 [Cascade R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/cascade_rcnn)。 ### Hybrid Task Cascade (HTC) 请参考 [HTC](https://github.com/open-mmlab/mmdetection/blob/master/configs/htc)。 ### SSD 请参考 [SSD](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd)。 ### Group Normalization (GN) 请参考 [Group Normalization](https://github.com/open-mmlab/mmdetection/blob/master/configs/gn)。 ### Weight Standardization 请参考 [Weight Standardization](https://github.com/open-mmlab/mmdetection/blob/master/configs/gn+ws)。 ### Deformable Convolution v2 请参考 [Deformable Convolutional Networks](https://github.com/open-mmlab/mmdetection/blob/master/configs/dcn)。 ### CARAFE: Content-Aware ReAssembly of FEatures 请参考 [CARAFE](https://github.com/open-mmlab/mmdetection/blob/master/configs/carafe)。 ### Instaboost 请参考 [Instaboost](https://github.com/open-mmlab/mmdetection/blob/master/configs/instaboost)。 ### Libra R-CNN 请参考 [Libra R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/libra_rcnn)。 ### Guided Anchoring 请参考 [Guided Anchoring](https://github.com/open-mmlab/mmdetection/blob/master/configs/guided_anchoring)。 ### FCOS 请参考 [FCOS](https://github.com/open-mmlab/mmdetection/blob/master/configs/fcos)。 ### FoveaBox 请参考 [FoveaBox](https://github.com/open-mmlab/mmdetection/blob/master/configs/foveabox)。 ### RepPoints 请参考 [RepPoints](https://github.com/open-mmlab/mmdetection/blob/master/configs/reppoints)。 ### FreeAnchor 请参考 [FreeAnchor](https://github.com/open-mmlab/mmdetection/blob/master/configs/free_anchor)。 ### Grid R-CNN (plus) 请参考 [Grid R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/grid_rcnn)。 ### GHM 请参考 [GHM](https://github.com/open-mmlab/mmdetection/blob/master/configs/ghm)。 ### GCNet 请参考 [GCNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/gcnet)。 ### HRNet 请参考 [HRNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/hrnet)。 ### Mask Scoring R-CNN 请参考 [Mask Scoring R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/ms_rcnn)。 ### Train from Scratch 请参考 [Rethinking ImageNet Pre-training](https://github.com/open-mmlab/mmdetection/blob/master/configs/scratch)。 ### NAS-FPN 请参考 [NAS-FPN](https://github.com/open-mmlab/mmdetection/blob/master/configs/nas_fpn)。 ### ATSS 请参考 [ATSS](https://github.com/open-mmlab/mmdetection/blob/master/configs/atss)。 ### FSAF 请参考 [FSAF](https://github.com/open-mmlab/mmdetection/blob/master/configs/fsaf)。 ### RegNetX 请参考 [RegNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/regnet)。 ### Res2Net 请参考 [Res2Net](https://github.com/open-mmlab/mmdetection/blob/master/configs/res2net)。 ### GRoIE 请参考 
[GRoIE](https://github.com/open-mmlab/mmdetection/blob/master/configs/groie)。 ### Dynamic R-CNN 请参考 [Dynamic R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/dynamic_rcnn)。 ### PointRend 请参考 [PointRend](https://github.com/open-mmlab/mmdetection/blob/master/configs/point_rend)。 ### DetectoRS 请参考 [DetectoRS](https://github.com/open-mmlab/mmdetection/blob/master/configs/detectors)。 ### Generalized Focal Loss 请参考 [Generalized Focal Loss](https://github.com/open-mmlab/mmdetection/blob/master/configs/gfl)。 ### CornerNet 请参考 [CornerNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/cornernet)。 ### YOLOv3 请参考 [YOLOv3](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolo)。 ### PAA 请参考 [PAA](https://github.com/open-mmlab/mmdetection/blob/master/configs/paa)。 ### SABL 请参考 [SABL](https://github.com/open-mmlab/mmdetection/blob/master/configs/sabl)。 ### CentripetalNet 请参考 [CentripetalNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/centripetalnet)。 ### ResNeSt 请参考 [ResNeSt](https://github.com/open-mmlab/mmdetection/blob/master/configs/resnest)。 ### DETR 请参考 [DETR](https://github.com/open-mmlab/mmdetection/blob/master/configs/detr)。 ### Deformable DETR 请参考 [Deformable DETR](https://github.com/open-mmlab/mmdetection/blob/master/configs/deformable_detr)。 ### AutoAssign 请参考 [AutoAssign](https://github.com/open-mmlab/mmdetection/blob/master/configs/autoassign)。 ### YOLOF 请参考 [YOLOF](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolof)。 ### Seesaw Loss 请参考 [Seesaw Loss](https://github.com/open-mmlab/mmdetection/blob/master/configs/seesaw_loss)。 ### CenterNet 请参考 [CenterNet](https://github.com/open-mmlab/mmdetection/blob/master/configs/centernet)。 ### YOLOX 请参考 [YOLOX](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolox)。 ### PVT 请参考 [PVT](https://github.com/open-mmlab/mmdetection/blob/master/configs/pvt)。 ### SOLO 请参考 [SOLO](https://github.com/open-mmlab/mmdetection/blob/master/configs/solo)。 ### QueryInst 请参考 [QueryInst](https://github.com/open-mmlab/mmdetection/blob/master/configs/queryinst)。 ### RF-Next 请参考 [RF-Next](https://github.com/open-mmlab/mmdetection/blob/master/configs/rfnext). 
### Other datasets 我们还在 [PASCAL VOC](https://github.com/open-mmlab/mmdetection/blob/master/configs/pascal_voc),[Cityscapes](https://github.com/open-mmlab/mmdetection/blob/master/configs/cityscapes) 和 [WIDER FACE](https://github.com/open-mmlab/mmdetection/blob/master/configs/wider_face) 上对一些方法进行了基准测试。 ### Pre-trained Models 我们还通过多尺度训练和更长的训练策略来训练用 ResNet-50 和 [RegNetX-3.2G](https://github.com/open-mmlab/mmdetection/blob/master/configs/regnet) 作为主干网络的 [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn) 和 [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn)。这些模型可以作为下游任务的预训练模型。 ## 速度基准 ### 训练速度基准 我们提供 [analyze_logs.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/analyze_logs.py) 来得到训练中每一次迭代的平均时间。示例请参考 [Log Analysis](https://mmdetection.readthedocs.io/en/latest/useful_tools.html#log-analysis)。 我们与其他流行框架的 Mask R-CNN 训练速度进行比较(数据是从 [detectron2](https://github.com/facebookresearch/detectron2/blob/master/docs/notes/benchmarks.md/) 复制而来)。在 mmdetection 中,我们使用 [mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_poly_1x_coco_v1.py) 进行基准测试。它与 detectron2 的 [mask_rcnn_R_50_FPN_noaug_1x.yaml](https://github.com/facebookresearch/detectron2/blob/master/configs/Detectron1-Comparisons/mask_rcnn_R_50_FPN_noaug_1x.yaml) 设置完全一样。同时,我们还提供了[模型权重](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug_compare_20200518-10127928.pth)和[训练 log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug/mask_rcnn_r50_caffe_fpn_poly_1x_coco_no_aug_20200518_105755.log.json) 作为参考。为了跳过 GPU 预热时间,吞吐量按照100-500次迭代之间的平均吞吐量来计算。 | 框架 | 吞吐量 (img/s) | | -------------------------------------------------------------------------------------- | -------------- | | [Detectron2](https://github.com/facebookresearch/detectron2) | 62 | | [MMDetection](https://github.com/open-mmlab/mmdetection) | 61 | | [maskrcnn-benchmark](https://github.com/facebookresearch/maskrcnn-benchmark/) | 53 | | [tensorpack](https://github.com/tensorpack/tensorpack/tree/master/examples/FasterRCNN) | 50 | | [simpledet](https://github.com/TuSimple/simpledet/) | 39 | | [Detectron](https://github.com/facebookresearch/Detectron) | 19 | | [matterport/Mask_RCNN](https://github.com/matterport/Mask_RCNN/) | 14 | ### 推理时间基准 我们提供 [benchmark.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/analysis_tools/benchmark.py) 对推理时间进行基准测试。此脚本将推理 2000 张图片并计算忽略前 5 次推理的平均推理时间。可以通过设置 `LOG-INTERVAL` 来改变 log 输出间隔(默认为 50)。 ```shell python tools/benchmark.py ${CONFIG} ${CHECKPOINT} [--log-interval $[LOG-INTERVAL]] [--fuse-conv-bn] ``` 模型库中,所有模型在基准测量推理时间时都没设置 `fuse-conv-bn`, 此设置可以使推理时间更短。 ## 与 Detectron2 对比 我们在速度和精度方面对 mmdetection 和 [Detectron2](https://github.com/facebookresearch/detectron2.git) 进行对比。对比所使用的 detectron2 的 commit id 为 [185c27e](https://github.com/facebookresearch/detectron2/tree/185c27e4b4d2d4c68b5627b3765420c6d7f5a659)(30/4/2020)。 为了公平对比,我们所有的实验都在同一机器下进行。 ### 硬件 - 8 NVIDIA Tesla V100 (32G) GPUs - Intel(R) Xeon(R) Gold 6148 CPU @ 2.40GHz ### 软件环境 - Python 3.7 - PyTorch 1.4 - CUDA 10.1 - CUDNN 7.6.03 - NCCL 2.4.08 ### 精度 | 模型 | 训练策略 | Detectron2 | mmdetection | 下载 | | -------------------------------------------------------------------------------------------------------------------------------------- | -------- | 
-------------------------------------------------------------------------------------------------------------------------------------- | ----------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | [Faster R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/faster_rcnn/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco.py) | 1x | [37.9](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-Detection/faster_rcnn_R_50_FPN_1x.yaml) | 38.0 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco-5324cff8.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco/faster_rcnn_r50_caffe_fpn_mstrain_1x_coco_20200429_234554.log.json) | | [Mask R-CNN](https://github.com/open-mmlab/mmdetection/blob/master/configs/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py) | 1x | [38.6 & 35.2](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml) | 38.8 & 35.4 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco-dbecf295.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco_20200430_054239.log.json) | | [Retinanet](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet/retinanet_r50_caffe_fpn_mstrain_1x_coco.py) | 1x | [36.5](https://github.com/facebookresearch/detectron2/blob/master/configs/COCO-Detection/retinanet_R_50_FPN_1x.yaml) | 37.0 | [model](https://download.openmmlab.com/mmdetection/v2.0/benchmark/retinanet_r50_caffe_fpn_mstrain_1x_coco/retinanet_r50_caffe_fpn_mstrain_1x_coco-586977a0.pth) \| [log](https://download.openmmlab.com/mmdetection/v2.0/benchmark/retinanet_r50_caffe_fpn_mstrain_1x_coco/retinanet_r50_caffe_fpn_mstrain_1x_coco_20200430_014748.log.json) | ### 训练速度 训练速度使用 s/iter 来度量。结果越低越好。 | 模型 | Detectron2 | mmdetection | | ------------ | ---------- | ----------- | | Faster R-CNN | 0.210 | 0.216 | | Mask R-CNN | 0.261 | 0.265 | | Retinanet | 0.200 | 0.205 | ### 推理速度 推理速度通过单张 GPU 下的 fps(img/s) 来度量,越高越好。 为了与 Detectron2 保持一致,我们所写的推理时间除去了数据加载时间。 对于 Mask RCNN,我们去除了后处理中 RLE 编码的时间。 我们在括号中给出了官方给出的速度。由于硬件差异,官方给出的速度会比我们所测试得到的速度快一些。 | 模型 | Detectron2 | mmdetection | | ------------ | ----------- | ----------- | | Faster R-CNN | 25.6 (26.3) | 22.2 | | Mask R-CNN | 22.5 (23.3) | 19.6 | | Retinanet | 17.8 (18.2) | 20.6 | ### 训练内存 | 模型 | Detectron2 | mmdetection | | ------------ | ---------- | ----------- | | Faster R-CNN | 3.0 | 3.8 | | Mask R-CNN | 3.4 | 3.9 | | Retinanet | 3.9 | 3.4 |
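另外,为了方便对照前文「ImageNet 预训练模型」一节中不同来源主干网络的 `img_norm_cfg`,下面把文中给出的两组典型取值汇总成可直接复制的字典(数值均摘自上文,仅作示意):

```python
# torchvision 风格:RGB 顺序,除以 ImageNet 标准差
torchvision_norm = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
# MSRA caffe 风格:BGR 顺序,std 为 1
caffe_norm = dict(
    mean=[103.530, 116.280, 123.675], std=[1.0, 1.0, 1.0], to_rgb=False)
```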
18,583
53.982249
932
md
mmdetection
mmdetection-master/docs/zh_cn/projects.md
# 基于 MMDetection 的项目 有许多开源项目都是基于 MMDetection 搭建的,我们在这里列举一部分作为样例,展示如何基于 MMDetection 搭建您自己的项目。 由于这个页面列举的项目并不完全,我们欢迎社区提交 Pull Request 来更新这个文档。 ## MMDetection 的拓展项目 一些项目拓展了 MMDetection 的边界,如将 MMDetection 拓展支持 3D 检测或者将 MMDetection 用于部署。 它们展示了 MMDetection 的许多可能性,所以我们在这里也列举一些。 - [OTEDetection](https://github.com/opencv/mmdetection): OpenVINO training extensions for object detection. - [MMDetection3d](https://github.com/open-mmlab/mmdetection3d): OpenMMLab's next-generation platform for general 3D object detection. ## 研究项目 同样有许多研究论文是基于 MMDetection 进行的。许多论文都发表在了顶级的会议或期刊上,或者对社区产生了深远的影响。 为了向社区提供一个可以参考的论文列表,帮助大家开发或者比较新的前沿算法,我们在这里也遵循会议的时间顺序列举了一些论文。 MMDetection 中已经支持的算法不在此列。 - Involution: Inverting the Inherence of Convolution for Visual Recognition, CVPR21. [\[paper\]](https://arxiv.org/abs/2103.06255)[\[github\]](https://github.com/d-li14/involution) - Multiple Instance Active Learning for Object Detection, CVPR 2021. [\[paper\]](https://openaccess.thecvf.com/content/CVPR2021/papers/Yuan_Multiple_Instance_Active_Learning_for_Object_Detection_CVPR_2021_paper.pdf)[\[github\]](https://github.com/yuantn/MI-AOD) - Adaptive Class Suppression Loss for Long-Tail Object Detection, CVPR 2021. [\[paper\]](https://arxiv.org/abs/2104.00885)[\[github\]](https://github.com/CASIA-IVA-Lab/ACSL) - Generalizable Pedestrian Detection: The Elephant In The Room, CVPR2021. [\[paper\]](https://arxiv.org/abs/2003.08799)[\[github\]](https://github.com/hasanirtiza/Pedestron) - Group Fisher Pruning for Practical Network Compression, ICML2021. [\[paper\]](https://github.com/jshilong/FisherPruning/blob/main/resources/paper.pdf)[\[github\]](https://github.com/jshilong/FisherPruning) - Overcoming Classifier Imbalance for Long-tail Object Detection with Balanced Group Softmax, CVPR2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/papers/Li_Overcoming_Classifier_Imbalance_for_Long-Tail_Object_Detection_With_Balanced_Group_CVPR_2020_paper.pdf)[\[github\]](https://github.com/FishYuLi/BalancedGroupSoftmax) - Coherent Reconstruction of Multiple Humans from a Single Image, CVPR2020. [\[paper\]](https://jiangwenpl.github.io/multiperson/)[\[github\]](https://github.com/JiangWenPL/multiperson) - Look-into-Object: Self-supervised Structure Modeling for Object Recognition, CVPR 2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/papers/Zhou_Look-Into-Object_Self-Supervised_Structure_Modeling_for_Object_Recognition_CVPR_2020_paper.pdf)[\[github\]](https://github.com/JDAI-CV/LIO) - Video Panoptic Segmentation, CVPR2020. [\[paper\]](https://arxiv.org/abs/2006.11339)[\[github\]](https://github.com/mcahny/vps) - D2Det: Towards High Quality Object Detection and Instance Segmentation, CVPR2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/html/Cao_D2Det_Towards_High_Quality_Object_Detection_and_Instance_Segmentation_CVPR_2020_paper.html)[\[github\]](https://github.com/JialeCao001/D2Det) - CentripetalNet: Pursuing High-quality Keypoint Pairs for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.09119)[\[github\]](https://github.com/KiveeDong/CentripetalNet) - Learning a Unified Sample Weighting Network for Object Detection, CVPR 2020. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2020/html/Cai_Learning_a_Unified_Sample_Weighting_Network_for_Object_Detection_CVPR_2020_paper.html)[\[github\]](https://github.com/caiqi/sample-weighting-network) - Scale-equalizing Pyramid Convolution for Object Detection, CVPR2020. 
[\[paper\]](https://arxiv.org/abs/2005.03101) [\[github\]](https://github.com/jshilong/SEPC) - Revisiting the Sibling Head in Object Detector, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.07540)[\[github\]](https://github.com/Sense-X/TSD) - PolarMask: Single Shot Instance Segmentation with Polar Representation, CVPR2020. [\[paper\]](https://arxiv.org/abs/1909.13226)[\[github\]](https://github.com/xieenze/PolarMask) - Hit-Detector: Hierarchical Trinity Architecture Search for Object Detection, CVPR2020. [\[paper\]](https://arxiv.org/abs/2003.11818)[\[github\]](https://github.com/ggjy/HitDet.pytorch) - ZeroQ: A Novel Zero Shot Quantization Framework, CVPR2020. [\[paper\]](https://arxiv.org/abs/2001.00281)[\[github\]](https://github.com/amirgholami/ZeroQ) - CBNet: A Novel Composite Backbone Network Architecture for Object Detection, AAAI2020. [\[paper\]](https://aaai.org/Papers/AAAI/2020GB/AAAI-LiuY.1833.pdf)[\[github\]](https://github.com/VDIGPKU/CBNet) - RDSNet: A New Deep Architecture for Reciprocal Object Detection and Instance Segmentation, AAAI2020. [\[paper\]](https://arxiv.org/abs/1912.05070)[\[github\]](https://github.com/wangsr126/RDSNet) - Training-Time-Friendly Network for Real-Time Object Detection, AAAI2020. [\[paper\]](https://arxiv.org/abs/1909.00700)[\[github\]](https://github.com/ZJULearning/ttfnet) - Cascade RPN: Delving into High-Quality Region Proposal Network with Adaptive Convolution, NeurIPS 2019. [\[paper\]](https://arxiv.org/abs/1909.06720)[\[github\]](https://github.com/thangvubk/Cascade-RPN) - Reasoning R-CNN: Unifying Adaptive Global Reasoning into Large-scale Object Detection, CVPR2019. [\[paper\]](http://openaccess.thecvf.com/content_CVPR_2019/papers/Xu_Reasoning-RCNN_Unifying_Adaptive_Global_Reasoning_Into_Large-Scale_Object_Detection_CVPR_2019_paper.pdf)[\[github\]](https://github.com/chanyn/Reasoning-RCNN) - Learning RoI Transformer for Oriented Object Detection in Aerial Images, CVPR2019. [\[paper\]](https://arxiv.org/abs/1812.00155)[\[github\]](https://github.com/dingjiansw101/AerialDetection) - SOLO: Segmenting Objects by Locations. [\[paper\]](https://arxiv.org/abs/1912.04488)[\[github\]](https://github.com/WXinlong/SOLO) - SOLOv2: Dynamic, Faster and Stronger. [\[paper\]](https://arxiv.org/abs/2003.10152)[\[github\]](https://github.com/WXinlong/SOLO) - Dense Peppoints: Representing Visual Objects with Dense Point Sets. [\[paper\]](https://arxiv.org/abs/1912.11473)[\[github\]](https://github.com/justimyhxu/Dense-RepPoints) - IterDet: Iterative Scheme for Object Detection in Crowded Environments. [\[paper\]](https://arxiv.org/abs/2005.05708)[\[github\]](https://github.com/saic-vul/iterdet) - Cross-Iteration Batch Normalization. [\[paper\]](https://arxiv.org/abs/2002.05712)[\[github\]](https://github.com/Howal/Cross-iterationBatchNorm) - A Ranking-based, Balanced Loss Function Unifying Classification and Localisation in Object Detection, NeurIPS2020 [\[paper\]](https://arxiv.org/abs/2009.13592)[\[github\]](https://github.com/kemaloksuz/aLRPLoss)
6,521
132.102041
338
md
mmdetection
mmdetection-master/docs/zh_cn/robustness_benchmarking.md
# 检测器鲁棒性检查 ## 介绍 我们提供了在 [Benchmarking Robustness in Object Detection: Autonomous Driving when Winter is Coming](https://arxiv.org/abs/1907.07484) 中定义的「图像损坏基准测试」上测试目标检测和实例分割模型的工具。 此页面提供了如何使用该基准测试的基本教程。 ```latex @article{michaelis2019winter, title={Benchmarking Robustness in Object Detection: Autonomous Driving when Winter is Coming}, author={Michaelis, Claudio and Mitzkus, Benjamin and Geirhos, Robert and Rusak, Evgenia and Bringmann, Oliver and Ecker, Alexander S. and Bethge, Matthias and Brendel, Wieland}, journal={arXiv:1907.07484}, year={2019} } ``` ![image corruption example](../resources/corruptions_sev_3.png) ## 关于基准测试 要将结果提交到基准测试,请访问[基准测试主页](https://github.com/bethgelab/robust-detection-benchmark) 基准测试是仿照 [imagenet-c 基准测试](https://github.com/hendrycks/robustness),由 Dan Hendrycks 和 Thomas Dietterich 在[Benchmarking Neural Network Robustness to Common Corruptions and Perturbations](https://arxiv.org/abs/1903.12261)(ICLR 2019)中发表。 图像损坏变换功能包含在此库中,但可以使用以下方法单独安装: ```shell pip install imagecorruptions ``` 与 imagenet-c 相比,我们必须进行一些更改以处理任意大小的图像和灰度图像。 我们还修改了“运动模糊”和“雪”损坏,以解除对于 linux 特定库的依赖, 否则必须单独安装这些库。有关详细信息,请参阅 [imagecorruptions](https://github.com/bethgelab/imagecorruptions)。 ## 使用预训练模型进行推理 我们提供了一个测试脚本来评估模型在基准测试中提供的各种损坏变换组合下的性能。 ### 在数据集上测试 - [x] 单张 GPU 测试 - [ ] 多张 GPU 测试 - [ ] 可视化检测结果 您可以使用以下命令在基准测试中使用 15 种损坏变换来测试模型性能。 ```shell # single-gpu testing python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] ``` 也可以选择其它不同类型的损坏变换。 ```shell # noise python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions noise # blur python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions blur # wetaher python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions weather # digital python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions digital ``` 或者使用一组自定义的损坏变换,例如: ```shell # gaussian noise, zoom blur and snow python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --corruptions gaussian_noise zoom_blur snow ``` 最后,我们也可以选择施加在图像上的损坏变换的严重程度。 严重程度从 1 到 5 逐级增强,0 表示不对图像施加损坏变换,即原始图像数据。 ```shell # severity 1 python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --severities 1 # severities 0,2,4 python tools/analysis_tools/test_robustness.py ${CONFIG_FILE} ${CHECKPOINT_FILE} [--out ${RESULT_FILE}] [--eval ${EVAL_METRICS}] --severities 0 2 4 ``` ## 模型测试结果 下表是各模型在 COCO 2017val 上的测试结果。 | Model | Backbone | Style | Lr schd | box AP clean | box AP corr. | box % | mask AP clean | mask AP corr. 
| mask % | | :-----------------: | :-----------------: | :-----: | :-----: | :----------: | :----------: | :---: | :-----------: | :-----------: | :----: | | Faster R-CNN | R-50-FPN | pytorch | 1x | 36.3 | 18.2 | 50.2 | - | - | - | | Faster R-CNN | R-101-FPN | pytorch | 1x | 38.5 | 20.9 | 54.2 | - | - | - | | Faster R-CNN | X-101-32x4d-FPN | pytorch | 1x | 40.1 | 22.3 | 55.5 | - | - | - | | Faster R-CNN | X-101-64x4d-FPN | pytorch | 1x | 41.3 | 23.4 | 56.6 | - | - | - | | Faster R-CNN | R-50-FPN-DCN | pytorch | 1x | 40.0 | 22.4 | 56.1 | - | - | - | | Faster R-CNN | X-101-32x4d-FPN-DCN | pytorch | 1x | 43.4 | 26.7 | 61.6 | - | - | - | | Mask R-CNN | R-50-FPN | pytorch | 1x | 37.3 | 18.7 | 50.1 | 34.2 | 16.8 | 49.1 | | Mask R-CNN | R-50-FPN-DCN | pytorch | 1x | 41.1 | 23.3 | 56.7 | 37.2 | 20.7 | 55.7 | | Cascade R-CNN | R-50-FPN | pytorch | 1x | 40.4 | 20.1 | 49.7 | - | - | - | | Cascade Mask R-CNN | R-50-FPN | pytorch | 1x | 41.2 | 20.7 | 50.2 | 35.7 | 17.6 | 49.3 | | RetinaNet | R-50-FPN | pytorch | 1x | 35.6 | 17.8 | 50.1 | - | - | - | | Hybrid Task Cascade | X-101-64x4d-FPN-DCN | pytorch | 1x | 50.6 | 32.7 | 64.7 | 43.8 | 28.1 | 64.0 | 由于对图像的损坏变换存在随机性,测试结果可能略有不同。
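如果想在进入完整评测流程之前,先单独查看某种损坏变换的效果,可以直接调用上文提到的 `imagecorruptions` 包(下面的示例图像是随机生成的占位数据,仅作演示):

```python
# 演示:对一张(随机生成的)uint8 RGB 图像施加严重程度为 3 的高斯噪声,
# 其余 14 种损坏变换只需替换 corruption_name 即可。
import numpy as np
from imagecorruptions import corrupt

img = np.random.randint(0, 255, (224, 224, 3), dtype=np.uint8)  # 占位图像
corrupted = corrupt(img, corruption_name='gaussian_noise', severity=3)
print(corrupted.shape, corrupted.dtype)
```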
4,966
44.154545
233
md
mmdetection
mmdetection-master/docs/zh_cn/stat.py
#!/usr/bin/env python
import functools as func
import glob
import os.path as osp
import re

import numpy as np

url_prefix = 'https://github.com/open-mmlab/mmdetection/blob/master/'

# Collect every algorithm README under the configs directory.
files = sorted(glob.glob('../configs/*/README.md'))

stats = []
titles = []
num_ckpts = 0

for f in files:
    url = osp.dirname(f.replace('../', url_prefix))

    with open(f, 'r') as content_file:
        content = content_file.read()

    # The first line of each README is the paper title.
    title = content.split('\n')[0].replace('# ', '').strip()
    # Count the "[model](...)" links, i.e. the released checkpoints.
    ckpts = set(x.lower().strip()
                for x in re.findall(r'\[model\]\((https?.*)\)', content))

    if len(ckpts) == 0:
        continue

    _papertype = [x for x in re.findall(r'\[([A-Z]+)\]', content)]
    assert len(_papertype) > 0
    papertype = _papertype[0]

    paper = set([(papertype, title)])

    titles.append(title)
    num_ckpts += len(ckpts)

    statsmsg = f"""
\t* [{papertype}] [{title}]({url}) ({len(ckpts)} ckpts)
"""
    stats.append((paper, ckpts, statsmsg))

allpapers = func.reduce(lambda a, b: a.union(b), [p for p, _, _ in stats])
msglist = '\n'.join(x for _, _, x in stats)

papertypes, papercounts = np.unique([t for t, _ in allpapers],
                                    return_counts=True)
countstr = '\n'.join(
    [f' - {t}: {c}' for t, c in zip(papertypes, papercounts)])

# Assemble the generated Model Zoo statistics page.
modelzoo = f"""
# Model Zoo Statistics

* Number of papers: {len(set(titles))}
{countstr}

* Number of checkpoints: {num_ckpts}
{msglist}
"""

with open('modelzoo_statistics.md', 'w') as f:
    f.write(modelzoo)
1,519
22.384615
74
py
mmdetection
mmdetection-master/docs/zh_cn/switch_language.md
## <a href='https://mmdetection.readthedocs.io/en/latest/'>English</a> ## <a href='https://mmdetection.readthedocs.io/zh_CN/latest/'>简体中文</a>
143
35
70
md
mmdetection
mmdetection-master/docs/zh_cn/useful_tools.md
## 日志分析
8
3.5
7
md
mmdetection
mmdetection-master/docs/zh_cn/_static/css/readthedocs.css
.header-logo { background-image: url("../image/mmdet-logo.png"); background-size: 156px 40px; height: 40px; width: 156px; }
140
19.142857
53
css
mmdetection
mmdetection-master/docs/zh_cn/device/npu.md
# NPU (华为 昇腾) ## 使用方法 请参考 [MMCV 的安装文档](https://mmcv.readthedocs.io/en/latest/get_started/build.html#build-mmcv-full-on-ascend-npu-machine) 来安装 NPU 版本的 MMCV。 以下展示单机八卡场景的运行指令: ```shell bash tools/dist_train.sh configs/ssd/ssd300_coco.py 8 ``` 以下展示单机单卡下的运行指令: ```shell python tools/train.py configs/ssd/ssd300_coco.py ``` ## 模型验证结果 | Model | box AP | mask AP | Config | Download | | :------------------: | :----: | :-----: | :---------------------------------------------------------------------------------------------------------------------------- | :----------------------------------------------------------------------------------------------------------- | | [ssd300](<>) | 25.6 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd/ssd300_fp16_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/ssd300_coco.log.json) | | [ssd512](<>) | 29.4 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd/ssd512_fp16_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/ssd512_coco.log.json) | | [ssdlite-mbv2\*](<>) | 20.2 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd/ssdlite_mobilenetv2_scratch_600e_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/ssdlite_mobilenetv2_scratch_600e_coco.log.json) | | [retinanet-r18](<>) | 31.8 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet/retinanet_r18_fpn_1x8_1x_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/retinanet_r18_fpn_1x8_1x_coco.log.json) | | [retinanet-r50](<>) | 36.6 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet/retinanet_r50_fpn_fp16_1x_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/retinanet_r50_fpn_1x_coco.log.json) | | [yolov3-608](<>) | 34.7 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolo/yolov3_d53_fp16_mstrain-608_273e_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/yolov3_d53_fp16_mstrain-608_273e_coco.log.json) | | [yolox-s\*\*](<>) | 39.9 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/yolox/yolox_s_8x8_300e_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/yolox_s_8x8_300e_coco.log.json) | | [centernet-r18](<>) | 26.1 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/centernet/centernet_resnet18_140e_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/centernet_resnet18_140e_coco.log.json) | | [fcos-r50\*](<>) | 36.1 | --- | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/fcos/fcos_r50_caffe_fpn_gn-head_fp16_1x_bs8x8_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/fcos_r50_caffe_fpn_gn-head_1x_coco_bs8x8.log.json) | | [solov2-r50](<>) | --- | 34.7 | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/solov2/solov2_r50_fpn_1x_coco.py) | [log](https://download.openmmlab.com/mmdetection/v2.0/npu/solov2_r50_fpn_1x_coco.log.json) | **注意:** - 如果没有特别标记,NPU 上的结果与使用 FP32 的 GPU 上的结果结果相同。 - (\*) 这些模型在 NPU 上的结果与 GPU 上的混合精度训练结果一致,但低于 FP32 的结果。这种情况主要与模型本身在混合精度训练中的特点有关, 用户可以自行调整超参数来获得更高精度。 - (\*\*) GPU 上 yolox-s 在混合精度下的精度为 40.1 低于readme中 40.5 的水平;默认情况下,yolox-s 启用 `persister_woker=True`,但这个参数 目前在NPU上存在一些bug,会导致在最后几个epoch由于资源耗尽报错退出,对整体精度影响有限可以忽略。 ## Ascend加速模块验证结果 优化方案简介: 1. 修改循环计算为一次整体计算,目的是减少下发指令数量。 2. 
修改索引计算为掩码计算,原因是SIMD架构芯片擅长处理连续数据计算。 | Model | Config | v100 iter time | 910A iter time | | :------------------------: | :-----------------------------------------------------------------------------------------------------------------------: | :------------: | :------------------------: | | [ascend-ssd300](<>) | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/ssd/ascend_ssd300_fp16_coco.py) | 0.165s/iter | 0.383s/iter -> 0.13s/iter | | [ascend-retinanet-r18](<>) | [config](https://github.com/open-mmlab/mmdetection/blob/master/configs/retinanet/ascend_retinanet_r18_fpn_1x8_1x_coco.py) | 0.567s/iter | 0.780s/iter -> 0.420s/iter | **以上模型结果由华为昇腾团队提供**
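上述第 2 条优化思路(把索引计算改写为掩码计算)可以用一个与具体模型无关的小例子来说明(以下仅为示意代码,变量名是虚构的,并非 Ascend 适配的实际补丁):

```python
# 示意:用固定形状的掩码运算代替数据依赖形状的索引运算,
# 后者的输出形状随数据变化,在 SIMD 架构的芯片上往往更难高效执行。
import torch

scores = torch.rand(8, 100)
keep = scores > 0.5

picked = scores[keep]   # 索引写法:输出形状取决于数据
masked = scores * keep  # 掩码写法:形状固定,便于一次整体下发计算
print(picked.shape, masked.shape)
```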
4,971
89.4
282
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/config.md
# 教程 1: 学习配置文件 我们在配置文件中支持了继承和模块化,这便于进行各种实验。如果需要检查配置文件,可以通过运行 `python tools/misc/print_config.py /PATH/TO/CONFIG` 来查看完整的配置。 ## 通过脚本参数修改配置 当运行 `tools/train.py` 和 `tools/test.py` 时,可以通过 `--cfg-options` 来修改配置文件。 - 更新字典链中的配置 可以按照原始配置文件中的 dict 键顺序地指定配置预选项。例如,使用 `--cfg-options model.backbone.norm_eval=False` 将模型主干网络中的所有 BN 模块都改为 `train` 模式。 - 更新配置列表中的键 在配置文件里,一些字典型的配置被包含在列表中。例如,数据训练流程 `data.train.pipeline` 通常是一个列表,比如 `[dict(type='LoadImageFromFile'), ...]`。如果需要将 `'LoadImageFromFile'` 改成 `'LoadImageFromWebcam'`,需要写成下述形式: `--cfg-options data.train.pipeline.0.type=LoadImageFromWebcam`。 - 更新列表或元组的值 如果要更新的值是列表或元组。例如,配置文件通常设置 `workflow=[('train', 1)]`,如果需要改变这个键,可以通过 `--cfg-options workflow="[(train,1),(val,1)]"` 来重新设置。需要注意,引号 " 是支持列表或元组数据类型所必需的,并且在指定值的引号内**不允许**有空格。 ## 配置文件结构 在 `config/_base_` 文件夹下有 4 个基本组件类型,分别是:数据集(dataset),模型(model),训练策略(schedule)和运行时的默认设置(default runtime)。许多方法,例如 Faster R-CNN、Mask R-CNN、Cascade R-CNN、RPN、SSD 能够很容易地构建出来。由 `_base_` 下的组件组成的配置,被我们称为 _原始配置(primitive)_。 对于同一文件夹下的所有配置,推荐**只有一个**对应的**原始配置**文件。所有其他的配置文件都应该继承自这个**原始配置**文件。这样就能保证配置文件的最大继承深度为 3。 为了便于理解,我们建议贡献者继承现有方法。例如,如果在 Faster R-CNN 的基础上做了一些修改,用户首先可以通过指定 `_base_ = ../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py` 来继承基础的 Faster R-CNN 结构,然后修改配置文件中的必要参数以完成继承。 如果你在构建一个与任何现有方法不共享结构的全新方法,那么可以在 `configs` 文件夹下创建一个新的例如 `xxx_rcnn` 文件夹。更多细节请参考 [MMCV](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html) 文档。 ## 配置文件名称风格 我们遵循以下样式来命名配置文件。建议贡献者遵循相同的风格。 ``` {model}_[model setting]_{backbone}_{neck}_[norm setting]_[misc]_[gpu x batch_per_gpu]_{schedule}_{dataset} ``` `{xxx}` 是被要求的文件 `[yyy]` 是可选的。 - `{model}`: 模型种类,例如 `faster_rcnn`, `mask_rcnn` 等。 - `[model setting]`: 特定的模型,例如 `htc` 中的`without_semantic`, `reppoints` 中的 `moment` 等。 - `{backbone}`: 主干网络种类例如 `r50` (ResNet-50), `x101` (ResNeXt-101) 等。 - `{neck}`: Neck 模型的种类包括 `fpn`, `pafpn`, `nasfpn`, `c4 ` 等。 - `[norm_setting]`: 默认使用 `bn` (Batch Normalization),其他指定可以有 `gn` (Group Normalization), `syncbn` (Synchronized Batch Normalization) 等。 `gn-head`/`gn-neck` 表示 GN 仅应用于网络的 Head 或 Neck, `gn-all` 表示 GN 用于整个模型, 例如主干网络、Neck 和 Head。 - `[misc]`: 模型中各式各样的设置/插件,例如 `dconv`、 `gcb`、 `attention`、`albu`、 `mstrain` 等。 - `[gpu x batch_per_gpu]`:GPU 数量和每个 GPU 的样本数,默认使用 `8x2`。 - `{schedule}`: 训练方案,选项是 `1x`、 `2x`、 `20e` 等。`1x` 和 `2x` 分别代表 12 epoch 和 24 epoch,`20e` 在级联模型中使用,表示 20 epoch。对于 `1x`/`2x`,初始学习率在第 8/16 和第 11/22 epoch 衰减 10 倍;对于 `20e` ,初始学习率在第 16 和第 19 epoch 衰减 10 倍。 - `{dataset}`:数据集,例如 `coco`、 `cityscapes`、 `voc_0712`、 `wider_face` 等。 ## 弃用的 train_cfg/test_cfg `train_cfg` 和 `test_cfg` 在配置文件中已弃用,请在模型配置中指定它们。原始配置结构如下: ```python # 已经弃用的形式 model = dict( type=..., ... ) train_cfg=dict(...) test_cfg=dict(...) ``` 推荐的配置结构如下: ```python # 推荐的形式 model = dict( type=..., ... 
train_cfg=dict(...), test_cfg=dict(...), ) ``` ## Mask R-CNN 配置文件示例 为了帮助用户对 MMDetection 检测系统中的完整配置和模块有一个基本的了解,我们对使用 ResNet50 和 FPN 的 Mask R-CNN 的配置文件进行简要注释说明。更详细的用法和各个模块对应的替代方案,请参考 API 文档。 ```python model = dict( type='MaskRCNN', # 检测器(detector)名称 backbone=dict( # 主干网络的配置文件 type='ResNet', # 主干网络的类别,可用选项请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/backbones/resnet.py#L308 depth=50, # 主干网络的深度,对于 ResNet 和 ResNext 通常设置为 50 或 101。 num_stages=4, # 主干网络状态(stages)的数目,这些状态产生的特征图作为后续的 head 的输入。 out_indices=(0, 1, 2, 3), # 每个状态产生的特征图输出的索引。 frozen_stages=1, # 第一个状态的权重被冻结 norm_cfg=dict( # 归一化层(norm layer)的配置项。 type='BN', # 归一化层的类别,通常是 BN 或 GN。 requires_grad=True), # 是否训练归一化里的 gamma 和 beta。 norm_eval=True, # 是否冻结 BN 里的统计项。 style='pytorch', # 主干网络的风格,'pytorch' 意思是步长为2的层为 3x3 卷积, 'caffe' 意思是步长为2的层为 1x1 卷积。 init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50')), # 加载通过 ImageNet 预训练的模型 neck=dict( type='FPN', # 检测器的 neck 是 FPN,我们同样支持 'NASFPN', 'PAFPN' 等,更多细节可以参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/fpn.py#L10。 in_channels=[256, 512, 1024, 2048], # 输入通道数,这与主干网络的输出通道一致 out_channels=256, # 金字塔特征图每一层的输出通道 num_outs=5), # 输出的范围(scales) rpn_head=dict( type='RPNHead', # RPN_head 的类型是 'RPNHead', 我们也支持 'GARPNHead' 等,更多细节可以参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/rpn_head.py#L12。 in_channels=256, # 每个输入特征图的输入通道,这与 neck 的输出通道一致。 feat_channels=256, # head 卷积层的特征通道。 anchor_generator=dict( # 锚点(Anchor)生成器的配置。 type='AnchorGenerator', # 大多是方法使用 AnchorGenerator 作为锚点生成器, SSD 检测器使用 `SSDAnchorGenerator`。更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/anchor/anchor_generator.py#L10。 scales=[8], # 锚点的基本比例,特征图某一位置的锚点面积为 scale * base_sizes ratios=[0.5, 1.0, 2.0], # 高度和宽度之间的比率。 strides=[4, 8, 16, 32, 64]), # 锚生成器的步幅。这与 FPN 特征步幅一致。 如果未设置 base_sizes,则当前步幅值将被视为 base_sizes。 bbox_coder=dict( # 在训练和测试期间对框进行编码和解码。 type='DeltaXYWHBBoxCoder', # 框编码器的类别,'DeltaXYWHBBoxCoder' 是最常用的,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/coder/delta_xywh_bbox_coder.py#L9。 target_means=[0.0, 0.0, 0.0, 0.0], # 用于编码和解码框的目标均值 target_stds=[1.0, 1.0, 1.0, 1.0]), # 用于编码和解码框的标准差 loss_cls=dict( # 分类分支的损失函数配置 type='CrossEntropyLoss', # 分类分支的损失类型,我们也支持 FocalLoss 等。 use_sigmoid=True, # RPN通常进行二分类,所以通常使用sigmoid函数。 los_weight=1.0), # 分类分支的损失权重。 loss_bbox=dict( # 回归分支的损失函数配置。 type='L1Loss', # 损失类型,我们还支持许多 IoU Losses 和 Smooth L1-loss 等,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/smooth_l1_loss.py#L56。 loss_weight=1.0)), # 回归分支的损失权重。 roi_head=dict( # RoIHead 封装了两步(two-stage)/级联(cascade)检测器的第二步。 type='StandardRoIHead', # RoI head 的类型,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/standard_roi_head.py#L10。 bbox_roi_extractor=dict( # 用于 bbox 回归的 RoI 特征提取器。 type='SingleRoIExtractor', # RoI 特征提取器的类型,大多数方法使用 SingleRoIExtractor,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/roi_extractors/single_level.py#L10。 roi_layer=dict( # RoI 层的配置 type='RoIAlign', # RoI 层的类别, 也支持 DeformRoIPoolingPack 和 ModulatedDeformRoIPoolingPack,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/ops/roi_align/roi_align.py#L79。 output_size=7, # 特征图的输出大小。 sampling_ratio=0), # 提取 RoI 特征时的采样率。0 表示自适应比率。 out_channels=256, # 提取特征的输出通道。 featmap_strides=[4, 8, 16, 32]), # 多尺度特征图的步幅,应该与主干的架构保持一致。 bbox_head=dict( # RoIHead 中 box head 的配置. 
type='Shared2FCBBoxHead', # bbox head 的类别,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/bbox_heads/convfc_bbox_head.py#L177。 in_channels=256, # bbox head 的输入通道。 这与 roi_extractor 中的 out_channels 一致。 fc_out_channels=1024, # FC 层的输出特征通道。 roi_feat_size=7, # 候选区域(Region of Interest)特征的大小。 num_classes=80, # 分类的类别数量。 bbox_coder=dict( # 第二阶段使用的框编码器。 type='DeltaXYWHBBoxCoder', # 框编码器的类别,大多数情况使用 'DeltaXYWHBBoxCoder'。 target_means=[0.0, 0.0, 0.0, 0.0], # 用于编码和解码框的均值 target_stds=[0.1, 0.1, 0.2, 0.2]), # 编码和解码的标准差。因为框更准确,所以值更小,常规设置时 [0.1, 0.1, 0.2, 0.2]。 reg_class_agnostic=False, # 回归是否与类别无关。 loss_cls=dict( # 分类分支的损失函数配置 type='CrossEntropyLoss', # 分类分支的损失类型,我们也支持 FocalLoss 等。 use_sigmoid=False, # 是否使用 sigmoid。 loss_weight=1.0), # 分类分支的损失权重。 loss_bbox=dict( # 回归分支的损失函数配置。 type='L1Loss', # 损失类型,我们还支持许多 IoU Losses 和 Smooth L1-loss 等。 loss_weight=1.0)), # 回归分支的损失权重。 mask_roi_extractor=dict( # 用于 mask 生成的 RoI 特征提取器。 type='SingleRoIExtractor', # RoI 特征提取器的类型,大多数方法使用 SingleRoIExtractor。 roi_layer=dict( # 提取实例分割特征的 RoI 层配置 type='RoIAlign', # RoI 层的类型,也支持 DeformRoIPoolingPack 和 ModulatedDeformRoIPoolingPack。 output_size=14, # 特征图的输出大小。 sampling_ratio=0), # 提取 RoI 特征时的采样率。 out_channels=256, # 提取特征的输出通道。 featmap_strides=[4, 8, 16, 32]), # 多尺度特征图的步幅。 mask_head=dict( # mask 预测 head 模型 type='FCNMaskHead', # mask head 的类型,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/roi_heads/mask_heads/fcn_mask_head.py#L21。 num_convs=4, # mask head 中的卷积层数 in_channels=256, # 输入通道,应与 mask roi extractor 的输出通道一致。 conv_out_channels=256, # 卷积层的输出通道。 num_classes=80, # 要分割的类别数。 loss_mask=dict( # mask 分支的损失函数配置。 type='CrossEntropyLoss', # 用于分割的损失类型。 use_mask=True, # 是否只在正确的类中训练 mask。 loss_weight=1.0))), # mask 分支的损失权重. train_cfg = dict( # rpn 和 rcnn 训练超参数的配置 rpn=dict( # rpn 的训练配置 assigner=dict( # 分配器(assigner)的配置 type='MaxIoUAssigner', # 分配器的类型,MaxIoUAssigner 用于许多常见的检测器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10。 pos_iou_thr=0.7, # IoU >= 0.7(阈值) 被视为正样本。 neg_iou_thr=0.3, # IoU < 0.3(阈值) 被视为负样本。 min_pos_iou=0.3, # 将框作为正样本的最小 IoU 阈值。 match_low_quality=True, # 是否匹配低质量的框(更多细节见 API 文档). 
ignore_iof_thr=-1), # 忽略 bbox 的 IoF 阈值。 sampler=dict( # 正/负采样器(sampler)的配置 type='RandomSampler', # 采样器类型,还支持 PseudoSampler 和其他采样器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8。 num=256, # 样本数量。 pos_fraction=0.5, # 正样本占总样本的比例。 neg_pos_ub=-1, # 基于正样本数量的负样本上限。 add_gt_as_proposals=False), # 采样后是否添加 GT 作为 proposal。 allowed_border=-1, # 填充有效锚点后允许的边框。 pos_weight=-1, # 训练期间正样本的权重。 debug=False), # 是否设置调试(debug)模式 rpn_proposal=dict( # 在训练期间生成 proposals 的配置 nms_across_levels=False, # 是否对跨层的 box 做 NMS。仅适用于 `GARPNHead` ,naive rpn 不支持 nms cross levels。 nms_pre=2000, # NMS 前的 box 数 nms_post=1000, # NMS 要保留的 box 的数量,只在 GARPNHead 中起作用。 max_per_img=1000, # NMS 后要保留的 box 数量。 nms=dict( # NMS 的配置 type='nms', # NMS 的类别 iou_threshold=0.7 # NMS 的阈值 ), min_bbox_size=0), # 允许的最小 box 尺寸 rcnn=dict( # roi head 的配置。 assigner=dict( # 第二阶段分配器的配置,这与 rpn 中的不同 type='MaxIoUAssigner', # 分配器的类型,MaxIoUAssigner 目前用于所有 roi_heads。更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/assigners/max_iou_assigner.py#L10。 pos_iou_thr=0.5, # IoU >= 0.5(阈值)被认为是正样本。 neg_iou_thr=0.5, # IoU < 0.5(阈值)被认为是负样本。 min_pos_iou=0.5, # 将 box 作为正样本的最小 IoU 阈值 match_low_quality=False, # 是否匹配低质量下的 box(有关更多详细信息,请参阅 API 文档)。 ignore_iof_thr=-1), # 忽略 bbox 的 IoF 阈值 sampler=dict( type='RandomSampler', #采样器的类型,还支持 PseudoSampler 和其他采样器,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/bbox/samplers/random_sampler.py#L8。 num=512, # 样本数量 pos_fraction=0.25, # 正样本占总样本的比例。. neg_pos_ub=-1, # 基于正样本数量的负样本上限。. add_gt_as_proposals=True ), # 采样后是否添加 GT 作为 proposal。 mask_size=28, # mask 的大小 pos_weight=-1, # 训练期间正样本的权重。 debug=False)), # 是否设置调试模式。 test_cfg = dict( # 用于测试 rpn 和 rcnn 超参数的配置 rpn=dict( # 测试阶段生成 proposals 的配置 nms_across_levels=False, # 是否对跨层的 box 做 NMS。仅适用于`GARPNHead`,naive rpn 不支持做 NMS cross levels。 nms_pre=1000, # NMS 前的 box 数 nms_post=1000, # NMS 要保留的 box 的数量,只在`GARPNHead`中起作用。 max_per_img=1000, # NMS 后要保留的 box 数量 nms=dict( # NMS 的配置 type='nms', # NMS 的类型 iou_threshold=0.7 # NMS 阈值 ), min_bbox_size=0), # box 允许的最小尺寸 rcnn=dict( # roi heads 的配置 score_thr=0.05, # bbox 的分数阈值 nms=dict( # 第二步的 NMS 配置 type='nms', # NMS 的类型 iou_thr=0.5), # NMS 的阈值 max_per_img=100, # 每张图像的最大检测次数 mask_thr_binary=0.5))) # mask 预处的阈值 dataset_type = 'CocoDataset' # 数据集类型,这将被用来定义数据集。 data_root = 'data/coco/' # 数据的根路径。 img_norm_cfg = dict( # 图像归一化配置,用来归一化输入的图像。 mean=[123.675, 116.28, 103.53], # 预训练里用于预训练主干网络模型的平均值。 std=[58.395, 57.12, 57.375], # 预训练里用于预训练主干网络模型的标准差。 to_rgb=True ) # 预训练里用于预训练主干网络的图像的通道顺序。 train_pipeline = [ # 训练流程 dict(type='LoadImageFromFile'), # 第 1 个流程,从文件路径里加载图像。 dict( type='LoadAnnotations', # 第 2 个流程,对于当前图像,加载它的注释信息。 with_bbox=True, # 是否使用标注框(bounding box), 目标检测需要设置为 True。 with_mask=True, # 是否使用 instance mask,实例分割需要设置为 True。 poly2mask=False), # 是否将 polygon mask 转化为 instance mask, 设置为 False 以加速和节省内存。 dict( type='Resize', # 变化图像和其注释大小的数据增广的流程。 img_scale=(1333, 800), # 图像的最大规模。 keep_ratio=True ), # 是否保持图像的长宽比。 dict( type='RandomFlip', # 翻转图像和其注释大小的数据增广的流程。 flip_ratio=0.5), # 翻转图像的概率。 dict( type='Normalize', # 归一化当前图像的数据增广的流程。 mean=[123.675, 116.28, 103.53], # 这些键与 img_norm_cfg 一致,因为 img_norm_cfg 被 std=[58.395, 57.12, 57.375], # 用作参数。 to_rgb=True), dict( type='Pad', # 填充当前图像到指定大小的数据增广的流程。 size_divisor=32), # 填充图像可以被当前值整除。 dict(type='DefaultFormatBundle'), # 流程里收集数据的默认格式捆。 dict( type='Collect', # 决定数据中哪些键应该传递给检测器的流程 keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) ] test_pipeline = [ dict(type='LoadImageFromFile'), # 第 1 个流程,从文件路径里加载图像。 dict( 
type='MultiScaleFlipAug', # 封装测试时数据增广(test time augmentations)。 img_scale=(1333, 800), # 决定测试时可改变图像的最大规模。用于改变图像大小的流程。 flip=False, # 测试时是否翻转图像。 transforms=[ dict(type='Resize', # 使用改变图像大小的数据增广。 keep_ratio=True), # 是否保持宽和高的比例,这里的图像比例设置将覆盖上面的图像规模大小的设置。 dict(type='RandomFlip'), # 考虑到 RandomFlip 已经被添加到流程里,当 flip=False 时它将不被使用。 dict( type='Normalize', # 归一化配置项,值来自 img_norm_cfg。 mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True), dict( type='Pad', # 将配置传递给可被 32 整除的图像。 size_divisor=32), dict( type='ImageToTensor', # 将图像转为张量 keys=['img']), dict( type='Collect', # 收集测试时必须的键的收集流程。 keys=['img']) ]) ] data = dict( samples_per_gpu=2, # 单个 GPU 的 Batch size workers_per_gpu=2, # 单个 GPU 分配的数据加载线程数 train=dict( # 训练数据集配置 type='CocoDataset', # 数据集的类别, 更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/coco.py#L19。 ann_file='data/coco/annotations/instances_train2017.json', # 注释文件路径 img_prefix='data/coco/train2017/', # 图片路径前缀 pipeline=[ # 流程, 这是由之前创建的 train_pipeline 传递的。 dict(type='LoadImageFromFile'), dict( type='LoadAnnotations', with_bbox=True, with_mask=True, poly2mask=False), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict( type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict( type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']) ]), val=dict( # 验证数据集的配置 type='CocoDataset', ann_file='data/coco/annotations/instances_val2017.json', img_prefix='data/coco/val2017/', pipeline=[ # 由之前创建的 test_pipeline 传递的流程。 dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict( type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ]), test=dict( # 测试数据集配置,修改测试开发/测试(test-dev/test)提交的 ann_file type='CocoDataset', ann_file='data/coco/annotations/instances_val2017.json', img_prefix='data/coco/val2017/', pipeline=[ # 由之前创建的 test_pipeline 传递的流程。 dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict( type='Normalize', mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']) ]) ], samples_per_gpu=2 # 单个 GPU 测试时的 Batch size )) evaluation = dict( # evaluation hook 的配置,更多细节请参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/evaluation/eval_hooks.py#L7。 interval=1, # 验证的间隔。 metric=['bbox', 'segm']) # 验证期间使用的指标。 optimizer = dict( # 用于构建优化器的配置文件。支持 PyTorch 中的所有优化器,同时它们的参数与 PyTorch 里的优化器参数一致。 type='SGD', # 优化器种类,更多细节可参考 https://github.com/open-mmlab/mmdetection/blob/master/mmdet/core/optimizer/default_constructor.py#L13。 lr=0.02, # 优化器的学习率,参数的使用细节请参照对应的 PyTorch 文档。 momentum=0.9, # 动量(Momentum) weight_decay=0.0001) # SGD 的衰减权重(weight decay)。 optimizer_config = dict( # optimizer hook 的配置文件,执行细节请参考 https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/optimizer.py#L8。 grad_clip=None) # 大多数方法不使用梯度限制(grad_clip)。 lr_config = dict( # 学习率调整配置,用于注册 LrUpdater hook。 policy='step', # 调度流程(scheduler)的策略,也支持 CosineAnnealing, Cyclic, 等。请从 
https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/lr_updater.py#L9 参考 LrUpdater 的细节。 warmup='linear', # 预热(warmup)策略,也支持 `exp` 和 `constant`。 warmup_iters=500, # 预热的迭代次数 warmup_ratio= 0.001, # 用于热身的起始学习率的比率 step=[8, 11]) # 衰减学习率的起止回合数 runner = dict( type='EpochBasedRunner', # 将使用的 runner 的类别 (例如 IterBasedRunner 或 EpochBasedRunner)。 max_epochs=12) # runner 总回合数, 对于 IterBasedRunner 使用 `max_iters` checkpoint_config = dict( # Checkpoint hook 的配置文件。执行时请参考 https://github.com/open-mmlab/mmcv/blob/master/mmcv/runner/hooks/checkpoint.py。 interval=1) # 保存的间隔是 1。 log_config = dict( # register logger hook 的配置文件。 interval=50, # 打印日志的间隔 hooks=[ # 训练期间执行的钩子 dict(type='TextLoggerHook', by_epoch=False), dict(type='TensorboardLoggerHook', by_epoch=False), dict(type='MMDetWandbHook', by_epoch=False, # 还支持 Wandb 记录器,它需要安装 `wandb`。 init_kwargs={'entity': "OpenMMLab", # 用于登录wandb的实体 'project': "MMDet", # WandB中的项目名称 'config': cfg_dict}), # 检查 https://docs.wandb.ai/ref/python/init 以获取更多初始化参数 ]) # 用于记录训练过程的记录器(logger)。 dist_params = dict(backend='nccl') # 用于设置分布式训练的参数,端口也同样可被设置。 log_level = 'INFO' # 日志的级别。 load_from = None # 从一个给定路径里加载模型作为预训练模型,它并不会消耗训练时间。 resume_from = None # 从给定路径里恢复检查点(checkpoints),训练模式将从检查点保存的轮次开始恢复训练。 workflow = [('train', 1)] # runner 的工作流程,[('train', 1)] 表示只有一个工作流且工作流仅执行一次。根据 total_epochs 工作流训练 12个回合。 work_dir = 'work_dir' # 用于保存当前实验的模型检查点和日志的目录。 ``` ## 常问问题 (FAQ) ### 忽略基础配置文件里的部分内容 有时,您也许会设置 `_delete_=True` 去忽略基础配置文件里的一些域内容。 您也许可以参照 [mmcv](https://mmcv.readthedocs.io/en/latest/understand_mmcv/config.html#inherit-from-base-config-with-ignored-fields) 来获得一些简单的指导。 在 MMDetection里,例如为了改变 Mask R-CNN 的主干网络的某些内容: ```python model = dict( type='MaskRCNN', pretrained='torchvision://resnet50', backbone=dict( type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3), frozen_stages=1, norm_cfg=dict(type='BN', requires_grad=True), norm_eval=True, style='pytorch'), neck=dict(...), rpn_head=dict(...), roi_head=dict(...)) ``` 基础配置的 `Mask R-CNN` 使用 `ResNet-50`,在需要将主干网络改成 `HRNet` 的时候,因为 `HRNet` 和 `ResNet` 中有不同的字段,需要使用 `_delete_=True` 将新的键去替换 `backbone` 域内所有老的键。 ```python _base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' model = dict( pretrained='open-mmlab://msra/hrnetv2_w32', backbone=dict( _delete_=True, type='HRNet', extra=dict( stage1=dict( num_modules=1, num_branches=1, block='BOTTLENECK', num_blocks=(4, ), num_channels=(64, )), stage2=dict( num_modules=1, num_branches=2, block='BASIC', num_blocks=(4, 4), num_channels=(32, 64)), stage3=dict( num_modules=4, num_branches=3, block='BASIC', num_blocks=(4, 4, 4), num_channels=(32, 64, 128)), stage4=dict( num_modules=3, num_branches=4, block='BASIC', num_blocks=(4, 4, 4, 4), num_channels=(32, 64, 128, 256)))), neck=dict(...)) ``` ### 使用配置文件里的中间变量 配置文件里会使用一些中间变量,例如数据集里的 `train_pipeline`/`test_pipeline`。我们在定义新的 `train_pipeline`/`test_pipeline` 之后,需要将它们传递到 `data` 里。例如,我们想在训练或测试时,改变 Mask R-CNN 的多尺度策略 (multi scale strategy),`train_pipeline`/`test_pipeline` 是我们想要修改的中间变量。 ```python _base_ = './mask_rcnn_r50_fpn_1x_coco.py' img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True, with_mask=True), dict( type='Resize', img_scale=[(1333, 640), (1333, 672), (1333, 704), (1333, 736), (1333, 768), (1333, 800)], multiscale_mode="value", keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), 
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] data = dict( train=dict(pipeline=train_pipeline), val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline)) ``` 我们首先定义新的 `train_pipeline`/`test_pipeline` 然后传递到 `data` 里。 同样的,如果我们想从 `SyncBN` 切换到 `BN` 或者 `MMSyncBN`,我们需要修改配置文件里的每一个 `norm_cfg`。 ```python _base_ = './mask_rcnn_r50_fpn_1x_coco.py' norm_cfg = dict(type='BN', requires_grad=True) model = dict( backbone=dict(norm_cfg=norm_cfg), neck=dict(norm_cfg=norm_cfg), ...) ```
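修改或继承配置之后,建议在正式训练前检查最终合并出来的配置是否符合预期。下面是一个简单的检查脚本示意(`my_config.py` 为假设的文件名,仅作演示;也可以使用仓库中的 `tools/misc/print_config.py` 达到同样目的):

```python
from mmcv import Config

# 加载配置文件,_base_ 中的字段会在此处自动合并
cfg = Config.fromfile('my_config.py')  # 假设的配置文件名,仅作示意

# 打印合并后的完整配置,确认 train_pipeline、norm_cfg 等中间变量已经生效
print(cfg.pretty_text)

# 也可以只检查关心的字段
print(cfg.model.backbone.norm_cfg)
```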
24,350
45.032136
236
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/customize_dataset.md
# 教程 2: 自定义数据集 ## 支持新的数据格式 为了支持新的数据格式,可以选择将数据转换成现成的格式(COCO 或者 PASCAL)或将其转换成中间格式。当然也可以选择以离线的形式(在训练之前使用脚本转换)或者在线的形式(实现一个新的 dataset 在训练中进行转换)来转换数据。 在 MMDetection 中,建议将数据转换成 COCO 格式并以离线的方式进行,因此在完成数据转换后只需修改配置文件中的标注数据的路径和类别即可。 ### 将新的数据格式转换为现有的数据格式 最简单的方法就是将你的数据集转换成现有的数据格式(COCO 或者 PASCAL VOC) COCO 格式的 json 标注文件有如下必要的字段: ```python 'images': [ { 'file_name': 'COCO_val2014_000000001268.jpg', 'height': 427, 'width': 640, 'id': 1268 }, ... ], 'annotations': [ { 'segmentation': [[192.81, 247.09, ... 219.03, 249.06]], # 如果有 mask 标签 'area': 1035.749, 'iscrowd': 0, 'image_id': 1268, 'bbox': [192.81, 224.8, 74.73, 33.43], 'category_id': 16, 'id': 42986 }, ... ], 'categories': [ {'id': 0, 'name': 'car'}, ] ``` 在 json 文件中有三个必要的键: - `images`: 包含多个图片以及它们的信息的数组,例如 `file_name`、`height`、`width` 和 `id`。 - `annotations`: 包含多个实例标注信息的数组。 - `categories`: 包含多个类别名字和 ID 的数组。 在数据预处理之后,使用现有的数据格式来训练自定义的新数据集有如下两步(以 COCO 为例): 1. 为自定义数据集修改配置文件。 2. 检查自定义数据集的标注。 这里我们举一个例子来展示上面的两个步骤,这个例子使用包括 5 个类别的 COCO 格式的数据集来训练一个现有的 Cascade Mask R-CNN R50-FPN 检测器 #### 1. 为自定义数据集修改配置文件 配置文件的修改涉及两个方面: 1. `data` 部分。需要在 `data.train`、`data.val` 和 `data.test` 中添加 `classes`。 2. `model` 部分中的 `num_classes`。需要将默认值(COCO 数据集中为 80)修改为自定义数据集中的类别数。 `configs/my_custom_config.py` 内容如下: ```python # 新的配置来自基础的配置以更好地说明需要修改的地方 _base_ = './cascade_mask_rcnn_r50_fpn_1x_coco.py' # 1. 数据集设定 dataset_type = 'CocoDataset' classes = ('a', 'b', 'c', 'd', 'e') data = dict( samples_per_gpu=2, workers_per_gpu=2, train=dict( type=dataset_type, # 将类别名字添加至 `classes` 字段中 classes=classes, ann_file='path/to/your/train/annotation_data', img_prefix='path/to/your/train/image_data'), val=dict( type=dataset_type, # 将类别名字添加至 `classes` 字段中 classes=classes, ann_file='path/to/your/val/annotation_data', img_prefix='path/to/your/val/image_data'), test=dict( type=dataset_type, # 将类别名字添加至 `classes` 字段中 classes=classes, ann_file='path/to/your/test/annotation_data', img_prefix='path/to/your/test/image_data')) # 2. 模型设置 # 将所有的 `num_classes` 默认值修改为5(原来为80) model = dict( roi_head=dict( bbox_head=[ dict( type='Shared2FCBBoxHead', # 将所有的 `num_classes` 默认值修改为 5(原来为 80) num_classes=5), dict( type='Shared2FCBBoxHead', # 将所有的 `num_classes` 默认值修改为 5(原来为 80) num_classes=5), dict( type='Shared2FCBBoxHead', # 将所有的 `num_classes` 默认值修改为 5(原来为 80) num_classes=5)], # 将所有的 `num_classes` 默认值修改为 5(原来为 80) mask_head=dict(num_classes=5))) ``` #### 2. 检查自定义数据集的标注 假设你自己的数据集是 COCO 格式,那么需要保证数据的标注没有问题: 1. 标注文件中 `categories` 的长度要与配置中的 `classes` 元组长度相匹配,它们都表示有几类。(如例子中有 5 个类别) 2. 配置文件中 `classes` 字段应与标注文件里 `categories` 下的 `name` 有相同的元素且顺序一致。MMDetection 会自动将 `categories` 中不连续的 `id` 映射成连续的索引,因此 `categories` 下的 `name`的字符串顺序会影响标签的索引。同时,配置文件中的 `classes` 的字符串顺序也会影响到预测框可视化时的标签。 3. `annotations` 中的 `category_id` 必须是有效的值。比如所有 `category_id` 的值都应该属于 `categories` 中的 `id`。 下面是一个有效标注的例子: ```python 'annotations': [ { 'segmentation': [[192.81, 247.09, ... 219.03, 249.06]], #如果有 mask 标签。 'area': 1035.749, 'iscrowd': 0, 'image_id': 1268, 'bbox': [192.81, 224.8, 74.73, 33.43], 'category_id': 16, 'id': 42986 }, ... ], # MMDetection 会自动将 `categories` 中不连续的 `id` 映射成连续的索引。 'categories': [ {'id': 1, 'name': 'a'}, {'id': 3, 'name': 'b'}, {'id': 4, 'name': 'c'}, {'id': 16, 'name': 'd'}, {'id': 17, 'name': 'e'}, ] ``` 我们使用这种方式来支持 CityScapes 数据集。脚本在[cityscapes.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/dataset_converters/cityscapes.py) 并且我们提供了微调的[configs](https://github.com/open-mmlab/mmdetection/blob/master/configs/cityscapes). **注意** 1. 对于实例分割数据集, **MMDetection 目前只支持评估 COCO 格式的 mask AP**. 2. 
推荐训练之前进行离线转换,这样就可以继续使用 `CocoDataset` 且只需修改标注文件的路径以及训练的种类。 ### 调整新的数据格式为中间格式 如果不想将标注格式转换为 COCO 或者 PASCAL 格式也是可行的。实际上,我们定义了一种简单的标注格式并且与所有现有的数据格式兼容,也能进行离线或者在线转换。 数据集的标注是包含多个字典(dict)的列表,每个字典(dict)都与一张图片对应。测试时需要用到 `filename`(相对路径)、`width` 和 `height` 三个字段;训练时则额外需要 `ann`。`ann` 也是至少包含了两个字段的字典:`bboxes` 和 `labels`,它们都是 numpy array。有些数据集可能会提供如:crowd/difficult/ignored bboxes 标注,那么我们使用 `bboxes_ignore` 以及 `labels_ignore` 来包含它们。 下面给出一个例子。 ```python [ { 'filename': 'a.jpg', 'width': 1280, 'height': 720, 'ann': { 'bboxes': <np.ndarray, float32> (n, 4), 'labels': <np.ndarray, int64> (n, ), 'bboxes_ignore': <np.ndarray, float32> (k, 4), 'labels_ignore': <np.ndarray, int64> (k, ) (可选字段) } }, ... ] ``` 有两种方法处理自定义数据。 - 在线转换(online conversion) 可以新写一个继承自 `CustomDataset` 的 Dataset 类,并重写 `load_annotations(self, ann_file)` 以及 `get_ann_info(self, idx)` 这两个方法,正如[CocoDataset](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/coco.py)与[VOCDataset](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/voc.py). - 离线转换(offline conversion) 可以将标注格式转换为上述的任意格式并将其保存为 pickle 或者 json 文件,例如[pascal_voc.py](https://github.com/open-mmlab/mmdetection/blob/master/tools/dataset_converters/pascal_voc.py)。 然后使用`CustomDataset`。 ### 自定义数据集的例子: 假设文本文件中表示的是一种全新的标注格式。边界框的标注信息保存在 `annotation.txt` 中,内容如下: ``` # 000001.jpg 1280 720 2 10 20 40 60 1 20 40 50 60 2 # 000002.jpg 1280 720 3 50 20 40 60 2 20 40 30 45 2 30 40 50 60 3 ``` 我们可以在 `mmdet/datasets/my_dataset.py` 中创建一个新的 dataset 用以加载数据。 ```python import mmcv import numpy as np from .builder import DATASETS from .custom import CustomDataset @DATASETS.register_module() class MyDataset(CustomDataset): CLASSES = ('person', 'bicycle', 'car', 'motorcycle') def load_annotations(self, ann_file): ann_list = mmcv.list_from_file(ann_file) data_infos = [] for i, ann_line in enumerate(ann_list): if ann_line != '#': continue img_shape = ann_list[i + 2].split(' ') width = int(img_shape[0]) height = int(img_shape[1]) bbox_number = int(ann_list[i + 3]) anns = ann_line.split(' ') bboxes = [] labels = [] for anns in ann_list[i + 4:i + 4 + bbox_number]: bboxes.append([float(ann) for ann in anns[:4]]) labels.append(int(anns[4])) data_infos.append( dict( filename=ann_list[i + 1], width=width, height=height, ann=dict( bboxes=np.array(bboxes).astype(np.float32), labels=np.array(labels).astype(np.int64)) )) return data_infos def get_ann_info(self, idx): return self.data_infos[idx]['ann'] ``` 配置文件中,可以使用 `MyDataset` 进行如下修改 ```python dataset_A_train = dict( type='MyDataset', ann_file = 'image_list.txt', pipeline=train_pipeline ) ``` ## 使用 dataset 包装器自定义数据集 MMDetection 也支持非常多的数据集包装器(wrapper)来混合数据集或在训练时修改数据集的分布。 最近 MMDetection 支持如下三种数据集包装: - `RepeatDataset`:将整个数据集简单地重复。 - `ClassBalancedDataset`:以类别均衡的方式重复数据集。 - `ConcatDataset`:合并数据集。 ### 重复数据集(Repeat dataset) 使用 `RepeatDataset` 包装器来重复数据集。例如,假设原始数据集为 `Dataset_A`,重复它过后,其配置如下: ```python dataset_A_train = dict( type='RepeatDataset', times=N, dataset=dict( # Dataset_A 的原始配置信息 type='Dataset_A', ... pipeline=train_pipeline ) ) ``` ### 类别均衡数据集(Class balanced dataset) 使用 `ClassBalancedDataset` 作为包装器在类别的出现的频率上重复数据集。数据集需要实例化 `self.get_cat_ids(idx)` 函数以支持 `ClassBalancedDataset`。 比如,以 `oversample_thr=1e-3` 来重复数据集 `Dataset_A`,其配置如下: ```python dataset_A_train = dict( type='ClassBalancedDataset', oversample_thr=1e-3, dataset=dict( # Dataset_A 的原始配置信息 type='Dataset_A', ... pipeline=train_pipeline ) ) ``` 更多细节请参考[源码](../../mmdet/datasets/dataset_wrappers.py)。 ### 合并数据集(Concatenate dataset) 合并数据集有三种方法: 1. 
如果要合并的数据集类型一致但有多个的标注文件,那么可以使用如下配置将其合并。 ```python dataset_A_train = dict( type='Dataset_A', ann_file = ['anno_file_1', 'anno_file_2'], pipeline=train_pipeline ) ``` 如果合并的数据集适用于测试或者评估,那么这种方式支持每个数据集分开进行评估。如果想要将合并的数据集作为整体用于评估,那么可以像如下一样设置 `separate_eval=False`。 ```python dataset_A_train = dict( type='Dataset_A', ann_file = ['anno_file_1', 'anno_file_2'], separate_eval=False, pipeline=train_pipeline ) ``` 2. 如果想要合并的是不同数据集,那么可以使用如下配置。 ```python dataset_A_val = dict() dataset_B_val = dict() data = dict( imgs_per_gpu=2, workers_per_gpu=2, train=dataset_A_train, val=dict( type='ConcatDataset', datasets=[dataset_A_val, dataset_B_val], separate_eval=False)) ``` 只需设置 `separate_eval=False`,用户就可以将所有的数据集作为一个整体来评估。 **注意** 1. 在做评估时,`separate_eval=False` 选项是假设数据集使用了 `self.data_infos`。因此COCO数据集不支持此项操作,因为COCO数据集在做评估时并不是所有都依赖 `self.data_infos`。组合不同类型的数据集并将其作为一个整体来评估,这种做法没有得到测试,也不建议这样做。 2. 因为不支持评估 `ClassBalancedDataset` 和 `RepeatDataset`,所以也不支持评估它们的组合。 一个更复杂的例子则是分别将 `Dataset_A` 和 `Dataset_B` 重复N和M次,然后进行如下合并。 ```python dataset_A_train = dict( type='RepeatDataset', times=N, dataset=dict( type='Dataset_A', ... pipeline=train_pipeline ) ) dataset_A_val = dict( ... pipeline=test_pipeline ) dataset_A_test = dict( ... pipeline=test_pipeline ) dataset_B_train = dict( type='RepeatDataset', times=M, dataset=dict( type='Dataset_B', ... pipeline=train_pipeline ) ) data = dict( imgs_per_gpu=2, workers_per_gpu=2, train = [ dataset_A_train, dataset_B_train ], val = dataset_A_val, test = dataset_A_test ) ``` ## 修改数据集的类别 根据现有数据集的类型,我们可以修改它们的类别名称来训练其标注的子集。 例如,如果只想训练当前数据集中的三个类别,那么就可以修改数据集的类别元组。 数据集就会自动屏蔽掉其他类别的真实框。 ```python classes = ('person', 'bicycle', 'car') data = dict( train=dict(classes=classes), val=dict(classes=classes), test=dict(classes=classes)) ``` MMDetection V2.0 也支持从文件中读取类别名称,这种方式在实际应用中很常见。 假设存在文件 `classes.txt`,其包含了如下的类别名称。 ``` person bicycle car ``` 用户可以将类别设置成文件路径,数据集就会自动将其加载并转换成一个列表。 ```python classes = 'path/to/classes.txt' data = dict( train=dict(classes=classes), val=dict(classes=classes), test=dict(classes=classes)) ``` **注意** - 在 MMDetection v2.5.0 之前,如果类别为集合时数据集将自动过滤掉不包含 GT 的图片,且没办法通过修改配置将其关闭。这是一种不可取的行为而且会引起混淆,因为当类别不是集合时数据集只有在 `filter_empty_gt=True` 以及 `test_mode=False` 的情况下才会过滤掉不包含 GT 的图片。在 MMDetection v2.5.0 之后,我们将图片的过滤以及类别的修改进行解耦,如,数据集只有在 `filter_empty_gt=True` 和 `test_mode=False` 的情况下才会过滤掉不包含 GT 的图片,无论类别是否为集合。设置类别只会影响用于训练的标注类别,用户可以自行决定是否过滤不包含 GT 的图片。 - 因为中间格式只有框的标签并不包含类别的名字,所以使用 `CustomDataset` 时用户不能通过修改配置来过滤不含 GT 的图片。但是可以通过离线的方式来解决。 - 当设置数据集中的 `classes` 时,记得修改 `num_classes`。从 v2.9.0 (PR#4508) 之后,我们实现了[NumClassCheckHook](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/datasets/utils.py)来检查类别数是否一致。 - 我们在未来将会重构设置数据集类别以及数据集过滤的特性,使其更加地方便用户使用。
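结合前文"将新的数据格式转换为现有的数据格式"中推荐的离线转换方式,下面给出一个把中间格式标注转换成 COCO 风格 json 的简化脚本示意(函数名、类别与输出路径均为假设,实际使用时请按自己的数据集调整;中间格式的 `bboxes` 为 `[x1, y1, x2, y2]`,而 COCO 的 `bbox` 为 `[x, y, w, h]`):

```python
import mmcv


def convert_to_coco(data_infos, out_file, classes):
    """把中间格式的标注列表转换成 COCO 风格的 json(简化示意)。"""
    images, annotations = [], []
    ann_id = 0
    for img_id, info in enumerate(data_infos):
        images.append(
            dict(id=img_id, file_name=info['filename'],
                 width=info['width'], height=info['height']))
        for bbox, label in zip(info['ann']['bboxes'], info['ann']['labels']):
            x1, y1, x2, y2 = bbox
            annotations.append(
                dict(id=ann_id, image_id=img_id, category_id=int(label),
                     bbox=[float(x1), float(y1), float(x2 - x1), float(y2 - y1)],
                     area=float((x2 - x1) * (y2 - y1)), iscrowd=0))
            ann_id += 1
    categories = [dict(id=i, name=name) for i, name in enumerate(classes)]
    mmcv.dump(
        dict(images=images, annotations=annotations, categories=categories),
        out_file)  # 输出路径以 .json 结尾时会自动保存为 json


# convert_to_coco(data_infos, 'train.json', classes=('a', 'b', 'c', 'd', 'e'))
```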
11,384
23.912473
335
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/customize_losses.md
# 教程 6: 自定义损失函数 MMDetection 为用户提供了不同的损失函数。但是默认的配置可能无法适应不同的数据和模型,所以用户可能会希望修改某一个损失函数来适应新的情况。 本教程首先详细的解释计算损失的过程然后给出一些关于如何修改每一个步骤的指导。对损失的修改可以被分为微调和加权。 ## 一个损失的计算过程 给定输入(包括预测和目标,以及权重),损失函数会把输入的张量映射到最后的损失标量。映射过程可以分为下面五个步骤: 1. 设置采样方法为对正负样本进行采样。 2. 通过损失核函数获取**元素**或者**样本**损失。 3. 通过权重张量来给损失**逐元素**权重。 4. 把损失张量归纳为一个**标量**。 5. 用一个**张量**给当前损失一个权重。 ## 设置采样方法(步骤 1) 对于一些损失函数,需要采样策略来避免正负样本之间的不平衡。 例如,在RPN head中使用`CrossEntropyLoss`时,我们需要在`train_cfg`中设置`RandomSampler` ```python train_cfg=dict( rpn=dict( sampler=dict( type='RandomSampler', num=256, pos_fraction=0.5, neg_pos_ub=-1, add_gt_as_proposals=False)) ``` 对于其他一些具有正负样本平衡机制的损失,例如 Focal Loss、GHMC 和 QualityFocalLoss,不再需要进行采样。 ## 微调损失 微调一个损失主要与步骤 2,4,5 有关,大部分的修改可以在配置文件中指定。这里我们用 [Focal Loss (FL)](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/losses/focal_loss.py) 作为例子。 下面的代码分别是构建 FL 的方法和它的配置文件,他们是一一对应的。 ```python @LOSSES.register_module() class FocalLoss(nn.Module): def __init__(self, use_sigmoid=True, gamma=2.0, alpha=0.25, reduction='mean', loss_weight=1.0): ``` ```python loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0) ``` ### 微调超参数(步骤2) `gamma` 和 `beta` 是 Focal Loss 中的两个超参数。如果我们想把 `gamma` 的值设为 1.5,把 `alpha` 的值设为 0.5,我们可以在配置文件中按照如下指定: ```python loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=1.5, alpha=0.5, loss_weight=1.0) ``` ### 微调归纳方式(步骤4) Focal Loss 默认的归纳方式是 `mean`。如果我们想把归纳方式从 `mean` 改成 `sum`,我们可以在配置文件中按照如下指定: ```python loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=1.0, reduction='sum') ``` ### 微调损失权重(步骤5) 这里的损失权重是一个标量,他用来控制多任务学习中不同损失的重要程度,例如,分类损失和回归损失。如果我们想把分类损失的权重设为 0.5,我们可以在配置文件中如下指定: ```python loss_cls=dict( type='FocalLoss', use_sigmoid=True, gamma=2.0, alpha=0.25, loss_weight=0.5) ``` ## 加权损失(步骤3) 加权损失就是我们逐元素修改损失权重。更具体来说,我们给损失张量乘以一个与他有相同形状的权重张量。所以,损失中不同的元素可以被赋予不同的比例,所以这里叫做逐元素。损失的权重在不同模型中变化很大,而且与上下文相关,但是总的来说主要有两种损失权重:分类损失的 `label_weights` 和边界框的 `bbox_weights`。你可以在相应的头中的 `get_target` 方法中找到他们。这里我们使用 [ATSSHead](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/atss_head.py#L530) 作为一个例子。它继承了 [AnchorHead](https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/dense_heads/anchor_head.py),但是我们重写它的 `get_targets` 方法来产生不同的 `label_weights` 和 `bbox_weights`。 ``` class ATSSHead(AnchorHead): ... def get_targets(self, anchor_list, valid_flag_list, gt_bboxes_list, img_metas, gt_bboxes_ignore_list=None, gt_labels_list=None, label_channels=1, unmap_outputs=True): ```
2,932
22.277778
440
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/customize_models.md
# 教程 4: 自定义模型 我们简单地把模型的各个组件分为五类: - 主干网络 (backbone):通常是一个用来提取特征图 (feature map) 的全卷积网络 (FCN network),例如:ResNet, MobileNet。 - Neck:主干网络和 Head 之间的连接部分,例如:FPN, PAFPN。 - Head:用于具体任务的组件,例如:边界框预测和掩码预测。 - 区域提取器 (roi extractor):从特征图中提取 RoI 特征,例如:RoI Align。 - 损失 (loss):在 Head 组件中用于计算损失的部分,例如:FocalLoss, L1Loss, GHMLoss. ## 开发新的组件 ### 添加一个新的主干网络 这里,我们以 MobileNet 为例来展示如何开发新组件。 #### 1. 定义一个新的主干网络(以 MobileNet 为例) 新建一个文件 `mmdet/models/backbones/mobilenet.py` ```python import torch.nn as nn from ..builder import BACKBONES @BACKBONES.register_module() class MobileNet(nn.Module): def __init__(self, arg1, arg2): pass def forward(self, x): # should return a tuple pass ``` #### 2. 导入该模块 你可以添加下述代码到 `mmdet/models/backbones/__init__.py` ```python from .mobilenet import MobileNet ``` 或添加: ```python custom_imports = dict( imports=['mmdet.models.backbones.mobilenet'], allow_failed_imports=False) ``` 到配置文件以避免原始代码被修改。 #### 3. 在你的配置文件中使用该主干网络 ```python model = dict( ... backbone=dict( type='MobileNet', arg1=xxx, arg2=xxx), ... ``` ### 添加新的 Neck #### 1. 定义一个 Neck(以 PAFPN 为例) 新建一个文件 `mmdet/models/necks/pafpn.py` ```python from ..builder import NECKS @NECKS.register_module() class PAFPN(nn.Module): def __init__(self, in_channels, out_channels, num_outs, start_level=0, end_level=-1, add_extra_convs=False): pass def forward(self, inputs): # implementation is ignored pass ``` #### 2. 导入该模块 你可以添加下述代码到 `mmdet/models/necks/__init__.py` ```python from .pafpn import PAFPN ``` 或添加: ```python custom_imports = dict( imports=['mmdet.models.necks.pafpn.py'], allow_failed_imports=False) ``` 到配置文件以避免原始代码被修改。 #### 3. 修改配置文件 ```python neck=dict( type='PAFPN', in_channels=[256, 512, 1024, 2048], out_channels=256, num_outs=5) ``` ### 添加新的 Head 我们以 [Double Head R-CNN](https://arxiv.org/abs/1904.06493) 为例来展示如何添加一个新的 Head。 首先,添加一个新的 bbox head 到 `mmdet/models/roi_heads/bbox_heads/double_bbox_head.py`。 Double Head R-CNN 在目标检测上实现了一个新的 bbox head。为了实现 bbox head,我们需要使用如下的新模块中三个函数。 ```python from mmdet.models.builder import HEADS from .bbox_head import BBoxHead @HEADS.register_module() class DoubleConvFCBBoxHead(BBoxHead): r"""Bbox head used in Double-Head R-CNN /-> cls /-> shared convs -> \-> reg roi features /-> cls \-> shared fc -> \-> reg """ # noqa: W605 def __init__(self, num_convs=0, num_fcs=0, conv_out_channels=1024, fc_out_channels=1024, conv_cfg=None, norm_cfg=dict(type='BN'), **kwargs): kwargs.setdefault('with_avg_pool', True) super(DoubleConvFCBBoxHead, self).__init__(**kwargs) def forward(self, x_cls, x_reg): ``` 然后,如有必要,实现一个新的 bbox head。我们打算从 `StandardRoIHead` 来继承新的 `DoubleHeadRoIHead`。我们可以发现 `StandardRoIHead` 已经实现了下述函数。 ```python import torch from mmdet.core import bbox2result, bbox2roi, build_assigner, build_sampler from ..builder import HEADS, build_head, build_roi_extractor from .base_roi_head import BaseRoIHead from .test_mixins import BBoxTestMixin, MaskTestMixin @HEADS.register_module() class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin): """Simplest base roi head including one bbox head and one mask head. 
""" def init_assigner_sampler(self): def init_bbox_head(self, bbox_roi_extractor, bbox_head): def init_mask_head(self, mask_roi_extractor, mask_head): def forward_dummy(self, x, proposals): def forward_train(self, x, img_metas, proposal_list, gt_bboxes, gt_labels, gt_bboxes_ignore=None, gt_masks=None): def _bbox_forward(self, x, rois): def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, img_metas): def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks, img_metas): def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None): def simple_test(self, x, proposal_list, img_metas, proposals=None, rescale=False): """Test without augmentation.""" ``` Double Head 的修改主要在 bbox_forward 的逻辑中,且它从 `StandardRoIHead` 中继承了其他逻辑。在 `mmdet/models/roi_heads/double_roi_head.py` 中,我们用下述代码实现新的 bbox head: ```python from ..builder import HEADS from .standard_roi_head import StandardRoIHead @HEADS.register_module() class DoubleHeadRoIHead(StandardRoIHead): """RoI head for Double Head RCNN https://arxiv.org/abs/1904.06493 """ def __init__(self, reg_roi_scale_factor, **kwargs): super(DoubleHeadRoIHead, self).__init__(**kwargs) self.reg_roi_scale_factor = reg_roi_scale_factor def _bbox_forward(self, x, rois): bbox_cls_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois) bbox_reg_feats = self.bbox_roi_extractor( x[:self.bbox_roi_extractor.num_inputs], rois, roi_scale_factor=self.reg_roi_scale_factor) if self.with_shared_head: bbox_cls_feats = self.shared_head(bbox_cls_feats) bbox_reg_feats = self.shared_head(bbox_reg_feats) cls_score, bbox_pred = self.bbox_head(bbox_cls_feats, bbox_reg_feats) bbox_results = dict( cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_cls_feats) return bbox_results ``` 最终,用户需要把该模块添加到 `mmdet/models/bbox_heads/__init__.py` 和 `mmdet/models/roi_heads/__init__.py` 以使相关的注册表可以找到并加载他们。 或者,用户可以添加: ```python custom_imports=dict( imports=['mmdet.models.roi_heads.double_roi_head', 'mmdet.models.bbox_heads.double_bbox_head']) ``` 到配置文件并实现相同的目的。 Double Head R-CNN 的配置文件如下: ```python _base_ = '../faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py' model = dict( roi_head=dict( type='DoubleHeadRoIHead', reg_roi_scale_factor=1.3, bbox_head=dict( _delete_=True, type='DoubleConvFCBBoxHead', num_convs=4, num_fcs=2, in_channels=256, conv_out_channels=1024, fc_out_channels=1024, roi_feat_size=7, num_classes=80, bbox_coder=dict( type='DeltaXYWHBBoxCoder', target_means=[0., 0., 0., 0.], target_stds=[0.1, 0.1, 0.2, 0.2]), reg_class_agnostic=False, loss_cls=dict( type='CrossEntropyLoss', use_sigmoid=False, loss_weight=2.0), loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=2.0)))) ``` 从 MMDetection 2.0 版本起,配置系统支持继承配置以使用户可以专注于修改。 Double Head R-CNN 主要使用了一个新的 DoubleHeadRoIHead 和一个新的 `DoubleConvFCBBoxHead`,参数需要根据每个模块的 `__init__` 函数来设置。 ### 添加新的损失 假设你想添加一个新的损失 `MyLoss` 用于边界框回归。 为了添加一个新的损失函数,用户需要在 `mmdet/models/losses/my_loss.py` 中实现。 装饰器 `weighted_loss` 可以使损失每个部分加权。 ```python import torch import torch.nn as nn from ..builder import LOSSES from .utils import weighted_loss @weighted_loss def my_loss(pred, target): assert pred.size() == target.size() and target.numel() > 0 loss = torch.abs(pred - target) return loss @LOSSES.register_module() class MyLoss(nn.Module): def __init__(self, reduction='mean', loss_weight=1.0): super(MyLoss, self).__init__() self.reduction = reduction self.loss_weight = loss_weight def forward(self, pred, target, weight=None, avg_factor=None, reduction_override=None): assert reduction_override in (None, 'none', 'mean', 
'sum') reduction = ( reduction_override if reduction_override else self.reduction) loss_bbox = self.loss_weight * my_loss( pred, target, weight, reduction=reduction, avg_factor=avg_factor) return loss_bbox ``` 然后,用户需要把它加到 `mmdet/models/losses/__init__.py`。 ```python from .my_loss import MyLoss, my_loss ``` 或者,你可以添加: ```python custom_imports=dict( imports=['mmdet.models.losses.my_loss']) ``` 到配置文件来实现相同的目的。 如使用,请修改 `loss_xxx` 字段。 因为 MyLoss 是用于回归的,你需要在 Head 中修改 `loss_xxx` 字段。 ```python loss_bbox=dict(type='MyLoss', loss_weight=1.0)) ```
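完成注册之后,可以先用随机张量快速验证新的损失能否被正确构建和前向计算(示意代码,假设 `MyLoss` 已按上文写入 `mmdet/models/losses/my_loss.py` 并加入 `__init__.py`):

```python
import torch

from mmdet.models.builder import build_loss

# 用与配置文件相同的字典写法,通过注册器构建损失
loss_bbox = build_loss(dict(type='MyLoss', loss_weight=1.0))

pred = torch.rand(8, 4)
target = torch.rand(8, 4)
print(loss_bbox(pred, target))  # 输出一个标量损失
```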
8,781
23.394444
138
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/customize_runtime.md
# 教程 5: 自定义训练配置
16
7.5
15
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/data_pipeline.md
# 教程 3: 自定义数据预处理流程 ## 数据流程的设计 按照惯例,我们使用 `Dataset` 和 `DataLoader` 进行多进程的数据加载。`Dataset` 返回字典类型的数据,数据内容为模型 `forward` 方法的各个参数。由于在目标检测中,输入的图像数据具有不同的大小,我们在 `MMCV` 里引入一个新的 `DataContainer` 类去收集和分发不同大小的输入数据。更多细节请参考[这里](https://github.com/open-mmlab/mmcv/blob/master/mmcv/parallel/data_container.py)。 数据的准备流程和数据集是解耦的。通常一个数据集定义了如何处理标注数据(annotations)信息,而一个数据流程定义了准备一个数据字典的所有步骤。一个流程包括一系列的操作,每个操作都把一个字典作为输入,然后再输出一个新的字典给下一个变换操作。 我们在下图展示了一个经典的数据处理流程。蓝色块是数据处理操作,随着数据流程的处理,每个操作都可以在结果字典中加入新的键(标记为绿色)或更新现有的键(标记为橙色)。 ![pipeline figure](../../../resources/data_pipeline.png) 这些操作可以分为数据加载(data loading)、预处理(pre-processing)、格式变化(formatting)和测试时数据增强(test-time augmentation)。 下面的例子是 `Faster R-CNN` 的一个流程: ```python img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(1333, 800), flip=False, transforms=[ dict(type='Resize', keep_ratio=True), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] ``` 对于每个操作,我们列出它添加、更新、移除的相关字典域 (dict fields): ### 数据加载 Data loading `LoadImageFromFile` - 增加:img, img_shape, ori_shape `LoadAnnotations` - 增加:gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg, bbox_fields, mask_fields `LoadProposals` - 增加:proposals ### 预处理 Pre-processing `Resize` - 增加:scale, scale_idx, pad_shape, scale_factor, keep_ratio - 更新:img, img_shape, \*bbox_fields, \*mask_fields, \*seg_fields `RandomFlip` - 增加:flip - 更新:img, \*bbox_fields, \*mask_fields, \*seg_fields `Pad` - 增加:pad_fixed_size, pad_size_divisor - 更新:img, pad_shape, \*mask_fields, \*seg_fields `RandomCrop` - 更新:img, pad_shape, gt_bboxes, gt_labels, gt_masks, \*bbox_fields `Normalize` - 增加:img_norm_cfg - 更新:img `SegRescale` - 更新:gt_semantic_seg `PhotoMetricDistortion` - 更新:img `Expand` - 更新:img, gt_bboxes `MinIoURandomCrop` - 更新:img, gt_bboxes, gt_labels `Corrupt` - 更新:img ### 格式 Formatting `ToTensor` - 更新:由 `keys` 指定 `ImageToTensor` - 更新:由 `keys` 指定 `Transpose` - 更新:由 `keys` 指定 `ToDataContainer` - 更新:由 `keys` 指定 `DefaultFormatBundle` - 更新:img, proposals, gt_bboxes, gt_bboxes_ignore, gt_labels, gt_masks, gt_semantic_seg `Collect` - 增加:img_metas(img_metas 的键(key)被 `meta_keys` 指定) - 移除:除了 `keys` 指定的键(key)之外的所有其他的键(key) ### 测试时数据增强 Test time augmentation `MultiScaleFlipAug` ## 拓展和使用自定义的流程 1. 在任意文件里写一个新的流程,例如在 `my_pipeline.py`,它以一个字典作为输入并且输出一个字典: ```python import random from mmdet.datasets import PIPELINES @PIPELINES.register_module() class MyTransform: """Add your transform Args: p (float): Probability of shifts. Default 0.5. """ def __init__(self, p=0.5): self.p = p def __call__(self, results): if random.random() > self.p: results['dummy'] = True return results ``` 2. 
在配置文件里调用并使用你写的数据处理流程,需要确保你的训练脚本能够正确导入新增模块: ```python custom_imports = dict(imports=['path.to.my_pipeline'], allow_failed_imports=False) img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True), dict(type='Resize', img_scale=(1333, 800), keep_ratio=True), dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', **img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='MyTransform', p=0.2), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), ] ``` 3. 可视化数据增强处理流程的结果 如果想要可视化数据增强处理流程的结果,可以使用 `tools/misc/browse_dataset.py` 直观 地浏览检测数据集(图像和标注信息),或将图像保存到指定目录。 使用方法请参考[日志分析](../useful_tools.md)
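如果想在交互式环境中快速确认自定义的数据增强已经注册成功,也可以单独构建并调用它(示意代码,基于上文定义的 `MyTransform`;按其实现,`p=0.2` 时大约有 80% 的概率往结果中写入 `dummy` 键):

```python
from mmcv.utils import build_from_cfg

from mmdet.datasets import PIPELINES
from mmdet.datasets.pipelines import Compose

# 单独构建自定义变换,做一个简单的冒烟测试
transform = build_from_cfg(dict(type='MyTransform', p=0.2), PIPELINES)
print(transform(dict()))

# 也可以把它和其他变换组合成一个完整的 pipeline
pipeline = Compose([dict(type='MyTransform', p=0.2)])
```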
4,400
22.041885
260
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/finetune.md
# 教程 7: 模型微调

在 COCO 数据集上预训练的检测器可以作为其他数据集(例如 CityScapes 和 KITTI 数据集)优质的预训练模型。
本教程将指导用户如何把 [ModelZoo](../model_zoo.md) 中提供的模型用于其他数据集,并使所训练的模型获得更好的性能。

以下是在新数据集中微调模型需要的两个步骤。

- 按 [教程2:自定义数据集的方法](customize_dataset.md) 中的方法对新数据集添加支持
- 按照本教程中所讨论的方法,修改配置信息

接下来将会以 Cityscapes Dataset 上的微调过程作为例子,具体讲述用户需要在配置中修改的五个部分。

## 继承基础配置

为了减轻编写整个配置的负担并减少漏洞的数量,MMDetection V2.0 支持从多个现有配置中继承配置信息。微调 Mask R-CNN 模型的时候,新的配置需要从 `_base_/models/mask_rcnn_r50_fpn.py` 中继承配置信息来构建模型的基本结构。当使用 Cityscapes 数据集时,新的配置可以简便地从 `_base_/datasets/cityscapes_instance.py` 中继承。对于训练过程的运行设置部分,新配置需要从 `_base_/default_runtime.py` 中继承。这些配置文件存放在 `configs` 目录下,用户也可以选择重新编写全部内容而不是使用继承方法。

```python
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/cityscapes_instance.py', '../_base_/default_runtime.py'
]
```

## Head 的修改

接下来新的配置还需要根据新数据集的类别数量对 Head 进行修改。只需要对 roi_head 中的 `num_classes` 进行修改。修改后除了最后的预测 Head 之外,预训练模型权重的大部分都会被重新使用。

```python
model = dict(
    pretrained=None,
    roi_head=dict(
        bbox_head=dict(
            type='Shared2FCBBoxHead',
            in_channels=256,
            fc_out_channels=1024,
            roi_feat_size=7,
            num_classes=8,
            bbox_coder=dict(
                type='DeltaXYWHBBoxCoder',
                target_means=[0., 0., 0., 0.],
                target_stds=[0.1, 0.1, 0.2, 0.2]),
            reg_class_agnostic=False,
            loss_cls=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
            loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
        mask_head=dict(
            type='FCNMaskHead',
            num_convs=4,
            in_channels=256,
            conv_out_channels=256,
            num_classes=8,
            loss_mask=dict(
                type='CrossEntropyLoss', use_mask=True, loss_weight=1.0))))
```

## 数据集的修改

用户可能还需要准备数据集并编写有关数据集的配置。目前 MMDetection V2.0 的配置文件已经支持 VOC、WIDER FACE、COCO 和 Cityscapes Dataset 的数据集信息。

## 训练策略的修改

微调所用的超参数与默认的训练策略不同,通常需要更小的学习率和更少的训练回合。

```python
# 优化器
# batch size 为 8 时的 lr 配置
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
optimizer_config = dict(grad_clip=None)
# 学习策略
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[7])
# runner 中的 max_epochs 和 lr_config 中的 step 需要针对自定义数据集进行专门调整
runner = dict(max_epochs=8)
log_config = dict(interval=100)
```

## 使用预训练模型

如果要使用预训练模型,需要在新配置的 `load_from` 字段中指定模型权重的链接。用户需要在训练开始之前下载好所需的模型权重,从而避免在训练过程中浪费宝贵的时间。

```python
load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth'  # noqa
```
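把上述几个部分拼在一起,一个最小的微调配置大致如下(仅为示意草稿,省略了数据部分的自定义字段;路径与权重链接与上文保持一致):

```python
_base_ = [
    '../_base_/models/mask_rcnn_r50_fpn.py',
    '../_base_/datasets/cityscapes_instance.py',
    '../_base_/default_runtime.py'
]

# 只需覆盖类别数,其余结构沿用基础配置
model = dict(
    roi_head=dict(
        bbox_head=dict(num_classes=8), mask_head=dict(num_classes=8)))

optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[7])
runner = dict(max_epochs=8)

load_from = 'https://download.openmmlab.com/mmdetection/v2.0/mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco/mask_rcnn_r50_caffe_fpn_mstrain-poly_3x_coco_bbox_mAP-0.408__segm_mAP-0.37_20200504_163245-42aa3d00.pth'  # noqa
```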
2,742
30.170455
315
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/how_to.md
# 教程 11: How to xxx 本教程收集了任何如何使用 MMDetection 进行 xxx 的答案。 如果您遇到有关`如何做`的问题及答案,请随时更新此文档! ## 使用 MMClassification 的骨干网络 MMDet、MMCls、MMSeg 中的模型注册表都继承自 MMCV 中的根注册表,允许这些存储库直接使用彼此已经实现的模块。 因此用户可以在 MMDetection 中使用来自 MMClassification 的骨干网络,而无需实现MMClassification 中已经存在的网络。 ### 使用在 MMClassification 中实现的骨干网络 假设想将 `MobileNetV3-small` 作为 `RetinaNet` 的骨干网络,则配置文件如下。 ```python _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # please install mmcls>=0.20.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) pretrained = 'https://download.openmmlab.com/mmclassification/v0/mobilenet_v3/convert/mobilenet_v3_small-8427ecf0.pth' model = dict( backbone=dict( _delete_=True, # 将 _base_ 中关于 backbone 的字段删除 type='mmcls.MobileNetV3', # 使用 mmcls 中的 MobileNetV3 arch='small', out_indices=(3, 8, 11), # 修改 out_indices init_cfg=dict( type='Pretrained', checkpoint=pretrained, prefix='backbone.')), # MMCls 中骨干网络的预训练权重含义 prefix='backbone.',为了正常加载权重,需要把这个 prefix 去掉。 # 修改 in_channels neck=dict(in_channels=[24, 48, 96], start_level=0)) ``` ### 通过 MMClassification 使用 TIMM 中实现的骨干网络 由于 MMClassification 提供了 Py**T**orch **Im**age **M**odels (`timm`) 骨干网络的封装,用户也可以通过 MMClassification 直接使用 `timm` 中的骨干网络。假设想将 [`EfficientNet-B1`](https://github.com/open-mmlab/mmdetection/blob/master/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py) 作为 `RetinaNet` 的骨干网络,则配置文件如下。 ```python # https://github.com/open-mmlab/mmdetection/blob/master/configs/timm_example/retinanet_timm_efficientnet_b1_fpn_1x_coco.py _base_ = [ '../_base_/models/retinanet_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] # please install mmcls>=0.20.0 # import mmcls.models to trigger register_module in mmcls custom_imports = dict(imports=['mmcls.models'], allow_failed_imports=False) model = dict( backbone=dict( _delete_=True, # 将 _base_ 中关于 backbone 的字段删除 type='mmcls.TIMMBackbone', # 使用 mmcls 中 timm 骨干网络 model_name='efficientnet_b1', features_only=True, pretrained=True, out_indices=(1, 2, 3, 4)), # 修改 out_indices neck=dict(in_channels=[24, 40, 112, 320])) # 修改 in_channels optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001) ``` `type='mmcls.TIMMBackbone'` 表示在 MMDetection 中使用 MMClassification 中的 `TIMMBackbone` 类,并且使用的模型为` EfficientNet-B1`,其中 `mmcls` 表示 MMClassification 库,而 `TIMMBackbone ` 表示 MMClassification 中实现的 TIMMBackbone 包装器。 关于层次注册器的具体原理可以参考 [MMCV 文档](https://github.com/open-mmlab/mmcv/blob/master/docs/zh_cn/understand_mmcv/registry.md#%E6%B3%A8%E5%86%8C%E5%99%A8%E5%B1%82%E7%BB%93%E6%9E%84),关于如何使用 MMClassification 中的其他 backbone,可以参考 [MMClassification 文档](https://github.com/open-mmlab/mmclassification/blob/master/docs/zh_CN/tutorials/config.md)。 ## 使用马赛克数据增强 如果你想在训练中使用 `Mosaic`,那么请确保你同时使用 `MultiImageMixDataset`。以 `Faster R-CNN` 算法为例,你可以通过如下做法实现: ```python # 直接打开 configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py ,增添如下字段 data_root = 'data/coco/' dataset_type = 'CocoDataset' img_scale=(1333, 800)​ img_norm_cfg = dict( mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True) train_pipeline = [ dict(type='Mosaic', img_scale=img_scale, pad_val=114.0), dict( type='RandomAffine', scaling_ratio_range=(0.1, 2), border=(-img_scale[0] // 2, -img_scale[1] // 2)), # 图像经过马赛克处理后会放大4倍,所以我们使用仿射变换来恢复图像的大小。 dict(type='RandomFlip', flip_ratio=0.5), dict(type='Normalize', 
**img_norm_cfg), dict(type='Pad', size_divisor=32), dict(type='DefaultFormatBundle'), dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']) ] train_dataset = dict( _delete_ = True, # 删除不必要的设置 type='MultiImageMixDataset', dataset=dict( type=dataset_type, ann_file=data_root + 'annotations/instances_train2017.json', img_prefix=data_root + 'train2017/', pipeline=[ dict(type='LoadImageFromFile'), dict(type='LoadAnnotations', with_bbox=True) ], filter_empty_gt=False, ), pipeline=train_pipeline ) ​ data = dict( train=train_dataset ) ``` ## 在配置文件中冻结骨干网络后在训练中解冻骨干网络 如果你在配置文件中已经冻结了骨干网络并希望在几个训练周期后解冻它,你可以通过 hook 来实现这个功能。以用 ResNet 为骨干网络的 Faster R-CNN 为例,你可以冻结一个骨干网络的一个层并在配置文件中添加如下 `custom_hooks`: ```python _base_ = [ '../_base_/models/faster_rcnn_r50_fpn.py', '../_base_/datasets/coco_detection.py', '../_base_/schedules/schedule_1x.py', '../_base_/default_runtime.py' ] model = dict( # freeze one stage of the backbone network. backbone=dict(frozen_stages=1), ) custom_hooks = [dict(type="UnfreezeBackboneEpochBasedHook", unfreeze_epoch=1)] ``` 同时在 `mmdet/core/hook/unfreeze_backbone_epoch_based_hook.py` 当中书写 `UnfreezeBackboneEpochBasedHook` 类 ```python from mmcv.parallel import is_module_wrapper from mmcv.runner.hooks import HOOKS, Hook @HOOKS.register_module() class UnfreezeBackboneEpochBasedHook(Hook): """Unfreeze backbone network Hook. Args: unfreeze_epoch (int): The epoch unfreezing the backbone network. """ def __init__(self, unfreeze_epoch=1): self.unfreeze_epoch = unfreeze_epoch def before_train_epoch(self, runner): # Unfreeze the backbone network. # Only valid for resnet. if runner.epoch == self.unfreeze_epoch: model = runner.model if is_module_wrapper(model): model = model.module backbone = model.backbone if backbone.frozen_stages >= 0: if backbone.deep_stem: backbone.stem.train() for param in backbone.stem.parameters(): param.requires_grad = True else: backbone.norm1.train() for m in [backbone.conv1, backbone.norm1]: for param in m.parameters(): param.requires_grad = True for i in range(1, backbone.frozen_stages + 1): m = getattr(backbone, f'layer{i}') m.train() for param in m.parameters(): param.requires_grad = True ``` ## 获得新的骨干网络的通道数 如果你想获得一个新骨干网络的通道数,你可以单独构建这个骨干网络并输入一个伪造的图片来获取每一个阶段的输出。 以 `ResNet` 为例: ```python from mmdet.models import ResNet import torch self = ResNet(depth=18) self.eval() inputs = torch.rand(1, 3, 32, 32) level_outputs = self.forward(inputs) for level_out in level_outputs: print(tuple(level_out.shape)) ``` 以上脚本的输出为: ```python (1, 64, 8, 8) (1, 128, 4, 4) (1, 256, 2, 2) (1, 512, 1, 1) ``` 用户可以通过将脚本中的 `ResNet(depth=18)` 替换为自己的骨干网络配置来得到新的骨干网络的通道数。
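如果骨干网络是在配置文件中以字典形式定义的,也可以直接通过注册器构建后做同样的检查(示意代码,以 ResNet-50 为例;输入尺寸是随意取的,只要能被网络正常下采样即可):

```python
import torch

from mmdet.models import build_backbone

# 与配置文件中 backbone 字段相同的写法
backbone_cfg = dict(
    type='ResNet', depth=50, num_stages=4, out_indices=(0, 1, 2, 3))
backbone = build_backbone(backbone_cfg)
backbone.eval()

with torch.no_grad():
    level_outputs = backbone(torch.rand(1, 3, 64, 64))
for level_out in level_outputs:
    print(tuple(level_out.shape))  # 每个输出的通道数即 neck 所需的 in_channels
```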
6,933
32.990196
325
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/init_cfg.md
# 教程 10: 权重初始化 在训练过程中,适当的初始化策略有利于加快训练速度或获得更⾼的性能。 [MMCV](https://github.com/open-mmlab/mmcv/blob/master/mmcv/cnn/utils/weight_init.py) 提供了一些常⽤的初始化模块的⽅法,如 `nn.Conv2d`。 MMdetection 中的模型初始化主要使⽤ `init_cfg`。⽤⼾可以通过以下两个步骤来初始化模型: 1. 在 `model_cfg` 中为模型或其组件定义 `init_cfg`,但⼦组件的 `init_cfg` 优先级更⾼,会覆盖⽗模块的 `init_cfg` 。 2. 像往常一样构建模型,然后显式调⽤ `model.init_weights()` ⽅法,此时模型参数将会被按照配置文件写法进行初始化。 MMdetection 初始化工作流的高层 API 调用流程是: model_cfg(init_cfg) -> build_from_cfg -> model -> init_weight() -> initialize(self, self.init_cfg) -> children's init_weight() ### 描述 它的数据类型是 dict 或者 list\[dict\],包含了下列键值: - `type` (str),包含 `INTIALIZERS` 中的初始化器名称,后面跟着初始化器的参数。 - `layer`(str 或 list\[str\]),包含 Pytorch 或 MMCV 中基本层的名称,以及将被初始化的可学习参数,例如 `'Conv2d'`,`'DeformConv2d'`。 - `override` (dict 或 list\[dict\]),包含不继承⾃ `BaseModule` 且其初始化配置与 `layer` 键中的其他层不同的⼦模块。 `type` 中定义的初始化器将适⽤于 `layer` 中定义的所有层,因此如果⼦模块不是 `BaseModule` 的派⽣类但可以与 `layer` 中的层相同的⽅式初始化,则不需要使⽤ `override`。`override` 包含了: - `type` 后跟初始化器的参数; - `name` 用以指⽰将被初始化的⼦模块。 ### 初始化参数 从 `mmcv.runner.BaseModule` 或 `mmdet.models` 继承一个新模型。这里我们用 FooModel 来举个例子。 ```python import torch.nn as nn from mmcv.runner import BaseModule class FooModel(BaseModule) def __init__(self, arg1, arg2, init_cfg=None): super(FooModel, self).__init__(init_cfg) ... ``` - 直接在代码中使⽤ `init_cfg` 初始化模型 ```python import torch.nn as nn from mmcv.runner import BaseModule # or directly inherit mmdet models class FooModel(BaseModule) def __init__(self, arg1, arg2, init_cfg=XXX): super(FooModel, self).__init__(init_cfg) ... ``` - 在 `mmcv.Sequential` 或 `mmcv.ModuleList` 代码中直接使⽤ `init_cfg` 初始化模型 ```python from mmcv.runner import BaseModule, ModuleList class FooModel(BaseModule) def __init__(self, arg1, arg2, init_cfg=None): super(FooModel, self).__init__(init_cfg) ... self.conv1 = ModuleList(init_cfg=XXX) ``` - 使⽤配置⽂件中的 `init_cfg` 初始化模型 ```python model = dict( ... model = dict( type='FooModel', arg1=XXX, arg2=XXX, init_cfg=XXX), ... ``` ### init_cfg 的使用 1. 用 `layer` 键初始化模型 如果我们只定义了 `layer`, 它只会在 `layer` 键中初始化网络层。 注意: `layer` 键对应的值是 Pytorch 的带有 weights 和 bias 属性的类名(因此不⽀持 `MultiheadAttention` 层)。 - 定义⽤于初始化具有相同配置的模块的 `layer` 键。 ```python init_cfg = dict(type='Constant', layer=['Conv1d', 'Conv2d', 'Linear'], val=1) # ⽤相同的配置初始化整个模块 ``` - 定义⽤于初始化具有不同配置的层的 `layer` 键。 ```python init_cfg = [dict(type='Constant', layer='Conv1d', val=1), dict(type='Constant', layer='Conv2d', val=2), dict(type='Constant', layer='Linear', val=3)] # nn.Conv1d 将被初始化为 dict(type='Constant', val=1) # nn.Conv2d 将被初始化为 dict(type='Constant', val=2) # nn.Linear 将被初始化为 dict(type='Constant', val=3) ``` 2. 
使⽤ `override` 键初始化模型 - 当使⽤属性名初始化某些特定部分时,我们可以使⽤ `override` 键, `override` 中的值将忽略 init_cfg 中的值。 ```python # layers: # self.feat = nn.Conv1d(3, 1, 3) # self.reg = nn.Conv2d(3, 3, 3) # self.cls = nn.Linear(1,2) init_cfg = dict(type='Constant', layer=['Conv1d','Conv2d'], val=1, bias=2, override=dict(type='Constant', name='reg', val=3, bias=4)) # self.feat and self.cls 将被初始化为 dict(type='Constant', val=1, bias=2) # 叫 'reg' 的模块将被初始化为 dict(type='Constant', val=3, bias=4) ``` - 如果 init_cfg 中的 `layer` 为 None,则只会初始化 override 中有 name 的⼦模块,⽽ override 中的 type 和其他参数可以省略。 ```python # layers: # self.feat = nn.Conv1d(3, 1, 3) # self.reg = nn.Conv2d(3, 3, 3) # self.cls = nn.Linear(1,2) init_cfg = dict(type='Constant', val=1, bias=2, override=dict(name='reg')) # self.feat and self.cls 将被 Pytorch 初始化 # 叫 'reg' 的模块将被 dict(type='Constant', val=1, bias=2) 初始化 ``` - 如果我们不定义 `layer` 或 `override` 键,它不会初始化任何东西。 - 无效的使用 ```python # override 没有 name 键的话是无效的 init_cfg = dict(type='Constant', layer=['Conv1d','Conv2d'], val=1, bias=2, override=dict(type='Constant', val=3, bias=4)) # override 有 name 键和其他参数但是没有 type 键也是无效的 init_cfg = dict(type='Constant', layer=['Conv1d','Conv2d'], val=1, bias=2, override=dict(name='reg', val=3, bias=4)) ``` 3. 使⽤预训练模型初始化模型 ```python init_cfg = dict(type='Pretrained', checkpoint='torchvision://resnet50') ``` 更多细节可以参考 [MMCV](https://mmcv.readthedocs.io/en/latest/cnn.html#weight-initialization) 的文档和 MMCV [PR #780](https://github.com/open-mmlab/mmcv/pull/780)
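下面用一个玩具模块演示 `layer` 与 `override` 键的实际效果(示意代码,模块结构为假设):

```python
import torch.nn as nn
from mmcv.runner import BaseModule


class ToyModel(BaseModule):

    def __init__(self, init_cfg=None):
        super(ToyModel, self).__init__(init_cfg)
        self.conv = nn.Conv2d(3, 8, 3)
        self.reg = nn.Linear(8, 4)


model = ToyModel(
    init_cfg=dict(
        type='Constant',
        layer=['Conv2d', 'Linear'],
        val=1,
        bias=2,
        override=dict(type='Constant', name='reg', val=3, bias=4)))
model.init_weights()
print(model.conv.weight.unique(), model.conv.bias.unique())  # 1. 和 2.
print(model.reg.weight.unique(), model.reg.bias.unique())    # 3. 和 4.
```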
4,579
27.271605
207
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/onnx2tensorrt.md
# 教程 9: ONNX 到 TensorRT 的模型转换(实验性支持) > ## [尝试使用新的 MMDeploy 来部署你的模型](https://mmdeploy.readthedocs.io/) <!-- TOC --> - [教程 9: ONNX 到 TensorRT 的模型转换(实验性支持)](#%E6%95%99%E7%A8%8B-9-onnx-%E5%88%B0-tensorrt-%E7%9A%84%E6%A8%A1%E5%9E%8B%E8%BD%AC%E6%8D%A2%E5%AE%9E%E9%AA%8C%E6%80%A7%E6%94%AF%E6%8C%81) - [如何将模型从 ONNX 转换为 TensorRT](#%E5%A6%82%E4%BD%95%E5%B0%86%E6%A8%A1%E5%9E%8B%E4%BB%8E-onnx-%E8%BD%AC%E6%8D%A2%E4%B8%BA-tensorrt) - [先决条件](#%E5%85%88%E5%86%B3%E6%9D%A1%E4%BB%B6) - [用法](#%E7%94%A8%E6%B3%95) - [如何评估导出的模型](#%E5%A6%82%E4%BD%95%E8%AF%84%E4%BC%B0%E5%AF%BC%E5%87%BA%E7%9A%84%E6%A8%A1%E5%9E%8B) - [支持转换为 TensorRT 的模型列表](#%E6%94%AF%E6%8C%81%E8%BD%AC%E6%8D%A2%E4%B8%BA-tensorrt-%E7%9A%84%E6%A8%A1%E5%9E%8B%E5%88%97%E8%A1%A8) - [提醒](#%E6%8F%90%E9%86%92) - [常见问题](#%E5%B8%B8%E8%A7%81%E9%97%AE%E9%A2%98) <!-- TOC --> ## 如何将模型从 ONNX 转换为 TensorRT ### 先决条件 1. 请参考 [get_started.md](https://mmdetection.readthedocs.io/en/latest/get_started.html) 从源码安装 MMCV 和 MMDetection。 2. 请参考 [ONNXRuntime in mmcv](https://mmcv.readthedocs.io/en/latest/deployment/onnxruntime_op.html) 和 [TensorRT plugin in mmcv](https://github.com/open-mmlab/mmcv/blob/master/docs/en/deployment/tensorrt_plugin.md/) 安装支持 ONNXRuntime 自定义操作和 TensorRT 插件的 `mmcv-full`。 3. 使用工具 [pytorch2onnx](https://mmdetection.readthedocs.io/en/latest/tutorials/pytorch2onnx.html) 将模型从 PyTorch 转换为 ONNX。 ### 用法 ```bash python tools/deployment/onnx2tensorrt.py \ ${CONFIG} \ ${MODEL} \ --trt-file ${TRT_FILE} \ --input-img ${INPUT_IMAGE_PATH} \ --shape ${INPUT_IMAGE_SHAPE} \ --min-shape ${MIN_IMAGE_SHAPE} \ --max-shape ${MAX_IMAGE_SHAPE} \ --workspace-size {WORKSPACE_SIZE} \ --show \ --verify \ ``` 所有参数的说明: - `config`: 模型配置文件的路径。 - `model`: ONNX 模型文件的路径。 - `--trt-file`: 输出 TensorRT 引擎文件的路径。如果未指定,它将被设置为 `tmp.trt`。 - `--input-img`: 用于追踪和转换的输入图像的路径。默认情况下,它将设置为 `demo/demo.jpg`。 - `--shape`: 模型输入的高度和宽度。如果未指定,它将设置为 `400 600`。 - `--min-shape`: 模型输入的最小高度和宽度。如果未指定,它将被设置为与 `--shape` 相同。 - `--max-shape`: 模型输入的最大高度和宽度。如果未指定,它将被设置为与 `--shape` 相同。 - `--workspace-size`: 构建 TensorRT 引擎所需的 GPU 工作空间大小(以 GiB 为单位)。如果未指定,它将设置为 `1` GiB。 - `--show`: 确定是否显示模型的输出。如果未指定,它将设置为 `False`。 - `--verify`: 确定是否在 ONNXRuntime 和 TensorRT 之间验证模型的正确性。如果未指定,它将设置为 `False`。 - `--verbose`: 确定是否打印日志消息。它对调试很有用。如果未指定,它将设置为 `False`。 例子: ```bash python tools/deployment/onnx2tensorrt.py \ configs/retinanet/retinanet_r50_fpn_1x_coco.py \ checkpoints/retinanet_r50_fpn_1x_coco.onnx \ --trt-file checkpoints/retinanet_r50_fpn_1x_coco.trt \ --input-img demo/demo.jpg \ --shape 400 600 \ --show \ --verify \ ``` ## 如何评估导出的模型 我们准备了一个工具 `tools/deplopyment/test.py` 来评估 TensorRT 模型。 请参阅以下链接以获取更多信息。 - [如何评估导出的模型](pytorch2onnx.md#how-to-evaluate-the-exported-models) - [结果和模型](pytorch2onnx.md#results-and-models) ## 支持转换为 TensorRT 的模型列表 下表列出了确定可转换为 TensorRT 的模型。 | Model | Config | Dynamic Shape | Batch Inference | Note | | :----------------: | :--------------------------------------------------------------: | :-----------: | :-------------: | :--: | | SSD | `configs/ssd/ssd300_coco.py` | Y | Y | | | FSAF | `configs/fsaf/fsaf_r50_fpn_1x_coco.py` | Y | Y | | | FCOS | `configs/fcos/fcos_r50_caffe_fpn_4x4_1x_coco.py` | Y | Y | | | YOLOv3 | `configs/yolo/yolov3_d53_mstrain-608_273e_coco.py` | Y | Y | | | RetinaNet | `configs/retinanet/retinanet_r50_fpn_1x_coco.py` | Y | Y | | | Faster R-CNN | `configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py` | Y | Y | | | Cascade R-CNN | `configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py` | Y | Y | | | Mask R-CNN | `configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py` | Y | Y | | | Cascade Mask R-CNN | 
`configs/cascade_rcnn/cascade_mask_rcnn_r50_fpn_1x_coco.py` | Y | Y | | | PointRend | `configs/point_rend/point_rend_r50_caffe_fpn_mstrain_1x_coco.py` | Y | Y | | 注意: - *以上所有模型通过 Pytorch==1.6.0, onnx==1.7.0 与 TensorRT-7.2.1.6.Ubuntu-16.04.x86_64-gnu.cuda-10.2.cudnn8.0 测试* ## 提醒 - 如果您在上面列出的模型中遇到任何问题,请创建 issue,我们会尽快处理。对于未包含在列表中的模型,由于资源有限,我们可能无法在此提供太多帮助。请尝试深入挖掘并自行调试。 - 由于此功能是实验性的,并且可能会快速更改,因此请始终尝试使用最新的 `mmcv` 和 `mmdetecion`。 ## 常见问题 - 空
4,722
43.140187
263
md
mmdetection
mmdetection-master/docs/zh_cn/tutorials/pytorch2onnx.md
# 教程 8: Pytorch 到 ONNX 的模型转换(实验性支持)

> ## [尝试使用新的 MMDeploy 来部署你的模型](https://mmdeploy.readthedocs.io/)
102
24.75
64
md
mmdetection
mmdetection-master/mmdet/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. import mmcv from .version import __version__, short_version def digit_version(version_str): digit_version = [] for x in version_str.split('.'): if x.isdigit(): digit_version.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') digit_version.append(int(patch_version[0]) - 1) digit_version.append(int(patch_version[1])) return digit_version mmcv_minimum_version = '1.3.17' mmcv_maximum_version = '1.8.0' mmcv_version = digit_version(mmcv.__version__) assert (mmcv_version >= digit_version(mmcv_minimum_version) and mmcv_version <= digit_version(mmcv_maximum_version)), \ f'MMCV=={mmcv.__version__} is used but incompatible. ' \ f'Please install mmcv>={mmcv_minimum_version}, <={mmcv_maximum_version}.' __all__ = ['__version__', 'short_version']
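# 用法示意(编辑者补充,非原始源码):digit_version 把版本字符串解析成可比较的整数列表,
# 其中 rc 预发布版本会先把补丁号减 1 再追加 rc 序号,从而排在对应的正式版本之前,例如:
#   digit_version('1.3.17')   -> [1, 3, 17]
#   digit_version('1.7.0rc1') -> [1, 7, -1, 1]  # 列表比较时小于 [1, 7, 0]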
909
29.333333
77
py
mmdetection
mmdetection-master/mmdet/version.py
# Copyright (c) OpenMMLab. All rights reserved. __version__ = '2.28.2' short_version = __version__ def parse_version_info(version_str): version_info = [] for x in version_str.split('.'): if x.isdigit(): version_info.append(int(x)) elif x.find('rc') != -1: patch_version = x.split('rc') version_info.append(int(patch_version[0])) version_info.append(f'rc{patch_version[1]}') return tuple(version_info) version_info = parse_version_info(__version__)
529
25.5
56
py
mmdetection
mmdetection-master/mmdet/apis/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .inference import (async_inference_detector, inference_detector, init_detector, show_result_pyplot) from .test import multi_gpu_test, single_gpu_test from .train import (get_root_logger, init_random_seed, set_random_seed, train_detector) __all__ = [ 'get_root_logger', 'set_random_seed', 'train_detector', 'init_detector', 'async_inference_detector', 'inference_detector', 'show_result_pyplot', 'multi_gpu_test', 'single_gpu_test', 'init_random_seed' ]
563
42.384615
76
py
mmdetection
mmdetection-master/mmdet/apis/inference.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from pathlib import Path import mmcv import numpy as np import torch from mmcv.ops import RoIPool from mmcv.parallel import collate, scatter from mmcv.runner import load_checkpoint from mmdet.core import get_classes from mmdet.datasets import replace_ImageToTensor from mmdet.datasets.pipelines import Compose from mmdet.models import build_detector def init_detector(config, checkpoint=None, device='cuda:0', cfg_options=None): """Initialize a detector from config file. Args: config (str, :obj:`Path`, or :obj:`mmcv.Config`): Config file path, :obj:`Path`, or the config object. checkpoint (str, optional): Checkpoint path. If left as None, the model will not load any weights. cfg_options (dict): Options to override some settings in the used config. Returns: nn.Module: The constructed detector. """ if isinstance(config, (str, Path)): config = mmcv.Config.fromfile(config) elif not isinstance(config, mmcv.Config): raise TypeError('config must be a filename or Config object, ' f'but got {type(config)}') if cfg_options is not None: config.merge_from_dict(cfg_options) if 'pretrained' in config.model: config.model.pretrained = None elif 'init_cfg' in config.model.backbone: config.model.backbone.init_cfg = None config.model.train_cfg = None model = build_detector(config.model, test_cfg=config.get('test_cfg')) if checkpoint is not None: checkpoint = load_checkpoint(model, checkpoint, map_location='cpu') if 'CLASSES' in checkpoint.get('meta', {}): model.CLASSES = checkpoint['meta']['CLASSES'] else: warnings.simplefilter('once') warnings.warn('Class names are not saved in the checkpoint\'s ' 'meta data, use COCO classes by default.') model.CLASSES = get_classes('coco') model.cfg = config # save the config in the model for convenience model.to(device) model.eval() if device == 'npu': from mmcv.device.npu import NPUDataParallel model = NPUDataParallel(model) model.cfg = config return model class LoadImage: """Deprecated. A simple pipeline to load image. """ def __call__(self, results): """Call function to load images into results. Args: results (dict): A result dict contains the file name of the image to be read. Returns: dict: ``results`` will be returned containing loaded image. """ warnings.simplefilter('once') warnings.warn('`LoadImage` is deprecated and will be removed in ' 'future releases. You may use `LoadImageFromWebcam` ' 'from `mmdet.datasets.pipelines.` instead.') if isinstance(results['img'], str): results['filename'] = results['img'] results['ori_filename'] = results['img'] else: results['filename'] = None results['ori_filename'] = None img = mmcv.imread(results['img']) results['img'] = img results['img_fields'] = ['img'] results['img_shape'] = img.shape results['ori_shape'] = img.shape return results def inference_detector(model, imgs): """Inference image(s) with the detector. Args: model (nn.Module): The loaded detector. imgs (str/ndarray or list[str/ndarray] or tuple[str/ndarray]): Either image files or loaded images. Returns: If imgs is a list or tuple, the same length list type results will be returned, otherwise return the detection results directly. 
""" if isinstance(imgs, (list, tuple)): is_batch = True else: imgs = [imgs] is_batch = False cfg = model.cfg device = next(model.parameters()).device # model device if isinstance(imgs[0], np.ndarray): cfg = cfg.copy() # set loading pipeline type cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) test_pipeline = Compose(cfg.data.test.pipeline) datas = [] for img in imgs: # prepare data if isinstance(img, np.ndarray): # directly add img data = dict(img=img) else: # add information into dict data = dict(img_info=dict(filename=img), img_prefix=None) # build the data pipeline data = test_pipeline(data) datas.append(data) data = collate(datas, samples_per_gpu=len(imgs)) # just get the actual data from DataContainer data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] data['img'] = [img.data[0] for img in data['img']] if next(model.parameters()).is_cuda: # scatter to specified GPU data = scatter(data, [device])[0] else: for m in model.modules(): assert not isinstance( m, RoIPool ), 'CPU inference with RoIPool is not supported currently.' # forward the model with torch.no_grad(): results = model(return_loss=False, rescale=True, **data) if not is_batch: return results[0] else: return results async def async_inference_detector(model, imgs): """Async inference image(s) with the detector. Args: model (nn.Module): The loaded detector. img (str | ndarray): Either image files or loaded images. Returns: Awaitable detection results. """ if not isinstance(imgs, (list, tuple)): imgs = [imgs] cfg = model.cfg device = next(model.parameters()).device # model device if isinstance(imgs[0], np.ndarray): cfg = cfg.copy() # set loading pipeline type cfg.data.test.pipeline[0].type = 'LoadImageFromWebcam' cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline) test_pipeline = Compose(cfg.data.test.pipeline) datas = [] for img in imgs: # prepare data if isinstance(img, np.ndarray): # directly add img data = dict(img=img) else: # add information into dict data = dict(img_info=dict(filename=img), img_prefix=None) # build the data pipeline data = test_pipeline(data) datas.append(data) data = collate(datas, samples_per_gpu=len(imgs)) # just get the actual data from DataContainer data['img_metas'] = [img_metas.data[0] for img_metas in data['img_metas']] data['img'] = [img.data[0] for img in data['img']] if next(model.parameters()).is_cuda: # scatter to specified GPU data = scatter(data, [device])[0] else: for m in model.modules(): assert not isinstance( m, RoIPool ), 'CPU inference with RoIPool is not supported currently.' # We don't restore `torch.is_grad_enabled()` value during concurrent # inference since execution can overlap torch.set_grad_enabled(False) results = await model.aforward_test(rescale=True, **data) return results def show_result_pyplot(model, img, result, score_thr=0.3, title='result', wait_time=0, palette=None, out_file=None): """Visualize the detection results on the image. Args: model (nn.Module): The loaded detector. img (str or np.ndarray): Image filename or loaded image. result (tuple[list] or list): The detection result, can be either (bbox, segm) or just bbox. score_thr (float): The threshold to visualize the bboxes and masks. title (str): Title of the pyplot figure. wait_time (float): Value of waitKey param. Default: 0. palette (str or tuple(int) or :obj:`Color`): Color. The tuple of color should be in BGR order. out_file (str or None): The path to write the image. Default: None. 
""" if hasattr(model, 'module'): model = model.module model.show_result( img, result, score_thr=score_thr, show=True, wait_time=wait_time, win_name=title, bbox_color=palette, text_color=(200, 200, 200), mask_color=palette, out_file=out_file)
8,629
32.449612
79
py
mmdetection
mmdetection-master/mmdet/apis/test.py
# Copyright (c) OpenMMLab. All rights reserved. import os.path as osp import pickle import shutil import tempfile import time import mmcv import torch import torch.distributed as dist from mmcv.image import tensor2imgs from mmcv.runner import get_dist_info from mmdet.core import encode_mask_results def single_gpu_test(model, data_loader, show=False, out_dir=None, show_score_thr=0.3): model.eval() results = [] dataset = data_loader.dataset PALETTE = getattr(dataset, 'PALETTE', None) prog_bar = mmcv.ProgressBar(len(dataset)) for i, data in enumerate(data_loader): with torch.no_grad(): result = model(return_loss=False, rescale=True, **data) batch_size = len(result) if show or out_dir: if batch_size == 1 and isinstance(data['img'][0], torch.Tensor): img_tensor = data['img'][0] else: img_tensor = data['img'][0].data[0] img_metas = data['img_metas'][0].data[0] imgs = tensor2imgs(img_tensor, **img_metas[0]['img_norm_cfg']) assert len(imgs) == len(img_metas) for i, (img, img_meta) in enumerate(zip(imgs, img_metas)): h, w, _ = img_meta['img_shape'] img_show = img[:h, :w, :] ori_h, ori_w = img_meta['ori_shape'][:-1] img_show = mmcv.imresize(img_show, (ori_w, ori_h)) if out_dir: out_file = osp.join(out_dir, img_meta['ori_filename']) else: out_file = None model.module.show_result( img_show, result[i], bbox_color=PALETTE, text_color=PALETTE, mask_color=PALETTE, show=show, out_file=out_file, score_thr=show_score_thr) # encode mask results if isinstance(result[0], tuple): result = [(bbox_results, encode_mask_results(mask_results)) for bbox_results, mask_results in result] # This logic is only used in panoptic segmentation test. elif isinstance(result[0], dict) and 'ins_results' in result[0]: for j in range(len(result)): bbox_results, mask_results = result[j]['ins_results'] result[j]['ins_results'] = (bbox_results, encode_mask_results(mask_results)) results.extend(result) for _ in range(batch_size): prog_bar.update() return results def multi_gpu_test(model, data_loader, tmpdir=None, gpu_collect=False): """Test model with multiple gpus. This method tests model with multiple gpus and collects the results under two different modes: gpu and cpu modes. By setting 'gpu_collect=True' it encodes results to gpu tensors and use gpu communication for results collection. On cpu mode it saves the results on different gpus to 'tmpdir' and collects them by the rank 0 worker. Args: model (nn.Module): Model to be tested. data_loader (nn.Dataloader): Pytorch data loader. tmpdir (str): Path of directory to save the temporary results from different gpus under cpu mode. gpu_collect (bool): Option to use either gpu or cpu to collect results. Returns: list: The prediction results. """ model.eval() results = [] dataset = data_loader.dataset rank, world_size = get_dist_info() if rank == 0: prog_bar = mmcv.ProgressBar(len(dataset)) time.sleep(2) # This line can prevent deadlock problem in some cases. for i, data in enumerate(data_loader): with torch.no_grad(): result = model(return_loss=False, rescale=True, **data) # encode mask results if isinstance(result[0], tuple): result = [(bbox_results, encode_mask_results(mask_results)) for bbox_results, mask_results in result] # This logic is only used in panoptic segmentation test. 
elif isinstance(result[0], dict) and 'ins_results' in result[0]: for j in range(len(result)): bbox_results, mask_results = result[j]['ins_results'] result[j]['ins_results'] = ( bbox_results, encode_mask_results(mask_results)) results.extend(result) if rank == 0: batch_size = len(result) for _ in range(batch_size * world_size): prog_bar.update() # collect results from all ranks if gpu_collect: results = collect_results_gpu(results, len(dataset)) else: results = collect_results_cpu(results, len(dataset), tmpdir) return results def collect_results_cpu(result_part, size, tmpdir=None): rank, world_size = get_dist_info() # create a tmp dir if it is not specified if tmpdir is None: MAX_LEN = 512 # 32 is whitespace dir_tensor = torch.full((MAX_LEN, ), 32, dtype=torch.uint8, device='cuda') if rank == 0: mmcv.mkdir_or_exist('.dist_test') tmpdir = tempfile.mkdtemp(dir='.dist_test') tmpdir = torch.tensor( bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda') dir_tensor[:len(tmpdir)] = tmpdir dist.broadcast(dir_tensor, 0) tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip() else: mmcv.mkdir_or_exist(tmpdir) # dump the part result to the dir mmcv.dump(result_part, osp.join(tmpdir, f'part_{rank}.pkl')) dist.barrier() # collect all parts if rank != 0: return None else: # load results of all parts from tmp dir part_list = [] for i in range(world_size): part_file = osp.join(tmpdir, f'part_{i}.pkl') part_list.append(mmcv.load(part_file)) # sort the results ordered_results = [] for res in zip(*part_list): ordered_results.extend(list(res)) # the dataloader may pad some samples ordered_results = ordered_results[:size] # remove tmp dir shutil.rmtree(tmpdir) return ordered_results def collect_results_gpu(result_part, size): rank, world_size = get_dist_info() # dump result part to tensor with pickle part_tensor = torch.tensor( bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda') # gather all result part tensor shape shape_tensor = torch.tensor(part_tensor.shape, device='cuda') shape_list = [shape_tensor.clone() for _ in range(world_size)] dist.all_gather(shape_list, shape_tensor) # padding result part tensor to max length shape_max = torch.tensor(shape_list).max() part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda') part_send[:shape_tensor[0]] = part_tensor part_recv_list = [ part_tensor.new_zeros(shape_max) for _ in range(world_size) ] # gather all result part dist.all_gather(part_recv_list, part_send) if rank == 0: part_list = [] for recv, shape in zip(part_recv_list, shape_list): part_list.append( pickle.loads(recv[:shape[0]].cpu().numpy().tobytes())) # sort the results ordered_results = [] for res in zip(*part_list): ordered_results.extend(list(res)) # the dataloader may pad some samples ordered_results = ordered_results[:size] return ordered_results
7,817
36.228571
79
py
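The per-rank result collection in multi_gpu_test above relies on the sampler's round-robin sharding: rank r processes samples r, r + world_size, r + 2 * world_size, ..., so interleaving the per-rank lists with zip restores dataset order. Below is a minimal, framework-free sketch of that ordering step; the world size, dataset size and per-rank lists are made-up toy values.

# Toy illustration of the ordering step in collect_results_cpu/_gpu above.
world_size = 3
dataset_size = 8  # the dataloader may pad up to a multiple of world_size

# Hypothetical per-rank result lists; sample indices stand in for detections.
part_list = [
    [0, 3, 6],  # rank 0
    [1, 4, 7],  # rank 1
    [2, 5, 8],  # rank 2 (index 8 is a padded extra sample)
]

# Interleave the parts to recover dataset order, then drop the padding.
ordered_results = []
for res in zip(*part_list):
    ordered_results.extend(list(res))
ordered_results = ordered_results[:dataset_size]

assert ordered_results == [0, 1, 2, 3, 4, 5, 6, 7]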
mmdetection
mmdetection-master/mmdet/apis/train.py
# Copyright (c) OpenMMLab. All rights reserved. import os import random import numpy as np import torch import torch.distributed as dist from mmcv.runner import (DistSamplerSeedHook, EpochBasedRunner, Fp16OptimizerHook, OptimizerHook, build_runner, get_dist_info) from mmdet.core import DistEvalHook, EvalHook, build_optimizer from mmdet.datasets import (build_dataloader, build_dataset, replace_ImageToTensor) from mmdet.utils import (build_ddp, build_dp, compat_cfg, find_latest_checkpoint, get_root_logger) def init_random_seed(seed=None, device='cuda'): """Initialize random seed. If the seed is not set, the seed will be automatically randomized, and then broadcast to all processes to prevent some potential bugs. Args: seed (int, Optional): The seed. Default to None. device (str): The device where the seed will be put on. Default to 'cuda'. Returns: int: Seed to be used. """ if seed is not None: return seed # Make sure all ranks share the same random seed to prevent # some potential bugs. Please refer to # https://github.com/open-mmlab/mmdetection/issues/6339 rank, world_size = get_dist_info() seed = np.random.randint(2**31) if world_size == 1: return seed if rank == 0: random_num = torch.tensor(seed, dtype=torch.int32, device=device) else: random_num = torch.tensor(0, dtype=torch.int32, device=device) dist.broadcast(random_num, src=0) return random_num.item() def set_random_seed(seed, deterministic=False): """Set random seed. Args: seed (int): Seed to be used. deterministic (bool): Whether to set the deterministic option for CUDNN backend, i.e., set `torch.backends.cudnn.deterministic` to True and `torch.backends.cudnn.benchmark` to False. Default: False. """ random.seed(seed) np.random.seed(seed) torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) if deterministic: torch.backends.cudnn.deterministic = True torch.backends.cudnn.benchmark = False def auto_scale_lr(cfg, distributed, logger): """Automatically scaling LR according to GPU number and sample per GPU. Args: cfg (config): Training config. distributed (bool): Using distributed or not. logger (logging.Logger): Logger. """ # Get flag from config if ('auto_scale_lr' not in cfg) or \ (not cfg.auto_scale_lr.get('enable', False)): logger.info('Automatic scaling of learning rate (LR)' ' has been disabled.') return # Get base batch size from config base_batch_size = cfg.auto_scale_lr.get('base_batch_size', None) if base_batch_size is None: return # Get gpu number if distributed: _, world_size = get_dist_info() num_gpus = len(range(world_size)) else: num_gpus = len(cfg.gpu_ids) # calculate the batch size samples_per_gpu = cfg.data.train_dataloader.samples_per_gpu batch_size = num_gpus * samples_per_gpu logger.info(f'Training with {num_gpus} GPU(s) with {samples_per_gpu} ' f'samples per GPU. 
The total batch size is {batch_size}.') if batch_size != base_batch_size: # scale LR with # [linear scaling rule](https://arxiv.org/abs/1706.02677) scaled_lr = (batch_size / base_batch_size) * cfg.optimizer.lr logger.info('LR has been automatically scaled ' f'from {cfg.optimizer.lr} to {scaled_lr}') cfg.optimizer.lr = scaled_lr else: logger.info('The batch size match the ' f'base batch size: {base_batch_size}, ' f'will not scaling the LR ({cfg.optimizer.lr}).') def train_detector(model, dataset, cfg, distributed=False, validate=False, timestamp=None, meta=None): cfg = compat_cfg(cfg) logger = get_root_logger(log_level=cfg.log_level) # prepare data loaders dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset] runner_type = 'EpochBasedRunner' if 'runner' not in cfg else cfg.runner[ 'type'] train_dataloader_default_args = dict( samples_per_gpu=2, workers_per_gpu=2, # `num_gpus` will be ignored if distributed num_gpus=len(cfg.gpu_ids), dist=distributed, seed=cfg.seed, runner_type=runner_type, persistent_workers=False) train_loader_cfg = { **train_dataloader_default_args, **cfg.data.get('train_dataloader', {}) } data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset] # put model on gpus if distributed: find_unused_parameters = cfg.get('find_unused_parameters', False) # Sets the `find_unused_parameters` parameter in # torch.nn.parallel.DistributedDataParallel model = build_ddp( model, cfg.device, device_ids=[int(os.environ['LOCAL_RANK'])], broadcast_buffers=False, find_unused_parameters=find_unused_parameters) else: model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids) # build optimizer auto_scale_lr(cfg, distributed, logger) optimizer = build_optimizer(model, cfg.optimizer) runner = build_runner( cfg.runner, default_args=dict( model=model, optimizer=optimizer, work_dir=cfg.work_dir, logger=logger, meta=meta)) # an ugly workaround to make .log and .log.json filenames the same runner.timestamp = timestamp # fp16 setting fp16_cfg = cfg.get('fp16', None) if fp16_cfg is None and cfg.get('device', None) == 'npu': fp16_cfg = dict(loss_scale='dynamic') if fp16_cfg is not None: optimizer_config = Fp16OptimizerHook( **cfg.optimizer_config, **fp16_cfg, distributed=distributed) elif distributed and 'type' not in cfg.optimizer_config: optimizer_config = OptimizerHook(**cfg.optimizer_config) else: optimizer_config = cfg.optimizer_config # register hooks runner.register_training_hooks( cfg.lr_config, optimizer_config, cfg.checkpoint_config, cfg.log_config, cfg.get('momentum_config', None), custom_hooks_config=cfg.get('custom_hooks', None)) if distributed: if isinstance(runner, EpochBasedRunner): runner.register_hook(DistSamplerSeedHook()) # register eval hooks if validate: val_dataloader_default_args = dict( samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False, persistent_workers=False) val_dataloader_args = { **val_dataloader_default_args, **cfg.data.get('val_dataloader', {}) } # Support batch_size > 1 in validation if val_dataloader_args['samples_per_gpu'] > 1: # Replace 'ImageToTensor' to 'DefaultFormatBundle' cfg.data.val.pipeline = replace_ImageToTensor( cfg.data.val.pipeline) val_dataset = build_dataset(cfg.data.val, dict(test_mode=True)) val_dataloader = build_dataloader(val_dataset, **val_dataloader_args) eval_cfg = cfg.get('evaluation', {}) eval_cfg['by_epoch'] = cfg.runner['type'] != 'IterBasedRunner' eval_hook = DistEvalHook if distributed else EvalHook # In this PR (https://github.com/open-mmlab/mmcv/pull/1193), the # priority of 
IterTimerHook has been modified from 'NORMAL' to 'LOW'. runner.register_hook( eval_hook(val_dataloader, **eval_cfg), priority='LOW') resume_from = None if cfg.resume_from is None and cfg.get('auto_resume'): resume_from = find_latest_checkpoint(cfg.work_dir) if resume_from is not None: cfg.resume_from = resume_from if cfg.resume_from: runner.resume(cfg.resume_from) elif cfg.load_from: runner.load_checkpoint(cfg.load_from) runner.run(data_loaders, cfg.workflow)
8,379
32.927126
79
py
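A small arithmetic sketch of the linear scaling rule that auto_scale_lr above applies when the effective batch size differs from cfg.auto_scale_lr.base_batch_size; the GPU count, samples per GPU and learning rates below are hypothetical.

# Linear scaling rule (https://arxiv.org/abs/1706.02677) as in auto_scale_lr.
base_batch_size = 16  # e.g. the LR below was tuned for 8 GPUs x 2 samples/GPU
base_lr = 0.02

num_gpus = 4          # hypothetical run
samples_per_gpu = 2
batch_size = num_gpus * samples_per_gpu  # 8

scaled_lr = base_lr * batch_size / base_batch_size
print(f'LR scaled from {base_lr} to {scaled_lr}')  # 0.02 -> 0.01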
mmdetection
mmdetection-master/mmdet/core/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .anchor import * # noqa: F401, F403 from .bbox import * # noqa: F401, F403 from .data_structures import * # noqa: F401, F403 from .evaluation import * # noqa: F401, F403 from .hook import * # noqa: F401, F403 from .mask import * # noqa: F401, F403 from .optimizers import * # noqa: F401, F403 from .post_processing import * # noqa: F401, F403 from .utils import * # noqa: F401, F403
445
39.545455
50
py
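The wildcard imports above flatten the subpackages into the mmdet.core namespace, so downstream code can import these utilities from mmdet.core directly. A brief sketch, assuming mmdet is installed:

# These names are defined in subpackages (mmdet.core.anchor, ...) but are
# re-exported at the package level by the wildcard imports above.
from mmdet.core import AnchorGenerator, anchor_inside_flags

print(AnchorGenerator.__module__)      # mmdet.core.anchor.anchor_generator
print(anchor_inside_flags.__module__)  # mmdet.core.anchor.utils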
mmdetection
mmdetection-master/mmdet/core/anchor/__init__.py
# Copyright (c) OpenMMLab. All rights reserved. from .anchor_generator import (AnchorGenerator, LegacyAnchorGenerator, YOLOAnchorGenerator) from .builder import (ANCHOR_GENERATORS, PRIOR_GENERATORS, build_anchor_generator, build_prior_generator) from .point_generator import MlvlPointGenerator, PointGenerator from .utils import anchor_inside_flags, calc_region, images_to_levels __all__ = [ 'AnchorGenerator', 'LegacyAnchorGenerator', 'anchor_inside_flags', 'PointGenerator', 'images_to_levels', 'calc_region', 'build_anchor_generator', 'ANCHOR_GENERATORS', 'YOLOAnchorGenerator', 'build_prior_generator', 'PRIOR_GENERATORS', 'MlvlPointGenerator' ]
720
47.066667
73
py
mmdetection
mmdetection-master/mmdet/core/anchor/anchor_generator.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings import mmcv import numpy as np import torch from torch.nn.modules.utils import _pair from .builder import PRIOR_GENERATORS @PRIOR_GENERATORS.register_module() class AnchorGenerator: """Standard anchor generator for 2D anchor-based detectors. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels in order (w, h). ratios (list[float]): The list of ratios between the height and width of anchors in a single level. scales (list[int] | None): Anchor scales for anchors in a single level. It cannot be set at the same time if `octave_base_scale` and `scales_per_octave` are set. base_sizes (list[int] | None): The basic sizes of anchors in multiple levels. If None is given, strides will be used as base_sizes. (If strides are non square, the shortest stride is taken.) scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. By default it is True in V2.0 octave_base_scale (int): The base scale of octave. scales_per_octave (int): Number of scales for each octave. `octave_base_scale` and `scales_per_octave` are usually used in retinanet and the `scales` should be None when they are set. centers (list[tuple[float, float]] | None): The centers of the anchor relative to the feature grid center in multiple feature levels. By default it is set to be None and not used. If a list of tuple of float is given, they will be used to shift the centers of anchors. center_offset (float): The offset of center in proportion to anchors' width and height. By default it is 0 in V2.0. Examples: >>> from mmdet.core import AnchorGenerator >>> self = AnchorGenerator([16], [1.], [1.], [9]) >>> all_anchors = self.grid_priors([(2, 2)], device='cpu') >>> print(all_anchors) [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], [11.5000, -4.5000, 20.5000, 4.5000], [-4.5000, 11.5000, 4.5000, 20.5000], [11.5000, 11.5000, 20.5000, 20.5000]])] >>> self = AnchorGenerator([16, 32], [1.], [1.], [9, 18]) >>> all_anchors = self.grid_priors([(2, 2), (1, 1)], device='cpu') >>> print(all_anchors) [tensor([[-4.5000, -4.5000, 4.5000, 4.5000], [11.5000, -4.5000, 20.5000, 4.5000], [-4.5000, 11.5000, 4.5000, 20.5000], [11.5000, 11.5000, 20.5000, 20.5000]]), \ tensor([[-9., -9., 9., 9.]])] """ def __init__(self, strides, ratios, scales=None, base_sizes=None, scale_major=True, octave_base_scale=None, scales_per_octave=None, centers=None, center_offset=0.): # check center and center_offset if center_offset != 0: assert centers is None, 'center cannot be set when center_offset' \ f'!=0, {centers} is given.' 
if not (0 <= center_offset <= 1): raise ValueError('center_offset should be in range [0, 1], ' f'{center_offset} is given.') if centers is not None: assert len(centers) == len(strides), \ 'The number of strides should be the same as centers, got ' \ f'{strides} and {centers}' # calculate base sizes of anchors self.strides = [_pair(stride) for stride in strides] self.base_sizes = [min(stride) for stride in self.strides ] if base_sizes is None else base_sizes assert len(self.base_sizes) == len(self.strides), \ 'The number of strides should be the same as base sizes, got ' \ f'{self.strides} and {self.base_sizes}' # calculate scales of anchors assert ((octave_base_scale is not None and scales_per_octave is not None) ^ (scales is not None)), \ 'scales and octave_base_scale with scales_per_octave cannot' \ ' be set at the same time' if scales is not None: self.scales = torch.Tensor(scales) elif octave_base_scale is not None and scales_per_octave is not None: octave_scales = np.array( [2**(i / scales_per_octave) for i in range(scales_per_octave)]) scales = octave_scales * octave_base_scale self.scales = torch.Tensor(scales) else: raise ValueError('Either scales or octave_base_scale with ' 'scales_per_octave should be set') self.octave_base_scale = octave_base_scale self.scales_per_octave = scales_per_octave self.ratios = torch.Tensor(ratios) self.scale_major = scale_major self.centers = centers self.center_offset = center_offset self.base_anchors = self.gen_base_anchors() @property def num_base_anchors(self): """list[int]: total number of base anchors in a feature grid""" return self.num_base_priors @property def num_base_priors(self): """list[int]: The number of priors (anchors) at a point on the feature grid""" return [base_anchors.size(0) for base_anchors in self.base_anchors] @property def num_levels(self): """int: number of feature levels that the generator will be applied""" return len(self.strides) def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. """ multi_level_base_anchors = [] for i, base_size in enumerate(self.base_sizes): center = None if self.centers is not None: center = self.centers[i] multi_level_base_anchors.append( self.gen_single_level_base_anchors( base_size, scales=self.scales, ratios=self.ratios, center=center)) return multi_level_base_anchors def gen_single_level_base_anchors(self, base_size, scales, ratios, center=None): """Generate base anchors of a single level. Args: base_size (int | float): Basic size of an anchor. scales (torch.Tensor): Scales of the anchor. ratios (torch.Tensor): The ratio between between the height and width of anchors in a single level. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature maps. 
""" w = base_size h = base_size if center is None: x_center = self.center_offset * w y_center = self.center_offset * h else: x_center, y_center = center h_ratios = torch.sqrt(ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) else: ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) # use float anchor and the anchor's center is aligned with the # pixel center base_anchors = [ x_center - 0.5 * ws, y_center - 0.5 * hs, x_center + 0.5 * ws, y_center + 0.5 * hs ] base_anchors = torch.stack(base_anchors, dim=-1) return base_anchors def _meshgrid(self, x, y, row_major=True): """Generate mesh grid of x and y. Args: x (torch.Tensor): Grids of x dimension. y (torch.Tensor): Grids of y dimension. row_major (bool, optional): Whether to return y grids first. Defaults to True. Returns: tuple[torch.Tensor]: The mesh grids of x and y. """ # use shape instead of len to keep tracing while exporting to onnx xx = x.repeat(y.shape[0]) yy = y.view(-1, 1).repeat(1, x.shape[0]).view(-1) if row_major: return xx, yy else: return yy, xx def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda'): """Generate grid anchors in multiple feature levels. Args: featmap_sizes (list[tuple]): List of feature map sizes in multiple feature levels. dtype (:obj:`torch.dtype`): Dtype of priors. Default: torch.float32. device (str): The device where the anchors will be put on. Return: list[torch.Tensor]: Anchors in multiple feature levels. \ The sizes of each tensor should be [N, 4], where \ N = width * height * num_base_anchors, width and height \ are the sizes of the corresponding feature level, \ num_base_anchors is the number of anchors for that level. """ assert self.num_levels == len(featmap_sizes) multi_level_anchors = [] for i in range(self.num_levels): anchors = self.single_level_grid_priors( featmap_sizes[i], level_idx=i, dtype=dtype, device=device) multi_level_anchors.append(anchors) return multi_level_anchors def single_level_grid_priors(self, featmap_size, level_idx, dtype=torch.float32, device='cuda'): """Generate grid anchors of a single level. Note: This function is usually called by method ``self.grid_priors``. Args: featmap_size (tuple[int]): Size of the feature maps. level_idx (int): The index of corresponding feature map level. dtype (obj:`torch.dtype`): Date type of points.Defaults to ``torch.float32``. device (str, optional): The device the tensor will be put on. Defaults to 'cuda'. Returns: torch.Tensor: Anchors in the overall feature maps. """ base_anchors = self.base_anchors[level_idx].to(device).to(dtype) feat_h, feat_w = featmap_size stride_w, stride_h = self.strides[level_idx] # First create Range with the default dtype, than convert to # target `dtype` for onnx exporting. shift_x = torch.arange(0, feat_w, device=device).to(dtype) * stride_w shift_y = torch.arange(0, feat_h, device=device).to(dtype) * stride_h shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) # first feat_w elements correspond to the first row of shifts # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get # shifted anchors (K, A, 4), reshape to (K*A, 4) all_anchors = base_anchors[None, :, :] + shifts[:, None, :] all_anchors = all_anchors.view(-1, 4) # first A rows correspond to A anchors of (0, 0) in feature map, # then (0, 1), (0, 2), ... 
return all_anchors def sparse_priors(self, prior_idxs, featmap_size, level_idx, dtype=torch.float32, device='cuda'): """Generate sparse anchors according to the ``prior_idxs``. Args: prior_idxs (Tensor): The index of corresponding anchors in the feature map. featmap_size (tuple[int]): feature map size arrange as (h, w). level_idx (int): The level index of corresponding feature map. dtype (obj:`torch.dtype`): Date type of points.Defaults to ``torch.float32``. device (obj:`torch.device`): The device where the points is located. Returns: Tensor: Anchor with shape (N, 4), N should be equal to the length of ``prior_idxs``. """ height, width = featmap_size num_base_anchors = self.num_base_anchors[level_idx] base_anchor_id = prior_idxs % num_base_anchors x = (prior_idxs // num_base_anchors) % width * self.strides[level_idx][0] y = (prior_idxs // width // num_base_anchors) % height * self.strides[level_idx][1] priors = torch.stack([x, y, x, y], 1).to(dtype).to(device) + \ self.base_anchors[level_idx][base_anchor_id, :].to(device) return priors def grid_anchors(self, featmap_sizes, device='cuda'): """Generate grid anchors in multiple feature levels. Args: featmap_sizes (list[tuple]): List of feature map sizes in multiple feature levels. device (str): Device where the anchors will be put on. Return: list[torch.Tensor]: Anchors in multiple feature levels. \ The sizes of each tensor should be [N, 4], where \ N = width * height * num_base_anchors, width and height \ are the sizes of the corresponding feature level, \ num_base_anchors is the number of anchors for that level. """ warnings.warn('``grid_anchors`` would be deprecated soon. ' 'Please use ``grid_priors`` ') assert self.num_levels == len(featmap_sizes) multi_level_anchors = [] for i in range(self.num_levels): anchors = self.single_level_grid_anchors( self.base_anchors[i].to(device), featmap_sizes[i], self.strides[i], device=device) multi_level_anchors.append(anchors) return multi_level_anchors def single_level_grid_anchors(self, base_anchors, featmap_size, stride=(16, 16), device='cuda'): """Generate grid anchors of a single level. Note: This function is usually called by method ``self.grid_anchors``. Args: base_anchors (torch.Tensor): The base anchors of a feature grid. featmap_size (tuple[int]): Size of the feature maps. stride (tuple[int], optional): Stride of the feature map in order (w, h). Defaults to (16, 16). device (str, optional): Device the tensor will be put on. Defaults to 'cuda'. Returns: torch.Tensor: Anchors in the overall feature maps. """ warnings.warn( '``single_level_grid_anchors`` would be deprecated soon. ' 'Please use ``single_level_grid_priors`` ') # keep featmap_size as Tensor instead of int, so that we # can convert to ONNX correctly feat_h, feat_w = featmap_size shift_x = torch.arange(0, feat_w, device=device) * stride[0] shift_y = torch.arange(0, feat_h, device=device) * stride[1] shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) shifts = torch.stack([shift_xx, shift_yy, shift_xx, shift_yy], dim=-1) shifts = shifts.type_as(base_anchors) # first feat_w elements correspond to the first row of shifts # add A anchors (1, A, 4) to K shifts (K, 1, 4) to get # shifted anchors (K, A, 4), reshape to (K*A, 4) all_anchors = base_anchors[None, :, :] + shifts[:, None, :] all_anchors = all_anchors.view(-1, 4) # first A rows correspond to A anchors of (0, 0) in feature map, # then (0, 1), (0, 2), ... 
return all_anchors def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): """Generate valid flags of anchors in multiple feature levels. Args: featmap_sizes (list(tuple)): List of feature map sizes in multiple feature levels. pad_shape (tuple): The padded shape of the image. device (str): Device where the anchors will be put on. Return: list(torch.Tensor): Valid flags of anchors in multiple levels. """ assert self.num_levels == len(featmap_sizes) multi_level_flags = [] for i in range(self.num_levels): anchor_stride = self.strides[i] feat_h, feat_w = featmap_sizes[i] h, w = pad_shape[:2] valid_feat_h = min(int(np.ceil(h / anchor_stride[1])), feat_h) valid_feat_w = min(int(np.ceil(w / anchor_stride[0])), feat_w) flags = self.single_level_valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), self.num_base_anchors[i], device=device) multi_level_flags.append(flags) return multi_level_flags def single_level_valid_flags(self, featmap_size, valid_size, num_base_anchors, device='cuda'): """Generate the valid flags of anchor in a single feature map. Args: featmap_size (tuple[int]): The size of feature maps, arrange as (h, w). valid_size (tuple[int]): The valid size of the feature maps. num_base_anchors (int): The number of base anchors. device (str, optional): Device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each anchor in a single level \ feature map. """ feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy valid = valid[:, None].expand(valid.size(0), num_base_anchors).contiguous().view(-1) return valid def __repr__(self): """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}octave_base_scale=' repr_str += f'{self.octave_base_scale},\n' repr_str += f'{indent_str}scales_per_octave=' repr_str += f'{self.scales_per_octave},\n' repr_str += f'{indent_str}num_levels={self.num_levels}\n' repr_str += f'{indent_str}centers={self.centers},\n' repr_str += f'{indent_str}center_offset={self.center_offset})' return repr_str @PRIOR_GENERATORS.register_module() class SSDAnchorGenerator(AnchorGenerator): """Anchor generator for SSD. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. min_sizes (list[float]): The list of minimum anchor sizes on each level. max_sizes (list[float]): The list of maximum anchor sizes on each level. basesize_ratio_range (tuple(float)): Ratio range of anchors. Being used when not setting min_sizes and max_sizes. input_size (int): Size of feature map, 300 for SSD300, 512 for SSD512. Being used when not setting min_sizes and max_sizes. scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. It is always set to be False in SSD. 
""" def __init__(self, strides, ratios, min_sizes=None, max_sizes=None, basesize_ratio_range=(0.15, 0.9), input_size=300, scale_major=True): assert len(strides) == len(ratios) assert not (min_sizes is None) ^ (max_sizes is None) self.strides = [_pair(stride) for stride in strides] self.centers = [(stride[0] / 2., stride[1] / 2.) for stride in self.strides] if min_sizes is None and max_sizes is None: # use hard code to generate SSD anchors self.input_size = input_size assert mmcv.is_tuple_of(basesize_ratio_range, float) self.basesize_ratio_range = basesize_ratio_range # calculate anchor ratios and sizes min_ratio, max_ratio = basesize_ratio_range min_ratio = int(min_ratio * 100) max_ratio = int(max_ratio * 100) step = int(np.floor(max_ratio - min_ratio) / (self.num_levels - 2)) min_sizes = [] max_sizes = [] for ratio in range(int(min_ratio), int(max_ratio) + 1, step): min_sizes.append(int(self.input_size * ratio / 100)) max_sizes.append(int(self.input_size * (ratio + step) / 100)) if self.input_size == 300: if basesize_ratio_range[0] == 0.15: # SSD300 COCO min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) elif basesize_ratio_range[0] == 0.2: # SSD300 VOC min_sizes.insert(0, int(self.input_size * 10 / 100)) max_sizes.insert(0, int(self.input_size * 20 / 100)) else: raise ValueError( 'basesize_ratio_range[0] should be either 0.15' 'or 0.2 when input_size is 300, got ' f'{basesize_ratio_range[0]}.') elif self.input_size == 512: if basesize_ratio_range[0] == 0.1: # SSD512 COCO min_sizes.insert(0, int(self.input_size * 4 / 100)) max_sizes.insert(0, int(self.input_size * 10 / 100)) elif basesize_ratio_range[0] == 0.15: # SSD512 VOC min_sizes.insert(0, int(self.input_size * 7 / 100)) max_sizes.insert(0, int(self.input_size * 15 / 100)) else: raise ValueError( 'When not setting min_sizes and max_sizes,' 'basesize_ratio_range[0] should be either 0.1' 'or 0.15 when input_size is 512, got' f' {basesize_ratio_range[0]}.') else: raise ValueError( 'Only support 300 or 512 in SSDAnchorGenerator when ' 'not setting min_sizes and max_sizes, ' f'got {self.input_size}.') assert len(min_sizes) == len(max_sizes) == len(strides) anchor_ratios = [] anchor_scales = [] for k in range(len(self.strides)): scales = [1., np.sqrt(max_sizes[k] / min_sizes[k])] anchor_ratio = [1.] for r in ratios[k]: anchor_ratio += [1 / r, r] # 4 or 6 ratio anchor_ratios.append(torch.Tensor(anchor_ratio)) anchor_scales.append(torch.Tensor(scales)) self.base_sizes = min_sizes self.scales = anchor_scales self.ratios = anchor_ratios self.scale_major = scale_major self.center_offset = 0 self.base_anchors = self.gen_base_anchors() def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. 
""" multi_level_base_anchors = [] for i, base_size in enumerate(self.base_sizes): base_anchors = self.gen_single_level_base_anchors( base_size, scales=self.scales[i], ratios=self.ratios[i], center=self.centers[i]) indices = list(range(len(self.ratios[i]))) indices.insert(1, len(indices)) base_anchors = torch.index_select(base_anchors, 0, torch.LongTensor(indices)) multi_level_base_anchors.append(base_anchors) return multi_level_base_anchors def __repr__(self): """str: a string that describes the module""" indent_str = ' ' repr_str = self.__class__.__name__ + '(\n' repr_str += f'{indent_str}strides={self.strides},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}scale_major={self.scale_major},\n' repr_str += f'{indent_str}input_size={self.input_size},\n' repr_str += f'{indent_str}scales={self.scales},\n' repr_str += f'{indent_str}ratios={self.ratios},\n' repr_str += f'{indent_str}num_levels={self.num_levels},\n' repr_str += f'{indent_str}base_sizes={self.base_sizes},\n' repr_str += f'{indent_str}basesize_ratio_range=' repr_str += f'{self.basesize_ratio_range})' return repr_str @PRIOR_GENERATORS.register_module() class LegacyAnchorGenerator(AnchorGenerator): """Legacy anchor generator used in MMDetection V1.x. Note: Difference to the V2.0 anchor generator: 1. The center offset of V1.x anchors are set to be 0.5 rather than 0. 2. The width/height are minused by 1 when calculating the anchors' \ centers and corners to meet the V1.x coordinate system. 3. The anchors' corners are quantized. Args: strides (list[int] | list[tuple[int]]): Strides of anchors in multiple feature levels. ratios (list[float]): The list of ratios between the height and width of anchors in a single level. scales (list[int] | None): Anchor scales for anchors in a single level. It cannot be set at the same time if `octave_base_scale` and `scales_per_octave` are set. base_sizes (list[int]): The basic sizes of anchors in multiple levels. If None is given, strides will be used to generate base_sizes. scale_major (bool): Whether to multiply scales first when generating base anchors. If true, the anchors in the same row will have the same scales. By default it is True in V2.0 octave_base_scale (int): The base scale of octave. scales_per_octave (int): Number of scales for each octave. `octave_base_scale` and `scales_per_octave` are usually used in retinanet and the `scales` should be None when they are set. centers (list[tuple[float, float]] | None): The centers of the anchor relative to the feature grid center in multiple feature levels. By default it is set to be None and not used. It a list of float is given, this list will be used to shift the centers of anchors. center_offset (float): The offset of center in proportion to anchors' width and height. By default it is 0.5 in V2.0 but it should be 0.5 in v1.x models. Examples: >>> from mmdet.core import LegacyAnchorGenerator >>> self = LegacyAnchorGenerator( >>> [16], [1.], [1.], [9], center_offset=0.5) >>> all_anchors = self.grid_anchors(((2, 2),), device='cpu') >>> print(all_anchors) [tensor([[ 0., 0., 8., 8.], [16., 0., 24., 8.], [ 0., 16., 8., 24.], [16., 16., 24., 24.]])] """ def gen_single_level_base_anchors(self, base_size, scales, ratios, center=None): """Generate base anchors of a single level. Note: The width/height of anchors are minused by 1 when calculating \ the centers and corners to meet the V1.x coordinate system. Args: base_size (int | float): Basic size of an anchor. scales (torch.Tensor): Scales of the anchor. 
ratios (torch.Tensor): The ratio between between the height. and width of anchors in a single level. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature map. """ w = base_size h = base_size if center is None: x_center = self.center_offset * (w - 1) y_center = self.center_offset * (h - 1) else: x_center, y_center = center h_ratios = torch.sqrt(ratios) w_ratios = 1 / h_ratios if self.scale_major: ws = (w * w_ratios[:, None] * scales[None, :]).view(-1) hs = (h * h_ratios[:, None] * scales[None, :]).view(-1) else: ws = (w * scales[:, None] * w_ratios[None, :]).view(-1) hs = (h * scales[:, None] * h_ratios[None, :]).view(-1) # use float anchor and the anchor's center is aligned with the # pixel center base_anchors = [ x_center - 0.5 * (ws - 1), y_center - 0.5 * (hs - 1), x_center + 0.5 * (ws - 1), y_center + 0.5 * (hs - 1) ] base_anchors = torch.stack(base_anchors, dim=-1).round() return base_anchors @PRIOR_GENERATORS.register_module() class LegacySSDAnchorGenerator(SSDAnchorGenerator, LegacyAnchorGenerator): """Legacy anchor generator used in MMDetection V1.x. The difference between `LegacySSDAnchorGenerator` and `SSDAnchorGenerator` can be found in `LegacyAnchorGenerator`. """ def __init__(self, strides, ratios, basesize_ratio_range, input_size=300, scale_major=True): super(LegacySSDAnchorGenerator, self).__init__( strides=strides, ratios=ratios, basesize_ratio_range=basesize_ratio_range, input_size=input_size, scale_major=scale_major) self.centers = [((stride - 1) / 2., (stride - 1) / 2.) for stride in strides] self.base_anchors = self.gen_base_anchors() @PRIOR_GENERATORS.register_module() class YOLOAnchorGenerator(AnchorGenerator): """Anchor generator for YOLO. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels. base_sizes (list[list[tuple[int, int]]]): The basic sizes of anchors in multiple levels. """ def __init__(self, strides, base_sizes): self.strides = [_pair(stride) for stride in strides] self.centers = [(stride[0] / 2., stride[1] / 2.) for stride in self.strides] self.base_sizes = [] num_anchor_per_level = len(base_sizes[0]) for base_sizes_per_level in base_sizes: assert num_anchor_per_level == len(base_sizes_per_level) self.base_sizes.append( [_pair(base_size) for base_size in base_sizes_per_level]) self.base_anchors = self.gen_base_anchors() @property def num_levels(self): """int: number of feature levels that the generator will be applied""" return len(self.base_sizes) def gen_base_anchors(self): """Generate base anchors. Returns: list(torch.Tensor): Base anchors of a feature grid in multiple \ feature levels. """ multi_level_base_anchors = [] for i, base_sizes_per_level in enumerate(self.base_sizes): center = None if self.centers is not None: center = self.centers[i] multi_level_base_anchors.append( self.gen_single_level_base_anchors(base_sizes_per_level, center)) return multi_level_base_anchors def gen_single_level_base_anchors(self, base_sizes_per_level, center=None): """Generate base anchors of a single level. Args: base_sizes_per_level (list[tuple[int, int]]): Basic sizes of anchors. center (tuple[float], optional): The center of the base anchor related to a single feature grid. Defaults to None. Returns: torch.Tensor: Anchors in a single-level feature maps. 
""" x_center, y_center = center base_anchors = [] for base_size in base_sizes_per_level: w, h = base_size # use float anchor and the anchor's center is aligned with the # pixel center base_anchor = torch.Tensor([ x_center - 0.5 * w, y_center - 0.5 * h, x_center + 0.5 * w, y_center + 0.5 * h ]) base_anchors.append(base_anchor) base_anchors = torch.stack(base_anchors, dim=0) return base_anchors def responsible_flags(self, featmap_sizes, gt_bboxes, device='cuda'): """Generate responsible anchor flags of grid cells in multiple scales. Args: featmap_sizes (list(tuple)): List of feature map sizes in multiple feature levels. gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). device (str): Device where the anchors will be put on. Return: list(torch.Tensor): responsible flags of anchors in multiple level """ assert self.num_levels == len(featmap_sizes) multi_level_responsible_flags = [] for i in range(self.num_levels): anchor_stride = self.strides[i] flags = self.single_level_responsible_flags( featmap_sizes[i], gt_bboxes, anchor_stride, self.num_base_anchors[i], device=device) multi_level_responsible_flags.append(flags) return multi_level_responsible_flags def single_level_responsible_flags(self, featmap_size, gt_bboxes, stride, num_base_anchors, device='cuda'): """Generate the responsible flags of anchor in a single feature map. Args: featmap_size (tuple[int]): The size of feature maps. gt_bboxes (Tensor): Ground truth boxes, shape (n, 4). stride (tuple(int)): stride of current level num_base_anchors (int): The number of base anchors. device (str, optional): Device where the flags will be put on. Defaults to 'cuda'. Returns: torch.Tensor: The valid flags of each anchor in a single level \ feature map. """ feat_h, feat_w = featmap_size gt_bboxes_cx = ((gt_bboxes[:, 0] + gt_bboxes[:, 2]) * 0.5).to(device) gt_bboxes_cy = ((gt_bboxes[:, 1] + gt_bboxes[:, 3]) * 0.5).to(device) gt_bboxes_grid_x = torch.floor(gt_bboxes_cx / stride[0]).long() gt_bboxes_grid_y = torch.floor(gt_bboxes_cy / stride[1]).long() # row major indexing gt_bboxes_grid_idx = gt_bboxes_grid_y * feat_w + gt_bboxes_grid_x responsible_grid = torch.zeros( feat_h * feat_w, dtype=torch.uint8, device=device) responsible_grid[gt_bboxes_grid_idx] = 1 responsible_grid = responsible_grid[:, None].expand( responsible_grid.size(0), num_base_anchors).contiguous().view(-1) return responsible_grid
37,205
41.913495
79
py
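A small CPU usage sketch of the AnchorGenerator defined above, mirroring its docstring example; the strides, base sizes and feature-map sizes are illustrative values, not taken from any shipped config.

import torch

from mmdet.core import AnchorGenerator

# One ratio and one scale per level, so each grid cell gets a single base
# anchor of side 9 (stride 16) or side 18 (stride 32).
gen = AnchorGenerator(
    strides=[16, 32], ratios=[1.0], scales=[1.0], base_sizes=[9, 18])

# Hypothetical feature-map sizes, arranged as (h, w) per level.
featmap_sizes = [(2, 2), (1, 1)]
anchors = gen.grid_priors(featmap_sizes, dtype=torch.float32, device='cpu')

# One (N, 4) tensor of xyxy anchors per level, N = h * w * num_base_anchors.
assert anchors[0].shape == (4, 4) and anchors[1].shape == (1, 4)
print(anchors[1])  # tensor([[-9., -9., 9., 9.]])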
mmdetection
mmdetection-master/mmdet/core/anchor/builder.py
# Copyright (c) OpenMMLab. All rights reserved. import warnings from mmcv.utils import Registry, build_from_cfg PRIOR_GENERATORS = Registry('Generator for anchors and points') ANCHOR_GENERATORS = PRIOR_GENERATORS def build_prior_generator(cfg, default_args=None): return build_from_cfg(cfg, PRIOR_GENERATORS, default_args) def build_anchor_generator(cfg, default_args=None): warnings.warn( '``build_anchor_generator`` would be deprecated soon, please use ' '``build_prior_generator`` ') return build_prior_generator(cfg, default_args=default_args)
583
28.2
74
py
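build_prior_generator above is a thin wrapper around the PRIOR_GENERATORS registry, which lets configs instantiate any generator in this package from a plain dict. A sketch with illustrative RetinaNet-style values (not copied from a specific config):

from mmdet.core.anchor import build_prior_generator

anchor_cfg = dict(
    type='AnchorGenerator',  # registry key registered in anchor_generator.py
    strides=[8, 16, 32],
    ratios=[0.5, 1.0, 2.0],
    octave_base_scale=4,
    scales_per_octave=3)

generator = build_prior_generator(anchor_cfg)
print(generator.num_levels)       # 3
print(generator.num_base_priors)  # [9, 9, 9] = 3 ratios x 3 scales per level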
mmdetection
mmdetection-master/mmdet/core/anchor/point_generator.py
# Copyright (c) OpenMMLab. All rights reserved. import numpy as np import torch from torch.nn.modules.utils import _pair from .builder import PRIOR_GENERATORS @PRIOR_GENERATORS.register_module() class PointGenerator: def _meshgrid(self, x, y, row_major=True): xx = x.repeat(len(y)) yy = y.view(-1, 1).repeat(1, len(x)).view(-1) if row_major: return xx, yy else: return yy, xx def grid_points(self, featmap_size, stride=16, device='cuda'): feat_h, feat_w = featmap_size shift_x = torch.arange(0., feat_w, device=device) * stride shift_y = torch.arange(0., feat_h, device=device) * stride shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) stride = shift_x.new_full((shift_xx.shape[0], ), stride) shifts = torch.stack([shift_xx, shift_yy, stride], dim=-1) all_points = shifts.to(device) return all_points def valid_flags(self, featmap_size, valid_size, device='cuda'): feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy return valid @PRIOR_GENERATORS.register_module() class MlvlPointGenerator: """Standard points generator for multi-level (Mlvl) feature maps in 2D points-based detectors. Args: strides (list[int] | list[tuple[int, int]]): Strides of anchors in multiple feature levels in order (w, h). offset (float): The offset of points, the value is normalized with corresponding stride. Defaults to 0.5. """ def __init__(self, strides, offset=0.5): self.strides = [_pair(stride) for stride in strides] self.offset = offset @property def num_levels(self): """int: number of feature levels that the generator will be applied""" return len(self.strides) @property def num_base_priors(self): """list[int]: The number of priors (points) at a point on the feature grid""" return [1 for _ in range(len(self.strides))] def _meshgrid(self, x, y, row_major=True): yy, xx = torch.meshgrid(y, x) if row_major: # warning .flatten() would cause error in ONNX exporting # have to use reshape here return xx.reshape(-1), yy.reshape(-1) else: return yy.reshape(-1), xx.reshape(-1) def grid_priors(self, featmap_sizes, dtype=torch.float32, device='cuda', with_stride=False): """Generate grid points of multiple feature levels. Args: featmap_sizes (list[tuple]): List of feature map sizes in multiple feature levels, each size arrange as as (h, w). dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. device (str): The device where the anchors will be put on. with_stride (bool): Whether to concatenate the stride to the last dimension of points. Return: list[torch.Tensor]: Points of multiple feature levels. The sizes of each tensor should be (N, 2) when with stride is ``False``, where N = width * height, width and height are the sizes of the corresponding feature level, and the last dimension 2 represent (coord_x, coord_y), otherwise the shape should be (N, 4), and the last dimension 4 represent (coord_x, coord_y, stride_w, stride_h). 
""" assert self.num_levels == len(featmap_sizes) multi_level_priors = [] for i in range(self.num_levels): priors = self.single_level_grid_priors( featmap_sizes[i], level_idx=i, dtype=dtype, device=device, with_stride=with_stride) multi_level_priors.append(priors) return multi_level_priors def single_level_grid_priors(self, featmap_size, level_idx, dtype=torch.float32, device='cuda', with_stride=False): """Generate grid Points of a single level. Note: This function is usually called by method ``self.grid_priors``. Args: featmap_size (tuple[int]): Size of the feature maps, arrange as (h, w). level_idx (int): The index of corresponding feature map level. dtype (:obj:`dtype`): Dtype of priors. Default: torch.float32. device (str, optional): The device the tensor will be put on. Defaults to 'cuda'. with_stride (bool): Concatenate the stride to the last dimension of points. Return: Tensor: Points of single feature levels. The shape of tensor should be (N, 2) when with stride is ``False``, where N = width * height, width and height are the sizes of the corresponding feature level, and the last dimension 2 represent (coord_x, coord_y), otherwise the shape should be (N, 4), and the last dimension 4 represent (coord_x, coord_y, stride_w, stride_h). """ feat_h, feat_w = featmap_size stride_w, stride_h = self.strides[level_idx] shift_x = (torch.arange(0, feat_w, device=device) + self.offset) * stride_w # keep featmap_size as Tensor instead of int, so that we # can convert to ONNX correctly shift_x = shift_x.to(dtype) shift_y = (torch.arange(0, feat_h, device=device) + self.offset) * stride_h # keep featmap_size as Tensor instead of int, so that we # can convert to ONNX correctly shift_y = shift_y.to(dtype) shift_xx, shift_yy = self._meshgrid(shift_x, shift_y) if not with_stride: shifts = torch.stack([shift_xx, shift_yy], dim=-1) else: # use `shape[0]` instead of `len(shift_xx)` for ONNX export stride_w = shift_xx.new_full((shift_xx.shape[0], ), stride_w).to(dtype) stride_h = shift_xx.new_full((shift_yy.shape[0], ), stride_h).to(dtype) shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1) all_points = shifts.to(device) return all_points def valid_flags(self, featmap_sizes, pad_shape, device='cuda'): """Generate valid flags of points of multiple feature levels. Args: featmap_sizes (list(tuple)): List of feature map sizes in multiple feature levels, each size arrange as as (h, w). pad_shape (tuple(int)): The padded shape of the image, arrange as (h, w). device (str): The device where the anchors will be put on. Return: list(torch.Tensor): Valid flags of points of multiple levels. """ assert self.num_levels == len(featmap_sizes) multi_level_flags = [] for i in range(self.num_levels): point_stride = self.strides[i] feat_h, feat_w = featmap_sizes[i] h, w = pad_shape[:2] valid_feat_h = min(int(np.ceil(h / point_stride[1])), feat_h) valid_feat_w = min(int(np.ceil(w / point_stride[0])), feat_w) flags = self.single_level_valid_flags((feat_h, feat_w), (valid_feat_h, valid_feat_w), device=device) multi_level_flags.append(flags) return multi_level_flags def single_level_valid_flags(self, featmap_size, valid_size, device='cuda'): """Generate the valid flags of points of a single feature map. Args: featmap_size (tuple[int]): The size of feature maps, arrange as as (h, w). valid_size (tuple[int]): The valid size of the feature maps. The size arrange as as (h, w). device (str, optional): The device where the flags will be put on. Defaults to 'cuda'. 
Returns: torch.Tensor: The valid flags of each points in a single level \ feature map. """ feat_h, feat_w = featmap_size valid_h, valid_w = valid_size assert valid_h <= feat_h and valid_w <= feat_w valid_x = torch.zeros(feat_w, dtype=torch.bool, device=device) valid_y = torch.zeros(feat_h, dtype=torch.bool, device=device) valid_x[:valid_w] = 1 valid_y[:valid_h] = 1 valid_xx, valid_yy = self._meshgrid(valid_x, valid_y) valid = valid_xx & valid_yy return valid def sparse_priors(self, prior_idxs, featmap_size, level_idx, dtype=torch.float32, device='cuda'): """Generate sparse points according to the ``prior_idxs``. Args: prior_idxs (Tensor): The index of corresponding anchors in the feature map. featmap_size (tuple[int]): feature map size arrange as (w, h). level_idx (int): The level index of corresponding feature map. dtype (obj:`torch.dtype`): Date type of points. Defaults to ``torch.float32``. device (obj:`torch.device`): The device where the points is located. Returns: Tensor: Anchor with shape (N, 2), N should be equal to the length of ``prior_idxs``. And last dimension 2 represent (coord_x, coord_y). """ height, width = featmap_size x = (prior_idxs % width + self.offset) * self.strides[level_idx][0] y = ((prior_idxs // width) % height + self.offset) * self.strides[level_idx][1] prioris = torch.stack([x, y], 1).to(dtype) prioris = prioris.to(device) return prioris
10,739
39.681818
79
py
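A short CPU sketch of the MlvlPointGenerator above, as used by point-based heads; the strides and feature-map sizes are hypothetical.

import torch

from mmdet.core.anchor import MlvlPointGenerator

# One point prior per location; offset=0.5 centres each point in its cell.
point_gen = MlvlPointGenerator(strides=[8, 16], offset=0.5)

# Hypothetical feature-map sizes, arranged as (h, w) per level.
featmap_sizes = [(4, 4), (2, 2)]
points = point_gen.grid_priors(
    featmap_sizes, dtype=torch.float32, device='cpu', with_stride=True)

# With with_stride=True every row is (coord_x, coord_y, stride_w, stride_h).
assert points[0].shape == (16, 4) and points[1].shape == (4, 4)
print(points[0][0])  # tensor([4., 4., 8., 8.])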
mmdetection
mmdetection-master/mmdet/core/anchor/utils.py
# Copyright (c) OpenMMLab. All rights reserved. import torch def images_to_levels(target, num_levels): """Convert targets by image to targets by feature level. [target_img0, target_img1] -> [target_level0, target_level1, ...] """ target = torch.stack(target, 0) level_targets = [] start = 0 for n in num_levels: end = start + n # level_targets.append(target[:, start:end].squeeze(0)) level_targets.append(target[:, start:end]) start = end return level_targets def anchor_inside_flags(flat_anchors, valid_flags, img_shape, allowed_border=0): """Check whether the anchors are inside the border. Args: flat_anchors (torch.Tensor): Flattened anchors, shape (n, 4). valid_flags (torch.Tensor): Existing valid flags of the anchors. img_shape (tuple(int)): Shape of current image. allowed_border (int, optional): The border to allow the valid anchor. Defaults to 0. Returns: torch.Tensor: Flags indicating whether the anchors are inside a \ valid range. """ img_h, img_w = img_shape[:2] if allowed_border >= 0: inside_flags = valid_flags & \ (flat_anchors[:, 0] >= -allowed_border) & \ (flat_anchors[:, 1] >= -allowed_border) & \ (flat_anchors[:, 2] < img_w + allowed_border) & \ (flat_anchors[:, 3] < img_h + allowed_border) else: inside_flags = valid_flags return inside_flags def calc_region(bbox, ratio, featmap_size=None): """Calculate a proportional bbox region. The bbox centers are fixed and the new h' and w' are h * ratio and w * ratio. Args: bbox (Tensor): Bboxes to calculate regions, shape (n, 4). ratio (float): Ratio of the output region. featmap_size (tuple): Feature map size used for clipping the boundary. Returns: tuple: x1, y1, x2, y2 """ x1 = torch.round((1 - ratio) * bbox[0] + ratio * bbox[2]).long() y1 = torch.round((1 - ratio) * bbox[1] + ratio * bbox[3]).long() x2 = torch.round(ratio * bbox[0] + (1 - ratio) * bbox[2]).long() y2 = torch.round(ratio * bbox[1] + (1 - ratio) * bbox[3]).long() if featmap_size is not None: x1 = x1.clamp(min=0, max=featmap_size[1]) y1 = y1.clamp(min=0, max=featmap_size[0]) x2 = x2.clamp(min=0, max=featmap_size[1]) y2 = y2.clamp(min=0, max=featmap_size[0]) return (x1, y1, x2, y2)
2,545
33.876712
79
py
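A minimal sketch of anchor_inside_flags above on a hypothetical 20x20 image; the anchor coordinates are made up to show one box fully inside, one crossing the right border and one with a negative coordinate.

import torch

from mmdet.core import anchor_inside_flags

flat_anchors = torch.tensor([[2., 2., 10., 10.],   # fully inside
                             [15., 5., 25., 12.],  # crosses the right border
                             [-3., 0., 6., 8.]])   # starts left of the image
valid_flags = torch.ones(3, dtype=torch.bool)
img_shape = (20, 20, 3)  # (h, w, c), as stored in img_meta['img_shape']

inside = anchor_inside_flags(
    flat_anchors, valid_flags, img_shape, allowed_border=0)
print(inside)  # tensor([ True, False, False])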