Dataset columns:

| column | type | range / stats |
| --- | --- | --- |
| repo | string | lengths 2–152 |
| file | string | lengths 15–239 |
| code | string | lengths 0–58.4M |
| file_length | int64 | 0–58.4M |
| avg_line_length | float64 | 0–1.81M |
| max_line_length | int64 | 0–12.7M |
| extension_type | string | 364 classes |
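The rows below are easier to consume programmatically than visually. Here is a minimal sketch of loading a dataset with this schema through the Hugging Face `datasets` library; the dataset identifier `user/mmseg-configs` is a placeholder for illustration, not a published dataset name.

```python
# Hypothetical loading sketch: "user/mmseg-configs" is a placeholder id.
from datasets import load_dataset

ds = load_dataset("user/mmseg-configs", split="train")
for row in ds.select(range(3)):
    # Each record carries the repo name, the file path, the raw file
    # contents, and a few simple length statistics.
    print(row["repo"], row["file"], row["file_length"])
```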
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid.py
_base_ = './deeplabv3plus_r50-d8_4x4_896x896_80k_isaid.py'
model = dict(
    pretrained='open-mmlab://resnet18_v1c',
    backbone=dict(depth=18),
    decode_head=dict(
        c1_in_channels=64,
        c1_channels=12,
        in_channels=512,
        channels=128,
    ),
    auxiliary_head=dict(in_channels=256, channels=64))
328
26.416667
58
py
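Every config in this family is a thin override on top of a `_base_` file. As a sketch of how that inheritance resolves (assuming mmcv 1.x is installed and the mmsegmentation config tree is checked out locally), `Config.fromfile` merges the base file(s) first and then applies the child's overrides:

```python
# Minimal sketch of mmcv config inheritance, assuming a local checkout.
from mmcv import Config

cfg = Config.fromfile(
    'configs/deeplabv3plus/deeplabv3plus_r18-d8_4x4_896x896_80k_isaid.py')
# Config.fromfile recursively merges every file listed in `_base_`, then
# applies this file's overrides (an R-18 backbone and narrower heads).
print(cfg.model.backbone.depth)        # 18, overridden by this file
print(cfg.model.decode_head.channels)  # 128, overridden by this file
```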
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x1024_80k_cityscapes.py
_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='open-mmlab://resnet18_v1c',
    backbone=dict(depth=18),
    decode_head=dict(
        c1_in_channels=64,
        c1_channels=12,
        in_channels=512,
        channels=128,
    ),
    auxiliary_head=dict(in_channels=256, channels=64))
330
26.583333
60
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_loveda.py
_base_ = './deeplabv3plus_r50-d8_512x512_80k_loveda.py'
model = dict(
    backbone=dict(
        depth=18,
        init_cfg=dict(
            type='Pretrained', checkpoint='open-mmlab://resnet18_v1c')),
    decode_head=dict(
        c1_in_channels=64,
        c1_channels=12,
        in_channels=512,
        channels=128,
    ),
    auxiliary_head=dict(in_channels=256, channels=64))
385
26.571429
72
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r18-d8_512x512_80k_potsdam.py
_base_ = './deeplabv3plus_r50-d8_512x512_80k_potsdam.py'
model = dict(
    pretrained='open-mmlab://resnet18_v1c',
    backbone=dict(depth=18),
    decode_head=dict(
        c1_in_channels=64,
        c1_channels=12,
        in_channels=512,
        channels=128,
    ),
    auxiliary_head=dict(in_channels=256, channels=64))
326
26.25
56
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r18-d8_769x769_80k_cityscapes.py
_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py'
model = dict(
    pretrained='open-mmlab://resnet18_v1c',
    backbone=dict(depth=18),
    decode_head=dict(
        c1_in_channels=64,
        c1_channels=12,
        in_channels=512,
        channels=128,
    ),
    auxiliary_head=dict(in_channels=256, channels=64))
329
26.5
59
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r18b-d8_512x1024_80k_cityscapes.py
_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='torchvision://resnet18',
    backbone=dict(type='ResNet', depth=18),
    decode_head=dict(
        c1_in_channels=64,
        c1_channels=12,
        in_channels=512,
        channels=128,
    ),
    auxiliary_head=dict(in_channels=256, channels=64))
342
27.583333
60
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r18b-d8_769x769_80k_cityscapes.py
_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py'
model = dict(
    pretrained='torchvision://resnet18',
    backbone=dict(type='ResNet', depth=18),
    decode_head=dict(
        c1_in_channels=64,
        c1_channels=12,
        in_channels=512,
        channels=128,
    ),
    auxiliary_head=dict(in_channels=256, channels=64))
341
27.5
59
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(num_classes=60),
    auxiliary_head=dict(num_classes=60),
    test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
420
37.272727
75
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_40k_pascal_context_59.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(num_classes=59),
    auxiliary_head=dict(num_classes=59),
    test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
423
37.545455
78
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_80k_pascal_context.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/pascal_context.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=60),
    auxiliary_head=dict(num_classes=60),
    test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
420
37.272727
75
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_480x480_80k_pascal_context_59.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/pascal_context_59.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=59),
    auxiliary_head=dict(num_classes=59),
    test_cfg=dict(mode='slide', crop_size=(480, 480), stride=(320, 320)))
optimizer = dict(type='SGD', lr=0.004, momentum=0.9, weight_decay=0.0001)
423
37.545455
78
py
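The Pascal-Context configs above test with `mode='slide'`: crops of `crop_size` are taken every `stride` pixels, the final window is clamped to the image border, and logits from overlapping crops are averaged. A small illustration of the tiling arithmetic (not mmseg's actual implementation, which builds the grid slightly differently; assumes the image is at least one crop wide):

```python
# Illustrative sliding-window origins along one image axis.
def window_origins(img_len, crop, stride):
    origins, pos = [], 0
    while True:
        # Clamp the last window so it ends exactly at the image border.
        origins.append(min(pos, img_len - crop))
        if pos + crop >= img_len:
            return origins
        pos += stride

print(window_origins(520, 480, 320))  # [0, 40] -> two overlapping crops
```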
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_512x512_80k_vaihingen.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/vaihingen.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=6), auxiliary_head=dict(num_classes=6))
261
31.75
72
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_4x4_896x896_80k_isaid.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/isaid.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=16), auxiliary_head=dict(num_classes=16))
255
35.571429
78
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_40k_cityscapes.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
175
28.333333
71
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
175
28.333333
71
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
259
36.142857
79
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_20k_voc12aug.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
270
32.875
77
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_40k_voc12aug.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
270
32.875
77
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
258
36
79
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_loveda.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py', '../_base_/datasets/loveda.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=7), auxiliary_head=dict(num_classes=7))
254
35.428571
79
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_512x512_80k_potsdam.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/potsdam.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=6), auxiliary_head=dict(num_classes=6))
259
31.5
72
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_40k_cityscapes.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
358
34.9
79
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50-d8_769x769_80k_cityscapes.py
_base_ = [
    '../_base_/models/deeplabv3plus_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
358
34.9
79
py
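The 769x769 variants flip `align_corners` to True for both heads, which pairs with the odd crop size (769 = 768 + 1). A minimal PyTorch sketch of what that flag changes during bilinear upsampling:

```python
import torch
import torch.nn.functional as F

x = torch.tensor([[[[0., 1.], [2., 3.]]]])  # 1x1x2x2 feature map
up_false = F.interpolate(x, size=(4, 4), mode='bilinear', align_corners=False)
up_true = F.interpolate(x, size=(4, 4), mode='bilinear', align_corners=True)
print(up_false[0, 0, 0])  # corner values are interpolated inward
print(up_true[0, 0, 0])   # corners exactly reproduce the input corners
```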
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50b-d8_512x1024_80k_cityscapes.py
_base_ = './deeplabv3plus_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet'))
141
46.333333
79
py
mmsegmentation
mmsegmentation-master/configs/deeplabv3plus/deeplabv3plus_r50b-d8_769x769_80k_cityscapes.py
_base_ = './deeplabv3plus_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet'))
140
46
79
py
mmsegmentation
mmsegmentation-master/configs/dmnet/README.md
# DMNet

[Dynamic Multi-scale Filters for Semantic Segmentation](https://openaccess.thecvf.com/content_ICCV_2019/papers/He_Dynamic_Multi-Scale_Filters_for_Semantic_Segmentation_ICCV_2019_paper.pdf)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/Junjun2016/DMNet">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dm_head.py#L93">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Multi-scale representation provides an effective way to address scale variation of objects and stuff in semantic segmentation. Previous works construct multi-scale representation by utilizing different filter sizes, expanding filter sizes with dilated filters or pooling grids, and the parameters of these filters are fixed after training. These methods often suffer from heavy computational cost or have more parameters, and are not adaptive to the input image during inference. To address these problems, this paper proposes a Dynamic Multi-scale Network (DMNet) to adaptively capture multi-scale contents for predicting pixel-level semantic labels. DMNet is composed of multiple Dynamic Convolutional Modules (DCMs) arranged in parallel, each of which exploits context-aware filters to estimate semantic representation for a specific scale. The outputs of multiple DCMs are further integrated for final segmentation. We conduct extensive experiments to evaluate our DMNet on three challenging semantic segmentation and scene parsing datasets, PASCAL VOC 2012, Pascal-Context, and ADE20K. DMNet achieves a new record 84.4% mIoU on the PASCAL VOC 2012 test set without MS COCO pre-training and post-processing, and also obtains state-of-the-art performance on Pascal-Context and ADE20K.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142900781-6215763f-8b71-4e0b-a6b1-c41372db2aa0.png" width="70%"/>
</div>

## Citation

```bibtex
@InProceedings{He_2019_ICCV,
    author = {He, Junjun and Deng, Zhongying and Qiao, Yu},
    title = {Dynamic Multi-Scale Filters for Semantic Segmentation},
    booktitle = {Proceedings of the IEEE/CVF International Conference on Computer Vision (ICCV)},
    month = {October},
    year = {2019}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| DMNet | R-50-D8 | 512x1024 | 40000 | 7.0 | 3.66 | 77.78 | 79.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes_20201215_042326-615373cf.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes-20201215_042326.log.json) |
| DMNet | R-101-D8 | 512x1024 | 40000 | 10.6 | 2.54 | 78.37 | 79.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes_20201215_043100-8291e976.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes-20201215_043100.log.json) |
| DMNet | R-50-D8 | 769x769 | 40000 | 7.9 | 1.57 | 78.49 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes_20201215_093706-e7f0e23e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes-20201215_093706.log.json) |
| DMNet | R-101-D8 | 769x769 | 40000 | 12.0 | 1.01 | 77.62 | 78.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes_20201215_081348-a74261f6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes-20201215_081348.log.json) |
| DMNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.07 | 80.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes_20201215_053728-3c8893b9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes-20201215_053728.log.json) |
| DMNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.64 | 80.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes_20201215_031718-fa081cb8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes-20201215_031718.log.json) |
| DMNet | R-50-D8 | 769x769 | 80000 | - | - | 79.22 | 80.55 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes_20201215_034006-6060840e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes-20201215_034006.log.json) |
| DMNet | R-101-D8 | 769x769 | 80000 | - | - | 79.19 | 80.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes_20201215_082810-7f0de59a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes-20201215_082810.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| DMNet | R-50-D8 | 512x512 | 80000 | 9.4 | 20.95 | 42.37 | 43.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k_20201215_144744-f89092a6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k-20201215_144744.log.json) |
| DMNet | R-101-D8 | 512x512 | 80000 | 13.0 | 13.88 | 45.34 | 46.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k_20201215_104812-bfa45311.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k-20201215_104812.log.json) |
| DMNet | R-50-D8 | 512x512 | 160000 | - | - | 43.15 | 44.17 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k_20201215_115313-025ab3f9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k-20201215_115313.log.json) |
| DMNet | R-101-D8 | 512x512 | 160000 | - | - | 45.42 | 46.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dmnet/dmnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k_20201215_111145-a0bc02ef.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k-20201215_111145.log.json) |
11,130
184.516667
1,277
md
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet.yml
Collections:
- Name: DMNet
  Metadata:
    Training Data:
    - Cityscapes
    - ADE20K
  Paper:
    URL: https://openaccess.thecvf.com/content_ICCV_2019/papers/He_Dynamic_Multi-Scale_Filters_for_Semantic_Segmentation_ICCV_2019_paper.pdf
    Title: Dynamic Multi-scale Filters for Semantic Segmentation
  README: configs/dmnet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dm_head.py#L93
    Version: v0.17.0
  Converted From:
    Code: https://github.com/Junjun2016/DMNet
Models:
- Name: dmnet_r50-d8_512x1024_40k_cityscapes
  In Collection: DMNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 273.22
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 7.0
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.78
      mIoU(ms+flip): 79.14
  Config: configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes/dmnet_r50-d8_512x1024_40k_cityscapes_20201215_042326-615373cf.pth
- Name: dmnet_r101-d8_512x1024_40k_cityscapes
  In Collection: DMNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 393.7
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 10.6
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.37
      mIoU(ms+flip): 79.72
  Config: configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes/dmnet_r101-d8_512x1024_40k_cityscapes_20201215_043100-8291e976.pth
- Name: dmnet_r50-d8_769x769_40k_cityscapes
  In Collection: DMNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 636.94
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 7.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.49
      mIoU(ms+flip): 80.27
  Config: configs/dmnet/dmnet_r50-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_40k_cityscapes/dmnet_r50-d8_769x769_40k_cityscapes_20201215_093706-e7f0e23e.pth
- Name: dmnet_r101-d8_769x769_40k_cityscapes
  In Collection: DMNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 990.1
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 12.0
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.62
      mIoU(ms+flip): 78.94
  Config: configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_40k_cityscapes/dmnet_r101-d8_769x769_40k_cityscapes_20201215_081348-a74261f6.pth
- Name: dmnet_r50-d8_512x1024_80k_cityscapes
  In Collection: DMNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.07
      mIoU(ms+flip): 80.22
  Config: configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes/dmnet_r50-d8_512x1024_80k_cityscapes_20201215_053728-3c8893b9.pth
- Name: dmnet_r101-d8_512x1024_80k_cityscapes
  In Collection: DMNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.64
      mIoU(ms+flip): 80.67
  Config: configs/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes/dmnet_r101-d8_512x1024_80k_cityscapes_20201215_031718-fa081cb8.pth
- Name: dmnet_r50-d8_769x769_80k_cityscapes
  In Collection: DMNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.22
      mIoU(ms+flip): 80.55
  Config: configs/dmnet/dmnet_r50-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_769x769_80k_cityscapes/dmnet_r50-d8_769x769_80k_cityscapes_20201215_034006-6060840e.pth
- Name: dmnet_r101-d8_769x769_80k_cityscapes
  In Collection: DMNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.19
      mIoU(ms+flip): 80.65
  Config: configs/dmnet/dmnet_r101-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_769x769_80k_cityscapes/dmnet_r101-d8_769x769_80k_cityscapes_20201215_082810-7f0de59a.pth
- Name: dmnet_r50-d8_512x512_80k_ade20k
  In Collection: DMNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 47.73
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.4
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.37
      mIoU(ms+flip): 43.62
  Config: configs/dmnet/dmnet_r50-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_80k_ade20k/dmnet_r50-d8_512x512_80k_ade20k_20201215_144744-f89092a6.pth
- Name: dmnet_r101-d8_512x512_80k_ade20k
  In Collection: DMNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 72.05
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 13.0
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 45.34
      mIoU(ms+flip): 46.13
  Config: configs/dmnet/dmnet_r101-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_80k_ade20k/dmnet_r101-d8_512x512_80k_ade20k_20201215_104812-bfa45311.pth
- Name: dmnet_r50-d8_512x512_160k_ade20k
  In Collection: DMNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.15
      mIoU(ms+flip): 44.17
  Config: configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r50-d8_512x512_160k_ade20k/dmnet_r50-d8_512x512_160k_ade20k_20201215_115313-025ab3f9.pth
- Name: dmnet_r101-d8_512x512_160k_ade20k
  In Collection: DMNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 45.42
      mIoU(ms+flip): 46.76
  Config: configs/dmnet/dmnet_r101-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dmnet/dmnet_r101-d8_512x512_160k_ade20k/dmnet_r101-d8_512x512_160k_ade20k_20201215_111145-a0bc02ef.pth
7,673
31.935622
172
yml
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r101-d8_512x1024_40k_cityscapes.py
_base_ = './dmnet_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r101-d8_512x1024_80k_cityscapes.py
_base_ = './dmnet_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r101-d8_512x512_160k_ade20k.py
_base_ = './dmnet_r50-d8_512x512_160k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
129
42.333333
79
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r101-d8_512x512_80k_ade20k.py
_base_ = './dmnet_r50-d8_512x512_80k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
128
42
79
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r101-d8_769x769_40k_cityscapes.py
_base_ = './dmnet_r50-d8_769x769_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
132
43.333333
79
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r101-d8_769x769_80k_cityscapes.py
_base_ = './dmnet_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
132
43.333333
79
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py
_base_ = [
    '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
163
31.8
75
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r50-d8_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
163
31.8
75
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r50-d8_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
251
35
76
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r50-d8_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
250
34.857143
76
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r50-d8_769x769_40k_cityscapes.py
_base_ = [
    '../_base_/models/dmnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
350
34.1
79
py
mmsegmentation
mmsegmentation-master/configs/dmnet/dmnet_r50-d8_769x769_80k_cityscapes.py
_base_ = [
    '../_base_/models/dmnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
350
34.1
79
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/README.md
# DNLNet

[Disentangled Non-Local Neural Networks](https://arxiv.org/abs/2006.06668)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/yinmh17/DNL-Semantic-Segmentation">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dnl_head.py#L88">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

The non-local block is a popular module for strengthening the context modeling ability of a regular convolutional neural network. This paper first studies the non-local block in depth, where we find that its attention computation can be split into two terms, a whitened pairwise term accounting for the relationship between two pixels and a unary term representing the saliency of every pixel. We also observe that the two terms trained alone tend to model different visual clues, e.g. the whitened pairwise term learns within-region relationships while the unary term learns salient boundaries. However, the two terms are tightly coupled in the non-local block, which hinders the learning of each. Based on these findings, we present the disentangled non-local block, where the two terms are decoupled to facilitate learning for both terms. We demonstrate the effectiveness of the decoupled design on various tasks, such as semantic segmentation on Cityscapes, ADE20K and PASCAL Context, object detection on COCO, and action recognition on Kinetics.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142900944-b8d93301-d2ce-488e-a461-b0813f96be49.png" width="70%"/>
</div>

This example is to reproduce ["Disentangled Non-Local Neural Networks"](https://arxiv.org/abs/2006.06668) for semantic segmentation. It is still in progress.

## Citation

```bibtex
@misc{yin2020disentangled,
    title={Disentangled Non-Local Neural Networks},
    author={Minghao Yin and Zhuliang Yao and Yue Cao and Xiu Li and Zheng Zhang and Stephen Lin and Han Hu},
    year={2020},
    booktitle={ECCV}
}
```

## Results and models (in progress)

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------ | -------- |
| DNLNet | R-50-D8 | 512x1024 | 40000 | 7.3 | 2.56 | 78.61 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes_20200904_233629-53d4ea93.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes-20200904_233629.log.json) |
| DNLNet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.96 | 78.31 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes_20200904_233629-9928ffef.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes-20200904_233629.log.json) |
| DNLNet | R-50-D8 | 769x769 | 40000 | 9.2 | 1.50 | 78.44 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes_20200820_232206-0f283785.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes-20200820_232206.log.json) |
| DNLNet | R-101-D8 | 769x769 | 40000 | 12.6 | 1.02 | 76.39 | 77.77 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes_20200820_171256-76c596df.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes-20200820_171256.log.json) |
| DNLNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.33 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes_20200904_233629-58b2f778.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes-20200904_233629.log.json) |
| DNLNet | R-101-D8 | 512x1024 | 80000 | - | - | 80.41 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes_20200904_233629-758e2dd4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes-20200904_233629.log.json) |
| DNLNet | R-50-D8 | 769x769 | 80000 | - | - | 79.36 | 80.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes_20200820_011925-366bc4c7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes-20200820_011925.log.json) |
| DNLNet | R-101-D8 | 769x769 | 80000 | - | - | 79.41 | 80.68 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes_20200821_051111-95ff84ab.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes-20200821_051111.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------ | -------- |
| DNLNet | R-50-D8 | 512x512 | 80000 | 8.8 | 20.66 | 41.76 | 42.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k_20200826_183354-1cf6e0c1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k-20200826_183354.log.json) |
| DNLNet | R-101-D8 | 512x512 | 80000 | 12.8 | 12.54 | 43.76 | 44.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k_20200826_183354-d820d6ea.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k-20200826_183354.log.json) |
| DNLNet | R-50-D8 | 512x512 | 160000 | - | - | 41.87 | 43.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k_20200826_183350-37837798.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k-20200826_183350.log.json) |
| DNLNet | R-101-D8 | 512x512 | 160000 | - | - | 44.25 | 45.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k_20200826_183350-ed522c61.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k-20200826_183350.log.json) |
10,831
170.936508
1,050
md
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py
_base_ = './dnl_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
131
43
79
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py
_base_ = './dnl_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
131
43
79
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py
_base_ = './dnl_r50-d8_512x512_160k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
127
41.666667
79
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py
_base_ = './dnl_r50-d8_512x512_80k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
126
41.333333
79
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py
_base_ = './dnl_r50-d8_769x769_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
130
42.666667
79
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py
_base_ = './dnl_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
130
42.666667
79
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py
_base_ = [
    '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
161
31.4
73
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
161
31.4
73
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
249
34.714286
76
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/dnl_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
248
34.571429
76
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py
_base_ = [
    '../_base_/models/dnl_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
348
33.9
79
py
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py
_base_ = [
    '../_base_/models/dnl_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
optimizer = dict(
    paramwise_cfg=dict(
        custom_keys=dict(theta=dict(wd_mult=0.), phi=dict(wd_mult=0.))))
463
34.692308
79
py
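The config above zeroes weight decay for the DNL head's `theta` and `phi` projections via `paramwise_cfg.custom_keys`. Below is a hedged sketch of the equivalent plain-PyTorch param grouping; the substring matching is simplified relative to mmcv's actual optimizer constructor:

```python
import torch

def param_groups(model, base_wd=0.0005, zero_decay_keys=('theta', 'phi')):
    # Parameters whose name contains a matching key get weight_decay * 0.
    decayed, undecayed = [], []
    for name, p in model.named_parameters():
        (undecayed if any(k in name for k in zero_decay_keys)
         else decayed).append(p)
    return [
        {'params': decayed, 'weight_decay': base_wd},
        {'params': undecayed, 'weight_decay': 0.0},  # wd_mult=0.
    ]

model = torch.nn.Linear(4, 4)  # stand-in model for the sketch
opt = torch.optim.SGD(param_groups(model), lr=0.01, momentum=0.9)
```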
mmsegmentation
mmsegmentation-master/configs/dnlnet/dnlnet.yml
Collections:
- Name: DNLNet
  Metadata:
    Training Data:
    - Cityscapes
    - ADE20K
  Paper:
    URL: https://arxiv.org/abs/2006.06668
    Title: Disentangled Non-Local Neural Networks
  README: configs/dnlnet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dnl_head.py#L88
    Version: v0.17.0
  Converted From:
    Code: https://github.com/yinmh17/DNL-Semantic-Segmentation
Models:
- Name: dnl_r50-d8_512x1024_40k_cityscapes
  In Collection: DNLNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 390.62
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 7.3
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.61
  Config: configs/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_40k_cityscapes/dnl_r50-d8_512x1024_40k_cityscapes_20200904_233629-53d4ea93.pth
- Name: dnl_r101-d8_512x1024_40k_cityscapes
  In Collection: DNLNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 510.2
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 10.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.31
  Config: configs/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_40k_cityscapes/dnl_r101-d8_512x1024_40k_cityscapes_20200904_233629-9928ffef.pth
- Name: dnl_r50-d8_769x769_40k_cityscapes
  In Collection: DNLNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 666.67
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 9.2
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.44
      mIoU(ms+flip): 80.27
  Config: configs/dnlnet/dnl_r50-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_40k_cityscapes/dnl_r50-d8_769x769_40k_cityscapes_20200820_232206-0f283785.pth
- Name: dnl_r101-d8_769x769_40k_cityscapes
  In Collection: DNLNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 980.39
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 12.6
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 76.39
      mIoU(ms+flip): 77.77
  Config: configs/dnlnet/dnl_r101-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_40k_cityscapes/dnl_r101-d8_769x769_40k_cityscapes_20200820_171256-76c596df.pth
- Name: dnl_r50-d8_512x1024_80k_cityscapes
  In Collection: DNLNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.33
  Config: configs/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x1024_80k_cityscapes/dnl_r50-d8_512x1024_80k_cityscapes_20200904_233629-58b2f778.pth
- Name: dnl_r101-d8_512x1024_80k_cityscapes
  In Collection: DNLNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 80.41
  Config: configs/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x1024_80k_cityscapes/dnl_r101-d8_512x1024_80k_cityscapes_20200904_233629-758e2dd4.pth
- Name: dnl_r50-d8_769x769_80k_cityscapes
  In Collection: DNLNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.36
      mIoU(ms+flip): 80.7
  Config: configs/dnlnet/dnl_r50-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_769x769_80k_cityscapes/dnl_r50-d8_769x769_80k_cityscapes_20200820_011925-366bc4c7.pth
- Name: dnl_r101-d8_769x769_80k_cityscapes
  In Collection: DNLNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.41
      mIoU(ms+flip): 80.68
  Config: configs/dnlnet/dnl_r101-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_769x769_80k_cityscapes/dnl_r101-d8_769x769_80k_cityscapes_20200821_051111-95ff84ab.pth
- Name: dnl_r50-d8_512x512_80k_ade20k
  In Collection: DNLNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 48.4
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.8
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 41.76
      mIoU(ms+flip): 42.99
  Config: configs/dnlnet/dnl_r50-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_80k_ade20k/dnl_r50-d8_512x512_80k_ade20k_20200826_183354-1cf6e0c1.pth
- Name: dnl_r101-d8_512x512_80k_ade20k
  In Collection: DNLNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 79.74
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 12.8
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.76
      mIoU(ms+flip): 44.91
  Config: configs/dnlnet/dnl_r101-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_80k_ade20k/dnl_r101-d8_512x512_80k_ade20k_20200826_183354-d820d6ea.pth
- Name: dnl_r50-d8_512x512_160k_ade20k
  In Collection: DNLNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 41.87
      mIoU(ms+flip): 43.01
  Config: configs/dnlnet/dnl_r50-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r50-d8_512x512_160k_ade20k/dnl_r50-d8_512x512_160k_ade20k_20200826_183350-37837798.pth
- Name: dnl_r101-d8_512x512_160k_ade20k
  In Collection: DNLNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 44.25
      mIoU(ms+flip): 45.78
  Config: configs/dnlnet/dnl_r101-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dnlnet/dnl_r101-d8_512x512_160k_ade20k/dnl_r101-d8_512x512_160k_ade20k_20200826_183350-ed522c61.pth
7,410
31.362445
169
yml
mmsegmentation
mmsegmentation-master/configs/dpt/README.md
# DPT

[Vision Transformers for Dense Prediction](https://arxiv.org/abs/2103.13413)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/isl-org/DPT">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dpt_head.py#L215">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

We introduce dense vision transformers, an architecture that leverages vision transformers in place of convolutional networks as a backbone for dense prediction tasks. We assemble tokens from various stages of the vision transformer into image-like representations at various resolutions and progressively combine them into full-resolution predictions using a convolutional decoder. The transformer backbone processes representations at a constant and relatively high resolution and has a global receptive field at every stage. These properties allow the dense vision transformer to provide finer-grained and more globally coherent predictions when compared to fully-convolutional networks. Our experiments show that this architecture yields substantial improvements on dense prediction tasks, especially when a large amount of training data is available. For monocular depth estimation, we observe an improvement of up to 28% in relative performance when compared to a state-of-the-art fully-convolutional network. When applied to semantic segmentation, dense vision transformers set a new state of the art on ADE20K with 49.02% mIoU. We further show that the architecture can be fine-tuned on smaller datasets such as NYUv2, KITTI, and Pascal Context where it also sets the new state of the art. Our models are available at [this https URL](https://github.com/isl-org/DPT).

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142901057-00aabea5-dab4-43d3-a14a-5f73eb5dd9b9.png" width="80%"/>
</div>

## Citation

```bibtex
@article{dosovitskiy2020,
    title={An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale},
    author={Dosovitskiy, Alexey and Beyer, Lucas and Kolesnikov, Alexander and Weissenborn, Dirk and Zhai, Xiaohua and Unterthiner, Thomas and Dehghani, Mostafa and Minderer, Matthias and Heigold, Georg and Gelly, Sylvain and Uszkoreit, Jakob and Houlsby, Neil},
    journal={arXiv preprint arXiv:2010.11929},
    year={2020}
}

@article{Ranftl2021,
    author = {Ren\'{e} Ranftl and Alexey Bochkovskiy and Vladlen Koltun},
    title = {Vision Transformers for Dense Prediction},
    journal = {ArXiv preprint},
    year = {2021},
}
```

## Usage

To use the pre-trained models of other repositories, it is necessary to convert their checkpoint keys. We provide a script [`vit2mmseg.py`](../../tools/model_converters/vit2mmseg.py) in the tools directory to convert the keys of models from [timm](https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py) to MMSegmentation style.

```shell
python tools/model_converters/vit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH}
```

E.g.

```shell
python tools/model_converters/vit2mmseg.py https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_base_p16_224-80ecf9dd.pth pretrain/jx_vit_base_p16_224-80ecf9dd.pth
```

This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.

## Results and models

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| DPT | ViT-B | 512x512 | 160000 | 8.09 | 10.41 | 46.97 | 48.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/dpt/dpt_vit-b16_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-db31cf52.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-20210809_172025.log.json) |
4,889
70.911765
1,375
md
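The README's Usage section describes converting timm ViT checkpoints before training. Below is a hedged sketch of what such a converter does: load a source state dict, rename keys to the target naming scheme, and save. The rename rule shown is illustrative, not the real `vit2mmseg.py` mapping.

```python
import torch

def convert(src_path, dst_path):
    # Checkpoints may wrap weights in a 'state_dict' entry.
    ckpt = torch.load(src_path, map_location='cpu')
    state = ckpt.get('state_dict', ckpt)
    # Hypothetical rename rule for illustration only.
    converted = {k.replace('blocks.', 'layers.'): v for k, v in state.items()}
    torch.save(converted, dst_path)
```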
mmsegmentation
mmsegmentation-master/configs/dpt/dpt.yml
Collections:
- Name: DPT
  Metadata:
    Training Data:
    - ADE20K
  Paper:
    URL: https://arxiv.org/abs/2103.13413
    Title: Vision Transformers for Dense Prediction
  README: configs/dpt/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/dpt_head.py#L215
    Version: v0.17.0
  Converted From:
    Code: https://github.com/isl-org/DPT
Models:
- Name: dpt_vit-b16_512x512_160k_ade20k
  In Collection: DPT
  Metadata:
    backbone: ViT-B
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 96.06
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.09
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 46.97
      mIoU(ms+flip): 48.34
  Config: configs/dpt/dpt_vit-b16_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/dpt/dpt_vit-b16_512x512_160k_ade20k/dpt_vit-b16_512x512_160k_ade20k-db31cf52.pth
1,055
26.789474
142
yml
mmsegmentation
mmsegmentation-master/configs/dpt/dpt_vit-b16_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/dpt_vit-b16.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]

# AdamW optimizer, no weight decay for position embedding & layer norm
# in backbone
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.00006,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys={
            'pos_embed': dict(decay_mult=0.),
            'cls_token': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))

lr_config = dict(
    _delete_=True,
    policy='poly',
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-6,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)

# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(samples_per_gpu=2, workers_per_gpu=2)
844
24.606061
74
py
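A small numeric sketch of the schedule this config describes: linear warmup from `lr * warmup_ratio` over `warmup_iters` steps, then polynomial decay with `power=1.0` down to `min_lr=0`. This mirrors mmcv's 'poly' policy with linear warmup only approximately, as an illustration of the shape:

```python
def dpt_lr(step, base_lr=6e-5, warmup_iters=1500, warmup_ratio=1e-6,
           max_iters=160000, power=1.0, min_lr=0.0):
    if step < warmup_iters:
        # Linear warmup: starts near base_lr * warmup_ratio, ends at base_lr.
        k = (1 - step / warmup_iters) * (1 - warmup_ratio)
        return base_lr * (1 - k)
    # Polynomial decay toward min_lr.
    frac = (1 - step / max_iters) ** power
    return (base_lr - min_lr) * frac + min_lr

print(dpt_lr(0), dpt_lr(1500), dpt_lr(160000))  # ~6e-11, ~5.9e-05, 0.0
```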
mmsegmentation
mmsegmentation-master/configs/emanet/README.md
# EMANet

[Expectation-Maximization Attention Networks for Semantic Segmentation](https://arxiv.org/abs/1907.13426)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://xialipku.github.io/EMANet">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ema_head.py#L80">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Self-attention mechanism has been widely used for various tasks. It is designed to compute the representation of each position by a weighted sum of the features at all positions. Thus, it can capture long-range relations for computer vision tasks. However, it is computationally consuming, since the attention maps are computed w.r.t all other positions. In this paper, we formulate the attention mechanism into an expectation-maximization manner and iteratively estimate a much more compact set of bases upon which the attention maps are computed. By a weighted summation upon these bases, the resulting representation is low-rank and deprecates noisy information from the input. The proposed Expectation-Maximization Attention (EMA) module is robust to the variance of input and is also friendly in memory and computation. Moreover, we set up the bases maintenance and normalization methods to stabilize its training procedure. We conduct extensive experiments on popular semantic segmentation benchmarks including PASCAL VOC, PASCAL Context and COCO Stuff, on which we set new records.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142901186-7bfe15e2-805a-420e-81b0-74f214f20a36.png" width="80%"/>
</div>

## Citation

```bibtex
@inproceedings{li2019expectation,
  title={Expectation-maximization attention networks for semantic segmentation},
  author={Li, Xia and Zhong, Zhisheng and Wu, Jianlong and Yang, Yibo and Lin, Zhouchen and Liu, Hong},
  booktitle={Proceedings of the IEEE International Conference on Computer Vision},
  pages={9167--9176},
  year={2019}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------ | -------- |
| EMANet | R-50-D8 | 512x1024 | 80000 | 5.4 | 4.58 | 77.59 | 79.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_512x1024_80k_cityscapes/emanet_r50-d8_512x1024_80k_cityscapes_20200901_100301-c43fcef1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_512x1024_80k_cityscapes/emanet_r50-d8_512x1024_80k_cityscapes-20200901_100301.log.json) |
| EMANet | R-101-D8 | 512x1024 | 80000 | 6.2 | 2.87 | 79.10 | 81.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_512x1024_80k_cityscapes/emanet_r101-d8_512x1024_80k_cityscapes_20200901_100301-2d970745.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_512x1024_80k_cityscapes/emanet_r101-d8_512x1024_80k_cityscapes-20200901_100301.log.json) |
| EMANet | R-50-D8 | 769x769 | 80000 | 8.9 | 1.97 | 79.33 | 80.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_769x769_80k_cityscapes/emanet_r50-d8_769x769_80k_cityscapes_20200901_100301-16f8de52.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_769x769_80k_cityscapes/emanet_r50-d8_769x769_80k_cityscapes-20200901_100301.log.json) |
| EMANet | R-101-D8 | 769x769 | 80000 | 10.1 | 1.22 | 79.62 | 81.00 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_769x769_80k_cityscapes/emanet_r101-d8_769x769_80k_cityscapes_20200901_100301-47a324ce.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_769x769_80k_cityscapes/emanet_r101-d8_769x769_80k_cityscapes-20200901_100301.log.json) |
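The expectation-maximization attention described in the abstract is compact enough to sketch in a few lines. The following is a minimal, self-contained illustration of one EMA forward pass; it is not the repository's `EMAModule` (whose exact normalization and momentum-based bases maintenance live in `mmseg/models/decode_heads/ema_head.py`), and the tensor names are assumptions:

```python
import torch
import torch.nn.functional as F


def ema_attention(x, bases, num_iters=3):
    """Minimal EMA sketch: x is (B, C, N) pixel features, bases is (B, C, K)."""
    for _ in range(num_iters):
        # E-step: soft responsibility of each pixel w.r.t. each base.
        z = F.softmax(torch.einsum('bcn,bck->bnk', x, bases), dim=2)
        # M-step: re-estimate bases as responsibility-weighted pixel means.
        bases = torch.einsum('bcn,bnk->bck',
                             x, z / (z.sum(dim=1, keepdim=True) + 1e-6))
        bases = F.normalize(bases, dim=1)  # keep bases on the unit sphere
    # Low-rank reconstruction of the feature map from the compact bases.
    return torch.einsum('bck,bnk->bcn', bases, z)


x = torch.randn(2, 64, 32 * 32)      # 2 images, 64 channels, 32x32 pixels
bases = torch.randn(2, 64, 8)        # K=8 compact bases
assert ema_attention(x, bases).shape == x.shape
```

Because the attention maps are computed against K bases instead of all N positions, the cost drops from O(N^2) to O(NK), which is the memory and computation advantage the abstract claims.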
5464
115.276596
1088
md
mmsegmentation
mmsegmentation-master/configs/emanet/emanet.yml
Collections:
- Name: EMANet
  Metadata:
    Training Data:
    - Cityscapes
  Paper:
    URL: https://arxiv.org/abs/1907.13426
    Title: Expectation-Maximization Attention Networks for Semantic Segmentation
  README: configs/emanet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ema_head.py#L80
    Version: v0.17.0
  Converted From:
    Code: https://xialipku.github.io/EMANet
Models:
- Name: emanet_r50-d8_512x1024_80k_cityscapes
  In Collection: EMANet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 218.34
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 5.4
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.59
      mIoU(ms+flip): 79.44
  Config: configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_512x1024_80k_cityscapes/emanet_r50-d8_512x1024_80k_cityscapes_20200901_100301-c43fcef1.pth
- Name: emanet_r101-d8_512x1024_80k_cityscapes
  In Collection: EMANet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 348.43
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 6.2
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.1
      mIoU(ms+flip): 81.21
  Config: configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_512x1024_80k_cityscapes/emanet_r101-d8_512x1024_80k_cityscapes_20200901_100301-2d970745.pth
- Name: emanet_r50-d8_769x769_80k_cityscapes
  In Collection: EMANet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 80000
    inference time (ms/im):
    - value: 507.61
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 8.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.33
      mIoU(ms+flip): 80.49
  Config: configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r50-d8_769x769_80k_cityscapes/emanet_r50-d8_769x769_80k_cityscapes_20200901_100301-16f8de52.pth
- Name: emanet_r101-d8_769x769_80k_cityscapes
  In Collection: EMANet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 80000
    inference time (ms/im):
    - value: 819.67
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 10.1
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.62
      mIoU(ms+flip): 81.0
  Config: configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/emanet/emanet_r101-d8_769x769_80k_cityscapes/emanet_r101-d8_769x769_80k_cityscapes_20200901_100301-47a324ce.pth
3256
30.317308
175
yml
mmsegmentation
mmsegmentation-master/configs/emanet/emanet_r101-d8_512x1024_80k_cityscapes.py
_base_ = './emanet_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
134
44
79
py
mmsegmentation
mmsegmentation-master/configs/emanet/emanet_r101-d8_769x769_80k_cityscapes.py
_base_ = './emanet_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/emanet/emanet_r50-d8_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/emanet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
164
32
76
py
mmsegmentation
mmsegmentation-master/configs/emanet/emanet_r50-d8_769x769_80k_cityscapes.py
_base_ = [
    '../_base_/models/emanet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
351
34.2
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/README.md
# EncNet

[Context Encoding for Semantic Segmentation](https://arxiv.org/abs/1803.08904)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/zhanghang1989/PyTorch-Encoding">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/enc_head.py#L63">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Recent work has made significant progress in improving spatial resolution for pixelwise labeling with Fully Convolutional Network (FCN) framework by employing Dilated/Atrous convolution, utilizing multi-scale features and refining boundaries. In this paper, we explore the impact of global contextual information in semantic segmentation by introducing the Context Encoding Module, which captures the semantic context of scenes and selectively highlights class-dependent featuremaps. The proposed Context Encoding Module significantly improves semantic segmentation results with only marginal extra computation cost over FCN. Our approach has achieved new state-of-the-art results: 51.7% mIoU on PASCAL-Context and 85.9% mIoU on PASCAL VOC 2012. Our single model achieves a final score of 0.5567 on the ADE20K test set, which surpasses the winning entry of the COCO-Place Challenge in 2017. In addition, we also explore how the Context Encoding Module can improve the feature representation of relatively shallow networks for image classification on the CIFAR-10 dataset. Our 14-layer network has achieved an error rate of 3.45%, which is comparable with state-of-the-art approaches with over 10 times more layers. The source code for the complete system is publicly available.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142901276-b364fbbf-3bdb-4000-9d31-b9a135e30935.png" width="70%"/>
</div>

## Citation

```bibtex
@InProceedings{Zhang_2018_CVPR,
  author = {Zhang, Hang and Dana, Kristin and Shi, Jianping and Zhang, Zhongyue and Wang, Xiaogang and Tyagi, Ambrish and Agrawal, Amit},
  title = {Context Encoding for Semantic Segmentation},
  booktitle = {The IEEE Conference on Computer Vision and Pattern Recognition (CVPR)},
  month = {June},
  year = {2018}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| EncNet | R-50-D8 | 512x1024 | 40000 | 8.6 | 4.58 | 75.67 | 77.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes_20200621_220958-68638a47.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes-20200621_220958.log.json) |
| EncNet | R-101-D8 | 512x1024 | 40000 | 12.1 | 2.66 | 75.81 | 77.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes_20200621_220933-35e0a3e8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes-20200621_220933.log.json) |
| EncNet | R-50-D8 | 769x769 | 40000 | 9.8 | 1.82 | 76.24 | 77.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes_20200621_220958-3bcd2884.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes-20200621_220958.log.json) |
| EncNet | R-101-D8 | 769x769 | 40000 | 13.7 | 1.26 | 74.25 | 76.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes_20200621_220933-2fafed55.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes-20200621_220933.log.json) |
| EncNet | R-50-D8 | 512x1024 | 80000 | - | - | 77.94 | 79.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes_20200622_003554-fc5c5624.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes-20200622_003554.log.json) |
| EncNet | R-101-D8 | 512x1024 | 80000 | - | - | 78.55 | 79.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes_20200622_003555-1de64bec.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes-20200622_003555.log.json) |
| EncNet | R-50-D8 | 769x769 | 80000 | - | - | 77.44 | 78.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes_20200622_003554-55096dcb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes-20200622_003554.log.json) |
| EncNet | R-101-D8 | 769x769 | 80000 | - | - | 76.10 | 76.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes_20200622_003555-470ef79d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes-20200622_003555.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| EncNet | R-50-D8 | 512x512 | 80000 | 10.1 | 22.81 | 39.53 | 41.17 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k_20200622_042412-44b46b04.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k-20200622_042412.log.json) |
| EncNet | R-101-D8 | 512x512 | 80000 | 13.6 | 14.87 | 42.11 | 43.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k_20200622_101128-dd35e237.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k-20200622_101128.log.json) |
| EncNet | R-50-D8 | 512x512 | 160000 | - | - | 40.10 | 41.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k_20200622_101059-b2db95e0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k-20200622_101059.log.json) |
| EncNet | R-101-D8 | 512x512 | 160000 | - | - | 42.61 | 44.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k_20200622_073348-7989641f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k-20200622_073348.log.json) |
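As a rough illustration of the Context Encoding Module described in the abstract, the sketch below soft-assigns pixel features to a small set of learned codewords, aggregates the residuals into a global context vector, and uses it to rescale the feature channels. It is a simplification, not the repository's `EncHead` (see `mmseg/models/decode_heads/enc_head.py`, which also adds the SE-loss branch), and every tensor name here is hypothetical:

```python
import torch
import torch.nn.functional as F


def context_encoding(x, codewords, scale, fc_weight):
    """Minimal sketch: x (B, C, H, W), codewords (K, C), scale (K,)
    smoothing factors, fc_weight (C, C) for channel attention."""
    b, c, h, w = x.shape
    feat = x.view(b, c, -1).transpose(1, 2)               # (B, N, C) pixels
    resid = feat.unsqueeze(2) - codewords                 # (B, N, K, C)
    assign = F.softmax(-scale * resid.pow(2).sum(-1), dim=2)  # soft assignment
    encoded = (assign.unsqueeze(-1) * resid).sum(1).mean(1)   # (B, C) context
    gamma = torch.sigmoid(encoded @ fc_weight.t())        # channel attention
    return x * gamma.view(b, c, 1, 1)                     # highlight featuremaps


x = torch.randn(2, 512, 16, 16)
out = context_encoding(x, torch.randn(32, 512), torch.rand(32),
                       torch.randn(512, 512))
assert out.shape == x.shape
```

The channel rescaling is what lets the module "selectively highlight class-dependent featuremaps" at a cost that is marginal compared with the backbone itself.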
11211
185.866667
1264
md
mmsegmentation
mmsegmentation-master/configs/encnet/encnet.yml
Collections:
- Name: EncNet
  Metadata:
    Training Data:
    - Cityscapes
    - ADE20K
  Paper:
    URL: https://arxiv.org/abs/1803.08904
    Title: Context Encoding for Semantic Segmentation
  README: configs/encnet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/enc_head.py#L63
    Version: v0.17.0
  Converted From:
    Code: https://github.com/zhanghang1989/PyTorch-Encoding
Models:
- Name: encnet_r50-d8_512x1024_40k_cityscapes
  In Collection: EncNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 218.34
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 8.6
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 75.67
      mIoU(ms+flip): 77.08
  Config: configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_40k_cityscapes/encnet_r50-d8_512x1024_40k_cityscapes_20200621_220958-68638a47.pth
- Name: encnet_r101-d8_512x1024_40k_cityscapes
  In Collection: EncNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 375.94
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 12.1
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 75.81
      mIoU(ms+flip): 77.21
  Config: configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_40k_cityscapes/encnet_r101-d8_512x1024_40k_cityscapes_20200621_220933-35e0a3e8.pth
- Name: encnet_r50-d8_769x769_40k_cityscapes
  In Collection: EncNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 549.45
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 9.8
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 76.24
      mIoU(ms+flip): 77.85
  Config: configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_40k_cityscapes/encnet_r50-d8_769x769_40k_cityscapes_20200621_220958-3bcd2884.pth
- Name: encnet_r101-d8_769x769_40k_cityscapes
  In Collection: EncNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 793.65
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 13.7
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 74.25
      mIoU(ms+flip): 76.25
  Config: configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_40k_cityscapes/encnet_r101-d8_769x769_40k_cityscapes_20200621_220933-2fafed55.pth
- Name: encnet_r50-d8_512x1024_80k_cityscapes
  In Collection: EncNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.94
      mIoU(ms+flip): 79.13
  Config: configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x1024_80k_cityscapes/encnet_r50-d8_512x1024_80k_cityscapes_20200622_003554-fc5c5624.pth
- Name: encnet_r101-d8_512x1024_80k_cityscapes
  In Collection: EncNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.55
      mIoU(ms+flip): 79.47
  Config: configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x1024_80k_cityscapes/encnet_r101-d8_512x1024_80k_cityscapes_20200622_003555-1de64bec.pth
- Name: encnet_r50-d8_769x769_80k_cityscapes
  In Collection: EncNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.44
      mIoU(ms+flip): 78.72
  Config: configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_769x769_80k_cityscapes/encnet_r50-d8_769x769_80k_cityscapes_20200622_003554-55096dcb.pth
- Name: encnet_r101-d8_769x769_80k_cityscapes
  In Collection: EncNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 76.1
      mIoU(ms+flip): 76.97
  Config: configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_769x769_80k_cityscapes/encnet_r101-d8_769x769_80k_cityscapes_20200622_003555-470ef79d.pth
- Name: encnet_r50-d8_512x512_80k_ade20k
  In Collection: EncNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 43.84
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 10.1
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 39.53
      mIoU(ms+flip): 41.17
  Config: configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_80k_ade20k/encnet_r50-d8_512x512_80k_ade20k_20200622_042412-44b46b04.pth
- Name: encnet_r101-d8_512x512_80k_ade20k
  In Collection: EncNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 67.25
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 13.6
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.11
      mIoU(ms+flip): 43.61
  Config: configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_80k_ade20k/encnet_r101-d8_512x512_80k_ade20k_20200622_101128-dd35e237.pth
- Name: encnet_r50-d8_512x512_160k_ade20k
  In Collection: EncNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 40.1
      mIoU(ms+flip): 41.71
  Config: configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r50-d8_512x512_160k_ade20k/encnet_r50-d8_512x512_160k_ade20k_20200622_101059-b2db95e0.pth
- Name: encnet_r101-d8_512x512_160k_ade20k
  In Collection: EncNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.61
      mIoU(ms+flip): 44.01
  Config: configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/encnet/encnet_r101-d8_512x512_160k_ade20k/encnet_r101-d8_512x512_160k_ade20k_20200622_073348-7989641f.pth
7665
31.901288
175
yml
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r101-d8_512x1024_40k_cityscapes.py
_base_ = './encnet_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
134
44
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r101-d8_512x1024_80k_cityscapes.py
_base_ = './encnet_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
134
44
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r101-d8_512x512_160k_ade20k.py
_base_ = './encnet_r50-d8_512x512_160k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
130
42.666667
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r101-d8_512x512_20k_voc12aug.py
_base_ = './encnet_r50-d8_512x512_20k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
131
43
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r101-d8_512x512_40k_voc12aug.py
_base_ = './encnet_r50-d8_512x512_40k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
131
43
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r101-d8_512x512_80k_ade20k.py
_base_ = './encnet_r50-d8_512x512_80k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
129
42.333333
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r101-d8_769x769_40k_cityscapes.py
_base_ = './encnet_r50-d8_769x769_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r101-d8_769x769_80k_cityscapes.py
_base_ = './encnet_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r50-d8_512x1024_40k_cityscapes.py
_base_ = [
    '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
164
32
76
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r50-d8_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
164
32
76
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r50-d8_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
252
35.142857
76
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r50-d8_512x512_20k_voc12aug.py
_base_ = [
    '../_base_/models/encnet_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
263
32
77
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r50-d8_512x512_40k_voc12aug.py
_base_ = [
    '../_base_/models/encnet_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
263
32
77
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r50-d8_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
251
35
76
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r50-d8_769x769_40k_cityscapes.py
_base_ = [
    '../_base_/models/encnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
351
34.2
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r50-d8_769x769_80k_cityscapes.py
_base_ = [
    '../_base_/models/encnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
351
34.2
79
py
mmsegmentation
mmsegmentation-master/configs/encnet/encnet_r50s-d8_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/encnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    backbone=dict(stem_channels=128),
    decode_head=dict(num_classes=150),
    auxiliary_head=dict(num_classes=150))
293
31.666667
73
py
mmsegmentation
mmsegmentation-master/configs/erfnet/README.md
# ERFNet

[ERFNet: Efficient Residual Factorized ConvNet for Real-time Semantic Segmentation](http://www.robesafe.uah.es/personal/eduardo.romera/pdfs/Romera17tits.pdf)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/Eromera/erfnet_pytorch">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.20.0/mmseg/models/backbones/erfnet.py#L321">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Semantic segmentation is a challenging task that addresses most of the perception needs of intelligent vehicles (IVs) in a unified way. Deep neural networks excel at this task, as they can be trained end-to-end to accurately classify multiple object categories in an image at pixel level. However, a good tradeoff between high quality and computational resources is yet not present in the state-of-the-art semantic segmentation approaches, limiting their application in real vehicles. In this paper, we propose a deep architecture that is able to run in real time while providing accurate semantic segmentation. The core of our architecture is a novel layer that uses residual connections and factorized convolutions in order to remain efficient while retaining remarkable accuracy. Our approach is able to run at over 83 FPS on a single Titan X, and at 7 FPS on a Jetson TX1 (embedded device). A comprehensive set of experiments on the publicly available Cityscapes data set demonstrates that our system achieves an accuracy that is similar to the state of the art, while being orders of magnitude faster to compute than other architectures that achieve top precision. The resulting tradeoff makes our model an ideal approach for scene understanding in IV applications. The code is publicly available at: https://github.com/Eromera/erfnet.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/143479729-ea7951f6-1a3c-47d6-aaee-62c5759c0638.png" width="60%"/>
</div>

## Citation

```bibtex
@article{romera2017erfnet,
  title={Erfnet: Efficient residual factorized convnet for real-time semantic segmentation},
  author={Romera, Eduardo and Alvarez, Jos{\'e} M and Bergasa, Luis M and Arroyo, Roberto},
  journal={IEEE Transactions on Intelligent Transportation Systems},
  volume={19},
  number={1},
  pages={263--272},
  year={2017},
  publisher={IEEE}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ---: | ------------- | ------ | -------- |
| ERFNet | ERFNet | 512x1024 | 160000 | 6.04 | 15.26 | 72.5 | 74.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes/erfnet_fcn_4x4_512x1024_160k_cityscapes_20220704_162145-dc90157a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes/erfnet_fcn_4x4_512x1024_160k_cityscapes_20220704_162145.log.json) |

Note:

- The model is trained from scratch.
- The last deconvolution layer in the [original paper](https://github.com/Eromera/erfnet_pytorch/blob/master/train/erfnet.py#L123) is replaced by a naive `FCNHead` decoder head and a bilinear upsampling layer, which proved more effective and efficient.
- The performance of this model is sensitive to the seed value; please refer to the log file for the specific seed used. If you train with a different seed, the results may differ from those in the table.
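The "novel layer" mentioned in the abstract is a residual block whose 3x3 convolutions are factorized into 3x1 and 1x3 convolutions. A minimal sketch, assuming plain BatchNorm and omitting the dropout and per-convolution normalization used in the real `mmseg/models/backbones/erfnet.py`:

```python
import torch
import torch.nn as nn


class NonBottleneck1d(nn.Module):
    """Sketch of ERFNet's factorized residual ('non-bottleneck-1D') block."""

    def __init__(self, channels, dilation=1):
        super().__init__()
        self.conv3x1 = nn.Conv2d(channels, channels, (3, 1), padding=(1, 0))
        self.conv1x3 = nn.Conv2d(channels, channels, (1, 3), padding=(0, 1))
        self.conv3x1_d = nn.Conv2d(channels, channels, (3, 1),
                                   padding=(dilation, 0),
                                   dilation=(dilation, 1))
        self.conv1x3_d = nn.Conv2d(channels, channels, (1, 3),
                                   padding=(0, dilation),
                                   dilation=(1, dilation))
        self.bn = nn.BatchNorm2d(channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        out = self.relu(self.conv1x3(self.relu(self.conv3x1(x))))
        out = self.bn(self.conv1x3_d(self.relu(self.conv3x1_d(out))))
        return self.relu(out + x)  # residual connection keeps it cheap


block = NonBottleneck1d(64, dilation=2)
assert block(torch.randn(1, 64, 32, 64)).shape == (1, 64, 32, 64)
```

Factorizing each 3x3 kernel into a 3x1 plus 1x3 pair cuts the parameter and FLOP count of the block by roughly a third, which is where the real-time speed in the table comes from.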
4602
82.690909
1338
md
mmsegmentation
mmsegmentation-master/configs/erfnet/erfnet.yml
Collections:
- Name: ERFNet
  Metadata:
    Training Data:
    - Cityscapes
  Paper:
    URL: http://www.robesafe.uah.es/personal/eduardo.romera/pdfs/Romera17tits.pdf
    Title: 'ERFNet: Efficient Residual Factorized ConvNet for Real-time Semantic Segmentation'
  README: configs/erfnet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.20.0/mmseg/models/backbones/erfnet.py#L321
    Version: v0.20.0
  Converted From:
    Code: https://github.com/Eromera/erfnet_pytorch
Models:
- Name: erfnet_fcn_4x4_512x1024_160k_cityscapes
  In Collection: ERFNet
  Metadata:
    backbone: ERFNet
    crop size: (512,1024)
    lr schd: 160000
    inference time (ms/im):
    - value: 65.53
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 6.04
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 72.5
      mIoU(ms+flip): 74.75
  Config: configs/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes/erfnet_fcn_4x4_512x1024_160k_cityscapes_20220704_162145-dc90157a.pth
1218
31.078947
177
yml
mmsegmentation
mmsegmentation-master/configs/erfnet/erfnet_fcn_4x4_512x1024_160k_cityscapes.py
_base_ = [
    '../_base_/models/erfnet_fcn.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
)
223
23.888889
74
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/README.md
# FastFCN

[FastFCN: Rethinking Dilated Convolution in the Backbone for Semantic Segmentation](https://arxiv.org/abs/1903.11816)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/wuhuikai/FastFCN">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/necks/jpu.py#L12">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Modern approaches for semantic segmentation usually employ dilated convolutions in the backbone to extract high-resolution feature maps, which brings heavy computational complexity and a large memory footprint. To replace the time- and memory-consuming dilated convolutions, we propose a novel joint upsampling module named Joint Pyramid Upsampling (JPU) by formulating the task of extracting high-resolution feature maps into a joint upsampling problem. With the proposed JPU, our method reduces the computation complexity by more than three times without performance loss. Experiments show that JPU is superior to other upsampling modules, and can be plugged into many existing approaches to reduce computation complexity and improve performance. By replacing dilated convolutions with the proposed JPU module, our method achieves state-of-the-art performance on the Pascal Context dataset (mIoU of 53.13%) and the ADE20K dataset (final score of 0.5584) while running 3 times faster.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142901374-6e0252ab-6e0f-4acd-86ad-1e9f49be3185.png" width="70%"/>
</div>

## Citation

```bibtex
@article{wu2019fastfcn,
  title={Fastfcn: Rethinking dilated convolution in the backbone for semantic segmentation},
  author={Wu, Huikai and Zhang, Junge and Huang, Kaiqi and Liang, Kongming and Yu, Yizhou},
  journal={arXiv preprint arXiv:1903.11816},
  year={2019}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------------------------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------ | -------- |
| FastFCN + DeepLabV3 | R-50-D32 | 512x1024 | 80000 | 5.67 | 2.64 | 79.12 | 80.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes_20210928_053722-5d1a2648.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes_20210928_053722.log.json) |
| FastFCN + DeepLabV3 (4x4) | R-50-D32 | 512x1024 | 80000 | 9.79 | - | 79.52 | 80.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes_20210924_214357-72220849.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes_20210924_214357.log.json) |
| FastFCN + PSPNet | R-50-D32 | 512x1024 | 80000 | 5.67 | 4.40 | 79.26 | 80.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes_20210928_053722-57749bed.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes_20210928_053722.log.json) |
| FastFCN + PSPNet (4x4) | R-50-D32 | 512x1024 | 80000 | 9.94 | - | 78.76 | 80.03 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes_20210925_061841-77e87b0a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes_20210925_061841.log.json) |
| FastFCN + EncNet | R-50-D32 | 512x1024 | 80000 | 8.15 | 4.77 | 77.97 | 79.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes_20210928_030036-78da5046.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes_20210928_030036.log.json) |
| FastFCN + EncNet (4x4) | R-50-D32 | 512x1024 | 80000 | 15.45 | - | 78.6 | 80.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes_20210926_093217-e1eb6dbb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes_20210926_093217.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------------------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------ | -------- |
| FastFCN + DeepLabV3 | R-50-D32 | 512x512 | 80000 | 8.46 | 12.06 | 41.88 | 42.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k_20211013_190619-3aa40f2d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k_20211013_190619.log.json) |
| FastFCN + DeepLabV3 | R-50-D32 | 512x512 | 160000 | - | - | 43.58 | 44.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k_20211008_152246-27036aee.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k_20211008_152246.log.json) |
| FastFCN + PSPNet | R-50-D32 | 512x512 | 80000 | 8.02 | 19.21 | 41.40 | 42.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k_20210930_225137-993d07c8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k_20210930_225137.log.json) |
| FastFCN + PSPNet | R-50-D32 | 512x512 | 160000 | - | - | 42.63 | 43.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k_20211008_105455-e8f5a2fd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k_20211008_105455.log.json) |
| FastFCN + EncNet | R-50-D32 | 512x512 | 80000 | 9.67 | 17.23 | 40.88 | 42.36 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k_20210930_225214-65aef6dd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k_20210930_225214.log.json) |
| FastFCN + EncNet | R-50-D32 | 512x512 | 160000 | - | - | 42.50 | 44.21 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k_20211008_105456-d875ce3c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k_20211008_105456.log.json) |

Note:

- `4x4` means 4 GPUs with 4 samples per GPU; the default setting is 4 GPUs with 2 samples per GPU.
- Results of [DeepLabV3 (mIoU: 79.32)](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/deeplabv3), [PSPNet (mIoU: 78.55)](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/pspnet) and [ENCNet (mIoU: 77.94)](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/encnet) can be found in each original repository.
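For orientation, the JPU neck shared by all of the variants above can be sketched in a few lines. This is a simplification, not the repository's `mmseg/models/necks/jpu.py` (which uses separable convolutions with normalization and activation layers); the sketch approximates them with plain grouped convolutions:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class JPUSketch(nn.Module):
    """Minimal Joint Pyramid Upsampling sketch: upsample the three deepest
    backbone stages to one resolution, concatenate, then apply parallel
    dilated convolutions to mimic a large effective receptive field."""

    def __init__(self, in_channels=(512, 1024, 2048), width=512,
                 dilations=(1, 2, 4, 8)):
        super().__init__()
        self.reduce = nn.ModuleList(
            nn.Conv2d(c, width, 1) for c in in_channels)
        self.dilated = nn.ModuleList(
            nn.Conv2d(3 * width, width, 3, padding=d, dilation=d,
                      groups=width)
            for d in dilations)

    def forward(self, feats):
        target = feats[0].shape[2:]  # resolution of the shallowest input
        ups = [F.interpolate(conv(f), size=target, mode='bilinear',
                             align_corners=False)
               for conv, f in zip(self.reduce, feats)]
        x = torch.cat(ups, dim=1)
        return torch.cat([conv(x) for conv in self.dilated], dim=1)


feats = [torch.randn(1, 512, 64, 64), torch.randn(1, 1024, 32, 32),
         torch.randn(1, 2048, 16, 16)]
assert JPUSketch()(feats).shape == (1, 2048, 64, 64)
```

Because the expensive dilated backbone stages are replaced by this cheap joint upsampling, the decode heads above (ASPP, PSP, Enc) can consume stride-8 features from a plain stride-32 ResNet, which is where the speedup in the tables comes from.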
12561
195.28125
972
md
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn.yml
Collections:
- Name: FastFCN
  Metadata:
    Training Data:
    - Cityscapes
    - ADE20K
  Paper:
    URL: https://arxiv.org/abs/1903.11816
    Title: 'FastFCN: Rethinking Dilated Convolution in the Backbone for Semantic Segmentation'
  README: configs/fastfcn/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/necks/jpu.py#L12
    Version: v0.18.0
  Converted From:
    Code: https://github.com/wuhuikai/FastFCN
Models:
- Name: fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 378.79
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 5.67
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.12
      mIoU(ms+flip): 80.58
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes_20210928_053722-5d1a2648.pth
- Name: fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,1024)
    lr schd: 80000
    Training Memory (GB): 9.79
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.52
      mIoU(ms+flip): 80.91
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes_20210924_214357-72220849.pth
- Name: fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 227.27
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 5.67
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.26
      mIoU(ms+flip): 80.86
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes_20210928_053722-57749bed.pth
- Name: fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,1024)
    lr schd: 80000
    Training Memory (GB): 9.94
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.76
      mIoU(ms+flip): 80.03
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes_20210925_061841-77e87b0a.pth
- Name: fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 209.64
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 8.15
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.97
      mIoU(ms+flip): 79.92
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes_20210928_030036-78da5046.pth
- Name: fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,1024)
    lr schd: 80000
    Training Memory (GB): 15.45
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.6
      mIoU(ms+flip): 80.25
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes_20210926_093217-e1eb6dbb.pth
- Name: fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 82.92
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.46
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 41.88
      mIoU(ms+flip): 42.91
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k_20211013_190619-3aa40f2d.pth
- Name: fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.58
      mIoU(ms+flip): 44.92
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k_20211008_152246-27036aee.pth
- Name: fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 52.06
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.02
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 41.4
      mIoU(ms+flip): 42.12
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k_20210930_225137-993d07c8.pth
- Name: fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.63
      mIoU(ms+flip): 43.71
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k_20211008_105455-e8f5a2fd.pth
- Name: fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 58.04
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.67
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 40.88
      mIoU(ms+flip): 42.36
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k_20210930_225214-65aef6dd.pth
- Name: fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k
  In Collection: FastFCN
  Metadata:
    backbone: R-50-D32
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.5
      mIoU(ms+flip): 44.21
  Config: configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k_20211008_105456-d875ce3c.pth
8314
34.233051
204
yml
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_4x4_512x1024_80k_cityscapes.py
# model settings
_base_ = './fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py'
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
)
143
19.571429
64
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x1024_80k_cityscapes.py
# model settings
_base_ = './fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    decode_head=dict(
        _delete_=True,
        type='ASPPHead',
        in_channels=2048,
        in_index=2,
        channels=512,
        dilations=(1, 12, 24, 36),
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
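# Editor's sketch (illustration only, not part of the original config):
# `_delete_=True` tells the config merger to replace the inherited
# `decode_head` wholesale instead of recursively updating it, so none of the
# PSPHead settings from the base file leak into the new ASPPHead. A
# simplified stand-in for mmcv's Config merge rule:
def merge(base, override):
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and value.pop('_delete_', False):
            merged[key] = value                     # replace wholesale
        elif isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge(merged[key], value)  # recursive update
        else:
            merged[key] = value
    return merged


_base_head = dict(decode_head=dict(type='PSPHead', pool_scales=(1, 2, 3, 6)))
_override = dict(decode_head=dict(_delete_=True, type='ASPPHead'))
assert merge(_base_head, _override)['decode_head'] == dict(type='ASPPHead')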
624
28.761905
74
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_160k_ade20k.py
# model settings
_base_ = './fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    decode_head=dict(
        _delete_=True,
        type='ASPPHead',
        in_channels=2048,
        in_index=2,
        channels=512,
        dilations=(1, 12, 24, 36),
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
621
28.619048
74
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_aspp_512x512_80k_ade20k.py
# model settings
_base_ = './fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    decode_head=dict(
        _delete_=True,
        type='ASPPHead',
        in_channels=2048,
        in_index=2,
        channels=512,
        dilations=(1, 12, 24, 36),
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
620
28.571429
74
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_4x4_512x1024_80k_cityscapes.py
# model settings
_base_ = './fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py'
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
)
142
19.428571
63
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x1024_80k_cityscapes.py
# model settings
_base_ = './fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    decode_head=dict(
        _delete_=True,
        type='EncHead',
        in_channels=[512, 1024, 2048],
        in_index=(0, 1, 2),
        channels=512,
        num_codes=32,
        use_se_loss=True,
        add_lateral=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_se_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
786
30.48
73
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_160k_ade20k.py
# model settings
_base_ = './fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    decode_head=dict(
        _delete_=True,
        type='EncHead',
        in_channels=[512, 1024, 2048],
        in_index=(0, 1, 2),
        channels=512,
        num_codes=32,
        use_se_loss=True,
        add_lateral=False,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_se_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
783
30.36
73
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_enc_512x512_80k_ade20k.py
# model settings
_base_ = './fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py'
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    decode_head=dict(
        _delete_=True,
        type='EncHead',
        in_channels=[512, 1024, 2048],
        in_index=(0, 1, 2),
        channels=512,
        num_codes=32,
        use_se_loss=True,
        add_lateral=False,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        loss_se_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.2)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
782
30.32
73
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_4x4_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/fastfcn_r50-d32_jpu_psp.py',
    '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
)
239
23
71
py
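The `4x4` variants change only the data sampler relative to their base configs; following the repo's `<gpus>x<samples_per_gpu>` naming, `samples_per_gpu=4` on 4 GPUs gives a global batch of 16. The same override can also be applied at run time instead of via a derived file; a sketch under the same mmcv < 2.0 assumption:

```python
# Sketch only (mmcv < 2.0 assumed): merge the batch override into a loaded
# config rather than writing a derived config file.
from mmcv import Config

cfg = Config.fromfile(
    'configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py')
cfg.merge_from_dict({'data.samples_per_gpu': 4, 'data.workers_per_gpu': 4})
print(cfg.data.samples_per_gpu)  # expected: 4
```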
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/fastfcn_r50-d32_jpu_psp.py',
    '../_base_/datasets/cityscapes.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
178
28.833333
71
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/fastfcn_r50-d32_jpu_psp.py',
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
266
32.375
76
py
mmsegmentation
mmsegmentation-master/configs/fastfcn/fastfcn_r50-d32_jpu_psp_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/fastfcn_r50-d32_jpu_psp.py',
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
265
32.25
76
py
mmsegmentation
mmsegmentation-master/configs/fastscnn/README.md
# Fast-SCNN

[Fast-SCNN for Semantic Segmentation](https://arxiv.org/abs/1902.04502)

## Introduction

<!-- [ALGORITHM] -->

<a href="">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/fast_scnn.py#L272">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

The encoder-decoder framework is state-of-the-art for offline semantic image segmentation. Since the rise in autonomous systems, real-time computation is increasingly desirable. In this paper, we introduce fast segmentation convolutional neural network (Fast-SCNN), an above real-time semantic segmentation model on high resolution image data (1024x2048px) suited to efficient computation on embedded devices with low memory. Building on existing two-branch methods for fast segmentation, we introduce our 'learning to downsample' module which computes low-level features for multiple resolution branches simultaneously. Our network combines spatial detail at high resolution with deep features extracted at lower resolution, yielding an accuracy of 68.0% mean intersection over union at 123.5 frames per second on Cityscapes. We also show that large scale pre-training is unnecessary. We thoroughly validate our metric in experiments with ImageNet pre-training and the coarse labeled data of Cityscapes. Finally, we show even faster computation with competitive results on subsampled inputs, without any network modifications.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142901444-705b4ff4-6d1e-409b-899a-37bf3a6b69ce.png" width="80%"/>
</div>

## Citation

```bibtex
@article{poudel2019fast,
  title={Fast-scnn: Fast semantic segmentation network},
  author={Poudel, Rudra PK and Liwicki, Stephan and Cipolla, Roberto},
  journal={arXiv preprint arXiv:1902.04502},
  year={2019}
}
```

## Results and models

### Cityscapes

| Method   | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU  | mIoU(ms+flip) | config                                                                                                                      | download                                                                                                                                                                                                                                                                                                                                                 |
| -------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | --------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| FastSCNN | FastSCNN | 512x1024  | 160000  | 3.3      | 56.45          | 70.96 | 72.65         | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853-0cec9937.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/fast_scnn/fast_scnn_lr0.12_8x4_160k_cityscapes/fast_scnn_lr0.12_8x4_160k_cityscapes_20210630_164853.log.json) |
3606
82.883721
1128
md
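The abstract's central idea is the shared "learning to downsample" stem: one strided conv plus two depthwise-separable convs produce 1/8-resolution features consumed by both the detail and the global branch. A rough PyTorch sketch of that stem (channel and stride choices follow the paper; this is illustrative, not the mmseg implementation linked above):

```python
# Illustrative PyTorch sketch of Fast-SCNN's "learning to downsample" stem;
# layer sizes follow the paper, not mmseg's fast_scnn.py.
import torch
import torch.nn as nn


def dsconv(cin, cout, stride):
    # Depthwise-separable conv: depthwise 3x3 followed by pointwise 1x1.
    return nn.Sequential(
        nn.Conv2d(cin, cin, 3, stride, 1, groups=cin, bias=False),
        nn.BatchNorm2d(cin), nn.ReLU(inplace=True),
        nn.Conv2d(cin, cout, 1, bias=False),
        nn.BatchNorm2d(cout), nn.ReLU(inplace=True))


learning_to_downsample = nn.Sequential(
    nn.Conv2d(3, 32, 3, stride=2, padding=1, bias=False),
    nn.BatchNorm2d(32), nn.ReLU(inplace=True),
    dsconv(32, 48, 2), dsconv(48, 64, 2))

x = torch.randn(1, 3, 1024, 2048)   # a Cityscapes-sized input
shared = learning_to_downsample(x)  # 1/8 resolution, fed to both branches
print(shared.shape)                 # torch.Size([1, 64, 128, 256])
```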
mmsegmentation
mmsegmentation-master/configs/fastscnn/fast_scnn_lr0.12_8x4_160k_cityscapes.py
_base_ = [
    '../_base_/models/fast_scnn.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# Re-config the data sampler.
data = dict(samples_per_gpu=4, workers_per_gpu=4)
# Re-config the optimizer.
optimizer = dict(type='SGD', lr=0.12, momentum=0.9, weight_decay=4e-5)
341
30.090909
74
py
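Per the same naming scheme, `lr0.12_8x4` encodes a base lr of 0.12 tuned for 8 GPUs x 4 samples/GPU (global batch 32). The config does not rescale the lr automatically; if the batch changes, the common linear-scaling heuristic suggests adjusting it by hand. A tiny hypothetical helper, not part of mmsegmentation:

```python
# Hypothetical helper (an assumption for illustration, not repo code):
# rescale a base learning rate with total batch size under the
# linear-scaling heuristic.
def scaled_lr(base_lr: float, base_batch: int, new_batch: int) -> float:
    return base_lr * new_batch / base_batch


# Halving the global batch from 8 GPUs x 4 to 4 GPUs x 4 halves the lr.
print(scaled_lr(0.12, 8 * 4, 4 * 4))  # -> 0.06
```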