Dataset schema: repo (string, 2–152 chars) · file (string, 15–239 chars) · code (string, 0–58.4M chars) · file_length (int64, 0–58.4M) · avg_line_length (float64, 0–1.81M) · max_line_length (int64, 0–12.7M) · extension_type (string, 364 classes)
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/isaid.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=16), auxiliary_head=dict(num_classes=16))
248
34.571429
74
py
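All of the configs below follow the same `_base_` inheritance pattern: the listed base files are merged first, and the keys defined in the file override the merged result. A minimal sketch of how such a file is typically consumed (assuming an mmsegmentation checkout with mmcv installed; the printed values reflect this particular config):

```python
# Sketch: resolve the `_base_` inheritance of a config with mmcv.
from mmcv import Config

cfg = Config.fromfile(
    'configs/pspnet/pspnet_r50-d8_4x4_896x896_80k_isaid.py')

# Keys defined in the child file override the merged base files.
print(cfg.model.decode_head.num_classes)  # 16 (the base model's Cityscapes default is 19)
print(cfg.runner.max_iters)               # 80000, inherited from schedule_80k.py
```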
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x1024_40k_cityscapes.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
164
32
76
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x1024_40k_dark.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1920, 1080),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    test=dict(
        type='DarkZurichDataset',
        data_root='data/dark_zurich/',
        img_dir='rgb_anon/val/night/GOPR0356',
        ann_dir='gt/val/night/GOPR0356',
        pipeline=test_pipeline))
967
31.266667
77
py
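This config only swaps in the Dark Zurich test set, so it is meant for evaluating a model trained on Cityscapes. A rough sketch of single-image inference with it, assuming mmseg 0.x APIs; the checkpoint and image paths below are placeholders, not files shipped with the repo:

```python
# Sketch: zero-shot inference on a Dark Zurich frame with a Cityscapes-trained PSPNet.
from mmseg.apis import inference_segmentor, init_segmentor

config = 'configs/pspnet/pspnet_r50-d8_512x1024_40k_dark.py'
checkpoint = 'checkpoints/pspnet_r50-d8_512x1024_40k_cityscapes.pth'  # placeholder path

model = init_segmentor(config, checkpoint, device='cuda:0')
result = inference_segmentor(model, 'data/dark_zurich/rgb_anon/val/night/GOPR0356/frame.png')  # placeholder image
# `result` is a list with one H x W array of per-pixel class indices.
```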
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x1024_40k_night_driving.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1920, 1080),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    test=dict(
        type='NightDrivingDataset',
        data_root='data/NighttimeDrivingTest/',
        img_dir='leftImg8bit/test/night',
        ann_dir='gtCoarse_daytime_trainvaltest/test/night',
        pipeline=test_pipeline))
992
32.1
77
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
164
32
76
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x1024_80k_dark.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1920, 1080),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    test=dict(
        type='DarkZurichDataset',
        data_root='data/dark_zurich/',
        img_dir='rgb_anon/val/night/GOPR0356',
        ann_dir='gt/val/night/GOPR0356',
        pipeline=test_pipeline))
968
30.258065
77
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x1024_80k_night_driving.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(1920, 1080),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    test=dict(
        type='NightDrivingDataset',
        data_root='data/NighttimeDrivingTest/',
        img_dir='leftImg8bit/test/night',
        ann_dir='gtCoarse_daytime_trainvaltest/test/night',
        pipeline=test_pipeline))
992
32.1
77
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
252
35.142857
76
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x512_20k_voc12aug.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
263
32
77
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x512_40k_voc12aug.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
263
32
77
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x512_4x4_160k_coco-stuff164k.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py',
    '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171))
264
32.125
76
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x512_4x4_20k_coco-stuff10k.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py',
    '../_base_/datasets/coco-stuff10k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171))
258
36
79
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x512_4x4_320k_coco-stuff164k.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py',
    '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_320k.py'
]
model = dict(
    decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171))
264
32.125
76
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x512_4x4_40k_coco-stuff10k.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py',
    '../_base_/datasets/coco-stuff10k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171))
258
36
79
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x512_4x4_80k_coco-stuff164k.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py',
    '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=171), auxiliary_head=dict(num_classes=171))
263
32
76
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
251
35
76
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_512x512_80k_loveda.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/loveda.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=7), auxiliary_head=dict(num_classes=7))
247
34.428571
73
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_769x769_40k_cityscapes.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
351
34.2
79
py
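The `slide` test mode above runs the network on overlapping crops and stitches the predictions; with a 769x769 crop and a 513-pixel stride, adjacent windows overlap by 256 pixels. A small standalone sketch of where such windows land (illustration only, not the mmseg implementation, which also clamps the last window to the image border):

```python
# Sketch: start positions of 769x769 sliding windows on a 1025x2049 test image
# (the scale used by the 769x769 Cityscapes configs) with stride 513.
def window_starts(length, crop, stride):
    starts = list(range(0, max(length - crop, 0) + 1, stride))
    if starts[-1] + crop < length:  # clamp a final window to the border
        starts.append(length - crop)
    return starts

print(window_starts(1025, 769, 513))  # [0, 256]
print(window_starts(2049, 769, 513))  # [0, 513, 1026, 1280]
```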
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_769x769_80k_cityscapes.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
351
34.2
79
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
checkpoint = 'https://download.openmmlab.com/mmclassification/v0/resnet/resnet50_8xb256-rsb-a1-600e_in1k_20211228-20e21305.pth'  # noqa
model = dict(
    pretrained=None,
    backbone=dict(
        type='ResNet',
        init_cfg=dict(
            type='Pretrained', prefix='backbone.', checkpoint=checkpoint)))
optimizer = dict(_delete_=True, type='AdamW', lr=0.0005, weight_decay=0.05)
optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(
    _delete_=True,
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[60000, 72000],
    by_epoch=False)
805
32.583333
135
py
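The `_delete_=True` flag tells the config merger to drop everything inherited for that dict before applying the new keys, which is how this file swaps the base SGD optimizer and poly LR schedule for AdamW with a warmed-up step policy. A quick sketch of the effect (assuming mmcv is installed):

```python
# Sketch: with _delete_=True the inherited SGD fields (e.g. momentum) are
# discarded rather than merged into the new AdamW optimizer dict.
from mmcv import Config

cfg = Config.fromfile(
    'configs/pspnet/pspnet_r50-d8_rsb-pretrain_512x1024_adamw_80k_cityscapes.py')
print(cfg.optimizer)
# {'type': 'AdamW', 'lr': 0.0005, 'weight_decay': 0.05} -- no leftover
# 'momentum' key from the SGD optimizer defined in the base schedule.
```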
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50b-d32_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/pspnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    pretrained='torchvision://resnet50',
    backbone=dict(type='ResNet', dilations=(1, 1, 2, 4), strides=(1, 2, 2, 2)))
299
36.5
79
py
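The `-d32` variant keeps the standard ResNet stage strides `(1, 2, 2, 2)`, so the backbone downsamples by 32x overall, whereas the `-d8` models set the last two stage strides to 1 and rely on dilation to stay at 1/8 resolution. A tiny sketch of that arithmetic (illustration only; the stem itself downsamples by 4):

```python
# Sketch: overall output stride of a ResNet backbone from its per-stage strides.
def output_stride(stage_strides, stem_stride=4):
    factor = stem_stride
    for s in stage_strides:
        factor *= s
    return factor

print(output_stride((1, 2, 2, 2)))  # 32 -> the r50b-d32 config above
print(output_stride((1, 2, 1, 1)))  # 8  -> the d8 configs (strides replaced by dilations)
```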
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50b-d8_512x1024_80k_cityscapes.py
_base_ = './pspnet_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet'))
134
44
79
py
mmsegmentation
mmsegmentation-master/configs/pspnet/pspnet_r50b-d8_769x769_80k_cityscapes.py
_base_ = './pspnet_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='torchvision://resnet50', backbone=dict(type='ResNet'))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/resnest/README.md
# ResNeSt [ResNeSt: Split-Attention Networks](https://arxiv.org/abs/2004.08955) ## Introduction <!-- [BACKBONE] --> <a href="https://github.com/zhanghang1989/ResNeSt">Official Repo</a> <a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/resnest.py#L271">Code Snippet</a> ## Abstract <!-- [ABSTRACT] --> It is well known that featuremap attention and multi-path representation are important for visual recognition. In this paper, we present a modularized architecture, which applies the channel-wise attention on different network branches to leverage their success in capturing cross-feature interactions and learning diverse representations. Our design results in a simple and unified computation block, which can be parameterized using only a few variables. Our model, named ResNeSt, outperforms EfficientNet in accuracy and latency trade-off on image classification. In addition, ResNeSt has achieved superior transfer learning results on several public benchmarks serving as the backbone, and has been adopted by the winning entries of COCO-LVIS challenge. The source code for complete system and pretrained models are publicly available. <!-- [IMAGE] --> <div align=center> <img src="https://user-images.githubusercontent.com/24582831/142902526-3cf33345-7e40-47a6-985e-4381857e21df.png" width="60%"/> </div> ## Citation ```bibtex @article{zhang2020resnest, title={ResNeSt: Split-Attention Networks}, author={Zhang, Hang and Wu, Chongruo and Zhang, Zhongyue and Zhu, Yi and Zhang, Zhi and Lin, Haibin and Sun, Yue and He, Tong and Muller, Jonas and Manmatha, R. and Li, Mu and Smola, Alexander}, journal={arXiv preprint arXiv:2004.08955}, year={2020} } ``` ## Results and models ### Cityscapes | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ----------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | FCN | S-101-D8 | 512x1024 | 80000 | 11.4 | 2.39 | 77.56 | 78.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes_20200807_140631-f8d155b3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes-20200807_140631.log.json) | | PSPNet | S-101-D8 | 512x1024 | 80000 | 11.8 | 2.52 | 78.57 | 79.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes-20200807_140631.log.json) | | DeepLabV3 | 
S-101-D8 | 512x1024 | 80000 | 11.9 | 1.88 | 79.67 | 80.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes_20200807_144429-b73c4270.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes-20200807_144429.log.json) | | DeepLabV3+ | S-101-D8 | 512x1024 | 80000 | 13.2 | 2.36 | 79.62 | 80.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes_20200807_144429-1239eb43.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes-20200807_144429.log.json) | ### ADE20K | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | ---------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | FCN | S-101-D8 | 512x512 | 160000 | 14.2 | 12.86 | 45.62 | 46.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k_20200807_145416-d3160329.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k-20200807_145416.log.json) | | PSPNet | S-101-D8 | 512x512 | 160000 | 14.2 | 13.02 | 45.44 | 46.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k-20200807_145416.log.json) | | DeepLabV3 | S-101-D8 | 512x512 | 160000 | 14.6 | 9.28 | 45.71 | 46.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k_20200807_144503-17ecabe5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k-20200807_144503.log.json) | | DeepLabV3+ | S-101-D8 | 512x512 | 160000 | 16.2 | 11.96 | 46.47 | 47.27 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k_20200807_144503-27b26226.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k-20200807_144503.log.json) |
8,976
162.218182
839
md
mmsegmentation
mmsegmentation-master/configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py
_base_ = '../deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(
        type='ResNeSt',
        stem_channels=128,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True))
271
26.2
68
py
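All the ResNeSt configs in this directory follow the same pattern: inherit a complete ResNet-101 config from another method's directory and replace only the backbone block. A short sketch of what the merge produces (assuming mmcv and an mmsegmentation checkout):

```python
# Sketch: after merging, only the backbone differs from the parent
# deeplabv3_r101-d8 config; decode head, dataset and schedule are inherited.
from mmcv import Config

cfg = Config.fromfile(
    'configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py')
print(cfg.model.backbone.type)     # 'ResNeSt'
print(cfg.model.backbone.radix)    # 2 (split-attention groups)
print(cfg.model.decode_head.type)  # 'ASPPHead', inherited from DeepLabV3
```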
mmsegmentation
mmsegmentation-master/configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py
_base_ = '../deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(
        type='ResNeSt',
        stem_channels=128,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True))
267
25.8
64
py
mmsegmentation
mmsegmentation-master/configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py
_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(
        type='ResNeSt',
        stem_channels=128,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True))
279
27
76
py
mmsegmentation
mmsegmentation-master/configs/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k.py
_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(
        type='ResNeSt',
        stem_channels=128,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True))
275
26.6
72
py
mmsegmentation
mmsegmentation-master/configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py
_base_ = '../fcn/fcn_r101-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(
        type='ResNeSt',
        stem_channels=128,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True))
259
25
56
py
mmsegmentation
mmsegmentation-master/configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py
_base_ = '../fcn/fcn_r101-d8_512x512_160k_ade20k.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(
        type='ResNeSt',
        stem_channels=128,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True))
255
24.6
52
py
mmsegmentation
mmsegmentation-master/configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py
_base_ = '../pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(
        type='ResNeSt',
        stem_channels=128,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True))
265
25.6
62
py
mmsegmentation
mmsegmentation-master/configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py
_base_ = '../pspnet/pspnet_r101-d8_512x512_160k_ade20k.py'
model = dict(
    pretrained='open-mmlab://resnest101',
    backbone=dict(
        type='ResNeSt',
        stem_channels=128,
        radix=2,
        reduction_factor=4,
        avg_down_stride=True))
261
25.2
58
py
mmsegmentation
mmsegmentation-master/configs/resnest/resnest.yml
Models: - Name: fcn_s101-d8_512x1024_80k_cityscapes In Collection: FCN Metadata: backbone: S-101-D8 crop size: (512,1024) lr schd: 80000 inference time (ms/im): - value: 418.41 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,1024) Training Memory (GB): 11.4 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 77.56 mIoU(ms+flip): 78.98 Config: configs/resnest/fcn_s101-d8_512x1024_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x1024_80k_cityscapes/fcn_s101-d8_512x1024_80k_cityscapes_20200807_140631-f8d155b3.pth - Name: pspnet_s101-d8_512x1024_80k_cityscapes In Collection: PSPNet Metadata: backbone: S-101-D8 crop size: (512,1024) lr schd: 80000 inference time (ms/im): - value: 396.83 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,1024) Training Memory (GB): 11.8 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 78.57 mIoU(ms+flip): 79.19 Config: configs/resnest/pspnet_s101-d8_512x1024_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x1024_80k_cityscapes/pspnet_s101-d8_512x1024_80k_cityscapes_20200807_140631-c75f3b99.pth - Name: deeplabv3_s101-d8_512x1024_80k_cityscapes In Collection: DeepLabV3 Metadata: backbone: S-101-D8 crop size: (512,1024) lr schd: 80000 inference time (ms/im): - value: 531.91 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,1024) Training Memory (GB): 11.9 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 79.67 mIoU(ms+flip): 80.51 Config: configs/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x1024_80k_cityscapes/deeplabv3_s101-d8_512x1024_80k_cityscapes_20200807_144429-b73c4270.pth - Name: deeplabv3plus_s101-d8_512x1024_80k_cityscapes In Collection: DeepLabV3+ Metadata: backbone: S-101-D8 crop size: (512,1024) lr schd: 80000 inference time (ms/im): - value: 423.73 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,1024) Training Memory (GB): 13.2 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 79.62 mIoU(ms+flip): 80.27 Config: configs/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x1024_80k_cityscapes/deeplabv3plus_s101-d8_512x1024_80k_cityscapes_20200807_144429-1239eb43.pth - Name: fcn_s101-d8_512x512_160k_ade20k In Collection: FCN Metadata: backbone: S-101-D8 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 77.76 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 14.2 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 45.62 mIoU(ms+flip): 46.16 Config: configs/resnest/fcn_s101-d8_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/fcn_s101-d8_512x512_160k_ade20k/fcn_s101-d8_512x512_160k_ade20k_20200807_145416-d3160329.pth - Name: pspnet_s101-d8_512x512_160k_ade20k In Collection: PSPNet Metadata: backbone: S-101-D8 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 76.8 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 14.2 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 45.44 mIoU(ms+flip): 46.28 Config: 
configs/resnest/pspnet_s101-d8_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/pspnet_s101-d8_512x512_160k_ade20k/pspnet_s101-d8_512x512_160k_ade20k_20200807_145416-a6daa92a.pth - Name: deeplabv3_s101-d8_512x512_160k_ade20k In Collection: DeepLabV3 Metadata: backbone: S-101-D8 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 107.76 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 14.6 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 45.71 mIoU(ms+flip): 46.59 Config: configs/resnest/deeplabv3_s101-d8_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3_s101-d8_512x512_160k_ade20k/deeplabv3_s101-d8_512x512_160k_ade20k_20200807_144503-17ecabe5.pth - Name: deeplabv3plus_s101-d8_512x512_160k_ade20k In Collection: DeepLabV3+ Metadata: backbone: S-101-D8 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 83.61 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 16.2 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 46.47 mIoU(ms+flip): 47.27 Config: configs/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/resnest/deeplabv3plus_s101-d8_512x512_160k_ade20k/deeplabv3plus_s101-d8_512x512_160k_ade20k_20200807_144503-27b26226.pth
5,664
30.825843
190
yml
mmsegmentation
mmsegmentation-master/configs/segformer/README.md
# SegFormer

[SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers](https://arxiv.org/abs/2105.15203)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/NVlabs/SegFormer">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/mit.py#L246">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

We present SegFormer, a simple, efficient yet powerful semantic segmentation framework which unifies Transformers with lightweight multilayer perceptron (MLP) decoders. SegFormer has two appealing features: 1) SegFormer comprises a novel hierarchically structured Transformer encoder which outputs multiscale features. It does not need positional encoding, thereby avoiding the interpolation of positional codes which leads to decreased performance when the testing resolution differs from training. 2) SegFormer avoids complex decoders. The proposed MLP decoder aggregates information from different layers, and thus combines both local attention and global attention to render powerful representations. We show that this simple and lightweight design is the key to efficient segmentation on Transformers. We scale our approach up to obtain a series of models from SegFormer-B0 to SegFormer-B5, reaching significantly better performance and efficiency than previous counterparts. For example, SegFormer-B4 achieves 50.3% mIoU on ADE20K with 64M parameters, being 5x smaller and 2.2% better than the previous best method. Our best model, SegFormer-B5, achieves 84.0% mIoU on the Cityscapes validation set and shows excellent zero-shot robustness on Cityscapes-C. Code will be released at: [this http URL](https://github.com/NVlabs/SegFormer).

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142902600-e188073e-5744-4ba9-8dbf-9316e55c74aa.png" width="70%"/>
</div>

## Citation

```bibtex
@article{xie2021segformer,
  title={SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers},
  author={Xie, Enze and Wang, Wenhai and Yu, Zhiding and Anandkumar, Anima and Alvarez, Jose M and Luo, Ping},
  journal={arXiv preprint arXiv:2105.15203},
  year={2021}
}
```

## Usage

We have provided pretrained models converted from [SegFormer](https://github.com/NVlabs/SegFormer). If you want to convert keys on your own, we also provide a script [`mit2mmseg.py`](../../tools/model_converters/mit2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/NVlabs/SegFormer) to MMSegmentation style.

```shell
python tools/model_converters/mit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH}
```

This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
## Results and models ### ADE20K | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | --------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | Segformer | MIT-B0 | 512x512 | 160000 | 2.1 | 38.17 | 37.85 | 38.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b0_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_512x512_160k_ade20k/segformer_mit-b0_512x512_160k_ade20k_20220617_162207-c00b9603.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_512x512_160k_ade20k/segformer_mit-b0_512x512_160k_ade20k_20220617_162207.log.json) | | Segformer | MIT-B1 | 512x512 | 160000 | 2.6 | 37.80 | 42.13 | 43.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b1_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_512x512_160k_ade20k/segformer_mit-b1_512x512_160k_ade20k_20220620_112037-c3f39e00.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_512x512_160k_ade20k/segformer_mit-b1_512x512_160k_ade20k_20220620_112037.log.json) | | Segformer | MIT-B2 | 512x512 | 160000 | 3.6 | 26.80 | 46.80 | 48.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b2_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_512x512_160k_ade20k/segformer_mit-b2_512x512_160k_ade20k_20220620_114047-64e4feca.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_512x512_160k_ade20k/segformer_mit-b2_512x512_160k_ade20k_20220620_114047.log.json) | | Segformer | MIT-B3 | 512x512 | 160000 | 4.8 | 19.19 | 48.25 | 49.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b3_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_512x512_160k_ade20k/segformer_mit-b3_512x512_160k_ade20k_20220617_162254-3a4b7363.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_512x512_160k_ade20k/segformer_mit-b3_512x512_160k_ade20k_20220617_162254.log.json) | | Segformer | MIT-B4 | 512x512 | 160000 | 6.1 | 14.54 | 49.09 | 50.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b4_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_512x512_160k_ade20k/segformer_mit-b4_512x512_160k_ade20k_20220620_112216-4fa4f58f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_512x512_160k_ade20k/segformer_mit-b4_512x512_160k_ade20k_20220620_112216.log.json) | | Segformer | MIT-B5 | 512x512 | 160000 | 7.2 | 11.89 | 49.13 | 50.22 | 
[config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b5_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_512x512_160k_ade20k/segformer_mit-b5_512x512_160k_ade20k_20210726_145235-94cedf59.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_512x512_160k_ade20k/segformer_mit-b5_512x512_160k_ade20k_20210726_145235.log.json) | | Segformer | MIT-B5 | 640x640 | 160000 | 11.5 | 10.60 | 50.19 | 51.41 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b5_640x640_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_640x640_160k_ade20k/segformer_mit-b5_640x640_160k_ade20k_20220617_203542-940a6bd8.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_640x640_160k_ade20k/segformer_mit-b5_640x640_160k_ade20k_20220617_203542.log.json) | Evaluation with `AlignedResize`: | Method | Backbone | Crop Size | Lr schd | mIoU | mIoU(ms+flip) | | --------- | -------- | --------- | ------: | ----: | ------------- | | Segformer | MIT-B0 | 512x512 | 160000 | 38.55 | 39.03 | | Segformer | MIT-B1 | 512x512 | 160000 | 43.26 | 44.11 | | Segformer | MIT-B2 | 512x512 | 160000 | 47.46 | 48.16 | | Segformer | MIT-B3 | 512x512 | 160000 | 49.27 | 49.94 | | Segformer | MIT-B4 | 512x512 | 160000 | 50.23 | 51.10 | | Segformer | MIT-B5 | 512x512 | 160000 | 50.08 | 50.72 | | Segformer | MIT-B5 | 640x640 | 160000 | 51.13 | 51.66 | ### Cityscapes The lower fps result is caused by the sliding window inference scheme (window size:1024x1024). | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | --------- | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Segformer | MIT-B0 | 1024x1024 | 160000 | 3.64 | 4.74 | 76.54 | 78.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes/segformer_mit-b0_8x1_1024x1024_160k_cityscapes_20211208_101857-e7f88502.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes/segformer_mit-b0_8x1_1024x1024_160k_cityscapes_20211208_101857.log.json) | | Segformer | MIT-B1 | 1024x1024 | 160000 | 4.49 | 4.3 | 78.56 | 79.73 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes/segformer_mit-b1_8x1_1024x1024_160k_cityscapes_20211208_064213-655c7b3f.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes/segformer_mit-b1_8x1_1024x1024_160k_cityscapes_20211208_064213.log.json) | | Segformer | MIT-B2 | 1024x1024 | 160000 | 7.42 | 3.36 | 81.08 | 82.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes/segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205-6096669a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes/segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205.log.json) | | Segformer | MIT-B3 | 1024x1024 | 160000 | 10.86 | 2.53 | 81.94 | 83.14 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes/segformer_mit-b3_8x1_1024x1024_160k_cityscapes_20211206_224823-a8f8a177.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes/segformer_mit-b3_8x1_1024x1024_160k_cityscapes_20211206_224823.log.json) | | Segformer | MIT-B4 | 1024x1024 | 160000 | 15.07 | 1.88 | 81.89 | 83.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes/segformer_mit-b4_8x1_1024x1024_160k_cityscapes_20211207_080709-07f6c333.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes/segformer_mit-b4_8x1_1024x1024_160k_cityscapes_20211207_080709.log.json) | | Segformer | MIT-B5 | 1024x1024 | 160000 | 18.00 | 1.39 | 82.25 | 83.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes/segformer_mit-b5_8x1_1024x1024_160k_cityscapes_20211206_072934-87a052ec.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes/segformer_mit-b5_8x1_1024x1024_160k_cityscapes_20211206_072934.log.json) | Note: Original SegFormer paper uses different `test_pipeline` and image ratios in `ms+flip`. If you want to cite SegFormer original results as benchmark you may modify settings as below: - We replace `AlignedResize` in original implementation to `Resize + ResizeToMultiple`. If you want to test by using `AlignedResize`, you can change the dataset pipeline like this: ```python test_pipeline = [ dict(type='LoadImageFromFile'), dict( type='MultiScaleFlipAug', img_scale=(2048, 512), # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75], flip=False, transforms=[ dict(type='Resize', keep_ratio=True), # resize image to multiple of 32, improve SegFormer by 0.5-1.0 mIoU. 
dict(type='ResizeToMultiple', size_divisor=32), dict(type='RandomFlip'), dict(type='Normalize', **img_norm_cfg), dict(type='ImageToTensor', keys=['img']), dict(type='Collect', keys=['img']), ]) ] ``` - Different from default setting of `ms+flip`, SegFormer original repo adopts [different image ratios](https://github.com/NVlabs/SegFormer/blob/master/tools/test.py#L97-L101) for ADE20K dataset. To re-implement numerical results of `ms+flip`, you can change image ratios in `tools/test.py` like this: ```python if args.aug_test: if cfg.data.test.type == 'ADE20KDataset': # hard code index cfg.data.test.pipeline[1].img_ratios = [ 0.75, 0.875, 1.0, 1.125, 1.25 ] ``` - Training of SegFormer is not very stable, which is sensitive to random seeds. - We use default training setting in MMSegmentation rather than `RepeatDataset` adopted in SegFormer official repo to accelerate [training](https://github.com/NVlabs/SegFormer/blob/master/local_configs/_base_/datasets/ade20k_repeat.py#L38-L39), here is its related [issue](https://github.com/NVlabs/SegFormer/issues/25).
15,635
121.15625
1,339
md
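The `ResizeToMultiple` step mentioned in the SegFormer README above rounds each spatial dimension up to a multiple of the divisor so the hierarchical encoder's strides divide evenly. A rough standalone illustration of that rounding, not the mmseg transform itself:

```python
# Sketch: round an image shape up to the next multiple of `size_divisor`,
# the effect the README attributes to ResizeToMultiple (size_divisor=32).
import math

def to_multiple(size, size_divisor=32):
    h, w = size
    return (math.ceil(h / size_divisor) * size_divisor,
            math.ceil(w / size_divisor) * size_divisor)

print(to_multiple((512, 683)))  # (512, 704)
```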
mmsegmentation
mmsegmentation-master/configs/segformer/segformer.yml
Collections: - Name: Segformer Metadata: Training Data: - ADE20K - Cityscapes Paper: URL: https://arxiv.org/abs/2105.15203 Title: 'SegFormer: Simple and Efficient Design for Semantic Segmentation with Transformers' README: configs/segformer/README.md Code: URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/mit.py#L246 Version: v0.17.0 Converted From: Code: https://github.com/NVlabs/SegFormer Models: - Name: segformer_mit-b0_512x512_160k_ade20k In Collection: Segformer Metadata: backbone: MIT-B0 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 26.2 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 2.1 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 37.85 mIoU(ms+flip): 38.97 Config: configs/segformer/segformer_mit-b0_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_512x512_160k_ade20k/segformer_mit-b0_512x512_160k_ade20k_20220617_162207-c00b9603.pth - Name: segformer_mit-b1_512x512_160k_ade20k In Collection: Segformer Metadata: backbone: MIT-B1 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 26.46 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 2.6 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 42.13 mIoU(ms+flip): 43.74 Config: configs/segformer/segformer_mit-b1_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_512x512_160k_ade20k/segformer_mit-b1_512x512_160k_ade20k_20220620_112037-c3f39e00.pth - Name: segformer_mit-b2_512x512_160k_ade20k In Collection: Segformer Metadata: backbone: MIT-B2 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 37.31 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 3.6 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 46.8 mIoU(ms+flip): 48.12 Config: configs/segformer/segformer_mit-b2_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_512x512_160k_ade20k/segformer_mit-b2_512x512_160k_ade20k_20220620_114047-64e4feca.pth - Name: segformer_mit-b3_512x512_160k_ade20k In Collection: Segformer Metadata: backbone: MIT-B3 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 52.11 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 4.8 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 48.25 mIoU(ms+flip): 49.58 Config: configs/segformer/segformer_mit-b3_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_512x512_160k_ade20k/segformer_mit-b3_512x512_160k_ade20k_20220617_162254-3a4b7363.pth - Name: segformer_mit-b4_512x512_160k_ade20k In Collection: Segformer Metadata: backbone: MIT-B4 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 68.78 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 6.1 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 49.09 mIoU(ms+flip): 50.72 Config: configs/segformer/segformer_mit-b4_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_512x512_160k_ade20k/segformer_mit-b4_512x512_160k_ade20k_20220620_112216-4fa4f58f.pth - Name: segformer_mit-b5_512x512_160k_ade20k In 
Collection: Segformer Metadata: backbone: MIT-B5 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 84.1 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 7.2 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 49.13 mIoU(ms+flip): 50.22 Config: configs/segformer/segformer_mit-b5_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_512x512_160k_ade20k/segformer_mit-b5_512x512_160k_ade20k_20210726_145235-94cedf59.pth - Name: segformer_mit-b5_640x640_160k_ade20k In Collection: Segformer Metadata: backbone: MIT-B5 crop size: (640,640) lr schd: 160000 inference time (ms/im): - value: 94.34 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (640,640) Training Memory (GB): 11.5 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 50.19 mIoU(ms+flip): 51.41 Config: configs/segformer/segformer_mit-b5_640x640_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_640x640_160k_ade20k/segformer_mit-b5_640x640_160k_ade20k_20220617_203542-940a6bd8.pth - Name: segformer_mit-b0_8x1_1024x1024_160k_cityscapes In Collection: Segformer Metadata: backbone: MIT-B0 crop size: (1024,1024) lr schd: 160000 inference time (ms/im): - value: 210.97 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (1024,1024) Training Memory (GB): 3.64 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 76.54 mIoU(ms+flip): 78.22 Config: configs/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes/segformer_mit-b0_8x1_1024x1024_160k_cityscapes_20211208_101857-e7f88502.pth - Name: segformer_mit-b1_8x1_1024x1024_160k_cityscapes In Collection: Segformer Metadata: backbone: MIT-B1 crop size: (1024,1024) lr schd: 160000 inference time (ms/im): - value: 232.56 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (1024,1024) Training Memory (GB): 4.49 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 78.56 mIoU(ms+flip): 79.73 Config: configs/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes/segformer_mit-b1_8x1_1024x1024_160k_cityscapes_20211208_064213-655c7b3f.pth - Name: segformer_mit-b2_8x1_1024x1024_160k_cityscapes In Collection: Segformer Metadata: backbone: MIT-B2 crop size: (1024,1024) lr schd: 160000 inference time (ms/im): - value: 297.62 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (1024,1024) Training Memory (GB): 7.42 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 81.08 mIoU(ms+flip): 82.18 Config: configs/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes/segformer_mit-b2_8x1_1024x1024_160k_cityscapes_20211207_134205-6096669a.pth - Name: segformer_mit-b3_8x1_1024x1024_160k_cityscapes In Collection: Segformer Metadata: backbone: MIT-B3 crop size: (1024,1024) lr schd: 160000 inference time (ms/im): - value: 395.26 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (1024,1024) Training Memory (GB): 10.86 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 81.94 
mIoU(ms+flip): 83.14 Config: configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes/segformer_mit-b3_8x1_1024x1024_160k_cityscapes_20211206_224823-a8f8a177.pth - Name: segformer_mit-b4_8x1_1024x1024_160k_cityscapes In Collection: Segformer Metadata: backbone: MIT-B4 crop size: (1024,1024) lr schd: 160000 inference time (ms/im): - value: 531.91 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (1024,1024) Training Memory (GB): 15.07 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 81.89 mIoU(ms+flip): 83.38 Config: configs/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes/segformer_mit-b4_8x1_1024x1024_160k_cityscapes_20211207_080709-07f6c333.pth - Name: segformer_mit-b5_8x1_1024x1024_160k_cityscapes In Collection: Segformer Metadata: backbone: MIT-B5 crop size: (1024,1024) lr schd: 160000 inference time (ms/im): - value: 719.42 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (1024,1024) Training Memory (GB): 18.0 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 82.25 mIoU(ms+flip): 83.48 Config: configs/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes/segformer_mit-b5_8x1_1024x1024_160k_cityscapes_20211206_072934-87a052ec.pth
9,886
31.523026
194
yml
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b0_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/segformer_mit-b0.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b0_20220624-7e0fe6dd.pth'  # noqa
model = dict(pretrained=checkpoint, decode_head=dict(num_classes=150))
# optimizer
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.00006,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys={
            'pos_block': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'head': dict(lr_mult=10.)
        }))
lr_config = dict(
    _delete_=True,
    policy='poly',
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-6,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)
data = dict(samples_per_gpu=2, workers_per_gpu=2)
899
24.714286
121
py
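The `paramwise_cfg.custom_keys` block above gives the decode head a 10x learning rate and disables weight decay for normalization layers and position blocks; mmcv's optimizer constructor matches these keys against substrings of each parameter's name. A simplified sketch of that matching (illustration only, not the real `DefaultOptimizerConstructor`, which also handles key priorities):

```python
# Sketch: roughly how custom_keys map parameter names to lr / weight-decay multipliers.
custom_keys = {
    'pos_block': dict(decay_mult=0.),
    'norm': dict(decay_mult=0.),
    'head': dict(lr_mult=10.),
}

def multipliers(param_name):
    lr_mult, decay_mult = 1.0, 1.0
    for key, opts in custom_keys.items():
        if key in param_name:
            lr_mult = opts.get('lr_mult', lr_mult)
            decay_mult = opts.get('decay_mult', decay_mult)
    return lr_mult, decay_mult

print(multipliers('backbone.layers.0.norm1.weight'))  # (1.0, 0.0)
print(multipliers('decode_head.conv_seg.weight'))     # (10.0, 1.0)
```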
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py
_base_ = [
    '../_base_/models/segformer_mit-b0.py',
    '../_base_/datasets/cityscapes_1024x1024.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b0_20220624-7e0fe6dd.pth'  # noqa
model = dict(
    backbone=dict(init_cfg=dict(type='Pretrained', checkpoint=checkpoint)),
    test_cfg=dict(mode='slide', crop_size=(1024, 1024), stride=(768, 768)))
# optimizer
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.00006,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys={
            'pos_block': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'head': dict(lr_mult=10.)
        }))
lr_config = dict(
    _delete_=True,
    policy='poly',
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-6,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)
data = dict(samples_per_gpu=1, workers_per_gpu=1)
1,012
25.657895
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b1_512x512_160k_ade20k.py
_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b1_20220624-02e5a6a1.pth'  # noqa
# model settings
model = dict(
    pretrained=checkpoint,
    backbone=dict(
        embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[2, 2, 2, 2]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
384
34
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b1_8x1_1024x1024_160k_cityscapes.py
_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b1_20220624-02e5a6a1.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        embed_dims=64),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
365
39.666667
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b2_512x512_160k_ade20k.py
_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b2_20220624-66e8bf70.pth'  # noqa
# model settings
model = dict(
    pretrained=checkpoint,
    backbone=dict(
        embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 4, 6, 3]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
384
34
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b2_8x1_1024x1024_160k_cityscapes.py
_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b2_20220624-66e8bf70.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        embed_dims=64,
        num_layers=[3, 4, 6, 3]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
398
38.9
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b3_512x512_160k_ade20k.py
_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b3_20220624-13b1141c.pth'  # noqa
# model settings
model = dict(
    pretrained=checkpoint,
    backbone=dict(
        embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 4, 18, 3]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
385
34.090909
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b3_8x1_1024x1024_160k_cityscapes.py
_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b3_20220624-13b1141c.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        embed_dims=64,
        num_layers=[3, 4, 18, 3]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
399
39
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b4_512x512_160k_ade20k.py
_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b4_20220624-d588d980.pth'  # noqa
# model settings
model = dict(
    pretrained=checkpoint,
    backbone=dict(
        embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 8, 27, 3]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
385
34.090909
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b4_8x1_1024x1024_160k_cityscapes.py
_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b4_20220624-d588d980.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        embed_dims=64,
        num_layers=[3, 8, 27, 3]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
399
39
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b5_512x512_160k_ade20k.py
_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py']
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b5_20220624-658746d9.pth'  # noqa
# model settings
model = dict(
    pretrained=checkpoint,
    backbone=dict(
        embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 6, 40, 3]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
385
34.090909
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b5_640x640_160k_ade20k.py
_base_ = ['./segformer_mit-b0_512x512_160k_ade20k.py']
# dataset settings
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (640, 640)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 640), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 640),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
# model settings
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b5_20220624-658746d9.pth'  # noqa
model = dict(
    pretrained=checkpoint,
    backbone=dict(
        embed_dims=64, num_heads=[1, 2, 5, 8], num_layers=[3, 6, 40, 3]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
1,680
35.543478
121
py
mmsegmentation
mmsegmentation-master/configs/segformer/segformer_mit-b5_8x1_1024x1024_160k_cityscapes.py
_base_ = ['./segformer_mit-b0_8x1_1024x1024_160k_cityscapes.py']

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segformer/mit_b5_20220624-658746d9.pth'  # noqa

model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        embed_dims=64,
        num_layers=[3, 6, 40, 3]),
    decode_head=dict(in_channels=[64, 128, 320, 512]))
399
39
121
py
mmsegmentation
mmsegmentation-master/configs/segmenter/README.md
# Segmenter

[Segmenter: Transformer for Semantic Segmentation](https://arxiv.org/abs/2105.05633)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/rstrudel/segmenter">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.21.0/mmseg/models/decode_heads/segmenter_mask_head.py#L15">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Image segmentation is often ambiguous at the level of individual image patches and requires contextual information to reach label consensus. In this paper we introduce Segmenter, a transformer model for semantic segmentation. In contrast to convolution-based methods, our approach allows to model global context already at the first layer and throughout the network. We build on the recent Vision Transformer (ViT) and extend it to semantic segmentation. To do so, we rely on the output embeddings corresponding to image patches and obtain class labels from these embeddings with a point-wise linear decoder or a mask transformer decoder. We leverage models pre-trained for image classification and show that we can fine-tune them on moderate sized datasets available for semantic segmentation. The linear decoder allows to obtain excellent results already, but the performance can be further improved by a mask transformer generating class masks. We conduct an extensive ablation study to show the impact of the different parameters, in particular the performance is better for large models and small patch sizes. Segmenter attains excellent results for semantic segmentation. It outperforms the state of the art on both ADE20K and Pascal Context datasets and is competitive on Cityscapes.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/148507554-87eb80bd-02c7-4c31-b102-c6141e231ec8.png" width="70%"/>
</div>

```bibtex
@inproceedings{strudel2021segmenter,
  title={Segmenter: Transformer for semantic segmentation},
  author={Strudel, Robin and Garcia, Ricardo and Laptev, Ivan and Schmid, Cordelia},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages={7262--7272},
  year={2021}
}
```

## Usage

We have provided pretrained models converted from [ViT-AugReg](https://github.com/rwightman/pytorch-image-models/blob/f55c22bebf9d8afc449d317a723231ef72e0d662/timm/models/vision_transformer.py#L54-L106).

If you want to convert the keys on your own to use the pre-trained ViT model from [Segmenter](https://github.com/rstrudel/segmenter), we also provide a script [`vitjax2mmseg.py`](../../tools/model_converters/vitjax2mmseg.py) in the tools directory to convert the keys of models from [ViT-AugReg](https://github.com/rwightman/pytorch-image-models/blob/f55c22bebf9d8afc449d317a723231ef72e0d662/timm/models/vision_transformer.py#L54-L106) to MMSegmentation style.

```shell
python tools/model_converters/vitjax2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH}
```

E.g.

```shell
python tools/model_converters/vitjax2mmseg.py \
Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz \
pretrain/vit_tiny_p16_384.pth
```

This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
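If you want the configs below to load your locally converted checkpoint instead of the released download URL, you can override the model setting on the command line. The following is a minimal sketch, assuming the converted file from the example above (`pretrain/vit_tiny_p16_384.pth`) and the standard `--cfg-options` override accepted by MMSegmentation's training scripts:

```shell
# Sketch only: train Segmenter ViT-T while pointing `model.pretrained`
# at the locally converted checkpoint produced by the conversion example above.
# 8 is the number of GPUs; adjust it to your setup.
./tools/dist_train.sh configs/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k.py 8 \
    --cfg-options model.pretrained=pretrain/vit_tiny_p16_384.pth
```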
In our default setting, pretrained models and their corresponding [ViT-AugReg](https://github.com/rwightman/pytorch-image-models/blob/f55c22bebf9d8afc449d317a723231ef72e0d662/timm/models/vision_transformer.py#L54-L106) models could be defined below: | pretrained models | original models | | --------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | vit_tiny_p16_384.pth | ['vit_tiny_patch16_384'](https://storage.googleapis.com/vit_models/augreg/Ti_16-i21k-300ep-lr_0.001-aug_none-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz) | | vit_small_p16_384.pth | ['vit_small_patch16_384'](https://storage.googleapis.com/vit_models/augreg/S_16-i21k-300ep-lr_0.001-aug_light1-wd_0.03-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.03-res_384.npz) | | vit_base_p16_384.pth | ['vit_base_patch16_384'](https://storage.googleapis.com/vit_models/augreg/B_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.0-sd_0.0--imagenet2012-steps_20k-lr_0.01-res_384.npz) | | vit_large_p16_384.pth | ['vit_large_patch16_384'](https://storage.googleapis.com/vit_models/augreg/L_16-i21k-300ep-lr_0.001-aug_medium1-wd_0.1-do_0.1-sd_0.1--imagenet2012-steps_20k-lr_0.01-res_384.npz) | ## Results and models ### ADE20K | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | ---------------- | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | -------------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Segmenter Mask | ViT-T_16 | 512x512 | 160000 | 1.21 | 27.98 | 39.99 | 40.83 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k/segmenter_vit-t_mask_8x1_512x512_160k_ade20k_20220105_151706-ffcf7509.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k/segmenter_vit-t_mask_8x1_512x512_160k_ade20k_20220105_151706.log.json) | | Segmenter Linear | ViT-S_16 | 512x512 | 160000 | 1.78 | 28.07 | 45.75 | 46.82 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k/segmenter_vit-s_linear_8x1_512x512_160k_ade20k_20220105_151713-39658c46.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k/segmenter_vit-s_linear_8x1_512x512_160k_ade20k_20220105_151713.log.json) | | Segmenter Mask | ViT-S_16 | 512x512 | 160000 | 2.03 | 24.80 | 46.19 | 47.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k/segmenter_vit-s_mask_8x1_512x512_160k_ade20k_20220105_151706-511bb103.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k/segmenter_vit-s_mask_8x1_512x512_160k_ade20k_20220105_151706.log.json) | | Segmenter Mask | ViT-B_16 | 512x512 | 160000 | 4.20 | 13.20 | 49.60 | 51.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k/segmenter_vit-b_mask_8x1_512x512_160k_ade20k_20220105_151706-bc533b08.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k/segmenter_vit-b_mask_8x1_512x512_160k_ade20k_20220105_151706.log.json) | | Segmenter Mask | ViT-L_16 | 640x640 | 160000 | 16.99 | 3.03 | 51.65 | 53.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segmenter/segmenter_vit-l_mask_8x1_640x640_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-l_mask_8x1_640x640_160k_ade20k/segmenter_vit-l_mask_8x1_640x640_160k_ade20k_20220614_024513-4783a347.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-l_mask_8x1_640x640_160k_ade20k/segmenter_vit-l_mask_8x1_640x640_160k_ade20k_20220614_024513.log.json) | Note: - This model performance is sensitive to the seed values used, please refer to the log file for the specific settings of the seed. If you choose a different seed, the results might differ from the table results.
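As a hedged sketch of reproducing a reported number, the seed recorded in the released log can be passed back to the training script; `--seed` and `--deterministic` are the usual `tools/train.py` arguments, and `0` below is only a placeholder for the logged value:

```shell
# Sketch only: re-run training with the seed recorded in the released log.
./tools/dist_train.sh configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py 8 \
    --seed 0 --deterministic
```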
9,362
117.518987
1,290
md
mmsegmentation
mmsegmentation-master/configs/segmenter/segmenter.yml
Collections: - Name: Segmenter Metadata: Training Data: - ADE20K Paper: URL: https://arxiv.org/abs/2105.05633 Title: 'Segmenter: Transformer for Semantic Segmentation' README: configs/segmenter/README.md Code: URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.21.0/mmseg/models/decode_heads/segmenter_mask_head.py#L15 Version: v0.21.0 Converted From: Code: https://github.com/rstrudel/segmenter Models: - Name: segmenter_vit-t_mask_8x1_512x512_160k_ade20k In Collection: Segmenter Metadata: backbone: ViT-T_16 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 35.74 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 1.21 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 39.99 mIoU(ms+flip): 40.83 Config: configs/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k/segmenter_vit-t_mask_8x1_512x512_160k_ade20k_20220105_151706-ffcf7509.pth - Name: segmenter_vit-s_linear_8x1_512x512_160k_ade20k In Collection: Segmenter Metadata: backbone: ViT-S_16 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 35.63 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 1.78 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 45.75 mIoU(ms+flip): 46.82 Config: configs/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k/segmenter_vit-s_linear_8x1_512x512_160k_ade20k_20220105_151713-39658c46.pth - Name: segmenter_vit-s_mask_8x1_512x512_160k_ade20k In Collection: Segmenter Metadata: backbone: ViT-S_16 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 40.32 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 2.03 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 46.19 mIoU(ms+flip): 47.85 Config: configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k/segmenter_vit-s_mask_8x1_512x512_160k_ade20k_20220105_151706-511bb103.pth - Name: segmenter_vit-b_mask_8x1_512x512_160k_ade20k In Collection: Segmenter Metadata: backbone: ViT-B_16 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 75.76 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 4.2 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 49.6 mIoU(ms+flip): 51.07 Config: configs/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k/segmenter_vit-b_mask_8x1_512x512_160k_ade20k_20220105_151706-bc533b08.pth - Name: segmenter_vit-l_mask_8x1_640x640_160k_ade20k In Collection: Segmenter Metadata: backbone: ViT-L_16 crop size: (640,640) lr schd: 160000 inference time (ms/im): - value: 330.03 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (640,640) Training Memory (GB): 16.99 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 51.65 mIoU(ms+flip): 53.58 Config: configs/segmenter/segmenter_vit-l_mask_8x1_640x640_160k_ade20k.py Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/segmenter/segmenter_vit-l_mask_8x1_640x640_160k_ade20k/segmenter_vit-l_mask_8x1_640x640_160k_ade20k_20220614_024513-4783a347.pth
4,132
31.801587
194
yml
mmsegmentation
mmsegmentation-master/configs/segmenter/segmenter_vit-b_mask_8x1_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/segmenter_vit-b16_mask.py',
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]

optimizer = dict(lr=0.001, weight_decay=0.0)

img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    # num_gpus: 8 -> batch_size: 8
    samples_per_gpu=1,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,546
34.159091
71
py
mmsegmentation
mmsegmentation-master/configs/segmenter/segmenter_vit-l_mask_8x1_640x640_160k_ade20k.py
_base_ = [
    '../_base_/models/segmenter_vit-b16_mask.py',
    '../_base_/datasets/ade20k_640x640.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_large_p16_384_20220308-d4efb41d.pth'  # noqa

model = dict(
    pretrained=checkpoint,
    backbone=dict(
        type='VisionTransformer',
        img_size=(640, 640),
        embed_dims=1024,
        num_layers=24,
        num_heads=16),
    decode_head=dict(
        type='SegmenterMaskTransformerHead',
        in_channels=1024,
        channels=1024,
        num_heads=16,
        embed_dims=1024),
    test_cfg=dict(mode='slide', crop_size=(640, 640), stride=(608, 608)))

optimizer = dict(lr=0.001, weight_decay=0.0)

img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
crop_size = (640, 640)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2560, 640), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2560, 640),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    # num_gpus: 8 -> batch_size: 8
    samples_per_gpu=1,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
2,121
33.225806
132
py
mmsegmentation
mmsegmentation-master/configs/segmenter/segmenter_vit-s_linear_8x1_512x512_160k_ade20k.py
_base_ = './segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py'

model = dict(
    decode_head=dict(
        _delete_=True,
        type='FCNHead',
        in_channels=384,
        channels=384,
        num_convs=0,
        dropout_ratio=0.0,
        concat_input=False,
        num_classes=150,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
394
25.333333
74
py
mmsegmentation
mmsegmentation-master/configs/segmenter/segmenter_vit-s_mask_8x1_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/segmenter_vit-b16_mask.py',
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_small_p16_384_20220308-410f6037.pth'  # noqa

backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True)
model = dict(
    pretrained=checkpoint,
    backbone=dict(
        img_size=(512, 512),
        embed_dims=384,
        num_heads=6,
    ),
    decode_head=dict(
        type='SegmenterMaskTransformerHead',
        in_channels=384,
        channels=384,
        num_classes=150,
        num_layers=2,
        num_heads=6,
        embed_dims=384,
        dropout_ratio=0.0,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))

optimizer = dict(lr=0.001, weight_decay=0.0)

img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    # num_gpus: 8 -> batch_size: 8
    samples_per_gpu=1,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
2,223
32.19403
132
py
mmsegmentation
mmsegmentation-master/configs/segmenter/segmenter_vit-t_mask_8x1_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/segmenter_vit-b16_mask.py',
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_tiny_p16_384_20220308-cce8c795.pth'  # noqa

model = dict(
    pretrained=checkpoint,
    backbone=dict(embed_dims=192, num_heads=3),
    decode_head=dict(
        type='SegmenterMaskTransformerHead',
        in_channels=192,
        channels=192,
        num_heads=3,
        embed_dims=192))

optimizer = dict(lr=0.001, weight_decay=0.0)

img_norm_cfg = dict(
    mean=[127.5, 127.5, 127.5], std=[127.5, 127.5, 127.5], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg'])
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img'])
        ])
]
data = dict(
    # num_gpus: 8 -> batch_size: 8
    samples_per_gpu=1,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
1,930
32.877193
131
py
mmsegmentation
mmsegmentation-master/configs/segnext/README.md
# SegNeXt [SegNeXt: Rethinking Convolutional Attention Design for Semantic Segmentation](https://arxiv.org/abs/2209.08575) ## Introduction <!-- [ALGORITHM] --> <a href="https://github.com/visual-attention-network/segnext">Official Repo</a> <a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.31.0/mmseg/models/backbones/mscan.py#L328">Code Snippet</a> ## Abstract <!-- [ABSTRACT] --> We present SegNeXt, a simple convolutional network architecture for semantic segmentation. Recent transformer-based models have dominated the field of semantic segmentation due to the efficiency of self-attention in encoding spatial information. In this paper, we show that convolutional attention is a more efficient and effective way to encode contextual information than the self-attention mechanism in transformers. By re-examining the characteristics owned by successful segmentation models, we discover several key components leading to the performance improvement of segmentation models. This motivates us to design a novel convolutional attention network that uses cheap convolutional operations. Without bells and whistles, our SegNeXt significantly improves the performance of previous state-of-the-art methods on popular benchmarks, including ADE20K, Cityscapes, COCO-Stuff, Pascal VOC, Pascal Context, and iSAID. Notably, SegNeXt outperforms EfficientNet-L2 w/ NAS-FPN and achieves 90.6% mIoU on the Pascal VOC 2012 test leaderboard using only 1/10 parameters of it. On average, SegNeXt achieves about 2.0% mIoU improvements compared to the state-of-the-art methods on the ADE20K datasets with the same or fewer computations. Code is available at [this https URL](https://github.com/uyzhang/JSeg) (Jittor) and [this https URL](https://github.com/Visual-Attention-Network/SegNeXt) (Pytorch). 
<!-- [IMAGE] --> <div align=center> <img src="https://user-images.githubusercontent.com/24582831/215688018-5d4c8366-7793-4fdf-9397-960a09fac951.png" width="70%"/> </div> ```bibtex @article{guo2022segnext, title={SegNeXt: Rethinking Convolutional Attention Design for Semantic Segmentation}, author={Guo, Meng-Hao and Lu, Cheng-Ze and Hou, Qibin and Liu, Zhengning and Cheng, Ming-Ming and Hu, Shi-Min}, journal={arXiv preprint arXiv:2209.08575}, year={2022} } ``` ## Results and models ### ADE20K | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | ------- | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | SegNeXt | MSCAN-T | 512x512 | 160000 | 17.88 | 52.38 | 41.50 | 42.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segnext/segnext_mscan-t_1x16_512x512_adamw_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-t_1x16_512x512_adamw_160k_ade20k/segnext_mscan-t_1x16_512x512_adamw_160k_ade20k_20230210_140244-05bd8466.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-t_1x16_512x512_adamw_160k_ade20k/segnext_mscan-t_1x16_512x512_adamw_160k_ade20k_20230210_140244.log.json) | | SegNeXt | MSCAN-S | 512x512 | 160000 | 21.47 | 42.27 | 44.16 | 45.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segnext/segnext_mscan-s_1x16_512x512_adamw_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-s_1x16_512x512_adamw_160k_ade20k/segnext_mscan-s_1x16_512x512_adamw_160k_ade20k_20230214_113014-43013668.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-s_1x16_512x512_adamw_160k_ade20k/segnext_mscan-s_1x16_512x512_adamw_160k_ade20k_20230214_113014.log.json) | | SegNeXt | MSCAN-B | 512x512 | 160000 | 31.03 | 35.15 | 48.03 | 49.68 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segnext/segnext_mscan-b_1x16_512x512_adamw_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-b_1x16_512x512_adamw_160k_ade20k/segnext_mscan-b_1x16_512x512_adamw_160k_ade20k_20230209_172053-b6f6c70c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-b_1x16_512x512_adamw_160k_ade20k/segnext_mscan-b_1x16_512x512_adamw_160k_ade20k_20230209_172053.log.json) | | SegNeXt | MSCAN-L | 512x512 | 160000 | 43.32 | 22.91 | 50.99 | 52.10 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/segnext/segnext_mscan-l_1x16_512x512_adamw_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-l_1x16_512x512_adamw_160k_ade20k/segnext_mscan-l_1x16_512x512_adamw_160k_ade20k_20230209_172055-19b14b63.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-l_1x16_512x512_adamw_160k_ade20k/segnext_mscan-l_1x16_512x512_adamw_160k_ade20k_20230209_172055.log.json) |

Note:

- When we integrated SegNeXt into MMSegmentation, we modified some layers' names to make them more precise and concise without changing the model architecture. Therefore, the keys of the pre-trained weights are different from the [original weights](https://cloud.tsinghua.edu.cn/d/c15b25a6745946618462/), but don't worry about these changes: we have converted them and uploaded the checkpoints. You can find the URLs of the pre-trained checkpoints in the config files and use them directly for training.
- The total batch size is 16. We trained SegNeXt with a single GPU because performance degrades significantly when using the `SyncBN` (mainly in the `OverlapPatchEmbed` modules of `MSCAN`) of PyTorch 1.9.
- There will be subtle differences at test time because the Non-negative Matrix Factorization (NMF) in `LightHamHead` is initialized randomly. To control this randomness, please set the random seed when testing. You can modify [`./tools/test.py`](https://github.com/open-mmlab/mmsegmentation/blob/master/tools/test.py) like:

```python
def main():
    from mmseg.apis import set_random_seed
    random_seed = xxx  # set random seed recorded in training log
    set_random_seed(random_seed, deterministic=False)
    ...
```

- The model performance is sensitive to the seed value used; please refer to the log file for the specific seed setting. If you choose a different seed, the results might differ from the table results. Take SegNeXt Large for example: its results range from 49.60 to 51.0.
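For completeness, here is a minimal sketch of the single-GPU launch implied by the `1x16` setting (one GPU with 16 samples per GPU, which is already configured in the config files); `tools/train.py` is the standard MMSegmentation entry point and the work directory below is just an example:

```shell
# Sketch only: single-GPU training, matching the total batch size of 16
# that is already set via samples_per_gpu=16 in the config.
python tools/train.py configs/segnext/segnext_mscan-t_1x16_512x512_adamw_160k_ade20k.py \
    --work-dir work_dirs/segnext_mscan-t_ade20k
```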
7,509
120.129032
1,402
md
mmsegmentation
mmsegmentation-master/configs/segnext/segnext.yml
Collections: - Name: SegNeXt Metadata: Training Data: - ADE20K Paper: URL: https://arxiv.org/abs/2209.08575 Title: 'SegNeXt: Rethinking Convolutional Attention Design for Semantic Segmentation' README: configs/segnext/README.md Code: URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.31.0/mmseg/models/backbones/mscan.py#L328 Version: v0.31.0 Converted From: Code: https://github.com/visual-attention-network/segnext Models: - Name: segnext_mscan-t_1x16_512x512_adamw_160k_ade20k In Collection: SegNeXt Metadata: backbone: MSCAN-T crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 19.09 hardware: A100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 17.88 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 41.5 mIoU(ms+flip): 42.59 Config: configs/segnext/segnext_mscan-t_1x16_512x512_adamw_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-t_1x16_512x512_adamw_160k_ade20k/segnext_mscan-t_1x16_512x512_adamw_160k_ade20k_20230210_140244-05bd8466.pth - Name: segnext_mscan-s_1x16_512x512_adamw_160k_ade20k In Collection: SegNeXt Metadata: backbone: MSCAN-S crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 23.66 hardware: A100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 21.47 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 44.16 mIoU(ms+flip): 45.81 Config: configs/segnext/segnext_mscan-s_1x16_512x512_adamw_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-s_1x16_512x512_adamw_160k_ade20k/segnext_mscan-s_1x16_512x512_adamw_160k_ade20k_20230214_113014-43013668.pth - Name: segnext_mscan-b_1x16_512x512_adamw_160k_ade20k In Collection: SegNeXt Metadata: backbone: MSCAN-B crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 28.45 hardware: A100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 31.03 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 48.03 mIoU(ms+flip): 49.68 Config: configs/segnext/segnext_mscan-b_1x16_512x512_adamw_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-b_1x16_512x512_adamw_160k_ade20k/segnext_mscan-b_1x16_512x512_adamw_160k_ade20k_20230209_172053-b6f6c70c.pth - Name: segnext_mscan-l_1x16_512x512_adamw_160k_ade20k In Collection: SegNeXt Metadata: backbone: MSCAN-L crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 43.65 hardware: A100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 43.32 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 50.99 mIoU(ms+flip): 52.1 Config: configs/segnext/segnext_mscan-l_1x16_512x512_adamw_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/segnext/segnext_mscan-l_1x16_512x512_adamw_160k_ade20k/segnext_mscan-l_1x16_512x512_adamw_160k_ade20k_20230209_172055-19b14b63.pth
3,417
31.865385
192
yml
mmsegmentation
mmsegmentation-master/configs/segnext/segnext_mscan-b_1x16_512x512_adamw_160k_ade20k.py
_base_ = './segnext_mscan-t_1x16_512x512_adamw_160k_ade20k.py'

# model settings
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segnext/mscan_b_20230227-3ab7d230.pth'  # noqa
ham_norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    type='EncoderDecoder',
    backbone=dict(
        embed_dims=[64, 128, 320, 512],
        depths=[3, 3, 12, 3],
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
        drop_path_rate=0.1,
        norm_cfg=dict(type='BN', requires_grad=True)),
    decode_head=dict(
        type='LightHamHead',
        in_channels=[128, 320, 512],
        in_index=[1, 2, 3],
        channels=512,
        ham_channels=512,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=ham_norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
1,029
35.785714
125
py
mmsegmentation
mmsegmentation-master/configs/segnext/segnext_mscan-l_1x16_512x512_adamw_160k_ade20k.py
_base_ = './segnext_mscan-t_1x16_512x512_adamw_160k_ade20k.py'

# model settings
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segnext/mscan_l_20230227-cef260d4.pth'  # noqa
ham_norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    type='EncoderDecoder',
    backbone=dict(
        embed_dims=[64, 128, 320, 512],
        depths=[3, 5, 27, 3],
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
        drop_path_rate=0.3,
        norm_cfg=dict(type='BN', requires_grad=True)),
    decode_head=dict(
        type='LightHamHead',
        in_channels=[128, 320, 512],
        in_index=[1, 2, 3],
        channels=1024,
        ham_channels=1024,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=ham_norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
1,031
35.857143
125
py
mmsegmentation
mmsegmentation-master/configs/segnext/segnext_mscan-s_1x16_512x512_adamw_160k_ade20k.py
_base_ = './segnext_mscan-t_1x16_512x512_adamw_160k_ade20k.py'

# model settings
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segnext/mscan_s_20230227-f33ccdf2.pth'  # noqa
ham_norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    type='EncoderDecoder',
    backbone=dict(
        embed_dims=[64, 128, 320, 512],
        depths=[2, 2, 4, 2],
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
        norm_cfg=dict(type='BN', requires_grad=True)),
    decode_head=dict(
        type='LightHamHead',
        in_channels=[128, 320, 512],
        in_index=[1, 2, 3],
        channels=256,
        ham_channels=256,
        ham_kwargs=dict(MD_R=16),
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=ham_norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
1,034
35.964286
125
py
mmsegmentation
mmsegmentation-master/configs/segnext/segnext_mscan-t_1x16_512x512_adamw_160k_ade20k.py
_base_ = [
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# model settings
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segnext/mscan_t_20230227-119e8c9f.pth'  # noqa
ham_norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='MSCAN',
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
        embed_dims=[32, 64, 160, 256],
        mlp_ratios=[8, 8, 4, 4],
        drop_rate=0.0,
        drop_path_rate=0.1,
        depths=[3, 3, 5, 2],
        attention_kernel_sizes=[5, [1, 7], [1, 11], [1, 21]],
        attention_kernel_paddings=[2, [0, 3], [0, 5], [0, 10]],
        act_cfg=dict(type='GELU'),
        norm_cfg=dict(type='BN', requires_grad=True)),
    decode_head=dict(
        type='LightHamHead',
        in_channels=[64, 160, 256],
        in_index=[1, 2, 3],
        channels=256,
        ham_channels=256,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=ham_norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
        ham_kwargs=dict(
            MD_S=1,
            MD_R=16,
            train_steps=6,
            eval_steps=7,
            inv_t=100,
            rand_init=True)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

# dataset settings
dataset_type = 'ADE20KDataset'
data_root = 'data/ade/ADEChallengeData2016'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (512, 512)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 512), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='ResizeToMultiple', size_divisor=32),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    samples_per_gpu=16,
    workers_per_gpu=4,
    train=dict(
        type='RepeatDataset',
        times=50,
        dataset=dict(
            type=dataset_type,
            data_root=data_root,
            img_dir='images/training',
            ann_dir='annotations/training',
            pipeline=train_pipeline)),
    val=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline),
    test=dict(
        type=dataset_type,
        data_root=data_root,
        img_dir='images/validation',
        ann_dir='annotations/validation',
        pipeline=test_pipeline))

# optimizer
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.00006,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys={
            'pos_block': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.),
            'head': dict(lr_mult=10.)
        }))

lr_config = dict(
    _delete_=True,
    policy='poly',
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-6,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)
3,968
30.251969
125
py
mmsegmentation
mmsegmentation-master/configs/sem_fpn/README.md
# Semantic FPN [Panoptic Feature Pyramid Networks](https://arxiv.org/abs/1901.02446) ## Introduction <!-- [ALGORITHM] --> <a href="https://github.com/facebookresearch/detectron2">Official Repo</a> <a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/fpn_head.py#L12">Code Snippet</a> ## Abstract <!-- [ABSTRACT] --> The recently introduced panoptic segmentation task has renewed our community's interest in unifying the tasks of instance segmentation (for thing classes) and semantic segmentation (for stuff classes). However, current state-of-the-art methods for this joint task use separate and dissimilar networks for instance and semantic segmentation, without performing any shared computation. In this work, we aim to unify these methods at the architectural level, designing a single network for both tasks. Our approach is to endow Mask R-CNN, a popular instance segmentation method, with a semantic segmentation branch using a shared Feature Pyramid Network (FPN) backbone. Surprisingly, this simple baseline not only remains effective for instance segmentation, but also yields a lightweight, top-performing method for semantic segmentation. In this work, we perform a detailed study of this minimally extended version of Mask R-CNN with FPN, which we refer to as Panoptic FPN, and show it is a robust and accurate baseline for both tasks. Given its effectiveness and conceptual simplicity, we hope our method can serve as a strong baseline and aid future research in panoptic segmentation. <!-- [IMAGE] --> <div align=center> <img src="https://user-images.githubusercontent.com/24582831/142902694-03ed2131-9104-467b-ace1-c74c62fb7177.png" width="60%"/> </div> ## Citation ```bibtex @inproceedings{kirillov2019panoptic, title={Panoptic feature pyramid networks}, author={Kirillov, Alexander and Girshick, Ross and He, Kaiming and Doll{\'a}r, Piotr}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, pages={6399--6408}, year={2019} } ``` ## Results and models ### Cityscapes | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ---------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | FPN | R-50 | 512x1024 | 80000 | 2.8 | 13.54 | 74.52 | 76.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes_20200717_021437-94018a0d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes-20200717_021437.log.json) | | FPN | R-101 | 512x1024 | 80000 | 3.9 | 10.29 | 75.80 | 77.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes_20200717_012416-c5800d4c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes-20200717_012416.log.json) | ### ADE20K | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | ------ | -------- | --------- | ------: | -------: | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | FPN | R-50 | 512x512 | 160000 | 4.9 | 55.77 | 37.49 | 39.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k_20200718_131734-5b5a6ab9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k-20200718_131734.log.json) | | FPN | R-101 | 512x512 | 160000 | 5.9 | 40.58 | 39.35 | 40.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/sem_fpn/fpn_r101_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k_20200718_131734-306b5004.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k-20200718_131734.log.json) |
6,364
121.403846
1,184
md
mmsegmentation
mmsegmentation-master/configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py
_base_ = './fpn_r50_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
128
42
79
py
mmsegmentation
mmsegmentation-master/configs/sem_fpn/fpn_r101_512x512_160k_ade20k.py
_base_ = './fpn_r50_512x512_160k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
124
40.666667
79
py
mmsegmentation
mmsegmentation-master/configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/fpn_r50.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
158
30.8
73
py
mmsegmentation
mmsegmentation-master/configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/fpn_r50.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(decode_head=dict(num_classes=150))
203
33
74
py
mmsegmentation
mmsegmentation-master/configs/sem_fpn/sem_fpn.yml
Collections: - Name: FPN Metadata: Training Data: - Cityscapes - ADE20K Paper: URL: https://arxiv.org/abs/1901.02446 Title: Panoptic Feature Pyramid Networks README: configs/sem_fpn/README.md Code: URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/fpn_head.py#L12 Version: v0.17.0 Converted From: Code: https://github.com/facebookresearch/detectron2 Models: - Name: fpn_r50_512x1024_80k_cityscapes In Collection: FPN Metadata: backbone: R-50 crop size: (512,1024) lr schd: 80000 inference time (ms/im): - value: 73.86 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,1024) Training Memory (GB): 2.8 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 74.52 mIoU(ms+flip): 76.08 Config: configs/sem_fpn/fpn_r50_512x1024_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x1024_80k_cityscapes/fpn_r50_512x1024_80k_cityscapes_20200717_021437-94018a0d.pth - Name: fpn_r101_512x1024_80k_cityscapes In Collection: FPN Metadata: backbone: R-101 crop size: (512,1024) lr schd: 80000 inference time (ms/im): - value: 97.18 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,1024) Training Memory (GB): 3.9 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 75.8 mIoU(ms+flip): 77.4 Config: configs/sem_fpn/fpn_r101_512x1024_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x1024_80k_cityscapes/fpn_r101_512x1024_80k_cityscapes_20200717_012416-c5800d4c.pth - Name: fpn_r50_512x512_160k_ade20k In Collection: FPN Metadata: backbone: R-50 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 17.93 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 4.9 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 37.49 mIoU(ms+flip): 39.09 Config: configs/sem_fpn/fpn_r50_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r50_512x512_160k_ade20k/fpn_r50_512x512_160k_ade20k_20200718_131734-5b5a6ab9.pth - Name: fpn_r101_512x512_160k_ade20k In Collection: FPN Metadata: backbone: R-101 crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 24.64 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 5.9 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 39.35 mIoU(ms+flip): 40.72 Config: configs/sem_fpn/fpn_r101_512x512_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/sem_fpn/fpn_r101_512x512_160k_ade20k/fpn_r101_512x512_160k_ade20k_20200718_131734-306b5004.pth
3,097
28.504762
164
yml
mmsegmentation
mmsegmentation-master/configs/setr/README.md
# SETR

[Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective with Transformers](https://arxiv.org/abs/2012.15840)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/fudan-zvg/SETR">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/setr_up_head.py#L11">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Most recent semantic segmentation methods adopt a fully-convolutional network (FCN) with an encoder-decoder architecture. The encoder progressively reduces the spatial resolution and learns more abstract/semantic visual concepts with larger receptive fields. Since context modeling is critical for segmentation, the latest efforts have been focused on increasing the receptive field, through either dilated/atrous convolutions or inserting attention modules. However, the encoder-decoder based FCN architecture remains unchanged. In this paper, we aim to provide an alternative perspective by treating semantic segmentation as a sequence-to-sequence prediction task. Specifically, we deploy a pure transformer (ie, without convolution and resolution reduction) to encode an image as a sequence of patches. With the global context modeled in every layer of the transformer, this encoder can be combined with a simple decoder to provide a powerful segmentation model, termed SEgmentation TRansformer (SETR). Extensive experiments show that SETR achieves new state of the art on ADE20K (50.28% mIoU), Pascal Context (55.83% mIoU) and competitive results on Cityscapes. Particularly, we achieve the first position in the highly competitive ADE20K test server leaderboard on the day of submission.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142902777-ee2d34b7-a631-4fa7-ad68-118ff5716afe.png" width="80%"/>
</div>

```None
This head has two versions.
```

## Citation

```bibtex
@article{zheng2020rethinking,
  title={Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective with Transformers},
  author={Zheng, Sixiao and Lu, Jiachen and Zhao, Hengshuang and Zhu, Xiatian and Luo, Zekun and Wang, Yabiao and Fu, Yanwei and Feng, Jianfeng and Xiang, Tao and Torr, Philip HS and others},
  journal={arXiv preprint arXiv:2012.15840},
  year={2020}
}
```

## Usage

You can download the pretrained model from [here](https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-vitjx/jx_vit_large_p16_384-b3be5167.pth).

Then you can convert its keys with the script `vit2mmseg.py` in the tools directory.

```shell
python tools/model_converters/vit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH}
```

E.g.

```shell
python tools/model_converters/vit2mmseg.py \
jx_vit_large_p16_384-b3be5167.pth pretrain/vit_large_p16.pth
```

This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
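Once the converted weights are stored at `pretrain/vit_large_p16.pth` (the path the ADE20K configs in this folder point to), training can be launched in the usual MMSegmentation way. The following is a minimal sketch with 8 GPUs, matching the total batch size of 16 used by the `b16` configs at 2 samples per GPU:

```shell
# Sketch only: train SETR-Naive on ADE20K with 8 GPUs after converting
# the ViT-L weights to pretrain/vit_large_p16.pth as shown above.
./tools/dist_train.sh configs/setr/setr_naive_512x512_160k_b16_ade20k.py 8
```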
## Results and models ### ADE20K | Method | Backbone | Crop Size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | ---------- | -------- | --------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | --------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | | SETR Naive | ViT-L | 512x512 | 16 | 160000 | 18.40 | 4.72 | 48.28 | 49.56 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_naive_512x512_160k_b16_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_512x512_160k_b16_ade20k/setr_naive_512x512_160k_b16_ade20k_20210619_191258-061f24f5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_512x512_160k_b16_ade20k/setr_naive_512x512_160k_b16_ade20k_20210619_191258.log.json) | | SETR PUP | ViT-L | 512x512 | 16 | 160000 | 19.54 | 4.50 | 48.24 | 49.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_pup_512x512_160k_b16_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_512x512_160k_b16_ade20k/setr_pup_512x512_160k_b16_ade20k_20210619_191343-7e0ce826.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_512x512_160k_b16_ade20k/setr_pup_512x512_160k_b16_ade20k_20210619_191343.log.json) | | SETR MLA | ViT-L | 512x512 | 8 | 160000 | 10.96 | - | 47.34 | 49.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_mla_512x512_160k_b8_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b8_ade20k/setr_mla_512x512_160k_b8_ade20k_20210619_191118-c6d21df0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b8_ade20k/setr_mla_512x512_160k_b8_ade20k_20210619_191118.log.json) | | SETR MLA | ViT-L | 512x512 | 16 | 160000 | 17.30 | 5.25 | 47.39 | 49.37 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_mla_512x512_160k_b16_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b16_ade20k/setr_mla_512x512_160k_b16_ade20k_20210619_191057-f9741de7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b16_ade20k/setr_mla_512x512_160k_b16_ade20k_20210619_191057.log.json) | ### Cityscapes | Method | Backbone | Crop Size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | ---------- | -------- | --------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | ---------------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | SETR 
Naive | ViT-L | 768x768 | 8 | 80000 | 24.06 | 0.39 | 78.10 | 80.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_vit-large_8x1_768x768_80k_cityscapes/setr_naive_vit-large_8x1_768x768_80k_cityscapes_20211123_000505-20728e80.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_vit-large_8x1_768x768_80k_cityscapes/setr_naive_vit-large_8x1_768x768_80k_cityscapes_20211123_000505.log.json) | | SETR PUP | ViT-L | 768x768 | 8 | 80000 | 27.96 | 0.37 | 79.21 | 81.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_vit-large_8x1_768x768_80k_cityscapes/setr_pup_vit-large_8x1_768x768_80k_cityscapes_20211122_155115-f6f37b8f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_vit-large_8x1_768x768_80k_cityscapes/setr_pup_vit-large_8x1_768x768_80k_cityscapes_20211122_155115.log.json) | | SETR MLA | ViT-L | 768x768 | 8 | 80000 | 24.10 | 0.41 | 77.00 | 79.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_vit-large_8x1_768x768_80k_cityscapes/setr_mla_vit-large_8x1_768x768_80k_cityscapes_20211119_101003-7f8dccbe.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_vit-large_8x1_768x768_80k_cityscapes/setr_mla_vit-large_8x1_768x768_80k_cityscapes_20211119_101003.log.json) |
9,431
124.76
1,292
md
mmsegmentation
mmsegmentation-master/configs/setr/setr.yml
Collections: - Name: SETR Metadata: Training Data: - ADE20K - Cityscapes Paper: URL: https://arxiv.org/abs/2012.15840 Title: Rethinking Semantic Segmentation from a Sequence-to-Sequence Perspective with Transformers README: configs/setr/README.md Code: URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/setr_up_head.py#L11 Version: v0.17.0 Converted From: Code: https://github.com/fudan-zvg/SETR Models: - Name: setr_naive_512x512_160k_b16_ade20k In Collection: SETR Metadata: backbone: ViT-L crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 211.86 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 18.4 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 48.28 mIoU(ms+flip): 49.56 Config: configs/setr/setr_naive_512x512_160k_b16_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_512x512_160k_b16_ade20k/setr_naive_512x512_160k_b16_ade20k_20210619_191258-061f24f5.pth - Name: setr_pup_512x512_160k_b16_ade20k In Collection: SETR Metadata: backbone: ViT-L crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 222.22 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 19.54 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 48.24 mIoU(ms+flip): 49.99 Config: configs/setr/setr_pup_512x512_160k_b16_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_512x512_160k_b16_ade20k/setr_pup_512x512_160k_b16_ade20k_20210619_191343-7e0ce826.pth - Name: setr_mla_512x512_160k_b8_ade20k In Collection: SETR Metadata: backbone: ViT-L crop size: (512,512) lr schd: 160000 Training Memory (GB): 10.96 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 47.34 mIoU(ms+flip): 49.05 Config: configs/setr/setr_mla_512x512_160k_b8_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b8_ade20k/setr_mla_512x512_160k_b8_ade20k_20210619_191118-c6d21df0.pth - Name: setr_mla_512x512_160k_b16_ade20k In Collection: SETR Metadata: backbone: ViT-L crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 190.48 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 17.3 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 47.39 mIoU(ms+flip): 49.37 Config: configs/setr/setr_mla_512x512_160k_b16_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_512x512_160k_b16_ade20k/setr_mla_512x512_160k_b16_ade20k_20210619_191057-f9741de7.pth - Name: setr_vit-large_naive_8x1_768x768_80k_cityscapes In Collection: SETR Metadata: backbone: ViT-L crop size: (768,768) lr schd: 80000 inference time (ms/im): - value: 2564.1 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (768,768) Training Memory (GB): 24.06 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 78.1 mIoU(ms+flip): 80.22 Config: configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_naive_vit-large_8x1_768x768_80k_cityscapes/setr_naive_vit-large_8x1_768x768_80k_cityscapes_20211123_000505-20728e80.pth - Name: setr_vit-large_pup_8x1_768x768_80k_cityscapes In Collection: SETR Metadata: backbone: ViT-L crop size: (768,768) lr schd: 80000 inference time (ms/im): - value: 2702.7 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 
resolution: (768,768) Training Memory (GB): 27.96 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 79.21 mIoU(ms+flip): 81.02 Config: configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_pup_vit-large_8x1_768x768_80k_cityscapes/setr_pup_vit-large_8x1_768x768_80k_cityscapes_20211122_155115-f6f37b8f.pth - Name: setr_vit-large_mla_8x1_768x768_80k_cityscapes In Collection: SETR Metadata: backbone: ViT-L crop size: (768,768) lr schd: 80000 inference time (ms/im): - value: 2439.02 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (768,768) Training Memory (GB): 24.1 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 77.0 mIoU(ms+flip): 79.59 Config: configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/setr/setr_mla_vit-large_8x1_768x768_80k_cityscapes/setr_mla_vit-large_8x1_768x768_80k_cityscapes_20211119_101003-7f8dccbe.pth
5,204
30.545455
191
yml
mmsegmentation
mmsegmentation-master/configs/setr/setr_mla_512x512_160k_b16_ade20k.py
_base_ = ['./setr_mla_512x512_160k_b8_ade20k.py']

# num_gpus: 8 -> batch_size: 16
data = dict(samples_per_gpu=2)
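The `b16` suffix in this config's name is the effective batch size: the comment above assumes 8 GPUs, each carrying `samples_per_gpu=2`. A tiny sketch of that arithmetic (the GPU count comes from the comment, not from anything stored in the config file):

```python
# Effective batch size behind the `b16` name; num_gpus is assumed from the
# comment above rather than read from the config.
num_gpus = 8
samples_per_gpu = 2  # data = dict(samples_per_gpu=2)
effective_batch_size = num_gpus * samples_per_gpu
assert effective_batch_size == 16
```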
114
22
49
py
mmsegmentation
mmsegmentation-master/configs/setr/setr_mla_512x512_160k_b8_ade20k.py
_base_ = [
    '../_base_/models/setr_mla.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    pretrained=None,
    backbone=dict(
        img_size=(512, 512),
        drop_rate=0.,
        init_cfg=dict(
            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
    decode_head=dict(num_classes=150),
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=256,
            channels=256,
            in_index=0,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=0,
            kernel_size=1,
            concat_input=False,
            num_classes=150,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='FCNHead',
            in_channels=256,
            channels=256,
            in_index=1,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=0,
            kernel_size=1,
            concat_input=False,
            num_classes=150,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='FCNHead',
            in_channels=256,
            channels=256,
            in_index=2,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=0,
            kernel_size=1,
            concat_input=False,
            num_classes=150,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='FCNHead',
            in_channels=256,
            channels=256,
            in_index=3,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=0,
            kernel_size=1,
            concat_input=False,
            num_classes=150,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    ],
    test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)),
)

optimizer = dict(
    lr=0.001,
    weight_decay=0.0,
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)}))

# num_gpus: 8 -> batch_size: 8
data = dict(samples_per_gpu=1)
2,635
29.651163
78
py
mmsegmentation
mmsegmentation-master/configs/setr/setr_naive_512x512_160k_b16_ade20k.py
_base_ = [
    '../_base_/models/setr_naive.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    pretrained=None,
    backbone=dict(
        img_size=(512, 512),
        drop_rate=0.,
        init_cfg=dict(
            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
    decode_head=dict(num_classes=150),
    auxiliary_head=[
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=0,
            num_classes=150,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=2,
            kernel_size=1,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=1,
            num_classes=150,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=2,
            kernel_size=1,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=2,
            num_classes=150,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=2,
            kernel_size=1,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))
    ],
    test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)),
)

optimizer = dict(
    lr=0.01,
    weight_decay=0.0,
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)}))

# num_gpus: 8 -> batch_size: 16
data = dict(samples_per_gpu=2)
2,077
29.558824
78
py
mmsegmentation
mmsegmentation-master/configs/setr/setr_pup_512x512_160k_b16_ade20k.py
_base_ = [
    '../_base_/models/setr_pup.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    pretrained=None,
    backbone=dict(
        img_size=(512, 512),
        drop_rate=0.,
        init_cfg=dict(
            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
    decode_head=dict(num_classes=150),
    auxiliary_head=[
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=0,
            num_classes=150,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=2,
            kernel_size=3,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=1,
            num_classes=150,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=2,
            kernel_size=3,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=2,
            num_classes=150,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            num_convs=2,
            kernel_size=3,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    ],
    test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)),
)

optimizer = dict(
    lr=0.001,
    weight_decay=0.0,
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)}))

# num_gpus: 8 -> batch_size: 16
data = dict(samples_per_gpu=2)
2,077
29.558824
78
py
mmsegmentation
mmsegmentation-master/configs/setr/setr_vit-large_mla_8x1_768x768_80k_cityscapes.py
_base_ = [
    '../_base_/models/setr_mla.py', '../_base_/datasets/cityscapes_768x768.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    pretrained=None,
    backbone=dict(
        drop_rate=0,
        init_cfg=dict(
            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
    test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(512, 512)))

optimizer = dict(
    lr=0.002,
    weight_decay=0.0,
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)}))

data = dict(samples_per_gpu=1)
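With `mode='slide'`, inference runs the model over overlapping 768x768 crops taken every 512 pixels and fuses the overlapping predictions. A rough sketch of the window count for a 1024x2048 Cityscapes frame (the image size is an assumption about the evaluation data, and the ceil-based grid formula is the usual sliding-window count, not a quote of MMSegmentation internals):

```python
import math

# Number of overlapping windows visited by slide inference, for illustration.
h_img, w_img = 1024, 2048        # assumed Cityscapes evaluation resolution
h_crop, w_crop = 768, 768        # crop_size from the config above
h_stride, w_stride = 512, 512    # stride from the config above

h_grids = max(math.ceil((h_img - h_crop) / h_stride) + 1, 1)
w_grids = max(math.ceil((w_img - w_crop) / w_stride) + 1, 1)
print(h_grids, w_grids, h_grids * w_grids)  # 2 4 8 windows per image
```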
564
30.388889
79
py
mmsegmentation
mmsegmentation-master/configs/setr/setr_vit-large_naive_8x1_768x768_80k_cityscapes.py
_base_ = [
    '../_base_/models/setr_naive.py',
    '../_base_/datasets/cityscapes_768x768.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
model = dict(
    pretrained=None,
    backbone=dict(
        drop_rate=0.,
        init_cfg=dict(
            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
    test_cfg=dict(mode='slide', crop_size=(768, 768), stride=(512, 512)))

optimizer = dict(
    weight_decay=0.0,
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)}))

data = dict(samples_per_gpu=1)
558
28.421053
79
py
mmsegmentation
mmsegmentation-master/configs/setr/setr_vit-large_pup_8x1_768x768_80k_cityscapes.py
_base_ = [
    '../_base_/models/setr_pup.py', '../_base_/datasets/cityscapes_768x768.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
crop_size = (768, 768)
model = dict(
    pretrained=None,
    backbone=dict(
        drop_rate=0.,
        init_cfg=dict(
            type='Pretrained', checkpoint='pretrain/vit_large_p16.pth')),
    auxiliary_head=[
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=0,
            num_classes=19,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            num_convs=2,
            up_scale=4,
            kernel_size=3,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=1,
            num_classes=19,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            num_convs=2,
            up_scale=4,
            kernel_size=3,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=2,
            num_classes=19,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            num_convs=2,
            up_scale=4,
            kernel_size=3,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))
    ],
    test_cfg=dict(mode='slide', crop_size=crop_size, stride=(512, 512)))

optimizer = dict(
    weight_decay=0.0,
    paramwise_cfg=dict(custom_keys={'head': dict(lr_mult=10.)}))

data = dict(samples_per_gpu=1)
1,946
28.953846
79
py
mmsegmentation
mmsegmentation-master/configs/stdc/README.md
# STDC

[Rethinking BiSeNet For Real-time Semantic Segmentation](https://arxiv.org/abs/2104.13188)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/MichaelFan01/STDC-Seg">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.20.0/mmseg/models/backbones/stdc.py#L394">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

BiSeNet has been proved to be a popular two-stream network for real-time segmentation. However, its principle of adding an extra path to encode spatial information is time-consuming, and the backbones borrowed from pretrained tasks, e.g., image classification, may be inefficient for image segmentation due to the deficiency of task-specific design. To handle these problems, we propose a novel and efficient structure named Short-Term Dense Concatenate network (STDC network) by removing structure redundancy. Specifically, we gradually reduce the dimension of feature maps and use the aggregation of them for image representation, which forms the basic module of STDC network. In the decoder, we propose a Detail Aggregation module by integrating the learning of spatial information into low-level layers in single-stream manner. Finally, the low-level features and deep features are fused to predict the final segmentation results. Extensive experiments on Cityscapes and CamVid dataset demonstrate the effectiveness of our method by achieving promising trade-off between segmentation accuracy and inference speed. On Cityscapes, we achieve 71.9% mIoU on the test set with a speed of 250.4 FPS on NVIDIA GTX 1080Ti, which is 45.2% faster than the latest methods, and achieve 76.8% mIoU with 97.0 FPS while inferring on higher resolution images.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/143640374-d0709587-edb2-4821-bb60-340035f6ad8f.png" width="60%"/>
</div>

## Citation

```bibtex
@inproceedings{fan2021rethinking,
  title={Rethinking BiSeNet For Real-time Semantic Segmentation},
  author={Fan, Mingyuan and Lai, Shenqi and Huang, Junshi and Wei, Xiaoming and Chai, Zhenhua and Luo, Junfeng and Wei, Xiaolin},
  booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition},
  pages={9716--9725},
  year={2021}
}
```

## Usage

We have provided [ImageNet Pretrained STDCNet Weights](https://drive.google.com/drive/folders/1wROFwRt8qWHD4jSo8Zu1gp1d6oYJ3ns1) models converted from [official repo](https://github.com/MichaelFan01/STDC-Seg).

If you want to convert keys on your own to use official repositories' pre-trained models, we also provide a script [`stdc2mmseg.py`](../../tools/model_converters/stdc2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/MichaelFan01/STDC-Seg) to MMSegmentation style.

```shell
python tools/model_converters/stdc2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} ${STDC_TYPE}
```

E.g.

```shell
python tools/model_converters/stdc2mmseg.py ./STDCNet813M_73.91.tar ./pretrained/stdc1.pth STDC1
python tools/model_converters/stdc2mmseg.py ./STDCNet1446_76.47.tar ./pretrained/stdc2.pth STDC2
```

This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
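Under the hood, converters of this kind do little more than load the official checkpoint and rename `state_dict` keys before saving again. The snippet below is only a sketch of that pattern with a made-up renaming rule; the real mapping lives in [`stdc2mmseg.py`](../../tools/model_converters/stdc2mmseg.py).

```python
import torch

# Illustrative key-conversion step (the actual renaming rules are those in
# tools/model_converters/stdc2mmseg.py, not the hypothetical one below).
ckpt = torch.load('./STDCNet813M_73.91.tar', map_location='cpu')
state_dict = ckpt['state_dict'] if 'state_dict' in ckpt else ckpt

new_state_dict = {}
for key, value in state_dict.items():
    new_key = key.replace('features.', 'stages.')  # hypothetical rule
    new_state_dict[new_key] = value

torch.save({'state_dict': new_state_dict}, './pretrained/stdc1.pth')
```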
## Results and models ### Cityscapes | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | -------------------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------------------------------------------------------------------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | STDC 1 (No Pretrain) | STDC1 | 512x1024 | 80000 | 7.15 | 23.06 | 71.82 | 73.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/stdc/stdc1_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_512x1024_80k_cityscapes/stdc1_512x1024_80k_cityscapes_20220224_073048-74e6920a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_512x1024_80k_cityscapes/stdc1_512x1024_80k_cityscapes_20220224_073048.log.json) | | STDC 1 | STDC1 | 512x1024 | 80000 | - | - | 74.94 | 76.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes/stdc1_in1k-pre_512x1024_80k_cityscapes_20220224_141648-3d4c2981.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes/stdc1_in1k-pre_512x1024_80k_cityscapes_20220224_141648.log.json) | | STDC 2 (No Pretrain) | STDC2 | 512x1024 | 80000 | 8.27 | 23.71 | 73.15 | 76.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/stdc/stdc2_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_512x1024_80k_cityscapes/stdc2_512x1024_80k_cityscapes_20220222_132015-fb1e3a1a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_512x1024_80k_cityscapes/stdc2_512x1024_80k_cityscapes_20220222_132015.log.json) | | STDC 2 | STDC2 | 512x1024 | 80000 | - | - | 76.67 | 78.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes/stdc2_in1k-pre_512x1024_80k_cityscapes_20220224_073048-1f8f0f6c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes/stdc2_in1k-pre_512x1024_80k_cityscapes_20220224_073048.log.json) | Note: - For STDC on Cityscapes dataset, default setting is 4 GPUs with 12 samples per GPU in training. - `No Pretrain` means the model is trained from scratch. - The FPS is for reference only. The environment is also different from paper setting, whose input size is `512x1024` and `768x1536`, i.e., 50% and 75% of our input size, respectively and using TensorRT. - The parameter `fusion_kernel` in `STDCHead` is not learnable. In official repo, `find_unused_parameters=True` is set [here](https://github.com/MichaelFan01/STDC-Seg/blob/59ff37fbd693b99972c76fcefe97caa14aeb619f/train.py#L220). You may check it by printing model parameters of original repo on your own.
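If distributed training errors out because some parameters never receive gradients (the fixed `fusion_kernel` mentioned above is the usual suspect), MMSegmentation reads a `find_unused_parameters` flag from the config, so the official repo's behaviour can be reproduced with a one-line override. A minimal sketch, assuming you start from the provided STDC config:

```python
# my_stdc1_512x1024_80k_cityscapes.py -- hypothetical user config
_base_ = './stdc1_512x1024_80k_cityscapes.py'

# Let DDP tolerate parameters that receive no gradient, mirroring
# find_unused_parameters=True in the official STDC-Seg training script.
find_unused_parameters = True
```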
7,406
99.094595
1,347
md
mmsegmentation
mmsegmentation-master/configs/stdc/stdc.yml
Collections: - Name: STDC Metadata: Training Data: - Cityscapes Paper: URL: https://arxiv.org/abs/2104.13188 Title: Rethinking BiSeNet For Real-time Semantic Segmentation README: configs/stdc/README.md Code: URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.20.0/mmseg/models/backbones/stdc.py#L394 Version: v0.20.0 Converted From: Code: https://github.com/MichaelFan01/STDC-Seg Models: - Name: stdc1_512x1024_80k_cityscapes In Collection: STDC Metadata: backbone: STDC1 crop size: (512,1024) lr schd: 80000 inference time (ms/im): - value: 43.37 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,1024) Training Memory (GB): 7.15 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 71.82 mIoU(ms+flip): 73.89 Config: configs/stdc/stdc1_512x1024_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_512x1024_80k_cityscapes/stdc1_512x1024_80k_cityscapes_20220224_073048-74e6920a.pth - Name: stdc1_in1k-pre_512x1024_80k_cityscapes In Collection: STDC Metadata: backbone: STDC1 crop size: (512,1024) lr schd: 80000 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 74.94 mIoU(ms+flip): 76.97 Config: configs/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes/stdc1_in1k-pre_512x1024_80k_cityscapes_20220224_141648-3d4c2981.pth - Name: stdc2_512x1024_80k_cityscapes In Collection: STDC Metadata: backbone: STDC2 crop size: (512,1024) lr schd: 80000 inference time (ms/im): - value: 42.18 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,1024) Training Memory (GB): 8.27 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 73.15 mIoU(ms+flip): 76.13 Config: configs/stdc/stdc2_512x1024_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_512x1024_80k_cityscapes/stdc2_512x1024_80k_cityscapes_20220222_132015-fb1e3a1a.pth - Name: stdc2_in1k-pre_512x1024_80k_cityscapes In Collection: STDC Metadata: backbone: STDC2 crop size: (512,1024) lr schd: 80000 Results: - Task: Semantic Segmentation Dataset: Cityscapes Metrics: mIoU: 76.67 mIoU(ms+flip): 78.67 Config: configs/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes/stdc2_in1k-pre_512x1024_80k_cityscapes_20220224_073048-1f8f0f6c.pth
2,777
30.568182
173
yml
mmsegmentation
mmsegmentation-master/configs/stdc/stdc1_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/stdc.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
lr_config = dict(warmup='linear', warmup_iters=1000)
data = dict(
    samples_per_gpu=12,
    workers_per_gpu=4,
)
270
26.1
73
py
mmsegmentation
mmsegmentation-master/configs/stdc/stdc1_in1k-pre_512x1024_80k_cityscapes.py
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/stdc/stdc1_20220308-5368626c.pth'  # noqa
_base_ = './stdc1_512x1024_80k_cityscapes.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(type='Pretrained', checkpoint=checkpoint))))
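The same `init_cfg` hook accepts a local path, so a checkpoint produced by the conversion command in the README can be plugged in directly. A small sketch, assuming the converted weights were written to `./pretrained/stdc1.pth` as in the README example:

```python
# Hypothetical variant initializing from a locally converted checkpoint.
_base_ = './stdc1_512x1024_80k_cityscapes.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(
                type='Pretrained', checkpoint='./pretrained/stdc1.pth'))))
```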
293
41
115
py
mmsegmentation
mmsegmentation-master/configs/stdc/stdc2_512x1024_80k_cityscapes.py
_base_ = './stdc1_512x1024_80k_cityscapes.py'
model = dict(backbone=dict(backbone_cfg=dict(stdc_type='STDCNet2')))
115
37.666667
68
py
mmsegmentation
mmsegmentation-master/configs/stdc/stdc2_in1k-pre_512x1024_80k_cityscapes.py
checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/stdc/stdc2_20220308-7dbd9127.pth'  # noqa
_base_ = './stdc2_512x1024_80k_cityscapes.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(type='Pretrained', checkpoint=checkpoint))))
293
41
115
py
mmsegmentation
mmsegmentation-master/configs/swin/README.md
# Swin Transformer

[Swin Transformer: Hierarchical Vision Transformer using Shifted Windows](https://arxiv.org/abs/2103.14030)

## Introduction

<!-- [BACKBONE] -->

<a href="https://github.com/microsoft/Swin-Transformer">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/swin.py#L524">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

This paper presents a new vision Transformer, called Swin Transformer, that capably serves as a general-purpose backbone for computer vision. Challenges in adapting Transformer from language to vision arise from differences between the two domains, such as large variations in the scale of visual entities and the high resolution of pixels in images compared to words in text. To address these differences, we propose a hierarchical Transformer whose representation is computed with Shifted windows. The shifted windowing scheme brings greater efficiency by limiting self-attention computation to non-overlapping local windows while also allowing for cross-window connection. This hierarchical architecture has the flexibility to model at various scales and has linear computational complexity with respect to image size. These qualities of Swin Transformer make it compatible with a broad range of vision tasks, including image classification (87.3 top-1 accuracy on ImageNet-1K) and dense prediction tasks such as object detection (58.7 box AP and 51.1 mask AP on COCO test-dev) and semantic segmentation (53.5 mIoU on ADE20K val). Its performance surpasses the previous state-of-the-art by a large margin of +2.7 box AP and +2.6 mask AP on COCO, and +3.2 mIoU on ADE20K, demonstrating the potential of Transformer-based models as vision backbones. The hierarchical design and the shifted window approach also prove beneficial for all-MLP architectures. The code and models are publicly available at [this https URL](https://github.com/microsoft/Swin-Transformer).

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142902882-3fb9014c-11b6-47e9-aa14-500dfe7cbb1c.png" width="80%"/>
</div>

## Citation

```bibtex
@article{liu2021Swin,
  title={Swin Transformer: Hierarchical Vision Transformer using Shifted Windows},
  author={Liu, Ze and Lin, Yutong and Cao, Yue and Hu, Han and Wei, Yixuan and Zhang, Zheng and Lin, Stephen and Guo, Baining},
  journal={arXiv preprint arXiv:2103.14030},
  year={2021}
}
```

## Usage

We have provided pretrained models converted from [official repo](https://github.com/microsoft/Swin-Transformer).

If you want to convert keys on your own to use official repositories' pre-trained models, we also provide a script [`swin2mmseg.py`](../../tools/model_converters/swin2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/SwinTransformer/Swin-Transformer-Semantic-Segmentation) to MMSegmentation style.

```shell
python tools/model_converters/swin2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH}
```

E.g.

```shell
python tools/model_converters/swin2mmseg.py https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth pretrain/swin_base_patch4_window7_224.pth
```

This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
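A quick way to sanity-check a converted file is to load it with PyTorch and glance at a few keys; depending on how it was saved, the weights may sit at the top level or under a `state_dict` entry, so the sketch below handles both (the printed names simply reflect whatever is in the file, not a required layout):

```python
import torch

# Illustrative inspection of a converted checkpoint; the file name is the one
# produced by the example command above.
ckpt = torch.load('pretrain/swin_base_patch4_window7_224.pth',
                  map_location='cpu')
state_dict = ckpt.get('state_dict', ckpt) if isinstance(ckpt, dict) else ckpt

for name in list(state_dict)[:5]:
    print(name, tuple(state_dict[name].shape))
```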
In our default setting, pretrained models and their corresponding [original models](https://github.com/microsoft/Swin-Transforme) models could be defined below: | pretrained models | original models | | ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------- | | pretrain/swin_tiny_patch4_window7_224.pth | [swin_tiny_patch4_window7_224.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_tiny_patch4_window7_224.pth) | | pretrain/swin_small_patch4_window7_224.pth | [swin_small_patch4_window7_224.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_small_patch4_window7_224.pth) | | pretrain/swin_base_patch4_window7_224.pth | [swin_base_patch4_window7_224.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224.pth) | | pretrain/swin_base_patch4_window7_224_22k.pth | [swin_base_patch4_window7_224_22k.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window7_224_22k.pth) | | pretrain/swin_base_patch4_window12_384.pth | [swin_base_patch4_window12_384.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384.pth) | | pretrain/swin_base_patch4_window12_384_22k.pth | [swin_base_patch4_window12_384_22k.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_base_patch4_window12_384_22k.pth) | | pretrain/swin_large_patch4_window7_224_22k.pth | [swin_large_patch4_window7_224_22k.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window7_224_22k.pth) | | pretrain/swin_large_patch4_window12_384_22k.pth | [swin_large_patch4_window12_384_22k.pth](https://github.com/SwinTransformer/storage/releases/download/v1.0.0/swin_large_patch4_window12_384_22k.pth) | ## Results and models ### ADE20K | Method | Backbone | Crop Size | pretrain | pretrain img size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | ------- | -------- | --------- | ------------ | ----------------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | -------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | UPerNet | Swin-T | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 5.02 | 21.06 | 44.41 | 45.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth) \| 
[log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542.log.json) | | UPerNet | Swin-S | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 6.17 | 14.72 | 47.72 | 49.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015.log.json) | | UPerNet | Swin-B | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 7.61 | 12.65 | 47.99 | 49.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192340-593b0e13.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192340.log.json) | | UPerNet | Swin-B | 512x512 | ImageNet-22K | 224x224 | 16 | 160000 | - | - | 50.13 | 51.9 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K_20210526_211650-762e2178.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K_20210526_211650.log.json) | | UPerNet | Swin-B | 512x512 | ImageNet-1K | 384x384 | 16 | 160000 | 8.52 | 12.10 | 48.35 | 49.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K_20210531_132020-05b22ea4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K_20210531_132020.log.json) | | UPerNet | Swin-B | 512x512 | ImageNet-22K | 384x384 | 16 | 160000 | - | - | 50.76 | 52.4 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459.log.json) | | UPerNet | Swin-L | 512x512 | ImageNet-22K | 224x224 | 16 | 160000 | 10.98 | 8.23 | 51.17 | 52.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_large_patch4_window7_512x512_pretrain_224x224_22K_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window7_512x512_pretrain_224x224_22K_160k_ade20k/upernet_swin_large_patch4_window7_512x512_pretrain_224x224_22K_160k_ade20k_20220318_015320-48d180dd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window7_512x512_pretrain_224x224_22K_160k_ade20k/upernet_swin_large_patch4_window7_512x512_pretrain_224x224_22K_160k_ade20k_20220318_015320.log.json) | | UPerNet | Swin-L | 512x512 | ImageNet-22K | 384x384 | 16 | 160000 | 12.42 | 7.57 | 52.25 | 54.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743.log.json) |
13,529
166.037037
1,566
md
mmsegmentation
mmsegmentation-master/configs/swin/swin.yml
Models: - Name: upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K In Collection: UPerNet Metadata: backbone: Swin-T crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 47.48 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 5.02 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 44.41 mIoU(ms+flip): 45.79 Config: configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210531_112542-e380ad3e.pth - Name: upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K In Collection: UPerNet Metadata: backbone: Swin-S crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 67.93 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 6.17 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 47.72 mIoU(ms+flip): 49.24 Config: configs/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192015-ee2fff1c.pth - Name: upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K In Collection: UPerNet Metadata: backbone: Swin-B crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 79.05 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 7.61 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 47.99 mIoU(ms+flip): 49.57 Config: configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K_20210526_192340-593b0e13.pth - Name: upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K In Collection: UPerNet Metadata: backbone: Swin-B crop size: (512,512) lr schd: 160000 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 50.13 mIoU(ms+flip): 51.9 Config: configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K_20210526_211650-762e2178.pth - Name: upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K In Collection: UPerNet Metadata: backbone: Swin-B crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 82.64 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 8.52 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 48.35 mIoU(ms+flip): 49.65 Config: configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K.py Weights: 
https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K_20210531_132020-05b22ea4.pth - Name: upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K In Collection: UPerNet Metadata: backbone: Swin-B crop size: (512,512) lr schd: 160000 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 50.76 mIoU(ms+flip): 52.4 Config: configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K_20210531_125459-429057bf.pth - Name: upernet_swin_large_patch4_window7_512x512_pretrain_224x224_22K_160k_ade20k In Collection: UPerNet Metadata: backbone: Swin-L crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 121.51 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 10.98 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 51.17 mIoU(ms+flip): 52.99 Config: configs/swin/upernet_swin_large_patch4_window7_512x512_pretrain_224x224_22K_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window7_512x512_pretrain_224x224_22K_160k_ade20k/upernet_swin_large_patch4_window7_512x512_pretrain_224x224_22K_160k_ade20k_20220318_015320-48d180dd.pth - Name: upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k In Collection: UPerNet Metadata: backbone: Swin-L crop size: (512,512) lr schd: 160000 inference time (ms/im): - value: 132.1 hardware: V100 backend: PyTorch batch size: 1 mode: FP32 resolution: (512,512) Training Memory (GB): 12.42 Results: - Task: Semantic Segmentation Dataset: ADE20K Metrics: mIoU: 52.25 mIoU(ms+flip): 54.12 Config: configs/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k.py Weights: https://download.openmmlab.com/mmsegmentation/v0.5/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k_20220318_091743-9ba68901.pth
6,339
38.135802
247
yml
mmsegmentation
mmsegmentation-master/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_1K.py
_base_ = [
    'upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_'
    'pretrain_224x224_1K.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window12_384_20220317-55b0104a.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
        pretrain_img_size=384,
        embed_dims=128,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32],
        window_size=12),
    decode_head=dict(in_channels=[128, 256, 512, 1024], num_classes=150),
    auxiliary_head=dict(in_channels=512, num_classes=150))
627
38.25
144
py
mmsegmentation
mmsegmentation-master/configs/swin/upernet_swin_base_patch4_window12_512x512_160k_ade20k_pretrain_384x384_22K.py
_base_ = [
    './upernet_swin_base_patch4_window12_512x512_160k_ade20k_'
    'pretrain_384x384_1K.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window12_384_22k_20220317-e5c09f74.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file)))
358
38.888889
148
py
mmsegmentation
mmsegmentation-master/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py
_base_ = [
    './upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_'
    'pretrain_224x224_1K.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window7_224_20220317-e9b98025.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
        embed_dims=128,
        depths=[2, 2, 18, 2],
        num_heads=[4, 8, 16, 32]),
    decode_head=dict(in_channels=[128, 256, 512, 1024], num_classes=150),
    auxiliary_head=dict(in_channels=512, num_classes=150))
573
40
143
py
mmsegmentation
mmsegmentation-master/configs/swin/upernet_swin_base_patch4_window7_512x512_160k_ade20k_pretrain_224x224_22K.py
_base_ = [
    './upernet_swin_base_patch4_window7_512x512_160k_ade20k_'
    'pretrain_224x224_1K.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_base_patch4_window7_224_22k_20220317-4f79f7c0.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file)))
356
38.666667
147
py
mmsegmentation
mmsegmentation-master/configs/swin/upernet_swin_large_patch4_window12_512x512_pretrain_384x384_22K_160k_ade20k.py
_base_ = [
    'upernet_swin_large_patch4_window7_512x512_'
    'pretrain_224x224_22K_160k_ade20k.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window12_384_22k_20220412-6580f57d.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
        pretrain_img_size=384,
        window_size=12))
413
36.636364
149
py
mmsegmentation
mmsegmentation-master/configs/swin/upernet_swin_large_patch4_window7_512x512_pretrain_224x224_22K_160k_ade20k.py
_base_ = [
    'upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_'
    'pretrain_224x224_1K.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220412-aeecf2aa.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
        pretrain_img_size=224,
        embed_dims=192,
        depths=[2, 2, 18, 2],
        num_heads=[6, 12, 24, 48],
        window_size=7),
    decode_head=dict(in_channels=[192, 384, 768, 1536], num_classes=150),
    auxiliary_head=dict(in_channels=768, num_classes=150))
631
38.5
148
py
mmsegmentation
mmsegmentation-master/configs/swin/upernet_swin_small_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py
_base_ = [
    './upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_'
    'pretrain_224x224_1K.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_small_patch4_window7_224_20220317-7ba6d6dd.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
        depths=[2, 2, 18, 2]),
    decode_head=dict(in_channels=[96, 192, 384, 768], num_classes=150),
    auxiliary_head=dict(in_channels=384, num_classes=150))
514
41.916667
144
py
mmsegmentation
mmsegmentation-master/configs/swin/upernet_swin_tiny_patch4_window7_512x512_160k_ade20k_pretrain_224x224_1K.py
_base_ = [
    '../_base_/models/upernet_swin.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220317-1cdeb081.pth'  # noqa
model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint_file),
        embed_dims=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        use_abs_pos_embed=False,
        drop_path_rate=0.3,
        patch_norm=True),
    decode_head=dict(in_channels=[96, 192, 384, 768], num_classes=150),
    auxiliary_head=dict(in_channels=384, num_classes=150))

# AdamW optimizer, no weight decay for position embedding & layer norm
# in backbone
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.00006,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))

lr_config = dict(
    _delete_=True,
    policy='poly',
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-6,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)

# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(samples_per_gpu=2)
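The `custom_keys` rule above works by substring matching on parameter names: any parameter whose name contains `absolute_pos_embed`, `relative_position_bias_table`, or `norm` gets `decay_mult=0.` and is therefore excluded from weight decay. A rough stand-alone illustration of that rule (the matching is re-implemented here for clarity and the parameter names are made up; the real logic lives in MMCV's optimizer constructor):

```python
# Which parameters would be exempted from weight decay under the rule above.
no_decay_keys = ('absolute_pos_embed', 'relative_position_bias_table', 'norm')


def decay_mult(param_name):
    return 0. if any(key in param_name for key in no_decay_keys) else 1.


print(decay_mult('backbone.stages.0.blocks.0.norm1.weight'))           # 0.0
print(decay_mult('backbone.stages.0.blocks.0.ffn.layers.0.0.weight'))  # 1.0
print(decay_mult('decode_head.conv_seg.weight'))                       # 1.0
```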
1,428
30.065217
143
py
mmsegmentation
mmsegmentation-master/configs/twins/README.md
# Twins

[Twins: Revisiting the Design of Spatial Attention in Vision Transformers](https://arxiv.org/pdf/2104.13840.pdf)

## Introduction

<!-- [BACKBONE] -->

<a href="https://github.com/Meituan-AutoML/Twins">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.20.0/mmseg/models/backbones/twins.py#L352">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Very recently, a variety of vision transformer architectures for dense prediction tasks have been proposed and they show that the design of spatial attention is critical to their success in these tasks. In this work, we revisit the design of the spatial attention and demonstrate that a carefully-devised yet simple spatial attention mechanism performs favourably against the state-of-the-art schemes. As a result, we propose two vision transformer architectures, namely, Twins-PCPVT and Twins-SVT. Our proposed architectures are highly-efficient and easy to implement, only involving matrix multiplications that are highly optimized in modern deep learning frameworks. More importantly, the proposed architectures achieve excellent performance on a wide range of visual tasks, including image level classification as well as dense detection and segmentation. The simplicity and strong performance suggest that our proposed architectures may serve as stronger backbones for many vision tasks. Our code is released at [this https URL](https://github.com/Meituan-AutoML/Twins).

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/145021310-57826cf5-5e03-4c7c-9081-ffa744bdae27.png" width="80%"/>
</div>

## Citation

```bibtex
@article{chu2021twins,
  title={Twins: Revisiting spatial attention design in vision transformers},
  author={Chu, Xiangxiang and Tian, Zhi and Wang, Yuqing and Zhang, Bo and Ren, Haibing and Wei, Xiaolin and Xia, Huaxia and Shen, Chunhua},
  journal={arXiv preprint arXiv:2104.13840},
  year={2021}
}
```

## Usage

We have provided pretrained models converted from [official repo](https://github.com/Meituan-AutoML/Twins).

If you want to convert keys on your own to use official repositories' pre-trained models, we also provide a script [`twins2mmseg.py`](../../tools/model_converters/twins2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/Meituan-AutoML/Twins) to MMSegmentation style.

```shell
python tools/model_converters/twins2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH} ${MODEL_TYPE}
```

This script converts a `pcpvt` or `svt` pretrained model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.
For example, ```shell python tools/model_converters/twins2mmseg.py ./alt_gvt_base.pth ./pretrained/alt_gvt_base.pth svt ``` ## Results and models ### ADE20K | Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download | | ------------------- | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | ------------------------------------------------------------------------------------------------------------------------------------ | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | | Twins-FPN | PCPVT-S | 512x512 | 80000 | 6.60 | 27.15 | 43.26 | 44.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_204132-41acd132.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_204132.log.json) | | Twins-UPerNet | PCPVT-S | 512x512 | 160000 | 9.67 | 14.24 | 46.04 | 46.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k_20211201_233537-8e99c07a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k_20211201_233537.log.json) | | Twins-FPN | PCPVT-B | 512x512 | 80000 | 8.41 | 19.67 | 45.66 | 46.48 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141019-d396db72.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141019.log.json) | | Twins-UPerNet (8x2) | PCPVT-B | 512x512 | 160000 | 6.46 | 12.04 | 47.91 | 48.64 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k_20211130_141020-02094ea5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k_20211130_141020.log.json) | | Twins-FPN | PCPVT-L | 512x512 | 80000 | 10.78 | 14.32 | 45.94 | 46.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_105226-bc6d61dc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_105226.log.json) | | Twins-UPerNet (8x2) | PCPVT-L | 512x512 | 160000 | 7.82 | 10.70 | 49.35 | 50.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k_20211201_075053-c6095c07.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k_20211201_075053.log.json) | | Twins-FPN | SVT-S | 512x512 | 80000 | 5.80 | 29.79 | 44.47 | 45.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141006-0a0d3317.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141006.log.json) | | Twins-UPerNet (8x2) | SVT-S | 512x512 | 160000 | 4.93 | 15.09 | 46.08 | 46.96 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k/twins_svt-s_uperhead_8x2_512x512_160k_ade20k_20211130_141005-e48a2d94.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k/twins_svt-s_uperhead_8x2_512x512_160k_ade20k_20211130_141005.log.json) | | Twins-FPN | SVT-B | 512x512 | 80000 | 8.75 | 21.10 | 46.77 | 47.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_113849-88b2907c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_113849.log.json) | | Twins-UPerNet (8x2) | SVT-B | 512x512 | 160000 | 6.77 | 12.66 | 48.04 | 48.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k/twins_svt-b_uperhead_8x2_512x512_160k_ade20k_20211202_040826-0943a1f1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k/twins_svt-b_uperhead_8x2_512x512_160k_ade20k_20211202_040826.log.json) | | Twins-FPN | SVT-L | 512x512 | 80000 | 11.20 | 17.80 | 46.55 | 47.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py) | 
[model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141005-1d59bee2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141005.log.json) | | Twins-UPerNet (8x2) | SVT-L | 512x512 | 160000 | 8.41 | 10.73 | 49.65 | 50.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k/twins_svt-l_uperhead_8x2_512x512_160k_ade20k_20211130_141005-3e2cae61.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k/twins_svt-l_uperhead_8x2_512x512_160k_ade20k_20211130_141005.log.json) | Note: - `8x2` means 8 GPUs with 2 samples per GPU in training. Default setting of Twins on ADE20K is 8 GPUs with 4 samples per GPU in training. - `UPerNet` and `FPN` are decoder heads utilized in corresponding Twins model, which is `UPerHead` and `FPNHead`, respectively. Specifically, models in [official repo](https://github.com/Meituan-AutoML/Twins) all use `UPerHead`.
11,985
154.662338
1,075
md
mmsegmentation
mmsegmentation-master/configs/twins/twins.yml
Models:
- Name: twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k
  In Collection: FPN
  Metadata:
    backbone: PCPVT-S
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 36.83
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.6
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.26
      mIoU(ms+flip): 44.11
  Config: configs/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_204132-41acd132.pth
- Name: twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k
  In Collection: UPerNet
  Metadata:
    backbone: PCPVT-S
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 70.22
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.67
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 46.04
      mIoU(ms+flip): 46.92
  Config: configs/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k_20211201_233537-8e99c07a.pth
- Name: twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k
  In Collection: FPN
  Metadata:
    backbone: PCPVT-B
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 50.84
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.41
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 45.66
      mIoU(ms+flip): 46.48
  Config: configs/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141019-d396db72.pth
- Name: twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k
  In Collection: UPerNet
  Metadata:
    backbone: PCPVT-B
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 83.06
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.46
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 47.91
      mIoU(ms+flip): 48.64
  Config: configs/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k_20211130_141020-02094ea5.pth
- Name: twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k
  In Collection: FPN
  Metadata:
    backbone: PCPVT-L
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 69.83
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 10.78
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 45.94
      mIoU(ms+flip): 46.7
  Config: configs/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_105226-bc6d61dc.pth
- Name: twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k
  In Collection: UPerNet
  Metadata:
    backbone: PCPVT-L
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 93.46
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 7.82
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 49.35
      mIoU(ms+flip): 50.08
  Config: configs/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k_20211201_075053-c6095c07.pth
- Name: twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k
  In Collection: FPN
  Metadata:
    backbone: SVT-S
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 33.57
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 5.8
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 44.47
      mIoU(ms+flip): 45.42
  Config: configs/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-s_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141006-0a0d3317.pth
- Name: twins_svt-s_uperhead_8x2_512x512_160k_ade20k
  In Collection: UPerNet
  Metadata:
    backbone: SVT-S
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 66.27
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 4.93
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 46.08
      mIoU(ms+flip): 46.96
  Config: configs/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-s_uperhead_8x2_512x512_160k_ade20k/twins_svt-s_uperhead_8x2_512x512_160k_ade20k_20211130_141005-e48a2d94.pth
- Name: twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k
  In Collection: FPN
  Metadata:
    backbone: SVT-B
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 47.39
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.75
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 46.77
      mIoU(ms+flip): 47.47
  Config: configs/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-b_fpn_fpnhead_8x4_512x512_80k_ade20k_20211201_113849-88b2907c.pth
- Name: twins_svt-b_uperhead_8x2_512x512_160k_ade20k
  In Collection: UPerNet
  Metadata:
    backbone: SVT-B
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 78.99
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.77
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 48.04
      mIoU(ms+flip): 48.87
  Config: configs/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-b_uperhead_8x2_512x512_160k_ade20k/twins_svt-b_uperhead_8x2_512x512_160k_ade20k_20211202_040826-0943a1f1.pth
- Name: twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k
  In Collection: FPN
  Metadata:
    backbone: SVT-L
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 56.18
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 11.2
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 46.55
      mIoU(ms+flip): 47.74
  Config: configs/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k/twins_svt-l_fpn_fpnhead_8x4_512x512_80k_ade20k_20211130_141005-1d59bee2.pth
- Name: twins_svt-l_uperhead_8x2_512x512_160k_ade20k
  In Collection: UPerNet
  Metadata:
    backbone: SVT-L
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 93.2
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.41
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 49.65
      mIoU(ms+flip): 50.63
  Config: configs/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/twins/twins_svt-l_uperhead_8x2_512x512_160k_ade20k/twins_svt-l_uperhead_8x2_512x512_160k_ade20k_20211130_141005-3e2cae61.pth
8747
31.887218
194
yml
mmsegmentation
mmsegmentation-master/configs/twins/twins_pcpvt-b_fpn_fpnhead_8x4_512x512_80k_ade20k.py
_base_ = ['./twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py']

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_base_20220308-0621964c.pth'  # noqa

model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        depths=[3, 4, 18, 3]),
)
322
34.888889
121
py
mmsegmentation
mmsegmentation-master/configs/twins/twins_pcpvt-b_uperhead_8x2_512x512_160k_ade20k.py
_base_ = ['./twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py']

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_base_20220308-0621964c.pth'  # noqa

model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        depths=[3, 4, 18, 3],
        drop_path_rate=0.3))

data = dict(samples_per_gpu=2, workers_per_gpu=2)
397
32.166667
121
py
mmsegmentation
mmsegmentation-master/configs/twins/twins_pcpvt-l_fpn_fpnhead_8x4_512x512_80k_ade20k.py
_base_ = ['./twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py']

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_large_20220308-37579dc6.pth'  # noqa

model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        depths=[3, 8, 27, 3]))
321
34.777778
122
py
mmsegmentation
mmsegmentation-master/configs/twins/twins_pcpvt-l_uperhead_8x2_512x512_160k_ade20k.py
_base_ = ['./twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py']

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_large_20220308-37579dc6.pth'  # noqa

model = dict(
    backbone=dict(
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        depths=[3, 8, 27, 3],
        drop_path_rate=0.3))

data = dict(samples_per_gpu=2, workers_per_gpu=2)
398
32.25
122
py
mmsegmentation
mmsegmentation-master/configs/twins/twins_pcpvt-s_fpn_fpnhead_8x4_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/twins_pcpvt-s_fpn.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0001)
243
33.857143
77
py
mmsegmentation
mmsegmentation-master/configs/twins/twins_pcpvt-s_uperhead_8x4_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/twins_pcpvt-s_upernet.py',
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.00006,
    betas=(0.9, 0.999),
    weight_decay=0.01,
    paramwise_cfg=dict(custom_keys={
        'pos_block': dict(decay_mult=0.),
        'norm': dict(decay_mult=0.)
    }))

lr_config = dict(
    _delete_=True,
    policy='poly',
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-6,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)
590
20.888889
67
py