mmsegmentation-master/configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py

_base_ = './fcn_hr18_480x480_80k_pascal_context_59.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
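All of the `fcn_hr48_*` configs in this folder follow the same pattern: they inherit an HRNetV2-W18 base config via `_base_` and only override the backbone stage widths and the decode-head channels. A minimal sketch of how to inspect the merged result, assuming an mmsegmentation-master checkout with mmcv installed (the printed values follow directly from the overrides above):

```python
# Minimal sketch: mmcv resolves `_base_` inheritance by recursively merging
# this file's overrides into the inherited fcn_hr18 config.
from mmcv import Config

cfg = Config.fromfile(
    'configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py')
print(cfg.model.backbone.extra.stage4.num_channels)  # (48, 96, 192, 384)
print(cfg.model.decode_head.channels)  # 720, i.e. sum([48, 96, 192, 384])
```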
mmsegmentation-master/configs/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen.py

_base_ = './fcn_hr18_4x4_512x512_80k_vaihingen.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/fcn_hr48_4x4_896x896_80k_isaid.py

_base_ = './fcn_hr18_4x4_896x896_80k_isaid.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py

_base_ = './fcn_hr18_512x1024_160k_cityscapes.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py

_base_ = './fcn_hr18_512x1024_40k_cityscapes.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py

_base_ = './fcn_hr18_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/fcn_hr48_512x512_160k_ade20k.py

_base_ = './fcn_hr18_512x512_160k_ade20k.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py

_base_ = './fcn_hr18_512x512_20k_voc12aug.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py

_base_ = './fcn_hr18_512x512_40k_voc12aug.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/fcn_hr48_512x512_80k_ade20k.py

_base_ = './fcn_hr18_512x512_80k_ade20k.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/fcn_hr48_512x512_80k_loveda.py

_base_ = './fcn_hr18_512x512_80k_loveda.py'
model = dict(
    backbone=dict(
        init_cfg=dict(
            type='Pretrained',
            checkpoint='open-mmlab://msra/hrnetv2_w48'),
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/fcn_hr48_512x512_80k_potsdam.py

_base_ = './fcn_hr18_512x512_80k_potsdam.py'
model = dict(
    pretrained='open-mmlab://msra/hrnetv2_w48',
    backbone=dict(
        extra=dict(
            stage2=dict(num_channels=(48, 96)),
            stage3=dict(num_channels=(48, 96, 192)),
            stage4=dict(num_channels=(48, 96, 192, 384)))),
    decode_head=dict(
        in_channels=[48, 96, 192, 384],
        channels=sum([48, 96, 192, 384])))
mmsegmentation-master/configs/hrnet/hrnet.yml

Models:
- Name: fcn_hr18s_512x1024_40k_cityscapes
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 42.12
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 1.7
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 73.86
      mIoU(ms+flip): 75.91
  Config: configs/hrnet/fcn_hr18s_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_40k_cityscapes/fcn_hr18s_512x1024_40k_cityscapes_20200601_014216-93db27d0.pth
- Name: fcn_hr18_512x1024_40k_cityscapes
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 77.1
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 2.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.19
      mIoU(ms+flip): 78.92
  Config: configs/hrnet/fcn_hr18_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_40k_cityscapes/fcn_hr18_512x1024_40k_cityscapes_20200601_014216-f196fb4e.pth
- Name: fcn_hr48_512x1024_40k_cityscapes
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 155.76
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 6.2
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.48
      mIoU(ms+flip): 79.69
  Config: configs/hrnet/fcn_hr48_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_40k_cityscapes/fcn_hr48_512x1024_40k_cityscapes_20200601_014240-a989b146.pth
- Name: fcn_hr18s_512x1024_80k_cityscapes
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 75.31
      mIoU(ms+flip): 77.48
  Config: configs/hrnet/fcn_hr18s_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_80k_cityscapes/fcn_hr18s_512x1024_80k_cityscapes_20200601_202700-1462b75d.pth
- Name: fcn_hr18_512x1024_80k_cityscapes
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.65
      mIoU(ms+flip): 80.35
  Config: configs/hrnet/fcn_hr18_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_80k_cityscapes/fcn_hr18_512x1024_80k_cityscapes_20200601_223255-4e7b345e.pth
- Name: fcn_hr48_512x1024_80k_cityscapes
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.93
      mIoU(ms+flip): 80.72
  Config: configs/hrnet/fcn_hr48_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_80k_cityscapes/fcn_hr48_512x1024_80k_cityscapes_20200601_202606-58ea95d6.pth
- Name: fcn_hr18s_512x1024_160k_cityscapes
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,1024)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 76.31
      mIoU(ms+flip): 78.31
  Config: configs/hrnet/fcn_hr18s_512x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x1024_160k_cityscapes/fcn_hr18s_512x1024_160k_cityscapes_20200602_190901-4a0797ea.pth
- Name: fcn_hr18_512x1024_160k_cityscapes
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,1024)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.8
      mIoU(ms+flip): 80.74
  Config: configs/hrnet/fcn_hr18_512x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x1024_160k_cityscapes/fcn_hr18_512x1024_160k_cityscapes_20200602_190822-221e4a4f.pth
- Name: fcn_hr48_512x1024_160k_cityscapes
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,1024)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 80.65
      mIoU(ms+flip): 81.92
  Config: configs/hrnet/fcn_hr48_512x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x1024_160k_cityscapes/fcn_hr48_512x1024_160k_cityscapes_20200602_190946-59b7973e.pth
- Name: fcn_hr18s_512x512_80k_ade20k
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 25.87
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 3.8
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 31.38
      mIoU(ms+flip): 32.45
  Config: configs/hrnet/fcn_hr18s_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_ade20k/fcn_hr18s_512x512_80k_ade20k_20200614_144345-77fc814a.pth
- Name: fcn_hr18_512x512_80k_ade20k
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 44.31
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 4.9
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 36.27
      mIoU(ms+flip): 37.28
  Config: configs/hrnet/fcn_hr18_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_ade20k/fcn_hr18_512x512_80k_ade20k_20210827_114910-6c9382c0.pth
- Name: fcn_hr48_512x512_80k_ade20k
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 47.1
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.2
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 41.9
      mIoU(ms+flip): 43.27
  Config: configs/hrnet/fcn_hr48_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_ade20k/fcn_hr48_512x512_80k_ade20k_20200614_193946-7ba5258d.pth
- Name: fcn_hr18s_512x512_160k_ade20k
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 33.07
      mIoU(ms+flip): 34.56
  Config: configs/hrnet/fcn_hr18s_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_160k_ade20k/fcn_hr18s_512x512_160k_ade20k_20210829_174739-f1e7c2e7.pth
- Name: fcn_hr18_512x512_160k_ade20k
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 36.79
      mIoU(ms+flip): 38.58
  Config: configs/hrnet/fcn_hr18_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_160k_ade20k/fcn_hr18_512x512_160k_ade20k_20200614_214426-ca961836.pth
- Name: fcn_hr48_512x512_160k_ade20k
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.02
      mIoU(ms+flip): 43.86
  Config: configs/hrnet/fcn_hr48_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_160k_ade20k/fcn_hr48_512x512_160k_ade20k_20200614_214407-a52fc02c.pth
- Name: fcn_hr18s_512x512_20k_voc12aug
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 23.06
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 1.8
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 65.5
      mIoU(ms+flip): 68.89
  Config: configs/hrnet/fcn_hr18s_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_20k_voc12aug/fcn_hr18s_512x512_20k_voc12aug_20210829_174910-0aceadb4.pth
- Name: fcn_hr18_512x512_20k_voc12aug
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 42.59
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 2.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 72.3
      mIoU(ms+flip): 74.71
  Config: configs/hrnet/fcn_hr18_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_20k_voc12aug/fcn_hr18_512x512_20k_voc12aug_20200617_224503-488d45f7.pth
- Name: fcn_hr48_512x512_20k_voc12aug
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 45.35
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.2
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 75.87
      mIoU(ms+flip): 78.58
  Config: configs/hrnet/fcn_hr48_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_20k_voc12aug/fcn_hr48_512x512_20k_voc12aug_20200617_224419-89de05cd.pth
- Name: fcn_hr18s_512x512_40k_voc12aug
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 66.61
      mIoU(ms+flip): 70.0
  Config: configs/hrnet/fcn_hr18s_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_40k_voc12aug/fcn_hr18s_512x512_40k_voc12aug_20200614_000648-4f8d6e7f.pth
- Name: fcn_hr18_512x512_40k_voc12aug
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 72.9
      mIoU(ms+flip): 75.59
  Config: configs/hrnet/fcn_hr18_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_40k_voc12aug/fcn_hr18_512x512_40k_voc12aug_20200613_224401-1b4b76cd.pth
- Name: fcn_hr48_512x512_40k_voc12aug
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 76.24
      mIoU(ms+flip): 78.49
  Config: configs/hrnet/fcn_hr48_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_40k_voc12aug/fcn_hr48_512x512_40k_voc12aug_20200613_222111-1b0f18bc.pth
- Name: fcn_hr48_480x480_40k_pascal_context
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (480,480)
    lr schd: 40000
    inference time (ms/im):
    - value: 112.87
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (480,480)
    Training Memory (GB): 6.1
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal Context
    Metrics:
      mIoU: 45.14
      mIoU(ms+flip): 47.42
  Config: configs/hrnet/fcn_hr48_480x480_40k_pascal_context.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context/fcn_hr48_480x480_40k_pascal_context_20200911_164852-667d00b0.pth
- Name: fcn_hr48_480x480_80k_pascal_context
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (480,480)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal Context
    Metrics:
      mIoU: 45.84
      mIoU(ms+flip): 47.84
  Config: configs/hrnet/fcn_hr48_480x480_80k_pascal_context.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context/fcn_hr48_480x480_80k_pascal_context_20200911_155322-847a6711.pth
- Name: fcn_hr48_480x480_40k_pascal_context_59
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (480,480)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal Context 59
    Metrics:
      mIoU: 50.33
      mIoU(ms+flip): 52.83
  Config: configs/hrnet/fcn_hr48_480x480_40k_pascal_context_59.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_40k_pascal_context_59/fcn_hr48_480x480_40k_pascal_context_59_20210410_122738-b808b8b2.pth
- Name: fcn_hr48_480x480_80k_pascal_context_59
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (480,480)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal Context 59
    Metrics:
      mIoU: 51.12
      mIoU(ms+flip): 53.56
  Config: configs/hrnet/fcn_hr48_480x480_80k_pascal_context_59.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_480x480_80k_pascal_context_59/fcn_hr48_480x480_80k_pascal_context_59_20210411_003240-3ae7081e.pth
- Name: fcn_hr18s_512x512_80k_loveda
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 40.21
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 1.59
  Results:
  - Task: Semantic Segmentation
    Dataset: LoveDA
    Metrics:
      mIoU: 49.28
      mIoU(ms+flip): 49.42
  Config: configs/hrnet/fcn_hr18s_512x512_80k_loveda.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_loveda/fcn_hr18s_512x512_80k_loveda_20211210_203228-60a86a7a.pth
- Name: fcn_hr18_512x512_80k_loveda
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 77.4
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 2.76
  Results:
  - Task: Semantic Segmentation
    Dataset: LoveDA
    Metrics:
      mIoU: 50.81
      mIoU(ms+flip): 50.95
  Config: configs/hrnet/fcn_hr18_512x512_80k_loveda.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_loveda/fcn_hr18_512x512_80k_loveda_20211210_203952-93d9c3b3.pth
- Name: fcn_hr48_512x512_80k_loveda
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 104.06
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.2
  Results:
  - Task: Semantic Segmentation
    Dataset: LoveDA
    Metrics:
      mIoU: 51.42
      mIoU(ms+flip): 51.64
  Config: configs/hrnet/fcn_hr48_512x512_80k_loveda.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_loveda/fcn_hr48_512x512_80k_loveda_20211211_044756-67072f55.pth
- Name: fcn_hr18s_512x512_80k_potsdam
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 27.78
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 1.58
  Results:
  - Task: Semantic Segmentation
    Dataset: Potsdam
    Metrics:
      mIoU: 77.64
      mIoU(ms+flip): 78.8
  Config: configs/hrnet/fcn_hr18s_512x512_80k_potsdam.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_512x512_80k_potsdam/fcn_hr18s_512x512_80k_potsdam_20211218_205517-ba32af63.pth
- Name: fcn_hr18_512x512_80k_potsdam
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 51.95
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 2.76
  Results:
  - Task: Semantic Segmentation
    Dataset: Potsdam
    Metrics:
      mIoU: 78.26
      mIoU(ms+flip): 79.24
  Config: configs/hrnet/fcn_hr18_512x512_80k_potsdam.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_512x512_80k_potsdam/fcn_hr18_512x512_80k_potsdam_20211218_205517-5d0387ad.pth
- Name: fcn_hr48_512x512_80k_potsdam
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 60.9
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.2
  Results:
  - Task: Semantic Segmentation
    Dataset: Potsdam
    Metrics:
      mIoU: 78.39
      mIoU(ms+flip): 79.34
  Config: configs/hrnet/fcn_hr48_512x512_80k_potsdam.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_512x512_80k_potsdam/fcn_hr48_512x512_80k_potsdam_20211219_020601-97434c78.pth
- Name: fcn_hr18s_4x4_512x512_80k_vaihingen
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 26.24
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 1.58
  Results:
  - Task: Semantic Segmentation
    Dataset: Vaihingen
    Metrics:
      mIoU: 71.81
      mIoU(ms+flip): 73.1
  Config: configs/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_512x512_80k_vaihingen/fcn_hr18s_4x4_512x512_80k_vaihingen_20211231_230909-b23aae02.pth
- Name: fcn_hr18_4x4_512x512_80k_vaihingen
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 51.15
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 2.76
  Results:
  - Task: Semantic Segmentation
    Dataset: Vaihingen
    Metrics:
      mIoU: 72.57
      mIoU(ms+flip): 74.09
  Config: configs/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_512x512_80k_vaihingen/fcn_hr18_4x4_512x512_80k_vaihingen_20211231_231216-2ec3ae8a.pth
- Name: fcn_hr48_4x4_512x512_80k_vaihingen
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 57.97
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.2
  Results:
  - Task: Semantic Segmentation
    Dataset: Vaihingen
    Metrics:
      mIoU: 72.5
      mIoU(ms+flip): 73.52
  Config: configs/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_512x512_80k_vaihingen/fcn_hr48_4x4_512x512_80k_vaihingen_20211231_231244-7133cb22.pth
- Name: fcn_hr18s_4x4_896x896_80k_isaid
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (896,896)
    lr schd: 80000
    inference time (ms/im):
    - value: 72.25
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (896,896)
    Training Memory (GB): 4.95
  Results:
  - Task: Semantic Segmentation
    Dataset: iSAID
    Metrics:
      mIoU: 62.3
      mIoU(ms+flip): 62.97
  Config: configs/hrnet/fcn_hr18s_4x4_896x896_80k_isaid.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18s_4x4_896x896_80k_isaid/fcn_hr18s_4x4_896x896_80k_isaid_20220118_001603-3cc0769b.pth
- Name: fcn_hr18_4x4_896x896_80k_isaid
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (896,896)
    lr schd: 80000
    inference time (ms/im):
    - value: 129.7
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (896,896)
    Training Memory (GB): 8.3
  Results:
  - Task: Semantic Segmentation
    Dataset: iSAID
    Metrics:
      mIoU: 65.06
      mIoU(ms+flip): 65.6
  Config: configs/hrnet/fcn_hr18_4x4_896x896_80k_isaid.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr18_4x4_896x896_80k_isaid/fcn_hr18_4x4_896x896_80k_isaid_20220110_182230-49bf752e.pth
- Name: fcn_hr48_4x4_896x896_80k_isaid
  In Collection: FCN
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (896,896)
    lr schd: 80000
    inference time (ms/im):
    - value: 136.24
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (896,896)
    Training Memory (GB): 16.89
  Results:
  - Task: Semantic Segmentation
    Dataset: iSAID
    Metrics:
      mIoU: 67.8
      mIoU(ms+flip): 68.53
  Config: configs/hrnet/fcn_hr48_4x4_896x896_80k_isaid.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/hrnet/fcn_hr48_4x4_896x896_80k_isaid/fcn_hr48_4x4_896x896_80k_isaid_20220114_174643-547fc420.pth
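The metafile above is ordinary YAML, so the benchmark numbers and checkpoint URLs can be pulled out programmatically. A small sketch, assuming PyYAML is installed and the file is read from a repo checkout:

```python
# List every model in the metafile with its dataset, mIoU and checkpoint URL.
import yaml

with open('configs/hrnet/hrnet.yml') as f:
    meta = yaml.safe_load(f)

for model in meta['Models']:
    result = model['Results'][0]
    print(f"{model['Name']}: {result['Dataset']} "
          f"mIoU={result['Metrics']['mIoU']} -> {model['Weights']}")
```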
mmsegmentation-master/configs/icnet/README.md

# ICNet

[ICNet for Real-time Semantic Segmentation on High-resolution Images](https://arxiv.org/abs/1704.08545)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/hszhao/ICNet">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/necks/ic_neck.py#L77">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

We focus on the challenging task of real-time semantic segmentation in this paper. It finds many practical applications, yet carries the fundamental difficulty of reducing a large portion of the computation needed for pixel-wise label inference. We propose an image cascade network (ICNet) that incorporates multi-resolution branches under proper label guidance to address this challenge. We provide an in-depth analysis of our framework and introduce the cascade feature fusion unit to quickly achieve high-quality segmentation. Our system yields real-time inference on a single GPU card with decent-quality results evaluated on challenging datasets like Cityscapes, CamVid and COCO-Stuff.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142901772-4570455d-7b27-44ae-a690-47dd9fde8445.png" width="70%"/>
</div>

## Citation

```bibtex
@inproceedings{zhao2018icnet,
  title={Icnet for real-time semantic segmentation on high-resolution images},
  author={Zhao, Hengshuang and Qi, Xiaojuan and Shen, Xiaoyong and Shi, Jianping and Jia, Jiaya},
  booktitle={Proceedings of the European conference on computer vision (ECCV)},
  pages={405--420},
  year={2018}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ---------------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| ICNet | R-18-D8 | 832x832 | 80000 | 1.70 | 27.12 | 68.14 | 70.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r18-d8_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_80k_cityscapes/icnet_r18-d8_832x832_80k_cityscapes_20210925_225521-2e36638d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_80k_cityscapes/icnet_r18-d8_832x832_80k_cityscapes_20210925_225521.log.json) |
| ICNet | R-18-D8 | 832x832 | 160000 | - | - | 71.64 | 74.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r18-d8_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_160k_cityscapes/icnet_r18-d8_832x832_160k_cityscapes_20210925_230153-2c6eb6e0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_160k_cityscapes/icnet_r18-d8_832x832_160k_cityscapes_20210925_230153.log.json) |
| ICNet (in1k-pre) | R-18-D8 | 832x832 | 80000 | - | - | 72.51 | 74.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes_20210925_230354-1cbe3022.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes_20210925_230354.log.json) |
| ICNet (in1k-pre) | R-18-D8 | 832x832 | 160000 | - | - | 74.43 | 76.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes_20210926_052702-619c8ae1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes_20210926_052702.log.json) |
| ICNet | R-50-D8 | 832x832 | 80000 | 2.53 | 20.08 | 68.91 | 69.72 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r50-d8_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_80k_cityscapes/icnet_r50-d8_832x832_80k_cityscapes_20210926_044625-c6407341.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_80k_cityscapes/icnet_r50-d8_832x832_80k_cityscapes_20210926_044625.log.json) |
| ICNet | R-50-D8 | 832x832 | 160000 | - | - | 73.82 | 75.67 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r50-d8_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_160k_cityscapes/icnet_r50-d8_832x832_160k_cityscapes_20210925_232612-a95f0d4e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_160k_cityscapes/icnet_r50-d8_832x832_160k_cityscapes_20210925_232612.log.json) |
| ICNet (in1k-pre) | R-50-D8 | 832x832 | 80000 | - | - | 74.58 | 76.41 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes_20210926_032943-1743dc7b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes_20210926_032943.log.json) |
| ICNet (in1k-pre) | R-50-D8 | 832x832 | 160000 | - | - | 76.29 | 78.09 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes_20210926_042715-ce310aea.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes_20210926_042715.log.json) |
| ICNet | R-101-D8 | 832x832 | 80000 | 3.08 | 16.95 | 70.28 | 71.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r101-d8_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_80k_cityscapes/icnet_r101-d8_832x832_80k_cityscapes_20210926_072447-b52f936e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_80k_cityscapes/icnet_r101-d8_832x832_80k_cityscapes_20210926_072447.log.json) |
| ICNet | R-101-D8 | 832x832 | 160000 | - | - | 73.80 | 76.10 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r101-d8_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_160k_cityscapes/icnet_r101-d8_832x832_160k_cityscapes_20210926_092350-3a1ebf1a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_160k_cityscapes/icnet_r101-d8_832x832_160k_cityscapes_20210926_092350.log.json) |
| ICNet (in1k-pre) | R-101-D8 | 832x832 | 80000 | - | - | 75.57 | 77.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes_20210926_020414-7ceb12c5.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes_20210926_020414.log.json) |
| ICNet (in1k-pre) | R-101-D8 | 832x832 | 160000 | - | - | 76.15 | 77.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes_20210925_232612-9484ae8a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes_20210925_232612.log.json) |

Note: `in1k-pre` means an ImageNet-1k pretrained backbone is used.
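For reference, single-image inference with one of the checkpoints from the table can be sketched with the mmseg v0.x Python API (the checkpoint is assumed to have been downloaded locally from the URL above; `demo/demo.png` ships with the repository):

```python
# Hedged sketch of single-image inference with a pretrained ICNet model.
from mmseg.apis import inference_segmentor, init_segmentor

config = 'configs/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py'
checkpoint = 'icnet_r50-d8_in1k-pre_832x832_160k_cityscapes_20210926_042715-ce310aea.pth'
model = init_segmentor(config, checkpoint, device='cuda:0')
result = inference_segmentor(model, 'demo/demo.png')  # per-pixel class-id map(s)
```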
mmsegmentation-master/configs/icnet/icnet.yml

Collections:
- Name: ICNet
  Metadata:
    Training Data:
    - Cityscapes
  Paper:
    URL: https://arxiv.org/abs/1704.08545
    Title: ICNet for Real-time Semantic Segmentation on High-resolution Images
  README: configs/icnet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/necks/ic_neck.py#L77
    Version: v0.18.0
  Converted From:
    Code: https://github.com/hszhao/ICNet
Models:
- Name: icnet_r18-d8_832x832_80k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-18-D8
    crop size: (832,832)
    lr schd: 80000
    inference time (ms/im):
    - value: 36.87
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (832,832)
    Training Memory (GB): 1.7
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 68.14
      mIoU(ms+flip): 70.16
  Config: configs/icnet/icnet_r18-d8_832x832_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_80k_cityscapes/icnet_r18-d8_832x832_80k_cityscapes_20210925_225521-2e36638d.pth
- Name: icnet_r18-d8_832x832_160k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-18-D8
    crop size: (832,832)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 71.64
      mIoU(ms+flip): 74.18
  Config: configs/icnet/icnet_r18-d8_832x832_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_832x832_160k_cityscapes/icnet_r18-d8_832x832_160k_cityscapes_20210925_230153-2c6eb6e0.pth
- Name: icnet_r18-d8_in1k-pre_832x832_80k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-18-D8
    crop size: (832,832)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 72.51
      mIoU(ms+flip): 74.78
  Config: configs/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes_20210925_230354-1cbe3022.pth
- Name: icnet_r18-d8_in1k-pre_832x832_160k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-18-D8
    crop size: (832,832)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 74.43
      mIoU(ms+flip): 76.72
  Config: configs/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes_20210926_052702-619c8ae1.pth
- Name: icnet_r50-d8_832x832_80k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-50-D8
    crop size: (832,832)
    lr schd: 80000
    inference time (ms/im):
    - value: 49.8
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (832,832)
    Training Memory (GB): 2.53
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 68.91
      mIoU(ms+flip): 69.72
  Config: configs/icnet/icnet_r50-d8_832x832_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_80k_cityscapes/icnet_r50-d8_832x832_80k_cityscapes_20210926_044625-c6407341.pth
- Name: icnet_r50-d8_832x832_160k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-50-D8
    crop size: (832,832)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 73.82
      mIoU(ms+flip): 75.67
  Config: configs/icnet/icnet_r50-d8_832x832_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_832x832_160k_cityscapes/icnet_r50-d8_832x832_160k_cityscapes_20210925_232612-a95f0d4e.pth
- Name: icnet_r50-d8_in1k-pre_832x832_80k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-50-D8
    crop size: (832,832)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 74.58
      mIoU(ms+flip): 76.41
  Config: configs/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes_20210926_032943-1743dc7b.pth
- Name: icnet_r50-d8_in1k-pre_832x832_160k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-50-D8
    crop size: (832,832)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 76.29
      mIoU(ms+flip): 78.09
  Config: configs/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes_20210926_042715-ce310aea.pth
- Name: icnet_r101-d8_832x832_80k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-101-D8
    crop size: (832,832)
    lr schd: 80000
    inference time (ms/im):
    - value: 59.0
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (832,832)
    Training Memory (GB): 3.08
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 70.28
      mIoU(ms+flip): 71.95
  Config: configs/icnet/icnet_r101-d8_832x832_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_80k_cityscapes/icnet_r101-d8_832x832_80k_cityscapes_20210926_072447-b52f936e.pth
- Name: icnet_r101-d8_832x832_160k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-101-D8
    crop size: (832,832)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 73.8
      mIoU(ms+flip): 76.1
  Config: configs/icnet/icnet_r101-d8_832x832_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_832x832_160k_cityscapes/icnet_r101-d8_832x832_160k_cityscapes_20210926_092350-3a1ebf1a.pth
- Name: icnet_r101-d8_in1k-pre_832x832_80k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-101-D8
    crop size: (832,832)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 75.57
      mIoU(ms+flip): 77.86
  Config: configs/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes_20210926_020414-7ceb12c5.pth
- Name: icnet_r101-d8_in1k-pre_832x832_160k_cityscapes
  In Collection: ICNet
  Metadata:
    backbone: R-101-D8
    crop size: (832,832)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 76.15
      mIoU(ms+flip): 77.98
  Config: configs/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes_20210925_232612-9484ae8a.pth
mmsegmentation-master/configs/icnet/icnet_r101-d8_832x832_160k_cityscapes.py

_base_ = './icnet_r50-d8_832x832_160k_cityscapes.py'
model = dict(backbone=dict(backbone_cfg=dict(depth=101)))
mmsegmentation-master/configs/icnet/icnet_r101-d8_832x832_80k_cityscapes.py

_base_ = './icnet_r50-d8_832x832_80k_cityscapes.py'
model = dict(backbone=dict(backbone_cfg=dict(depth=101)))
mmsegmentation-master/configs/icnet/icnet_r101-d8_in1k-pre_832x832_160k_cityscapes.py

_base_ = './icnet_r50-d8_832x832_160k_cityscapes.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            depth=101,
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://resnet101_v1c'))))
mmsegmentation-master/configs/icnet/icnet_r101-d8_in1k-pre_832x832_80k_cityscapes.py

_base_ = './icnet_r50-d8_832x832_80k_cityscapes.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            depth=101,
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://resnet101_v1c'))))
mmsegmentation-master/configs/icnet/icnet_r18-d8_832x832_160k_cityscapes.py

_base_ = './icnet_r50-d8_832x832_160k_cityscapes.py'
model = dict(
    backbone=dict(layer_channels=(128, 512), backbone_cfg=dict(depth=18)))
mmsegmentation-master/configs/icnet/icnet_r18-d8_832x832_80k_cityscapes.py

_base_ = './icnet_r50-d8_832x832_80k_cityscapes.py'
model = dict(
    backbone=dict(layer_channels=(128, 512), backbone_cfg=dict(depth=18)))
mmsegmentation-master/configs/icnet/icnet_r18-d8_in1k-pre_832x832_160k_cityscapes.py

_base_ = './icnet_r50-d8_832x832_160k_cityscapes.py'
model = dict(
    backbone=dict(
        layer_channels=(128, 512),
        backbone_cfg=dict(
            depth=18,
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://resnet18_v1c'))))
mmsegmentation-master/configs/icnet/icnet_r18-d8_in1k-pre_832x832_80k_cityscapes.py

_base_ = './icnet_r50-d8_832x832_80k_cityscapes.py'
model = dict(
    backbone=dict(
        layer_channels=(128, 512),
        backbone_cfg=dict(
            depth=18,
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://resnet18_v1c'))))
mmsegmentation-master/configs/icnet/icnet_r50-d8_832x832_160k_cityscapes.py

_base_ = [
    '../_base_/models/icnet_r50-d8.py',
    '../_base_/datasets/cityscapes_832x832.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]
mmsegmentation-master/configs/icnet/icnet_r50-d8_832x832_80k_cityscapes.py

_base_ = [
    '../_base_/models/icnet_r50-d8.py',
    '../_base_/datasets/cityscapes_832x832.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
mmsegmentation-master/configs/icnet/icnet_r50-d8_in1k-pre_832x832_160k_cityscapes.py

_base_ = './icnet_r50-d8_832x832_160k_cityscapes.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://resnet50_v1c'))))
mmsegmentation-master/configs/icnet/icnet_r50-d8_in1k-pre_832x832_80k_cityscapes.py

_base_ = './icnet_r50-d8_832x832_80k_cityscapes.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(
                type='Pretrained',
                checkpoint='open-mmlab://resnet50_v1c'))))
mmsegmentation-master/configs/imagenets/README.md

# ImageNet-S

> [Large-scale Unsupervised Semantic Segmentation](https://arxiv.org/abs/2106.03149)

<!-- [DATASET] -->

## Abstract

<!-- [ABSTRACT] -->

Powered by the ImageNet dataset, unsupervised learning on large-scale data has made significant advances for classification tasks. There are two major challenges to allowing such an attractive learning modality for segmentation tasks: i) a large-scale benchmark for assessing algorithms is missing; ii) unsupervised shape representation learning is difficult. We propose a new problem of large-scale unsupervised semantic segmentation (LUSS) with a newly created benchmark dataset to track the research progress. Based on the ImageNet dataset, we propose the ImageNet-S dataset, which has 1.2 million training images and 50k high-quality semantic segmentation annotations, to support unsupervised/semi-supervised semantic segmentation on the ImageNet dataset. The ImageNet-S dataset contains 1183322 training, 12419 validation, and 27423 testing images from 919 categories. We annotate 39842 val/test images and 9190 training images with precise pixel-level masks.

| Dataset       | category | train   | train-semi | val   | test  |
| ------------- | -------- | ------- | ---------- | ----- | ----- |
| ImageNet-S50  | 50       | 64431   | 500        | 752   | 1682  |
| ImageNet-S300 | 300      | 384862  | 3000       | 4097  | 9088  |
| ImageNet-S    | 919      | 1183322 | 9190       | 12419 | 27423 |

<!-- [IMAGE] -->

![image](https://user-images.githubusercontent.com/76149310/219024498-68ce5abb-e0f2-42a0-be56-5921be188fc3.jpg)

## Introduction

### Training

We provide training configs using ViT models. The pretraining weights of the ViT backbone should be converted following [vit](../vit/README.md):

```shell
python tools/model_converters/vit2mmseg.py https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth pretrain/mae_pretrain_vit_base_mmcls.pth
```

### Evaluation

- MMSegmentation supports evaluation on the val set.
- To evaluate on the test set, please submit the predictions to the online benchmarks ([Fully unsupervised](https://codalab.lisn.upsaclay.fr/competitions/1317) | [Distance matching](https://codalab.lisn.upsaclay.fr/competitions/1315) | [Semi-supervised](https://codalab.lisn.upsaclay.fr/competitions/1318) | [Free](https://codalab.lisn.upsaclay.fr/competitions/1316)). More details about the online benchmarks are on the [project page](https://LUSSeg.github.io/).

#### Submit test set results to the online benchmarks

1. Change the evaluation dataset from the val set to the test set in the data config file `configs/_base_/datasets/imagenets.py`:

```python
test=dict(
    type=dataset_type,
    subset=subset,
    data_root=data_root,
    img_dir='test',
    ann_dir='test-segmentation',
    pipeline=test_pipeline)
```

2. Generate the prediction results of the test set.

```shell
python ./tools/test.py [CONFIG] \
[CHECKPOINT] \
--format-only --eval-options "imgfile_prefix=[path/to/the/saved/test/prediction/results.]"
```

3. Generate the method description file `method.txt` and zip the prediction results, as sketched after this list. The generated zip file can be submitted to the online evaluation server.

```shell
cd configs/imagenets
python tools/imagenets_submit.py --imgfile_prefix [path/to/the/saved/test/prediction/results.] \
--method [Method name.] \
--arch [The model architecture.] \
--train_data [Training data.] \
--train_scheme [Training scheme description, e.g., SSL, Sup, SSL+Sup.] \
--link [Paper/project link.] \
--description [Method description.]
```

Note that `method`, `arch`, `train_data`, `train_scheme`, `link` and `description` describe your method and are set to none by default.

## Other Apps and Source Code using ImageNet-S

- Unsupervised semantic segmentation: [PASS](https://github.com/LUSSeg/PASS)
- Semi-supervised semantic segmentation: [ImageNetSegModel](https://github.com/LUSSeg/ImageNetSegModel)

## Results and Models

<a href="https://paperswithcode.com/dataset/imagenet-s">**PaperWithCode Leaderboard**</a>

| Method | Backbone | Pre-training epochs | Pre-training mode | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU (test) | Pre-trained | Config | Download |
| ------ | -------- | ------------------- | ----------------- | --------- | ------: | -------: | -------------: | ---: | ----------: | ----------- | ------ | -------- |
| MAE | ViT-B/16 | 100 | SSL | 224x224 | 3600 | 6.8 | 58.20 | 40.4 | 40.1 | [pre-trained](https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth) | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/imagenets/fcn_mae-base_pretrained_fp16_8x32_224x224_3600_imagenets919.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_mae-base_pretrained_fp16_8x32_224x224_3600_imagenets919/fcn_mae-base_pretrained_fp16_8x32_224x224_3600_imagenets919_20230208_130849-b837aa90.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_mae-base_pretrained_fp16_8x32_224x224_3600_imagenets919/fcn_mae-base_pretrained_fp16_8x32_224x224_3600_imagenets919_20230208_130849.log.json) |
| MAE | ViT-B/16 | 100 | SSL+Sup | 224x224 | 3600 | - | - | 61.7 | 61.4 | [pre-trained](https://dl.fbaipublicfiles.com/mae/finetune/mae_finetuned_vit_base.pth) | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/imagenets/fcn_mae-base_finetuned_fp16_8x32_224x224_3600_imagenets919.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_mae-base_finetuned_fp16_8x32_224x224_3600_imagenets919/fcn_mae-base_finetuned_fp16_8x32_224x224_3600_imagenets919_20230208_134621-16194326.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_mae-base_finetuned_fp16_8x32_224x224_3600_imagenets919/fcn_mae-base_finetuned_fp16_8x32_224x224_3600_imagenets919_20230208_134621.log.json) |
| SERE | ViT-S/16 | 100 | SSL | 224x224 | 3600 | 3.1 | 58.95 | 40.9 | 40.7 | [pre-trained](https://github.com/LUSSeg/ImageNetSegModel/releases/download/vit/sere_pretrained_vit_small_ep100.pth) | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/imagenets/fcn_sere-small_pretrained_fp16_8x32_224x224_3600_imagenets919.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919_20230208_151834-ee33230c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919_20230208_151834.log.json) |
| SERE | ViT-S/16 | 100 | SSL+Sup | 224x224 | 3600 | - | - | 59.4 | 58.9 | [pre-trained](https://github.com/LUSSeg/ImageNetSegModel/releases/download/vit/sere_finetuned_vit_small_ep100.pth) | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/imagenets/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919_20230208_151834-ee33230c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919_20230208_151834.log.json) |

## Citation

```bibtex
@article{gao2022luss,
  title={Large-scale Unsupervised Semantic Segmentation},
  author={Gao, Shanghua and Li, Zhong-Yu and Yang, Ming-Hsuan and Cheng, Ming-Ming and Han, Junwei and Torr, Philip},
  journal=TPAMI,
  year={2022}
}
```
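The packaging step above is handled by `tools/imagenets_submit.py`. As an illustration only (hypothetical paths, not the repo script), the zip it produces boils down to a `method.txt` plus the predicted PNG masks:

```python
# Illustrative sketch of the submission packaging; paths are hypothetical.
import zipfile
from pathlib import Path

pred_dir = Path('work_dirs/imagenets919_test_preds')  # hypothetical output dir
with zipfile.ZipFile('submission.zip', 'w', zipfile.ZIP_DEFLATED) as zf:
    zf.writestr('method.txt', 'Method name: none\nTraining data: none\n')
    for png in sorted(pred_dir.rglob('*.png')):
        zf.write(png, png.relative_to(pred_dir))
```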
mmsegmentation-master/configs/imagenets/fcn_mae-base_finetuned_fp16_8x32_224x224_3600_imagenets919.py

_base_ = [
    '../_base_/models/fcn_r50-d8.py',
    '../_base_/datasets/imagenets.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    pretrained='./pretrain/mae_finetuned_vit_base_mmcls.pth',
    backbone=dict(
        _delete_=True,
        type='VisionTransformer',
        img_size=(224, 224),
        patch_size=16,
        in_channels=3,
        embed_dims=768,
        num_layers=12,
        num_heads=12,
        mlp_ratio=4,
        out_indices=(2, 5, 8, 11),
        qkv_bias=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        with_cls_token=True,
        norm_cfg=dict(type='LN', eps=1e-6),
        act_cfg=dict(type='GELU'),
        norm_eval=False,
        final_norm=True,
        interpolate_mode='bicubic'),
    decode_head=dict(
        in_channels=768,
        channels=768,
        num_convs=0,
        dropout_ratio=0.0,
        num_classes=920,
        ignore_index=1000,
        downsample_label_ratio=8,
        init_cfg=dict(
            type='TruncNormal', std=2e-5, override=dict(name='conv_seg'))),
    auxiliary_head=None)
optimizer = dict(
    _delete_=True,
    constructor='LearningRateDecayOptimizerConstructor',
    type='AdamW',
    lr=1e-4,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        num_layers=12, decay_rate=0.40, decay_type='layer_wise'))
lr_config = dict(
    _delete_=True,
    policy='CosineAnnealing',
    warmup='linear',
    warmup_iters=180,
    warmup_ratio=1e-6,
    min_lr=1e-6,
    by_epoch=False)
# mixed precision
fp16 = dict(loss_scale='dynamic')
# By default, models are trained on 8 GPUs with 32 images per GPU
data = dict(samples_per_gpu=32)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=3600)
checkpoint_config = dict(by_epoch=False, interval=3600)
evaluation = dict(interval=360, metric='mIoU', pre_eval=True)
mmsegmentation-master/configs/imagenets/fcn_mae-base_pretrained_fp16_8x32_224x224_3600_imagenets919.py

_base_ = [
    '../_base_/models/fcn_r50-d8.py',
    '../_base_/datasets/imagenets.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    pretrained='./pretrain/mae_pretrain_vit_base_mmcls.pth',
    backbone=dict(
        _delete_=True,
        type='VisionTransformer',
        img_size=(224, 224),
        patch_size=16,
        in_channels=3,
        embed_dims=768,
        num_layers=12,
        num_heads=12,
        mlp_ratio=4,
        out_indices=(2, 5, 8, 11),
        qkv_bias=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        with_cls_token=True,
        norm_cfg=dict(type='LN', eps=1e-6),
        act_cfg=dict(type='GELU'),
        norm_eval=False,
        final_norm=True,
        interpolate_mode='bicubic'),
    decode_head=dict(
        in_channels=768,
        channels=768,
        num_convs=0,
        dropout_ratio=0.0,
        num_classes=920,
        ignore_index=1000,
        downsample_label_ratio=8,
        init_cfg=dict(
            type='TruncNormal', std=2e-5, override=dict(name='conv_seg'))),
    auxiliary_head=None)
optimizer = dict(
    _delete_=True,
    constructor='LearningRateDecayOptimizerConstructor',
    type='AdamW',
    lr=5e-4,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        num_layers=12, decay_rate=0.60, decay_type='layer_wise'))
lr_config = dict(
    _delete_=True,
    policy='CosineAnnealing',
    warmup='linear',
    warmup_iters=180,
    warmup_ratio=1e-6,
    min_lr=1e-6,
    by_epoch=False)
# mixed precision
fp16 = dict(loss_scale='dynamic')
# By default, models are trained on 8 GPUs with 32 images per GPU
data = dict(samples_per_gpu=32)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=3600)
checkpoint_config = dict(by_epoch=False, interval=3600)
evaluation = dict(interval=360, metric='mIoU', pre_eval=True)
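In these ViT configs, `LearningRateDecayOptimizerConstructor` with `decay_type='layer_wise'` scales the learning rate geometrically with depth, so early transformer blocks train much more slowly than the head. A back-of-the-envelope sketch of the schedule this config (lr=5e-4, decay_rate=0.6, num_layers=12) implies; the exact parameter grouping lives in mmseg's optimizer constructor, and this is only an approximation of it:

```python
# Approximate per-depth learning rates implied by layer-wise decay.
base_lr, decay_rate, num_layers = 5e-4, 0.60, 12

for layer_id in range(num_layers + 2):  # 0 ~ patch embed, 13 ~ decode head
    scale = decay_rate ** (num_layers + 1 - layer_id)
    print(f'depth {layer_id:2d}: lr ~ {base_lr * scale:.2e}')
```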
mmsegmentation-master/configs/imagenets/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919.py

_base_ = [
    '../_base_/models/fcn_r50-d8.py',
    '../_base_/datasets/imagenets.py',
    '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    pretrained='./pretrain/sere_finetuned_vit_small_ep100_mmcls.pth',
    backbone=dict(
        _delete_=True,
        type='VisionTransformer',
        img_size=(224, 224),
        patch_size=16,
        in_channels=3,
        embed_dims=384,
        num_layers=12,
        num_heads=6,
        mlp_ratio=4,
        out_indices=(2, 5, 8, 11),
        qkv_bias=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        with_cls_token=True,
        norm_cfg=dict(type='LN', eps=1e-6),
        act_cfg=dict(type='GELU'),
        norm_eval=False,
        final_norm=True,
        interpolate_mode='bicubic'),
    decode_head=dict(
        in_channels=384,
        channels=384,
        num_convs=0,
        dropout_ratio=0.0,
        num_classes=920,
        ignore_index=1000,
        downsample_label_ratio=8,
        init_cfg=dict(
            type='TruncNormal', std=2e-5, override=dict(name='conv_seg'))),
    auxiliary_head=None)
optimizer = dict(
    _delete_=True,
    constructor='LearningRateDecayOptimizerConstructor',
    type='AdamW',
    lr=5e-4,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        num_layers=12, decay_rate=0.50, decay_type='layer_wise'))
lr_config = dict(
    _delete_=True,
    policy='CosineAnnealing',
    warmup='linear',
    warmup_iters=180,
    warmup_ratio=1e-6,
    min_lr=1e-6,
    by_epoch=False)
# mixed precision
fp16 = dict(loss_scale='dynamic')
# By default, models are trained on 8 GPUs with 32 images per GPU
data = dict(samples_per_gpu=32)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=3600)
checkpoint_config = dict(by_epoch=False, interval=3600)
evaluation = dict(interval=360, metric='mIoU', pre_eval=True)
mmsegmentation
mmsegmentation-master/configs/imagenets/fcn_sere-small_pretrained_fp16_8x32_224x224_3600_imagenets919.py
_base_ = [
    '../_base_/models/fcn_r50-d8.py', '../_base_/datasets/imagenets.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_20k.py'
]
model = dict(
    pretrained='./pretrain/sere_pretrained_vit_small_ep100_mmcls.pth',
    backbone=dict(
        _delete_=True,
        type='VisionTransformer',
        img_size=(224, 224),
        patch_size=16,
        in_channels=3,
        embed_dims=384,
        num_layers=12,
        num_heads=6,
        mlp_ratio=4,
        out_indices=(2, 5, 8, 11),
        qkv_bias=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        with_cls_token=True,
        norm_cfg=dict(type='LN', eps=1e-6),
        act_cfg=dict(type='GELU'),
        norm_eval=False,
        final_norm=True,
        interpolate_mode='bicubic'),
    decode_head=dict(
        in_channels=384,
        channels=384,
        num_convs=0,
        dropout_ratio=0.0,
        num_classes=920,
        ignore_index=1000,
        downsample_label_ratio=8,
        init_cfg=dict(
            type='TruncNormal', std=2e-5, override=dict(name='conv_seg'))),
    auxiliary_head=None)
optimizer = dict(
    _delete_=True,
    constructor='LearningRateDecayOptimizerConstructor',
    type='AdamW',
    lr=5e-4,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    paramwise_cfg=dict(
        num_layers=12, decay_rate=0.50, decay_type='layer_wise'))
lr_config = dict(
    _delete_=True,
    policy='CosineAnnealing',
    warmup='linear',
    warmup_iters=180,
    warmup_ratio=1e-6,
    min_lr=1e-6,
    by_epoch=False)
# mixed precision
fp16 = dict(loss_scale='dynamic')
# By default, models are trained on 8 GPUs with 32 images per GPU
data = dict(samples_per_gpu=32)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=3600)
checkpoint_config = dict(by_epoch=False, interval=3600)
evaluation = dict(interval=360, metric='mIoU', pre_eval=True)
1,923
26.485714
75
py
mmsegmentation
mmsegmentation-master/configs/imagenets/imagenets.yml
Models:
- Name: fcn_mae-base_pretrained_fp16_8x32_224x224_3600_imagenets919
  In Collection: FCN
  Metadata:
    backbone: ViT-B/16
    crop size: (224,224)
    lr schd: 3600
    inference time (ms/im):
    - value: 17.18
      hardware: A100
      backend: PyTorch
      batch size: 32
      mode: FP16
      resolution: (224,224)
    Training Memory (GB): 6.8
  Results:
  - Task: Semantic Segmentation
    Dataset: ImageNet-S validation
    Metrics:
      mIoU: 40.4
  Config: configs/imagenets/fcn_mae-base_pretrained_fp16_8x32_224x224_3600_imagenets919.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_mae-base_pretrained_fp16_8x32_224x224_3600_imagenets919/fcn_mae-base_pretrained_fp16_8x32_224x224_3600_imagenets919_20230208_130849-b837aa90.pth
- Name: fcn_mae-base_finetuned_fp16_8x32_224x224_3600_imagenets919
  In Collection: FCN
  Metadata:
    backbone: ViT-B/16
    crop size: (224,224)
    lr schd: 3600
  Results:
  - Task: Semantic Segmentation
    Dataset: ImageNet-S validation
    Metrics:
      mIoU: 61.7
  Config: configs/imagenets/fcn_mae-base_finetuned_fp16_8x32_224x224_3600_imagenets919.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_mae-base_finetuned_fp16_8x32_224x224_3600_imagenets919/fcn_mae-base_finetuned_fp16_8x32_224x224_3600_imagenets919_20230208_134621-16194326.pth
- Name: fcn_sere-small_pretrained_fp16_8x32_224x224_3600_imagenets919
  In Collection: FCN
  Metadata:
    backbone: ViT-S/16
    crop size: (224,224)
    lr schd: 3600
    inference time (ms/im):
    - value: 16.96
      hardware: A100
      backend: PyTorch
      batch size: 32
      mode: FP16
      resolution: (224,224)
    Training Memory (GB): 3.1
  Results:
  - Task: Semantic Segmentation
    Dataset: ImageNet-S
    Metrics:
      mIoU: 40.9
  Config: configs/imagenets/fcn_sere-small_pretrained_fp16_8x32_224x224_3600_imagenets919.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919_20230208_151834-ee33230c.pth
- Name: fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919
  In Collection: FCN
  Metadata:
    backbone: ViT-S/16
    crop size: (224,224)
    lr schd: 3600
  Results:
  - Task: Semantic Segmentation
    Dataset: ImageNet-S
    Metrics:
      mIoU: 59.4
  Config: configs/imagenets/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/imagenets/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919/fcn_sere-small_finetuned_fp16_8x32_224x224_3600_imagenets919_20230208_151834-ee33230c.pth
2,714
37.785714
222
yml
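The metadata file above is plain YAML, so it can be queried directly. A minimal sketch (assuming PyYAML is installed and the file lives at the path shown in the record) that picks the strongest ImageNet-S checkpoint by mIoU:

```python
# Minimal sketch: query the model-zoo metadata above with PyYAML.
import yaml

with open('configs/imagenets/imagenets.yml') as f:
    zoo = yaml.safe_load(f)

# Each entry carries its config path, weights URL and reported metrics.
best = max(zoo['Models'], key=lambda m: m['Results'][0]['Metrics']['mIoU'])
print(best['Name'], '->', best['Weights'])
```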
mmsegmentation
mmsegmentation-master/configs/isanet/README.md
# ISANet

[Interlaced Sparse Self-Attention for Semantic Segmentation](https://arxiv.org/abs/1907.12273)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/openseg-group/openseg.pytorch">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/decode_heads/isa_head.py#L58">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

In this paper, we present a so-called interlaced sparse self-attention approach to improve the efficiency of the *self-attention* mechanism for semantic segmentation. The main idea is that we factorize the dense affinity matrix as the product of two sparse affinity matrices. There are two successive attention modules, each estimating a sparse affinity matrix. The first attention module is used to estimate the affinities within a subset of positions that have long spatial interval distances, and the second attention module is used to estimate the affinities within a subset of positions that have short spatial interval distances. These two attention modules are designed so that each position is able to receive the information from all the other positions. In contrast to the original self-attention module, our approach decreases the computation and memory complexity substantially, especially when processing high-resolution feature maps. We empirically verify the effectiveness of our approach on six challenging semantic segmentation benchmarks.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142901868-03d80da4-b9c0-4df9-8509-5f684ba9dadc.png" width="80%"/>
</div>

## Citation

```bibtex
@article{huang2019isa,
  title={Interlaced Sparse Self-Attention for Semantic Segmentation},
  author={Huang, Lang and Yuan, Yuhui and Guo, Jianyuan and Zhang, Chao and Chen, Xilin and Wang, Jingdong},
  journal={arXiv preprint arXiv:1907.12273},
  year={2019}
}
```

The technical report above is also presented at:

```bibtex
@article{yuan2021ocnet,
  title={OCNet: Object Context for Semantic Segmentation},
  author={Yuan, Yuhui and Huang, Lang and Guo, Jianyuan and Zhang, Chao and Chen, Xilin and Wang, Jingdong},
  journal={International Journal of Computer Vision},
  pages={1--24},
  year={2021},
  publisher={Springer}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | ------ | -------- |
| ISANet | R-50-D8 | 512x1024 | 40000 | 5.869 | 2.91 | 78.49 | 79.44 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_40k_cityscapes/isanet_r50-d8_512x1024_40k_cityscapes_20210901_054739-981bd763.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_40k_cityscapes/isanet_r50-d8_512x1024_40k_cityscapes_20210901_054739.log.json) |
| ISANet | R-50-D8 | 512x1024 | 80000 | 5.869 | 2.91 | 78.68 | 80.25 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_80k_cityscapes/isanet_r50-d8_512x1024_80k_cityscapes_20210901_074202-89384497.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_80k_cityscapes/isanet_r50-d8_512x1024_80k_cityscapes_20210901_074202.log.json) |
| ISANet | R-50-D8 | 769x769 | 40000 | 6.759 | 1.54 | 78.70 | 80.28 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_40k_cityscapes/isanet_r50-d8_769x769_40k_cityscapes_20210903_050200-4ae7e65b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_40k_cityscapes/isanet_r50-d8_769x769_40k_cityscapes_20210903_050200.log.json) |
| ISANet | R-50-D8 | 769x769 | 80000 | 6.759 | 1.54 | 79.29 | 80.53 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_80k_cityscapes/isanet_r50-d8_769x769_80k_cityscapes_20210903_101126-99b54519.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_80k_cityscapes/isanet_r50-d8_769x769_80k_cityscapes_20210903_101126.log.json) |
| ISANet | R-101-D8 | 512x1024 | 40000 | 9.425 | 2.35 | 79.58 | 81.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_40k_cityscapes/isanet_r101-d8_512x1024_40k_cityscapes_20210901_145553-293e6bd6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_40k_cityscapes/isanet_r101-d8_512x1024_40k_cityscapes_20210901_145553.log.json) |
| ISANet | R-101-D8 | 512x1024 | 80000 | 9.425 | 2.35 | 80.32 | 81.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_80k_cityscapes/isanet_r101-d8_512x1024_80k_cityscapes_20210901_145243-5b99c9b2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_80k_cityscapes/isanet_r101-d8_512x1024_80k_cityscapes_20210901_145243.log.json) |
| ISANet | R-101-D8 | 769x769 | 40000 | 10.815 | 0.92 | 79.68 | 80.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_40k_cityscapes/isanet_r101-d8_769x769_40k_cityscapes_20210903_111320-509e7224.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_40k_cityscapes/isanet_r101-d8_769x769_40k_cityscapes_20210903_111320.log.json) |
| ISANet | R-101-D8 | 769x769 | 80000 | 10.815 | 0.92 | 80.61 | 81.59 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_80k_cityscapes/isanet_r101-d8_769x769_80k_cityscapes_20210903_111319-24f71dfa.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_80k_cityscapes/isanet_r101-d8_769x769_80k_cityscapes_20210903_111319.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | ------ | -------- |
| ISANet | R-50-D8 | 512x512 | 80000 | 9.0 | 22.55 | 41.12 | 42.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_80k_ade20k/isanet_r50-d8_512x512_80k_ade20k_20210903_124557-6ed83a0c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_80k_ade20k/isanet_r50-d8_512x512_80k_ade20k_20210903_124557.log.json) |
| ISANet | R-50-D8 | 512x512 | 160000 | 9.0 | 22.55 | 42.59 | 43.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_160k_ade20k/isanet_r50-d8_512x512_160k_ade20k_20210903_104850-f752d0a3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_160k_ade20k/isanet_r50-d8_512x512_160k_ade20k_20210903_104850.log.json) |
| ISANet | R-101-D8 | 512x512 | 80000 | 12.562 | 10.56 | 43.51 | 44.38 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_80k_ade20k/isanet_r101-d8_512x512_80k_ade20k_20210903_162056-68b235c2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_80k_ade20k/isanet_r101-d8_512x512_80k_ade20k_20210903_162056.log.json) |
| ISANet | R-101-D8 | 512x512 | 160000 | 12.562 | 10.56 | 43.80 | 45.4 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_160k_ade20k/isanet_r101-d8_512x512_160k_ade20k_20210903_211431-a7879dcd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_160k_ade20k/isanet_r101-d8_512x512_160k_ade20k_20210903_211431.log.json) |

### Pascal VOC 2012 + Aug

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | ------ | -------- |
| ISANet | R-50-D8 | 512x512 | 20000 | 5.9 | 23.08 | 76.78 | 77.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_20k_voc12aug/isanet_r50-d8_512x512_20k_voc12aug_20210901_164838-79d59b80.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_20k_voc12aug/isanet_r50-d8_512x512_20k_voc12aug_20210901_164838.log.json) |
| ISANet | R-50-D8 | 512x512 | 40000 | 5.9 | 23.08 | 76.20 | 77.22 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_40k_voc12aug/isanet_r50-d8_512x512_40k_voc12aug_20210901_151349-7d08a54e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_40k_voc12aug/isanet_r50-d8_512x512_40k_voc12aug_20210901_151349.log.json) |
| ISANet | R-101-D8 | 512x512 | 20000 | 9.465 | 7.42 | 78.46 | 79.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_20k_voc12aug/isanet_r101-d8_512x512_20k_voc12aug_20210901_115805-3ccbf355.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_20k_voc12aug/isanet_r101-d8_512x512_20k_voc12aug_20210901_115805.log.json) |
| ISANet | R-101-D8 | 512x512 | 40000 | 9.465 | 7.42 | 78.12 | 79.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/isanet/isanet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_40k_voc12aug/isanet_r101-d8_512x512_40k_voc12aug_20210901_145814-bc71233b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_40k_voc12aug/isanet_r101-d8_512x512_40k_voc12aug_20210901_145814.log.json) |
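As a rough illustration of the interlacing described in the abstract, the sketch below permutes a feature map so that a long-distance attention step runs across partitions and a short-distance step runs within each partition. Here `attn_long` and `attn_short` stand in for any attention module over `(N, C, H, W)` tensors, and the partition counts `P_h`/`P_w` are illustrative values, not the settings used by mmseg's `ISAHead`.

```python
# Illustrative sketch of interlaced sparse self-attention; attn_long and
# attn_short are placeholders for any (N, C, H, W) -> (N, C, H, W) module.
import torch
from torch import nn


def interlaced_attention(x, attn_long, attn_short, P_h=8, P_w=8):
    n, c, h, w = x.shape
    Q_h, Q_w = h // P_h, w // P_w  # size of each of the P_h x P_w partitions
    x = x.reshape(n, c, P_h, Q_h, P_w, Q_w)
    # Long-distance step: attend over one position per partition (same
    # intra-partition offset), i.e. positions Q_h/Q_w pixels apart.
    x = x.permute(0, 3, 5, 1, 2, 4).reshape(n * Q_h * Q_w, c, P_h, P_w)
    x = attn_long(x)
    # Short-distance step: attend over all positions inside one partition.
    x = x.reshape(n, Q_h, Q_w, c, P_h, P_w)
    x = x.permute(0, 4, 5, 3, 1, 2).reshape(n * P_h * P_w, c, Q_h, Q_w)
    x = attn_short(x)
    # Restore the (N, C, H, W) layout.
    x = x.reshape(n, P_h, P_w, c, Q_h, Q_w)
    return x.permute(0, 3, 1, 4, 2, 5).reshape(n, c, h, w)


# Shape check with identity "attention" modules:
out = interlaced_attention(torch.rand(2, 64, 64, 64), nn.Identity(), nn.Identity())
assert out.shape == (2, 64, 64, 64)
```

Because every position takes part in one long-distance group and one short-distance group, information can propagate between any two positions in two steps, while each attention call only covers a small subset of positions.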
14,667
180.08642
1,059
md
mmsegmentation
mmsegmentation-master/configs/isanet/isanet.yml
Collections:
- Name: ISANet
  Metadata:
    Training Data:
    - Cityscapes
    - ADE20K
    - Pascal VOC 2012 + Aug
  Paper:
    URL: https://arxiv.org/abs/1907.12273
    Title: Interlaced Sparse Self-Attention for Semantic Segmentation
  README: configs/isanet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/decode_heads/isa_head.py#L58
    Version: v0.18.0
  Converted From:
    Code: https://github.com/openseg-group/openseg.pytorch
Models:
- Name: isanet_r50-d8_512x1024_40k_cityscapes
  In Collection: ISANet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 343.64
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 5.869
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.49
      mIoU(ms+flip): 79.44
  Config: configs/isanet/isanet_r50-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_40k_cityscapes/isanet_r50-d8_512x1024_40k_cityscapes_20210901_054739-981bd763.pth
- Name: isanet_r50-d8_512x1024_80k_cityscapes
  In Collection: ISANet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 343.64
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 5.869
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.68
      mIoU(ms+flip): 80.25
  Config: configs/isanet/isanet_r50-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x1024_80k_cityscapes/isanet_r50-d8_512x1024_80k_cityscapes_20210901_074202-89384497.pth
- Name: isanet_r50-d8_769x769_40k_cityscapes
  In Collection: ISANet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 649.35
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 6.759
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.7
      mIoU(ms+flip): 80.28
  Config: configs/isanet/isanet_r50-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_40k_cityscapes/isanet_r50-d8_769x769_40k_cityscapes_20210903_050200-4ae7e65b.pth
- Name: isanet_r50-d8_769x769_80k_cityscapes
  In Collection: ISANet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 80000
    inference time (ms/im):
    - value: 649.35
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 6.759
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.29
      mIoU(ms+flip): 80.53
  Config: configs/isanet/isanet_r50-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_769x769_80k_cityscapes/isanet_r50-d8_769x769_80k_cityscapes_20210903_101126-99b54519.pth
- Name: isanet_r101-d8_512x1024_40k_cityscapes
  In Collection: ISANet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 425.53
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 9.425
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.58
      mIoU(ms+flip): 81.05
  Config: configs/isanet/isanet_r101-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_40k_cityscapes/isanet_r101-d8_512x1024_40k_cityscapes_20210901_145553-293e6bd6.pth
- Name: isanet_r101-d8_512x1024_80k_cityscapes
  In Collection: ISANet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 425.53
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 9.425
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 80.32
      mIoU(ms+flip): 81.58
  Config: configs/isanet/isanet_r101-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x1024_80k_cityscapes/isanet_r101-d8_512x1024_80k_cityscapes_20210901_145243-5b99c9b2.pth
- Name: isanet_r101-d8_769x769_40k_cityscapes
  In Collection: ISANet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 1086.96
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 10.815
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.68
      mIoU(ms+flip): 80.95
  Config: configs/isanet/isanet_r101-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_40k_cityscapes/isanet_r101-d8_769x769_40k_cityscapes_20210903_111320-509e7224.pth
- Name: isanet_r101-d8_769x769_80k_cityscapes
  In Collection: ISANet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 80000
    inference time (ms/im):
    - value: 1086.96
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 10.815
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 80.61
      mIoU(ms+flip): 81.59
  Config: configs/isanet/isanet_r101-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_769x769_80k_cityscapes/isanet_r101-d8_769x769_80k_cityscapes_20210903_111319-24f71dfa.pth
- Name: isanet_r50-d8_512x512_80k_ade20k
  In Collection: ISANet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 44.35
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.0
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 41.12
      mIoU(ms+flip): 42.35
  Config: configs/isanet/isanet_r50-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_80k_ade20k/isanet_r50-d8_512x512_80k_ade20k_20210903_124557-6ed83a0c.pth
- Name: isanet_r50-d8_512x512_160k_ade20k
  In Collection: ISANet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 44.35
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.0
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.59
      mIoU(ms+flip): 43.07
  Config: configs/isanet/isanet_r50-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_160k_ade20k/isanet_r50-d8_512x512_160k_ade20k_20210903_104850-f752d0a3.pth
- Name: isanet_r101-d8_512x512_80k_ade20k
  In Collection: ISANet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 94.7
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 12.562
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.51
      mIoU(ms+flip): 44.38
  Config: configs/isanet/isanet_r101-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_80k_ade20k/isanet_r101-d8_512x512_80k_ade20k_20210903_162056-68b235c2.pth
- Name: isanet_r101-d8_512x512_160k_ade20k
  In Collection: ISANet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 94.7
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 12.562
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.8
      mIoU(ms+flip): 45.4
  Config: configs/isanet/isanet_r101-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_160k_ade20k/isanet_r101-d8_512x512_160k_ade20k_20210903_211431-a7879dcd.pth
- Name: isanet_r50-d8_512x512_20k_voc12aug
  In Collection: ISANet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 43.33
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 5.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 76.78
      mIoU(ms+flip): 77.79
  Config: configs/isanet/isanet_r50-d8_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_20k_voc12aug/isanet_r50-d8_512x512_20k_voc12aug_20210901_164838-79d59b80.pth
- Name: isanet_r50-d8_512x512_40k_voc12aug
  In Collection: ISANet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 40000
    inference time (ms/im):
    - value: 43.33
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 5.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 76.2
      mIoU(ms+flip): 77.22
  Config: configs/isanet/isanet_r50-d8_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r50-d8_512x512_40k_voc12aug/isanet_r50-d8_512x512_40k_voc12aug_20210901_151349-7d08a54e.pth
- Name: isanet_r101-d8_512x512_20k_voc12aug
  In Collection: ISANet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 134.77
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.465
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 78.46
      mIoU(ms+flip): 79.16
  Config: configs/isanet/isanet_r101-d8_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_20k_voc12aug/isanet_r101-d8_512x512_20k_voc12aug_20210901_115805-3ccbf355.pth
- Name: isanet_r101-d8_512x512_40k_voc12aug
  In Collection: ISANet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 40000
    inference time (ms/im):
    - value: 134.77
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.465
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 78.12
      mIoU(ms+flip): 79.04
  Config: configs/isanet/isanet_r101-d8_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/isanet/isanet_r101-d8_512x512_40k_voc12aug/isanet_r101-d8_512x512_40k_voc12aug_20210901_145814-bc71233b.pth
11,656
30.505405
175
yml
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r101-d8_512x1024_40k_cityscapes.py
_base_ = './isanet_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
134
44
79
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r101-d8_512x1024_80k_cityscapes.py
_base_ = './isanet_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
134
44
79
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r101-d8_512x512_160k_ade20k.py
_base_ = './isanet_r50-d8_512x512_160k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
130
42.666667
79
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r101-d8_512x512_20k_voc12aug.py
_base_ = './isanet_r50-d8_512x512_20k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
131
43
79
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r101-d8_512x512_40k_voc12aug.py
_base_ = './isanet_r50-d8_512x512_40k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
131
43
79
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r101-d8_512x512_80k_ade20k.py
_base_ = './isanet_r50-d8_512x512_80k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
129
42.333333
79
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r101-d8_769x769_40k_cityscapes.py
_base_ = './isanet_r50-d8_769x769_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r101-d8_769x769_80k_cityscapes.py
_base_ = './isanet_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r50-d8_512x1024_40k_cityscapes.py
_base_ = [
    '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
164
32
76
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r50-d8_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
164
32
76
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r50-d8_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
252
35.142857
76
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r50-d8_512x512_20k_voc12aug.py
_base_ = [
    '../_base_/models/isanet_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
263
32
77
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r50-d8_512x512_40k_voc12aug.py
_base_ = [
    '../_base_/models/isanet_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
263
32
77
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r50-d8_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/isanet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
251
35
76
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r50-d8_769x769_40k_cityscapes.py
_base_ = [
    '../_base_/models/isanet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
351
34.2
79
py
mmsegmentation
mmsegmentation-master/configs/isanet/isanet_r50-d8_769x769_80k_cityscapes.py
_base_ = [
    '../_base_/models/isanet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
351
34.2
79
py
mmsegmentation
mmsegmentation-master/configs/knet/README.md
# K-Net

[K-Net: Towards Unified Image Segmentation](https://arxiv.org/abs/2106.14855)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/ZwwWayne/K-Net/">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.23.0/mmseg/models/decode_heads/knet_head.py#L392">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Semantic, instance, and panoptic segmentations have been addressed using different and specialized frameworks despite their underlying connections. This paper presents a unified, simple, and effective framework for these essentially similar tasks. The framework, named K-Net, segments both instances and semantic categories consistently by a group of learnable kernels, where each kernel is responsible for generating a mask for either a potential instance or a stuff class. To remedy the difficulties of distinguishing various instances, we propose a kernel update strategy that makes each kernel dynamic and conditional on its meaningful group in the input image. K-Net can be trained in an end-to-end manner with bipartite matching, and its training and inference are naturally NMS-free and box-free. Without bells and whistles, K-Net surpasses all previous published state-of-the-art single-model results of panoptic segmentation on MS COCO test-dev split and semantic segmentation on ADE20K val split with 55.2% PQ and 54.3% mIoU, respectively. Its instance segmentation performance is also on par with Cascade Mask R-CNN on MS COCO with 60%-90% faster inference speeds. Code and models will be released at [this https URL](https://github.com/ZwwWayne/K-Net/).

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/157008300-9f40905c-b8e8-4a2a-9593-c1177fa35b2c.png" width="90%"/>
</div>

## Citation

```bibtex
@inproceedings{zhang2021knet,
  title={{K-Net: Towards} Unified Image Segmentation},
  author={Wenwei Zhang and Jiangmiao Pang and Kai Chen and Chen Change Loy},
  year={2021},
  booktitle={NeurIPS},
}
```

## Results and models

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ---------------- | -------- | --------- | ------- | -------- | -------------- | ----- | ------------- | ------ | -------- |
| KNet + FCN | R-50-D8 | 512x512 | 80000 | 7.01 | 19.24 | 43.60 | 45.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_043751-abcab920.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_043751.log.json) |
| KNet + PSPNet | R-50-D8 | 512x512 | 80000 | 6.98 | 20.04 | 44.18 | 45.58 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_054634-d2c72240.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_054634.log.json) |
| KNet + DeepLabV3 | R-50-D8 | 512x512 | 80000 | 7.42 | 12.10 | 45.06 | 46.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_041642-00c8fbeb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_041642.log.json) |
| KNet + UPerNet | R-50-D8 | 512x512 | 80000 | 7.34 | 17.11 | 43.45 | 44.07 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220304_125657-215753b0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220304_125657.log.json) |
| KNet + UPerNet | Swin-T | 512x512 | 80000 | 7.57 | 15.56 | 45.84 | 46.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k_20220303_133059-7545e1dc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k_20220303_133059.log.json) |
| KNet + UPerNet | Swin-L | 512x512 | 80000 | 13.5 | 8.29 | 52.05 | 53.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k_20220303_154559-d8da9a90.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k_20220303_154559.log.json) |
| KNet + UPerNet | Swin-L | 640x640 | 80000 | 18.31 | 5.55 | 52.46 | 53.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k_20220720_165636-cbcaed32.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k_20220720_165636.log.json) |

Note:

- All experiments of K-Net are implemented with 8 V100 (32G) GPUs with 2 samples per GPU.
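To make the kernel-update idea in the abstract concrete, here is a heavily simplified sketch of one update iteration, assuming per-image mask logits and one 1x1 dynamic-convolution kernel per class or instance. `kernel_update_step` is a hypothetical helper for illustration, not mmseg's `KernelUpdateHead`, which additionally uses gated linear updates, an FFN, and attention between kernels.

```python
# Simplified, hypothetical sketch of one K-Net kernel-update iteration.
import torch


def kernel_update_step(feats, kernels, mask_logits):
    """feats: (N, C, H, W); kernels: (N, K, C); mask_logits: (N, K, H, W)."""
    # 1. Group features: mask-weighted sum of the feature map per kernel.
    assign = mask_logits.sigmoid().flatten(2)                # (N, K, H*W)
    group_feats = assign @ feats.flatten(2).transpose(1, 2)  # (N, K, C)
    # 2. Update: make each kernel conditional on its group feature.
    kernels = kernels + group_feats
    # 3. Re-predict masks with the updated kernels (1x1 dynamic conv).
    mask_logits = torch.einsum('nkc,nchw->nkhw', kernels, feats)
    return kernels, mask_logits
```

Running this step `num_stages` times (three in the configs below) is what `IterativeDecodeHead` orchestrates: each iteration refines both the kernels and the masks they produce.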
7,987
155.627451
1,267
md
mmsegmentation
mmsegmentation-master/configs/knet/knet.yml
Collections:
- Name: KNet
  Metadata:
    Training Data:
    - ADE20K
  Paper:
    URL: https://arxiv.org/abs/2106.14855
    Title: 'K-Net: Towards Unified Image Segmentation'
  README: configs/knet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.23.0/mmseg/models/decode_heads/knet_head.py#L392
    Version: v0.23.0
  Converted From:
    Code: https://github.com/ZwwWayne/K-Net/
Models:
- Name: knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k
  In Collection: KNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 51.98
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 7.01
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.6
      mIoU(ms+flip): 45.12
  Config: configs/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_043751-abcab920.pth
- Name: knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k
  In Collection: KNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 49.9
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.98
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 44.18
      mIoU(ms+flip): 45.58
  Config: configs/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_054634-d2c72240.pth
- Name: knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k
  In Collection: KNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 82.64
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 7.42
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 45.06
      mIoU(ms+flip): 46.11
  Config: configs/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k_20220228_041642-00c8fbeb.pth
- Name: knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k
  In Collection: KNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 58.45
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 7.34
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.45
      mIoU(ms+flip): 44.07
  Config: configs/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k_20220304_125657-215753b0.pth
- Name: knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k
  In Collection: KNet
  Metadata:
    backbone: Swin-T
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 64.27
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 7.57
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 45.84
      mIoU(ms+flip): 46.27
  Config: configs/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k_20220303_133059-7545e1dc.pth
- Name: knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k
  In Collection: KNet
  Metadata:
    backbone: Swin-L
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 120.63
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 13.5
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 52.05
      mIoU(ms+flip): 53.24
  Config: configs/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k_20220303_154559-d8da9a90.pth
- Name: knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k
  In Collection: KNet
  Metadata:
    backbone: Swin-L
    crop size: (640,640)
    lr schd: 80000
    inference time (ms/im):
    - value: 180.18
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (640,640)
    Training Memory (GB): 18.31
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 52.46
      mIoU(ms+flip): 53.78
  Config: configs/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k_20220720_165636-cbcaed32.pth
5,628
32.111765
203
yml
mmsegmentation
mmsegmentation-master/configs/knet/knet_s3_deeplabv3_r50-d8_8x2_512x512_adamw_80k_ade20k.py
_base_ = [
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
num_stages = 3
conv_kernel_size = 1
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='IterativeDecodeHead',
        num_stages=num_stages,
        kernel_update_head=[
            dict(
                type='KernelUpdateHead',
                num_classes=150,
                num_ffn_fcs=2,
                num_heads=8,
                num_mask_fcs=1,
                feedforward_channels=2048,
                in_channels=512,
                out_channels=512,
                dropout=0.0,
                conv_kernel_size=conv_kernel_size,
                ffn_act_cfg=dict(type='ReLU', inplace=True),
                with_ffn=True,
                feat_transform_cfg=dict(
                    conv_cfg=dict(type='Conv2d'), act_cfg=None),
                kernel_updator_cfg=dict(
                    type='KernelUpdator',
                    in_channels=256,
                    feat_channels=256,
                    out_channels=256,
                    act_cfg=dict(type='ReLU', inplace=True),
                    norm_cfg=dict(type='LN'))) for _ in range(num_stages)
        ],
        kernel_generate_head=dict(
            type='ASPPHead',
            in_channels=2048,
            in_index=3,
            channels=512,
            dilations=(1, 12, 24, 36),
            dropout_ratio=0.1,
            num_classes=150,
            norm_cfg=norm_cfg,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False,
                loss_weight=1.0))),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(
    _delete_=True,
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[60000, 72000],
    by_epoch=False)
# In K-Net implementation we use batch size 2 per GPU as default
data = dict(samples_per_gpu=2, workers_per_gpu=2)
2,984
30.755319
79
py
mmsegmentation
mmsegmentation-master/configs/knet/knet_s3_fcn_r50-d8_8x2_512x512_adamw_80k_ade20k.py
_base_ = [
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
num_stages = 3
conv_kernel_size = 1
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='IterativeDecodeHead',
        num_stages=num_stages,
        kernel_update_head=[
            dict(
                type='KernelUpdateHead',
                num_classes=150,
                num_ffn_fcs=2,
                num_heads=8,
                num_mask_fcs=1,
                feedforward_channels=2048,
                in_channels=512,
                out_channels=512,
                dropout=0.0,
                conv_kernel_size=conv_kernel_size,
                ffn_act_cfg=dict(type='ReLU', inplace=True),
                with_ffn=True,
                feat_transform_cfg=dict(
                    conv_cfg=dict(type='Conv2d'), act_cfg=None),
                kernel_updator_cfg=dict(
                    type='KernelUpdator',
                    in_channels=256,
                    feat_channels=256,
                    out_channels=256,
                    act_cfg=dict(type='ReLU', inplace=True),
                    norm_cfg=dict(type='LN'))) for _ in range(num_stages)
        ],
        kernel_generate_head=dict(
            type='FCNHead',
            in_channels=2048,
            in_index=3,
            channels=512,
            num_convs=2,
            concat_input=True,
            dropout_ratio=0.1,
            num_classes=150,
            norm_cfg=norm_cfg,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False,
                loss_weight=1.0))),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(
    _delete_=True,
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[60000, 72000],
    by_epoch=False)
# In K-Net implementation we use batch size 2 per GPU as default
data = dict(samples_per_gpu=2, workers_per_gpu=2)
2,999
30.914894
79
py
mmsegmentation
mmsegmentation-master/configs/knet/knet_s3_pspnet_r50-d8_8x2_512x512_adamw_80k_ade20k.py
_base_ = [
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
num_stages = 3
conv_kernel_size = 1
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='IterativeDecodeHead',
        num_stages=num_stages,
        kernel_update_head=[
            dict(
                type='KernelUpdateHead',
                num_classes=150,
                num_ffn_fcs=2,
                num_heads=8,
                num_mask_fcs=1,
                feedforward_channels=2048,
                in_channels=512,
                out_channels=512,
                dropout=0.0,
                conv_kernel_size=conv_kernel_size,
                ffn_act_cfg=dict(type='ReLU', inplace=True),
                with_ffn=True,
                feat_transform_cfg=dict(
                    conv_cfg=dict(type='Conv2d'), act_cfg=None),
                kernel_updator_cfg=dict(
                    type='KernelUpdator',
                    in_channels=256,
                    feat_channels=256,
                    out_channels=256,
                    act_cfg=dict(type='ReLU', inplace=True),
                    norm_cfg=dict(type='LN'))) for _ in range(num_stages)
        ],
        kernel_generate_head=dict(
            type='PSPHead',
            in_channels=2048,
            in_index=3,
            channels=512,
            pool_scales=(1, 2, 3, 6),
            dropout_ratio=0.1,
            num_classes=150,
            norm_cfg=norm_cfg,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False,
                loss_weight=1.0))),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(
    _delete_=True,
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[60000, 72000],
    by_epoch=False)
# In K-Net implementation we use batch size 2 per GPU as default
data = dict(samples_per_gpu=2, workers_per_gpu=2)
2,981
31.064516
79
py
mmsegmentation
mmsegmentation-master/configs/knet/knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k.py
_base_ = [
    '../_base_/datasets/ade20k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_80k.py'
]
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
num_stages = 3
conv_kernel_size = 1
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 1, 1),
        strides=(1, 2, 2, 2),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='IterativeDecodeHead',
        num_stages=num_stages,
        kernel_update_head=[
            dict(
                type='KernelUpdateHead',
                num_classes=150,
                num_ffn_fcs=2,
                num_heads=8,
                num_mask_fcs=1,
                feedforward_channels=2048,
                in_channels=512,
                out_channels=512,
                dropout=0.0,
                conv_kernel_size=conv_kernel_size,
                ffn_act_cfg=dict(type='ReLU', inplace=True),
                with_ffn=True,
                feat_transform_cfg=dict(
                    conv_cfg=dict(type='Conv2d'), act_cfg=None),
                kernel_updator_cfg=dict(
                    type='KernelUpdator',
                    in_channels=256,
                    feat_channels=256,
                    out_channels=256,
                    act_cfg=dict(type='ReLU', inplace=True),
                    norm_cfg=dict(type='LN'))) for _ in range(num_stages)
        ],
        kernel_generate_head=dict(
            type='UPerHead',
            in_channels=[256, 512, 1024, 2048],
            in_index=[0, 1, 2, 3],
            pool_scales=(1, 2, 3, 6),
            channels=512,
            dropout_ratio=0.1,
            num_classes=150,
            norm_cfg=norm_cfg,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False,
                loss_weight=1.0))),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))
# optimizer
optimizer = dict(_delete_=True, type='AdamW', lr=0.0001, weight_decay=0.0005)
optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(
    _delete_=True,
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[60000, 72000],
    by_epoch=False)
# In K-Net implementation we use batch size 2 per GPU as default
data = dict(samples_per_gpu=2, workers_per_gpu=2)
3,012
31.053191
79
py
mmsegmentation
mmsegmentation-master/configs/knet/knet_s3_upernet_swin-l_8x2_512x512_adamw_80k_ade20k.py
_base_ = 'knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py'

checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220308-d5bdebaf.pth'  # noqa
# model settings
model = dict(
    pretrained=checkpoint_file,
    backbone=dict(
        embed_dims=192,
        depths=[2, 2, 18, 2],
        num_heads=[6, 12, 24, 48],
        window_size=7,
        use_abs_pos_embed=False,
        drop_path_rate=0.3,
        patch_norm=True),
    decode_head=dict(
        kernel_generate_head=dict(in_channels=[192, 384, 768, 1536])),
    auxiliary_head=dict(in_channels=768))
# In K-Net implementation we use batch size 2 per GPU as default
data = dict(samples_per_gpu=2, workers_per_gpu=2)
747
36.4
148
py
mmsegmentation
mmsegmentation-master/configs/knet/knet_s3_upernet_swin-l_8x2_640x640_adamw_80k_ade20k.py
_base_ = 'knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py'

checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_large_patch4_window7_224_22k_20220308-d5bdebaf.pth'  # noqa
# model settings
model = dict(
    pretrained=checkpoint_file,
    backbone=dict(
        embed_dims=192,
        depths=[2, 2, 18, 2],
        num_heads=[6, 12, 24, 48],
        window_size=7,
        use_abs_pos_embed=False,
        drop_path_rate=0.4,
        patch_norm=True),
    decode_head=dict(
        kernel_generate_head=dict(in_channels=[192, 384, 768, 1536])),
    auxiliary_head=dict(in_channels=768))
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
crop_size = (640, 640)
train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='LoadAnnotations', reduce_zero_label=True),
    dict(type='Resize', img_scale=(2048, 640), ratio_range=(0.5, 2.0)),
    dict(type='RandomCrop', crop_size=crop_size, cat_max_ratio=0.75),
    dict(type='RandomFlip', prob=0.5),
    dict(type='PhotoMetricDistortion'),
    dict(type='Normalize', **img_norm_cfg),
    dict(type='Pad', size=crop_size, pad_val=0, seg_pad_val=255),
    dict(type='DefaultFormatBundle'),
    dict(type='Collect', keys=['img', 'gt_semantic_seg']),
]
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 640),
        # img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=False,
        transforms=[
            dict(type='Resize', keep_ratio=True),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
# In K-Net implementation we use batch size 2 per GPU as default
data = dict(
    samples_per_gpu=2,
    workers_per_gpu=2,
    train=dict(pipeline=train_pipeline),
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline))
2,028
35.232143
148
py
mmsegmentation
mmsegmentation-master/configs/knet/knet_s3_upernet_swin-t_8x2_512x512_adamw_80k_ade20k.py
_base_ = 'knet_s3_upernet_r50-d8_8x2_512x512_adamw_80k_ade20k.py'

checkpoint_file = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/swin/swin_tiny_patch4_window7_224_20220308-f41b89d3.pth'  # noqa
# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
num_stages = 3
conv_kernel_size = 1
model = dict(
    type='EncoderDecoder',
    pretrained=checkpoint_file,
    backbone=dict(
        _delete_=True,
        type='SwinTransformer',
        embed_dims=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        use_abs_pos_embed=False,
        patch_norm=True,
        out_indices=(0, 1, 2, 3)),
    decode_head=dict(
        kernel_generate_head=dict(in_channels=[96, 192, 384, 768])),
    auxiliary_head=dict(in_channels=384))
# modify learning rate following the official implementation of Swin Transformer  # noqa
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=0.00006,
    betas=(0.9, 0.999),
    weight_decay=0.0005,
    paramwise_cfg=dict(
        custom_keys={
            'absolute_pos_embed': dict(decay_mult=0.),
            'relative_position_bias_table': dict(decay_mult=0.),
            'norm': dict(decay_mult=0.)
        }))
optimizer_config = dict(grad_clip=dict(max_norm=1, norm_type=2))
# learning policy
lr_config = dict(
    _delete_=True,
    policy='step',
    warmup='linear',
    warmup_iters=1000,
    warmup_ratio=0.001,
    step=[60000, 72000],
    by_epoch=False)
# In K-Net implementation we use batch size 2 per GPU as default
data = dict(samples_per_gpu=2, workers_per_gpu=2)
1,737
28.965517
143
py
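The `paramwise_cfg` above sets `decay_mult=0.` for position-embedding and norm parameters, i.e. they are excluded from weight decay while everything else keeps `weight_decay=0.0005`. The same effect in plain PyTorch is just two optimizer parameter groups; a hedged sketch (the tiny model here is a made-up stand-in, not a Swin backbone):

```python
import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))  # toy stand-in

no_decay, decay = [], []
for name, p in model.named_parameters():
    # Mirror the custom_keys idea: norm/bias-style 1-D parameters (and, in a
    # real Swin model, the position-bias tables) get weight_decay 0.
    if 'norm' in name.lower() or p.ndim == 1:
        no_decay.append(p)
    else:
        decay.append(p)

optimizer = torch.optim.AdamW(
    [dict(params=decay, weight_decay=0.0005),
     dict(params=no_decay, weight_decay=0.0)],
    lr=6e-5, betas=(0.9, 0.999))
```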
mmsegmentation
mmsegmentation-master/configs/mae/README.md
# MAE

[Masked Autoencoders Are Scalable Vision Learners](https://arxiv.org/abs/2111.06377)

## Introduction

<!-- [BACKBONE] -->

<a href="https://github.com/facebookresearch/mae">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.24.0/mmseg/models/backbones/mae.py#L46">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

This paper shows that masked autoencoders (MAE) are scalable self-supervised learners for computer vision. Our MAE approach is simple: we mask random patches of the input image and reconstruct the missing pixels. It is based on two core designs. First, we develop an asymmetric encoder-decoder architecture, with an encoder that operates only on the visible subset of patches (without mask tokens), along with a lightweight decoder that reconstructs the original image from the latent representation and mask tokens. Second, we find that masking a high proportion of the input image, e.g., 75%, yields a nontrivial and meaningful self-supervisory task. Coupling these two designs enables us to train large models efficiently and effectively: we accelerate training (by 3x or more) and improve accuracy. Our scalable approach allows for learning high-capacity models that generalize well: e.g., a vanilla ViT-Huge model achieves the best accuracy (87.8%) among methods that use only ImageNet-1K data. Transfer performance in downstream tasks outperforms supervised pre-training and shows promising scaling behavior.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/165456416-1cba54bf-b1b5-4bdf-ad86-d6390de7f342.png" width="70%"/>
</div>

## Citation

```bibtex
@article{he2021masked,
  title={Masked autoencoders are scalable vision learners},
  author={He, Kaiming and Chen, Xinlei and Xie, Saining and Li, Yanghao and Doll{\'a}r, Piotr and Girshick, Ross},
  journal={arXiv preprint arXiv:2111.06377},
  year={2021}
}
```

## Usage

To use other repositories' pre-trained models, it is necessary to convert keys.

We provide a script [`beit2mmseg.py`](../../tools/model_converters/beit2mmseg.py) in the tools directory to convert the keys of an MAE model from [the official repo](https://github.com/facebookresearch/mae) to MMSegmentation style.

```shell
python tools/model_converters/beit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH}
```

E.g.

```shell
python tools/model_converters/beit2mmseg.py https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth pretrain/mae_pretrain_vit_base_mmcls.pth
```

This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.

In our default setting, the pretrained models are defined below:

| pretrained models | original models |
| ------------------------------- | ------------------------------------------------------------------------------------------------ |
| mae_pretrain_vit_base_mmcls.pth | ['mae_pretrain_vit_base'](https://dl.fbaipublicfiles.com/mae/pretrain/mae_pretrain_vit_base.pth) |

Verify the single-scale results of the model:

```shell
sh tools/dist_test.sh \
    configs/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k.py \
    upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752-f92a2975.pth $GPUS --eval mIoU
```

Since the relative position embedding requires the input height and width to be equal, a sliding window is adopted for multi-scale inference, with `min_size=512` so that the shortest edge is always at least 512. Multi-scale inference is therefore run with a separate config rather than the `--aug-test` flag.

For multi-scale inference:

```shell
sh tools/dist_test.sh \
    configs/mae/upernet_mae-base_fp16_512x512_160k_ade20k_ms.py \
    upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752-f92a2975.pth $GPUS --eval mIoU
```

## Results and models

### ADE20K

| Method | Backbone | Crop Size | pretrain | pretrain img size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------- | -------- | --------- | ----------- | ----------------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | ------ | -------- |
| UPerNet | ViT-B | 512x512 | ImageNet-1K | 224x224 | 16 | 160000 | 9.96 | 7.14 | 48.13 | 48.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k/upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752-f92a2975.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k/upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752.log.json) |
5,795
68.831325
1,114
md
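The conversion script referenced in the README essentially renames state-dict keys from the official MAE checkpoint layout to the names MMSegmentation expects. A generic sketch of that kind of remapping with plain PyTorch (the prefix rule here is illustrative, not the actual mapping performed by `beit2mmseg.py`, and the file is assumed to have been downloaded locally):

```python
import torch

ckpt = torch.load('pretrain/mae_pretrain_vit_base.pth', map_location='cpu')
state_dict = ckpt.get('model', ckpt)  # official checkpoints nest weights under 'model'

converted = {}
for key, value in state_dict.items():
    new_key = key
    # Illustrative rename: converters typically rewrite prefixes so the keys
    # line up with the target backbone's parameter names.
    if new_key.startswith('blocks.'):
        new_key = new_key.replace('blocks.', 'layers.')
    converted[new_key] = value

torch.save(dict(state_dict=converted), 'pretrain/mae_pretrain_vit_base_mmcls.pth')
```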
mmsegmentation
mmsegmentation-master/configs/mae/mae.yml
Models:
- Name: upernet_mae-base_fp16_8x2_512x512_160k_ade20k
  In Collection: UPerNet
  Metadata:
    backbone: ViT-B
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 140.06
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP16
      resolution: (512,512)
    Training Memory (GB): 9.96
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 48.13
      mIoU(ms+flip): 48.7
  Config: configs/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k/upernet_mae-base_fp16_8x2_512x512_160k_ade20k_20220426_174752-f92a2975.pth
730
29.458333
186
yml
mmsegmentation
mmsegmentation-master/configs/mae/upernet_mae-base_fp16_512x512_160k_ade20k_ms.py
_base_ = './upernet_mae-base_fp16_8x2_512x512_160k_ade20k.py'

img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)

test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2048, 512),
        img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True, min_size=512),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline),
    samples_per_gpu=2)
767
29.72
77
py
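In `MultiScaleFlipAug`, a single `img_scale` combined with `img_ratios` expands into one test scale per ratio; roughly, each ratio scales both sides of `img_scale`. A small sketch of that expansion (plain Python mirroring the behavior, rather than calling the transform itself):

```python
img_scale = (2048, 512)
img_ratios = [0.5, 0.75, 1.0, 1.25, 1.5, 1.75]

# Each ratio scales the nominal (w, h); Resize with keep_ratio=True then fits
# the image inside the resulting box, and min_size=512 keeps the short edge
# large enough for the window-based MAE backbone.
scales = [(int(img_scale[0] * r), int(img_scale[1] * r)) for r in img_ratios]
print(scales)
# [(1024, 256), (1536, 384), (2048, 512), (2560, 640), (3072, 768), (3584, 896)]
```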
mmsegmentation
mmsegmentation-master/configs/mae/upernet_mae-base_fp16_8x2_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/upernet_mae.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]

model = dict(
    pretrained='./pretrain/mae_pretrain_vit_base_mmcls.pth',
    backbone=dict(
        type='MAE',
        img_size=(512, 512),
        patch_size=16,
        embed_dims=768,
        num_layers=12,
        num_heads=12,
        mlp_ratio=4,
        init_values=1.0,
        drop_path_rate=0.1,
        out_indices=[3, 5, 7, 11]),
    neck=dict(embed_dim=768, rescales=[4, 2, 1, 0.5]),
    decode_head=dict(
        in_channels=[768, 768, 768, 768], num_classes=150, channels=768),
    auxiliary_head=dict(in_channels=768, num_classes=150),
    test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(341, 341)))

optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=1e-4,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    constructor='LayerDecayOptimizerConstructor',
    paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.65))

lr_config = dict(
    _delete_=True,
    policy='poly',
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-6,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)

# mixed precision
fp16 = dict(loss_scale='dynamic')

# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(samples_per_gpu=2)
1,342
26.408163
74
py
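`LayerDecayOptimizerConstructor` with `layer_decay_rate=0.65` gives each transformer layer a learning-rate multiplier that shrinks geometrically with depth, from the head back toward the patch embedding. A sketch of the scale computation under that assumption (the exact group indexing convention is illustrative):

```python
base_lr = 1e-4
num_layers = 12
decay_rate = 0.65

# Assumed layout: layer_id 0 = patch embedding, 1..12 = transformer blocks,
# 13 = decode head. Later layers get scales close to 1; early ones are damped.
for layer_id in range(num_layers + 2):
    scale = decay_rate ** (num_layers + 1 - layer_id)
    print(f'layer {layer_id:2d}: lr = {base_lr * scale:.2e}')
```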
mmsegmentation
mmsegmentation-master/configs/mobilenet_v2/README.md
# MobileNetV2

[MobileNetV2: Inverted Residuals and Linear Bottlenecks](https://arxiv.org/abs/1801.04381)

## Introduction

<!-- [BACKBONE] -->

<a href="https://github.com/tensorflow/models/tree/master/research/deeplab">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/mobilenet_v2.py#L14">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

In this paper we describe a new mobile architecture, MobileNetV2, that improves the state of the art performance of mobile models on multiple tasks and benchmarks as well as across a spectrum of different model sizes. We also describe efficient ways of applying these mobile models to object detection in a novel framework we call SSDLite. Additionally, we demonstrate how to build mobile semantic segmentation models through a reduced form of DeepLabv3 which we call Mobile DeepLabv3. The MobileNetV2 architecture is based on an inverted residual structure where the input and output of the residual block are thin bottleneck layers, opposite to traditional residual models which use expanded representations in the input. MobileNetV2 uses lightweight depthwise convolutions to filter features in the intermediate expansion layer. Additionally, we find that it is important to remove non-linearities in the narrow layers in order to maintain representational power. We demonstrate that this improves performance and provide an intuition that led to this design. Finally, our approach allows decoupling of the input/output domains from the expressiveness of the transformation, which provides a convenient framework for further analysis. We measure our performance on Imagenet classification, COCO object detection, and VOC image segmentation. We evaluate the trade-offs between accuracy and the number of operations measured by multiply-adds (MAdd), as well as the number of parameters.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142901935-fa22700e-4b77-477f-90b9-334a4197506f.png" width="50%"/>
</div>

## Citation

```bibtex
@inproceedings{sandler2018mobilenetv2,
  title={Mobilenetv2: Inverted residuals and linear bottlenecks},
  author={Sandler, Mark and Howard, Andrew and Zhu, Menglong and Zhmoginov, Andrey and Chen, Liang-Chieh},
  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
  pages={4510--4520},
  year={2018}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ---------- | ------- | -------- | ------: | -------: | -------------- | ----: | ------------- | ------ | -------- |
| FCN | M-V2-D8 | 512x1024 | 80000 | 3.4 | 14.2 | 70.16 | 72.1 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes_20230227-144821-0d3a4e51.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes_20230227-144821.log.json) |
| PSPNet | M-V2-D8 | 512x1024 | 80000 | 3.6 | 11.2 | 70.23 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-19e81d51.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes-20200825_124817.log.json) |
| DeepLabV3 | M-V2-D8 | 512x1024 | 80000 | 3.9 | 8.4 | 73.84 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-bef03590.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes-20200825_124836.log.json) |
| DeepLabV3+ | M-V2-D8 | 512x1024 | 80000 | 5.1 | 8.4 | 75.20 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-d256dd4b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes-20200825_124836.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ---------- | ------- | ------- | ------: | -------: | -------------- | ----: | ------------- | ------ | -------- |
| FCN | M-V2-D8 | 512x512 | 160000 | 6.5 | 64.4 | 19.71 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k_20200825_214953-c40e1095.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k-20200825_214953.log.json) |
| PSPNet | M-V2-D8 | 512x512 | 160000 | 6.5 | 57.7 | 29.68 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k_20200825_214953-f5942f7a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k-20200825_214953.log.json) |
| DeepLabV3 | M-V2-D8 | 512x512 | 160000 | 6.8 | 39.9 | 34.08 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k_20200825_223255-63986343.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k-20200825_223255.log.json) |
| DeepLabV3+ | M-V2-D8 | 512x512 | 160000 | 8.2 | 43.1 | 34.02 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k_20200825_223255-465a01d4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k-20200825_223255.log.json) |
9,873
172.22807
995
md
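The abstract's "inverted residual with linear bottleneck" is: a 1x1 expansion, a depthwise 3x3, then a 1x1 projection with no activation, plus a skip connection when shapes allow. A minimal PyTorch sketch of the block (simplified from the paper's description; not the exact MMSegmentation implementation):

```python
import torch
import torch.nn as nn

class InvertedResidual(nn.Module):
    """Expand -> depthwise -> linear project, as described in the paper."""

    def __init__(self, in_ch, out_ch, stride=1, expand_ratio=6):
        super().__init__()
        hidden = in_ch * expand_ratio
        self.use_res = stride == 1 and in_ch == out_ch
        self.block = nn.Sequential(
            nn.Conv2d(in_ch, hidden, 1, bias=False),           # 1x1 expansion
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, hidden, 3, stride, 1,
                      groups=hidden, bias=False),               # depthwise 3x3
            nn.BatchNorm2d(hidden), nn.ReLU6(inplace=True),
            nn.Conv2d(hidden, out_ch, 1, bias=False),           # linear bottleneck
            nn.BatchNorm2d(out_ch))                             # no activation here

    def forward(self, x):
        out = self.block(x)
        return x + out if self.use_res else out

y = InvertedResidual(32, 32)(torch.randn(1, 32, 56, 56))
print(y.shape)  # torch.Size([1, 32, 56, 56])
```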
mmsegmentation
mmsegmentation-master/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes.py
_base_ = '../deeplabv3/deeplabv3_r101-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(
        _delete_=True,
        type='MobileNetV2',
        widen_factor=1.,
        strides=(1, 2, 2, 1, 1, 1, 1),
        dilations=(1, 1, 1, 2, 2, 4, 4),
        out_indices=(1, 2, 4, 6),
        norm_cfg=dict(type='SyncBN', requires_grad=True)),
    decode_head=dict(in_channels=320),
    auxiliary_head=dict(in_channels=96))
470
32.642857
68
py
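The `strides=(1, 2, 2, 1, 1, 1, 1)` and `dilations=(1, 1, 1, 2, 2, 4, 4)` overrides implement the usual output-stride-8 trick: later stages keep stride 1 and compensate with dilated convolutions so the receptive field still grows without shrinking the feature map. A small shape check of the idea in isolation (toy tensors, not the actual backbone):

```python
import torch
import torch.nn as nn

x = torch.randn(1, 8, 64, 64)

strided = nn.Conv2d(8, 8, 3, stride=2, padding=1)               # halves resolution
dilated = nn.Conv2d(8, 8, 3, stride=1, padding=2, dilation=2)   # keeps resolution

print(strided(x).shape)  # torch.Size([1, 8, 32, 32])
print(dilated(x).shape)  # torch.Size([1, 8, 64, 64]) -- same size, wider receptive field
```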
mmsegmentation
mmsegmentation-master/configs/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k.py
_base_ = '../deeplabv3/deeplabv3_r101-d8_512x512_160k_ade20k.py'
model = dict(
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(
        _delete_=True,
        type='MobileNetV2',
        widen_factor=1.,
        strides=(1, 2, 2, 1, 1, 1, 1),
        dilations=(1, 1, 1, 2, 2, 4, 4),
        out_indices=(1, 2, 4, 6),
        norm_cfg=dict(type='SyncBN', requires_grad=True)),
    decode_head=dict(in_channels=320),
    auxiliary_head=dict(in_channels=96))
466
32.357143
64
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py
_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(
        _delete_=True,
        type='MobileNetV2',
        widen_factor=1.,
        strides=(1, 2, 2, 1, 1, 1, 1),
        dilations=(1, 1, 1, 2, 2, 4, 4),
        out_indices=(1, 2, 4, 6),
        norm_cfg=dict(type='SyncBN', requires_grad=True)),
    decode_head=dict(in_channels=320, c1_in_channels=24),
    auxiliary_head=dict(in_channels=96))
497
34.571429
76
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k.py
_base_ = '../deeplabv3plus/deeplabv3plus_r101-d8_512x512_160k_ade20k.py'
model = dict(
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(
        _delete_=True,
        type='MobileNetV2',
        widen_factor=1.,
        strides=(1, 2, 2, 1, 1, 1, 1),
        dilations=(1, 1, 1, 2, 2, 4, 4),
        out_indices=(1, 2, 4, 6),
        norm_cfg=dict(type='SyncBN', requires_grad=True)),
    decode_head=dict(in_channels=320, c1_in_channels=24),
    auxiliary_head=dict(in_channels=96))
493
34.285714
72
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py
_base_ = '../fcn/fcn_r101-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(
        _delete_=True,
        type='MobileNetV2',
        widen_factor=1.,
        strides=(1, 2, 2, 1, 1, 1, 1),
        dilations=(1, 1, 1, 2, 2, 4, 4),
        out_indices=(1, 2, 4, 6),
        norm_cfg=dict(type='SyncBN', requires_grad=True)),
    decode_head=dict(in_channels=320),
    auxiliary_head=dict(in_channels=96))
458
31.785714
58
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k.py
_base_ = '../fcn/fcn_r101-d8_512x512_160k_ade20k.py'
model = dict(
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(
        _delete_=True,
        type='MobileNetV2',
        widen_factor=1.,
        strides=(1, 2, 2, 1, 1, 1, 1),
        dilations=(1, 1, 1, 2, 2, 4, 4),
        out_indices=(1, 2, 4, 6),
        norm_cfg=dict(type='SyncBN', requires_grad=True)),
    decode_head=dict(in_channels=320),
    auxiliary_head=dict(in_channels=96))
454
31.5
58
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v2/mobilenet_v2.yml
Models:
- Name: fcn_m-v2-d8_512x1024_80k_cityscapes
  In Collection: FCN
  Metadata:
    backbone: M-V2-D8
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 70.42
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 3.4
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 70.16
      mIoU(ms+flip): 72.1
  Config: configs/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x1024_80k_cityscapes/fcn_m-v2-d8_512x1024_80k_cityscapes_20230227-144821-0d3a4e51.pth
- Name: pspnet_m-v2-d8_512x1024_80k_cityscapes
  In Collection: PSPNet
  Metadata:
    backbone: M-V2-D8
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 89.29
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 3.6
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 70.23
  Config: configs/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes/pspnet_m-v2-d8_512x1024_80k_cityscapes_20200825_124817-19e81d51.pth
- Name: deeplabv3_m-v2-d8_512x1024_80k_cityscapes
  In Collection: DeepLabV3
  Metadata:
    backbone: M-V2-D8
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 119.05
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 3.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 73.84
  Config: configs/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x1024_80k_cityscapes/deeplabv3_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-bef03590.pth
- Name: deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes
  In Collection: DeepLabV3+
  Metadata:
    backbone: M-V2-D8
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 119.05
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 5.1
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 75.2
  Config: configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes/deeplabv3plus_m-v2-d8_512x1024_80k_cityscapes_20200825_124836-d256dd4b.pth
- Name: fcn_m-v2-d8_512x512_160k_ade20k
  In Collection: FCN
  Metadata:
    backbone: M-V2-D8
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 15.53
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.5
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 19.71
  Config: configs/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/fcn_m-v2-d8_512x512_160k_ade20k/fcn_m-v2-d8_512x512_160k_ade20k_20200825_214953-c40e1095.pth
- Name: pspnet_m-v2-d8_512x512_160k_ade20k
  In Collection: PSPNet
  Metadata:
    backbone: M-V2-D8
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 17.33
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.5
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 29.68
  Config: configs/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k/pspnet_m-v2-d8_512x512_160k_ade20k_20200825_214953-f5942f7a.pth
- Name: deeplabv3_m-v2-d8_512x512_160k_ade20k
  In Collection: DeepLabV3
  Metadata:
    backbone: M-V2-D8
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 25.06
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.8
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 34.08
  Config: configs/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3_m-v2-d8_512x512_160k_ade20k/deeplabv3_m-v2-d8_512x512_160k_ade20k_20200825_223255-63986343.pth
- Name: deeplabv3plus_m-v2-d8_512x512_160k_ade20k
  In Collection: DeepLabV3+
  Metadata:
    backbone: M-V2-D8
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 23.2
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.2
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 34.02
  Config: configs/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v2/deeplabv3plus_m-v2-d8_512x512_160k_ade20k/deeplabv3plus_m-v2-d8_512x512_160k_ade20k_20200825_223255-465a01d4.pth
5,534
31.368421
195
yml
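The `inference time (ms/im)` values in these .yml metafiles and the fps numbers in the READMEs are the same measurement in different units; the conversion is simply fps = 1000 / ms. A one-line check against the FCN Cityscapes entry (70.42 ms/im vs. 14.2 fps in the README table):

```python
ms_per_im = 70.42
print(round(1000 / ms_per_im, 1))  # 14.2 fps, matching the README table
```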
mmsegmentation
mmsegmentation-master/configs/mobilenet_v2/pspnet_m-v2-d8_512x1024_80k_cityscapes.py
_base_ = '../pspnet/pspnet_r101-d8_512x1024_80k_cityscapes.py'
model = dict(
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(
        _delete_=True,
        type='MobileNetV2',
        widen_factor=1.,
        strides=(1, 2, 2, 1, 1, 1, 1),
        dilations=(1, 1, 1, 2, 2, 4, 4),
        out_indices=(1, 2, 4, 6),
        norm_cfg=dict(type='SyncBN', requires_grad=True)),
    decode_head=dict(in_channels=320),
    auxiliary_head=dict(in_channels=96))
464
32.214286
62
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v2/pspnet_m-v2-d8_512x512_160k_ade20k.py
_base_ = '../pspnet/pspnet_r101-d8_512x512_160k_ade20k.py'
model = dict(
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(
        _delete_=True,
        type='MobileNetV2',
        widen_factor=1.,
        strides=(1, 2, 2, 1, 1, 1, 1),
        dilations=(1, 1, 1, 2, 2, 4, 4),
        out_indices=(1, 2, 4, 6),
        norm_cfg=dict(type='SyncBN', requires_grad=True)),
    decode_head=dict(in_channels=320),
    auxiliary_head=dict(in_channels=96))
460
31.928571
58
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v3/README.md
# MobileNetV3

[Searching for MobileNetV3](https://arxiv.org/abs/1905.02244)

## Introduction

<!-- [BACKBONE] -->

<!-- [ALGORITHM] -->

<a href="https://github.com/tensorflow/models/tree/master/research/deeplab">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/mobilenet_v3.py#L15">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

We present the next generation of MobileNets based on a combination of complementary search techniques as well as a novel architecture design. MobileNetV3 is tuned to mobile phone CPUs through a combination of hardware-aware network architecture search (NAS) complemented by the NetAdapt algorithm and then subsequently improved through novel architecture advances. This paper starts the exploration of how automated search algorithms and network design can work together to harness complementary approaches improving the overall state of the art. Through this process we create two new MobileNet models for release: MobileNetV3-Large and MobileNetV3-Small which are targeted for high and low resource use cases. These models are then adapted and applied to the tasks of object detection and semantic segmentation. For the task of semantic segmentation (or any dense pixel prediction), we propose a new efficient segmentation decoder Lite Reduced Atrous Spatial Pyramid Pooling (LR-ASPP). We achieve new state of the art results for mobile classification, detection and segmentation. MobileNetV3-Large is 3.2% more accurate on ImageNet classification while reducing latency by 15% compared to MobileNetV2. MobileNetV3-Small is 4.6% more accurate while reducing latency by 5% compared to MobileNetV2. MobileNetV3-Large detection is 25% faster at roughly the same accuracy as MobileNetV2 on COCO detection. MobileNetV3-Large LR-ASPP is 30% faster than MobileNetV2 R-ASPP at similar accuracy for Cityscapes segmentation.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142902036-3dc2e0c0-d475-4816-b1ac-961836b41f5c.png" width="60%"/>
</div>

## Citation

```bibtex
@inproceedings{Howard_2019_ICCV,
  title={Searching for MobileNetV3},
  author={Howard, Andrew and Sandler, Mark and Chu, Grace and Chen, Liang-Chieh and Chen, Bo and Tan, Mingxing and Wang, Weijun and Zhu, Yukun and Pang, Ruoming and Vasudevan, Vijay and Le, Quoc V. and Adam, Hartwig},
  booktitle={The IEEE International Conference on Computer Vision (ICCV)},
  pages={1314-1324},
  month={October},
  year={2019},
  doi={10.1109/ICCV.2019.00140}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | ------------------ | --------- | ------: | -------: | -------------- | ----: | ------------- | ------ | -------- |
| LRASPP | M-V3-D8 | 512x1024 | 320000 | 8.9 | 15.22 | 69.54 | 70.89 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes/lraspp_m-v3-d8_512x1024_320k_cityscapes_20201224_220337-cfe8fb07.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes/lraspp_m-v3-d8_512x1024_320k_cityscapes-20201224_220337.log.json) |
| LRASPP | M-V3-D8 (scratch) | 512x1024 | 320000 | 8.9 | 14.77 | 67.87 | 69.78 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes_20201224_220337-9f29cd72.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes-20201224_220337.log.json) |
| LRASPP | M-V3s-D8 | 512x1024 | 320000 | 5.3 | 23.64 | 64.11 | 66.42 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes/lraspp_m-v3s-d8_512x1024_320k_cityscapes_20201224_223935-61565b34.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes/lraspp_m-v3s-d8_512x1024_320k_cityscapes-20201224_223935.log.json) |
| LRASPP | M-V3s-D8 (scratch) | 512x1024 | 320000 | 5.3 | 24.50 | 62.74 | 65.01 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes_20201224_223935-03daeabb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes-20201224_223935.log.json) |
6,489
126.254902
1,517
md
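The LR-ASPP head mentioned in the abstract combines a 1x1-conv feature branch with a heavily pooled branch that produces per-channel sigmoid gates, multiplied back onto the features. A condensed PyTorch sketch of that gating idea (simplified with global pooling, as in common re-implementations; not the MMSegmentation `LRASPPHead`):

```python
import torch
import torch.nn as nn

class LRASPPGate(nn.Module):
    def __init__(self, in_ch, mid_ch=128):
        super().__init__()
        self.feat = nn.Sequential(
            nn.Conv2d(in_ch, mid_ch, 1, bias=False),
            nn.BatchNorm2d(mid_ch), nn.ReLU(inplace=True))
        self.gate = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),   # cheap global context
            nn.Conv2d(in_ch, mid_ch, 1),
            nn.Sigmoid())              # per-channel attention weights

    def forward(self, x):
        return self.feat(x) * self.gate(x)  # broadcast multiply over H, W

y = LRASPPGate(576)(torch.randn(1, 576, 16, 32))
print(y.shape)  # torch.Size([1, 128, 16, 32])
```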
mmsegmentation
mmsegmentation-master/configs/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes.py
_base_ = [
    '../_base_/models/lraspp_m-v3-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]

model = dict(pretrained='open-mmlab://contrib/mobilenet_v3_large')

# Re-config the data sampler.
data = dict(samples_per_gpu=4, workers_per_gpu=4)

runner = dict(type='IterBasedRunner', max_iters=320000)
372
30.083333
77
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py
_base_ = [
    '../_base_/models/lraspp_m-v3-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]

# Re-config the data sampler.
data = dict(samples_per_gpu=4, workers_per_gpu=4)

runner = dict(type='IterBasedRunner', max_iters=320000)
304
29.5
77
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes.py
_base_ = './lraspp_m-v3-d8_512x1024_320k_cityscapes.py'
norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://contrib/mobilenet_v3_small',
    backbone=dict(
        type='MobileNetV3',
        arch='small',
        out_indices=(0, 1, 12),
        norm_cfg=norm_cfg),
    decode_head=dict(
        type='LRASPPHead',
        in_channels=(16, 16, 576),
        in_index=(0, 1, 2),
        channels=128,
        input_transform='multiple_select',
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        act_cfg=dict(type='ReLU'),
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
766
30.958333
74
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes.py
_base_ = './lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py'
norm_cfg = dict(type='SyncBN', eps=0.001, requires_grad=True)
model = dict(
    type='EncoderDecoder',
    backbone=dict(
        type='MobileNetV3',
        arch='small',
        out_indices=(0, 1, 12),
        norm_cfg=norm_cfg),
    decode_head=dict(
        type='LRASPPHead',
        in_channels=(16, 16, 576),
        in_index=(0, 1, 2),
        channels=128,
        input_transform='multiple_select',
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        act_cfg=dict(type='ReLU'),
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)))
716
30.173913
74
py
mmsegmentation
mmsegmentation-master/configs/mobilenet_v3/mobilenet_v3.yml
Collections:
- Name: LRASPP
  Metadata:
    Training Data:
    - Cityscapes
  Paper:
    URL: https://arxiv.org/abs/1905.02244
    Title: Searching for MobileNetV3
  README: configs/mobilenet_v3/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/mobilenet_v3.py#L15
    Version: v0.17.0
  Converted From:
    Code: https://github.com/tensorflow/models/tree/master/research/deeplab
Models:
- Name: lraspp_m-v3-d8_512x1024_320k_cityscapes
  In Collection: LRASPP
  Metadata:
    backbone: M-V3-D8
    crop size: (512,1024)
    lr schd: 320000
    inference time (ms/im):
    - value: 65.7
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 8.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 69.54
      mIoU(ms+flip): 70.89
  Config: configs/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_512x1024_320k_cityscapes/lraspp_m-v3-d8_512x1024_320k_cityscapes_20201224_220337-cfe8fb07.pth
- Name: lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes
  In Collection: LRASPP
  Metadata:
    backbone: M-V3-D8 (scratch)
    crop size: (512,1024)
    lr schd: 320000
    inference time (ms/im):
    - value: 67.7
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 8.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 67.87
      mIoU(ms+flip): 69.78
  Config: configs/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3-d8_scratch_512x1024_320k_cityscapes_20201224_220337-9f29cd72.pth
- Name: lraspp_m-v3s-d8_512x1024_320k_cityscapes
  In Collection: LRASPP
  Metadata:
    backbone: M-V3s-D8
    crop size: (512,1024)
    lr schd: 320000
    inference time (ms/im):
    - value: 42.3
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 5.3
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 64.11
      mIoU(ms+flip): 66.42
  Config: configs/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_512x1024_320k_cityscapes/lraspp_m-v3s-d8_512x1024_320k_cityscapes_20201224_223935-61565b34.pth
- Name: lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes
  In Collection: LRASPP
  Metadata:
    backbone: M-V3s-D8 (scratch)
    crop size: (512,1024)
    lr schd: 320000
    inference time (ms/im):
    - value: 40.82
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 5.3
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 62.74
      mIoU(ms+flip): 65.01
  Config: configs/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/mobilenet_v3/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes/lraspp_m-v3s-d8_scratch_512x1024_320k_cityscapes_20201224_223935-03daeabb.pth
3,425
31.942308
201
yml
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/README.md
# NonLocal Net

[Non-local Neural Networks](https://arxiv.org/abs/1711.07971)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/facebookresearch/video-nonlocal-net">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/nl_head.py#L10">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Both convolutional and recurrent operations are building blocks that process one local neighborhood at a time. In this paper, we present non-local operations as a generic family of building blocks for capturing long-range dependencies. Inspired by the classical non-local means method in computer vision, our non-local operation computes the response at a position as a weighted sum of the features at all positions. This building block can be plugged into many computer vision architectures. On the task of video classification, even without any bells and whistles, our non-local models can compete or outperform current competition winners on both Kinetics and Charades datasets. In static image recognition, our non-local models improve object detection/segmentation and pose estimation on the COCO suite of tasks. Code is available at [this https URL](https://github.com/facebookresearch/video-nonlocal-net).

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142902128-17e29678-bf12-4ff4-b3d6-a39b47dfd253.png" width="50%"/>
</div>

## Citation

```bibtex
@inproceedings{wang2018non,
  title={Non-local neural networks},
  author={Wang, Xiaolong and Girshick, Ross and Gupta, Abhinav and He, Kaiming},
  booktitle={Proceedings of the IEEE conference on computer vision and pattern recognition},
  pages={7794--7803},
  year={2018}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ----------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------ | -------- |
| NonLocalNet | R-50-D8 | 512x1024 | 40000 | 7.4 | 2.72 | 78.24 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748-c75e81e3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748.log.json) |
| NonLocalNet | R-101-D8 | 512x1024 | 40000 | 10.9 | 1.95 | 78.66 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748-d63729fa.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748.log.json) |
| NonLocalNet | R-50-D8 | 769x769 | 40000 | 8.9 | 1.52 | 78.33 | 79.92 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243-82ef6749.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243.log.json) |
| NonLocalNet | R-101-D8 | 769x769 | 40000 | 12.8 | 1.05 | 78.57 | 80.29 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348-8fe9a9dc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348.log.json) |
| NonLocalNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.01 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518-d6839fae.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518.log.json) |
| NonLocalNet | R-101-D8 | 512x1024 | 80000 | - | - | 78.93 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411-32700183.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411.log.json) |
| NonLocalNet | R-50-D8 | 769x769 | 80000 | - | - | 79.05 | 80.68 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506-1f9792f6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506.log.json) |
| NonLocalNet | R-101-D8 | 769x769 | 80000 | - | - | 79.40 | 80.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428-0e1fa4f9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ----------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| NonLocalNet | R-50-D8 | 512x512 | 80000 | 9.1 | 21.37 | 40.75 | 42.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801-5ae0aa33.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801.log.json) |
| NonLocalNet | R-101-D8 | 512x512 | 80000 | 12.6 | 13.97 | 42.90 | 44.27 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758-24105919.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758.log.json) |
| NonLocalNet | R-50-D8 | 512x512 | 160000 | - | - | 42.03 | 43.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410-baef45e3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410.log.json) |
| NonLocalNet | R-101-D8 | 512x512 | 160000 | - | - | 44.63 | 45.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20210827_221502-7881aa1a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20210827_221502.log.json) |

### Pascal VOC 2012 + Aug

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ----------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| NonLocalNet | R-50-D8 | 512x512 | 20000 | 6.4 | 21.21 | 76.20 | 77.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613-07f2a57c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613.log.json) |
| NonLocalNet | R-101-D8 | 512x512 | 20000 | 9.8 | 14.01 | 78.15 | 78.86 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615-948c68ab.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615.log.json) |
| NonLocalNet | R-50-D8 | 512x512 | 40000 | - | - | 76.65 | 77.47 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028-0139d4a9.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028.log.json) |
| NonLocalNet | R-101-D8 | 512x512 | 40000 | - | - | 78.27 | 79.12 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028-7e5ff470.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028.log.json) |
14,868
214.492754
912
md
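The non-local operation described in the abstract computes each output position as a weighted sum over all positions, y_i = (1/C(x)) * sum_j f(x_i, x_j) g(x_j); with the embedded-Gaussian f this reduces to a softmax attention map. A compact PyTorch sketch of a 2D embedded-Gaussian non-local block (simplified for illustration; not the MMSegmentation `NLHead`):

```python
import torch
import torch.nn as nn

class NonLocal2d(nn.Module):
    def __init__(self, in_ch):
        super().__init__()
        self.inter = in_ch // 2
        self.theta = nn.Conv2d(in_ch, self.inter, 1)  # query embedding
        self.phi = nn.Conv2d(in_ch, self.inter, 1)    # key embedding
        self.g = nn.Conv2d(in_ch, self.inter, 1)      # value embedding
        self.out = nn.Conv2d(self.inter, in_ch, 1)    # project back to in_ch

    def forward(self, x):
        n, c, h, w = x.shape
        q = self.theta(x).flatten(2).transpose(1, 2)  # (n, hw, c')
        k = self.phi(x).flatten(2)                    # (n, c', hw)
        v = self.g(x).flatten(2).transpose(1, 2)      # (n, hw, c')
        attn = (q @ k).softmax(dim=-1)                # pairwise weights f(x_i, x_j)
        y = (attn @ v).transpose(1, 2).reshape(n, self.inter, h, w)
        return x + self.out(y)                        # residual connection

y = NonLocal2d(64)(torch.randn(2, 64, 32, 32))
print(y.shape)  # torch.Size([2, 64, 32, 32])
```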
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_net.yml
Collections:
- Name: NonLocalNet
  Metadata:
    Training Data:
    - Cityscapes
    - ADE20K
    - Pascal VOC 2012 + Aug
  Paper:
    URL: https://arxiv.org/abs/1711.07971
    Title: Non-local Neural Networks
  README: configs/nonlocal_net/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/nl_head.py#L10
    Version: v0.17.0
  Converted From:
    Code: https://github.com/facebookresearch/video-nonlocal-net
Models:
- Name: nonlocal_r50-d8_512x1024_40k_cityscapes
  In Collection: NonLocalNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 367.65
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 7.4
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.24
  Config: configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes/nonlocal_r50-d8_512x1024_40k_cityscapes_20200605_210748-c75e81e3.pth
- Name: nonlocal_r101-d8_512x1024_40k_cityscapes
  In Collection: NonLocalNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 512.82
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 10.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.66
  Config: configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes/nonlocal_r101-d8_512x1024_40k_cityscapes_20200605_210748-d63729fa.pth
- Name: nonlocal_r50-d8_769x769_40k_cityscapes
  In Collection: NonLocalNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 657.89
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 8.9
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.33
      mIoU(ms+flip): 79.92
  Config: configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes/nonlocal_r50-d8_769x769_40k_cityscapes_20200530_045243-82ef6749.pth
- Name: nonlocal_r101-d8_769x769_40k_cityscapes
  In Collection: NonLocalNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 952.38
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 12.8
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.57
      mIoU(ms+flip): 80.29
  Config: configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes/nonlocal_r101-d8_769x769_40k_cityscapes_20200530_045348-8fe9a9dc.pth
- Name: nonlocal_r50-d8_512x1024_80k_cityscapes
  In Collection: NonLocalNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.01
  Config: configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes/nonlocal_r50-d8_512x1024_80k_cityscapes_20200607_193518-d6839fae.pth
- Name: nonlocal_r101-d8_512x1024_80k_cityscapes
  In Collection: NonLocalNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.93
  Config: configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes/nonlocal_r101-d8_512x1024_80k_cityscapes_20200607_183411-32700183.pth
- Name: nonlocal_r50-d8_769x769_80k_cityscapes
  In Collection: NonLocalNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.05
      mIoU(ms+flip): 80.68
  Config: configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes/nonlocal_r50-d8_769x769_80k_cityscapes_20200607_193506-1f9792f6.pth
- Name: nonlocal_r101-d8_769x769_80k_cityscapes
  In Collection: NonLocalNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.4
      mIoU(ms+flip): 80.85
  Config: configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes/nonlocal_r101-d8_769x769_80k_cityscapes_20200607_183428-0e1fa4f9.pth
- Name: nonlocal_r50-d8_512x512_80k_ade20k
  In Collection: NonLocalNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 46.79
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.1
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 40.75
      mIoU(ms+flip): 42.05
  Config: configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k/nonlocal_r50-d8_512x512_80k_ade20k_20200615_015801-5ae0aa33.pth
- Name: nonlocal_r101-d8_512x512_80k_ade20k
  In Collection: NonLocalNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 71.58
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 12.6
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.9
      mIoU(ms+flip): 44.27
  Config: configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k/nonlocal_r101-d8_512x512_80k_ade20k_20200615_015758-24105919.pth
- Name: nonlocal_r50-d8_512x512_160k_ade20k
  In Collection: NonLocalNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.03
      mIoU(ms+flip): 43.04
  Config: configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k/nonlocal_r50-d8_512x512_160k_ade20k_20200616_005410-baef45e3.pth
- Name: nonlocal_r101-d8_512x512_160k_ade20k
  In Collection: NonLocalNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 44.63
      mIoU(ms+flip): 45.79
  Config: configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k/nonlocal_r101-d8_512x512_160k_ade20k_20210827_221502-7881aa1a.pth
- Name: nonlocal_r50-d8_512x512_20k_voc12aug
  In Collection: NonLocalNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 47.15
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.4
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 76.2
      mIoU(ms+flip): 77.12
  Config: configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug/nonlocal_r50-d8_512x512_20k_voc12aug_20200617_222613-07f2a57c.pth
- Name: nonlocal_r101-d8_512x512_20k_voc12aug
  In Collection: NonLocalNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 71.38
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.8
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 78.15
      mIoU(ms+flip): 78.86
  Config: configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug/nonlocal_r101-d8_512x512_20k_voc12aug_20200617_222615-948c68ab.pth
- Name: nonlocal_r50-d8_512x512_40k_voc12aug
  In Collection: NonLocalNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 76.65
      mIoU(ms+flip): 77.47
  Config: configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug/nonlocal_r50-d8_512x512_40k_voc12aug_20200614_000028-0139d4a9.pth
- Name: nonlocal_r101-d8_512x512_40k_voc12aug
  In Collection: NonLocalNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 78.27
      mIoU(ms+flip): 79.12
  Config: configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug/nonlocal_r101-d8_512x512_40k_voc12aug_20200614_000028-7e5ff470.pth
10,411
33.476821
185
yml
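The collection metadata above points at the non-local paper and the `nl_head.py` decode head it is built on. As a reading aid, here is a minimal PyTorch sketch of the embedded-Gaussian non-local block that paper describes; it is an illustrative reduction, not the referenced mmseg implementation, and the class and attribute names are my own.

```python
# Minimal sketch of a 2D non-local (embedded Gaussian) block, assuming
# PyTorch is available. Illustrative only; the real head lives in
# mmseg/models/decode_heads/nl_head.py referenced by the metadata above.
import torch
import torch.nn as nn


class NonLocal2d(nn.Module):

    def __init__(self, in_channels, reduction=2):
        super().__init__()
        self.inter_channels = in_channels // reduction
        # 1x1 convs produce the query/key/value embeddings (theta/phi/g).
        self.theta = nn.Conv2d(in_channels, self.inter_channels, 1)
        self.phi = nn.Conv2d(in_channels, self.inter_channels, 1)
        self.g = nn.Conv2d(in_channels, self.inter_channels, 1)
        self.out = nn.Conv2d(self.inter_channels, in_channels, 1)

    def forward(self, x):
        n, _, h, w = x.shape
        theta = self.theta(x).flatten(2).transpose(1, 2)  # (n, hw, c')
        phi = self.phi(x).flatten(2)                      # (n, c', hw)
        g = self.g(x).flatten(2).transpose(1, 2)          # (n, hw, c')
        # Pairwise affinity between all spatial positions, softmax-normalized.
        attn = torch.softmax(theta @ phi, dim=-1)         # (n, hw, hw)
        y = (attn @ g).transpose(1, 2).reshape(n, self.inter_channels, h, w)
        return x + self.out(y)  # residual connection, as in the paper
```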
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r101-d8_512x1024_40k_cityscapes.py
_base_ = './nonlocal_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
136
44.666667
79
py
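The R-101 configs in this directory are two-statement overrides of their R-50 counterparts: `_base_` pulls in the full R-50 config, and only `pretrained` and `backbone.depth` are replaced. The override relies on recursive dict merging. The sketch below is a hedged plain-Python illustration of that merge rule, not mmcv's actual `Config` implementation; the `type='ResNetV1c'` value is an assumed stand-in for whatever the base model file defines.

```python
# Illustrative recursive config merge over plain dicts; mmcv's Config adds
# file loading and more on top of this basic rule.
def merge_cfg(base, override):
    merged = dict(base)
    for key, value in override.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = merge_cfg(merged[key], value)  # deep-merge dicts
        else:
            merged[key] = value  # scalars and lists are replaced outright
    return merged


base = dict(model=dict(pretrained='open-mmlab://resnet50_v1c',
                       backbone=dict(type='ResNetV1c', depth=50)))
override = dict(model=dict(pretrained='open-mmlab://resnet101_v1c',
                           backbone=dict(depth=101)))
print(merge_cfg(base, override)['model']['backbone'])
# -> {'type': 'ResNetV1c', 'depth': 101}  (depth replaced, type inherited)
```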
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r101-d8_512x1024_80k_cityscapes.py
_base_ = './nonlocal_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
136
44.666667
79
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r101-d8_512x512_160k_ade20k.py
_base_ = './nonlocal_r50-d8_512x512_160k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
132
43.333333
79
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r101-d8_512x512_20k_voc12aug.py
_base_ = './nonlocal_r50-d8_512x512_20k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r101-d8_512x512_40k_voc12aug.py
_base_ = './nonlocal_r50-d8_512x512_40k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r101-d8_512x512_80k_ade20k.py
_base_ = './nonlocal_r50-d8_512x512_80k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
131
43
79
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r101-d8_769x769_40k_cityscapes.py
_base_ = './nonlocal_r50-d8_769x769_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
135
44.333333
79
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r101-d8_769x769_80k_cityscapes.py
_base_ = './nonlocal_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
135
44.333333
79
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py
_base_ = [
    '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
166
32.4
78
py
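A config such as the one above is only a list of fragments until it is composed. Assuming mmcv is installed and the repo root is the working directory, `mmcv.Config.fromfile` resolves the `_base_` chain into one flat config; a minimal usage sketch:

```python
# Load a composed mmsegmentation config; Config.fromfile recursively merges
# the files named in `_base_` before applying any local overrides.
from mmcv import Config

cfg = Config.fromfile(
    'configs/nonlocal_net/nonlocal_r50-d8_512x1024_40k_cityscapes.py')
print(cfg.model.decode_head.type)  # decode head from the model base file
print(cfg.runner)                  # iteration budget from schedule_40k.py
```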
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r50-d8_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
166
32.4
78
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r50-d8_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
254
35.428571
76
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r50-d8_512x512_20k_voc12aug.py
_base_ = [
    '../_base_/models/nonlocal_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
265
32.25
77
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r50-d8_512x512_40k_voc12aug.py
_base_ = [
    '../_base_/models/nonlocal_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
265
32.25
77
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r50-d8_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/nonlocal_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
253
35.285714
76
py
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r50-d8_769x769_40k_cityscapes.py
_base_ = [
    '../_base_/models/nonlocal_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
353
34.4
79
py
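The 769x769 configs above switch test-time evaluation to `test_cfg=dict(mode='slide', ...)`: each image is tiled with overlapping 769x769 crops placed every 513 pixels, and logits are averaged where crops overlap. Below is a hedged sketch of just the window-placement arithmetic, not mmseg's actual `slide_inference` code; `slide_windows` is a hypothetical helper name.

```python
# Illustrative sliding-window placement for test_cfg mode='slide'. Crops
# overlap because stride < crop size; edge windows are clamped inward.
import math


def slide_windows(h, w, crop=769, stride=513):
    grids_h = max(math.ceil((h - crop) / stride) + 1, 1)
    grids_w = max(math.ceil((w - crop) / stride) + 1, 1)
    windows = []
    for i in range(grids_h):
        for j in range(grids_w):
            y1 = min(i * stride, max(h - crop, 0))
            x1 = min(j * stride, max(w - crop, 0))
            windows.append((y1, x1, min(y1 + crop, h), min(x1 + crop, w)))
    return windows


# Cityscapes val images are 1024x2048: 2 rows x 4 columns = 8 crops.
print(slide_windows(1024, 2048))
```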
mmsegmentation
mmsegmentation-master/configs/nonlocal_net/nonlocal_r50-d8_769x769_80k_cityscapes.py
_base_ = [
    '../_base_/models/nonlocal_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
353
34.4
79
py
mmsegmentation
mmsegmentation-master/configs/ocrnet/README.md
# OCRNet

[Object-Contextual Representations for Semantic Segmentation](https://arxiv.org/abs/1909.11065)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/openseg-group/OCNet.pytorch">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ocr_head.py#L86">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

In this paper, we address the problem of semantic segmentation and focus on the context aggregation strategy for robust segmentation. Our motivation is that the label of a pixel is the category of the object that the pixel belongs to. We present a simple yet effective approach, object-contextual representations, characterizing a pixel by exploiting the representation of the corresponding object class. First, we construct object regions based on a feature map supervised by the ground-truth segmentation, and then compute the object region representations. Second, we compute the representation similarity between each pixel and each object region, and augment the representation of each pixel with an object contextual representation, which is a weighted aggregation of all the object region representations according to their similarities with the pixel. We empirically demonstrate that the proposed approach achieves competitive performance on six challenging semantic segmentation benchmarks: Cityscapes, ADE20K, LIP, PASCAL VOC 2012, PASCAL-Context and COCO-Stuff. Notably, we achieved the 2nd place on the Cityscapes leader-board with a single model.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142902197-b06b1e04-57ab-44ac-adc8-cea6695bb236.png" width="70%"/>
</div>

## Citation

```bibtex
@article{YuanW18,
  title={Ocnet: Object context network for scene parsing},
  author={Yuhui Yuan and Jingdong Wang},
  booktitle={arXiv preprint arXiv:1809.00916},
  year={2018}
}

@article{YuanCW20,
  title={Object-Contextual Representations for Semantic Segmentation},
  author={Yuhui Yuan and Xilin Chen and Jingdong Wang},
  booktitle={ECCV},
  year={2020}
}
```

## Results and models

### Cityscapes

#### HRNet backbone

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| OCRNet | HRNetV2p-W18-Small | 512x1024 | 40000 | 3.5 | 10.45 | 74.30 | 75.95 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304-fa2436c2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304.log.json) |
| OCRNet | HRNetV2p-W18 | 512x1024 | 40000 | 4.7 | 7.50 | 77.72 | 79.49 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320-401c5bdd.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320.log.json) |
| OCRNet | HRNetV2p-W48 | 512x1024 | 40000 | 8 | 4.22 | 80.58 | 81.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336-55b32491.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336.log.json) |
| OCRNet | HRNetV2p-W18-Small | 512x1024 | 80000 | - | - | 77.16 | 78.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735-55979e63.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735.log.json) |
| OCRNet | HRNetV2p-W18 | 512x1024 | 80000 | - | - | 78.57 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521-c2e1dd4a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521.log.json) |
| OCRNet | HRNetV2p-W48 | 512x1024 | 80000 | - | - | 80.70 | 81.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752-9076bcdf.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752.log.json) |
| OCRNet | HRNetV2p-W18-Small | 512x1024 | 160000 | - | - | 78.45 | 79.97 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005-f4a7af28.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005.log.json) |
| OCRNet | HRNetV2p-W18 | 512x1024 | 160000 | - | - | 79.47 | 80.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001-b9172d0c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001.log.json) |
| OCRNet | HRNetV2p-W48 | 512x1024 | 160000 | - | - | 81.35 | 82.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037-dfbf1b0c.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037.log.json) |

#### ResNet backbone

| Method | Backbone | Crop Size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ---------- | ------- | -------- | -------------- | ----- | ------------: | ------ | -------- |
| OCRNet | R-101-D8 | 512x1024 | 8 | 40000 | - | - | 80.09 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes/ocrnet_r101-d8_512x1024_40k_b8_cityscapes_20200717_110721-02ac0f13.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes/ocrnet_r101-d8_512x1024_40k_b8_cityscapes_20200717_110721.log.json) |
| OCRNet | R-101-D8 | 512x1024 | 16 | 40000 | 8.8 | 3.02 | 80.30 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes/ocrnet_r101-d8_512x1024_40k_b16_cityscapes_20200723_193726-db500f80.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes/ocrnet_r101-d8_512x1024_40k_b16_cityscapes_20200723_193726.log.json) |
| OCRNet | R-101-D8 | 512x1024 | 16 | 80000 | 8.8 | 3.02 | 80.81 | - | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes/ocrnet_r101-d8_512x1024_80k_b16_cityscapes_20200723_192421-78688424.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes/ocrnet_r101-d8_512x1024_80k_b16_cityscapes_20200723_192421.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| OCRNet | HRNetV2p-W18-Small | 512x512 | 80000 | 6.7 | 28.98 | 35.06 | 35.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600-e80b62af.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600.log.json) |
| OCRNet | HRNetV2p-W18 | 512x512 | 80000 | 7.9 | 18.93 | 37.79 | 39.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157-d173d83b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157.log.json) |
| OCRNet | HRNetV2p-W48 | 512x512 | 80000 | 11.2 | 16.99 | 43.00 | 44.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518-d168c2d1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518.log.json) |
| OCRNet | HRNetV2p-W18-Small | 512x512 | 160000 | - | - | 37.19 | 38.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505-8e913058.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505.log.json) |
| OCRNet | HRNetV2p-W18 | 512x512 | 160000 | - | - | 39.32 | 40.80 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940-d8fcd9d1.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940.log.json) |
| OCRNet | HRNetV2p-W48 | 512x512 | 160000 | - | - | 43.25 | 44.88 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705-a073726d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705.log.json) |

### Pascal VOC 2012 + Aug

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | ------------------ | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| OCRNet | HRNetV2p-W18-Small | 512x512 | 20000 | 3.5 | 31.55 | 71.70 | 73.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913-02b04fcb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913.log.json) |
| OCRNet | HRNetV2p-W18 | 512x512 | 20000 | 4.7 | 19.91 | 74.75 | 77.11 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932-8954cbb7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932.log.json) |
| OCRNet | HRNetV2p-W48 | 512x512 | 20000 | 8.1 | 17.83 | 77.72 | 79.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932-9e82080a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932.log.json) |
| OCRNet | HRNetV2p-W18-Small | 512x512 | 40000 | - | - | 72.76 | 74.60 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025-42b587ac.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025.log.json) |
| OCRNet | HRNetV2p-W18 | 512x512 | 40000 | - | - | 74.98 | 77.40 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958-714302be.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958.log.json) |
| OCRNet | HRNetV2p-W48 | 512x512 | 40000 | - | - | 77.14 | 79.71 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ocrnet/ocrnet_hr48_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958-255bc5ce.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958.log.json) |
20,386
225.522222
1,164
md
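The abstract in the README above describes OCR in two steps: pool pixel features into soft object-region representations, then redistribute those region representations back to pixels by similarity. Below is a condensed, hedged PyTorch sketch of those two steps; it is an illustrative reduction of the idea (the real module is the `ocr_head.py` linked in the README), and the function name is hypothetical.

```python
# Sketch of object-contextual aggregation, assuming the soft object regions
# come from the auxiliary (FCN) head's coarse logits, as in OCRNet.
import torch


def ocr_context(feats, coarse_logits):
    """feats: (n, c, h, w); coarse_logits: (n, k, h, w) for k classes."""
    n, c, h, w = feats.shape
    pixels = feats.flatten(2)                                  # (n, c, hw)
    regions = torch.softmax(coarse_logits.flatten(2), dim=-1)  # (n, k, hw)
    # Step 1: object region representations via soft-weighted pixel pooling.
    region_feats = regions @ pixels.transpose(1, 2)            # (n, k, c)
    # Step 2: pixel-to-region similarities, then aggregate regions per pixel.
    sim = torch.softmax(
        pixels.transpose(1, 2) @ region_feats.transpose(1, 2), dim=-1)
    context = (sim @ region_feats).transpose(1, 2)             # (n, c, hw)
    # The head then fuses this context with the original pixel features.
    return context.reshape(n, c, h, w)
```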
mmsegmentation
mmsegmentation-master/configs/ocrnet/ocrnet.yml
Collections:
- Name: OCRNet
  Metadata:
    Training Data:
    - Cityscapes
    - ADE20K
    - Pascal VOC 2012 + Aug
  Paper:
    URL: https://arxiv.org/abs/1909.11065
    Title: Object-Contextual Representations for Semantic Segmentation
  README: configs/ocrnet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ocr_head.py#L86
    Version: v0.17.0
  Converted From:
    Code: https://github.com/openseg-group/OCNet.pytorch
Models:
- Name: ocrnet_hr18s_512x1024_40k_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 95.69
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 3.5
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 74.3
      mIoU(ms+flip): 75.95
  Config: configs/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_40k_cityscapes/ocrnet_hr18s_512x1024_40k_cityscapes_20200601_033304-fa2436c2.pth
- Name: ocrnet_hr18_512x1024_40k_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 133.33
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 4.7
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.72
      mIoU(ms+flip): 79.49
  Config: configs/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_40k_cityscapes/ocrnet_hr18_512x1024_40k_cityscapes_20200601_033320-401c5bdd.pth
- Name: ocrnet_hr48_512x1024_40k_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 236.97
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 8.0
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 80.58
      mIoU(ms+flip): 81.79
  Config: configs/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_40k_cityscapes/ocrnet_hr48_512x1024_40k_cityscapes_20200601_033336-55b32491.pth
- Name: ocrnet_hr18s_512x1024_80k_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.16
      mIoU(ms+flip): 78.66
  Config: configs/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_80k_cityscapes/ocrnet_hr18s_512x1024_80k_cityscapes_20200601_222735-55979e63.pth
- Name: ocrnet_hr18_512x1024_80k_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.57
      mIoU(ms+flip): 80.46
  Config: configs/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_80k_cityscapes/ocrnet_hr18_512x1024_80k_cityscapes_20200614_230521-c2e1dd4a.pth
- Name: ocrnet_hr48_512x1024_80k_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 80.7
      mIoU(ms+flip): 81.87
  Config: configs/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_80k_cityscapes/ocrnet_hr48_512x1024_80k_cityscapes_20200601_222752-9076bcdf.pth
- Name: ocrnet_hr18s_512x1024_160k_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,1024)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.45
      mIoU(ms+flip): 79.97
  Config: configs/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x1024_160k_cityscapes/ocrnet_hr18s_512x1024_160k_cityscapes_20200602_191005-f4a7af28.pth
- Name: ocrnet_hr18_512x1024_160k_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,1024)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.47
      mIoU(ms+flip): 80.91
  Config: configs/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x1024_160k_cityscapes/ocrnet_hr18_512x1024_160k_cityscapes_20200602_191001-b9172d0c.pth
- Name: ocrnet_hr48_512x1024_160k_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,1024)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 81.35
      mIoU(ms+flip): 82.7
  Config: configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes/ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037-dfbf1b0c.pth
- Name: ocrnet_r101-d8_512x1024_40k_b8_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 80.09
  Config: configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes/ocrnet_r101-d8_512x1024_40k_b8_cityscapes_20200717_110721-02ac0f13.pth
- Name: ocrnet_r101-d8_512x1024_40k_b16_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 331.13
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 8.8
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 80.3
  Config: configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_40k_b16_cityscapes/ocrnet_r101-d8_512x1024_40k_b16_cityscapes_20200723_193726-db500f80.pth
- Name: ocrnet_r101-d8_512x1024_80k_b16_cityscapes
  In Collection: OCRNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 80000
    inference time (ms/im):
    - value: 331.13
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 8.8
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 80.81
  Config: configs/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_r101-d8_512x1024_80k_b16_cityscapes/ocrnet_r101-d8_512x1024_80k_b16_cityscapes_20200723_192421-78688424.pth
- Name: ocrnet_hr18s_512x512_80k_ade20k
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 34.51
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.7
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 35.06
      mIoU(ms+flip): 35.8
  Config: configs/ocrnet/ocrnet_hr18s_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_80k_ade20k/ocrnet_hr18s_512x512_80k_ade20k_20200615_055600-e80b62af.pth
- Name: ocrnet_hr18_512x512_80k_ade20k
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 52.83
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 7.9
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 37.79
      mIoU(ms+flip): 39.16
  Config: configs/ocrnet/ocrnet_hr18_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_80k_ade20k/ocrnet_hr18_512x512_80k_ade20k_20200615_053157-d173d83b.pth
- Name: ocrnet_hr48_512x512_80k_ade20k
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 58.86
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 11.2
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.0
      mIoU(ms+flip): 44.3
  Config: configs/ocrnet/ocrnet_hr48_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_80k_ade20k/ocrnet_hr48_512x512_80k_ade20k_20200615_021518-d168c2d1.pth
- Name: ocrnet_hr18s_512x512_160k_ade20k
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 37.19
      mIoU(ms+flip): 38.4
  Config: configs/ocrnet/ocrnet_hr18s_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_160k_ade20k/ocrnet_hr18s_512x512_160k_ade20k_20200615_184505-8e913058.pth
- Name: ocrnet_hr18_512x512_160k_ade20k
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 39.32
      mIoU(ms+flip): 40.8
  Config: configs/ocrnet/ocrnet_hr18_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_160k_ade20k/ocrnet_hr18_512x512_160k_ade20k_20200615_200940-d8fcd9d1.pth
- Name: ocrnet_hr48_512x512_160k_ade20k
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.25
      mIoU(ms+flip): 44.88
  Config: configs/ocrnet/ocrnet_hr48_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_160k_ade20k/ocrnet_hr48_512x512_160k_ade20k_20200615_184705-a073726d.pth
- Name: ocrnet_hr18s_512x512_20k_voc12aug
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 31.7
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 3.5
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 71.7
      mIoU(ms+flip): 73.84
  Config: configs/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_20k_voc12aug/ocrnet_hr18s_512x512_20k_voc12aug_20200617_233913-02b04fcb.pth
- Name: ocrnet_hr18_512x512_20k_voc12aug
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 50.23
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 4.7
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 74.75
      mIoU(ms+flip): 77.11
  Config: configs/ocrnet/ocrnet_hr18_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_20k_voc12aug/ocrnet_hr18_512x512_20k_voc12aug_20200617_233932-8954cbb7.pth
- Name: ocrnet_hr48_512x512_20k_voc12aug
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 56.09
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.1
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 77.72
      mIoU(ms+flip): 79.87
  Config: configs/ocrnet/ocrnet_hr48_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_20k_voc12aug/ocrnet_hr48_512x512_20k_voc12aug_20200617_233932-9e82080a.pth
- Name: ocrnet_hr18s_512x512_40k_voc12aug
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18-Small
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 72.76
      mIoU(ms+flip): 74.6
  Config: configs/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18s_512x512_40k_voc12aug/ocrnet_hr18s_512x512_40k_voc12aug_20200614_002025-42b587ac.pth
- Name: ocrnet_hr18_512x512_40k_voc12aug
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W18
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 74.98
      mIoU(ms+flip): 77.4
  Config: configs/ocrnet/ocrnet_hr18_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr18_512x512_40k_voc12aug/ocrnet_hr18_512x512_40k_voc12aug_20200614_015958-714302be.pth
- Name: ocrnet_hr48_512x512_40k_voc12aug
  In Collection: OCRNet
  Metadata:
    backbone: HRNetV2p-W48
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 77.14
      mIoU(ms+flip): 79.71
  Config: configs/ocrnet/ocrnet_hr48_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/ocrnet_hr48_512x512_40k_voc12aug/ocrnet_hr48_512x512_40k_voc12aug_20200614_015958-255bc5ce.pth
14,727
32.548975
183
yml
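To try any checkpoint listed in the metadata above, the config path and weights URL can be passed straight to the mmsegmentation 0.x inference API; `demo.png` below is a placeholder image path, and a CUDA device is assumed.

```python
# Run inference with one of the OCRNet checkpoints listed above, assuming
# mmsegmentation 0.x and its dependencies are installed and the working
# directory is the repo root.
from mmseg.apis import inference_segmentor, init_segmentor

config = 'configs/ocrnet/ocrnet_hr48_512x1024_160k_cityscapes.py'
checkpoint = (
    'https://download.openmmlab.com/mmsegmentation/v0.5/ocrnet/'
    'ocrnet_hr48_512x1024_160k_cityscapes/'
    'ocrnet_hr48_512x1024_160k_cityscapes_20200602_191037-dfbf1b0c.pth')

model = init_segmentor(config, checkpoint, device='cuda:0')
result = inference_segmentor(model, 'demo.png')  # list with one (H, W) mask
```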