Dataset columns:

| column | dtype | range |
| --- | --- | --- |
| repo | string | lengths 2–152 |
| file | string | lengths 15–239 |
| code | string | lengths 0–58.4M |
| file_length | int64 | 0–58.4M |
| avg_line_length | float64 | 0–1.81M |
| max_line_length | int64 | 0–12.7M |
| extension_type | string | 364 classes |
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/ocrnet_r50-d8.py
code:

# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='CascadeEncoderDecoder',
    num_stages=2,
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=[
        dict(
            type='FCNHead',
            in_channels=1024,
            in_index=2,
            channels=256,
            num_convs=1,
            concat_input=False,
            dropout_ratio=0.1,
            num_classes=19,
            norm_cfg=norm_cfg,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='OCRHead',
            in_channels=2048,
            in_index=3,
            channels=512,
            ocr_channels=256,
            dropout_ratio=0.1,
            num_classes=19,
            norm_cfg=norm_cfg,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
    ],
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 1,385 · avg_line_length: 27.875 · max_line_length: 78 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/pointrend_r50.py
code:

# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='CascadeEncoderDecoder',
    num_stages=2,
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 1, 1),
        strides=(1, 2, 2, 2),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    neck=dict(
        type='FPN',
        in_channels=[256, 512, 1024, 2048],
        out_channels=256,
        num_outs=4),
    decode_head=[
        dict(
            type='FPNHead',
            in_channels=[256, 256, 256, 256],
            in_index=[0, 1, 2, 3],
            feature_strides=[4, 8, 16, 32],
            channels=128,
            dropout_ratio=-1,
            num_classes=19,
            norm_cfg=norm_cfg,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='PointHead',
            in_channels=[256],
            in_index=[0],
            channels=256,
            num_fcs=3,
            coarse_pred_each_layer=True,
            dropout_ratio=-1,
            num_classes=19,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
    ],
    # model training and testing settings
    train_cfg=dict(
        num_points=2048, oversample_ratio=3, importance_sample_ratio=0.75),
    test_cfg=dict(
        mode='whole',
        subdivision_steps=2,
        subdivision_num_points=8196,
        scale_factor=2))

file_length: 1,704 · avg_line_length: 28.912281 · max_line_length: 78 · extension_type: py
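The `train_cfg` above sets PointRend's point-sampling budget: 2048 training points, a 3x oversampled candidate pool, and 75% of the points chosen by uncertainty. A minimal sketch of that selection rule, assuming `uncertainty` is a precomputed per-pixel score (higher = less confident); this is an illustration, not mmseg's internal implementation:

```python
import torch

def sample_point_indices(uncertainty, num_points=2048, oversample_ratio=3,
                         importance_sample_ratio=0.75):
    """uncertainty: (N, H*W) per-pixel scores, higher = less confident.
    Returns (N, num_points) flat indices of the points to train on."""
    n_candidates = num_points * oversample_ratio              # 6144 candidates
    n_important = int(num_points * importance_sample_ratio)   # 1536 hardest
    n_random = num_points - n_important                       # 512 for coverage

    # Draw random candidates, then keep the most uncertain among them.
    cand = torch.randint(uncertainty.shape[1],
                         (uncertainty.shape[0], n_candidates))
    top = torch.gather(uncertainty, 1, cand).topk(n_important, dim=1).indices
    important = torch.gather(cand, 1, top)

    # Pad with uniformly random points so easy regions still get gradient.
    rand = torch.randint(uncertainty.shape[1],
                         (uncertainty.shape[0], n_random))
    return torch.cat([important, rand], dim=1)
```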
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/psanet_r50-d8.py
code:

# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='PSAHead',
        in_channels=2048,
        in_index=3,
        channels=512,
        mask_size=(97, 97),
        psa_type='bi-direction',
        compact=False,
        shrink_factor=2,
        normalization_factor=1.0,
        psa_softmax=True,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 1,406 · avg_line_length: 27.14 · max_line_length: 74 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/pspnet_r50-d8.py
code:

# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 2, 4),
        strides=(1, 2, 1, 1),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='PSPHead',
        in_channels=2048,
        in_index=3,
        channels=512,
        pool_scales=(1, 2, 3, 6),
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 1,271 · avg_line_length: 27.266667 · max_line_length: 74 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/pspnet_unet_s5-d16.py
code:

# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='UNet',
        in_channels=3,
        base_channels=64,
        num_stages=5,
        strides=(1, 1, 1, 1, 1),
        enc_num_convs=(2, 2, 2, 2, 2),
        dec_num_convs=(2, 2, 2, 2),
        downsamples=(True, True, True, True),
        enc_dilations=(1, 1, 1, 1, 1),
        dec_dilations=(1, 1, 1, 1),
        with_cp=False,
        conv_cfg=None,
        norm_cfg=norm_cfg,
        act_cfg=dict(type='ReLU'),
        upsample_cfg=dict(type='InterpConv'),
        norm_eval=False),
    decode_head=dict(
        type='PSPHead',
        in_channels=64,
        in_index=4,
        channels=16,
        pool_scales=(1, 2, 3, 6),
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=128,
        in_index=3,
        channels=64,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=2,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='slide', crop_size=(256, 256), stride=(170, 170)))

file_length: 1,511 · avg_line_length: 28.647059 · max_line_length: 74 · extension_type: py
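`test_cfg=dict(mode='slide', crop_size=(256, 256), stride=(170, 170))` evaluates overlapping 256x256 crops placed every 170 px (86 px overlap) and fuses the logits where windows overlap. A quick sketch of the resulting window grid, assuming the usual ceil-based placement with the last window clamped to the image border:

```python
import math

def num_slide_windows(img_h, img_w, crop=(256, 256), stride=(170, 170)):
    # Window positions per axis; the final window is shifted back so it
    # never overruns the border, and overlapping logits are averaged.
    rows = max(math.ceil((img_h - crop[0]) / stride[0]) + 1, 1)
    cols = max(math.ceil((img_w - crop[1]) / stride[1]) + 1, 1)
    return rows * cols

print(num_slide_windows(512, 512))  # 3 x 3 = 9 overlapping crops
```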
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/segformer_mit-b0.py
code:

# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='MixVisionTransformer',
        in_channels=3,
        embed_dims=32,
        num_stages=4,
        num_layers=[2, 2, 2, 2],
        num_heads=[1, 2, 5, 8],
        patch_sizes=[7, 3, 3, 3],
        sr_ratios=[8, 4, 2, 1],
        out_indices=(0, 1, 2, 3),
        mlp_ratio=4,
        qkv_bias=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.1),
    decode_head=dict(
        type='SegformerHead',
        in_channels=[32, 64, 160, 256],
        in_index=[0, 1, 2, 3],
        channels=256,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 993 · avg_line_length: 27.4 · max_line_length: 74 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/segmenter_vit-b16_mask.py
code:

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/segmenter/vit_base_p16_384_20220308-96dfe169.pth'  # noqa
# model settings
backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=checkpoint,
    backbone=dict(
        type='VisionTransformer',
        img_size=(512, 512),
        patch_size=16,
        in_channels=3,
        embed_dims=768,
        num_layers=12,
        num_heads=12,
        drop_path_rate=0.1,
        attn_drop_rate=0.0,
        drop_rate=0.0,
        final_norm=True,
        norm_cfg=backbone_norm_cfg,
        with_cls_token=True,
        interpolate_mode='bicubic',
    ),
    decode_head=dict(
        type='SegmenterMaskTransformerHead',
        in_channels=768,
        channels=768,
        num_classes=150,
        num_layers=2,
        num_heads=12,
        embed_dims=768,
        dropout_ratio=0.0,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
    ),
    test_cfg=dict(mode='slide', crop_size=(512, 512), stride=(480, 480)),
)

file_length: 1,109 · avg_line_length: 29 · max_line_length: 131 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/setr_mla.py
code:

# model settings
backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True)
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth',
    backbone=dict(
        type='VisionTransformer',
        img_size=(768, 768),
        patch_size=16,
        in_channels=3,
        embed_dims=1024,
        num_layers=24,
        num_heads=16,
        out_indices=(5, 11, 17, 23),
        drop_rate=0.1,
        norm_cfg=backbone_norm_cfg,
        with_cls_token=False,
        interpolate_mode='bilinear',
    ),
    neck=dict(
        type='MLANeck',
        in_channels=[1024, 1024, 1024, 1024],
        out_channels=256,
        norm_cfg=norm_cfg,
        act_cfg=dict(type='ReLU'),
    ),
    decode_head=dict(
        type='SETRMLAHead',
        in_channels=(256, 256, 256, 256),
        channels=512,
        in_index=(0, 1, 2, 3),
        dropout_ratio=0,
        mla_channels=128,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=256,
            channels=256,
            in_index=0,
            dropout_ratio=0,
            num_convs=0,
            kernel_size=1,
            concat_input=False,
            num_classes=19,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='FCNHead',
            in_channels=256,
            channels=256,
            in_index=1,
            dropout_ratio=0,
            num_convs=0,
            kernel_size=1,
            concat_input=False,
            num_classes=19,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='FCNHead',
            in_channels=256,
            channels=256,
            in_index=2,
            dropout_ratio=0,
            num_convs=0,
            kernel_size=1,
            concat_input=False,
            num_classes=19,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='FCNHead',
            in_channels=256,
            channels=256,
            in_index=3,
            dropout_ratio=0,
            num_convs=0,
            kernel_size=1,
            concat_input=False,
            num_classes=19,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    ],
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 2,860 · avg_line_length: 28.802083 · max_line_length: 78 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/setr_naive.py
code:

# model settings
backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True)
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth',
    backbone=dict(
        type='VisionTransformer',
        img_size=(768, 768),
        patch_size=16,
        in_channels=3,
        embed_dims=1024,
        num_layers=24,
        num_heads=16,
        out_indices=(9, 14, 19, 23),
        drop_rate=0.1,
        norm_cfg=backbone_norm_cfg,
        with_cls_token=True,
        interpolate_mode='bilinear',
    ),
    decode_head=dict(
        type='SETRUPHead',
        in_channels=1024,
        channels=256,
        in_index=3,
        num_classes=19,
        dropout_ratio=0,
        norm_cfg=norm_cfg,
        num_convs=1,
        up_scale=4,
        kernel_size=1,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=[
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=0,
            num_classes=19,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            num_convs=1,
            up_scale=4,
            kernel_size=1,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=1,
            num_classes=19,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            num_convs=1,
            up_scale=4,
            kernel_size=1,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=2,
            num_classes=19,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            num_convs=1,
            up_scale=4,
            kernel_size=1,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4))
    ],
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 2,365 · avg_line_length: 28.209877 · max_line_length: 78 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/setr_pup.py
code:

# model settings
backbone_norm_cfg = dict(type='LN', eps=1e-6, requires_grad=True)
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='pretrain/jx_vit_large_p16_384-b3be5167.pth',
    backbone=dict(
        type='VisionTransformer',
        img_size=(768, 768),
        patch_size=16,
        in_channels=3,
        embed_dims=1024,
        num_layers=24,
        num_heads=16,
        out_indices=(9, 14, 19, 23),
        drop_rate=0.1,
        norm_cfg=backbone_norm_cfg,
        with_cls_token=True,
        interpolate_mode='bilinear',
    ),
    decode_head=dict(
        type='SETRUPHead',
        in_channels=1024,
        channels=256,
        in_index=3,
        num_classes=19,
        dropout_ratio=0,
        norm_cfg=norm_cfg,
        num_convs=4,
        up_scale=2,
        kernel_size=3,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=[
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=0,
            num_classes=19,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            num_convs=1,
            up_scale=4,
            kernel_size=3,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=1,
            num_classes=19,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            num_convs=1,
            up_scale=4,
            kernel_size=3,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
        dict(
            type='SETRUPHead',
            in_channels=1024,
            channels=256,
            in_index=2,
            num_classes=19,
            dropout_ratio=0,
            norm_cfg=norm_cfg,
            num_convs=1,
            up_scale=4,
            kernel_size=3,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    ],
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 2,366 · avg_line_length: 28.222222 · max_line_length: 78 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/stdc.py
code:

norm_cfg = dict(type='BN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='STDCContextPathNet',
        backbone_cfg=dict(
            type='STDCNet',
            stdc_type='STDCNet1',
            in_channels=3,
            channels=(32, 64, 256, 512, 1024),
            bottleneck_type='cat',
            num_convs=4,
            norm_cfg=norm_cfg,
            act_cfg=dict(type='ReLU'),
            with_final_conv=False),
        last_in_channels=(1024, 512),
        out_channels=128,
        ffm_cfg=dict(in_channels=384, out_channels=256, scale_factor=4)),
    decode_head=dict(
        type='FCNHead',
        in_channels=256,
        channels=256,
        num_convs=1,
        num_classes=19,
        in_index=3,
        concat_input=False,
        dropout_ratio=0.1,
        norm_cfg=norm_cfg,
        align_corners=True,
        sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=128,
            channels=64,
            num_convs=1,
            num_classes=19,
            in_index=2,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=128,
            channels=64,
            num_convs=1,
            num_classes=19,
            in_index=1,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='STDCHead',
            in_channels=256,
            channels=64,
            num_convs=1,
            num_classes=2,
            boundary_threshold=0.1,
            in_index=0,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=True,
            loss_decode=[
                dict(
                    type='CrossEntropyLoss',
                    loss_name='loss_ce',
                    use_sigmoid=True,
                    loss_weight=1.0),
                dict(type='DiceLoss', loss_name='loss_dice', loss_weight=1.0)
            ]),
    ],
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 2,721 · avg_line_length: 31.404762 · max_line_length: 78 · extension_type: py
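Each `sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000)` entry restricts the head's loss to hard pixels. A simplified sketch of the rule, assuming `gt_probs` holds each pixel's predicted probability of its ground-truth class (mmseg's sampler additionally handles ignore labels and works per image):

```python
import torch

def ohem_keep_mask(gt_probs, thresh=0.7, min_kept=10000):
    """gt_probs: flat (P,) tensor, predicted prob of the true class per pixel.
    Keep pixels the model is unsure about (prob < thresh); if fewer than
    min_kept qualify, fall back to the min_kept least-confident pixels."""
    keep = gt_probs < thresh
    if keep.sum() < min_kept:
        k = min(min_kept, gt_probs.numel())
        idx = gt_probs.topk(k, largest=False).indices
        keep = torch.zeros_like(gt_probs, dtype=torch.bool)
        keep[idx] = True
    return keep
```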
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/twins_pcpvt-s_fpn.py
code:

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_small_20220308-e638c41c.pth'  # noqa
# model settings
backbone_norm_cfg = dict(type='LN')
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    backbone=dict(
        type='PCPVT',
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        in_channels=3,
        embed_dims=[64, 128, 320, 512],
        num_heads=[1, 2, 5, 8],
        patch_sizes=[4, 2, 2, 2],
        strides=[4, 2, 2, 2],
        mlp_ratios=[8, 8, 4, 4],
        out_indices=(0, 1, 2, 3),
        qkv_bias=True,
        norm_cfg=backbone_norm_cfg,
        depths=[3, 4, 6, 3],
        sr_ratios=[8, 4, 2, 1],
        norm_after_stage=False,
        drop_rate=0.0,
        attn_drop_rate=0.,
        drop_path_rate=0.2),
    neck=dict(
        type='FPN',
        in_channels=[64, 128, 320, 512],
        out_channels=256,
        num_outs=4),
    decode_head=dict(
        type='FPNHead',
        in_channels=[256, 256, 256, 256],
        in_index=[0, 1, 2, 3],
        feature_strides=[4, 8, 16, 32],
        channels=128,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 1,442 · avg_line_length: 30.369565 · max_line_length: 122 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/twins_pcpvt-s_upernet.py
code:

checkpoint = 'https://download.openmmlab.com/mmsegmentation/v0.5/pretrain/twins/pcpvt_small_20220308-e638c41c.pth'  # noqa
# model settings
backbone_norm_cfg = dict(type='LN')
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    backbone=dict(
        type='PCPVT',
        init_cfg=dict(type='Pretrained', checkpoint=checkpoint),
        in_channels=3,
        embed_dims=[64, 128, 320, 512],
        num_heads=[1, 2, 5, 8],
        patch_sizes=[4, 2, 2, 2],
        strides=[4, 2, 2, 2],
        mlp_ratios=[8, 8, 4, 4],
        out_indices=(0, 1, 2, 3),
        qkv_bias=True,
        norm_cfg=backbone_norm_cfg,
        depths=[3, 4, 6, 3],
        sr_ratios=[8, 4, 2, 1],
        norm_after_stage=False,
        drop_rate=0.0,
        attn_drop_rate=0.,
        drop_path_rate=0.2),
    decode_head=dict(
        type='UPerHead',
        in_channels=[64, 128, 320, 512],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=512,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=320,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 1,687 · avg_line_length: 30.259259 · max_line_length: 122 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/upernet_beit.py
code:

norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='BEiT',
        img_size=(640, 640),
        patch_size=16,
        in_channels=3,
        embed_dims=768,
        num_layers=12,
        num_heads=12,
        mlp_ratio=4,
        out_indices=(3, 5, 7, 11),
        qv_bias=True,
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        norm_cfg=dict(type='LN', eps=1e-6),
        act_cfg=dict(type='GELU'),
        norm_eval=False,
        init_values=0.1),
    neck=dict(type='Feature2Pyramid', embed_dim=768, rescales=[4, 2, 1, 0.5]),
    decode_head=dict(
        type='UPerHead',
        in_channels=[768, 768, 768, 768],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=768,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=768,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=150,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 1,496 · avg_line_length: 28.352941 · max_line_length: 78 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/upernet_convnext.py
code:

norm_cfg = dict(type='SyncBN', requires_grad=True)
custom_imports = dict(imports='mmcls.models', allow_failed_imports=False)
checkpoint_file = 'https://download.openmmlab.com/mmclassification/v0/convnext/downstream/convnext-base_3rdparty_32xb128-noema_in1k_20220301-2a0ee547.pth'  # noqa
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='mmcls.ConvNeXt',
        arch='base',
        out_indices=[0, 1, 2, 3],
        drop_path_rate=0.4,
        layer_scale_init_value=1.0,
        gap_before_final_norm=False,
        init_cfg=dict(
            type='Pretrained', checkpoint=checkpoint_file,
            prefix='backbone.')),
    decode_head=dict(
        type='UPerHead',
        in_channels=[128, 256, 512, 1024],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=512,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=384,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 1,527 · avg_line_length: 32.955556 · max_line_length: 162 · extension_type: py
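`custom_imports = dict(imports='mmcls.models', allow_failed_imports=False)` makes the config loader import mmcls before building the model, which registers cross-library types such as `'mmcls.ConvNeXt'` so they can be resolved by name. mmcv exposes the underlying helper; a sketch of what happens at load time:

```python
from mmcv.utils import import_modules_from_strings

# Importing mmcls.models executes its registry decorators, so the string
# type 'mmcls.ConvNeXt' can later be resolved when the backbone is built.
import_modules_from_strings('mmcls.models', allow_failed_imports=False)
```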
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/upernet_mae.py
code:

norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='MAE',
        img_size=(640, 640),
        patch_size=16,
        in_channels=3,
        embed_dims=768,
        num_layers=12,
        num_heads=12,
        mlp_ratio=4,
        out_indices=(3, 5, 7, 11),
        attn_drop_rate=0.0,
        drop_path_rate=0.1,
        norm_cfg=dict(type='LN', eps=1e-6),
        act_cfg=dict(type='GELU'),
        norm_eval=False,
        init_values=0.1),
    neck=dict(type='Feature2Pyramid', embed_dim=768, rescales=[4, 2, 1, 0.5]),
    decode_head=dict(
        type='UPerHead',
        in_channels=[384, 384, 384, 384],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=512,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=384,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 1,471 · avg_line_length: 28.44 · max_line_length: 78 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/upernet_r50.py
code:

# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(
        type='ResNetV1c',
        depth=50,
        num_stages=4,
        out_indices=(0, 1, 2, 3),
        dilations=(1, 1, 1, 1),
        strides=(1, 2, 2, 2),
        norm_cfg=norm_cfg,
        norm_eval=False,
        style='pytorch',
        contract_dilation=True),
    decode_head=dict(
        type='UPerHead',
        in_channels=[256, 512, 1024, 2048],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=512,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=1024,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 1,301 · avg_line_length: 27.933333 · max_line_length: 74 · extension_type: py
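As in most configs here, the decode head carries `loss_weight=1.0` and the auxiliary head `loss_weight=0.4`, so training optimizes a weighted sum of the two cross-entropy terms. A toy sketch of that combination, assuming logits shaped (N, C, H, W) and 255 as the ignore label (mmseg's default):

```python
import torch
import torch.nn.functional as F

def total_loss(decode_logits, aux_logits, target):
    # Weighted sum matching loss_weight=1.0 (decode) and 0.4 (auxiliary).
    loss_decode = F.cross_entropy(decode_logits, target, ignore_index=255)
    loss_aux = F.cross_entropy(aux_logits, target, ignore_index=255)
    return 1.0 * loss_decode + 0.4 * loss_aux

# Example with random tensors: 2 images, 19 classes, 64x64 logit maps.
logits = torch.randn(2, 19, 64, 64)
aux = torch.randn(2, 19, 64, 64)
target = torch.randint(0, 19, (2, 64, 64))
print(total_loss(logits, aux, target))
```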
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/upernet_swin.py
code:

# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
backbone_norm_cfg = dict(type='LN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained=None,
    backbone=dict(
        type='SwinTransformer',
        pretrain_img_size=224,
        embed_dims=96,
        patch_size=4,
        window_size=7,
        mlp_ratio=4,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        strides=(4, 2, 2, 2),
        out_indices=(0, 1, 2, 3),
        qkv_bias=True,
        qk_scale=None,
        patch_norm=True,
        drop_rate=0.,
        attn_drop_rate=0.,
        drop_path_rate=0.3,
        use_abs_pos_embed=False,
        act_cfg=dict(type='GELU'),
        norm_cfg=backbone_norm_cfg),
    decode_head=dict(
        type='UPerHead',
        in_channels=[96, 192, 384, 768],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=512,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=384,
        in_index=2,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))

file_length: 1,590 · avg_line_length: 27.927273 · max_line_length: 74 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/models/upernet_vit-b16_ln_mln.py
code:

# model settings
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    pretrained='pretrain/jx_vit_base_p16_224-80ecf9dd.pth',
    backbone=dict(
        type='VisionTransformer',
        img_size=(512, 512),
        patch_size=16,
        in_channels=3,
        embed_dims=768,
        num_layers=12,
        num_heads=12,
        mlp_ratio=4,
        out_indices=(2, 5, 8, 11),
        qkv_bias=True,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        with_cls_token=True,
        norm_cfg=dict(type='LN', eps=1e-6),
        act_cfg=dict(type='GELU'),
        norm_eval=False,
        interpolate_mode='bicubic'),
    neck=dict(
        type='MultiLevelNeck',
        in_channels=[768, 768, 768, 768],
        out_channels=768,
        scales=[4, 2, 1, 0.5]),
    decode_head=dict(
        type='UPerHead',
        in_channels=[768, 768, 768, 768],
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=512,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    auxiliary_head=dict(
        type='FCNHead',
        in_channels=768,
        in_index=3,
        channels=256,
        num_convs=1,
        concat_input=False,
        dropout_ratio=0.1,
        num_classes=19,
        norm_cfg=norm_cfg,
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=0.4)),
    # model training and testing settings
    train_cfg=dict(),
    test_cfg=dict(mode='whole'))  # yapf: disable

file_length: 1,711 · avg_line_length: 28.517241 · max_line_length: 74 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/schedules/schedule_160k.py
code:

# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=160000)
checkpoint_config = dict(by_epoch=False, interval=16000)
evaluation = dict(interval=16000, metric='mIoU', pre_eval=True)

file_length: 397 · avg_line_length: 38.8 · max_line_length: 72 · extension_type: py
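The five schedule files differ only in `max_iters` and the checkpoint/eval interval; the `poly` policy itself decays the learning rate every iteration. A sketch of the mmcv-style polynomial decay these settings imply:

```python
def poly_lr(it, base_lr=0.01, max_iters=160000, power=0.9, min_lr=1e-4):
    # Polynomial decay toward min_lr, the way mmcv's 'poly' hook computes it.
    coeff = (1 - it / max_iters) ** power
    return (base_lr - min_lr) * coeff + min_lr

print(poly_lr(0))        # 0.01 at the first iteration
print(poly_lr(80000))    # ~0.0054 halfway through
print(poly_lr(160000))   # 0.0001 = min_lr at the end
```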
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/schedules/schedule_20k.py
code:

# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=20000)
checkpoint_config = dict(by_epoch=False, interval=2000)
evaluation = dict(interval=2000, metric='mIoU', pre_eval=True)

file_length: 394 · avg_line_length: 38.5 · max_line_length: 72 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/schedules/schedule_320k.py
code:

# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=320000)
checkpoint_config = dict(by_epoch=False, interval=32000)
evaluation = dict(interval=32000, metric='mIoU')

file_length: 382 · avg_line_length: 37.3 · max_line_length: 72 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/schedules/schedule_40k.py
code:

# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=40000)
checkpoint_config = dict(by_epoch=False, interval=4000)
evaluation = dict(interval=4000, metric='mIoU', pre_eval=True)

file_length: 394 · avg_line_length: 38.5 · max_line_length: 72 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/_base_/schedules/schedule_80k.py
code:

# optimizer
optimizer = dict(type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0005)
optimizer_config = dict()
# learning policy
lr_config = dict(policy='poly', power=0.9, min_lr=1e-4, by_epoch=False)
# runtime settings
runner = dict(type='IterBasedRunner', max_iters=80000)
checkpoint_config = dict(by_epoch=False, interval=8000)
evaluation = dict(interval=8000, metric='mIoU', pre_eval=True)

file_length: 394 · avg_line_length: 38.5 · max_line_length: 72 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/README.md
code:

# ANN

[Asymmetric Non-local Neural Networks for Semantic Segmentation](https://arxiv.org/abs/1908.07678)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/MendelXu/ANN">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ann_head.py#L185">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

The non-local module works as a particularly useful technique for semantic segmentation, while being criticized for its prohibitive computation and GPU memory occupation. In this paper, we present the Asymmetric Non-local Neural Network for semantic segmentation, which has two prominent components: the Asymmetric Pyramid Non-local Block (APNB) and the Asymmetric Fusion Non-local Block (AFNB). APNB integrates a pyramid sampling module into the non-local block to largely reduce the computation and memory consumption without sacrificing performance. AFNB is adapted from APNB to fuse features of different levels under sufficient consideration of long-range dependencies and thus considerably improves performance. Extensive experiments on semantic segmentation benchmarks demonstrate the effectiveness and efficiency of our work. In particular, we report state-of-the-art performance of 81.3 mIoU on the Cityscapes test set. For a 256x128 input, APNB is around 6 times faster than a non-local block on GPU while 28 times smaller in GPU running memory occupation. Code is available at: [this https URL](https://github.com/MendelXu/ANN).

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142898322-3bbd578c-e488-4bae-9c14-7598adac5cbd.png" width="70%"/>
</div>

## Citation

```bibtex
@inproceedings{zhu2019asymmetric,
  title={Asymmetric non-local neural networks for semantic segmentation},
  author={Zhu, Zhen and Xu, Mengde and Bai, Song and Huang, Tengteng and Bai, Xiang},
  booktitle={Proceedings of the IEEE/CVF International Conference on Computer Vision},
  pages={593--602},
  year={2019}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| ANN | R-50-D8 | 512x1024 | 40000 | 6 | 3.71 | 77.40 | 78.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211.log.json) |
| ANN | R-101-D8 | 512x1024 | 40000 | 9.5 | 2.55 | 76.55 | 78.85 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243-adf6eece.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243.log.json) |
| ANN | R-50-D8 | 769x769 | 40000 | 6.8 | 1.70 | 78.89 | 80.46 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712-2b46b04d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712.log.json) |
| ANN | R-101-D8 | 769x769 | 40000 | 10.7 | 1.15 | 79.32 | 80.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720-059bff28.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720.log.json) |
| ANN | R-50-D8 | 512x1024 | 80000 | - | - | 77.34 | 78.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911-5a9ad545.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911.log.json) |
| ANN | R-101-D8 | 512x1024 | 80000 | - | - | 77.14 | 78.81 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728-aceccc6e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728.log.json) |
| ANN | R-50-D8 | 769x769 | 80000 | - | - | 78.88 | 80.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426-cc7ff323.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426.log.json) |
| ANN | R-101-D8 | 769x769 | 80000 | - | - | 78.80 | 80.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713-a9d4be8d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| ANN | R-50-D8 | 512x512 | 80000 | 9.1 | 21.01 | 41.01 | 42.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818-26f75e11.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818.log.json) |
| ANN | R-101-D8 | 512x512 | 80000 | 12.5 | 14.12 | 42.94 | 44.18 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818-c0153543.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818.log.json) |
| ANN | R-50-D8 | 512x512 | 160000 | - | - | 41.74 | 42.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733-892247bc.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733.log.json) |
| ANN | R-101-D8 | 512x512 | 160000 | - | - | 42.94 | 44.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733-955eb1ec.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733.log.json) |

### Pascal VOC 2012 + Aug

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| ANN | R-50-D8 | 512x512 | 20000 | 6 | 20.92 | 74.86 | 76.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246-dfcb1c62.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246.log.json) |
| ANN | R-101-D8 | 512x512 | 20000 | 9.5 | 13.94 | 77.47 | 78.70 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246-2fad0042.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246.log.json) |
| ANN | R-50-D8 | 512x512 | 40000 | - | - | 76.56 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314-b5dac322.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314.log.json) |
| ANN | R-101-D8 | 512x512 | 40000 | - | - | 76.70 | 78.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314-bd205bbe.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314.log.json) |

file_length: 13,885 · avg_line_length: 200.246377 · max_line_length: 1,136 · extension_type: md
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann.yml
code:

Collections:
- Name: ANN
  Metadata:
    Training Data:
    - Cityscapes
    - ADE20K
    - Pascal VOC 2012 + Aug
  Paper:
    URL: https://arxiv.org/abs/1908.07678
    Title: Asymmetric Non-local Neural Networks for Semantic Segmentation
  README: configs/ann/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/ann_head.py#L185
    Version: v0.17.0
  Converted From:
    Code: https://github.com/MendelXu/ANN
Models:
- Name: ann_r50-d8_512x1024_40k_cityscapes
  In Collection: ANN
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 269.54
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 6.0
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.4
      mIoU(ms+flip): 78.57
  Config: configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_40k_cityscapes/ann_r50-d8_512x1024_40k_cityscapes_20200605_095211-049fc292.pth
- Name: ann_r101-d8_512x1024_40k_cityscapes
  In Collection: ANN
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 392.16
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 9.5
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 76.55
      mIoU(ms+flip): 78.85
  Config: configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_40k_cityscapes/ann_r101-d8_512x1024_40k_cityscapes_20200605_095243-adf6eece.pth
- Name: ann_r50-d8_769x769_40k_cityscapes
  In Collection: ANN
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 588.24
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 6.8
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.89
      mIoU(ms+flip): 80.46
  Config: configs/ann/ann_r50-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_40k_cityscapes/ann_r50-d8_769x769_40k_cityscapes_20200530_025712-2b46b04d.pth
- Name: ann_r101-d8_769x769_40k_cityscapes
  In Collection: ANN
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 869.57
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 10.7
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.32
      mIoU(ms+flip): 80.94
  Config: configs/ann/ann_r101-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_40k_cityscapes/ann_r101-d8_769x769_40k_cityscapes_20200530_025720-059bff28.pth
- Name: ann_r50-d8_512x1024_80k_cityscapes
  In Collection: ANN
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.34
      mIoU(ms+flip): 78.65
  Config: configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x1024_80k_cityscapes/ann_r50-d8_512x1024_80k_cityscapes_20200607_101911-5a9ad545.pth
- Name: ann_r101-d8_512x1024_80k_cityscapes
  In Collection: ANN
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.14
      mIoU(ms+flip): 78.81
  Config: configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x1024_80k_cityscapes/ann_r101-d8_512x1024_80k_cityscapes_20200607_013728-aceccc6e.pth
- Name: ann_r50-d8_769x769_80k_cityscapes
  In Collection: ANN
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.88
      mIoU(ms+flip): 80.57
  Config: configs/ann/ann_r50-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_769x769_80k_cityscapes/ann_r50-d8_769x769_80k_cityscapes_20200607_044426-cc7ff323.pth
- Name: ann_r101-d8_769x769_80k_cityscapes
  In Collection: ANN
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.8
      mIoU(ms+flip): 80.34
  Config: configs/ann/ann_r101-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_769x769_80k_cityscapes/ann_r101-d8_769x769_80k_cityscapes_20200607_013713-a9d4be8d.pth
- Name: ann_r50-d8_512x512_80k_ade20k
  In Collection: ANN
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 47.6
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.1
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 41.01
      mIoU(ms+flip): 42.3
  Config: configs/ann/ann_r50-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_80k_ade20k/ann_r50-d8_512x512_80k_ade20k_20200615_014818-26f75e11.pth
- Name: ann_r101-d8_512x512_80k_ade20k
  In Collection: ANN
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 70.82
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 12.5
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.94
      mIoU(ms+flip): 44.18
  Config: configs/ann/ann_r101-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_80k_ade20k/ann_r101-d8_512x512_80k_ade20k_20200615_014818-c0153543.pth
- Name: ann_r50-d8_512x512_160k_ade20k
  In Collection: ANN
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 41.74
      mIoU(ms+flip): 42.62
  Config: configs/ann/ann_r50-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_160k_ade20k/ann_r50-d8_512x512_160k_ade20k_20200615_231733-892247bc.pth
- Name: ann_r101-d8_512x512_160k_ade20k
  In Collection: ANN
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.94
      mIoU(ms+flip): 44.06
  Config: configs/ann/ann_r101-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_160k_ade20k/ann_r101-d8_512x512_160k_ade20k_20200615_231733-955eb1ec.pth
- Name: ann_r50-d8_512x512_20k_voc12aug
  In Collection: ANN
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 47.8
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.0
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 74.86
      mIoU(ms+flip): 76.13
  Config: configs/ann/ann_r50-d8_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_20k_voc12aug/ann_r50-d8_512x512_20k_voc12aug_20200617_222246-dfcb1c62.pth
- Name: ann_r101-d8_512x512_20k_voc12aug
  In Collection: ANN
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 71.74
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.5
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 77.47
      mIoU(ms+flip): 78.7
  Config: configs/ann/ann_r101-d8_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_20k_voc12aug/ann_r101-d8_512x512_20k_voc12aug_20200617_222246-2fad0042.pth
- Name: ann_r50-d8_512x512_40k_voc12aug
  In Collection: ANN
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 76.56
      mIoU(ms+flip): 77.51
  Config: configs/ann/ann_r50-d8_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r50-d8_512x512_40k_voc12aug/ann_r50-d8_512x512_40k_voc12aug_20200613_231314-b5dac322.pth
- Name: ann_r101-d8_512x512_40k_voc12aug
  In Collection: ANN
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 76.7
      mIoU(ms+flip): 78.06
  Config: configs/ann/ann_r101-d8_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ann/ann_r101-d8_512x512_40k_voc12aug/ann_r101-d8_512x512_40k_voc12aug_20200613_231314-bd205bbe.pth

file_length: 9,777 · avg_line_length: 30.954248 · max_line_length: 166 · extension_type: yml
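Since the model-zoo metadata is plain YAML, it can be consumed programmatically. A sketch using PyYAML (the `configs/ann/ann.yml` path assumes the repo layout above):

```python
import yaml

with open('configs/ann/ann.yml') as f:
    zoo = yaml.safe_load(f)

# One line per released checkpoint: name, benchmark, score, weight URL.
for model in zoo['Models']:
    result = model['Results'][0]
    print(model['Name'], result['Dataset'],
          result['Metrics']['mIoU'], model['Weights'])
```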
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py
code:

_base_ = './ann_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

file_length: 131 · avg_line_length: 43 · max_line_length: 79 · extension_type: py
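This two-line file (and every R-101 variant below it) works because the config loader recursively merges the child dict over the `_base_` file: only `pretrained` and `backbone.depth` change, and everything else is inherited. A sketch of inspecting the merged result with mmcv, assuming the repo layout above:

```python
from mmcv import Config

cfg = Config.fromfile('configs/ann/ann_r101-d8_512x1024_40k_cityscapes.py')
print(cfg.model.backbone.depth)  # 101 -- overridden by this file
print(cfg.model.backbone.type)   # 'ResNetV1c' -- inherited from the base
```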
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r101-d8_512x1024_80k_cityscapes.py
code:

_base_ = './ann_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

file_length: 131 · avg_line_length: 43 · max_line_length: 79 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r101-d8_512x512_160k_ade20k.py
code:

_base_ = './ann_r50-d8_512x512_160k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

file_length: 127 · avg_line_length: 41.666667 · max_line_length: 79 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r101-d8_512x512_20k_voc12aug.py
code:

_base_ = './ann_r50-d8_512x512_20k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

file_length: 128 · avg_line_length: 42 · max_line_length: 79 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r101-d8_512x512_40k_voc12aug.py
code:

_base_ = './ann_r50-d8_512x512_40k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

file_length: 128 · avg_line_length: 42 · max_line_length: 79 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r101-d8_512x512_80k_ade20k.py
code:

_base_ = './ann_r50-d8_512x512_80k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

file_length: 126 · avg_line_length: 41.333333 · max_line_length: 79 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r101-d8_769x769_40k_cityscapes.py
code:

_base_ = './ann_r50-d8_769x769_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

file_length: 130 · avg_line_length: 42.666667 · max_line_length: 79 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r101-d8_769x769_80k_cityscapes.py
code:

_base_ = './ann_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))

file_length: 130 · avg_line_length: 42.666667 · max_line_length: 79 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r50-d8_512x1024_40k_cityscapes.py
code:

_base_ = [
    '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]

file_length: 161 · avg_line_length: 31.4 · max_line_length: 73 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r50-d8_512x1024_80k_cityscapes.py
code:

_base_ = [
    '../_base_/models/ann_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]

file_length: 161 · avg_line_length: 31.4 · max_line_length: 73 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r50-d8_512x512_160k_ade20k.py
code:

_base_ = [
    '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))

file_length: 249 · avg_line_length: 34.714286 · max_line_length: 76 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r50-d8_512x512_20k_voc12aug.py
code:

_base_ = [
    '../_base_/models/ann_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))

file_length: 256 · avg_line_length: 35.714286 · max_line_length: 79 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r50-d8_512x512_40k_voc12aug.py
code:

_base_ = [
    '../_base_/models/ann_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))

file_length: 256 · avg_line_length: 35.714286 · max_line_length: 79 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r50-d8_512x512_80k_ade20k.py
code:

_base_ = [
    '../_base_/models/ann_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))

file_length: 248 · avg_line_length: 34.571429 · max_line_length: 76 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r50-d8_769x769_40k_cityscapes.py
code:

_base_ = [
    '../_base_/models/ann_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))

file_length: 348 · avg_line_length: 33.9 · max_line_length: 79 · extension_type: py
repo: mmsegmentation
file: mmsegmentation-master/configs/ann/ann_r50-d8_769x769_80k_cityscapes.py
code:

_base_ = [
    '../_base_/models/ann_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))

file_length: 348 · avg_line_length: 33.9 · max_line_length: 79 · extension_type: py
mmsegmentation
mmsegmentation-master/configs/apcnet/README.md
# APCNet

[Adaptive Pyramid Context Network for Semantic Segmentation](https://openaccess.thecvf.com/content_CVPR_2019/html/He_Adaptive_Pyramid_Context_Network_for_Semantic_Segmentation_CVPR_2019_paper.html)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/Junjun2016/APCNet">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/apc_head.py#L111">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Recent studies witnessed that context features can significantly improve the performance of deep semantic segmentation networks. Current context-based segmentation methods differ in how they construct context features and perform differently in practice. This paper first introduces three desirable properties of context features for the segmentation task. Specifically, we find that Global-guided Local Affinity (GLA) plays a vital role in constructing effective context features, while this property has been largely ignored in previous works. Based on this analysis, this paper proposes the Adaptive Pyramid Context Network (APCNet) for semantic segmentation. APCNet adaptively constructs multi-scale contextual representations with multiple well-designed Adaptive Context Modules (ACMs). Specifically, each ACM leverages a global image representation as guidance to estimate the local affinity coefficients for each sub-region, and then calculates a context vector with these affinities. We empirically evaluate APCNet on three semantic segmentation and scene parsing datasets: PASCAL VOC 2012, PASCAL Context, and ADE20K. Experimental results show that APCNet achieves state-of-the-art performance on all three benchmarks, and obtains a new record of 84.2% on the PASCAL VOC 2012 test set without MS COCO pre-training or any post-processing.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142898638-e1c0c6ae-9270-448e-aa01-bbac3a236db5.png" width="70%"/>
</div>

## Citation

```bibtex
@InProceedings{He_2019_CVPR,
    author = {He, Junjun and Deng, Zhongying and Zhou, Lei and Wang, Yali and Qiao, Yu},
    title = {Adaptive Pyramid Context Network for Semantic Segmentation},
    booktitle = {Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
    month = {June},
    year = {2019}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| APCNet | R-50-D8 | 512x1024 | 40000 | 7.7 | 3.57 | 78.02 | 79.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes_20201214_115717-5e88fa33.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes-20201214_115717.log.json) |
| APCNet | R-101-D8 | 512x1024 | 40000 | 11.2 | 2.15 | 79.08 | 80.34 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes_20201214_115716-abc9d111.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes-20201214_115716.log.json) |
| APCNet | R-50-D8 | 769x769 | 40000 | 8.7 | 1.52 | 77.89 | 79.75 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes_20201214_115717-2a2628d7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes-20201214_115717.log.json) |
| APCNet | R-101-D8 | 769x769 | 40000 | 12.7 | 1.03 | 77.96 | 79.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes_20201214_115718-b650de90.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes-20201214_115718.log.json) |
| APCNet | R-50-D8 | 512x1024 | 80000 | - | - | 78.96 | 79.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes_20201214_115716-987f51e3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes-20201214_115716.log.json) |
| APCNet | R-101-D8 | 512x1024 | 80000 | - | - | 79.64 | 80.61 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes_20201214_115705-b1ff208a.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes-20201214_115705.log.json) |
| APCNet | R-50-D8 | 769x769 | 80000 | - | - | 78.79 | 80.35 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes_20201214_115718-7ea9fa12.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes-20201214_115718.log.json) |
| APCNet | R-101-D8 | 769x769 | 80000 | - | - | 78.45 | 79.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes_20201214_115716-a7fbc2ab.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes-20201214_115716.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| APCNet | R-50-D8 | 512x512 | 80000 | 10.1 | 19.61 | 42.20 | 43.30 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k_20201214_115705-a8626293.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k-20201214_115705.log.json) |
| APCNet | R-101-D8 | 512x512 | 80000 | 13.6 | 13.10 | 45.54 | 46.65 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k_20201214_115704-c656c3fb.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k-20201214_115704.log.json) |
| APCNet | R-50-D8 | 512x512 | 160000 | - | - | 43.40 | 43.94 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k_20201214_115706-25fb92c2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k-20201214_115706.log.json) |
| APCNet | R-101-D8 | 512x512 | 160000 | - | - | 45.41 | 46.63 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/apcnet/apcnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k_20201214_115705-73f9a8d7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k-20201214_115705.log.json) |
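The ACM described in the abstract can be summarized in a few lines of tensor code. Below is a minimal, illustrative sketch of the global-guided local affinity idea at a single pyramid scale `s`; the class and variable names are mine, and the real implementation (`ACM` in `mmseg/models/decode_heads/apc_head.py`) differs in details such as normalization options:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class ACMSketch(nn.Module):
    """Illustrative Adaptive Context Module at one pyramid scale."""

    def __init__(self, in_channels, channels, scale):
        super().__init__()
        self.scale = scale
        self.reduce = nn.Conv2d(in_channels, channels, 1)
        # Predicts s*s affinity coefficients per pixel, guided by a
        # global image representation added onto the local features.
        self.affinity = nn.Conv2d(channels, scale * scale, 1)

    def forward(self, x):
        b, _, h, w = x.shape
        feats = self.reduce(x)                              # (B, C, H, W)
        global_info = F.adaptive_avg_pool2d(feats, 1)       # (B, C, 1, 1)
        # Global-guided local affinity: each pixel attends to s*s regions.
        affinity = self.affinity(feats + global_info)       # (B, s*s, H, W)
        affinity = torch.sigmoid(affinity.flatten(2).transpose(1, 2))
        # Sub-region representations at the current pyramid scale.
        regions = F.adaptive_avg_pool2d(feats, self.scale)  # (B, C, s, s)
        regions = regions.flatten(2).transpose(1, 2)        # (B, s*s, C)
        # Per-pixel context vector: affinity-weighted sum of region features.
        context = torch.bmm(affinity, regions)              # (B, H*W, C)
        return context.transpose(1, 2).reshape(b, -1, h, w)


x = torch.randn(2, 2048, 16, 16)
print(ACMSketch(2048, 512, scale=3)(x).shape)  # torch.Size([2, 512, 16, 16])
```

APCNet runs several such modules at different `scale` values and concatenates their outputs, which is where the "adaptive pyramid" in the name comes from.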
11,402
189.05
1,367
md
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet.yml
Collections:
- Name: APCNet
  Metadata:
    Training Data:
    - Cityscapes
    - ADE20K
  Paper:
    URL: https://openaccess.thecvf.com/content_CVPR_2019/html/He_Adaptive_Pyramid_Context_Network_for_Semantic_Segmentation_CVPR_2019_paper.html
    Title: Adaptive Pyramid Context Network for Semantic Segmentation
  README: configs/apcnet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/apc_head.py#L111
    Version: v0.17.0
  Converted From:
    Code: https://github.com/Junjun2016/APCNet
Models:
- Name: apcnet_r50-d8_512x1024_40k_cityscapes
  In Collection: APCNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 280.11
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 7.7
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.02
      mIoU(ms+flip): 79.26
  Config: configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes/apcnet_r50-d8_512x1024_40k_cityscapes_20201214_115717-5e88fa33.pth
- Name: apcnet_r101-d8_512x1024_40k_cityscapes
  In Collection: APCNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 465.12
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 11.2
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.08
      mIoU(ms+flip): 80.34
  Config: configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes/apcnet_r101-d8_512x1024_40k_cityscapes_20201214_115716-abc9d111.pth
- Name: apcnet_r50-d8_769x769_40k_cityscapes
  In Collection: APCNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 657.89
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 8.7
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.89
      mIoU(ms+flip): 79.75
  Config: configs/apcnet/apcnet_r50-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_40k_cityscapes/apcnet_r50-d8_769x769_40k_cityscapes_20201214_115717-2a2628d7.pth
- Name: apcnet_r101-d8_769x769_40k_cityscapes
  In Collection: APCNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 970.87
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 12.7
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.96
      mIoU(ms+flip): 79.24
  Config: configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_40k_cityscapes/apcnet_r101-d8_769x769_40k_cityscapes_20201214_115718-b650de90.pth
- Name: apcnet_r50-d8_512x1024_80k_cityscapes
  In Collection: APCNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.96
      mIoU(ms+flip): 79.94
  Config: configs/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes/apcnet_r50-d8_512x1024_80k_cityscapes_20201214_115716-987f51e3.pth
- Name: apcnet_r101-d8_512x1024_80k_cityscapes
  In Collection: APCNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.64
      mIoU(ms+flip): 80.61
  Config: configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes/apcnet_r101-d8_512x1024_80k_cityscapes_20201214_115705-b1ff208a.pth
- Name: apcnet_r50-d8_769x769_80k_cityscapes
  In Collection: APCNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.79
      mIoU(ms+flip): 80.35
  Config: configs/apcnet/apcnet_r50-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_769x769_80k_cityscapes/apcnet_r50-d8_769x769_80k_cityscapes_20201214_115718-7ea9fa12.pth
- Name: apcnet_r101-d8_769x769_80k_cityscapes
  In Collection: APCNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.45
      mIoU(ms+flip): 79.91
  Config: configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_769x769_80k_cityscapes/apcnet_r101-d8_769x769_80k_cityscapes_20201214_115716-a7fbc2ab.pth
- Name: apcnet_r50-d8_512x512_80k_ade20k
  In Collection: APCNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 50.99
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 10.1
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.2
      mIoU(ms+flip): 43.3
  Config: configs/apcnet/apcnet_r50-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_80k_ade20k/apcnet_r50-d8_512x512_80k_ade20k_20201214_115705-a8626293.pth
- Name: apcnet_r101-d8_512x512_80k_ade20k
  In Collection: APCNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 76.34
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 13.6
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 45.54
      mIoU(ms+flip): 46.65
  Config: configs/apcnet/apcnet_r101-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_80k_ade20k/apcnet_r101-d8_512x512_80k_ade20k_20201214_115704-c656c3fb.pth
- Name: apcnet_r50-d8_512x512_160k_ade20k
  In Collection: APCNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.4
      mIoU(ms+flip): 43.94
  Config: configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r50-d8_512x512_160k_ade20k/apcnet_r50-d8_512x512_160k_ade20k_20201214_115706-25fb92c2.pth
- Name: apcnet_r101-d8_512x512_160k_ade20k
  In Collection: APCNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 45.41
      mIoU(ms+flip): 46.63
  Config: configs/apcnet/apcnet_r101-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/apcnet/apcnet_r101-d8_512x512_160k_ade20k/apcnet_r101-d8_512x512_160k_ade20k_20201214_115705-73f9a8d7.pth
7,771
32.356223
175
yml
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py
_base_ = './apcnet_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
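# The R-101 variants above override only the pretrained checkpoint and the
# backbone depth; everything else is inherited through `_base_`. A quick way
# to see what the merged config resolves to -- a sketch using the mmcv.Config
# API that mmseg 0.x builds on, run from the repo root:
from mmcv import Config

cfg = Config.fromfile(
    'configs/apcnet/apcnet_r101-d8_512x1024_40k_cityscapes.py')
print(cfg.model.pretrained)        # open-mmlab://resnet101_v1c
print(cfg.model.backbone.depth)    # 101
print(cfg.model.decode_head.type)  # APCHead, inherited from the base model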
134
44
79
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r101-d8_512x1024_80k_cityscapes.py
_base_ = './apcnet_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
134
44
79
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r101-d8_512x512_160k_ade20k.py
_base_ = './apcnet_r50-d8_512x512_160k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
130
42.666667
79
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r101-d8_512x512_80k_ade20k.py
_base_ = './apcnet_r50-d8_512x512_80k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
129
42.333333
79
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r101-d8_769x769_40k_cityscapes.py
_base_ = './apcnet_r50-d8_769x769_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r101-d8_769x769_80k_cityscapes.py
_base_ = './apcnet_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r50-d8_512x1024_40k_cityscapes.py
_base_ = [
    '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
164
32
76
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r50-d8_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
164
32
76
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r50-d8_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
252
35.142857
76
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r50-d8_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/apcnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
251
35
76
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r50-d8_769x769_40k_cityscapes.py
_base_ = [
    '../_base_/models/apcnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
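# With mode='slide', crop (769, 769) and stride (513, 513), adjacent windows
# overlap by 769 - 513 = 256 pixels. A small sanity check of the window count,
# assuming the (2049, 1025) evaluation resolution of the cityscapes_769x769
# dataset config (an assumption, not stated in this file):
import math


def num_windows(size, crop=769, stride=513):
    # Number of sliding windows needed to cover one spatial dimension.
    return max(math.ceil((size - crop) / stride), 0) + 1


print(num_windows(1025) * num_windows(2049))  # 2 x 4 = 8 crops per image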
351
34.2
79
py
mmsegmentation
mmsegmentation-master/configs/apcnet/apcnet_r50-d8_769x769_80k_cityscapes.py
_base_ = [
    '../_base_/models/apcnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
351
34.2
79
py
mmsegmentation
mmsegmentation-master/configs/beit/README.md
# BEiT

[BEiT: BERT Pre-Training of Image Transformers](https://arxiv.org/abs/2106.08254)

## Introduction

<!-- [BACKBONE] -->

<a href="https://github.com/microsoft/unilm/tree/master/beit">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.23.0/mmseg/models/backbones/beit.py#L404">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

We introduce a self-supervised vision representation model BEiT, which stands for Bidirectional Encoder representation from Image Transformers. Following BERT developed in the natural language processing area, we propose a masked image modeling task to pretrain vision Transformers. Specifically, each image has two views in our pre-training, i.e., image patches (such as 16x16 pixels) and visual tokens (i.e., discrete tokens). We first "tokenize" the original image into visual tokens. Then we randomly mask some image patches and feed them into the backbone Transformer. The pre-training objective is to recover the original visual tokens based on the corrupted image patches. After pre-training BEiT, we directly fine-tune the model parameters on downstream tasks by appending task layers upon the pretrained encoder. Experimental results on image classification and semantic segmentation show that our model achieves competitive results with previous pre-training methods. For example, base-size BEiT achieves 83.2% top-1 accuracy on ImageNet-1K, significantly outperforming from-scratch DeiT training (81.8%) with the same setup. Moreover, large-size BEiT obtains 86.3% using only ImageNet-1K, even outperforming ViT-L with supervised pre-training on ImageNet-22K (85.2%). The code and pretrained models are available at [this https URL](https://github.com/microsoft/unilm/tree/master/beit).

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/93248678/160155758-781c9a45-b1d7-4530-9015-88eca6645006.png" width="70%"/>
</div>

## Citation

```bibtex
@inproceedings{beit,
    title={{BEiT}: {BERT} Pre-Training of Image Transformers},
    author={Hangbo Bao and Li Dong and Songhao Piao and Furu Wei},
    booktitle={International Conference on Learning Representations},
    year={2022},
    url={https://openreview.net/forum?id=p-BhZSz59o4}
}
```

## Usage

To use the pre-trained models of other repositories, it is necessary to convert their checkpoint keys. We provide a script [`beit2mmseg.py`](../../tools/model_converters/beit2mmseg.py) in the tools directory to convert the keys of models from [the official repo](https://github.com/microsoft/unilm/tree/master/beit/semantic_segmentation) to MMSegmentation style.

```shell
python tools/model_converters/beit2mmseg.py ${PRETRAIN_PATH} ${STORE_PATH}
```

E.g.

```shell
python tools/model_converters/beit2mmseg.py https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth pretrain/beit_base_patch16_224_pt22k_ft22k.pth
```

This script converts the model from `PRETRAIN_PATH` and stores the converted model in `STORE_PATH`.

In our default setting, the pretrained models and the original models they are converted from are listed below:

| pretrained models | original models |
| ----------------- | --------------- |
| BEiT_base.pth | ['BEiT_base'](https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_base_patch16_224_pt22k_ft22k.pth) |
| BEiT_large.pth | ['BEiT_large'](https://conversationhub.blob.core.windows.net/beit-share-public/beit/beit_large_patch16_224_pt22k_ft22k.pth) |

Verify the single-scale results of the model:

```shell
sh tools/dist_test.sh \
configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py \
upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth $GPUS --eval mIoU
```

Since the relative position embedding requires the input height and width to be equal, sliding-window inference is adopted for multi-scale testing, with `min_size=640` so that the shortest edge is always 640. Multi-scale inference is therefore performed with a dedicated config rather than the `--aug-test` flag:

```shell
sh tools/dist_test.sh \
configs/beit/upernet_beit-large_fp16_640x640_160k_ade20k_ms.py \
upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth $GPUS --eval mIoU
```

## Results and models

### ADE20K

| Method | Backbone | Crop Size | pretrain | pretrain img size | Batch Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------- | ------ | ------- | ------------ | ------- | -- | ------ | ----- | ---- | ----- | ------------: | ------ | -------- |
| UPerNet | BEiT-B | 640x640 | ImageNet-22K | 224x224 | 16 | 160000 | 15.88 | 2.00 | 53.08 | 53.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/beit/upernet_beit-base_8x2_640x640_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-base_8x2_640x640_160k_ade20k/upernet_beit-base_8x2_640x640_160k_ade20k-eead221d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-base_8x2_640x640_160k_ade20k/upernet_beit-base_8x2_640x640_160k_ade20k.log.json) |
| UPerNet | BEiT-L | 640x640 | ImageNet-22K | 224x224 | 8 | 320000 | 22.64 | 0.96 | 56.33 | 56.84 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k/upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.log.json) |
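At its core, `beit2mmseg.py` renames checkpoint keys from the official layout to the MMSegmentation layout. A purely illustrative sketch of that kind of converter follows; the `patch_embed.proj` → `patch_embed.projection` rename is an example of the pattern, not the verified full mapping, which lives in `tools/model_converters/beit2mmseg.py`:

```python
import torch


def convert_keys(src_path, dst_path):
    """Rename official BEiT checkpoint keys into mmseg-style keys (sketch)."""
    ckpt = torch.load(src_path, map_location='cpu')
    # Official checkpoints may nest the weights under a 'model' field.
    state_dict = ckpt.get('model', ckpt) if isinstance(ckpt, dict) else ckpt
    new_state_dict = {}
    for key, value in state_dict.items():
        # Example rename; the real script applies a full table of these.
        new_key = key.replace('patch_embed.proj', 'patch_embed.projection')
        new_state_dict[new_key] = value
    torch.save(new_state_dict, dst_path)
```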
6,944
79.755814
1,396
md
mmsegmentation
mmsegmentation-master/configs/beit/beit.yml
Models:
- Name: upernet_beit-base_8x2_640x640_160k_ade20k
  In Collection: UPerNet
  Metadata:
    backbone: BEiT-B
    crop size: (640,640)
    lr schd: 160000
    inference time (ms/im):
    - value: 500.0
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (640,640)
    Training Memory (GB): 15.88
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 53.08
      mIoU(ms+flip): 53.84
  Config: configs/beit/upernet_beit-base_8x2_640x640_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-base_8x2_640x640_160k_ade20k/upernet_beit-base_8x2_640x640_160k_ade20k-eead221d.pth
- Name: upernet_beit-large_fp16_8x1_640x640_160k_ade20k
  In Collection: UPerNet
  Metadata:
    backbone: BEiT-L
    crop size: (640,640)
    lr schd: 320000
    inference time (ms/im):
    - value: 1041.67
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP16
      resolution: (640,640)
    Training Memory (GB): 22.64
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 56.33
      mIoU(ms+flip): 56.84
  Config: configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k/upernet_beit-large_fp16_8x1_640x640_160k_ade20k-8fc0dd5d.pth
1,422
29.934783
175
yml
mmsegmentation
mmsegmentation-master/configs/beit/upernet_beit-base_640x640_160k_ade20k_ms.py
_base_ = './upernet_beit-base_8x2_640x640_160k_ade20k.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2560, 640),
        img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True, min_size=640),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    val=dict(pipeline=test_pipeline),
    test=dict(pipeline=test_pipeline),
    samples_per_gpu=2)
763
29.56
77
py
mmsegmentation
mmsegmentation-master/configs/beit/upernet_beit-base_8x2_640x640_160k_ade20k.py
_base_ = [
    '../_base_/models/upernet_beit.py', '../_base_/datasets/ade20k_640x640.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    pretrained='pretrain/beit_base_patch16_224_pt22k_ft22k.pth',
    test_cfg=dict(mode='slide', crop_size=(640, 640), stride=(426, 426)))
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=3e-5,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    constructor='LayerDecayOptimizerConstructor',
    paramwise_cfg=dict(num_layers=12, layer_decay_rate=0.9))
lr_config = dict(
    _delete_=True,
    policy='poly',
    warmup='linear',
    warmup_iters=1500,
    warmup_ratio=1e-6,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)
# By default, models are trained on 8 GPUs with 2 images per GPU
data = dict(samples_per_gpu=2)
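# LayerDecayOptimizerConstructor scales the learning rate per transformer
# layer: later layers stay near the base LR while earlier ones decay
# geometrically. A sketch of the usual rule under the values above (the
# constructor's exact layer indexing is an implementation detail):
base_lr, decay, num_layers = 3e-5, 0.9, 12
for layer_id in range(num_layers + 1):  # 0 = patch embedding, 12 = last block
    scale = decay**(num_layers - layer_id)
    print(f'layer {layer_id:2d}: lr = {base_lr * scale:.2e}')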
821
25.516129
79
py
mmsegmentation
mmsegmentation-master/configs/beit/upernet_beit-large_fp16_640x640_160k_ade20k_ms.py
_base_ = './upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py'
img_norm_cfg = dict(
    mean=[123.675, 116.28, 103.53], std=[58.395, 57.12, 57.375], to_rgb=True)
test_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='MultiScaleFlipAug',
        img_scale=(2560, 640),
        img_ratios=[0.5, 0.75, 1.0, 1.25, 1.5, 1.75],
        flip=True,
        transforms=[
            dict(type='Resize', keep_ratio=True, min_size=640),
            dict(type='RandomFlip'),
            dict(type='Normalize', **img_norm_cfg),
            dict(type='ImageToTensor', keys=['img']),
            dict(type='Collect', keys=['img']),
        ])
]
data = dict(
    val=dict(pipeline=test_pipeline), test=dict(pipeline=test_pipeline))
742
31.304348
77
py
mmsegmentation
mmsegmentation-master/configs/beit/upernet_beit-large_fp16_8x1_640x640_160k_ade20k.py
_base_ = [
    '../_base_/models/upernet_beit.py', '../_base_/datasets/ade20k_640x640.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_320k.py'
]
model = dict(
    pretrained='pretrain/beit_large_patch16_224_pt22k_ft22k.pth',
    backbone=dict(
        type='BEiT',
        embed_dims=1024,
        num_layers=24,
        num_heads=16,
        mlp_ratio=4,
        qv_bias=True,
        init_values=1e-6,
        drop_path_rate=0.2,
        out_indices=[7, 11, 15, 23]),
    neck=dict(embed_dim=1024, rescales=[4, 2, 1, 0.5]),
    decode_head=dict(
        in_channels=[1024, 1024, 1024, 1024], num_classes=150, channels=1024),
    auxiliary_head=dict(in_channels=1024, num_classes=150),
    test_cfg=dict(mode='slide', crop_size=(640, 640), stride=(426, 426)))
optimizer = dict(
    _delete_=True,
    type='AdamW',
    lr=2e-5,
    betas=(0.9, 0.999),
    weight_decay=0.05,
    constructor='LayerDecayOptimizerConstructor',
    paramwise_cfg=dict(num_layers=24, layer_decay_rate=0.95))
lr_config = dict(
    _delete_=True,
    policy='poly',
    warmup='linear',
    warmup_iters=3000,
    warmup_ratio=1e-6,
    power=1.0,
    min_lr=0.0,
    by_epoch=False)
data = dict(samples_per_gpu=1)
optimizer_config = dict(
    type='GradientCumulativeFp16OptimizerHook', cumulative_iters=2)
fp16 = dict()
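# Two settings keep BEiT-L within memory budget here: fp16 = dict() enables
# mixed precision, and GradientCumulativeFp16OptimizerHook with
# cumulative_iters=2 steps the optimizer every second iteration, so the
# effective batch size matches the base model's 8 GPU x 2 image recipe
# (the GPU count is assumed from the `8x1` in the config name):
gpus, samples_per_gpu, cumulative_iters = 8, 1, 2
print(gpus * samples_per_gpu * cumulative_iters)  # 16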
1,328
26.6875
79
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/README.md
# BiSeNetV1

[BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation](https://arxiv.org/abs/1808.00897)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/ycszen/TorchSeg/tree/master/model/bisenet">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/backbones/bisenetv1.py#L266">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Semantic segmentation requires both rich spatial information and a sizeable receptive field. However, modern approaches usually compromise spatial resolution to achieve real-time inference speed, which leads to poor performance. In this paper, we address this dilemma with a novel Bilateral Segmentation Network (BiSeNet). We first design a Spatial Path with a small stride to preserve the spatial information and generate high-resolution features. Meanwhile, a Context Path with a fast downsampling strategy is employed to obtain a sufficient receptive field. On top of the two paths, we introduce a new Feature Fusion Module to combine features efficiently. The proposed architecture strikes a good balance between speed and segmentation performance on the Cityscapes, CamVid, and COCO-Stuff datasets. Specifically, for a 2048x1024 input, we achieve 68.4% mean IoU on the Cityscapes test dataset at 105 FPS on one NVIDIA Titan XP card, which is significantly faster than existing methods with comparable performance.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142898839-a0a78148-848a-41b2-8682-b1f61ac004ba.png" width="70%"/>
</div>

## Citation

```bibtex
@inproceedings{yu2018bisenet,
  title={Bisenet: Bilateral segmentation network for real-time semantic segmentation},
  author={Yu, Changqian and Wang, Jingbo and Peng, Chao and Gao, Changxin and Yu, Gang and Sang, Nong},
  booktitle={Proceedings of the European conference on computer vision (ECCV)},
  pages={325--341},
  year={2018}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ----------------------- | -------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------ | -------- |
| BiSeNetV1 (No Pretrain) | R-18-D32 | 1024x1024 | 160000 | 5.69 | 31.77 | 74.44 | 77.05 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes_20210922_172239-c55e78e2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes_20210922_172239.log.json) |
| BiSeNetV1 | R-18-D32 | 1024x1024 | 160000 | 5.69 | 31.77 | 74.37 | 76.91 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210905_220251-8ba80eff.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210905_220251.log.json) |
| BiSeNetV1 (4x8) | R-18-D32 | 1024x1024 | 160000 | 11.17 | 31.77 | 75.16 | 77.24 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes_20210905_220322-bb8db75f.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes_20210905_220322.log.json) |
| BiSeNetV1 (No Pretrain) | R-50-D32 | 1024x1024 | 160000 | 15.39 | 7.71 | 76.92 | 78.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes_20210923_222639-7b28a2a6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes_20210923_222639.log.json) |
| BiSeNetV1 | R-50-D32 | 1024x1024 | 160000 | 15.39 | 7.71 | 77.68 | 79.57 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210917_234628-8b304447.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210917_234628.log.json) |

### COCO-Stuff 164k

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ----------------------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------- | ------ | -------- |
| BiSeNetV1 (No Pretrain) | R-18-D32 | 512x512 | 160000 | - | - | 25.45 | 26.15 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211022_054328-046aa2f2.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211022_054328.log.json) |
| BiSeNetV1 | R-18-D32 | 512x512 | 160000 | 6.33 | 74.24 | 28.55 | 29.26 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211023_013100-f700dbf7.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211023_013100.log.json) |
| BiSeNetV1 (No Pretrain) | R-50-D32 | 512x512 | 160000 | - | - | 29.82 | 30.33 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_040616-d2bb0df4.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_040616.log.json) |
| BiSeNetV1 | R-50-D32 | 512x512 | 160000 | 9.28 | 32.60 | 34.88 | 35.37 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_181932-66747911.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_181932.log.json) |
| BiSeNetV1 (No Pretrain) | R-101-D32 | 512x512 | 160000 | - | - | 31.14 | 31.76 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211102_164147-c6b32c3b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211102_164147.log.json) |
| BiSeNetV1 | R-101-D32 | 512x512 | 160000 | 10.36 | 25.25 | 37.38 | 37.99 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_225220-28c8f092.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_225220.log.json) |

Note:

- `4x8`: Using 4 GPUs with 8 samples per GPU in training.
- For BiSeNetV1 on the Cityscapes dataset, the default setting is 4 GPUs with 4 samples per GPU in training.
- `No Pretrain` means the model is trained from scratch.
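The Feature Fusion Module mentioned in the abstract is what joins the Spatial Path and Context Path outputs. A minimal sketch of the published FFM design (concatenate, project, then channel-wise re-weighting) follows; it assumes both inputs share the same spatial size, and it is illustrative rather than mmseg's exact module:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F


class FFMSketch(nn.Module):
    """Illustrative Feature Fusion Module for BiSeNet-style networks."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.project = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 1, bias=False),
            nn.BatchNorm2d(out_channels), nn.ReLU(inplace=True))
        # Channel attention computed from globally pooled fused features.
        self.attn = nn.Sequential(
            nn.Conv2d(out_channels, out_channels // 4, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels // 4, out_channels, 1), nn.Sigmoid())

    def forward(self, spatial, context):
        fused = self.project(torch.cat([spatial, context], dim=1))
        weight = self.attn(F.adaptive_avg_pool2d(fused, 1))
        # Re-weighted features are added back as a residual.
        return fused + fused * weight


s = torch.randn(1, 128, 128, 256)  # Spatial Path output (stride 8)
c = torch.randn(1, 128, 128, 256)  # Context Path output, upsampled
print(FFMSketch(256, 256)(s, c).shape)  # torch.Size([1, 256, 128, 256])
```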
12,951
198.261538
1,028
md
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1.yml
Collections:
- Name: BiSeNetV1
  Metadata:
    Training Data:
    - Cityscapes
    - COCO-Stuff 164k
  Paper:
    URL: https://arxiv.org/abs/1808.00897
    Title: 'BiSeNet: Bilateral Segmentation Network for Real-time Semantic Segmentation'
  README: configs/bisenetv1/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/backbones/bisenetv1.py#L266
    Version: v0.18.0
  Converted From:
    Code: https://github.com/ycszen/TorchSeg/tree/master/model/bisenet
Models:
- Name: bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-18-D32
    crop size: (1024,1024)
    lr schd: 160000
    inference time (ms/im):
    - value: 31.48
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (1024,1024)
    Training Memory (GB): 5.69
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 74.44
      mIoU(ms+flip): 77.05
  Config: configs/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes_20210922_172239-c55e78e2.pth
- Name: bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-18-D32
    crop size: (1024,1024)
    lr schd: 160000
    inference time (ms/im):
    - value: 31.48
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (1024,1024)
    Training Memory (GB): 5.69
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 74.37
      mIoU(ms+flip): 76.91
  Config: configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210905_220251-8ba80eff.pth
- Name: bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-18-D32
    crop size: (1024,1024)
    lr schd: 160000
    inference time (ms/im):
    - value: 31.48
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (1024,1024)
    Training Memory (GB): 11.17
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 75.16
      mIoU(ms+flip): 77.24
  Config: configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes_20210905_220322-bb8db75f.pth
- Name: bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-50-D32
    crop size: (1024,1024)
    lr schd: 160000
    inference time (ms/im):
    - value: 129.7
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (1024,1024)
    Training Memory (GB): 15.39
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 76.92
      mIoU(ms+flip): 78.87
  Config: configs/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes_20210923_222639-7b28a2a6.pth
- Name: bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-50-D32
    crop size: (1024,1024)
    lr schd: 160000
    inference time (ms/im):
    - value: 129.7
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (1024,1024)
    Training Memory (GB): 15.39
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.68
      mIoU(ms+flip): 79.57
  Config: configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes_20210917_234628-8b304447.pth
- Name: bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-18-D32
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: COCO-Stuff 164k
    Metrics:
      mIoU: 25.45
      mIoU(ms+flip): 26.15
  Config: configs/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211022_054328-046aa2f2.pth
- Name: bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-18-D32
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 13.47
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.33
  Results:
  - Task: Semantic Segmentation
    Dataset: COCO-Stuff 164k
    Metrics:
      mIoU: 28.55
      mIoU(ms+flip): 29.26
  Config: configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211023_013100-f700dbf7.pth
- Name: bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-50-D32
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: COCO-Stuff 164k
    Metrics:
      mIoU: 29.82
      mIoU(ms+flip): 30.33
  Config: configs/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_040616-d2bb0df4.pth
- Name: bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-50-D32
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 30.67
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.28
  Results:
  - Task: Semantic Segmentation
    Dataset: COCO-Stuff 164k
    Metrics:
      mIoU: 34.88
      mIoU(ms+flip): 35.37
  Config: configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_181932-66747911.pth
- Name: bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-101-D32
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: COCO-Stuff 164k
    Metrics:
      mIoU: 31.14
      mIoU(ms+flip): 31.76
  Config: configs/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211102_164147-c6b32c3b.pth
- Name: bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k
  In Collection: BiSeNetV1
  Metadata:
    backbone: R-101-D32
    crop size: (512,512)
    lr schd: 160000
    inference time (ms/im):
    - value: 39.6
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 10.36
  Results:
  - Task: Semantic Segmentation
    Dataset: COCO-Stuff 164k
    Metrics:
      mIoU: 37.38
      mIoU(ms+flip): 37.99
  Config: configs/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k_20211101_225220-28c8f092.pth
8,708
36.059574
234
yml
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r101-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
_base_ = './bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(
                type='Pretrained', checkpoint='open-mmlab://resnet101_v1c'))))
240
33.428571
78
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r101-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
_base_ = [
    '../_base_/models/bisenetv1_r18-d32.py',
    '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        context_channels=(512, 1024, 2048),
        spatial_channels=(256, 256, 256, 512),
        out_channels=1024,
        backbone_cfg=dict(type='ResNet', depth=101)),
    decode_head=dict(in_channels=1024, channels=1024, num_classes=171),
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=512,
            channels=256,
            num_convs=1,
            num_classes=171,
            in_index=1,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=512,
            channels=256,
            num_convs=1,
            num_classes=171,
            in_index=2,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    ])
lr_config = dict(warmup='linear', warmup_iters=1000)
optimizer = dict(lr=0.005)
1,373
31.714286
78
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r18-d32_4x4_1024x1024_160k_cityscapes.py
_base_ = [
    '../_base_/models/bisenetv1_r18-d32.py',
    '../_base_/datasets/cityscapes_1024x1024.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
lr_config = dict(warmup='linear', warmup_iters=1000)
optimizer = dict(lr=0.025)
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
)
324
26.083333
74
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py
_base_ = [
    '../_base_/models/bisenetv1_r18-d32.py',
    '../_base_/datasets/cityscapes_1024x1024.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(
                type='Pretrained', checkpoint='open-mmlab://resnet18_v1c'))))
lr_config = dict(warmup='linear', warmup_iters=1000)
optimizer = dict(lr=0.025)
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
)
489
27.823529
77
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_4x8_1024x1024_160k_cityscapes.py
_base_ = './bisenetv1_r18-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py'
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
)
134
21.5
72
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r18-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
_base_ = './bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(
                type='Pretrained', checkpoint='open-mmlab://resnet18_v1c'))), )
240
33.428571
79
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r18-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
_base_ = [
    '../_base_/models/bisenetv1_r18-d32.py',
    '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    decode_head=dict(num_classes=171),
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=128,
            channels=64,
            num_convs=1,
            num_classes=171,
            in_index=1,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=128,
            channels=64,
            num_convs=1,
            num_classes=171,
            in_index=2,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    ])
lr_config = dict(warmup='linear', warmup_iters=1000)
optimizer = dict(lr=0.005)
1,147
30.027027
78
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py
_base_ = [
    '../_base_/models/bisenetv1_r18-d32.py',
    '../_base_/datasets/cityscapes_1024x1024.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    type='EncoderDecoder',
    backbone=dict(
        type='BiSeNetV1',
        context_channels=(512, 1024, 2048),
        spatial_channels=(256, 256, 256, 512),
        out_channels=1024,
        backbone_cfg=dict(type='ResNet', depth=50)),
    decode_head=dict(
        type='FCNHead', in_channels=1024, in_index=0, channels=1024),
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=512,
            channels=256,
            num_convs=1,
            num_classes=19,
            in_index=1,
            norm_cfg=norm_cfg,
            concat_input=False),
        dict(
            type='FCNHead',
            in_channels=512,
            channels=256,
            num_convs=1,
            num_classes=19,
            in_index=2,
            norm_cfg=norm_cfg,
            concat_input=False),
    ])
lr_config = dict(warmup='linear', warmup_iters=1000)
optimizer = dict(lr=0.05)
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
)
1,227
27.55814
74
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_4x4_1024x1024_160k_cityscapes.py
_base_ = './bisenetv1_r50-d32_4x4_1024x1024_160k_cityscapes.py'
model = dict(
    type='EncoderDecoder',
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(
                type='Pretrained', checkpoint='open-mmlab://resnet50_v1c'))))
256
31.125
77
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r50-d32_in1k-pre_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
_base_ = './bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py'
model = dict(
    backbone=dict(
        backbone_cfg=dict(
            init_cfg=dict(
                type='Pretrained', checkpoint='open-mmlab://resnet50_v1c'))))
239
29
77
py
mmsegmentation
mmsegmentation-master/configs/bisenetv1/bisenetv1_r50-d32_lr5e-3_4x4_512x512_160k_coco-stuff164k.py
_base_ = [
    '../_base_/models/bisenetv1_r18-d32.py',
    '../_base_/datasets/coco-stuff164k.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_160k.py'
]
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    backbone=dict(
        context_channels=(512, 1024, 2048),
        spatial_channels=(256, 256, 256, 512),
        out_channels=1024,
        backbone_cfg=dict(type='ResNet', depth=50)),
    decode_head=dict(in_channels=1024, channels=1024, num_classes=171),
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=512,
            channels=256,
            num_convs=1,
            num_classes=171,
            in_index=1,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=512,
            channels=256,
            num_convs=1,
            num_classes=171,
            in_index=2,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    ])
lr_config = dict(warmup='linear', warmup_iters=1000)
optimizer = dict(lr=0.005)
1,372
31.690476
78
py
mmsegmentation
mmsegmentation-master/configs/bisenetv2/README.md
# BiSeNetV2

[Bisenet v2: Bilateral Network with Guided Aggregation for Real-time Semantic Segmentation](https://arxiv.org/abs/2004.02147)

## Introduction

<!-- [ALGORITHM] -->

<a href="">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/backbones/bisenetv2.py#L545">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Low-level details and high-level semantics are both essential to the semantic segmentation task. However, to speed up model inference, current approaches almost always sacrifice the low-level details, which leads to a considerable accuracy decrease. We propose to treat these spatial details and categorical semantics separately to achieve high accuracy and high efficiency for real-time semantic segmentation. To this end, we propose an efficient and effective architecture with a good trade-off between speed and accuracy, termed Bilateral Segmentation Network (BiSeNet V2). This architecture involves: (i) a Detail Branch, with wide channels and shallow layers to capture low-level details and generate a high-resolution feature representation; (ii) a Semantic Branch, with narrow channels and deep layers to obtain high-level semantic context. The Semantic Branch is lightweight due to its reduced channel capacity and fast-downsampling strategy. Furthermore, we design a Guided Aggregation Layer to enhance mutual connections and fuse both types of feature representation. Besides, a booster training strategy is designed to improve the segmentation performance without any extra inference cost. Extensive quantitative and qualitative evaluations demonstrate that the proposed architecture performs favourably against state-of-the-art real-time semantic segmentation approaches. Specifically, for a 2,048x1,024 input, we achieve 72.6% mean IoU on the Cityscapes test set with a speed of 156 FPS on one NVIDIA GeForce GTX 1080 Ti card, which is significantly faster than existing methods, yet with better segmentation accuracy.

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142898966-ec4a81da-b4b0-41ee-b083-1d964582c18a.png" width="70%"/>
</div>

## Citation

```bibtex
@article{yu2021bisenet,
  title={Bisenet v2: Bilateral network with guided aggregation for real-time semantic segmentation},
  author={Yu, Changqian and Gao, Changxin and Wang, Jingbo and Yu, Gang and Shen, Chunhua and Sang, Nong},
  journal={International Journal of Computer Vision},
  pages={1--18},
  year={2021},
  publisher={Springer}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ---------------- | --------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| BiSeNetV2 | BiSeNetV2 | 1024x1024 | 160000 | 7.64 | 31.77 | 73.21 | 75.74 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes_20210902_015551-bcf10f09.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes_20210902_015551.log.json) |
| BiSeNetV2 (OHEM) | BiSeNetV2 | 1024x1024 | 160000 | 7.64 | - | 75.30 | 77.06 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes_20220808_172324-8bf0aaba.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes_20220808_172324.log.json) |
| BiSeNetV2 (4x8) | BiSeNetV2 | 1024x1024 | 160000 | 15.05 | - | 75.76 | 77.79 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes_20210903_000032-e1a2eed6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes_20210903_000032.log.json) |
| BiSeNetV2 (FP16) | BiSeNetV2 | 1024x1024 | 160000 | 5.77 | 36.65 | 73.07 | 75.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes_20210902_045942-b979777b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes_20210902_045942.log.json) |

Note:

- `OHEM` means Online Hard Example Mining (OHEM) is adopted in training.
- `FP16` means Mixed Precision (FP16) is adopted in training.
- `4x8` means 4 GPUs with 8 samples per GPU in training.
6,639
121.962963
1,651
md
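The abstract above describes a wide-and-shallow Detail Branch, a narrow-and-deep Semantic Branch, and a Guided Aggregation Layer that fuses them. Below is a minimal toy sketch of that bilateral layout — not the actual mmsegmentation `BiSeNetV2` backbone; every channel count and the gated-sum fusion here are illustrative assumptions:

```python
import torch
import torch.nn as nn

def conv_bn_relu(c_in, c_out, stride=1):
    return nn.Sequential(
        nn.Conv2d(c_in, c_out, 3, stride=stride, padding=1, bias=False),
        nn.BatchNorm2d(c_out), nn.ReLU(inplace=True))

class ToyBilateralNet(nn.Module):
    """Toy two-branch layout: wide/shallow detail branch, narrow/deep
    semantic branch, fused by a (much simplified) aggregation layer."""

    def __init__(self, num_classes=19):
        super().__init__()
        # Detail branch: wide channels, few layers (overall stride 8).
        self.detail = nn.Sequential(
            conv_bn_relu(3, 64, 2), conv_bn_relu(64, 64, 2),
            conv_bn_relu(64, 128, 2))
        # Semantic branch: narrow channels, more layers, fast downsampling
        # (overall stride 32).
        self.semantic = nn.Sequential(
            conv_bn_relu(3, 16, 2), conv_bn_relu(16, 32, 2),
            conv_bn_relu(32, 64, 2), conv_bn_relu(64, 128, 2),
            conv_bn_relu(128, 128, 2))
        self.head = nn.Conv2d(128, num_classes, 1)

    def forward(self, x):
        d = self.detail(x)    # 1/8 resolution, detail-rich features
        s = self.semantic(x)  # 1/32 resolution, semantic features
        s = nn.functional.interpolate(
            s, size=d.shape[2:], mode='bilinear', align_corners=False)
        # Guided aggregation, reduced here to a gated sum of both branches.
        fused = d * torch.sigmoid(s) + s
        return self.head(fused)

logits = ToyBilateralNet()(torch.randn(1, 3, 1024, 1024))  # (1, 19, 128, 128)
```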
mmsegmentation
mmsegmentation-master/configs/bisenetv2/bisenetv2.yml
Collections:
- Name: BiSeNetV2
  Metadata:
    Training Data:
    - Cityscapes
  Paper:
    URL: https://arxiv.org/abs/2004.02147
    Title: 'Bisenet v2: Bilateral Network with Guided Aggregation for Real-time Semantic
      Segmentation'
  README: configs/bisenetv2/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.18.0/mmseg/models/backbones/bisenetv2.py#L545
    Version: v0.18.0
Models:
- Name: bisenetv2_fcn_4x4_1024x1024_160k_cityscapes
  In Collection: BiSeNetV2
  Metadata:
    backbone: BiSeNetV2
    crop size: (1024,1024)
    lr schd: 160000
    inference time (ms/im):
    - value: 31.48
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (1024,1024)
    Training Memory (GB): 7.64
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 73.21
      mIoU(ms+flip): 75.74
  Config: configs/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes_20210902_015551-bcf10f09.pth
- Name: bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes
  In Collection: BiSeNetV2
  Metadata:
    backbone: BiSeNetV2
    crop size: (1024,1024)
    lr schd: 160000
    Training Memory (GB): 7.64
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 75.3
      mIoU(ms+flip): 77.06
  Config: configs/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes_20220808_172324-8bf0aaba.pth
- Name: bisenetv2_fcn_4x8_1024x1024_160k_cityscapes
  In Collection: BiSeNetV2
  Metadata:
    backbone: BiSeNetV2
    crop size: (1024,1024)
    lr schd: 160000
    Training Memory (GB): 15.05
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 75.76
      mIoU(ms+flip): 77.79
  Config: configs/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes_20210903_000032-e1a2eed6.pth
- Name: bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes
  In Collection: BiSeNetV2
  Metadata:
    backbone: BiSeNetV2
    crop size: (1024,1024)
    lr schd: 160000
    inference time (ms/im):
    - value: 27.29
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP16
      resolution: (1024,1024)
    Training Memory (GB): 5.77
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 73.07
      mIoU(ms+flip): 75.13
  Config: configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes_20210902_045942-b979777b.pth
3,106
33.910112
198
yml
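The README above reports throughput in fps while this yml stores latency in ms/im; the two are reciprocals of each other. A quick sanity check of the numbers:

```python
def ms_per_im_to_fps(ms_per_im):
    """Convert per-image latency (ms) to throughput (frames per second)."""
    return 1000.0 / ms_per_im

print(round(ms_per_im_to_fps(31.48), 2))  # 31.77 fps, matching the FP32 row
print(round(ms_per_im_to_fps(27.29), 2))  # 36.64 fps, ~ the README's 36.65 (FP16)
```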
mmsegmentation
mmsegmentation-master/configs/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py
_base_ = [
    '../_base_/models/bisenetv2.py',
    '../_base_/datasets/cityscapes_1024x1024.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
lr_config = dict(warmup='linear', warmup_iters=1000)
optimizer = dict(lr=0.05)
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
)
315
25.333333
74
py
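Because the config above only lists its `_base_` fragments plus a few overrides, one way to inspect the fully merged result is `mmcv.Config.fromfile`, which resolves the whole inheritance chain. A sketch, assuming a local checkout of mmsegmentation as the working directory:

```python
from mmcv import Config

# Resolve the _base_ chain into one flat config object.
cfg = Config.fromfile(
    'configs/bisenetv2/bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py')
print(cfg.optimizer)             # lr=0.05 override applied on the base schedule
print(cfg.data.samples_per_gpu)  # 4
```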
mmsegmentation
mmsegmentation-master/configs/bisenetv2/bisenetv2_fcn_4x8_1024x1024_160k_cityscapes.py
_base_ = [
    '../_base_/models/bisenetv2.py',
    '../_base_/datasets/cityscapes_1024x1024.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
lr_config = dict(warmup='linear', warmup_iters=1000)
optimizer = dict(lr=0.05)
data = dict(
    samples_per_gpu=8,
    workers_per_gpu=4,
)
315
25.333333
74
py
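The only change from the 4x4 config is `samples_per_gpu=8`; following the `GPUs x samples-per-GPU` naming convention from the README notes, the effective batch size doubles:

```python
gpus = 4
print(gpus * 4)  # 16 images per iteration for the 4x4 config
print(gpus * 8)  # 32 images per iteration for this 4x8 config
```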
mmsegmentation
mmsegmentation-master/configs/bisenetv2/bisenetv2_fcn_fp16_4x4_1024x1024_160k_cityscapes.py
_base_ = './bisenetv2_fcn_4x4_1024x1024_160k_cityscapes.py'
# fp16 settings
optimizer_config = dict(type='Fp16OptimizerHook', loss_scale=512.)
# fp16 placeholder
fp16 = dict()
176
28.5
66
py
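`Fp16OptimizerHook` with `loss_scale=512.` applies static loss scaling. Conceptually, each optimizer step does roughly the following — a hand-rolled sketch of the idea, not the hook's actual code:

```python
def fp16_step(loss, optimizer, params, loss_scale=512.0):
    """Static loss scaling: scale the loss up before backward so small FP16
    gradients don't underflow to zero, then unscale before the update."""
    optimizer.zero_grad()
    (loss * loss_scale).backward()
    for p in params:
        if p.grad is not None:
            p.grad.div_(loss_scale)  # restore the true gradient magnitude
    optimizer.step()
```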
mmsegmentation
mmsegmentation-master/configs/bisenetv2/bisenetv2_fcn_ohem_4x4_1024x1024_160k_cityscapes.py
_base_ = [
    '../_base_/models/bisenetv2.py',
    '../_base_/datasets/cityscapes_1024x1024.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
# sampler = dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000)
norm_cfg = dict(type='SyncBN', requires_grad=True)
model = dict(
    decode_head=dict(
        sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000)),
    auxiliary_head=[
        dict(
            type='FCNHead',
            in_channels=16,
            channels=16,
            num_convs=2,
            num_classes=19,
            in_index=1,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=32,
            channels=64,
            num_convs=2,
            num_classes=19,
            in_index=2,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=64,
            channels=256,
            num_convs=2,
            num_classes=19,
            in_index=3,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
        dict(
            type='FCNHead',
            in_channels=128,
            channels=1024,
            num_convs=2,
            num_classes=19,
            in_index=4,
            norm_cfg=norm_cfg,
            concat_input=False,
            align_corners=False,
            sampler=dict(type='OHEMPixelSampler', thresh=0.7, min_kept=10000),
            loss_decode=dict(
                type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)),
    ],
)
lr_config = dict(warmup='linear', warmup_iters=1000)
optimizer = dict(lr=0.05)
data = dict(
    samples_per_gpu=4,
    workers_per_gpu=4,
)
2,407
31.986301
78
py
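`OHEMPixelSampler(thresh=0.7, min_kept=10000)` restricts the loss to hard pixels. A self-contained sketch of the selection rule — simplified relative to mmseg's actual sampler, which also handles per-class loss weights — keeping pixels whose ground-truth-class confidence falls below `thresh`, with a floor of `min_kept` pixels:

```python
import torch
import torch.nn.functional as F

def ohem_pixel_mask(logits, target, thresh=0.7, min_kept=10000,
                    ignore_index=255):
    """Pick 'hard' pixels: GT-class confidence below `thresh`, but always
    keep at least `min_kept` of the least-confident valid pixels."""
    with torch.no_grad():
        prob = F.softmax(logits, dim=1)                       # (B, C, H, W)
        safe_target = target.masked_fill(target == ignore_index, 0)
        gt_prob = prob.gather(1, safe_target.unsqueeze(1)).squeeze(1)
        valid = target != ignore_index
        gt_prob = gt_prob.masked_fill(~valid, 1.0)            # never pick ignored
        k = min(min_kept, int(valid.sum()))
        # Confidence of the k-th least-confident valid pixel.
        kth = torch.kthvalue(gt_prob.flatten(), max(k, 1)).values
        mask = (gt_prob < max(thresh, float(kth))) & valid
    return mask  # use as a selection mask for the pixel-wise loss
```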
mmsegmentation
mmsegmentation-master/configs/ccnet/README.md
# CCNet

[CCNet: Criss-Cross Attention for Semantic Segmentation](https://arxiv.org/abs/1811.11721)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/speedinghzl/CCNet">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/apc_head.py#L111">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

Contextual information is vital in visual understanding problems, such as semantic segmentation and object detection. We propose a Criss-Cross Network (CCNet) for obtaining full-image contextual information in a very effective and efficient way. Concretely, for each pixel, a novel criss-cross attention module harvests the contextual information of all the pixels on its criss-cross path. By taking a further recurrent operation, each pixel can finally capture the full-image dependencies. Besides, a category consistent loss is proposed to enforce the criss-cross attention module to produce more discriminative features. Overall, CCNet has the following merits: 1) GPU memory friendly. Compared with the non-local block, the proposed recurrent criss-cross attention module requires 11x less GPU memory usage. 2) High computational efficiency. The recurrent criss-cross attention significantly reduces FLOPs by about 85% of the non-local block. 3) The state-of-the-art performance. We conduct extensive experiments on semantic segmentation benchmarks including Cityscapes, ADE20K, the human parsing benchmark LIP, the instance segmentation benchmark COCO, and the video segmentation benchmark CamVid. In particular, our CCNet achieves mIoU scores of 81.9%, 45.76% and 55.47% on the Cityscapes test set, the ADE20K validation set and the LIP validation set respectively, which are the new state-of-the-art results. The source codes are available at [this https URL](https://github.com/speedinghzl/CCNet).

<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142899159-b329c12a-0fde-44df-8718-def6cfb004e4.png" width="70%"/>
</div>

## Citation

```bibtex
@article{huang2018ccnet,
  title={CCNet: Criss-Cross Attention for Semantic Segmentation},
  author={Huang, Zilong and Wang, Xinggang and Huang, Lichao and Huang, Chang and Wei, Yunchao and Liu, Wenyu},
  booktitle={ICCV},
  year={2019}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| CCNet | R-50-D8 | 512x1024 | 40000 | 6 | 3.32 | 77.76 | 78.87 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517-4123f401.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517.log.json) |
| CCNet | R-101-D8 | 512x1024 | 40000 | 9.5 | 2.31 | 76.35 | 78.19 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540-a3b84ba6.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540.log.json) |
| CCNet | R-50-D8 | 769x769 | 40000 | 6.8 | 1.43 | 78.46 | 79.93 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125-76d11884.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125.log.json) |
| CCNet | R-101-D8 | 769x769 | 40000 | 10.7 | 1.01 | 76.94 | 78.62 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428-4f57c8d0.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428.log.json) |
| CCNet | R-50-D8 | 512x1024 | 80000 | - | - | 79.03 | 80.16 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421-869a3423.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421.log.json) |
| CCNet | R-101-D8 | 512x1024 | 80000 | - | - | 78.87 | 79.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935-ffae8917.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935.log.json) |
| CCNet | R-50-D8 | 769x769 | 80000 | - | - | 79.29 | 81.08 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421-73eed8ca.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421.log.json) |
| CCNet | R-101-D8 | 769x769 | 80000 | - | - | 79.45 | 80.66 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502-ad3cd481.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502.log.json) |

### ADE20K

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| CCNet | R-50-D8 | 512x512 | 80000 | 8.8 | 20.89 | 41.78 | 42.98 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848-aa37f61e.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848.log.json) |
| CCNet | R-101-D8 | 512x512 | 80000 | 12.2 | 14.11 | 43.97 | 45.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848-1f4929a3.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848.log.json) |
| CCNet | R-50-D8 | 512x512 | 160000 | - | - | 42.08 | 43.13 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435-7c97193b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435.log.json) |
| CCNet | R-101-D8 | 512x512 | 160000 | - | - | 43.71 | 45.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644-e849e007.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644.log.json) |

### Pascal VOC 2012 + Aug

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| CCNet | R-50-D8 | 512x512 | 20000 | 6 | 20.45 | 76.17 | 77.51 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212-fad81784.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212.log.json) |
| CCNet | R-101-D8 | 512x512 | 20000 | 9.5 | 13.64 | 77.27 | 79.02 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212-0007b61d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212.log.json) |
| CCNet | R-50-D8 | 512x512 | 40000 | - | - | 75.96 | 77.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127-c2a15f02.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127.log.json) |
| CCNet | R-101-D8 | 512x512 | 40000 | - | - | 77.87 | 78.90 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127-c30da577.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127.log.json) |
14,527
212.647059
1,497
md
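The abstract above describes harvesting context along each pixel's criss-cross path. A naive, unoptimized PyTorch rendering of one criss-cross attention step follows, to make the row-plus-column attention pattern concrete; the official module additionally masks the duplicated self-position in the energies, which this sketch omits:

```python
import torch
import torch.nn as nn

class NaiveCrissCrossAttention(nn.Module):
    """Each pixel attends only to the pixels in its own row and column."""

    def __init__(self, channels):
        super().__init__()
        self.query = nn.Conv2d(channels, channels // 8, 1)
        self.key = nn.Conv2d(channels, channels // 8, 1)
        self.value = nn.Conv2d(channels, channels, 1)
        self.gamma = nn.Parameter(torch.zeros(1))  # residual weight

    def forward(self, x):
        q, k, v = self.query(x), self.key(x), self.value(x)
        # Energies along the column (over height) and the row (over width).
        e_col = torch.einsum('bcij,bckj->bkij', q, k)   # (B, H, H, W)
        e_row = torch.einsum('bcij,bcik->bkij', q, k)   # (B, W, H, W)
        attn = torch.softmax(torch.cat([e_col, e_row], dim=1), dim=1)
        a_col, a_row = attn.split([x.size(2), x.size(3)], dim=1)
        # Aggregate values from the column and row positions.
        out = (torch.einsum('bkij,bckj->bcij', a_col, v) +
               torch.einsum('bkij,bcik->bcij', a_row, v))
        return self.gamma * out + x

x = torch.randn(2, 64, 32, 32)
cca = NaiveCrissCrossAttention(64)
y = cca(cca(x))  # the recurrent operation (R=2) yields full-image dependencies
```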
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet.yml
Collections:
- Name: CCNet
  Metadata:
    Training Data:
    - Cityscapes
    - ADE20K
    - Pascal VOC 2012 + Aug
  Paper:
    URL: https://arxiv.org/abs/1811.11721
    Title: 'CCNet: Criss-Cross Attention for Semantic Segmentation'
  README: configs/ccnet/README.md
  Code:
    URL: https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/decode_heads/apc_head.py#L111
    Version: v0.17.0
  Converted From:
    Code: https://github.com/speedinghzl/CCNet
Models:
- Name: ccnet_r50-d8_512x1024_40k_cityscapes
  In Collection: CCNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 301.2
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 6.0
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 77.76
      mIoU(ms+flip): 78.87
  Config: configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes/ccnet_r50-d8_512x1024_40k_cityscapes_20200616_142517-4123f401.pth
- Name: ccnet_r101-d8_512x1024_40k_cityscapes
  In Collection: CCNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 40000
    inference time (ms/im):
    - value: 432.9
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,1024)
    Training Memory (GB): 9.5
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 76.35
      mIoU(ms+flip): 78.19
  Config: configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes/ccnet_r101-d8_512x1024_40k_cityscapes_20200616_142540-a3b84ba6.pth
- Name: ccnet_r50-d8_769x769_40k_cityscapes
  In Collection: CCNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 699.3
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 6.8
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.46
      mIoU(ms+flip): 79.93
  Config: configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_40k_cityscapes/ccnet_r50-d8_769x769_40k_cityscapes_20200616_145125-76d11884.pth
- Name: ccnet_r101-d8_769x769_40k_cityscapes
  In Collection: CCNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 40000
    inference time (ms/im):
    - value: 990.1
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (769,769)
    Training Memory (GB): 10.7
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 76.94
      mIoU(ms+flip): 78.62
  Config: configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_40k_cityscapes/ccnet_r101-d8_769x769_40k_cityscapes_20200617_101428-4f57c8d0.pth
- Name: ccnet_r50-d8_512x1024_80k_cityscapes
  In Collection: CCNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.03
      mIoU(ms+flip): 80.16
  Config: configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes/ccnet_r50-d8_512x1024_80k_cityscapes_20200617_010421-869a3423.pth
- Name: ccnet_r101-d8_512x1024_80k_cityscapes
  In Collection: CCNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,1024)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 78.87
      mIoU(ms+flip): 79.9
  Config: configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes/ccnet_r101-d8_512x1024_80k_cityscapes_20200617_203935-ffae8917.pth
- Name: ccnet_r50-d8_769x769_80k_cityscapes
  In Collection: CCNet
  Metadata:
    backbone: R-50-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.29
      mIoU(ms+flip): 81.08
  Config: configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_769x769_80k_cityscapes/ccnet_r50-d8_769x769_80k_cityscapes_20200617_010421-73eed8ca.pth
- Name: ccnet_r101-d8_769x769_80k_cityscapes
  In Collection: CCNet
  Metadata:
    backbone: R-101-D8
    crop size: (769,769)
    lr schd: 80000
  Results:
  - Task: Semantic Segmentation
    Dataset: Cityscapes
    Metrics:
      mIoU: 79.45
      mIoU(ms+flip): 80.66
  Config: configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_769x769_80k_cityscapes/ccnet_r101-d8_769x769_80k_cityscapes_20200618_011502-ad3cd481.pth
- Name: ccnet_r50-d8_512x512_80k_ade20k
  In Collection: CCNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 47.87
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 8.8
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 41.78
      mIoU(ms+flip): 42.98
  Config: configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_80k_ade20k/ccnet_r50-d8_512x512_80k_ade20k_20200615_014848-aa37f61e.pth
- Name: ccnet_r101-d8_512x512_80k_ade20k
  In Collection: CCNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 80000
    inference time (ms/im):
    - value: 70.87
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 12.2
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.97
      mIoU(ms+flip): 45.13
  Config: configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_80k_ade20k/ccnet_r101-d8_512x512_80k_ade20k_20200615_014848-1f4929a3.pth
- Name: ccnet_r50-d8_512x512_160k_ade20k
  In Collection: CCNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 42.08
      mIoU(ms+flip): 43.13
  Config: configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_160k_ade20k/ccnet_r50-d8_512x512_160k_ade20k_20200616_084435-7c97193b.pth
- Name: ccnet_r101-d8_512x512_160k_ade20k
  In Collection: CCNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 160000
  Results:
  - Task: Semantic Segmentation
    Dataset: ADE20K
    Metrics:
      mIoU: 43.71
      mIoU(ms+flip): 45.04
  Config: configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_160k_ade20k/ccnet_r101-d8_512x512_160k_ade20k_20200616_000644-e849e007.pth
- Name: ccnet_r50-d8_512x512_20k_voc12aug
  In Collection: CCNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 48.9
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 6.0
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 76.17
      mIoU(ms+flip): 77.51
  Config: configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_20k_voc12aug/ccnet_r50-d8_512x512_20k_voc12aug_20200617_193212-fad81784.pth
- Name: ccnet_r101-d8_512x512_20k_voc12aug
  In Collection: CCNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 20000
    inference time (ms/im):
    - value: 73.31
      hardware: V100
      backend: PyTorch
      batch size: 1
      mode: FP32
      resolution: (512,512)
    Training Memory (GB): 9.5
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 77.27
      mIoU(ms+flip): 79.02
  Config: configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_20k_voc12aug/ccnet_r101-d8_512x512_20k_voc12aug_20200617_193212-0007b61d.pth
- Name: ccnet_r50-d8_512x512_40k_voc12aug
  In Collection: CCNet
  Metadata:
    backbone: R-50-D8
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 75.96
      mIoU(ms+flip): 77.04
  Config: configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r50-d8_512x512_40k_voc12aug/ccnet_r50-d8_512x512_40k_voc12aug_20200613_232127-c2a15f02.pth
- Name: ccnet_r101-d8_512x512_40k_voc12aug
  In Collection: CCNet
  Metadata:
    backbone: R-101-D8
    crop size: (512,512)
    lr schd: 40000
  Results:
  - Task: Semantic Segmentation
    Dataset: Pascal VOC 2012 + Aug
    Metrics:
      mIoU: 77.87
      mIoU(ms+flip): 78.9
  Config: configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py
  Weights: https://download.openmmlab.com/mmsegmentation/v0.5/ccnet/ccnet_r101-d8_512x512_40k_voc12aug/ccnet_r101-d8_512x512_40k_voc12aug_20200613_232127-c30da577.pth
10,004
31.696078
172
yml
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r101-d8_512x1024_40k_cityscapes.py
_base_ = './ccnet_r50-d8_512x1024_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
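These one-line depth-101 variants work because mmcv configs merge dicts recursively: keys set in the child override the base, while unspecified base keys survive. A sketch of the effect — the `base` values shown are illustrative stand-ins for the R-50 base config, and `merge` is a simplified model of mmcv's behavior, not its actual code:

```python
# Illustrative base values; the real ones live in ccnet_r50-d8.py.
base = dict(
    pretrained='open-mmlab://resnet50_v1c',
    backbone=dict(type='ResNetV1c', depth=50, num_stages=4))
child = dict(
    pretrained='open-mmlab://resnet101_v1c',
    backbone=dict(depth=101))

def merge(base, child):
    """Simplified recursive dict merge, as mmcv configs behave."""
    out = dict(base)
    for key, val in child.items():
        if isinstance(val, dict) and isinstance(out.get(key), dict):
            out[key] = merge(out[key], val)
        else:
            out[key] = val
    return out

merged = merge(base, child)
assert merged['backbone']['depth'] == 101      # overridden
assert merged['backbone']['num_stages'] == 4   # inherited from the base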
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r101-d8_512x1024_80k_cityscapes.py
_base_ = './ccnet_r50-d8_512x1024_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
133
43.666667
79
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r101-d8_512x512_160k_ade20k.py
_base_ = './ccnet_r50-d8_512x512_160k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
129
42.333333
79
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r101-d8_512x512_20k_voc12aug.py
_base_ = './ccnet_r50-d8_512x512_20k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
130
42.666667
79
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r101-d8_512x512_40k_voc12aug.py
_base_ = './ccnet_r50-d8_512x512_40k_voc12aug.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
130
42.666667
79
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r101-d8_512x512_80k_ade20k.py
_base_ = './ccnet_r50-d8_512x512_80k_ade20k.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
128
42
79
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r101-d8_769x769_40k_cityscapes.py
_base_ = './ccnet_r50-d8_769x769_40k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
132
43.333333
79
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r101-d8_769x769_80k_cityscapes.py
_base_ = './ccnet_r50-d8_769x769_80k_cityscapes.py'
model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101))
132
43.333333
79
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r50-d8_512x1024_40k_cityscapes.py
_base_ = [
    '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
163
31.8
75
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r50-d8_512x1024_80k_cityscapes.py
_base_ = [
    '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/cityscapes.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
163
31.8
75
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r50-d8_512x512_160k_ade20k.py
_base_ = [
    '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_160k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
251
35
76
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r50-d8_512x512_20k_voc12aug.py
_base_ = [
    '../_base_/models/ccnet_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_20k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
262
31.875
77
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r50-d8_512x512_40k_voc12aug.py
_base_ = [
    '../_base_/models/ccnet_r50-d8.py',
    '../_base_/datasets/pascal_voc12_aug.py', '../_base_/default_runtime.py',
    '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(num_classes=21), auxiliary_head=dict(num_classes=21))
262
31.875
77
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r50-d8_512x512_80k_ade20k.py
_base_ = [
    '../_base_/models/ccnet_r50-d8.py', '../_base_/datasets/ade20k.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(num_classes=150), auxiliary_head=dict(num_classes=150))
250
34.857143
76
py
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r50-d8_769x769_40k_cityscapes.py
_base_ = [
    '../_base_/models/ccnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
350
34.1
79
py
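The 769x769 configs switch `test_cfg` to `mode='slide'` with a 769 crop and a 513 stride. A sketch of what slide inference does with those numbers — overlapping crops with logit accumulation and overlap averaging; `model` here stands for any callable returning per-crop logits at full crop resolution, and batch size 1 is assumed:

```python
import math
import torch

def slide_inference(model, img, num_classes=19, crop=769, stride=513):
    """Run the model on overlapping crops; average logits where crops overlap."""
    _, _, h, w = img.shape
    logits = img.new_zeros(1, num_classes, h, w)
    count = img.new_zeros(1, 1, h, w)
    rows = max(math.ceil((h - crop) / stride) + 1, 1)
    cols = max(math.ceil((w - crop) / stride) + 1, 1)
    for r in range(rows):
        for c in range(cols):
            # Clamp the last window so it ends exactly at the image border.
            top = min(r * stride, max(h - crop, 0))
            left = min(c * stride, max(w - crop, 0))
            bottom, right = min(top + crop, h), min(left + crop, w)
            patch = img[:, :, top:bottom, left:right]
            logits[:, :, top:bottom, left:right] += model(patch)
            count[:, :, top:bottom, left:right] += 1
    return logits / count  # averaged logits over all overlapping windows
```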
mmsegmentation
mmsegmentation-master/configs/ccnet/ccnet_r50-d8_769x769_80k_cityscapes.py
_base_ = [
    '../_base_/models/ccnet_r50-d8.py',
    '../_base_/datasets/cityscapes_769x769.py',
    '../_base_/default_runtime.py', '../_base_/schedules/schedule_80k.py'
]
model = dict(
    decode_head=dict(align_corners=True),
    auxiliary_head=dict(align_corners=True),
    test_cfg=dict(mode='slide', crop_size=(769, 769), stride=(513, 513)))
350
34.1
79
py
mmsegmentation
mmsegmentation-master/configs/cgnet/README.md
# CGNet

[CGNet: A Light-weight Context Guided Network for Semantic Segmentation](https://arxiv.org/abs/1811.08201)

## Introduction

<!-- [ALGORITHM] -->

<a href="https://github.com/wutianyiRosun/CGNet">Official Repo</a>

<a href="https://github.com/open-mmlab/mmsegmentation/blob/v0.17.0/mmseg/models/backbones/cgnet.py#L187">Code Snippet</a>

## Abstract

<!-- [ABSTRACT] -->

The demand for applying semantic segmentation models on mobile devices has been increasing rapidly. Current state-of-the-art networks have an enormous amount of parameters and are hence unsuitable for mobile devices, while other small memory footprint models follow the spirit of classification networks and ignore the inherent characteristics of semantic segmentation. To tackle this problem, we propose a novel Context Guided Network (CGNet), which is a light-weight and efficient network for semantic segmentation. We first propose the Context Guided (CG) block, which learns the joint feature of both local feature and surrounding context, and further improves the joint feature with the global context. Based on the CG block, we develop CGNet which captures contextual information in all stages of the network and is specially tailored for increasing segmentation accuracy. CGNet is also elaborately designed to reduce the number of parameters and save memory footprint. Under an equivalent number of parameters, the proposed CGNet significantly outperforms existing segmentation networks. Extensive experiments on the Cityscapes and CamVid datasets verify the effectiveness of the proposed approach. Specifically, without any post-processing and multi-scale testing, the proposed CGNet achieves 64.8% mean IoU on Cityscapes with less than 0.5 M parameters. The source code for the complete system can be found at [this https URL](https://github.com/wutianyiRosun/CGNet).
<!-- [IMAGE] -->

<div align=center>
<img src="https://user-images.githubusercontent.com/24582831/142900351-89559574-79cc-4f57-8f69-5d88765ec38d.png" width="80%"/>
</div>

## Citation

```bibtex
@article{wu2020cgnet,
  title={Cgnet: A light-weight context guided network for semantic segmentation},
  author={Wu, Tianyi and Tang, Sheng and Zhang, Rui and Cao, Juan and Zhang, Yongdong},
  journal={IEEE Transactions on Image Processing},
  volume={30},
  pages={1169--1179},
  year={2020},
  publisher={IEEE}
}
```

## Results and models

### Cityscapes

| Method | Backbone | Crop Size | Lr schd | Mem (GB) | Inf time (fps) | mIoU | mIoU(ms+flip) | config | download |
| ------ | -------- | --------- | ------: | -------- | -------------- | ----: | ------------: | ------ | -------- |
| CGNet | M3N21 | 680x680 | 60000 | 7.5 | 30.51 | 65.63 | 68.04 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/cgnet/cgnet_680x680_60k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_680x680_60k_cityscapes/cgnet_680x680_60k_cityscapes_20201101_110253-4c0b2f2d.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_680x680_60k_cityscapes/cgnet_680x680_60k_cityscapes-20201101_110253.log.json) |
| CGNet | M3N21 | 512x1024 | 60000 | 8.3 | 31.14 | 68.27 | 70.33 | [config](https://github.com/open-mmlab/mmsegmentation/blob/master/configs/cgnet/cgnet_512x1024_60k_cityscapes.py) | [model](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_512x1024_60k_cityscapes/cgnet_512x1024_60k_cityscapes_20201101_110254-124ea03b.pth) \| [log](https://download.openmmlab.com/mmsegmentation/v0.5/cgnet/cgnet_512x1024_60k_cityscapes/cgnet_512x1024_60k_cityscapes-20201101_110254.log.json) |
4,480
94.340426
1,457
md
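The CG block described in the abstract above combines a local feature, a surrounding-context feature, and a global-context gate. A compact sketch of that three-part structure follows; the channel counts, group sizes, and exact ops are simplifications of the official CGNet block, chosen only to make the idea concrete:

```python
import torch
import torch.nn as nn

class ToyCGBlock(nn.Module):
    """Context Guided block, simplified: local and surrounding-context
    features are joined, then refined by a global-context channel gate."""

    def __init__(self, channels, dilation=2, reduction=8):
        super().__init__()
        half = channels // 2
        # f_loc: local feature from an ordinary 3x3 (channel-wise) conv.
        self.f_loc = nn.Conv2d(channels, half, 3, padding=1,
                               groups=half, bias=False)
        # f_sur: surrounding context from a dilated 3x3 conv.
        self.f_sur = nn.Conv2d(channels, half, 3, padding=dilation,
                               dilation=dilation, groups=half, bias=False)
        # f_joi: joint feature of local + surrounding context.
        self.f_joi = nn.Sequential(nn.BatchNorm2d(channels),
                                   nn.PReLU(channels))
        # f_glo: global context as a squeeze-and-gate over channels.
        self.f_glo = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Conv2d(channels, channels // reduction, 1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels // reduction, channels, 1),
            nn.Sigmoid())

    def forward(self, x):
        joint = self.f_joi(torch.cat([self.f_loc(x), self.f_sur(x)], dim=1))
        return joint * self.f_glo(joint)  # reweight channels by global context

out = ToyCGBlock(64)(torch.randn(1, 64, 128, 128))  # spatial shape preserved
```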