# mmsegmentation-master/tests/test_models/test_backbones/test_beit.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones.beit import BEiT
from .utils import check_norm_state
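# `check_norm_state` (imported above) verifies that every norm layer reports
# the given training flag; a minimal sketch of such a helper, assuming it only
# needs to inspect `_BatchNorm` layers:
#
#     def check_norm_state(modules, train_state):
#         for mod in modules:
#             if isinstance(mod, _BatchNorm) and mod.training != train_state:
#                 return False
#         return True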
def test_beit_backbone():
with pytest.raises(TypeError):
# pretrained must be a string path
model = BEiT()
model.init_weights(pretrained=0)
with pytest.raises(TypeError):
# img_size must be int or tuple
model = BEiT(img_size=512.0)
with pytest.raises(TypeError):
        # out_indices must be int, list or tuple
model = BEiT(out_indices=1.)
with pytest.raises(AssertionError):
        # The length of the img_size tuple must be at most 2.
BEiT(img_size=(224, 224, 224))
with pytest.raises(TypeError):
        # pretrained must be None or str.
BEiT(pretrained=123)
    # Test img_size as a 1-tuple
imgs = torch.randn(1, 3, 224, 224)
model = BEiT(img_size=(224, ))
model.init_weights()
model(imgs)
    # Test img_size as a 2-tuple
imgs = torch.randn(1, 3, 224, 224)
model = BEiT(img_size=(224, 224))
model(imgs)
# Test norm_eval = True
model = BEiT(norm_eval=True)
model.train()
# Test BEiT backbone with input size of 224 and patch size of 16
model = BEiT()
model.init_weights()
model.train()
# Test qv_bias
model = BEiT(qv_bias=False)
model.train()
# Test out_indices = list
model = BEiT(out_indices=[2, 4, 8, 12])
model.train()
assert check_norm_state(model.modules(), True)
# Test image size = (224, 224)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test BEiT backbone with input size of 256 and patch size of 16
model = BEiT(img_size=(256, 256))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 16, 16)
# Test BEiT backbone with input size of 32 and patch size of 16
model = BEiT(img_size=(32, 32))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 2, 2)
# Test unbalanced size input image
model = BEiT(img_size=(112, 224))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 112, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 7, 14)
# Test irregular input image
model = BEiT(img_size=(234, 345))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 234, 345)
feat = model(imgs)
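    # With a 16-pixel patch, 234 x 345 yields 234 // 16 = 14 by 345 // 16 = 21 patches.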
assert feat[-1].shape == (1, 768, 14, 21)
# Test init_values=0
model = BEiT(init_values=0)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test final norm
model = BEiT(final_norm=True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test patch norm
model = BEiT(patch_norm=True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
def test_beit_init():
path = 'PATH_THAT_DO_NOT_EXIST'
# Test all combinations of pretrained and init_cfg
# pretrained=None, init_cfg=None
model = BEiT(pretrained=None, init_cfg=None)
assert model.init_cfg is None
model.init_weights()
# pretrained=None
    # init_cfg loads a pretrained checkpoint from a non-existent file
model = BEiT(
pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path))
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
# test resize_rel_pos_embed
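    # 732 = (2 * 14 - 1)**2 + 3: the relative position bias table size for a
    # 14 x 14 patch grid (224 / 16) plus 3 extra cls-token relations.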
value = torch.randn(732, 16)
ckpt = {
'state_dict': {
'layers.0.attn.relative_position_index': 0,
'layers.0.attn.relative_position_bias_table': value
}
}
model = BEiT(img_size=(512, 512))
ckpt = model.resize_rel_pos_embed(ckpt)
# pretrained=None
# init_cfg=123, whose type is unsupported
model = BEiT(pretrained=None, init_cfg=123)
with pytest.raises(TypeError):
model.init_weights()
    # pretrained loads a checkpoint from a non-existent file
# init_cfg=None
model = BEiT(pretrained=path, init_cfg=None)
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
    # pretrained loads a checkpoint from a non-existent file
    # init_cfg loads a pretrained checkpoint from a non-existent file
with pytest.raises(AssertionError):
model = BEiT(
pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path))
with pytest.raises(AssertionError):
model = BEiT(pretrained=path, init_cfg=123)
    # pretrained=123, whose type is unsupported
# init_cfg=None
with pytest.raises(TypeError):
model = BEiT(pretrained=123, init_cfg=None)
    # pretrained=123, whose type is unsupported
    # init_cfg loads a pretrained checkpoint from a non-existent file
with pytest.raises(AssertionError):
model = BEiT(
pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path))
    # pretrained=123, whose type is unsupported
# init_cfg=123, whose type is unsupported
with pytest.raises(AssertionError):
model = BEiT(pretrained=123, init_cfg=123)

# mmsegmentation-master/tests/test_models/test_backbones/test_bisenetv1.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import BiSeNetV1
from mmseg.models.backbones.bisenetv1 import (AttentionRefinementModule,
ContextPath, FeatureFusionModule,
SpatialPath)
def test_bisenetv1_backbone():
# Test BiSeNetV1 Standard Forward
backbone_cfg = dict(
type='ResNet',
in_channels=3,
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 1),
strides=(1, 2, 2, 2),
norm_eval=False,
style='pytorch',
contract_dilation=True)
model = BiSeNetV1(in_channels=3, backbone_cfg=backbone_cfg)
model.init_weights()
model.train()
batch_size = 2
imgs = torch.randn(batch_size, 3, 64, 128)
feat = model(imgs)
assert len(feat) == 3
    # output for the segmentation head
assert feat[0].shape == torch.Size([batch_size, 256, 8, 16])
# for auxiliary head 1
assert feat[1].shape == torch.Size([batch_size, 128, 8, 16])
# for auxiliary head 2
assert feat[2].shape == torch.Size([batch_size, 128, 4, 8])
    # Test input with an uncommon shape
batch_size = 2
imgs = torch.randn(batch_size, 3, 95, 27)
feat = model(imgs)
assert len(feat) == 3
with pytest.raises(AssertionError):
# BiSeNetV1 spatial path channel constraints.
BiSeNetV1(
backbone_cfg=backbone_cfg,
in_channels=3,
spatial_channels=(16, 16, 16))
with pytest.raises(AssertionError):
# BiSeNetV1 context path constraints.
BiSeNetV1(
backbone_cfg=backbone_cfg,
in_channels=3,
context_channels=(16, 32, 64, 128))
def test_bisenetv1_spatial_path():
with pytest.raises(AssertionError):
# BiSeNetV1 spatial path channel constraints.
SpatialPath(num_channels=(16, 16, 16), in_channels=3)
def test_bisenetv1_context_path():
backbone_cfg = dict(
type='ResNet',
in_channels=3,
depth=50,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 1, 1),
strides=(1, 2, 2, 2),
norm_eval=False,
style='pytorch',
contract_dilation=True)
with pytest.raises(AssertionError):
# BiSeNetV1 context path constraints.
ContextPath(
backbone_cfg=backbone_cfg, context_channels=(16, 32, 64, 128))
def test_bisenetv1_attention_refinement_module():
x_arm = AttentionRefinementModule(32, 8)
assert x_arm.conv_layer.in_channels == 32
assert x_arm.conv_layer.out_channels == 8
assert x_arm.conv_layer.kernel_size == (3, 3)
x = torch.randn(2, 32, 8, 16)
x_out = x_arm(x)
assert x_out.shape == torch.Size([2, 8, 8, 16])
def test_bisenetv1_feature_fusion_module():
ffm = FeatureFusionModule(16, 32)
assert ffm.conv1.in_channels == 16
assert ffm.conv1.out_channels == 32
assert ffm.conv1.kernel_size == (1, 1)
assert ffm.gap.output_size == (1, 1)
assert ffm.conv_atten[0].in_channels == 32
assert ffm.conv_atten[0].out_channels == 32
assert ffm.conv_atten[0].kernel_size == (1, 1)
ffm = FeatureFusionModule(16, 16)
x1 = torch.randn(2, 8, 8, 16)
x2 = torch.randn(2, 8, 8, 16)
x_out = ffm(x1, x2)
assert x_out.shape == torch.Size([2, 16, 8, 16])

# mmsegmentation-master/tests/test_models/test_backbones/test_bisenetv2.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule
from mmseg.models.backbones import BiSeNetV2
from mmseg.models.backbones.bisenetv2 import (BGALayer, DetailBranch,
SemanticBranch)
def test_bisenetv2_backbone():
# Test BiSeNetV2 Standard Forward
model = BiSeNetV2()
model.init_weights()
model.train()
batch_size = 2
imgs = torch.randn(batch_size, 3, 128, 256)
feat = model(imgs)
assert len(feat) == 5
    # output for the segmentation head
assert feat[0].shape == torch.Size([batch_size, 128, 16, 32])
# for auxiliary head 1
assert feat[1].shape == torch.Size([batch_size, 16, 32, 64])
# for auxiliary head 2
assert feat[2].shape == torch.Size([batch_size, 32, 16, 32])
# for auxiliary head 3
assert feat[3].shape == torch.Size([batch_size, 64, 8, 16])
# for auxiliary head 4
assert feat[4].shape == torch.Size([batch_size, 128, 4, 8])
    # Test input with an uncommon shape
batch_size = 2
imgs = torch.randn(batch_size, 3, 95, 27)
feat = model(imgs)
assert len(feat) == 5
def test_bisenetv2_DetailBranch():
x = torch.randn(1, 3, 32, 64)
detail_branch = DetailBranch(detail_channels=(64, 16, 32))
assert isinstance(detail_branch.detail_branch[0][0], ConvModule)
x_out = detail_branch(x)
assert x_out.shape == torch.Size([1, 32, 4, 8])
def test_bisenetv2_SemanticBranch():
semantic_branch = SemanticBranch(semantic_channels=(16, 32, 64, 128))
assert semantic_branch.stage1.pool.stride == 2
def test_bisenetv2_BGALayer():
x_a = torch.randn(1, 8, 8, 16)
x_b = torch.randn(1, 8, 2, 4)
bga = BGALayer(out_channels=8)
assert isinstance(bga.conv, ConvModule)
x_out = bga(x_a, x_b)
assert x_out.shape == torch.Size([1, 8, 8, 16])

# mmsegmentation-master/tests/test_models/test_backbones/test_blocks.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import pytest
import torch
from mmcv.utils import TORCH_VERSION, digit_version
from mmseg.models.utils import (InvertedResidual, InvertedResidualV3, SELayer,
make_divisible)
def test_make_divisible():
# test with min_value = None
assert make_divisible(10, 4) == 12
assert make_divisible(9, 4) == 12
assert make_divisible(1, 4) == 4
# test with min_value = 8
assert make_divisible(10, 4, 8) == 12
assert make_divisible(9, 4, 8) == 12
assert make_divisible(1, 4, 8) == 8
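    # A minimal sketch of the `make_divisible` rule exercised above, assuming
    # the usual round-to-nearest-multiple behavior with a 90% floor:
    #
    #     def make_divisible(value, divisor, min_value=None, min_ratio=0.9):
    #         if min_value is None:
    #             min_value = divisor
    #         new_value = max(min_value,
    #                         int(value + divisor / 2) // divisor * divisor)
    #         if new_value < min_ratio * value:  # never drop more than ~10%
    #             new_value += divisor
    #         return new_value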
def test_inv_residual():
with pytest.raises(AssertionError):
# test stride assertion.
InvertedResidual(32, 32, 3, 4)
# test default config with res connection.
# set expand_ratio = 4, stride = 1 and inp=oup.
inv_module = InvertedResidual(32, 32, 1, 4)
assert inv_module.use_res_connect
assert inv_module.conv[0].kernel_size == (1, 1)
assert inv_module.conv[0].padding == 0
assert inv_module.conv[1].kernel_size == (3, 3)
assert inv_module.conv[1].padding == 1
assert inv_module.conv[0].with_norm
assert inv_module.conv[1].with_norm
x = torch.rand(1, 32, 64, 64)
output = inv_module(x)
assert output.shape == (1, 32, 64, 64)
# test inv_residual module without res connection.
# set expand_ratio = 4, stride = 2.
inv_module = InvertedResidual(32, 32, 2, 4)
assert not inv_module.use_res_connect
assert inv_module.conv[0].kernel_size == (1, 1)
x = torch.rand(1, 32, 64, 64)
output = inv_module(x)
assert output.shape == (1, 32, 32, 32)
# test expand_ratio == 1
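    # (with expand_ratio == 1 the 1x1 expansion conv is skipped, so conv[0]
    # is the 3x3 depthwise conv checked below)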
inv_module = InvertedResidual(32, 32, 1, 1)
assert inv_module.conv[0].kernel_size == (3, 3)
x = torch.rand(1, 32, 64, 64)
output = inv_module(x)
assert output.shape == (1, 32, 64, 64)
# test with checkpoint forward
inv_module = InvertedResidual(32, 32, 1, 1, with_cp=True)
assert inv_module.with_cp
x = torch.rand(1, 32, 64, 64, requires_grad=True)
output = inv_module(x)
assert output.shape == (1, 32, 64, 64)
def test_inv_residualv3():
with pytest.raises(AssertionError):
# test stride assertion.
InvertedResidualV3(32, 32, 16, stride=3)
with pytest.raises(AssertionError):
        # with_expand_conv=False requires mid_channels to equal in_channels.
InvertedResidualV3(32, 32, 16, with_expand_conv=False)
# test with se_cfg=None, with_expand_conv=False
inv_module = InvertedResidualV3(32, 32, 32, with_expand_conv=False)
assert inv_module.with_res_shortcut is True
assert inv_module.with_se is False
assert inv_module.with_expand_conv is False
assert not hasattr(inv_module, 'expand_conv')
assert isinstance(inv_module.depthwise_conv.conv, torch.nn.Conv2d)
assert inv_module.depthwise_conv.conv.kernel_size == (3, 3)
assert inv_module.depthwise_conv.conv.stride == (1, 1)
assert inv_module.depthwise_conv.conv.padding == (1, 1)
assert isinstance(inv_module.depthwise_conv.bn, torch.nn.BatchNorm2d)
assert isinstance(inv_module.depthwise_conv.activate, torch.nn.ReLU)
assert inv_module.linear_conv.conv.kernel_size == (1, 1)
assert inv_module.linear_conv.conv.stride == (1, 1)
assert inv_module.linear_conv.conv.padding == (0, 0)
assert isinstance(inv_module.linear_conv.bn, torch.nn.BatchNorm2d)
x = torch.rand(1, 32, 64, 64)
output = inv_module(x)
assert output.shape == (1, 32, 64, 64)
# test with se_cfg and with_expand_conv
se_cfg = dict(
channels=16,
ratio=4,
act_cfg=(dict(type='ReLU'),
dict(type='HSigmoid', bias=3.0, divisor=6.0)))
act_cfg = dict(type='HSwish')
inv_module = InvertedResidualV3(
32, 40, 16, 3, 2, se_cfg=se_cfg, act_cfg=act_cfg)
assert inv_module.with_res_shortcut is False
assert inv_module.with_se is True
assert inv_module.with_expand_conv is True
assert inv_module.expand_conv.conv.kernel_size == (1, 1)
assert inv_module.expand_conv.conv.stride == (1, 1)
assert inv_module.expand_conv.conv.padding == (0, 0)
assert isinstance(inv_module.depthwise_conv.conv,
mmcv.cnn.bricks.Conv2dAdaptivePadding)
assert inv_module.depthwise_conv.conv.kernel_size == (3, 3)
assert inv_module.depthwise_conv.conv.stride == (2, 2)
assert inv_module.depthwise_conv.conv.padding == (0, 0)
assert isinstance(inv_module.depthwise_conv.bn, torch.nn.BatchNorm2d)
assert inv_module.linear_conv.conv.kernel_size == (1, 1)
assert inv_module.linear_conv.conv.stride == (1, 1)
assert inv_module.linear_conv.conv.padding == (0, 0)
assert isinstance(inv_module.linear_conv.bn, torch.nn.BatchNorm2d)
if (TORCH_VERSION == 'parrots'
or digit_version(TORCH_VERSION) < digit_version('1.7')):
# Note: Use PyTorch official HSwish
# when torch>=1.7 after MMCV >= 1.4.5.
# Hardswish is not supported when PyTorch version < 1.6.
# And Hardswish in PyTorch 1.6 does not support inplace.
# More details could be found from:
# https://github.com/open-mmlab/mmcv/pull/1709
assert isinstance(inv_module.expand_conv.activate, mmcv.cnn.HSwish)
assert isinstance(inv_module.depthwise_conv.activate, mmcv.cnn.HSwish)
else:
assert isinstance(inv_module.expand_conv.activate, torch.nn.Hardswish)
assert isinstance(inv_module.depthwise_conv.activate,
torch.nn.Hardswish)
x = torch.rand(1, 32, 64, 64)
output = inv_module(x)
assert output.shape == (1, 40, 32, 32)
# test with checkpoint forward
inv_module = InvertedResidualV3(
32, 40, 16, 3, 2, se_cfg=se_cfg, act_cfg=act_cfg, with_cp=True)
assert inv_module.with_cp
x = torch.randn(2, 32, 64, 64, requires_grad=True)
output = inv_module(x)
assert output.shape == (2, 40, 32, 32)
def test_se_layer():
with pytest.raises(AssertionError):
# test act_cfg assertion.
SELayer(32, act_cfg=(dict(type='ReLU'), ))
# test config with channels = 16.
se_layer = SELayer(16)
assert se_layer.conv1.conv.kernel_size == (1, 1)
assert se_layer.conv1.conv.stride == (1, 1)
assert se_layer.conv1.conv.padding == (0, 0)
assert isinstance(se_layer.conv1.activate, torch.nn.ReLU)
assert se_layer.conv2.conv.kernel_size == (1, 1)
assert se_layer.conv2.conv.stride == (1, 1)
assert se_layer.conv2.conv.padding == (0, 0)
assert isinstance(se_layer.conv2.activate, mmcv.cnn.HSigmoid)
x = torch.rand(1, 16, 64, 64)
output = se_layer(x)
assert output.shape == (1, 16, 64, 64)
# test config with channels = 16, act_cfg = dict(type='ReLU').
se_layer = SELayer(16, act_cfg=dict(type='ReLU'))
assert se_layer.conv1.conv.kernel_size == (1, 1)
assert se_layer.conv1.conv.stride == (1, 1)
assert se_layer.conv1.conv.padding == (0, 0)
assert isinstance(se_layer.conv1.activate, torch.nn.ReLU)
assert se_layer.conv2.conv.kernel_size == (1, 1)
assert se_layer.conv2.conv.stride == (1, 1)
assert se_layer.conv2.conv.padding == (0, 0)
assert isinstance(se_layer.conv2.activate, torch.nn.ReLU)
x = torch.rand(1, 16, 64, 64)
output = se_layer(x)
assert output.shape == (1, 16, 64, 64)

# mmsegmentation-master/tests/test_models/test_backbones/test_cgnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import CGNet
from mmseg.models.backbones.cgnet import (ContextGuidedBlock,
GlobalContextExtractor)
def test_cgnet_GlobalContextExtractor():
block = GlobalContextExtractor(16, 16, with_cp=True)
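    # with_cp=True routes the forward pass through torch.utils.checkpoint,
    # trading recomputation for lower activation memory.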
x = torch.randn(2, 16, 64, 64, requires_grad=True)
x_out = block(x)
assert x_out.shape == torch.Size([2, 16, 64, 64])
def test_cgnet_context_guided_block():
with pytest.raises(AssertionError):
# cgnet ContextGuidedBlock GlobalContextExtractor channel and reduction
# constraints.
ContextGuidedBlock(8, 8)
# test cgnet ContextGuidedBlock with checkpoint forward
block = ContextGuidedBlock(
16, 16, act_cfg=dict(type='PReLU'), with_cp=True)
assert block.with_cp
x = torch.randn(2, 16, 64, 64, requires_grad=True)
x_out = block(x)
assert x_out.shape == torch.Size([2, 16, 64, 64])
# test cgnet ContextGuidedBlock without checkpoint forward
block = ContextGuidedBlock(32, 32)
assert not block.with_cp
x = torch.randn(3, 32, 32, 32)
x_out = block(x)
assert x_out.shape == torch.Size([3, 32, 32, 32])
# test cgnet ContextGuidedBlock with down sampling
block = ContextGuidedBlock(32, 32, downsample=True)
assert block.conv1x1.conv.in_channels == 32
assert block.conv1x1.conv.out_channels == 32
assert block.conv1x1.conv.kernel_size == (3, 3)
assert block.conv1x1.conv.stride == (2, 2)
assert block.conv1x1.conv.padding == (1, 1)
assert block.f_loc.in_channels == 32
assert block.f_loc.out_channels == 32
assert block.f_loc.kernel_size == (3, 3)
assert block.f_loc.stride == (1, 1)
assert block.f_loc.padding == (1, 1)
assert block.f_loc.groups == 32
assert block.f_loc.dilation == (1, 1)
assert block.f_loc.bias is None
assert block.f_sur.in_channels == 32
assert block.f_sur.out_channels == 32
assert block.f_sur.kernel_size == (3, 3)
assert block.f_sur.stride == (1, 1)
assert block.f_sur.padding == (2, 2)
assert block.f_sur.groups == 32
assert block.f_sur.dilation == (2, 2)
assert block.f_sur.bias is None
assert block.bottleneck.in_channels == 64
assert block.bottleneck.out_channels == 32
assert block.bottleneck.kernel_size == (1, 1)
assert block.bottleneck.stride == (1, 1)
assert block.bottleneck.bias is None
x = torch.randn(1, 32, 32, 32)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 16, 16])
# test cgnet ContextGuidedBlock without down sampling
block = ContextGuidedBlock(32, 32, downsample=False)
assert block.conv1x1.conv.in_channels == 32
assert block.conv1x1.conv.out_channels == 16
assert block.conv1x1.conv.kernel_size == (1, 1)
assert block.conv1x1.conv.stride == (1, 1)
assert block.conv1x1.conv.padding == (0, 0)
assert block.f_loc.in_channels == 16
assert block.f_loc.out_channels == 16
assert block.f_loc.kernel_size == (3, 3)
assert block.f_loc.stride == (1, 1)
assert block.f_loc.padding == (1, 1)
assert block.f_loc.groups == 16
assert block.f_loc.dilation == (1, 1)
assert block.f_loc.bias is None
assert block.f_sur.in_channels == 16
assert block.f_sur.out_channels == 16
assert block.f_sur.kernel_size == (3, 3)
assert block.f_sur.stride == (1, 1)
assert block.f_sur.padding == (2, 2)
assert block.f_sur.groups == 16
assert block.f_sur.dilation == (2, 2)
assert block.f_sur.bias is None
x = torch.randn(1, 32, 32, 32)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 32, 32])
def test_cgnet_backbone():
with pytest.raises(AssertionError):
# check invalid num_channels
CGNet(num_channels=(32, 64, 128, 256))
with pytest.raises(AssertionError):
# check invalid num_blocks
CGNet(num_blocks=(3, 21, 3))
with pytest.raises(AssertionError):
# check invalid dilation
CGNet(num_blocks=2)
with pytest.raises(AssertionError):
# check invalid reduction
CGNet(reductions=16)
with pytest.raises(AssertionError):
# check invalid num_channels and reduction
CGNet(num_channels=(32, 64, 128), reductions=(64, 129))
# Test CGNet with default settings
model = CGNet()
model.init_weights()
model.train()
imgs = torch.randn(2, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size([2, 35, 112, 112])
assert feat[1].shape == torch.Size([2, 131, 56, 56])
assert feat[2].shape == torch.Size([2, 256, 28, 28])
# Test CGNet with norm_eval True and with_cp True
model = CGNet(norm_eval=True, with_cp=True)
with pytest.raises(TypeError):
# check invalid pretrained
model.init_weights(pretrained=8)
model.init_weights()
model.train()
imgs = torch.randn(2, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size([2, 35, 112, 112])
assert feat[1].shape == torch.Size([2, 131, 56, 56])
assert feat[2].shape == torch.Size([2, 256, 28, 28])

# mmsegmentation-master/tests/test_models/test_backbones/test_erfnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import ERFNet
from mmseg.models.backbones.erfnet import (DownsamplerBlock, NonBottleneck1d,
UpsamplerBlock)
def test_erfnet_backbone():
# Test ERFNet Standard Forward.
model = ERFNet(
in_channels=3,
enc_downsample_channels=(16, 64, 128),
enc_stage_non_bottlenecks=(5, 8),
enc_non_bottleneck_dilations=(2, 4, 8, 16),
enc_non_bottleneck_channels=(64, 128),
dec_upsample_channels=(64, 16),
dec_stages_non_bottleneck=(2, 2),
dec_non_bottleneck_channels=(64, 16),
dropout_ratio=0.1,
)
model.init_weights()
model.train()
batch_size = 2
imgs = torch.randn(batch_size, 3, 256, 512)
output = model(imgs)
    # output for the segmentation head
assert output[0].shape == torch.Size([batch_size, 16, 128, 256])
    # Test input with an uncommon shape
batch_size = 2
imgs = torch.randn(batch_size, 3, 527, 279)
output = model(imgs)
assert len(output[0]) == batch_size
with pytest.raises(AssertionError):
        # The numbers of encoder downsample blocks and decoder upsample
        # blocks must be consistent.
ERFNet(
in_channels=3,
enc_downsample_channels=(16, 64, 128),
enc_stage_non_bottlenecks=(5, 8),
enc_non_bottleneck_dilations=(2, 4, 8, 16),
enc_non_bottleneck_channels=(64, 128),
dec_upsample_channels=(128, 64, 16),
dec_stages_non_bottleneck=(2, 2),
dec_non_bottleneck_channels=(64, 16),
dropout_ratio=0.1,
)
with pytest.raises(AssertionError):
        # The numbers of encoder downsample blocks and encoder
        # Non-bottleneck stages must be consistent.
ERFNet(
in_channels=3,
enc_downsample_channels=(16, 64, 128),
enc_stage_non_bottlenecks=(5, 8, 10),
enc_non_bottleneck_dilations=(2, 4, 8, 16),
enc_non_bottleneck_channels=(64, 128),
dec_upsample_channels=(64, 16),
dec_stages_non_bottleneck=(2, 2),
dec_non_bottleneck_channels=(64, 16),
dropout_ratio=0.1,
)
with pytest.raises(AssertionError):
        # The number of encoder downsample blocks and the channel settings
        # of the encoder Non-bottleneck blocks must be consistent.
ERFNet(
in_channels=3,
enc_downsample_channels=(16, 64, 128),
enc_stage_non_bottlenecks=(5, 8),
enc_non_bottleneck_dilations=(2, 4, 8, 16),
enc_non_bottleneck_channels=(64, 128, 256),
dec_upsample_channels=(64, 16),
dec_stages_non_bottleneck=(2, 2),
dec_non_bottleneck_channels=(64, 16),
dropout_ratio=0.1,
)
with pytest.raises(AssertionError):
        # The number of encoder Non-bottleneck stages and the number of
        # their channel settings must be consistent.
ERFNet(
in_channels=3,
enc_downsample_channels=(16, 64, 128),
enc_stage_non_bottlenecks=(5, 8, 3),
enc_non_bottleneck_dilations=(2, 4, 8, 16),
enc_non_bottleneck_channels=(64, 128),
dec_upsample_channels=(64, 16),
dec_stages_non_bottleneck=(2, 2),
dec_non_bottleneck_channels=(64, 16),
dropout_ratio=0.1,
)
with pytest.raises(AssertionError):
        # The numbers of decoder upsample blocks and decoder Non-bottleneck
        # stages must be consistent.
ERFNet(
in_channels=3,
enc_downsample_channels=(16, 64, 128),
enc_stage_non_bottlenecks=(5, 8),
enc_non_bottleneck_dilations=(2, 4, 8, 16),
enc_non_bottleneck_channels=(64, 128),
dec_upsample_channels=(64, 16),
dec_stages_non_bottleneck=(2, 2, 3),
dec_non_bottleneck_channels=(64, 16),
dropout_ratio=0.1,
)
with pytest.raises(AssertionError):
        # The number of decoder Non-bottleneck stages and the number of
        # their channel settings must be consistent.
ERFNet(
in_channels=3,
enc_downsample_channels=(16, 64, 128),
enc_stage_non_bottlenecks=(5, 8),
enc_non_bottleneck_dilations=(2, 4, 8, 16),
enc_non_bottleneck_channels=(64, 128),
dec_upsample_channels=(64, 16),
dec_stages_non_bottleneck=(2, 2),
dec_non_bottleneck_channels=(64, 16, 8),
dropout_ratio=0.1,
)
def test_erfnet_downsampler_block():
x_db = DownsamplerBlock(16, 64)
assert x_db.conv.in_channels == 16
assert x_db.conv.out_channels == 48
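    # DownsamplerBlock concatenates the 48-channel strided conv output with
    # the 16-channel max-pooled input to form the 64-channel output.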
assert len(x_db.bn.weight) == 64
assert x_db.pool.kernel_size == 2
assert x_db.pool.stride == 2
def test_erfnet_non_bottleneck_1d():
x_nb1d = NonBottleneck1d(16, 0, 1)
assert x_nb1d.convs_layers[0].in_channels == 16
assert x_nb1d.convs_layers[0].out_channels == 16
assert x_nb1d.convs_layers[2].in_channels == 16
assert x_nb1d.convs_layers[2].out_channels == 16
assert x_nb1d.convs_layers[5].in_channels == 16
assert x_nb1d.convs_layers[5].out_channels == 16
assert x_nb1d.convs_layers[7].in_channels == 16
assert x_nb1d.convs_layers[7].out_channels == 16
assert x_nb1d.convs_layers[9].p == 0
def test_erfnet_upsampler_block():
x_ub = UpsamplerBlock(64, 16)
assert x_ub.conv.in_channels == 64
assert x_ub.conv.out_channels == 16
assert len(x_ub.bn.weight) == 16

# mmsegmentation-master/tests/test_models/test_backbones/test_fast_scnn.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import FastSCNN
def test_fastscnn_backbone():
with pytest.raises(AssertionError):
# Fast-SCNN channel constraints.
FastSCNN(
3, (32, 48),
64, (64, 96, 128), (2, 2, 1),
global_out_channels=127,
higher_in_channels=64,
lower_in_channels=128)
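        # global_out_channels=127 breaks the required equality with
        # lower_in_channels=128, triggering the assertion.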
# Test FastSCNN Standard Forward
model = FastSCNN(
in_channels=3,
downsample_dw_channels=(4, 6),
global_in_channels=8,
global_block_channels=(8, 12, 16),
global_block_strides=(2, 2, 1),
global_out_channels=16,
higher_in_channels=8,
lower_in_channels=16,
fusion_out_channels=16,
)
model.init_weights()
model.train()
batch_size = 4
imgs = torch.randn(batch_size, 3, 64, 128)
feat = model(imgs)
assert len(feat) == 3
# higher-res
assert feat[0].shape == torch.Size([batch_size, 8, 8, 16])
# lower-res
assert feat[1].shape == torch.Size([batch_size, 16, 2, 4])
# FFM output
assert feat[2].shape == torch.Size([batch_size, 16, 8, 16])

# mmsegmentation-master/tests/test_models/test_backbones/test_hrnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.utils.parrots_wrapper import _BatchNorm
from mmseg.models.backbones.hrnet import HRModule, HRNet
from mmseg.models.backbones.resnet import BasicBlock, Bottleneck
@pytest.mark.parametrize('block', [BasicBlock, Bottleneck])
def test_hrmodule(block):
# Test multiscale forward
    num_channels = (32, 64)
    in_channels = [c * block.expansion for c in num_channels]
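    # BasicBlock.expansion == 1 and Bottleneck.expansion == 4, so the branch
    # widths scale with the block type.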
hrmodule = HRModule(
num_branches=2,
blocks=block,
in_channels=in_channels,
num_blocks=(4, 4),
        num_channels=num_channels,
)
feats = [
torch.randn(1, in_channels[0], 64, 64),
torch.randn(1, in_channels[1], 32, 32)
]
feats = hrmodule(feats)
assert len(feats) == 2
assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
assert feats[1].shape == torch.Size([1, in_channels[1], 32, 32])
# Test single scale forward
    num_channels = (32, 64)
    in_channels = [c * block.expansion for c in num_channels]
hrmodule = HRModule(
num_branches=2,
blocks=block,
in_channels=in_channels,
num_blocks=(4, 4),
        num_channels=num_channels,
multiscale_output=False,
)
feats = [
torch.randn(1, in_channels[0], 64, 64),
torch.randn(1, in_channels[1], 32, 32)
]
feats = hrmodule(feats)
assert len(feats) == 1
assert feats[0].shape == torch.Size([1, in_channels[0], 64, 64])
def test_hrnet_backbone():
# only have 3 stages
extra = dict(
stage1=dict(
num_modules=1,
num_branches=1,
block='BOTTLENECK',
num_blocks=(4, ),
num_channels=(64, )),
stage2=dict(
num_modules=1,
num_branches=2,
block='BASIC',
num_blocks=(4, 4),
num_channels=(32, 64)),
stage3=dict(
num_modules=4,
num_branches=3,
block='BASIC',
num_blocks=(4, 4, 4),
num_channels=(32, 64, 128)))
with pytest.raises(AssertionError):
# HRNet now only support 4 stages
HRNet(extra=extra)
extra['stage4'] = dict(
num_modules=3,
num_branches=3, # should be 4
block='BASIC',
num_blocks=(4, 4, 4, 4),
num_channels=(32, 64, 128, 256))
with pytest.raises(AssertionError):
# len(num_blocks) should equal num_branches
HRNet(extra=extra)
extra['stage4']['num_branches'] = 4
# Test hrnetv2p_w32
model = HRNet(extra=extra)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 64, 64)
feats = model(imgs)
assert len(feats) == 4
assert feats[0].shape == torch.Size([1, 32, 16, 16])
assert feats[3].shape == torch.Size([1, 256, 2, 2])
# Test single scale output
model = HRNet(extra=extra, multiscale_output=False)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 64, 64)
feats = model(imgs)
assert len(feats) == 1
assert feats[0].shape == torch.Size([1, 32, 16, 16])
# Test HRNET with two stage frozen
frozen_stages = 2
model = HRNet(extra, frozen_stages=frozen_stages)
model.init_weights()
model.train()
assert model.norm1.training is False
for layer in [model.conv1, model.norm1]:
for param in layer.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
if i == 1:
layer = getattr(model, f'layer{i}')
transition = getattr(model, f'transition{i}')
elif i == 4:
layer = getattr(model, f'stage{i}')
else:
layer = getattr(model, f'stage{i}')
transition = getattr(model, f'transition{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
for mod in transition.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in transition.parameters():
assert param.requires_grad is False

# mmsegmentation-master/tests/test_models/test_backbones/test_icnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import ICNet
def test_icnet_backbone():
with pytest.raises(TypeError):
# Must give backbone dict in config file.
ICNet(
in_channels=3,
layer_channels=(128, 512),
light_branch_middle_channels=8,
psp_out_channels=128,
out_channels=(16, 128, 128),
backbone_cfg=None)
# Test ICNet Standard Forward
model = ICNet(
layer_channels=(128, 512),
backbone_cfg=dict(
type='ResNetV1c',
in_channels=3,
depth=18,
num_stages=4,
out_indices=(0, 1, 2, 3),
dilations=(1, 1, 2, 4),
strides=(1, 2, 1, 1),
norm_cfg=dict(type='BN', requires_grad=True),
norm_eval=False,
style='pytorch',
contract_dilation=True),
)
assert hasattr(model.backbone,
'maxpool') and model.backbone.maxpool.ceil_mode is True
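    # ceil_mode=True makes the maxpool round odd spatial sizes up, which
    # helps keep ICNet's multi-resolution branches alignable.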
model.init_weights()
model.train()
batch_size = 2
imgs = torch.randn(batch_size, 3, 32, 64)
feat = model(imgs)
assert model.psp_modules[0][0].output_size == 1
assert model.psp_modules[1][0].output_size == 2
assert model.psp_modules[2][0].output_size == 3
assert model.psp_bottleneck.padding == 1
assert model.conv_sub1[0].padding == 1
assert len(feat) == 3
assert feat[0].shape == torch.Size([batch_size, 64, 4, 8])

# mmsegmentation-master/tests/test_models/test_backbones/test_mae.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones.mae import MAE
from .utils import check_norm_state
def test_mae_backbone():
with pytest.raises(TypeError):
# pretrained must be a string path
model = MAE()
model.init_weights(pretrained=0)
with pytest.raises(TypeError):
# img_size must be int or tuple
model = MAE(img_size=512.0)
with pytest.raises(TypeError):
        # out_indices must be int, list or tuple
model = MAE(out_indices=1.)
with pytest.raises(AssertionError):
        # The length of the img_size tuple must be at most 2.
MAE(img_size=(224, 224, 224))
with pytest.raises(TypeError):
        # pretrained must be None or str.
MAE(pretrained=123)
    # Test img_size as a 1-tuple
imgs = torch.randn(1, 3, 224, 224)
model = MAE(img_size=(224, ))
model.init_weights()
model(imgs)
    # Test img_size as a 2-tuple
imgs = torch.randn(1, 3, 224, 224)
model = MAE(img_size=(224, 224))
model(imgs)
# Test norm_eval = True
model = MAE(norm_eval=True)
model.train()
    # Test MAE backbone with input size of 224 and patch size of 16
model = MAE()
model.init_weights()
model.train()
# Test out_indices = list
model = MAE(out_indices=[2, 4, 8, 12])
model.train()
assert check_norm_state(model.modules(), True)
# Test image size = (224, 224)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test MAE backbone with input size of 256 and patch size of 16
model = MAE(img_size=(256, 256))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 16, 16)
# Test MAE backbone with input size of 32 and patch size of 16
model = MAE(img_size=(32, 32))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 2, 2)
# Test unbalanced size input image
model = MAE(img_size=(112, 224))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 112, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 7, 14)
# Test irregular input image
model = MAE(img_size=(234, 345))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 234, 345)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 21)
# Test init_values=0
model = MAE(init_values=0)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test final norm
model = MAE(final_norm=True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test patch norm
model = MAE(patch_norm=True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
def test_mae_init():
path = 'PATH_THAT_DO_NOT_EXIST'
# Test all combinations of pretrained and init_cfg
# pretrained=None, init_cfg=None
model = MAE(pretrained=None, init_cfg=None)
assert model.init_cfg is None
model.init_weights()
# pretrained=None
    # init_cfg loads a pretrained checkpoint from a non-existent file
model = MAE(
pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path))
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
# test resize_rel_pos_embed
value = torch.randn(732, 16)
abs_pos_embed_value = torch.rand(1, 17, 768)
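    # 17 = 1 cls token + a 4 x 4 patch grid; the resize helpers are expected
    # to interpolate this grid to the target patch resolution.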
ckpt = {
'state_dict': {
'layers.0.attn.relative_position_index': 0,
'layers.0.attn.relative_position_bias_table': value,
'pos_embed': abs_pos_embed_value
}
}
model = MAE(img_size=(512, 512))
ckpt = model.resize_rel_pos_embed(ckpt)
# test resize abs pos embed
value = torch.randn(732, 16)
abs_pos_embed_value = torch.rand(1, 17, 768)
ckpt = {
'state_dict': {
'layers.0.attn.relative_position_index': 0,
'layers.0.attn.relative_position_bias_table': value,
'pos_embed': abs_pos_embed_value
}
}
model = MAE(img_size=(512, 512))
ckpt = model.resize_abs_pos_embed(ckpt['state_dict'])
# pretrained=None
# init_cfg=123, whose type is unsupported
model = MAE(pretrained=None, init_cfg=123)
with pytest.raises(TypeError):
model.init_weights()
    # pretrained loads a checkpoint from a non-existent file
# init_cfg=None
model = MAE(pretrained=path, init_cfg=None)
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
    # pretrained loads a checkpoint from a non-existent file
    # init_cfg loads a pretrained checkpoint from a non-existent file
with pytest.raises(AssertionError):
model = MAE(
pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path))
with pytest.raises(AssertionError):
model = MAE(pretrained=path, init_cfg=123)
    # pretrained=123, whose type is unsupported
# init_cfg=None
with pytest.raises(TypeError):
model = MAE(pretrained=123, init_cfg=None)
    # pretrained=123, whose type is unsupported
    # init_cfg loads a pretrained checkpoint from a non-existent file
with pytest.raises(AssertionError):
model = MAE(
pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path))
    # pretrained=123, whose type is unsupported
# init_cfg=123, whose type is unsupported
with pytest.raises(AssertionError):
model = MAE(pretrained=123, init_cfg=123)

# mmsegmentation-master/tests/test_models/test_backbones/test_mit.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import MixVisionTransformer
from mmseg.models.backbones.mit import (EfficientMultiheadAttention, MixFFN,
TransformerEncoderLayer)
def test_mit():
with pytest.raises(TypeError):
        # pretrained represents a pretrained-model url and must be str or None.
MixVisionTransformer(pretrained=123)
# Test normal input
H, W = (224, 224)
temp = torch.randn((1, 3, H, W))
model = MixVisionTransformer(
embed_dims=32, num_heads=[1, 2, 5, 8], out_indices=(0, 1, 2, 3))
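    # The four stages emit strides 4, 8, 16 and 32 with widths
    # embed_dims * [1, 2, 5, 8] = [32, 64, 160, 256], matching the asserts.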
model.init_weights()
outs = model(temp)
assert outs[0].shape == (1, 32, H // 4, W // 4)
assert outs[1].shape == (1, 64, H // 8, W // 8)
assert outs[2].shape == (1, 160, H // 16, W // 16)
assert outs[3].shape == (1, 256, H // 32, W // 32)
    # Test non-square input
H, W = (224, 256)
temp = torch.randn((1, 3, H, W))
outs = model(temp)
assert outs[0].shape == (1, 32, H // 4, W // 4)
assert outs[1].shape == (1, 64, H // 8, W // 8)
assert outs[2].shape == (1, 160, H // 16, W // 16)
assert outs[3].shape == (1, 256, H // 32, W // 32)
# Test MixFFN
FFN = MixFFN(64, 128)
hw_shape = (32, 32)
token_len = 32 * 32
temp = torch.randn((1, token_len, 64))
    # identity defaults to the input
out = FFN(temp, hw_shape)
assert out.shape == (1, token_len, 64)
    # identity passed explicitly
outs = FFN(temp, hw_shape, temp)
    assert outs.shape == (1, token_len, 64)
# Test EfficientMHA
MHA = EfficientMultiheadAttention(64, 2)
hw_shape = (32, 32)
token_len = 32 * 32
temp = torch.randn((1, token_len, 64))
    # identity defaults to the input
out = MHA(temp, hw_shape)
assert out.shape == (1, token_len, 64)
    # identity passed explicitly
outs = MHA(temp, hw_shape, temp)
    assert outs.shape == (1, token_len, 64)
# Test TransformerEncoderLayer with checkpoint forward
block = TransformerEncoderLayer(
embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)
assert block.with_cp
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
def test_mit_init():
path = 'PATH_THAT_DO_NOT_EXIST'
# Test all combinations of pretrained and init_cfg
# pretrained=None, init_cfg=None
model = MixVisionTransformer(pretrained=None, init_cfg=None)
assert model.init_cfg is None
model.init_weights()
# pretrained=None
    # init_cfg loads a pretrained checkpoint from a non-existent file
model = MixVisionTransformer(
pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path))
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
# pretrained=None
# init_cfg=123, whose type is unsupported
model = MixVisionTransformer(pretrained=None, init_cfg=123)
with pytest.raises(TypeError):
model.init_weights()
    # pretrained loads a checkpoint from a non-existent file
# init_cfg=None
model = MixVisionTransformer(pretrained=path, init_cfg=None)
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
    # pretrained loads a checkpoint from a non-existent file
    # init_cfg loads a pretrained checkpoint from a non-existent file
with pytest.raises(AssertionError):
MixVisionTransformer(
pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path))
with pytest.raises(AssertionError):
MixVisionTransformer(pretrained=path, init_cfg=123)
    # pretrained=123, whose type is unsupported
# init_cfg=None
with pytest.raises(TypeError):
MixVisionTransformer(pretrained=123, init_cfg=None)
    # pretrained=123, whose type is unsupported
    # init_cfg loads a pretrained checkpoint from a non-existent file
with pytest.raises(AssertionError):
MixVisionTransformer(
pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path))
    # pretrained=123, whose type is unsupported
# init_cfg=123, whose type is unsupported
with pytest.raises(AssertionError):
MixVisionTransformer(pretrained=123, init_cfg=123)

# mmsegmentation-master/tests/test_models/test_backbones/test_mobilenet_v3.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import MobileNetV3
def test_mobilenet_v3():
with pytest.raises(AssertionError):
# check invalid arch
MobileNetV3('big')
with pytest.raises(AssertionError):
# check invalid reduction_factor
MobileNetV3(reduction_factor=0)
with pytest.raises(ValueError):
# check invalid out_indices
MobileNetV3(out_indices=(0, 1, 15))
with pytest.raises(ValueError):
# check invalid frozen_stages
MobileNetV3(frozen_stages=15)
with pytest.raises(TypeError):
# check invalid pretrained
model = MobileNetV3()
model.init_weights(pretrained=8)
# Test MobileNetV3 with default settings
model = MobileNetV3()
model.init_weights()
model.train()
imgs = torch.randn(2, 3, 56, 56)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == (2, 16, 28, 28)
assert feat[1].shape == (2, 16, 14, 14)
assert feat[2].shape == (2, 576, 7, 7)
# Test MobileNetV3 with arch = 'large'
model = MobileNetV3(arch='large', out_indices=(1, 3, 16))
model.init_weights()
model.train()
imgs = torch.randn(2, 3, 56, 56)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == (2, 16, 28, 28)
assert feat[1].shape == (2, 24, 14, 14)
assert feat[2].shape == (2, 960, 7, 7)
# Test MobileNetV3 with norm_eval True, with_cp True and frozen_stages=5
model = MobileNetV3(norm_eval=True, with_cp=True, frozen_stages=5)
with pytest.raises(TypeError):
# check invalid pretrained
model.init_weights(pretrained=8)
model.init_weights()
model.train()
imgs = torch.randn(2, 3, 56, 56)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == (2, 16, 28, 28)
assert feat[1].shape == (2, 16, 14, 14)
assert feat[2].shape == (2, 576, 7, 7)

# mmsegmentation-master/tests/test_models/test_backbones/test_mscan.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.backbones import MSCAN
from mmseg.models.backbones.mscan import (MSCAAttention, MSCASpatialAttention,
OverlapPatchEmbed, StemConv)
def test_mscan_backbone():
# Test MSCAN Standard Forward
model = MSCAN(
embed_dims=[8, 16, 32, 64],
norm_cfg=dict(type='BN', requires_grad=True))
model.init_weights()
model.train()
batch_size = 2
imgs = torch.randn(batch_size, 3, 64, 128)
feat = model(imgs)
assert len(feat) == 4
    # output for the segmentation head
assert feat[0].shape == torch.Size([batch_size, 8, 16, 32])
assert feat[1].shape == torch.Size([batch_size, 16, 8, 16])
assert feat[2].shape == torch.Size([batch_size, 32, 4, 8])
assert feat[3].shape == torch.Size([batch_size, 64, 2, 4])
    # Test input with an uncommon shape
batch_size = 2
imgs = torch.randn(batch_size, 3, 95, 27)
feat = model(imgs)
assert len(feat) == 4
def test_mscan_overlap_patch_embed_module():
x_overlap_patch_embed = OverlapPatchEmbed(
norm_cfg=dict(type='BN', requires_grad=True))
assert x_overlap_patch_embed.proj.in_channels == 3
assert x_overlap_patch_embed.norm.weight.shape == torch.Size([768])
x = torch.randn(2, 3, 16, 32)
x_out, H, W = x_overlap_patch_embed(x)
assert x_out.shape == torch.Size([2, 32, 768])
def test_mscan_spatial_attention_module():
x_spatial_attention = MSCASpatialAttention(8)
assert x_spatial_attention.proj_1.kernel_size == (1, 1)
assert x_spatial_attention.proj_2.stride == (1, 1)
x = torch.randn(2, 8, 16, 32)
x_out = x_spatial_attention(x)
assert x_out.shape == torch.Size([2, 8, 16, 32])
def test_mscan_attention_module():
x_attention = MSCAAttention(8)
assert x_attention.conv0.weight.shape[0] == 8
assert x_attention.conv3.kernel_size == (1, 1)
x = torch.randn(2, 8, 16, 32)
x_out = x_attention(x)
assert x_out.shape == torch.Size([2, 8, 16, 32])
def test_mscan_stem_module():
x_stem = StemConv(8, 8, norm_cfg=dict(type='BN', requires_grad=True))
assert x_stem.proj[0].weight.shape[0] == 4
assert x_stem.proj[-1].weight.shape[0] == 8
x = torch.randn(2, 8, 16, 32)
x_out, H, W = x_stem(x)
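    # StemConv downsamples by 4 (16 x 32 -> 4 x 8) and flattens the feature
    # map to (B, H * W = 32, C = 8), matching the asserts below.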
assert x_out.shape == torch.Size([2, 32, 8])
assert (H, W) == (4, 8)

# mmsegmentation-master/tests/test_models/test_backbones/test_resnest.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import ResNeSt
from mmseg.models.backbones.resnest import Bottleneck as BottleneckS
def test_resnest_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')
# Test ResNeSt Bottleneck structure
block = BottleneckS(
64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch')
assert block.avd_layer.stride == 2
assert block.conv2.channels == 256
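    # With avd enabled, the 2x downsampling moves from conv2 into the
    # dedicated average-pooling layer, hence avd_layer.stride == 2 above.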
# Test ResNeSt Bottleneck forward
block = BottleneckS(64, 16, radix=2, reduction_factor=4)
x = torch.randn(2, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([2, 64, 56, 56])
def test_resnest_backbone():
with pytest.raises(KeyError):
# ResNeSt depth should be in [50, 101, 152, 200]
ResNeSt(depth=18)
# Test ResNeSt with radix 2, reduction_factor 4
model = ResNeSt(
depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3))
model.init_weights()
model.train()
imgs = torch.randn(2, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([2, 256, 56, 56])
assert feat[1].shape == torch.Size([2, 512, 28, 28])
assert feat[2].shape == torch.Size([2, 1024, 14, 14])
assert feat[3].shape == torch.Size([2, 2048, 7, 7])

# mmsegmentation-master/tests/test_models/test_backbones/test_resnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.ops import DeformConv2dPack
from mmcv.utils.parrots_wrapper import _BatchNorm
from torch.nn.modules import AvgPool2d, GroupNorm
from mmseg.models.backbones import ResNet, ResNetV1d
from mmseg.models.backbones.resnet import BasicBlock, Bottleneck
from mmseg.models.utils import ResLayer
from .utils import all_zeros, check_norm_state, is_block, is_norm
def test_resnet_basic_block():
with pytest.raises(AssertionError):
# Not implemented yet.
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
BasicBlock(64, 64, dcn=dcn)
with pytest.raises(AssertionError):
# Not implemented yet.
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
BasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
BasicBlock(64, 64, plugins=plugins)
# Test BasicBlock with checkpoint forward
block = BasicBlock(16, 16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 16, 28, 28)
x_out = block(x)
assert x_out.shape == torch.Size([1, 16, 28, 28])
# test BasicBlock structure and forward
block = BasicBlock(32, 32)
assert block.conv1.in_channels == 32
assert block.conv1.out_channels == 32
assert block.conv1.kernel_size == (3, 3)
assert block.conv2.in_channels == 32
assert block.conv2.out_channels == 32
assert block.conv2.kernel_size == (3, 3)
x = torch.randn(1, 32, 28, 28)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 28, 28])
def test_resnet_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
Bottleneck(64, 64, style='tensorflow')
with pytest.raises(AssertionError):
# Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv4')
]
Bottleneck(64, 16, plugins=plugins)
with pytest.raises(AssertionError):
# Need to specify different postfix to avoid duplicate plugin name
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
Bottleneck(64, 16, plugins=plugins)
with pytest.raises(KeyError):
# Plugin type is not supported
plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
Bottleneck(64, 16, plugins=plugins)
# Test Bottleneck with checkpoint forward
block = Bottleneck(64, 16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck style
block = Bottleneck(64, 64, stride=2, style='pytorch')
assert block.conv1.stride == (1, 1)
assert block.conv2.stride == (2, 2)
block = Bottleneck(64, 64, stride=2, style='caffe')
assert block.conv1.stride == (2, 2)
assert block.conv2.stride == (1, 1)
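    # 'pytorch' style places the stride-2 on the 3x3 conv2, while 'caffe'
    # style places it on the 1x1 conv1, as the two blocks above show.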
# Test Bottleneck DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
block = Bottleneck(64, 64, dcn=dcn)
assert isinstance(block.conv2, DeformConv2dPack)
# Test Bottleneck forward
block = Bottleneck(64, 16)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2d
# after conv2, 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
assert block.nonlocal_block.in_channels == 16
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
# conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.context_block1.in_channels == 16
assert block.context_block2.in_channels == 64
assert block.context_block3.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_res_layer():
    # Test ResLayer of 3 Bottlenecks w/o downsample
layer = ResLayer(Bottleneck, 64, 16, 3)
assert len(layer) == 3
assert layer[0].conv1.in_channels == 64
assert layer[0].conv1.out_channels == 16
for i in range(1, len(layer)):
assert layer[i].conv1.in_channels == 64
assert layer[i].conv1.out_channels == 16
for i in range(len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResLayer of 3 Bottleneck with downsample
layer = ResLayer(Bottleneck, 64, 64, 3)
assert layer[0].downsample[0].out_channels == 256
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 56, 56])
# Test ResLayer of 3 Bottleneck with stride=2
layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)
assert layer[0].downsample[0].out_channels == 256
assert layer[0].downsample[0].stride == (2, 2)
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 28, 28])
# Test ResLayer of 3 Bottleneck with stride=2 and average downsample
layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)
assert isinstance(layer[0].downsample[0], AvgPool2d)
assert layer[0].downsample[1].out_channels == 256
assert layer[0].downsample[1].stride == (1, 1)
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 28, 28])
# Test ResLayer of 3 Bottleneck with dilation=2
layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2)
for i in range(len(layer)):
assert layer[i].conv2.dilation == (2, 2)
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResLayer of 3 Bottleneck with dilation=2, contract_dilation=True
layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2, contract_dilation=True)
assert layer[0].conv2.dilation == (1, 1)
for i in range(1, len(layer)):
assert layer[i].conv2.dilation == (2, 2)
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResLayer of 3 Bottleneck with dilation=2, multi_grid
layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2, multi_grid=(1, 2, 4))
assert layer[0].conv2.dilation == (1, 1)
assert layer[1].conv2.dilation == (2, 2)
assert layer[2].conv2.dilation == (4, 4)
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_backbone():
"""Test resnet backbone."""
with pytest.raises(KeyError):
# ResNet depth should be in [18, 34, 50, 101, 152]
ResNet(20)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=0)
with pytest.raises(AssertionError):
# len(stage_with_dcn) == num_stages
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
ResNet(50, dcn=dcn, stage_with_dcn=(True, ))
with pytest.raises(AssertionError):
# len(stage_with_plugin) == num_stages
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True),
position='after_conv3')
]
ResNet(50, plugins=plugins)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(18, num_stages=5)
with pytest.raises(AssertionError):
# len(strides) == len(dilations) == num_stages
ResNet(18, strides=(1, ), dilations=(1, 1), num_stages=3)
with pytest.raises(TypeError):
# pretrained must be a string path
model = ResNet(18, pretrained=0)
model.init_weights()
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
ResNet(50, style='tensorflow')
# Test ResNet18 norm_eval=True
model = ResNet(18, norm_eval=True)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test ResNet18 with torchvision pretrained weight
model = ResNet(
depth=18, norm_eval=True, pretrained='torchvision://resnet18')
model.init_weights()
model.train()
assert check_norm_state(model.modules(), False)
# Test ResNet18 with first stage frozen
frozen_stages = 1
model = ResNet(18, frozen_stages=frozen_stages)
model.init_weights()
model.train()
assert model.norm1.training is False
for layer in [model.conv1, model.norm1]:
for param in layer.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, 'layer{}'.format(i))
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test ResNet18V1d with first stage frozen
model = ResNetV1d(depth=18, frozen_stages=frozen_stages)
assert len(model.stem) == 9
model.init_weights()
model.train()
check_norm_state(model.stem, False)
for param in model.stem.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, 'layer{}'.format(i))
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test ResNet18 forward
model = ResNet(18)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 64, 56, 56])
assert feat[1].shape == torch.Size([1, 128, 28, 28])
assert feat[2].shape == torch.Size([1, 256, 14, 14])
assert feat[3].shape == torch.Size([1, 512, 7, 7])
# Test ResNet18 with BatchNorm forward
model = ResNet(18)
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 64, 56, 56])
assert feat[1].shape == torch.Size([1, 128, 28, 28])
assert feat[2].shape == torch.Size([1, 256, 14, 14])
assert feat[3].shape == torch.Size([1, 512, 7, 7])
# Test ResNet18 with layers 1, 2, 3 out forward
model = ResNet(18, out_indices=(0, 1, 2))
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 112, 112)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size([1, 64, 28, 28])
assert feat[1].shape == torch.Size([1, 128, 14, 14])
assert feat[2].shape == torch.Size([1, 256, 7, 7])
# Test ResNet18 with checkpoint forward
model = ResNet(18, with_cp=True)
for m in model.modules():
if is_block(m):
assert m.with_cp
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 64, 56, 56])
assert feat[1].shape == torch.Size([1, 128, 28, 28])
assert feat[2].shape == torch.Size([1, 256, 14, 14])
assert feat[3].shape == torch.Size([1, 512, 7, 7])
# Test ResNet18 with GroupNorm forward
model = ResNet(
18, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 64, 56, 56])
assert feat[1].shape == torch.Size([1, 128, 28, 28])
assert feat[2].shape == torch.Size([1, 256, 14, 14])
assert feat[3].shape == torch.Size([1, 512, 7, 7])
    # Test ResNet50 with 1 GeneralizedAttention after conv2 in layers 2, 3, 4;
    # 1 NonLocal2d after conv2 in all layers and 1 ContextBlock after conv3
    # in layers 2, 3
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, True, True, True),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, False),
position='after_conv3')
]
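    # How the plugin cfg above is expected to be read (a hedged summary,
    # exercised by the per-layer asserts below): `stages` selects which of
    # the four res-layers receive the plugin, omitting `stages` (as for
    # NonLocal2d) applies it to every layer, and `position` picks the
    # insertion point inside each Bottleneck.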
model = ResNet(50, plugins=plugins)
for m in model.layer1.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'gen_attention_block')
assert m.nonlocal_block.in_channels == 64
for m in model.layer2.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 128
assert m.gen_attention_block.in_channels == 128
assert m.context_block.in_channels == 512
for m in model.layer3.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 256
assert m.gen_attention_block.in_channels == 256
assert m.context_block.in_channels == 1024
for m in model.layer4.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 512
assert m.gen_attention_block.in_channels == 512
assert not hasattr(m, 'context_block')
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
    # Test ResNet50 with 2 ContextBlocks (postfix 1 and 2) after conv3 in
    # layers 2, 3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
stages=(False, True, True, False),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
stages=(False, True, True, False),
position='after_conv3')
]
model = ResNet(50, plugins=plugins)
for m in model.layer1.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'context_block1')
assert not hasattr(m, 'context_block2')
for m in model.layer2.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert m.context_block1.in_channels == 512
assert m.context_block2.in_channels == 512
for m in model.layer3.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert m.context_block1.in_channels == 1024
assert m.context_block2.in_channels == 1024
for m in model.layer4.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'context_block1')
assert not hasattr(m, 'context_block2')
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
# Test ResNet18 zero initialization of residual
model = ResNet(18, zero_init_residual=True)
model.init_weights()
for m in model.modules():
if isinstance(m, Bottleneck):
assert all_zeros(m.norm3)
elif isinstance(m, BasicBlock):
assert all_zeros(m.norm2)
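    # zero_init_residual zeroes the last norm of each block so every
    # residual branch starts as an identity mapping -- a common trick for
    # stabilising early training (the rationale here is an assumption; only
    # the zeroed weights are asserted).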
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 64, 56, 56])
assert feat[1].shape == torch.Size([1, 128, 28, 28])
assert feat[2].shape == torch.Size([1, 256, 14, 14])
assert feat[3].shape == torch.Size([1, 512, 7, 7])
# Test ResNetV1d forward
model = ResNetV1d(depth=18)
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 64, 56, 56])
assert feat[1].shape == torch.Size([1, 128, 28, 28])
assert feat[2].shape == torch.Size([1, 256, 14, 14])
assert feat[3].shape == torch.Size([1, 512, 7, 7])
| 20,399 | 34.416667 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_backbones/test_resnext.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import ResNeXt
from mmseg.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_resnext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')
# Test ResNeXt Bottleneck structure
block = BottleneckX(
64, 64, groups=32, base_width=4, stride=2, style='pytorch')
assert block.conv2.stride == (2, 2)
assert block.conv2.groups == 32
assert block.conv2.out_channels == 128
# Test ResNeXt Bottleneck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
BottleneckX(
64,
64,
groups=32,
base_width=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
BottleneckX(64, 64, dcn=dcn)
# Test ResNeXt Bottleneck forward
block = BottleneckX(64, 16, groups=32, base_width=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnext_backbone():
with pytest.raises(KeyError):
# ResNeXt depth should be in [50, 101, 152]
ResNeXt(depth=18)
# Test ResNeXt with group 32, base_width 4
model = ResNeXt(depth=50, groups=32, base_width=4)
for m in model.modules():
if is_block(m):
assert m.conv2.groups == 32
model.init_weights()
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 56, 56])
assert feat[1].shape == torch.Size([1, 512, 28, 28])
assert feat[2].shape == torch.Size([1, 1024, 14, 14])
assert feat[3].shape == torch.Size([1, 2048, 7, 7])
| 1,982 | 30.47619 | 72 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_backbones/test_stdc.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import STDCContextPathNet
from mmseg.models.backbones.stdc import (AttentionRefinementModule,
FeatureFusionModule, STDCModule,
STDCNet)
def test_stdc_context_path_net():
# Test STDCContextPathNet Standard Forward
model = STDCContextPathNet(
backbone_cfg=dict(
type='STDCNet',
stdc_type='STDCNet1',
in_channels=3,
channels=(32, 64, 256, 512, 1024),
bottleneck_type='cat',
num_convs=4,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU'),
with_final_conv=True),
last_in_channels=(1024, 512),
out_channels=128,
ffm_cfg=dict(in_channels=384, out_channels=256, scale_factor=4))
model.init_weights()
model.train()
batch_size = 2
imgs = torch.randn(batch_size, 3, 256, 512)
feat = model(imgs)
assert len(feat) == 4
# output for segment Head
assert feat[0].shape == torch.Size([batch_size, 256, 32, 64])
# for auxiliary head 1
assert feat[1].shape == torch.Size([batch_size, 128, 16, 32])
# for auxiliary head 2
assert feat[2].shape == torch.Size([batch_size, 128, 32, 64])
# for auxiliary head 3
assert feat[3].shape == torch.Size([batch_size, 256, 32, 64])
    # Test input with an irregular shape
batch_size = 2
imgs = torch.randn(batch_size, 3, 527, 279)
model = STDCContextPathNet(
backbone_cfg=dict(
type='STDCNet',
stdc_type='STDCNet1',
in_channels=3,
channels=(32, 64, 256, 512, 1024),
bottleneck_type='add',
num_convs=4,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU'),
with_final_conv=False),
last_in_channels=(1024, 512),
out_channels=128,
ffm_cfg=dict(in_channels=384, out_channels=256, scale_factor=4))
model.init_weights()
model.train()
feat = model(imgs)
assert len(feat) == 4
def test_stdcnet():
with pytest.raises(AssertionError):
        # stdc_type must be a supported arch ('STDCNet1' or 'STDCNet2').
STDCNet(
stdc_type='STDCNet3',
in_channels=3,
channels=(32, 64, 256, 512, 1024),
bottleneck_type='cat',
num_convs=4,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU'),
with_final_conv=False)
with pytest.raises(AssertionError):
        # bottleneck_type must be in ['add', 'cat'].
STDCNet(
stdc_type='STDCNet1',
in_channels=3,
channels=(32, 64, 256, 512, 1024),
bottleneck_type='dog',
num_convs=4,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU'),
with_final_conv=False)
with pytest.raises(AssertionError):
        # channels must contain exactly 5 entries.
STDCNet(
stdc_type='STDCNet1',
in_channels=3,
channels=(16, 32, 64, 256, 512, 1024),
bottleneck_type='cat',
num_convs=4,
norm_cfg=dict(type='BN', requires_grad=True),
act_cfg=dict(type='ReLU'),
with_final_conv=False)
def test_feature_fusion_module():
x_ffm = FeatureFusionModule(in_channels=64, out_channels=32)
assert x_ffm.conv0.in_channels == 64
assert x_ffm.attention[1].in_channels == 32
assert x_ffm.attention[2].in_channels == 8
assert x_ffm.attention[2].out_channels == 32
x1 = torch.randn(2, 32, 32, 64)
x2 = torch.randn(2, 32, 32, 64)
x_out = x_ffm(x1, x2)
assert x_out.shape == torch.Size([2, 32, 32, 64])
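    # Channel bookkeeping implied by the attribute checks above: FFM
    # concatenates the two 32-channel inputs (hence conv0.in_channels == 64)
    # and its SE-style attention squeezes 32 -> 8 -> 32, a 4x reduction.
    # This is an inference from the asserts, not a documented contract.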
def test_attention_refinement_module():
x_arm = AttentionRefinementModule(128, 32)
assert x_arm.conv_layer.in_channels == 128
assert x_arm.atten_conv_layer[1].conv.out_channels == 32
x = torch.randn(2, 128, 32, 64)
x_out = x_arm(x)
assert x_out.shape == torch.Size([2, 32, 32, 64])
def test_stdc_module():
x_stdc = STDCModule(in_channels=32, out_channels=32, stride=4)
assert x_stdc.layers[0].conv.in_channels == 32
assert x_stdc.layers[3].conv.out_channels == 4
x = torch.randn(2, 32, 32, 64)
x_out = x_stdc(x)
assert x_out.shape == torch.Size([2, 32, 32, 64])
| 4,458 | 32.780303 | 73 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_backbones/test_swin.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones.swin import SwinBlock, SwinTransformer
def test_swin_block():
# test SwinBlock structure and forward
block = SwinBlock(embed_dims=32, num_heads=4, feedforward_channels=128)
assert block.ffn.embed_dims == 32
assert block.attn.w_msa.num_heads == 4
assert block.ffn.feedforward_channels == 128
x = torch.randn(1, 56 * 56, 32)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 32])
    # Test SwinBlock with checkpoint forward
block = SwinBlock(
embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)
assert block.with_cp
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
def test_swin_transformer():
"""Test Swin Transformer backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
SwinTransformer(pretrained=123)
with pytest.raises(AssertionError):
        # Swin uses a non-overlapping patch embedding, so the stride of the
        # patch embedding must equal the patch size.
SwinTransformer(strides=(2, 2, 2, 2), patch_size=4)
    # test pretrain_img_size
with pytest.raises(AssertionError):
SwinTransformer(pretrain_img_size=(112, 112, 112))
# Test absolute position embedding
temp = torch.randn((1, 3, 112, 112))
model = SwinTransformer(pretrain_img_size=112, use_abs_pos_embed=True)
model.init_weights()
model(temp)
# Test patch norm
model = SwinTransformer(patch_norm=False)
model(temp)
# Test normal inference
temp = torch.randn((1, 3, 256, 256))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 64, 64)
assert outs[1].shape == (1, 192, 32, 32)
assert outs[2].shape == (1, 384, 16, 16)
assert outs[3].shape == (1, 768, 8, 8)
# Test abnormal inference size
temp = torch.randn((1, 3, 255, 255))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 64, 64)
assert outs[1].shape == (1, 192, 32, 32)
assert outs[2].shape == (1, 384, 16, 16)
assert outs[3].shape == (1, 768, 8, 8)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 28, 35)
assert outs[1].shape == (1, 192, 14, 18)
assert outs[2].shape == (1, 384, 7, 9)
assert outs[3].shape == (1, 768, 4, 5)
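    # The odd input sizes above suggest that patch embedding pads inputs not
    # divisible by the stage stride, so each output side is ceil(side / s),
    # e.g. ceil(137 / 4) == 35. Inferred from the asserted shapes rather
    # than a documented guarantee.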
# Test frozen
model = SwinTransformer(frozen_stages=4)
model.train()
for p in model.parameters():
assert not p.requires_grad
# Test absolute position embedding frozen
model = SwinTransformer(frozen_stages=4, use_abs_pos_embed=True)
model.train()
for p in model.parameters():
assert not p.requires_grad
# Test Swin with checkpoint forward
temp = torch.randn((1, 3, 56, 56))
model = SwinTransformer(with_cp=True)
for m in model.modules():
if isinstance(m, SwinBlock):
assert m.with_cp
model.init_weights()
model.train()
model(temp)
| 3,209 | 30.782178 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_backbones/test_timm_backbone.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones import TIMMBackbone
from .utils import check_norm_state
def test_timm_backbone():
with pytest.raises(TypeError):
# pretrained must be a string path
model = TIMMBackbone()
model.init_weights(pretrained=0)
    # Test different norm_layer settings; norm_layer can be 'SyncBN', 'BN2d',
    # 'GN', 'LN' or 'IN'
# Test resnet18 from timm, norm_layer='BN2d'
model = TIMMBackbone(
model_name='resnet18',
features_only=True,
pretrained=False,
output_stride=32,
norm_layer='BN2d')
# Test resnet18 from timm, norm_layer='SyncBN'
model = TIMMBackbone(
model_name='resnet18',
features_only=True,
pretrained=False,
output_stride=32,
norm_layer='SyncBN')
# Test resnet18 from timm, features_only=True, output_stride=32
model = TIMMBackbone(
model_name='resnet18',
features_only=True,
pretrained=False,
output_stride=32)
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
imgs = torch.randn(1, 3, 224, 224)
feats = model(imgs)
feats = [feat.shape for feat in feats]
assert len(feats) == 5
assert feats[0] == torch.Size((1, 64, 112, 112))
assert feats[1] == torch.Size((1, 64, 56, 56))
assert feats[2] == torch.Size((1, 128, 28, 28))
assert feats[3] == torch.Size((1, 256, 14, 14))
assert feats[4] == torch.Size((1, 512, 7, 7))
# Test resnet18 from timm, features_only=True, output_stride=16
model = TIMMBackbone(
model_name='resnet18',
features_only=True,
pretrained=False,
output_stride=16)
imgs = torch.randn(1, 3, 224, 224)
feats = model(imgs)
feats = [feat.shape for feat in feats]
assert len(feats) == 5
assert feats[0] == torch.Size((1, 64, 112, 112))
assert feats[1] == torch.Size((1, 64, 56, 56))
assert feats[2] == torch.Size((1, 128, 28, 28))
assert feats[3] == torch.Size((1, 256, 14, 14))
assert feats[4] == torch.Size((1, 512, 14, 14))
# Test resnet18 from timm, features_only=True, output_stride=8
model = TIMMBackbone(
model_name='resnet18',
features_only=True,
pretrained=False,
output_stride=8)
imgs = torch.randn(1, 3, 224, 224)
feats = model(imgs)
feats = [feat.shape for feat in feats]
assert len(feats) == 5
assert feats[0] == torch.Size((1, 64, 112, 112))
assert feats[1] == torch.Size((1, 64, 56, 56))
assert feats[2] == torch.Size((1, 128, 28, 28))
assert feats[3] == torch.Size((1, 256, 28, 28))
assert feats[4] == torch.Size((1, 512, 28, 28))
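    # output_stride caps the total downsampling: once the cumulative stride
    # reaches the cap, later stages keep their spatial size (presumably via
    # dilated convs), which is why the deepest maps stay at 28x28 for
    # output_stride=8. Inferred from the asserted shapes.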
# Test efficientnet_b1 with pretrained weights
model = TIMMBackbone(model_name='efficientnet_b1', pretrained=True)
# Test resnetv2_50x1_bitm from timm, features_only=True, output_stride=8
model = TIMMBackbone(
model_name='resnetv2_50x1_bitm',
features_only=True,
pretrained=False,
output_stride=8)
imgs = torch.randn(1, 3, 8, 8)
feats = model(imgs)
feats = [feat.shape for feat in feats]
assert len(feats) == 5
assert feats[0] == torch.Size((1, 64, 4, 4))
assert feats[1] == torch.Size((1, 256, 2, 2))
assert feats[2] == torch.Size((1, 512, 1, 1))
assert feats[3] == torch.Size((1, 1024, 1, 1))
assert feats[4] == torch.Size((1, 2048, 1, 1))
# Test resnetv2_50x3_bitm from timm, features_only=True, output_stride=8
model = TIMMBackbone(
model_name='resnetv2_50x3_bitm',
features_only=True,
pretrained=False,
output_stride=8)
imgs = torch.randn(1, 3, 8, 8)
feats = model(imgs)
feats = [feat.shape for feat in feats]
assert len(feats) == 5
assert feats[0] == torch.Size((1, 192, 4, 4))
assert feats[1] == torch.Size((1, 768, 2, 2))
assert feats[2] == torch.Size((1, 1536, 1, 1))
assert feats[3] == torch.Size((1, 3072, 1, 1))
assert feats[4] == torch.Size((1, 6144, 1, 1))
# Test resnetv2_101x1_bitm from timm, features_only=True, output_stride=8
model = TIMMBackbone(
model_name='resnetv2_101x1_bitm',
features_only=True,
pretrained=False,
output_stride=8)
imgs = torch.randn(1, 3, 8, 8)
feats = model(imgs)
feats = [feat.shape for feat in feats]
assert len(feats) == 5
assert feats[0] == torch.Size((1, 64, 4, 4))
assert feats[1] == torch.Size((1, 256, 2, 2))
assert feats[2] == torch.Size((1, 512, 1, 1))
assert feats[3] == torch.Size((1, 1024, 1, 1))
assert feats[4] == torch.Size((1, 2048, 1, 1))
| 4,674 | 33.88806 | 77 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_backbones/test_twins.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones.twins import (PCPVT, SVT,
ConditionalPositionEncoding,
LocallyGroupedSelfAttention)
def test_pcpvt():
# Test normal input
H, W = (224, 224)
temp = torch.randn((1, 3, H, W))
model = PCPVT(
embed_dims=[32, 64, 160, 256],
num_heads=[1, 2, 5, 8],
mlp_ratios=[8, 8, 4, 4],
qkv_bias=True,
depths=[3, 4, 6, 3],
sr_ratios=[8, 4, 2, 1],
norm_after_stage=False)
model.init_weights()
outs = model(temp)
assert outs[0].shape == (1, 32, H // 4, W // 4)
assert outs[1].shape == (1, 64, H // 8, W // 8)
assert outs[2].shape == (1, 160, H // 16, W // 16)
assert outs[3].shape == (1, 256, H // 32, W // 32)
def test_svt():
# Test normal input
H, W = (224, 224)
temp = torch.randn((1, 3, H, W))
model = SVT(
embed_dims=[32, 64, 128],
num_heads=[1, 2, 4],
mlp_ratios=[4, 4, 4],
qkv_bias=False,
depths=[4, 4, 4],
windiow_sizes=[7, 7, 7],
norm_after_stage=True)
model.init_weights()
outs = model(temp)
assert outs[0].shape == (1, 32, H // 4, W // 4)
assert outs[1].shape == (1, 64, H // 8, W // 8)
assert outs[2].shape == (1, 128, H // 16, W // 16)
def test_svt_init():
path = 'PATH_THAT_DO_NOT_EXIST'
# Test all combinations of pretrained and init_cfg
# pretrained=None, init_cfg=None
model = SVT(pretrained=None, init_cfg=None)
assert model.init_cfg is None
model.init_weights()
# pretrained=None
    # init_cfg loads pretrained weights from a non-existent file
model = SVT(
pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path))
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
# pretrained=None
# init_cfg=123, whose type is unsupported
model = SVT(pretrained=None, init_cfg=123)
with pytest.raises(TypeError):
model.init_weights()
    # pretrained loads pretrained weights from a non-existent file
# init_cfg=None
model = SVT(pretrained=path, init_cfg=None)
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
    # pretrained loads pretrained weights from a non-existent file
    # init_cfg loads pretrained weights from a non-existent file
with pytest.raises(AssertionError):
model = SVT(
pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path))
with pytest.raises(AssertionError):
model = SVT(pretrained=path, init_cfg=123)
    # pretrained=123, whose type is unsupported
# init_cfg=None
with pytest.raises(TypeError):
model = SVT(pretrained=123, init_cfg=None)
    # pretrained=123, whose type is unsupported
    # init_cfg loads pretrained weights from a non-existent file
with pytest.raises(AssertionError):
model = SVT(
pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path))
    # pretrained=123, whose type is unsupported
# init_cfg=123, whose type is unsupported
with pytest.raises(AssertionError):
model = SVT(pretrained=123, init_cfg=123)
def test_pcpvt_init():
path = 'PATH_THAT_DO_NOT_EXIST'
# Test all combinations of pretrained and init_cfg
# pretrained=None, init_cfg=None
model = PCPVT(pretrained=None, init_cfg=None)
assert model.init_cfg is None
model.init_weights()
# pretrained=None
    # init_cfg loads pretrained weights from a non-existent file
model = PCPVT(
pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path))
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
# pretrained=None
# init_cfg=123, whose type is unsupported
model = PCPVT(pretrained=None, init_cfg=123)
with pytest.raises(TypeError):
model.init_weights()
    # pretrained loads pretrained weights from a non-existent file
# init_cfg=None
model = PCPVT(pretrained=path, init_cfg=None)
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
    # pretrained loads pretrained weights from a non-existent file
    # init_cfg loads pretrained weights from a non-existent file
with pytest.raises(AssertionError):
model = PCPVT(
pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path))
with pytest.raises(AssertionError):
model = PCPVT(pretrained=path, init_cfg=123)
    # pretrained=123, whose type is unsupported
# init_cfg=None
with pytest.raises(TypeError):
model = PCPVT(pretrained=123, init_cfg=None)
    # pretrained=123, whose type is unsupported
    # init_cfg loads pretrained weights from a non-existent file
with pytest.raises(AssertionError):
model = PCPVT(
pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path))
    # pretrained=123, whose type is unsupported
# init_cfg=123, whose type is unsupported
with pytest.raises(AssertionError):
model = PCPVT(pretrained=123, init_cfg=123)
def test_locallygrouped_self_attention_module():
LSA = LocallyGroupedSelfAttention(embed_dims=32, window_size=3)
outs = LSA(torch.randn(1, 3136, 32), (56, 56))
assert outs.shape == torch.Size([1, 3136, 32])
def test_conditional_position_encoding_module():
CPE = ConditionalPositionEncoding(in_channels=32, embed_dims=32, stride=2)
outs = CPE(torch.randn(1, 3136, 32), (56, 56))
assert outs.shape == torch.Size([1, 784, 32])
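    # The token count drops 3136 -> 784 because the positional-encoding conv
    # presumably applies its stride (2 here) to the 56x56 token grid,
    # yielding 28 * 28 = 784 tokens. Inferred from the asserted shape.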
| 5,967 | 33.697674 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_backbones/test_unet.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.cnn import ConvModule
from mmseg.models.backbones.unet import (BasicConvBlock, DeconvModule,
InterpConv, UNet, UpConvBlock)
from mmseg.ops import Upsample
from .utils import check_norm_state
def test_unet_basic_conv_block():
with pytest.raises(AssertionError):
# Not implemented yet.
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
BasicConvBlock(64, 64, dcn=dcn)
with pytest.raises(AssertionError):
# Not implemented yet.
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
BasicConvBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
BasicConvBlock(64, 64, plugins=plugins)
# test BasicConvBlock with checkpoint forward
block = BasicConvBlock(16, 16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 16, 64, 64, requires_grad=True)
x_out = block(x)
assert x_out.shape == torch.Size([1, 16, 64, 64])
block = BasicConvBlock(16, 16, with_cp=False)
assert not block.with_cp
x = torch.randn(1, 16, 64, 64)
x_out = block(x)
assert x_out.shape == torch.Size([1, 16, 64, 64])
# test BasicConvBlock with stride convolution to downsample
block = BasicConvBlock(16, 16, stride=2)
x = torch.randn(1, 16, 64, 64)
x_out = block(x)
assert x_out.shape == torch.Size([1, 16, 32, 32])
# test BasicConvBlock structure and forward
block = BasicConvBlock(16, 64, num_convs=3, dilation=3)
assert block.convs[0].conv.in_channels == 16
assert block.convs[0].conv.out_channels == 64
assert block.convs[0].conv.kernel_size == (3, 3)
assert block.convs[0].conv.dilation == (1, 1)
assert block.convs[0].conv.padding == (1, 1)
assert block.convs[1].conv.in_channels == 64
assert block.convs[1].conv.out_channels == 64
assert block.convs[1].conv.kernel_size == (3, 3)
assert block.convs[1].conv.dilation == (3, 3)
assert block.convs[1].conv.padding == (3, 3)
assert block.convs[2].conv.in_channels == 64
assert block.convs[2].conv.out_channels == 64
assert block.convs[2].conv.kernel_size == (3, 3)
assert block.convs[2].conv.dilation == (3, 3)
assert block.convs[2].conv.padding == (3, 3)
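    # Pattern implied by the asserts above: only the first conv maps
    # in_channels -> out_channels and keeps dilation 1; the remaining
    # num_convs - 1 convs map out_channels -> out_channels with the
    # requested dilation, and padding tracks dilation so the spatial size
    # is preserved.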
def test_deconv_module():
with pytest.raises(AssertionError):
        # kernel_size should be greater than or equal to scale_factor and
        # (kernel_size - scale_factor) should be an even number
DeconvModule(64, 32, kernel_size=1, scale_factor=2)
with pytest.raises(AssertionError):
        # kernel_size should be greater than or equal to scale_factor and
        # (kernel_size - scale_factor) should be an even number
DeconvModule(64, 32, kernel_size=3, scale_factor=2)
with pytest.raises(AssertionError):
        # kernel_size should be greater than or equal to scale_factor and
        # (kernel_size - scale_factor) should be an even number
DeconvModule(64, 32, kernel_size=5, scale_factor=4)
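    # Why (kernel_size - scale_factor) must be even -- a sketch of the
    # transposed-conv arithmetic assumed here: with stride = scale_factor
    # and padding = (kernel_size - scale_factor) // 2,
    #   out = (in - 1) * stride - 2 * padding + kernel_size
    #       = in * scale_factor
    # so an integral padding (an even difference) gives exact 2X/4X
    # upsampling.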
# test DeconvModule with checkpoint forward and upsample 2X.
block = DeconvModule(64, 32, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 128, 128, requires_grad=True)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
block = DeconvModule(64, 32, with_cp=False)
assert not block.with_cp
x = torch.randn(1, 64, 128, 128)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
# test DeconvModule with different kernel size for upsample 2X.
x = torch.randn(1, 64, 64, 64)
block = DeconvModule(64, 32, kernel_size=2, scale_factor=2)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 128, 128])
block = DeconvModule(64, 32, kernel_size=6, scale_factor=2)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 128, 128])
# test DeconvModule with different kernel size for upsample 4X.
x = torch.randn(1, 64, 64, 64)
block = DeconvModule(64, 32, kernel_size=4, scale_factor=4)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
block = DeconvModule(64, 32, kernel_size=6, scale_factor=4)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
def test_interp_conv():
# test InterpConv with checkpoint forward and upsample 2X.
block = InterpConv(64, 32, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 128, 128, requires_grad=True)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
block = InterpConv(64, 32, with_cp=False)
assert not block.with_cp
x = torch.randn(1, 64, 128, 128)
x_out = block(x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
# test InterpConv with conv_first=False for upsample 2X.
block = InterpConv(64, 32, conv_first=False)
x = torch.randn(1, 64, 128, 128)
x_out = block(x)
assert isinstance(block.interp_upsample[0], Upsample)
assert isinstance(block.interp_upsample[1], ConvModule)
assert x_out.shape == torch.Size([1, 32, 256, 256])
# test InterpConv with conv_first=True for upsample 2X.
block = InterpConv(64, 32, conv_first=True)
x = torch.randn(1, 64, 128, 128)
x_out = block(x)
assert isinstance(block.interp_upsample[0], ConvModule)
assert isinstance(block.interp_upsample[1], Upsample)
assert x_out.shape == torch.Size([1, 32, 256, 256])
# test InterpConv with bilinear upsample for upsample 2X.
block = InterpConv(
64,
32,
conv_first=False,
upsample_cfg=dict(
scale_factor=2, mode='bilinear', align_corners=False))
x = torch.randn(1, 64, 128, 128)
x_out = block(x)
assert isinstance(block.interp_upsample[0], Upsample)
assert isinstance(block.interp_upsample[1], ConvModule)
assert x_out.shape == torch.Size([1, 32, 256, 256])
assert block.interp_upsample[0].mode == 'bilinear'
# test InterpConv with nearest upsample for upsample 2X.
block = InterpConv(
64,
32,
conv_first=False,
upsample_cfg=dict(scale_factor=2, mode='nearest'))
x = torch.randn(1, 64, 128, 128)
x_out = block(x)
assert isinstance(block.interp_upsample[0], Upsample)
assert isinstance(block.interp_upsample[1], ConvModule)
assert x_out.shape == torch.Size([1, 32, 256, 256])
assert block.interp_upsample[0].mode == 'nearest'
def test_up_conv_block():
with pytest.raises(AssertionError):
# Not implemented yet.
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
UpConvBlock(BasicConvBlock, 64, 32, 32, dcn=dcn)
with pytest.raises(AssertionError):
# Not implemented yet.
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
UpConvBlock(BasicConvBlock, 64, 32, 32, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
UpConvBlock(BasicConvBlock, 64, 32, 32, plugins=plugins)
# test UpConvBlock with checkpoint forward and upsample 2X.
block = UpConvBlock(BasicConvBlock, 64, 32, 32, with_cp=True)
skip_x = torch.randn(1, 32, 256, 256, requires_grad=True)
x = torch.randn(1, 64, 128, 128, requires_grad=True)
x_out = block(skip_x, x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
    # test UpConvBlock with upsampling for upsample 2X. The spatial size of
# skip_x is 2X larger than x.
block = UpConvBlock(
BasicConvBlock, 64, 32, 32, upsample_cfg=dict(type='InterpConv'))
skip_x = torch.randn(1, 32, 256, 256)
x = torch.randn(1, 64, 128, 128)
x_out = block(skip_x, x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
    # test UpConvBlock with upsample_cfg=None (no upsampling). The size of
# skip_x is the same as that of x.
block = UpConvBlock(BasicConvBlock, 64, 32, 32, upsample_cfg=None)
skip_x = torch.randn(1, 32, 256, 256)
x = torch.randn(1, 64, 256, 256)
x_out = block(skip_x, x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
# test UpConvBlock with different upsample method for upsample 2X.
# The upsample method is interpolation upsample (bilinear or nearest).
block = UpConvBlock(
BasicConvBlock,
64,
32,
32,
upsample_cfg=dict(
type='InterpConv',
upsample_cfg=dict(
scale_factor=2, mode='bilinear', align_corners=False)))
skip_x = torch.randn(1, 32, 256, 256)
x = torch.randn(1, 64, 128, 128)
x_out = block(skip_x, x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
# test UpConvBlock with different upsample method for upsample 2X.
# The upsample method is deconvolution upsample.
block = UpConvBlock(
BasicConvBlock,
64,
32,
32,
upsample_cfg=dict(type='DeconvModule', kernel_size=4, scale_factor=2))
skip_x = torch.randn(1, 32, 256, 256)
x = torch.randn(1, 64, 128, 128)
x_out = block(skip_x, x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
    # test UpConvBlock structure and forward
block = UpConvBlock(
conv_block=BasicConvBlock,
in_channels=64,
skip_channels=32,
out_channels=32,
num_convs=3,
dilation=3,
upsample_cfg=dict(
type='InterpConv',
upsample_cfg=dict(
scale_factor=2, mode='bilinear', align_corners=False)))
skip_x = torch.randn(1, 32, 256, 256)
x = torch.randn(1, 64, 128, 128)
x_out = block(skip_x, x)
assert x_out.shape == torch.Size([1, 32, 256, 256])
assert block.conv_block.convs[0].conv.in_channels == 64
assert block.conv_block.convs[0].conv.out_channels == 32
assert block.conv_block.convs[0].conv.kernel_size == (3, 3)
assert block.conv_block.convs[0].conv.dilation == (1, 1)
assert block.conv_block.convs[0].conv.padding == (1, 1)
assert block.conv_block.convs[1].conv.in_channels == 32
assert block.conv_block.convs[1].conv.out_channels == 32
assert block.conv_block.convs[1].conv.kernel_size == (3, 3)
assert block.conv_block.convs[1].conv.dilation == (3, 3)
assert block.conv_block.convs[1].conv.padding == (3, 3)
assert block.conv_block.convs[2].conv.in_channels == 32
assert block.conv_block.convs[2].conv.out_channels == 32
assert block.conv_block.convs[2].conv.kernel_size == (3, 3)
assert block.conv_block.convs[2].conv.dilation == (3, 3)
assert block.conv_block.convs[2].conv.padding == (3, 3)
assert block.upsample.interp_upsample[1].conv.in_channels == 64
assert block.upsample.interp_upsample[1].conv.out_channels == 32
assert block.upsample.interp_upsample[1].conv.kernel_size == (1, 1)
assert block.upsample.interp_upsample[1].conv.dilation == (1, 1)
assert block.upsample.interp_upsample[1].conv.padding == (0, 0)
def test_unet():
with pytest.raises(AssertionError):
# Not implemented yet.
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
UNet(3, 64, 5, dcn=dcn)
with pytest.raises(AssertionError):
# Not implemented yet.
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
UNet(3, 64, 5, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
UNet(3, 64, 5, plugins=plugins)
with pytest.raises(AssertionError):
        # Check whether the input image size is divisible by the whole
# downsample rate of the encoder. The whole downsample rate of this
# case is 8.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=4,
strides=(1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2),
dec_num_convs=(2, 2, 2),
downsamples=(True, True, True),
enc_dilations=(1, 1, 1, 1),
dec_dilations=(1, 1, 1))
x = torch.randn(2, 3, 65, 65)
unet(x)
with pytest.raises(AssertionError):
        # Check whether the input image size is divisible by the whole
# downsample rate of the encoder. The whole downsample rate of this
# case is 16.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 65, 65)
unet(x)
with pytest.raises(AssertionError):
        # Check whether the input image size is divisible by the whole
# downsample rate of the encoder. The whole downsample rate of this
# case is 8.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, False),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 65, 65)
unet(x)
with pytest.raises(AssertionError):
        # Check whether the input image size is divisible by the whole
# downsample rate of the encoder. The whole downsample rate of this
# case is 8.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 2, 2, 2, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, False),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 65, 65)
unet(x)
with pytest.raises(AssertionError):
        # Check whether the input image size is divisible by the whole
# downsample rate of the encoder. The whole downsample rate of this
# case is 32.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=6,
strides=(1, 1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2, 2),
downsamples=(True, True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1, 1))
x = torch.randn(2, 3, 65, 65)
unet(x)
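    # Reading of "whole downsample rate" used in these checks (an assumption
    # consistent with the configs above): rate = 2 ** n, where n counts the
    # encoder stages after the first whose conv stride is 2 or whose
    # corresponding `downsamples` entry is True; the input size must be
    # divisible by this rate.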
with pytest.raises(AssertionError):
# Check if num_stages matches strides, len(strides)=num_stages
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 64, 64)
unet(x)
with pytest.raises(AssertionError):
        # Check if num_stages matches enc_num_convs, len(enc_num_convs)=num_stages
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 64, 64)
unet(x)
with pytest.raises(AssertionError):
        # Check if num_stages matches dec_num_convs, len(dec_num_convs)=num_stages-1
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 64, 64)
unet(x)
with pytest.raises(AssertionError):
        # Check if num_stages matches downsamples, len(downsamples)=num_stages-1
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 64, 64)
unet(x)
with pytest.raises(AssertionError):
        # Check if num_stages matches enc_dilations, len(enc_dilations)=num_stages
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 64, 64)
unet(x)
with pytest.raises(AssertionError):
        # Check if num_stages matches dec_dilations, len(dec_dilations)=num_stages-1
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1, 1))
x = torch.randn(2, 3, 64, 64)
unet(x)
# test UNet norm_eval=True
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1),
norm_eval=True)
unet.train()
assert check_norm_state(unet.modules(), False)
# test UNet norm_eval=False
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1),
norm_eval=False)
unet.train()
assert check_norm_state(unet.modules(), True)
# test UNet forward and outputs. The whole downsample rate is 16.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 128, 128)
x_outs = unet(x)
assert x_outs[0].shape == torch.Size([2, 64, 8, 8])
assert x_outs[1].shape == torch.Size([2, 32, 16, 16])
assert x_outs[2].shape == torch.Size([2, 16, 32, 32])
assert x_outs[3].shape == torch.Size([2, 8, 64, 64])
assert x_outs[4].shape == torch.Size([2, 4, 128, 128])
# test UNet forward and outputs. The whole downsample rate is 8.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, False),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 128, 128)
x_outs = unet(x)
assert x_outs[0].shape == torch.Size([2, 64, 16, 16])
assert x_outs[1].shape == torch.Size([2, 32, 16, 16])
assert x_outs[2].shape == torch.Size([2, 16, 32, 32])
assert x_outs[3].shape == torch.Size([2, 8, 64, 64])
assert x_outs[4].shape == torch.Size([2, 4, 128, 128])
# test UNet forward and outputs. The whole downsample rate is 8.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 2, 2, 2, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, False),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 128, 128)
x_outs = unet(x)
assert x_outs[0].shape == torch.Size([2, 64, 16, 16])
assert x_outs[1].shape == torch.Size([2, 32, 16, 16])
assert x_outs[2].shape == torch.Size([2, 16, 32, 32])
assert x_outs[3].shape == torch.Size([2, 8, 64, 64])
assert x_outs[4].shape == torch.Size([2, 4, 128, 128])
# test UNet forward and outputs. The whole downsample rate is 4.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, False, False),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 128, 128)
x_outs = unet(x)
assert x_outs[0].shape == torch.Size([2, 64, 32, 32])
assert x_outs[1].shape == torch.Size([2, 32, 32, 32])
assert x_outs[2].shape == torch.Size([2, 16, 32, 32])
assert x_outs[3].shape == torch.Size([2, 8, 64, 64])
assert x_outs[4].shape == torch.Size([2, 4, 128, 128])
# test UNet forward and outputs. The whole downsample rate is 4.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 2, 2, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, False, False),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 128, 128)
x_outs = unet(x)
assert x_outs[0].shape == torch.Size([2, 64, 32, 32])
assert x_outs[1].shape == torch.Size([2, 32, 32, 32])
assert x_outs[2].shape == torch.Size([2, 16, 32, 32])
assert x_outs[3].shape == torch.Size([2, 8, 64, 64])
assert x_outs[4].shape == torch.Size([2, 4, 128, 128])
# test UNet forward and outputs. The whole downsample rate is 2.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, False, False, False),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 128, 128)
x_outs = unet(x)
assert x_outs[0].shape == torch.Size([2, 64, 64, 64])
assert x_outs[1].shape == torch.Size([2, 32, 64, 64])
assert x_outs[2].shape == torch.Size([2, 16, 64, 64])
assert x_outs[3].shape == torch.Size([2, 8, 64, 64])
assert x_outs[4].shape == torch.Size([2, 4, 128, 128])
# test UNet forward and outputs. The whole downsample rate is 1.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 1, 1, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(False, False, False, False),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 128, 128)
x_outs = unet(x)
assert x_outs[0].shape == torch.Size([2, 64, 128, 128])
assert x_outs[1].shape == torch.Size([2, 32, 128, 128])
assert x_outs[2].shape == torch.Size([2, 16, 128, 128])
assert x_outs[3].shape == torch.Size([2, 8, 128, 128])
assert x_outs[4].shape == torch.Size([2, 4, 128, 128])
# test UNet forward and outputs. The whole downsample rate is 16.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 2, 2, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, True),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 128, 128)
x_outs = unet(x)
assert x_outs[0].shape == torch.Size([2, 64, 8, 8])
assert x_outs[1].shape == torch.Size([2, 32, 16, 16])
assert x_outs[2].shape == torch.Size([2, 16, 32, 32])
assert x_outs[3].shape == torch.Size([2, 8, 64, 64])
assert x_outs[4].shape == torch.Size([2, 4, 128, 128])
# test UNet forward and outputs. The whole downsample rate is 8.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 2, 2, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, True, False),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1))
x = torch.randn(2, 3, 128, 128)
x_outs = unet(x)
assert x_outs[0].shape == torch.Size([2, 64, 16, 16])
assert x_outs[1].shape == torch.Size([2, 32, 16, 16])
assert x_outs[2].shape == torch.Size([2, 16, 32, 32])
assert x_outs[3].shape == torch.Size([2, 8, 64, 64])
assert x_outs[4].shape == torch.Size([2, 4, 128, 128])
# test UNet init_weights method.
unet = UNet(
in_channels=3,
base_channels=4,
num_stages=5,
strides=(1, 2, 2, 1, 1),
enc_num_convs=(2, 2, 2, 2, 2),
dec_num_convs=(2, 2, 2, 2),
downsamples=(True, True, False, False),
enc_dilations=(1, 1, 1, 1, 1),
dec_dilations=(1, 1, 1, 1),
pretrained=None)
unet.init_weights()
x = torch.randn(2, 3, 128, 128)
x_outs = unet(x)
assert x_outs[0].shape == torch.Size([2, 64, 32, 32])
assert x_outs[1].shape == torch.Size([2, 32, 32, 32])
assert x_outs[2].shape == torch.Size([2, 16, 32, 32])
assert x_outs[3].shape == torch.Size([2, 8, 64, 64])
assert x_outs[4].shape == torch.Size([2, 4, 128, 128])
| 30,122 | 35.601458 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_backbones/test_vit.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.backbones.vit import (TransformerEncoderLayer,
VisionTransformer)
from .utils import check_norm_state
def test_vit_backbone():
with pytest.raises(TypeError):
# pretrained must be a string path
model = VisionTransformer()
model.init_weights(pretrained=0)
with pytest.raises(TypeError):
# img_size must be int or tuple
model = VisionTransformer(img_size=512.0)
with pytest.raises(TypeError):
        # out_indices must be int, list or tuple
model = VisionTransformer(out_indices=1.)
with pytest.raises(TypeError):
        # test resize_pos_embed function
x = torch.randn(1, 196)
VisionTransformer.resize_pos_embed(x, 512, 512, 224, 224, 'bilinear')
with pytest.raises(AssertionError):
# The length of img_size tuple must be lower than 3.
VisionTransformer(img_size=(224, 224, 224))
with pytest.raises(TypeError):
# Pretrained must be None or Str.
VisionTransformer(pretrained=123)
with pytest.raises(AssertionError):
# with_cls_token must be True when output_cls_token == True
VisionTransformer(with_cls_token=False, output_cls_token=True)
# Test img_size isinstance tuple
imgs = torch.randn(1, 3, 224, 224)
model = VisionTransformer(img_size=(224, ))
model.init_weights()
model(imgs)
# Test img_size isinstance tuple
imgs = torch.randn(1, 3, 224, 224)
model = VisionTransformer(img_size=(224, 224))
model(imgs)
# Test norm_eval = True
model = VisionTransformer(norm_eval=True)
model.train()
# Test ViT backbone with input size of 224 and patch size of 16
model = VisionTransformer()
model.init_weights()
model.train()
assert check_norm_state(model.modules(), True)
# Test normal size input image
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test large size input image
imgs = torch.randn(1, 3, 256, 256)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 16, 16)
# Test small size input image
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 2, 2)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test unbalanced size input image
imgs = torch.randn(1, 3, 112, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 7, 14)
# Test irregular input image
imgs = torch.randn(1, 3, 234, 345)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 15, 22)
# Test with_cp=True
model = VisionTransformer(with_cp=True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test with_cls_token=False
model = VisionTransformer(with_cls_token=False)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test final norm
model = VisionTransformer(final_norm=True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test patch norm
model = VisionTransformer(patch_norm=True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[-1].shape == (1, 768, 14, 14)
# Test output_cls_token
model = VisionTransformer(with_cls_token=True, output_cls_token=True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[0][0].shape == (1, 768, 14, 14)
assert feat[0][1].shape == (1, 768)
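    # With output_cls_token=True each output is assumed to be a
    # (patch_tokens, cls_token) pair, which is why feat[0] is indexed twice
    # above: feat[0][0] is the 14x14 patch map, feat[0][1] the class token.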
# Test TransformerEncoderLayer with checkpoint forward
block = TransformerEncoderLayer(
embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)
assert block.with_cp
x = torch.randn(1, 56 * 56, 64)
x_out = block(x)
assert x_out.shape == torch.Size([1, 56 * 56, 64])
def test_vit_init():
path = 'PATH_THAT_DO_NOT_EXIST'
# Test all combinations of pretrained and init_cfg
# pretrained=None, init_cfg=None
model = VisionTransformer(pretrained=None, init_cfg=None)
assert model.init_cfg is None
model.init_weights()
# pretrained=None
    # init_cfg loads pretrained weights from a non-existent file
model = VisionTransformer(
pretrained=None, init_cfg=dict(type='Pretrained', checkpoint=path))
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
# pretrained=None
# init_cfg=123, whose type is unsupported
model = VisionTransformer(pretrained=None, init_cfg=123)
with pytest.raises(TypeError):
model.init_weights()
    # pretrained loads pretrained weights from a non-existent file
# init_cfg=None
model = VisionTransformer(pretrained=path, init_cfg=None)
assert model.init_cfg == dict(type='Pretrained', checkpoint=path)
    # Test loading a checkpoint from a non-existent file
with pytest.raises(OSError):
model.init_weights()
    # pretrained loads pretrained weights from a non-existent file
    # init_cfg loads pretrained weights from a non-existent file
with pytest.raises(AssertionError):
model = VisionTransformer(
pretrained=path, init_cfg=dict(type='Pretrained', checkpoint=path))
with pytest.raises(AssertionError):
model = VisionTransformer(pretrained=path, init_cfg=123)
    # pretrained=123, whose type is unsupported
# init_cfg=None
with pytest.raises(TypeError):
model = VisionTransformer(pretrained=123, init_cfg=None)
    # pretrained=123, whose type is unsupported
    # init_cfg loads pretrained weights from a non-existent file
with pytest.raises(AssertionError):
model = VisionTransformer(
pretrained=123, init_cfg=dict(type='Pretrained', checkpoint=path))
    # pretrained=123, whose type is unsupported
# init_cfg=123, whose type is unsupported
with pytest.raises(AssertionError):
model = VisionTransformer(pretrained=123, init_cfg=123)
| 6,203 | 32.354839 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_backbones/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmseg.models.backbones.resnet import BasicBlock, Bottleneck
from mmseg.models.backbones.resnext import Bottleneck as BottleneckX
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX)):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def all_zeros(modules):
"""Check if the weight(and bias) is all zero."""
weight_zero = torch.allclose(modules.weight.data,
torch.zeros_like(modules.weight.data))
if hasattr(modules, 'bias'):
bias_zero = torch.allclose(modules.bias.data,
torch.zeros_like(modules.bias.data))
else:
bias_zero = True
return weight_zero and bias_zero
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
| 1,306 | 28.704545 | 71 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
| 48 | 23.5 | 47 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_ann_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import ANNHead
from .utils import to_cuda
def test_ann_head():
inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 8, 21, 21)]
head = ANNHead(
in_channels=[4, 8],
channels=2,
num_classes=19,
in_index=[-2, -1],
project_channels=8)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 21, 21)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_apc_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import APCHead
from .utils import _conv_has_norm, to_cuda
def test_apc_head():
with pytest.raises(AssertionError):
# pool_scales must be list|tuple
APCHead(in_channels=8, channels=2, num_classes=19, pool_scales=1)
# test no norm_cfg
head = APCHead(in_channels=8, channels=2, num_classes=19)
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = APCHead(
in_channels=8,
channels=2,
num_classes=19,
norm_cfg=dict(type='SyncBN'))
assert _conv_has_norm(head, sync_bn=True)
# fusion=True
inputs = [torch.randn(1, 8, 45, 45)]
head = APCHead(
in_channels=8,
channels=2,
num_classes=19,
pool_scales=(1, 2, 3),
fusion=True)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.fusion is True
assert head.acm_modules[0].pool_scale == 1
assert head.acm_modules[1].pool_scale == 2
assert head.acm_modules[2].pool_scale == 3
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# fusion=False
inputs = [torch.randn(1, 8, 45, 45)]
head = APCHead(
in_channels=8,
channels=2,
num_classes=19,
pool_scales=(1, 2, 3),
fusion=False)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.fusion is False
assert head.acm_modules[0].pool_scale == 1
assert head.acm_modules[1].pool_scale == 2
assert head.acm_modules[2].pool_scale == 3
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_aspp_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import ASPPHead, DepthwiseSeparableASPPHead
from .utils import _conv_has_norm, to_cuda
def test_aspp_head():
with pytest.raises(AssertionError):
# pool_scales must be list|tuple
ASPPHead(in_channels=8, channels=4, num_classes=19, dilations=1)
# test no norm_cfg
head = ASPPHead(in_channels=8, channels=4, num_classes=19)
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = ASPPHead(
in_channels=8,
channels=4,
num_classes=19,
norm_cfg=dict(type='SyncBN'))
assert _conv_has_norm(head, sync_bn=True)
inputs = [torch.randn(1, 8, 45, 45)]
head = ASPPHead(
in_channels=8, channels=4, num_classes=19, dilations=(1, 12, 24))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.aspp_modules[0].conv.dilation == (1, 1)
assert head.aspp_modules[1].conv.dilation == (12, 12)
assert head.aspp_modules[2].conv.dilation == (24, 24)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
def test_dw_aspp_head():
    # test w/o c1
inputs = [torch.randn(1, 8, 45, 45)]
head = DepthwiseSeparableASPPHead(
c1_in_channels=0,
c1_channels=0,
in_channels=8,
channels=4,
num_classes=19,
dilations=(1, 12, 24))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.c1_bottleneck is None
assert head.aspp_modules[0].conv.dilation == (1, 1)
assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12)
assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
# test with c1
inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 16, 21, 21)]
head = DepthwiseSeparableASPPHead(
c1_in_channels=4,
c1_channels=2,
in_channels=16,
channels=8,
num_classes=19,
dilations=(1, 12, 24))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.c1_bottleneck.in_channels == 4
assert head.c1_bottleneck.out_channels == 2
assert head.aspp_modules[0].conv.dilation == (1, 1)
assert head.aspp_modules[1].depthwise_conv.dilation == (12, 12)
assert head.aspp_modules[2].depthwise_conv.dilation == (24, 24)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_cc_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import CCHead
from .utils import to_cuda
def test_cc_head():
head = CCHead(in_channels=16, channels=8, num_classes=19)
assert len(head.convs) == 2
assert hasattr(head, 'cca')
if not torch.cuda.is_available():
pytest.skip('CCHead requires CUDA')
inputs = [torch.randn(1, 16, 23, 23)]
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_da_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import DAHead
from .utils import to_cuda
def test_da_head():
inputs = [torch.randn(1, 16, 23, 23)]
head = DAHead(in_channels=16, channels=8, num_classes=19, pam_channels=8)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert isinstance(outputs, tuple) and len(outputs) == 3
for output in outputs:
assert output.shape == (1, head.num_classes, 23, 23)
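    # The three training outputs are the fused PAM+CAM logits plus the
    # separate position- and channel-attention predictions; forward_test
    # below returns only the single fused map.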
test_output = head.forward_test(inputs, None, None)
assert test_output.shape == (1, head.num_classes, 23, 23)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_decode_head.py
# Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import patch
import pytest
import torch
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
from .utils import to_cuda
@patch.multiple(BaseDecodeHead, __abstractmethods__=set())
def test_decode_head():
with pytest.raises(AssertionError):
# default input_transform doesn't accept multiple inputs
BaseDecodeHead([32, 16], 16, num_classes=19)
with pytest.raises(AssertionError):
# default input_transform doesn't accept multiple inputs
BaseDecodeHead(32, 16, num_classes=19, in_index=[-1, -2])
with pytest.raises(AssertionError):
        # input_transform must be 'resize_concat' or 'multiple_select'
BaseDecodeHead(32, 16, num_classes=19, input_transform='concat')
with pytest.raises(AssertionError):
# in_channels should be list|tuple
BaseDecodeHead(32, 16, num_classes=19, input_transform='resize_concat')
with pytest.raises(AssertionError):
# in_index should be list|tuple
BaseDecodeHead([32],
16,
in_index=-1,
num_classes=19,
input_transform='resize_concat')
with pytest.raises(AssertionError):
# len(in_index) should equal len(in_channels)
BaseDecodeHead([32, 16],
16,
num_classes=19,
in_index=[-1],
input_transform='resize_concat')
with pytest.raises(ValueError):
        # out_channels must equal num_classes (or be 1 for binary segmentation)
BaseDecodeHead(32, 16, num_classes=19, out_channels=18)
# test out_channels
head = BaseDecodeHead(32, 16, num_classes=2)
assert head.out_channels == 2
# test out_channels == 1 and num_classes == 2
head = BaseDecodeHead(32, 16, num_classes=2, out_channels=1)
assert head.out_channels == 1 and head.num_classes == 2
# test default dropout
head = BaseDecodeHead(32, 16, num_classes=19)
assert hasattr(head, 'dropout') and head.dropout.p == 0.1
# test set dropout
head = BaseDecodeHead(32, 16, num_classes=19, dropout_ratio=0.2)
assert hasattr(head, 'dropout') and head.dropout.p == 0.2
# test no input_transform
inputs = [torch.randn(1, 32, 45, 45)]
head = BaseDecodeHead(32, 16, num_classes=19)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.in_channels == 32
assert head.input_transform is None
transformed_inputs = head._transform_inputs(inputs)
assert transformed_inputs.shape == (1, 32, 45, 45)
# test input_transform = resize_concat
inputs = [torch.randn(1, 32, 45, 45), torch.randn(1, 16, 21, 21)]
head = BaseDecodeHead([32, 16],
16,
num_classes=19,
in_index=[0, 1],
input_transform='resize_concat')
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.in_channels == 48
assert head.input_transform == 'resize_concat'
transformed_inputs = head._transform_inputs(inputs)
assert transformed_inputs.shape == (1, 48, 45, 45)
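    # 'resize_concat' upsamples every selected input to the largest spatial
    # size before concatenating along channels: 32 + 16 = 48 channels at 45x45.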
# test multi-loss, loss_decode is dict
with pytest.raises(TypeError):
# loss_decode must be a dict or sequence of dict.
BaseDecodeHead(3, 16, num_classes=19, loss_decode=['CrossEntropyLoss'])
inputs = torch.randn(2, 19, 8, 8).float()
target = torch.ones(2, 1, 64, 64).long()
head = BaseDecodeHead(
3,
16,
num_classes=19,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
head, target = to_cuda(head, target)
loss = head.losses(seg_logit=inputs, seg_label=target)
assert 'loss_ce' in loss
# test multi-loss, loss_decode is list of dict
inputs = torch.randn(2, 19, 8, 8).float()
target = torch.ones(2, 1, 64, 64).long()
head = BaseDecodeHead(
3,
16,
num_classes=19,
loss_decode=[
dict(type='CrossEntropyLoss', loss_name='loss_1'),
dict(type='CrossEntropyLoss', loss_name='loss_2')
])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
head, target = to_cuda(head, target)
loss = head.losses(seg_logit=inputs, seg_label=target)
assert 'loss_1' in loss
assert 'loss_2' in loss
# 'loss_decode' must be a dict or sequence of dict
with pytest.raises(TypeError):
BaseDecodeHead(3, 16, num_classes=19, loss_decode=['CrossEntropyLoss'])
with pytest.raises(TypeError):
BaseDecodeHead(3, 16, num_classes=19, loss_decode=0)
    # test multi-loss, loss_decode is a tuple of dict
inputs = torch.randn(2, 19, 8, 8).float()
target = torch.ones(2, 1, 64, 64).long()
head = BaseDecodeHead(
3,
16,
num_classes=19,
loss_decode=(dict(type='CrossEntropyLoss', loss_name='loss_1'),
dict(type='CrossEntropyLoss', loss_name='loss_2'),
dict(type='CrossEntropyLoss', loss_name='loss_3')))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
head, target = to_cuda(head, target)
loss = head.losses(seg_logit=inputs, seg_label=target)
assert 'loss_1' in loss
assert 'loss_2' in loss
assert 'loss_3' in loss
    # test multi-loss, loss_decode is a sequence of dict with identical loss_names
inputs = torch.randn(2, 19, 8, 8).float()
target = torch.ones(2, 1, 64, 64).long()
head = BaseDecodeHead(
3,
16,
num_classes=19,
loss_decode=(dict(type='CrossEntropyLoss', loss_name='loss_ce'),
dict(type='CrossEntropyLoss', loss_name='loss_ce'),
dict(type='CrossEntropyLoss', loss_name='loss_ce')))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
head, target = to_cuda(head, target)
loss_3 = head.losses(seg_logit=inputs, seg_label=target)
head = BaseDecodeHead(
3,
16,
num_classes=19,
loss_decode=(dict(type='CrossEntropyLoss', loss_name='loss_ce')))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
head, target = to_cuda(head, target)
loss = head.losses(seg_logit=inputs, seg_label=target)
assert 'loss_ce' in loss
assert 'loss_ce' in loss_3
assert loss_3['loss_ce'] == 3 * loss['loss_ce']
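    # Losses sharing one loss_name are accumulated into a single log entry,
    # so three identical CE terms equal exactly 3x the single-loss value.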
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_dm_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import DMHead
from .utils import _conv_has_norm, to_cuda
def test_dm_head():
with pytest.raises(AssertionError):
# filter_sizes must be list|tuple
DMHead(in_channels=8, channels=4, num_classes=19, filter_sizes=1)
# test no norm_cfg
head = DMHead(in_channels=8, channels=4, num_classes=19)
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = DMHead(
in_channels=8,
channels=4,
num_classes=19,
norm_cfg=dict(type='SyncBN'))
assert _conv_has_norm(head, sync_bn=True)
# fusion=True
inputs = [torch.randn(1, 8, 23, 23)]
head = DMHead(
in_channels=8,
channels=4,
num_classes=19,
filter_sizes=(1, 3, 5),
fusion=True)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.fusion is True
assert head.dcm_modules[0].filter_size == 1
assert head.dcm_modules[1].filter_size == 3
assert head.dcm_modules[2].filter_size == 5
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
# fusion=False
inputs = [torch.randn(1, 8, 23, 23)]
head = DMHead(
in_channels=8,
channels=4,
num_classes=19,
filter_sizes=(1, 3, 5),
fusion=False)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.fusion is False
assert head.dcm_modules[0].filter_size == 1
assert head.dcm_modules[1].filter_size == 3
assert head.dcm_modules[2].filter_size == 5
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_dnl_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import DNLHead
from .utils import to_cuda
def test_dnl_head():
# DNL with 'embedded_gaussian' mode
head = DNLHead(in_channels=8, channels=4, num_classes=19)
assert len(head.convs) == 2
assert hasattr(head, 'dnl_block')
assert head.dnl_block.temperature == 0.05
inputs = [torch.randn(1, 8, 23, 23)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
    # DNL with 'dot_product' mode
head = DNLHead(
in_channels=8, channels=4, num_classes=19, mode='dot_product')
inputs = [torch.randn(1, 8, 23, 23)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
    # DNL with 'gaussian' mode
head = DNLHead(in_channels=8, channels=4, num_classes=19, mode='gaussian')
inputs = [torch.randn(1, 8, 23, 23)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
    # DNL with 'concatenation' mode
head = DNLHead(
in_channels=8, channels=4, num_classes=19, mode='concatenation')
inputs = [torch.randn(1, 8, 23, 23)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_dpt_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import DPTHead
def test_dpt_head():
with pytest.raises(AssertionError):
# input_transform must be 'multiple_select'
head = DPTHead(
in_channels=[768, 768, 768, 768],
channels=4,
num_classes=19,
in_index=[0, 1, 2, 3])
head = DPTHead(
in_channels=[768, 768, 768, 768],
channels=4,
num_classes=19,
in_index=[0, 1, 2, 3],
input_transform='multiple_select')
inputs = [[torch.randn(4, 768, 2, 2),
torch.randn(4, 768)] for _ in range(4)]
output = head(inputs)
assert output.shape == torch.Size((4, 19, 16, 16))
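    # Each stage input is a (feature map, class token) pair; the readout_type
    # tested below decides how the token is consumed ('ignore' drops it,
    # 'add' sums it onto every spatial position, 'project' concatenates it
    # and projects back to the embedding dimension).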
# test readout operation
head = DPTHead(
in_channels=[768, 768, 768, 768],
channels=4,
num_classes=19,
in_index=[0, 1, 2, 3],
input_transform='multiple_select',
readout_type='add')
output = head(inputs)
assert output.shape == torch.Size((4, 19, 16, 16))
head = DPTHead(
in_channels=[768, 768, 768, 768],
channels=4,
num_classes=19,
in_index=[0, 1, 2, 3],
input_transform='multiple_select',
readout_type='project')
output = head(inputs)
assert output.shape == torch.Size((4, 19, 16, 16))
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_ema_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import EMAHead
from .utils import to_cuda
def test_emanet_head():
head = EMAHead(
in_channels=4,
ema_channels=3,
channels=2,
num_stages=3,
num_bases=2,
num_classes=19)
for param in head.ema_mid_conv.parameters():
assert not param.requires_grad
assert hasattr(head, 'ema_module')
inputs = [torch.randn(1, 4, 23, 23)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_enc_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import EncHead
from .utils import to_cuda
def test_enc_head():
    # with se_loss, w/o lateral
inputs = [torch.randn(1, 8, 21, 21)]
head = EncHead(in_channels=[8], channels=4, num_classes=19, in_index=[-1])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert isinstance(outputs, tuple) and len(outputs) == 2
assert outputs[0].shape == (1, head.num_classes, 21, 21)
assert outputs[1].shape == (1, head.num_classes)
    # w/o se_loss, w/o lateral
inputs = [torch.randn(1, 8, 21, 21)]
head = EncHead(
in_channels=[8],
channels=4,
use_se_loss=False,
num_classes=19,
in_index=[-1])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 21, 21)
# with se_loss, with lateral
inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 8, 21, 21)]
head = EncHead(
in_channels=[4, 8],
channels=4,
add_lateral=True,
num_classes=19,
in_index=[-2, -1])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert isinstance(outputs, tuple) and len(outputs) == 2
assert outputs[0].shape == (1, head.num_classes, 21, 21)
assert outputs[1].shape == (1, head.num_classes)
test_output = head.forward_test(inputs, None, None)
assert test_output.shape == (1, head.num_classes, 21, 21)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_fcn_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmcv.utils.parrots_wrapper import SyncBatchNorm
from mmseg.models.decode_heads import DepthwiseSeparableFCNHead, FCNHead
from .utils import to_cuda
def test_fcn_head():
with pytest.raises(AssertionError):
# num_convs must be not less than 0
FCNHead(num_classes=19, num_convs=-1)
# test no norm_cfg
head = FCNHead(in_channels=8, channels=4, num_classes=19)
for m in head.modules():
if isinstance(m, ConvModule):
assert not m.with_norm
# test with norm_cfg
head = FCNHead(
in_channels=8,
channels=4,
num_classes=19,
norm_cfg=dict(type='SyncBN'))
for m in head.modules():
if isinstance(m, ConvModule):
assert m.with_norm and isinstance(m.bn, SyncBatchNorm)
# test concat_input=False
inputs = [torch.randn(1, 8, 23, 23)]
head = FCNHead(
in_channels=8, channels=4, num_classes=19, concat_input=False)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert len(head.convs) == 2
assert not head.concat_input and not hasattr(head, 'conv_cat')
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
# test concat_input=True
inputs = [torch.randn(1, 8, 23, 23)]
head = FCNHead(
in_channels=8, channels=4, num_classes=19, concat_input=True)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert len(head.convs) == 2
assert head.concat_input
assert head.conv_cat.in_channels == 12
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
# test kernel_size=3
inputs = [torch.randn(1, 8, 23, 23)]
head = FCNHead(in_channels=8, channels=4, num_classes=19)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
for i in range(len(head.convs)):
assert head.convs[i].kernel_size == (3, 3)
assert head.convs[i].padding == 1
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
# test kernel_size=1
inputs = [torch.randn(1, 8, 23, 23)]
head = FCNHead(in_channels=8, channels=4, num_classes=19, kernel_size=1)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
for i in range(len(head.convs)):
assert head.convs[i].kernel_size == (1, 1)
assert head.convs[i].padding == 0
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
    # test num_convs=1
inputs = [torch.randn(1, 8, 23, 23)]
head = FCNHead(in_channels=8, channels=4, num_classes=19, num_convs=1)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert len(head.convs) == 1
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
    # test num_convs=0
inputs = [torch.randn(1, 8, 23, 23)]
head = FCNHead(
in_channels=8,
channels=8,
num_classes=19,
num_convs=0,
concat_input=False)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert isinstance(head.convs, torch.nn.Identity)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
def test_sep_fcn_head():
# test sep_fcn_head with concat_input=False
head = DepthwiseSeparableFCNHead(
in_channels=128,
channels=128,
concat_input=False,
num_classes=19,
in_index=-1,
norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01))
x = [torch.rand(2, 128, 8, 8)]
output = head(x)
assert output.shape == (2, head.num_classes, 8, 8)
assert not head.concat_input
assert isinstance(head.convs[0], DepthwiseSeparableConvModule)
assert isinstance(head.convs[1], DepthwiseSeparableConvModule)
assert head.conv_seg.kernel_size == (1, 1)
head = DepthwiseSeparableFCNHead(
in_channels=64,
channels=64,
concat_input=True,
num_classes=19,
in_index=-1,
norm_cfg=dict(type='BN', requires_grad=True, momentum=0.01))
x = [torch.rand(3, 64, 8, 8)]
output = head(x)
assert output.shape == (3, head.num_classes, 8, 8)
assert head.concat_input
assert isinstance(head.convs[0], DepthwiseSeparableConvModule)
assert isinstance(head.convs[1], DepthwiseSeparableConvModule)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_gc_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import GCHead
from .utils import to_cuda
def test_gc_head():
head = GCHead(in_channels=4, channels=4, num_classes=19)
assert len(head.convs) == 2
assert hasattr(head, 'gc_block')
inputs = [torch.randn(1, 4, 23, 23)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_ham_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import LightHamHead
from .utils import _conv_has_norm, to_cuda
ham_norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)
def test_ham_head():
# test without sync_bn
head = LightHamHead(
in_channels=[16, 32, 64],
in_index=[1, 2, 3],
channels=64,
ham_channels=64,
dropout_ratio=0.1,
num_classes=19,
norm_cfg=ham_norm_cfg,
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
ham_kwargs=dict(
MD_S=1,
MD_R=64,
train_steps=6,
eval_steps=7,
inv_t=100,
rand_init=True))
assert not _conv_has_norm(head, sync_bn=False)
inputs = [
torch.randn(1, 8, 32, 32),
torch.randn(1, 16, 16, 16),
torch.randn(1, 32, 8, 8),
torch.randn(1, 64, 4, 4)
]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.in_channels == [16, 32, 64]
assert head.hamburger.ham_in.in_channels == 64
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 16, 16)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_isa_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import ISAHead
from .utils import to_cuda
def test_isa_head():
inputs = [torch.randn(1, 8, 23, 23)]
isa_head = ISAHead(
in_channels=8,
channels=4,
num_classes=19,
isa_channels=4,
down_factor=(8, 8))
if torch.cuda.is_available():
isa_head, inputs = to_cuda(isa_head, inputs)
output = isa_head(inputs)
assert output.shape == (1, isa_head.num_classes, 23, 23)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_knet_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads.knet_head import (IterativeDecodeHead,
KernelUpdateHead)
from .utils import to_cuda
num_stages = 3
conv_kernel_size = 1
kernel_updator_cfg = dict(
type='KernelUpdator',
in_channels=16,
feat_channels=16,
out_channels=16,
gate_norm_act=True,
activate_out=True,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN'))
def test_knet_head():
# test init function of kernel update head
kernel_update_head = KernelUpdateHead(
num_classes=150,
num_ffn_fcs=2,
num_heads=8,
num_mask_fcs=1,
feedforward_channels=128,
in_channels=32,
out_channels=32,
dropout=0.0,
conv_kernel_size=conv_kernel_size,
ffn_act_cfg=dict(type='ReLU', inplace=True),
with_ffn=True,
feat_transform_cfg=dict(conv_cfg=dict(type='Conv2d'), act_cfg=None),
kernel_init=True,
kernel_updator_cfg=kernel_updator_cfg)
kernel_update_head.init_weights()
head = IterativeDecodeHead(
num_stages=num_stages,
kernel_update_head=[
dict(
type='KernelUpdateHead',
num_classes=150,
num_ffn_fcs=2,
num_heads=8,
num_mask_fcs=1,
feedforward_channels=128,
in_channels=32,
out_channels=32,
dropout=0.0,
conv_kernel_size=conv_kernel_size,
ffn_act_cfg=dict(type='ReLU', inplace=True),
with_ffn=True,
feat_transform_cfg=dict(
conv_cfg=dict(type='Conv2d'), act_cfg=None),
kernel_init=False,
kernel_updator_cfg=kernel_updator_cfg)
for _ in range(num_stages)
],
kernel_generate_head=dict(
type='FCNHead',
in_channels=128,
in_index=3,
channels=32,
num_convs=2,
concat_input=True,
dropout_ratio=0.1,
num_classes=150,
align_corners=False))
head.init_weights()
inputs = [
torch.randn(1, 16, 27, 32),
torch.randn(1, 32, 27, 16),
torch.randn(1, 64, 27, 16),
torch.randn(1, 128, 27, 16)
]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs[-1].shape == (1, head.num_classes, 27, 16)
# test whether only return the prediction of
# the last stage during testing
with torch.no_grad():
head.eval()
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 27, 16)
# test K-Net without `feat_transform_cfg`
head = IterativeDecodeHead(
num_stages=num_stages,
kernel_update_head=[
dict(
type='KernelUpdateHead',
num_classes=150,
num_ffn_fcs=2,
num_heads=8,
num_mask_fcs=1,
feedforward_channels=128,
in_channels=32,
out_channels=32,
dropout=0.0,
conv_kernel_size=conv_kernel_size,
ffn_act_cfg=dict(type='ReLU', inplace=True),
with_ffn=True,
feat_transform_cfg=None,
kernel_updator_cfg=kernel_updator_cfg)
for _ in range(num_stages)
],
kernel_generate_head=dict(
type='FCNHead',
in_channels=128,
in_index=3,
channels=32,
num_convs=2,
concat_input=True,
dropout_ratio=0.1,
num_classes=150,
align_corners=False))
head.init_weights()
inputs = [
torch.randn(1, 16, 27, 32),
torch.randn(1, 32, 27, 16),
torch.randn(1, 64, 27, 16),
torch.randn(1, 128, 27, 16)
]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs[-1].shape == (1, head.num_classes, 27, 16)
# test K-Net with
# self.mask_transform_stride == 2 and self.feat_gather_stride == 1
head = IterativeDecodeHead(
num_stages=num_stages,
kernel_update_head=[
dict(
type='KernelUpdateHead',
num_classes=150,
num_ffn_fcs=2,
num_heads=8,
num_mask_fcs=1,
feedforward_channels=128,
in_channels=32,
out_channels=32,
dropout=0.0,
conv_kernel_size=conv_kernel_size,
ffn_act_cfg=dict(type='ReLU', inplace=True),
with_ffn=True,
feat_transform_cfg=dict(
conv_cfg=dict(type='Conv2d'), act_cfg=None),
kernel_init=False,
mask_transform_stride=2,
feat_gather_stride=1,
kernel_updator_cfg=kernel_updator_cfg)
for _ in range(num_stages)
],
kernel_generate_head=dict(
type='FCNHead',
in_channels=128,
in_index=3,
channels=32,
num_convs=2,
concat_input=True,
dropout_ratio=0.1,
num_classes=150,
align_corners=False))
head.init_weights()
inputs = [
torch.randn(1, 16, 27, 32),
torch.randn(1, 32, 27, 16),
torch.randn(1, 64, 27, 16),
torch.randn(1, 128, 27, 16)
]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs[-1].shape == (1, head.num_classes, 26, 16)
# test loss function in K-Net
fake_label = torch.ones_like(
outputs[-1][:, 0:1, :, :], dtype=torch.int16).long()
loss = head.losses(seg_logit=outputs, seg_label=fake_label)
assert loss['loss_ce.s0'] != torch.zeros_like(loss['loss_ce.s0'])
assert loss['loss_ce.s1'] != torch.zeros_like(loss['loss_ce.s1'])
assert loss['loss_ce.s2'] != torch.zeros_like(loss['loss_ce.s2'])
assert loss['loss_ce.s3'] != torch.zeros_like(loss['loss_ce.s3'])
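    # IterativeDecodeHead logs one stage-indexed loss per prediction:
    # loss_ce.s0 comes from the kernel generate head and s1-s3 from the
    # three kernel update stages configured via num_stages.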
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_lraspp_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import LRASPPHead
def test_lraspp_head():
with pytest.raises(ValueError):
# check invalid input_transform
LRASPPHead(
in_channels=(4, 4, 123),
in_index=(0, 1, 2),
channels=32,
input_transform='resize_concat',
dropout_ratio=0.1,
num_classes=19,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
with pytest.raises(AssertionError):
# check invalid branch_channels
LRASPPHead(
in_channels=(4, 4, 123),
in_index=(0, 1, 2),
channels=32,
branch_channels=64,
input_transform='multiple_select',
dropout_ratio=0.1,
num_classes=19,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
# test with default settings
lraspp_head = LRASPPHead(
in_channels=(4, 4, 123),
in_index=(0, 1, 2),
channels=32,
input_transform='multiple_select',
dropout_ratio=0.1,
num_classes=19,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
align_corners=False,
loss_decode=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0))
inputs = [
torch.randn(2, 4, 45, 45),
torch.randn(2, 4, 28, 28),
torch.randn(2, 123, 14, 14)
]
with pytest.raises(RuntimeError):
# check invalid inputs
output = lraspp_head(inputs)
inputs = [
torch.randn(2, 4, 111, 111),
torch.randn(2, 4, 77, 77),
torch.randn(2, 123, 55, 55)
]
output = lraspp_head(inputs)
assert output.shape == (2, 19, 111, 111)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_nl_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import NLHead
from .utils import to_cuda
def test_nl_head():
head = NLHead(in_channels=8, channels=4, num_classes=19)
assert len(head.convs) == 2
assert hasattr(head, 'nl_block')
inputs = [torch.randn(1, 8, 23, 23)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_ocr_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import FCNHead, OCRHead
from .utils import to_cuda
def test_ocr_head():
inputs = [torch.randn(1, 8, 23, 23)]
ocr_head = OCRHead(
in_channels=8, channels=4, num_classes=19, ocr_channels=8)
fcn_head = FCNHead(in_channels=8, channels=4, num_classes=19)
if torch.cuda.is_available():
head, inputs = to_cuda(ocr_head, inputs)
head, inputs = to_cuda(fcn_head, inputs)
prev_output = fcn_head(inputs)
output = ocr_head(inputs, prev_output)
assert output.shape == (1, ocr_head.num_classes, 23, 23)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_point_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.utils import ConfigDict
from mmseg.models.decode_heads import FCNHead, PointHead
from .utils import to_cuda
def test_point_head():
inputs = [torch.randn(1, 32, 45, 45)]
point_head = PointHead(
in_channels=[32], in_index=[0], channels=16, num_classes=19)
assert len(point_head.fcs) == 3
fcn_head = FCNHead(in_channels=32, channels=16, num_classes=19)
if torch.cuda.is_available():
head, inputs = to_cuda(point_head, inputs)
head, inputs = to_cuda(fcn_head, inputs)
prev_output = fcn_head(inputs)
test_cfg = ConfigDict(
subdivision_steps=2, subdivision_num_points=8196, scale_factor=2)
output = point_head.forward_test(inputs, prev_output, None, test_cfg)
assert output.shape == (1, point_head.num_classes, 180, 180)
# test multiple losses case
inputs = [torch.randn(1, 32, 45, 45)]
point_head_multiple_losses = PointHead(
in_channels=[32],
in_index=[0],
channels=16,
num_classes=19,
loss_decode=[
dict(type='CrossEntropyLoss', loss_name='loss_1'),
dict(type='CrossEntropyLoss', loss_name='loss_2')
])
assert len(point_head_multiple_losses.fcs) == 3
fcn_head_multiple_losses = FCNHead(
in_channels=32,
channels=16,
num_classes=19,
loss_decode=[
dict(type='CrossEntropyLoss', loss_name='loss_1'),
dict(type='CrossEntropyLoss', loss_name='loss_2')
])
if torch.cuda.is_available():
head, inputs = to_cuda(point_head_multiple_losses, inputs)
head, inputs = to_cuda(fcn_head_multiple_losses, inputs)
prev_output = fcn_head_multiple_losses(inputs)
test_cfg = ConfigDict(
subdivision_steps=2, subdivision_num_points=8196, scale_factor=2)
output = point_head_multiple_losses.forward_test(inputs, prev_output, None,
test_cfg)
assert output.shape == (1, point_head.num_classes, 180, 180)
fake_label = torch.ones([1, 180, 180], dtype=torch.long)
if torch.cuda.is_available():
fake_label = fake_label.cuda()
loss = point_head_multiple_losses.losses(output, fake_label)
assert 'pointloss_1' in loss
assert 'pointloss_2' in loss
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_psa_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import PSAHead
from .utils import _conv_has_norm, to_cuda
def test_psa_head():
with pytest.raises(AssertionError):
# psa_type must be in 'bi-direction', 'collect', 'distribute'
PSAHead(
in_channels=4,
channels=2,
num_classes=19,
mask_size=(13, 13),
psa_type='gather')
# test no norm_cfg
head = PSAHead(
in_channels=4, channels=2, num_classes=19, mask_size=(13, 13))
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = PSAHead(
in_channels=4,
channels=2,
num_classes=19,
mask_size=(13, 13),
norm_cfg=dict(type='SyncBN'))
assert _conv_has_norm(head, sync_bn=True)
# test 'bi-direction' psa_type
inputs = [torch.randn(1, 4, 13, 13)]
head = PSAHead(
in_channels=4, channels=2, num_classes=19, mask_size=(13, 13))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 13, 13)
# test 'bi-direction' psa_type, shrink_factor=1
inputs = [torch.randn(1, 4, 13, 13)]
head = PSAHead(
in_channels=4,
channels=2,
num_classes=19,
mask_size=(13, 13),
shrink_factor=1)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 13, 13)
# test 'bi-direction' psa_type with soft_max
inputs = [torch.randn(1, 4, 13, 13)]
head = PSAHead(
in_channels=4,
channels=2,
num_classes=19,
mask_size=(13, 13),
psa_softmax=True)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 13, 13)
# test 'collect' psa_type
inputs = [torch.randn(1, 4, 13, 13)]
head = PSAHead(
in_channels=4,
channels=2,
num_classes=19,
mask_size=(13, 13),
psa_type='collect')
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 13, 13)
# test 'collect' psa_type, shrink_factor=1
inputs = [torch.randn(1, 4, 13, 13)]
head = PSAHead(
in_channels=4,
channels=2,
num_classes=19,
mask_size=(13, 13),
shrink_factor=1,
psa_type='collect')
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 13, 13)
# test 'collect' psa_type, shrink_factor=1, compact=True
inputs = [torch.randn(1, 4, 13, 13)]
head = PSAHead(
in_channels=4,
channels=2,
num_classes=19,
mask_size=(13, 13),
psa_type='collect',
shrink_factor=1,
compact=True)
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 13, 13)
# test 'distribute' psa_type
inputs = [torch.randn(1, 4, 13, 13)]
head = PSAHead(
in_channels=4,
channels=2,
num_classes=19,
mask_size=(13, 13),
psa_type='distribute')
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 13, 13)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_psp_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import PSPHead
from .utils import _conv_has_norm, to_cuda
def test_psp_head():
with pytest.raises(AssertionError):
# pool_scales must be list|tuple
PSPHead(in_channels=4, channels=2, num_classes=19, pool_scales=1)
# test no norm_cfg
head = PSPHead(in_channels=4, channels=2, num_classes=19)
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = PSPHead(
in_channels=4,
channels=2,
num_classes=19,
norm_cfg=dict(type='SyncBN'))
assert _conv_has_norm(head, sync_bn=True)
inputs = [torch.randn(1, 4, 23, 23)]
head = PSPHead(
in_channels=4, channels=2, num_classes=19, pool_scales=(1, 2, 3))
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
assert head.psp_modules[0][0].output_size == 1
assert head.psp_modules[1][0].output_size == 2
assert head.psp_modules[2][0].output_size == 3
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 23, 23)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_segformer_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import SegformerHead
def test_segformer_head():
with pytest.raises(AssertionError):
# `in_channels` must have same length as `in_index`
SegformerHead(
in_channels=(1, 2, 3), in_index=(0, 1), channels=5, num_classes=2)
H, W = (64, 64)
in_channels = (32, 64, 160, 256)
shapes = [(H // 2**(i + 2), W // 2**(i + 2))
for i in range(len(in_channels))]
model = SegformerHead(
in_channels=in_channels,
in_index=[0, 1, 2, 3],
channels=256,
num_classes=19)
with pytest.raises(IndexError):
# in_index must match the input feature maps.
inputs = [
torch.randn((1, in_channel, *shape))
for in_channel, shape in zip(in_channels, shapes)
][:3]
temp = model(inputs)
# Normal Input
# ((1, 32, 16, 16), (1, 64, 8, 8), (1, 160, 4, 4), (1, 256, 2, 2)
inputs = [
torch.randn((1, in_channel, *shape))
for in_channel, shape in zip(in_channels, shapes)
]
temp = model(inputs)
assert temp.shape == (1, 19, H // 4, W // 4)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_segmenter_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import SegmenterMaskTransformerHead
from .utils import _conv_has_norm, to_cuda
def test_segmenter_mask_transformer_head():
head = SegmenterMaskTransformerHead(
in_channels=2,
channels=2,
num_classes=150,
num_layers=2,
num_heads=3,
embed_dims=192,
dropout_ratio=0.0)
assert _conv_has_norm(head, sync_bn=True)
head.init_weights()
inputs = [torch.randn(1, 2, 32, 32)]
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 32, 32)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_setr_mla_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import SETRMLAHead
from .utils import to_cuda
def test_setr_mla_head(capsys):
with pytest.raises(AssertionError):
        # MLA requires multi-stage feature maps as input.
SETRMLAHead(in_channels=8, channels=4, num_classes=19, in_index=1)
with pytest.raises(AssertionError):
        # multiple in_index values require matching multiple in_channels.
SETRMLAHead(
in_channels=8, channels=4, num_classes=19, in_index=(0, 1, 2, 3))
with pytest.raises(AssertionError):
# channels should be len(in_channels) * mla_channels
SETRMLAHead(
in_channels=(8, 8, 8, 8),
channels=8,
mla_channels=4,
in_index=(0, 1, 2, 3),
num_classes=19)
# test inference of MLA head
img_size = (8, 8)
patch_size = 4
head = SETRMLAHead(
in_channels=(8, 8, 8, 8),
channels=16,
mla_channels=4,
in_index=(0, 1, 2, 3),
num_classes=19,
norm_cfg=dict(type='BN'))
h, w = img_size[0] // patch_size, img_size[1] // patch_size
    # Square NCHW input feature maps
x = [
torch.randn(1, 8, h, w),
torch.randn(1, 8, h, w),
torch.randn(1, 8, h, w),
torch.randn(1, 8, h, w)
]
if torch.cuda.is_available():
head, x = to_cuda(head, x)
out = head(x)
assert out.shape == (1, head.num_classes, h * 4, w * 4)
    # Non-square NCHW input feature maps
x = [
torch.randn(1, 8, h, w * 2),
torch.randn(1, 8, h, w * 2),
torch.randn(1, 8, h, w * 2),
torch.randn(1, 8, h, w * 2)
]
if torch.cuda.is_available():
head, x = to_cuda(head, x)
out = head(x)
assert out.shape == (1, head.num_classes, h * 4, w * 8)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_setr_up_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import SETRUPHead
from .utils import to_cuda
def test_setr_up_head(capsys):
with pytest.raises(AssertionError):
        # kernel_size must be 1 or 3
SETRUPHead(num_classes=19, kernel_size=2)
with pytest.raises(AssertionError):
# in_channels must be int type and in_channels must be same
# as embed_dim.
SETRUPHead(in_channels=(4, 4), channels=2, num_classes=19)
# test init_cfg of head
head = SETRUPHead(
in_channels=4,
channels=2,
norm_cfg=dict(type='SyncBN'),
num_classes=19,
init_cfg=dict(type='Kaiming'))
super(SETRUPHead, head).init_weights()
    # test inference of the naive (UP) head; in SETR its auxiliary
    # heads share the same structure as the decode head
img_size = (4, 4)
patch_size = 2
head = SETRUPHead(
in_channels=4,
channels=2,
num_classes=19,
num_convs=1,
up_scale=4,
kernel_size=1,
norm_cfg=dict(type='BN'))
h, w = img_size[0] // patch_size, img_size[1] // patch_size
    # Square NCHW input feature maps
x = [torch.randn(1, 4, h, w)]
if torch.cuda.is_available():
head, x = to_cuda(head, x)
out = head(x)
assert out.shape == (1, head.num_classes, h * 4, w * 4)
    # Non-square NCHW input feature maps
x = [torch.randn(1, 4, h, w * 2)]
if torch.cuda.is_available():
head, x = to_cuda(head, x)
out = head(x)
assert out.shape == (1, head.num_classes, h * 4, w * 8)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_stdc_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.decode_heads import STDCHead
from .utils import to_cuda
def test_stdc_head():
inputs = [torch.randn(1, 32, 21, 21)]
head = STDCHead(
in_channels=32,
channels=8,
num_convs=1,
num_classes=2,
in_index=-1,
loss_decode=[
dict(
type='CrossEntropyLoss', loss_name='loss_ce', loss_weight=1.0),
dict(type='DiceLoss', loss_name='loss_dice', loss_weight=1.0)
])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert isinstance(outputs, torch.Tensor) and len(outputs) == 1
assert outputs.shape == torch.Size([1, head.num_classes, 21, 21])
fake_label = torch.ones_like(
outputs[:, 0:1, :, :], dtype=torch.int16).long()
loss = head.losses(seg_logit=outputs, seg_label=fake_label)
assert loss['loss_ce'] != torch.zeros_like(loss['loss_ce'])
assert loss['loss_dice'] != torch.zeros_like(loss['loss_dice'])
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/test_uper_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.decode_heads import UPerHead
from .utils import _conv_has_norm, to_cuda
def test_uper_head():
with pytest.raises(AssertionError):
# fpn_in_channels must be list|tuple
UPerHead(in_channels=4, channels=2, num_classes=19)
# test no norm_cfg
head = UPerHead(
in_channels=[4, 2], channels=2, num_classes=19, in_index=[-2, -1])
assert not _conv_has_norm(head, sync_bn=False)
# test with norm_cfg
head = UPerHead(
in_channels=[4, 2],
channels=2,
num_classes=19,
norm_cfg=dict(type='SyncBN'),
in_index=[-2, -1])
assert _conv_has_norm(head, sync_bn=True)
inputs = [torch.randn(1, 4, 45, 45), torch.randn(1, 2, 21, 21)]
head = UPerHead(
in_channels=[4, 2], channels=2, num_classes=19, in_index=[-2, -1])
if torch.cuda.is_available():
head, inputs = to_cuda(head, inputs)
outputs = head(inputs)
assert outputs.shape == (1, head.num_classes, 45, 45)
mmsegmentation | mmsegmentation-master/tests/test_models/test_heads/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import ConvModule
from mmcv.utils.parrots_wrapper import SyncBatchNorm
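# Shared helpers for the decode head tests: `_conv_has_norm` returns True only
# when every ConvModule in the module carries a norm layer (and, if `sync_bn`
# is set, that the norm is SyncBatchNorm); `to_cuda` moves a module and its
# list of input tensors onto the GPU.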
def _conv_has_norm(module, sync_bn):
for m in module.modules():
if isinstance(m, ConvModule):
if not m.with_norm:
return False
if sync_bn:
if not isinstance(m.bn, SyncBatchNorm):
return False
return True
def to_cuda(module, data):
module = module.cuda()
if isinstance(data, list):
for i in range(len(data)):
data[i] = data[i].cuda()
return module, data
mmsegmentation | mmsegmentation-master/tests/test_models/test_losses/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
mmsegmentation | mmsegmentation-master/tests/test_models/test_losses/test_ce_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.losses.cross_entropy_loss import _expand_onehot_labels
@pytest.mark.parametrize('use_sigmoid', [True, False])
@pytest.mark.parametrize('reduction', ('mean', 'sum', 'none'))
@pytest.mark.parametrize('avg_non_ignore', [True, False])
@pytest.mark.parametrize('bce_input_same_dim', [True, False])
def test_ce_loss(use_sigmoid, reduction, avg_non_ignore, bce_input_same_dim):
from mmseg.models import build_loss
# use_mask and use_sigmoid cannot be true at the same time
with pytest.raises(AssertionError):
loss_cfg = dict(
type='CrossEntropyLoss',
use_mask=True,
use_sigmoid=True,
loss_weight=1.0)
build_loss(loss_cfg)
# test loss with simple case for ce/bce
fake_pred = torch.Tensor([[100, -100]])
fake_label = torch.Tensor([1]).long()
loss_cls_cfg = dict(
type='CrossEntropyLoss',
use_sigmoid=use_sigmoid,
loss_weight=1.0,
avg_non_ignore=avg_non_ignore,
loss_name='loss_ce')
loss_cls = build_loss(loss_cls_cfg)
if use_sigmoid:
assert torch.allclose(
loss_cls(fake_pred, fake_label), torch.tensor(100.))
else:
assert torch.allclose(
loss_cls(fake_pred, fake_label), torch.tensor(200.))
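    # Closed-form check of the values above: for logits [100, -100] and target
    # class 1, softmax CE is -x_1 + logsumexp(x) ~= 100 + 100 = 200, while BCE
    # with logits on the expanded one-hot [0, 1] averages two ~100 penalties
    # to ~100 (the log(1 + e^-100) remainders are negligible).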
# test loss with complicated case for ce/bce
# when avg_non_ignore is False, `avg_factor` would not be calculated
fake_pred = torch.full(size=(2, 21, 8, 8), fill_value=0.5)
fake_label = torch.ones(2, 8, 8).long()
fake_label[:, 0, 0] = 255
fake_weight = None
# extra test bce loss when pred.shape == label.shape
if use_sigmoid and bce_input_same_dim:
fake_pred = torch.randn(2, 10).float()
fake_label = torch.rand(2, 10).float()
fake_weight = torch.rand(2, 10) # set weight in forward function
fake_label[0, [1, 2, 5, 7]] = 255 # set ignore_index
fake_label[1, [0, 5, 8, 9]] = 255
loss_cls = build_loss(loss_cls_cfg)
loss = loss_cls(
fake_pred, fake_label, weight=fake_weight, ignore_index=255)
if use_sigmoid:
if fake_pred.dim() != fake_label.dim():
fake_label, weight, valid_mask = _expand_onehot_labels(
labels=fake_label,
label_weights=None,
target_shape=fake_pred.shape,
ignore_index=255)
else:
# should mask out the ignored elements
valid_mask = ((fake_label >= 0) & (fake_label != 255)).float()
weight = valid_mask
torch_loss = torch.nn.functional.binary_cross_entropy_with_logits(
fake_pred,
fake_label.float(),
reduction='none',
weight=fake_weight)
if avg_non_ignore:
avg_factor = valid_mask.sum().item()
torch_loss = (torch_loss * weight).sum() / avg_factor
else:
torch_loss = (torch_loss * weight).mean()
else:
if avg_non_ignore:
torch_loss = torch.nn.functional.cross_entropy(
fake_pred, fake_label, reduction='mean', ignore_index=255)
else:
torch_loss = torch.nn.functional.cross_entropy(
fake_pred, fake_label, reduction='sum',
ignore_index=255) / fake_label.numel()
assert torch.allclose(loss, torch_loss)
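    # With avg_non_ignore=True the summed loss is divided by the number of
    # valid (non-ignored) elements; otherwise the divisor is the full numel,
    # which is exactly what the reference computations above replicate.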
if use_sigmoid:
# test loss with complicated case for ce/bce
# when avg_non_ignore is False, `avg_factor` would not be calculated
fake_pred = torch.full(size=(2, 21, 8, 8), fill_value=0.5)
fake_label = torch.ones(2, 8, 8).long()
fake_label[:, 0, 0] = 255
fake_weight = torch.rand(2, 8, 8)
loss_cls = build_loss(loss_cls_cfg)
loss = loss_cls(
fake_pred, fake_label, weight=fake_weight, ignore_index=255)
if use_sigmoid:
fake_label, weight, valid_mask = _expand_onehot_labels(
labels=fake_label,
label_weights=None,
target_shape=fake_pred.shape,
ignore_index=255)
torch_loss = torch.nn.functional.binary_cross_entropy_with_logits(
fake_pred,
fake_label.float(),
reduction='none',
weight=fake_weight.unsqueeze(1).expand(fake_pred.shape))
if avg_non_ignore:
avg_factor = valid_mask.sum().item()
torch_loss = (torch_loss * weight).sum() / avg_factor
else:
torch_loss = (torch_loss * weight).mean()
assert torch.allclose(loss, torch_loss)
# test loss with class weights from file
fake_pred = torch.Tensor([[100, -100]])
fake_label = torch.Tensor([1]).long()
import os
import tempfile
import mmcv
import numpy as np
tmp_file = tempfile.NamedTemporaryFile()
mmcv.dump([0.8, 0.2], f'{tmp_file.name}.pkl', 'pkl') # from pkl file
loss_cls_cfg = dict(
type='CrossEntropyLoss',
use_sigmoid=False,
class_weight=f'{tmp_file.name}.pkl',
loss_weight=1.0,
loss_name='loss_ce')
loss_cls = build_loss(loss_cls_cfg)
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))
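    # Sanity check: the unweighted CE for this prediction is ~200 (see above)
    # and the class weight loaded for label 1 is 0.2, so the expected loss is
    # 0.2 * 200 = 40; the .npy variant below must reproduce the same value.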
np.save(f'{tmp_file.name}.npy', np.array([0.8, 0.2])) # from npy file
loss_cls_cfg = dict(
type='CrossEntropyLoss',
use_sigmoid=False,
class_weight=f'{tmp_file.name}.npy',
loss_weight=1.0,
loss_name='loss_ce')
loss_cls = build_loss(loss_cls_cfg)
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(40.))
tmp_file.close()
os.remove(f'{tmp_file.name}.pkl')
os.remove(f'{tmp_file.name}.npy')
loss_cls_cfg = dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0)
loss_cls = build_loss(loss_cls_cfg)
assert torch.allclose(loss_cls(fake_pred, fake_label), torch.tensor(200.))
    # test that `avg_non_ignore` does not affect ce/bce loss when no element
    # is actually ignored, for reduction='sum'/'none'/'mean'
loss_cls_cfg1 = dict(
type='CrossEntropyLoss',
use_sigmoid=use_sigmoid,
reduction=reduction,
loss_weight=1.0,
avg_non_ignore=True)
loss_cls1 = build_loss(loss_cls_cfg1)
loss_cls_cfg2 = dict(
type='CrossEntropyLoss',
use_sigmoid=use_sigmoid,
reduction=reduction,
loss_weight=1.0,
avg_non_ignore=False)
loss_cls2 = build_loss(loss_cls_cfg2)
assert torch.allclose(
loss_cls1(fake_pred, fake_label, ignore_index=255) / fake_pred.numel(),
loss_cls2(fake_pred, fake_label, ignore_index=255) / fake_pred.numel(),
atol=1e-4)
# test ce/bce loss with ignore index and class weight
# in 5-way classification
if use_sigmoid:
# test bce loss when pred.shape == or != label.shape
if bce_input_same_dim:
fake_pred = torch.randn(2, 10).float()
fake_label = torch.rand(2, 10).float()
class_weight = torch.rand(2, 10)
else:
fake_pred = torch.full(size=(2, 21, 8, 8), fill_value=0.5)
fake_label = torch.ones(2, 8, 8).long()
class_weight = torch.randn(2, 21, 8, 8)
fake_label, weight, valid_mask = _expand_onehot_labels(
labels=fake_label,
label_weights=None,
target_shape=fake_pred.shape,
ignore_index=-100)
torch_loss = torch.nn.functional.binary_cross_entropy_with_logits(
fake_pred,
fake_label.float(),
reduction='mean',
pos_weight=class_weight)
else:
fake_pred = torch.randn(2, 5, 10).float() # 5-way classification
fake_label = torch.randint(0, 5, (2, 10)).long()
class_weight = torch.rand(5)
class_weight /= class_weight.sum()
torch_loss = torch.nn.functional.cross_entropy(
fake_pred, fake_label, reduction='sum',
weight=class_weight) / fake_label.numel()
loss_cls_cfg = dict(
type='CrossEntropyLoss',
use_sigmoid=use_sigmoid,
reduction='mean',
class_weight=class_weight,
loss_weight=1.0,
avg_non_ignore=avg_non_ignore)
loss_cls = build_loss(loss_cls_cfg)
# test cross entropy loss has name `loss_ce`
assert loss_cls.loss_name == 'loss_ce'
# test avg_non_ignore is in extra_repr
assert loss_cls.extra_repr() == f'avg_non_ignore={avg_non_ignore}'
loss = loss_cls(fake_pred, fake_label)
assert torch.allclose(loss, torch_loss)
fake_label[0, [1, 2, 5, 7]] = 10 # set ignore_index
fake_label[1, [0, 5, 8, 9]] = 10
loss = loss_cls(fake_pred, fake_label, ignore_index=10)
if use_sigmoid:
if avg_non_ignore:
torch_loss = torch.nn.functional.binary_cross_entropy_with_logits(
fake_pred[fake_label != 10],
fake_label[fake_label != 10].float(),
pos_weight=class_weight[fake_label != 10],
reduction='mean')
else:
torch_loss = torch.nn.functional.binary_cross_entropy_with_logits(
fake_pred[fake_label != 10],
fake_label[fake_label != 10].float(),
pos_weight=class_weight[fake_label != 10],
reduction='sum') / fake_label.numel()
else:
if avg_non_ignore:
torch_loss = torch.nn.functional.cross_entropy(
fake_pred,
fake_label,
ignore_index=10,
reduction='sum',
weight=class_weight) / fake_label[fake_label != 10].numel()
else:
torch_loss = torch.nn.functional.cross_entropy(
fake_pred,
fake_label,
ignore_index=10,
reduction='sum',
weight=class_weight) / fake_label.numel()
assert torch.allclose(loss, torch_loss)
@pytest.mark.parametrize('avg_non_ignore', [True, False])
@pytest.mark.parametrize('with_weight', [True, False])
def test_binary_class_ce_loss(avg_non_ignore, with_weight):
from mmseg.models import build_loss
fake_pred = torch.rand(3, 1, 10, 10)
fake_label = torch.randint(0, 2, (3, 10, 10))
fake_weight = torch.rand(3, 10, 10)
valid_mask = ((fake_label >= 0) & (fake_label != 255)).float()
weight = valid_mask
torch_loss = torch.nn.functional.binary_cross_entropy_with_logits(
fake_pred,
fake_label.unsqueeze(1).float(),
reduction='none',
weight=fake_weight.unsqueeze(1).float() if with_weight else None)
if avg_non_ignore:
eps = torch.finfo(torch.float32).eps
avg_factor = valid_mask.sum().item()
torch_loss = (torch_loss * weight.unsqueeze(1)).sum() / (
avg_factor + eps)
else:
torch_loss = (torch_loss * weight.unsqueeze(1)).mean()
loss_cls_cfg = dict(
type='CrossEntropyLoss',
use_sigmoid=True,
loss_weight=1.0,
avg_non_ignore=avg_non_ignore,
reduction='mean',
loss_name='loss_ce')
loss_cls = build_loss(loss_cls_cfg)
loss = loss_cls(
fake_pred,
fake_label,
weight=fake_weight if with_weight else None,
ignore_index=255)
assert torch.allclose(loss, torch_loss)
mmsegmentation | mmsegmentation-master/tests/test_models/test_losses/test_dice_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
def test_dice_loss():
from mmseg.models import build_loss
# test dice loss with loss_type = 'multi_class'
loss_cfg = dict(
type='DiceLoss',
reduction='none',
class_weight=[1.0, 2.0, 3.0],
loss_weight=1.0,
ignore_index=1,
loss_name='loss_dice')
dice_loss = build_loss(loss_cfg)
logits = torch.rand(8, 3, 4, 4)
labels = (torch.rand(8, 4, 4) * 3).long()
dice_loss(logits, labels)
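    # Note: ignore_index=1 here is assumed to drop class index 1 from the
    # per-class dice accumulation (a class index, not a pixel-level mask).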
# test loss with class weights from file
import os
import tempfile
import mmcv
import numpy as np
tmp_file = tempfile.NamedTemporaryFile()
mmcv.dump([1.0, 2.0, 3.0], f'{tmp_file.name}.pkl', 'pkl') # from pkl file
loss_cfg = dict(
type='DiceLoss',
reduction='none',
class_weight=f'{tmp_file.name}.pkl',
loss_weight=1.0,
ignore_index=1,
loss_name='loss_dice')
dice_loss = build_loss(loss_cfg)
dice_loss(logits, labels, ignore_index=None)
np.save(f'{tmp_file.name}.npy', np.array([1.0, 2.0, 3.0])) # from npy file
loss_cfg = dict(
type='DiceLoss',
reduction='none',
        class_weight=f'{tmp_file.name}.npy',
loss_weight=1.0,
ignore_index=1,
loss_name='loss_dice')
dice_loss = build_loss(loss_cfg)
dice_loss(logits, labels, ignore_index=None)
tmp_file.close()
os.remove(f'{tmp_file.name}.pkl')
os.remove(f'{tmp_file.name}.npy')
# test dice loss with loss_type = 'binary'
loss_cfg = dict(
type='DiceLoss',
smooth=2,
exponent=3,
reduction='sum',
loss_weight=1.0,
ignore_index=0,
loss_name='loss_dice')
dice_loss = build_loss(loss_cfg)
logits = torch.rand(8, 2, 4, 4)
labels = (torch.rand(8, 4, 4) * 2).long()
dice_loss(logits, labels)
# test dice loss has name `loss_dice`
loss_cfg = dict(
type='DiceLoss',
smooth=2,
exponent=3,
reduction='sum',
loss_weight=1.0,
ignore_index=0,
loss_name='loss_dice')
dice_loss = build_loss(loss_cfg)
assert dice_loss.loss_name == 'loss_dice'
mmsegmentation | mmsegmentation-master/tests/test_models/test_losses/test_focal_loss.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmseg.models import build_loss
# test focal loss with use_sigmoid=False
def test_use_sigmoid():
    # can't init with use_sigmoid=False
with pytest.raises(AssertionError):
loss_cfg = dict(type='FocalLoss', use_sigmoid=False)
build_loss(loss_cfg)
    # can't forward with use_sigmoid=False
with pytest.raises(NotImplementedError):
loss_cfg = dict(type='FocalLoss', use_sigmoid=True)
focal_loss = build_loss(loss_cfg)
focal_loss.use_sigmoid = False
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
focal_loss(fake_pred, fake_target)
# reduction type must be 'none', 'mean' or 'sum'
def test_wrong_reduction_type():
# can't init with wrong reduction
with pytest.raises(AssertionError):
loss_cfg = dict(type='FocalLoss', reduction='test')
build_loss(loss_cfg)
# can't forward with wrong reduction override
with pytest.raises(AssertionError):
loss_cfg = dict(type='FocalLoss')
focal_loss = build_loss(loss_cfg)
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
focal_loss(fake_pred, fake_target, reduction_override='test')
# test focal loss can handle input parameters with
# unacceptable types
def test_unacceptable_parameters():
with pytest.raises(AssertionError):
loss_cfg = dict(type='FocalLoss', gamma='test')
build_loss(loss_cfg)
with pytest.raises(AssertionError):
loss_cfg = dict(type='FocalLoss', alpha='test')
build_loss(loss_cfg)
with pytest.raises(AssertionError):
loss_cfg = dict(type='FocalLoss', class_weight='test')
build_loss(loss_cfg)
with pytest.raises(AssertionError):
loss_cfg = dict(type='FocalLoss', loss_weight='test')
build_loss(loss_cfg)
with pytest.raises(AssertionError):
loss_cfg = dict(type='FocalLoss', loss_name=123)
build_loss(loss_cfg)
# test if focal loss can be correctly initialize
def test_init_focal_loss():
loss_cfg = dict(
type='FocalLoss',
use_sigmoid=True,
gamma=3.0,
alpha=3.0,
class_weight=[1, 2, 3, 4],
reduction='sum')
focal_loss = build_loss(loss_cfg)
assert focal_loss.use_sigmoid is True
assert focal_loss.gamma == 3.0
assert focal_loss.alpha == 3.0
assert focal_loss.reduction == 'sum'
assert focal_loss.class_weight == [1, 2, 3, 4]
assert focal_loss.loss_weight == 1.0
assert focal_loss.loss_name == 'loss_focal'
# test reduction override
def test_reduction_override():
loss_cfg = dict(type='FocalLoss', reduction='mean')
focal_loss = build_loss(loss_cfg)
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
loss = focal_loss(fake_pred, fake_target, reduction_override='none')
assert loss.shape == fake_pred.shape
# test wrong pred and target shape
def test_wrong_pred_and_target_shape():
loss_cfg = dict(type='FocalLoss')
focal_loss = build_loss(loss_cfg)
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 2, 2))
fake_target = F.one_hot(fake_target, num_classes=4)
fake_target = fake_target.permute(0, 3, 1, 2)
with pytest.raises(AssertionError):
focal_loss(fake_pred, fake_target)
# test forward with different shape of target
def test_forward_with_different_shape_of_target():
loss_cfg = dict(type='FocalLoss')
focal_loss = build_loss(loss_cfg)
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
loss1 = focal_loss(fake_pred, fake_target)
fake_target = F.one_hot(fake_target, num_classes=4)
fake_target = fake_target.permute(0, 3, 1, 2)
loss2 = focal_loss(fake_pred, fake_target)
assert loss1 == loss2
# test forward with weight
def test_forward_with_weight():
loss_cfg = dict(type='FocalLoss')
focal_loss = build_loss(loss_cfg)
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
weight = torch.rand(3 * 5 * 6, 1)
loss1 = focal_loss(fake_pred, fake_target, weight=weight)
weight2 = weight.view(-1)
loss2 = focal_loss(fake_pred, fake_target, weight=weight2)
weight3 = weight.expand(3 * 5 * 6, 4)
loss3 = focal_loss(fake_pred, fake_target, weight=weight3)
assert loss1 == loss2 == loss3
# test none reduction type
def test_none_reduction_type():
loss_cfg = dict(type='FocalLoss', reduction='none')
focal_loss = build_loss(loss_cfg)
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
loss = focal_loss(fake_pred, fake_target)
assert loss.shape == fake_pred.shape
# test the usage of class weight
def test_class_weight():
loss_cfg_cw = dict(
type='FocalLoss', reduction='none', class_weight=[1.0, 2.0, 3.0, 4.0])
loss_cfg = dict(type='FocalLoss', reduction='none')
focal_loss_cw = build_loss(loss_cfg_cw)
focal_loss = build_loss(loss_cfg)
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
loss_cw = focal_loss_cw(fake_pred, fake_target)
loss = focal_loss(fake_pred, fake_target)
weight = torch.tensor([1, 2, 3, 4]).view(1, 4, 1, 1)
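    # with reduction='none', the class weight simply scales each class's
    # loss map, so weighted and unweighted losses differ by this factor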
assert (loss * weight == loss_cw).all()
# test ignore index
def test_ignore_index():
loss_cfg = dict(type='FocalLoss', reduction='none')
# ignore_index within C classes
focal_loss = build_loss(loss_cfg)
fake_pred = torch.rand(3, 5, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
dim1 = torch.randint(0, 3, (4, ))
dim2 = torch.randint(0, 5, (4, ))
dim3 = torch.randint(0, 6, (4, ))
fake_target[dim1, dim2, dim3] = 4
loss1 = focal_loss(fake_pred, fake_target, ignore_index=4)
one_hot_target = F.one_hot(fake_target, num_classes=5)
one_hot_target = one_hot_target.permute(0, 3, 1, 2)
loss2 = focal_loss(fake_pred, one_hot_target, ignore_index=4)
assert (loss1 == loss2).all()
assert (loss1[dim1, :, dim2, dim3] == 0).all()
assert (loss2[dim1, :, dim2, dim3] == 0).all()
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
loss1 = focal_loss(fake_pred, fake_target, ignore_index=2)
one_hot_target = F.one_hot(fake_target, num_classes=4)
one_hot_target = one_hot_target.permute(0, 3, 1, 2)
loss2 = focal_loss(fake_pred, one_hot_target, ignore_index=2)
    # mask of pixels whose label equals the ignored class
    ignore_mask = (fake_target == 2).unsqueeze(1)
assert (loss1 == loss2).all()
assert torch.sum(loss1 * ignore_mask) == 0
assert torch.sum(loss2 * ignore_mask) == 0
# ignore index is not in prediction's classes
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
dim1 = torch.randint(0, 3, (4, ))
dim2 = torch.randint(0, 5, (4, ))
dim3 = torch.randint(0, 6, (4, ))
fake_target[dim1, dim2, dim3] = 255
loss1 = focal_loss(fake_pred, fake_target, ignore_index=255)
assert (loss1[dim1, :, dim2, dim3] == 0).all()
# test list alpha
def test_alpha():
loss_cfg = dict(type='FocalLoss')
focal_loss = build_loss(loss_cfg)
alpha_float = 0.4
alpha = [0.4, 0.4, 0.4, 0.4]
alpha2 = [0.1, 0.3, 0.2, 0.1]
fake_pred = torch.rand(3, 4, 5, 6)
fake_target = torch.randint(0, 4, (3, 5, 6))
focal_loss.alpha = alpha_float
loss1 = focal_loss(fake_pred, fake_target)
focal_loss.alpha = alpha
loss2 = focal_loss(fake_pred, fake_target)
assert loss1 == loss2
focal_loss.alpha = alpha2
focal_loss(fake_pred, fake_target)
| 7,673 | 34.364055 | 78 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_losses/test_lovasz_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
def test_lovasz_loss():
from mmseg.models import build_loss
# loss_type should be 'binary' or 'multi_class'
with pytest.raises(AssertionError):
loss_cfg = dict(
type='LovaszLoss',
loss_type='Binary',
reduction='none',
loss_weight=1.0,
loss_name='loss_lovasz')
build_loss(loss_cfg)
# reduction should be 'none' when per_image is False.
with pytest.raises(AssertionError):
loss_cfg = dict(
type='LovaszLoss',
loss_type='multi_class',
loss_name='loss_lovasz')
build_loss(loss_cfg)
# test lovasz loss with loss_type = 'multi_class' and per_image = False
loss_cfg = dict(
type='LovaszLoss',
reduction='none',
loss_weight=1.0,
loss_name='loss_lovasz')
lovasz_loss = build_loss(loss_cfg)
logits = torch.rand(1, 3, 4, 4)
labels = (torch.rand(1, 4, 4) * 2).long()
lovasz_loss(logits, labels)
# test lovasz loss with loss_type = 'multi_class' and per_image = True
loss_cfg = dict(
type='LovaszLoss',
per_image=True,
reduction='mean',
class_weight=[1.0, 2.0, 3.0],
loss_weight=1.0,
loss_name='loss_lovasz')
lovasz_loss = build_loss(loss_cfg)
logits = torch.rand(1, 3, 4, 4)
labels = (torch.rand(1, 4, 4) * 2).long()
lovasz_loss(logits, labels, ignore_index=None)
# test loss with class weights from file
import os
import tempfile
import mmcv
import numpy as np
tmp_file = tempfile.NamedTemporaryFile()
mmcv.dump([1.0, 2.0, 3.0], f'{tmp_file.name}.pkl', 'pkl') # from pkl file
loss_cfg = dict(
type='LovaszLoss',
per_image=True,
reduction='mean',
class_weight=f'{tmp_file.name}.pkl',
loss_weight=1.0,
loss_name='loss_lovasz')
lovasz_loss = build_loss(loss_cfg)
lovasz_loss(logits, labels, ignore_index=None)
np.save(f'{tmp_file.name}.npy', np.array([1.0, 2.0, 3.0])) # from npy file
loss_cfg = dict(
type='LovaszLoss',
per_image=True,
reduction='mean',
class_weight=f'{tmp_file.name}.npy',
loss_weight=1.0,
loss_name='loss_lovasz')
lovasz_loss = build_loss(loss_cfg)
lovasz_loss(logits, labels, ignore_index=None)
tmp_file.close()
os.remove(f'{tmp_file.name}.pkl')
os.remove(f'{tmp_file.name}.npy')
# test lovasz loss with loss_type = 'binary' and per_image = False
loss_cfg = dict(
type='LovaszLoss',
loss_type='binary',
reduction='none',
loss_weight=1.0,
loss_name='loss_lovasz')
lovasz_loss = build_loss(loss_cfg)
logits = torch.rand(2, 4, 4)
labels = (torch.rand(2, 4, 4)).long()
lovasz_loss(logits, labels)
# test lovasz loss with loss_type = 'binary' and per_image = True
loss_cfg = dict(
type='LovaszLoss',
loss_type='binary',
per_image=True,
reduction='mean',
loss_weight=1.0,
loss_name='loss_lovasz')
lovasz_loss = build_loss(loss_cfg)
logits = torch.rand(2, 4, 4)
labels = (torch.rand(2, 4, 4)).long()
lovasz_loss(logits, labels, ignore_index=None)
# test lovasz loss has name `loss_lovasz`
loss_cfg = dict(
type='LovaszLoss',
loss_type='binary',
per_image=True,
reduction='mean',
loss_weight=1.0,
loss_name='loss_lovasz')
lovasz_loss = build_loss(loss_cfg)
assert lovasz_loss.loss_name == 'loss_lovasz'
| 3,632 | 29.529412 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_losses/test_tversky_loss.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
def test_tversky_loss():
from mmseg.models import build_loss
# test alpha + beta != 1
with pytest.raises(AssertionError):
loss_cfg = dict(
type='TverskyLoss',
class_weight=[1.0, 2.0, 3.0],
loss_weight=1.0,
alpha=0.4,
beta=0.7,
loss_name='loss_tversky')
tversky_loss = build_loss(loss_cfg)
logits = torch.rand(8, 3, 4, 4)
labels = (torch.rand(8, 4, 4) * 3).long()
tversky_loss(logits, labels, ignore_index=1)
# test gamma < 1.0
with pytest.raises(AssertionError):
loss_cfg = dict(
type='TverskyLoss',
class_weight=[1.0, 2.0, 3.0],
loss_weight=1.0,
alpha=0.4,
beta=0.7,
gamma=0.9999,
loss_name='loss_tversky')
tversky_loss = build_loss(loss_cfg)
logits = torch.rand(8, 3, 4, 4)
labels = (torch.rand(8, 4, 4) * 3).long()
tversky_loss(logits, labels, ignore_index=1)
# test tversky loss
loss_cfg = dict(
type='TverskyLoss',
class_weight=[1.0, 2.0, 3.0],
loss_weight=1.0,
ignore_index=1,
loss_name='loss_tversky')
tversky_loss = build_loss(loss_cfg)
logits = torch.rand(8, 3, 4, 4)
labels = (torch.rand(8, 4, 4) * 3).long()
tversky_loss(logits, labels)
# test loss with class weights from file
import os
import tempfile
import mmcv
import numpy as np
tmp_file = tempfile.NamedTemporaryFile()
mmcv.dump([1.0, 2.0, 3.0], f'{tmp_file.name}.pkl', 'pkl') # from pkl file
loss_cfg = dict(
type='TverskyLoss',
class_weight=f'{tmp_file.name}.pkl',
loss_weight=1.0,
ignore_index=1,
loss_name='loss_tversky')
tversky_loss = build_loss(loss_cfg)
tversky_loss(logits, labels)
np.save(f'{tmp_file.name}.npy', np.array([1.0, 2.0, 3.0])) # from npy file
loss_cfg = dict(
type='TverskyLoss',
        class_weight=f'{tmp_file.name}.npy',
loss_weight=1.0,
ignore_index=1,
loss_name='loss_tversky')
tversky_loss = build_loss(loss_cfg)
tversky_loss(logits, labels)
tmp_file.close()
os.remove(f'{tmp_file.name}.pkl')
os.remove(f'{tmp_file.name}.npy')
# test tversky loss has name `loss_tversky`
loss_cfg = dict(
type='TverskyLoss',
smooth=2,
loss_weight=1.0,
ignore_index=1,
alpha=0.3,
beta=0.7,
loss_name='loss_tversky')
tversky_loss = build_loss(loss_cfg)
assert tversky_loss.loss_name == 'loss_tversky'
| 2,699 | 28.347826 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_losses/test_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmseg.models.losses import Accuracy, reduce_loss, weight_reduce_loss
def test_weight_reduce_loss():
loss = torch.rand(1, 3, 4, 4)
weight = torch.zeros(1, 3, 4, 4)
weight[:, :, :2, :2] = 1
# test reduce_loss()
reduced = reduce_loss(loss, 'none')
assert reduced is loss
reduced = reduce_loss(loss, 'mean')
np.testing.assert_almost_equal(reduced.numpy(), loss.mean())
reduced = reduce_loss(loss, 'sum')
np.testing.assert_almost_equal(reduced.numpy(), loss.sum())
# test weight_reduce_loss()
reduced = weight_reduce_loss(loss, weight=None, reduction='none')
assert reduced is loss
reduced = weight_reduce_loss(loss, weight=weight, reduction='mean')
target = (loss * weight).mean()
np.testing.assert_almost_equal(reduced.numpy(), target)
reduced = weight_reduce_loss(loss, weight=weight, reduction='sum')
np.testing.assert_almost_equal(reduced.numpy(), (loss * weight).sum())
with pytest.raises(AssertionError):
weight_wrong = weight[0, 0, ...]
weight_reduce_loss(loss, weight=weight_wrong, reduction='mean')
with pytest.raises(AssertionError):
weight_wrong = weight[:, 0:2, ...]
weight_reduce_loss(loss, weight=weight_wrong, reduction='mean')
def test_accuracy():
# test for empty pred
pred = torch.empty(0, 4)
label = torch.empty(0)
accuracy = Accuracy(topk=1)
acc = accuracy(pred, label)
assert acc.item() == 0
pred = torch.Tensor([[0.2, 0.3, 0.6, 0.5], [0.1, 0.1, 0.2, 0.6],
[0.9, 0.0, 0.0, 0.1], [0.4, 0.7, 0.1, 0.1],
[0.0, 0.0, 0.99, 0]])
# test for ignore_index
true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
accuracy = Accuracy(topk=1, ignore_index=None)
acc = accuracy(pred, true_label)
assert torch.allclose(acc, torch.tensor(100.0))
# test for ignore_index with a wrong prediction of that index
true_label = torch.Tensor([2, 3, 1, 1, 2]).long()
accuracy = Accuracy(topk=1, ignore_index=1)
acc = accuracy(pred, true_label)
assert torch.allclose(acc, torch.tensor(100.0))
# test for ignore_index 1 with a wrong prediction of other index
true_label = torch.Tensor([2, 0, 0, 1, 2]).long()
accuracy = Accuracy(topk=1, ignore_index=1)
acc = accuracy(pred, true_label)
assert torch.allclose(acc, torch.tensor(75.0))
# test for ignore_index 4 with a wrong prediction of other index
true_label = torch.Tensor([2, 0, 0, 1, 2]).long()
accuracy = Accuracy(topk=1, ignore_index=4)
acc = accuracy(pred, true_label)
assert torch.allclose(acc, torch.tensor(80.0))
# test for ignoring all the pixels
true_label = torch.Tensor([2, 2, 2, 2, 2]).long()
accuracy = Accuracy(topk=1, ignore_index=2)
acc = accuracy(pred, true_label)
assert torch.allclose(acc, torch.tensor(100.0))
# test for top1
true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
accuracy = Accuracy(topk=1)
acc = accuracy(pred, true_label)
assert torch.allclose(acc, torch.tensor(100.0))
# test for top1 with score thresh=0.8
true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
accuracy = Accuracy(topk=1, thresh=0.8)
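    # only two of the five top-1 scores (0.9 and 0.99) clear the 0.8
    # threshold, so the expected accuracy is 2 / 5 = 40%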
acc = accuracy(pred, true_label)
assert torch.allclose(acc, torch.tensor(40.0))
# test for top2
accuracy = Accuracy(topk=2)
label = torch.Tensor([3, 2, 0, 0, 2]).long()
acc = accuracy(pred, label)
assert torch.allclose(acc, torch.tensor(100.0))
# test for both top1 and top2
accuracy = Accuracy(topk=(1, 2))
true_label = torch.Tensor([2, 3, 0, 1, 2]).long()
acc = accuracy(pred, true_label)
for a in acc:
assert torch.allclose(a, torch.tensor(100.0))
# topk is larger than pred class number
with pytest.raises(AssertionError):
accuracy = Accuracy(topk=5)
accuracy(pred, true_label)
# wrong topk type
with pytest.raises(AssertionError):
accuracy = Accuracy(topk='wrong type')
accuracy(pred, true_label)
# label size is larger than required
with pytest.raises(AssertionError):
label = torch.Tensor([2, 3, 0, 1, 2, 0]).long() # size mismatch
accuracy = Accuracy()
accuracy(pred, label)
# wrong pred dimension
with pytest.raises(AssertionError):
accuracy = Accuracy()
accuracy(pred[:, :, None], true_label)
| 4,481 | 33.476923 | 74 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_necks/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
| 48 | 23.5 | 47 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_necks/test_feature2pyramid.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models import Feature2Pyramid
def test_feature2pyramid():
# test
rescales = [4, 2, 1, 0.5]
embed_dim = 64
inputs = [torch.randn(1, embed_dim, 32, 32) for i in range(len(rescales))]
fpn = Feature2Pyramid(
embed_dim, rescales, norm_cfg=dict(type='BN', requires_grad=True))
outputs = fpn(inputs)
assert outputs[0].shape == torch.Size([1, 64, 128, 128])
assert outputs[1].shape == torch.Size([1, 64, 64, 64])
assert outputs[2].shape == torch.Size([1, 64, 32, 32])
assert outputs[3].shape == torch.Size([1, 64, 16, 16])
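    # each rescale factor maps to a fixed op: transposed convs for 4 and 2,
    # identity for 1, and max pooling for 0.5 and 0.25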
# test rescales = [2, 1, 0.5, 0.25]
rescales = [2, 1, 0.5, 0.25]
inputs = [torch.randn(1, embed_dim, 32, 32) for i in range(len(rescales))]
fpn = Feature2Pyramid(
embed_dim, rescales, norm_cfg=dict(type='BN', requires_grad=True))
outputs = fpn(inputs)
assert outputs[0].shape == torch.Size([1, 64, 64, 64])
assert outputs[1].shape == torch.Size([1, 64, 32, 32])
assert outputs[2].shape == torch.Size([1, 64, 16, 16])
assert outputs[3].shape == torch.Size([1, 64, 8, 8])
    # test rescales = [4, 2, 0.25, 0]: a rescale factor of 0 is unsupported
    # and should raise KeyError
rescales = [4, 2, 0.25, 0]
with pytest.raises(KeyError):
fpn = Feature2Pyramid(
embed_dim, rescales, norm_cfg=dict(type='BN', requires_grad=True))
| 1,383 | 34.487179 | 78 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_necks/test_fpn.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models import FPN
def test_fpn():
in_channels = [64, 128, 256, 512]
inputs = [
torch.randn(1, c, 56 // 2**i, 56 // 2**i)
for i, c in enumerate(in_channels)
]
fpn = FPN(in_channels, 64, len(in_channels))
outputs = fpn(inputs)
assert outputs[0].shape == torch.Size([1, 64, 56, 56])
assert outputs[1].shape == torch.Size([1, 64, 28, 28])
assert outputs[2].shape == torch.Size([1, 64, 14, 14])
assert outputs[3].shape == torch.Size([1, 64, 7, 7])
fpn = FPN(
in_channels,
64,
len(in_channels),
upsample_cfg=dict(mode='nearest', scale_factor=2.0))
outputs = fpn(inputs)
assert outputs[0].shape == torch.Size([1, 64, 56, 56])
assert outputs[1].shape == torch.Size([1, 64, 28, 28])
assert outputs[2].shape == torch.Size([1, 64, 14, 14])
assert outputs[3].shape == torch.Size([1, 64, 7, 7])
| 967 | 30.225806 | 60 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_necks/test_ic_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.necks import ICNeck
from mmseg.models.necks.ic_neck import CascadeFeatureFusion
from ..test_heads.utils import _conv_has_norm, to_cuda
def test_ic_neck():
# test with norm_cfg
neck = ICNeck(
in_channels=(4, 16, 16),
out_channels=8,
norm_cfg=dict(type='SyncBN'),
align_corners=False)
assert _conv_has_norm(neck, sync_bn=True)
inputs = [
torch.randn(1, 4, 32, 64),
torch.randn(1, 16, 16, 32),
torch.randn(1, 16, 8, 16)
]
neck = ICNeck(
in_channels=(4, 16, 16),
out_channels=4,
norm_cfg=dict(type='BN', requires_grad=True),
align_corners=False)
if torch.cuda.is_available():
neck, inputs = to_cuda(neck, inputs)
outputs = neck(inputs)
assert outputs[0].shape == (1, 4, 16, 32)
assert outputs[1].shape == (1, 4, 32, 64)
    assert outputs[2].shape == (1, 4, 32, 64)
def test_ic_neck_cascade_feature_fusion():
cff = CascadeFeatureFusion(64, 64, 32)
assert cff.conv_low.in_channels == 64
assert cff.conv_low.out_channels == 32
assert cff.conv_high.in_channels == 64
assert cff.conv_high.out_channels == 32
def test_ic_neck_input_channels():
with pytest.raises(AssertionError):
# ICNet Neck input channel constraints.
ICNeck(
in_channels=(16, 64, 64, 64),
out_channels=32,
norm_cfg=dict(type='BN', requires_grad=True),
align_corners=False)
| 1,559 | 27.888889 | 59 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_necks/test_jpu.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.necks import JPU
def test_fastfcn_neck():
# Test FastFCN Standard Forward
model = JPU(
in_channels=(64, 128, 256),
mid_channels=64,
start_level=0,
end_level=-1,
dilations=(1, 2, 4, 8),
)
model.init_weights()
model.train()
batch_size = 1
input = [
torch.randn(batch_size, 64, 64, 128),
torch.randn(batch_size, 128, 32, 64),
torch.randn(batch_size, 256, 16, 32)
]
feat = model(input)
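    # outputs pass through all but the last input and append the JPU-fused
    # map: len(dilations) dilated branches of mid_channels each are
    # concatenated, giving 4 * 64 = 256 channels here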
assert len(feat) == 3
assert feat[0].shape == torch.Size([batch_size, 64, 64, 128])
assert feat[1].shape == torch.Size([batch_size, 128, 32, 64])
assert feat[2].shape == torch.Size([batch_size, 256, 64, 128])
with pytest.raises(AssertionError):
# FastFCN input and in_channels constraints.
JPU(in_channels=(256, 64, 128), start_level=0, end_level=5)
# Test not default start_level
model = JPU(in_channels=(64, 128, 256), start_level=1, end_level=-1)
input = [
torch.randn(batch_size, 64, 64, 128),
torch.randn(batch_size, 128, 32, 64),
torch.randn(batch_size, 256, 16, 32)
]
feat = model(input)
assert len(feat) == 2
assert feat[0].shape == torch.Size([batch_size, 128, 32, 64])
assert feat[1].shape == torch.Size([batch_size, 2048, 32, 64])
| 1,415 | 29.12766 | 72 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_necks/test_mla_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models import MLANeck
def test_mla():
in_channels = [4, 4, 4, 4]
mla = MLANeck(in_channels, 32)
inputs = [torch.randn(1, c, 12, 12) for i, c in enumerate(in_channels)]
outputs = mla(inputs)
assert outputs[0].shape == torch.Size([1, 32, 12, 12])
assert outputs[1].shape == torch.Size([1, 32, 12, 12])
assert outputs[2].shape == torch.Size([1, 32, 12, 12])
assert outputs[3].shape == torch.Size([1, 32, 12, 12])
| 518 | 29.529412 | 75 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_necks/test_multilevel_neck.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models import MultiLevelNeck
def test_multilevel_neck():
# Test init_weights
MultiLevelNeck([266], 32).init_weights()
# Test multi feature maps
in_channels = [32, 64, 128, 256]
inputs = [torch.randn(1, c, 14, 14) for i, c in enumerate(in_channels)]
neck = MultiLevelNeck(in_channels, 32)
outputs = neck(inputs)
assert outputs[0].shape == torch.Size([1, 32, 7, 7])
assert outputs[1].shape == torch.Size([1, 32, 14, 14])
assert outputs[2].shape == torch.Size([1, 32, 28, 28])
assert outputs[3].shape == torch.Size([1, 32, 56, 56])
# Test one feature map
in_channels = [768]
inputs = [torch.randn(1, 768, 14, 14)]
neck = MultiLevelNeck(in_channels, 32)
outputs = neck(inputs)
assert outputs[0].shape == torch.Size([1, 32, 7, 7])
assert outputs[1].shape == torch.Size([1, 32, 14, 14])
assert outputs[2].shape == torch.Size([1, 32, 28, 28])
assert outputs[3].shape == torch.Size([1, 32, 56, 56])
| 1,051 | 30.878788 | 75 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_segmentors/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
| 48 | 23.5 | 47 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_segmentors/test_cascade_encoder_decoder.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv import ConfigDict
from mmseg.models import build_segmentor
from .utils import _segmentor_forward_train_test
def test_cascade_encoder_decoder():
# test 1 decode head, w.o. aux head
cfg = ConfigDict(
type='CascadeEncoderDecoder',
num_stages=2,
backbone=dict(type='ExampleBackbone'),
decode_head=[
dict(type='ExampleDecodeHead'),
dict(type='ExampleCascadeDecodeHead')
])
cfg.test_cfg = ConfigDict(mode='whole')
segmentor = build_segmentor(cfg)
_segmentor_forward_train_test(segmentor)
# test slide mode
cfg.test_cfg = ConfigDict(mode='slide', crop_size=(3, 3), stride=(2, 2))
segmentor = build_segmentor(cfg)
_segmentor_forward_train_test(segmentor)
# test 1 decode head, 1 aux head
cfg = ConfigDict(
type='CascadeEncoderDecoder',
num_stages=2,
backbone=dict(type='ExampleBackbone'),
decode_head=[
dict(type='ExampleDecodeHead'),
dict(type='ExampleCascadeDecodeHead')
],
auxiliary_head=dict(type='ExampleDecodeHead'))
cfg.test_cfg = ConfigDict(mode='whole')
segmentor = build_segmentor(cfg)
_segmentor_forward_train_test(segmentor)
# test 1 decode head, 2 aux head
cfg = ConfigDict(
type='CascadeEncoderDecoder',
num_stages=2,
backbone=dict(type='ExampleBackbone'),
decode_head=[
dict(type='ExampleDecodeHead'),
dict(type='ExampleCascadeDecodeHead')
],
auxiliary_head=[
dict(type='ExampleDecodeHead'),
dict(type='ExampleDecodeHead')
])
cfg.test_cfg = ConfigDict(mode='whole')
segmentor = build_segmentor(cfg)
_segmentor_forward_train_test(segmentor)
| 1,828 | 30.534483 | 76 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_segmentors/test_encoder_decoder.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv import ConfigDict
from mmseg.models import build_segmentor
from .utils import _segmentor_forward_train_test
def test_encoder_decoder():
# test 1 decode head, w.o. aux head
cfg = ConfigDict(
type='EncoderDecoder',
backbone=dict(type='ExampleBackbone'),
decode_head=dict(type='ExampleDecodeHead'),
train_cfg=None,
test_cfg=dict(mode='whole'))
segmentor = build_segmentor(cfg)
_segmentor_forward_train_test(segmentor)
# test out_channels == 1
segmentor.out_channels = 1
segmentor.decode_head.out_channels = 1
segmentor.decode_head.threshold = 0.3
_segmentor_forward_train_test(segmentor)
# test slide mode
cfg.test_cfg = ConfigDict(mode='slide', crop_size=(3, 3), stride=(2, 2))
segmentor = build_segmentor(cfg)
_segmentor_forward_train_test(segmentor)
# test 1 decode head, 1 aux head
cfg = ConfigDict(
type='EncoderDecoder',
backbone=dict(type='ExampleBackbone'),
decode_head=dict(type='ExampleDecodeHead'),
auxiliary_head=dict(type='ExampleDecodeHead'))
cfg.test_cfg = ConfigDict(mode='whole')
segmentor = build_segmentor(cfg)
_segmentor_forward_train_test(segmentor)
# test 1 decode head, 2 aux head
cfg = ConfigDict(
type='EncoderDecoder',
backbone=dict(type='ExampleBackbone'),
decode_head=dict(type='ExampleDecodeHead'),
auxiliary_head=[
dict(type='ExampleDecodeHead'),
dict(type='ExampleDecodeHead')
])
cfg.test_cfg = ConfigDict(mode='whole')
segmentor = build_segmentor(cfg)
_segmentor_forward_train_test(segmentor)
| 1,717 | 30.814815 | 76 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_segmentors/utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch import nn
from mmseg.models import BACKBONES, HEADS
from mmseg.models.decode_heads.cascade_decode_head import BaseCascadeDecodeHead
from mmseg.models.decode_heads.decode_head import BaseDecodeHead
def _demo_mm_inputs(input_shape=(1, 3, 8, 16), num_classes=10):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
segs = rng.randint(
low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8)
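    # note: np.random.randint's `high` bound is exclusive, so the generated
    # labels lie in [0, num_classes - 2]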
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
'flip_direction': 'horizontal'
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs),
'img_metas': img_metas,
'gt_semantic_seg': torch.LongTensor(segs)
}
return mm_inputs
@BACKBONES.register_module()
class ExampleBackbone(nn.Module):
def __init__(self):
super(ExampleBackbone, self).__init__()
self.conv = nn.Conv2d(3, 3, 3)
def init_weights(self, pretrained=None):
pass
def forward(self, x):
return [self.conv(x)]
@HEADS.register_module()
class ExampleDecodeHead(BaseDecodeHead):
def __init__(self):
super(ExampleDecodeHead, self).__init__(3, 3, num_classes=19)
def forward(self, inputs):
return self.cls_seg(inputs[0])
@HEADS.register_module()
class ExampleCascadeDecodeHead(BaseCascadeDecodeHead):
def __init__(self):
super(ExampleCascadeDecodeHead, self).__init__(3, 3, num_classes=19)
def forward(self, inputs, prev_out):
return self.cls_seg(inputs[0])
def _segmentor_forward_train_test(segmentor):
if isinstance(segmentor.decode_head, nn.ModuleList):
num_classes = segmentor.decode_head[-1].num_classes
else:
num_classes = segmentor.decode_head.num_classes
    # _demo_mm_inputs defaults to input_shape=(1, 3, 8, 16)
mm_inputs = _demo_mm_inputs(num_classes=num_classes)
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
gt_semantic_seg = mm_inputs['gt_semantic_seg']
# convert to cuda Tensor if applicable
if torch.cuda.is_available():
segmentor = segmentor.cuda()
imgs = imgs.cuda()
gt_semantic_seg = gt_semantic_seg.cuda()
# Test forward train
losses = segmentor.forward(
imgs, img_metas, gt_semantic_seg=gt_semantic_seg, return_loss=True)
assert isinstance(losses, dict)
# Test train_step
data_batch = dict(
img=imgs, img_metas=img_metas, gt_semantic_seg=gt_semantic_seg)
outputs = segmentor.train_step(data_batch, None)
assert isinstance(outputs, dict)
assert 'loss' in outputs
assert 'log_vars' in outputs
assert 'num_samples' in outputs
# Test val_step
with torch.no_grad():
segmentor.eval()
data_batch = dict(
img=imgs, img_metas=img_metas, gt_semantic_seg=gt_semantic_seg)
outputs = segmentor.val_step(data_batch, None)
assert isinstance(outputs, dict)
assert 'loss' in outputs
assert 'log_vars' in outputs
assert 'num_samples' in outputs
# Test forward simple test
with torch.no_grad():
segmentor.eval()
# pack into lists
img_list = [img[None, :] for img in imgs]
img_meta_list = [[img_meta] for img_meta in img_metas]
segmentor.forward(img_list, img_meta_list, return_loss=False)
# Test forward aug test
with torch.no_grad():
segmentor.eval()
# pack into lists
img_list = [img[None, :] for img in imgs]
img_list = img_list + img_list
img_meta_list = [[img_meta] for img_meta in img_metas]
img_meta_list = img_meta_list + img_meta_list
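        # doubling the lists simulates two augmented views of the same
        # batch, exercising the multi-view aggregation in aug_test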
segmentor.forward(img_list, img_meta_list, return_loss=False)
| 4,174 | 28.609929 | 79 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_utils/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
| 48 | 23.5 | 47 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_utils/test_embed.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmseg.models.utils.embed import AdaptivePadding, PatchEmbed, PatchMerging
def test_adaptive_padding():
for padding in ('same', 'corner'):
kernel_size = 16
stride = 16
dilation = 1
input = torch.rand(1, 1, 15, 17)
adap_pool = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
out = adap_pool(input)
# padding to divisible by 16
assert (out.shape[2], out.shape[3]) == (16, 32)
input = torch.rand(1, 1, 16, 17)
out = adap_pool(input)
# padding to divisible by 16
assert (out.shape[2], out.shape[3]) == (16, 32)
kernel_size = (2, 2)
stride = (2, 2)
dilation = (1, 1)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
input = torch.rand(1, 1, 11, 13)
out = adap_pad(input)
# padding to divisible by 2
assert (out.shape[2], out.shape[3]) == (12, 14)
kernel_size = (2, 2)
stride = (10, 10)
dilation = (1, 1)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
input = torch.rand(1, 1, 10, 13)
out = adap_pad(input)
# no padding
assert (out.shape[2], out.shape[3]) == (10, 13)
kernel_size = (11, 11)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
input = torch.rand(1, 1, 11, 13)
out = adap_pad(input)
# all padding
assert (out.shape[2], out.shape[3]) == (21, 21)
# test padding as kernel is (7,9)
input = torch.rand(1, 1, 11, 13)
stride = (3, 4)
kernel_size = (4, 5)
dilation = (2, 2)
        # effective kernel size with dilation: d * (k - 1) + 1 -> (7, 9)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
dilation_out = adap_pad(input)
assert (dilation_out.shape[2], dilation_out.shape[3]) == (16, 21)
kernel_size = (7, 9)
dilation = (1, 1)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
kernel79_out = adap_pad(input)
assert (kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21)
assert kernel79_out.shape == dilation_out.shape
# assert only support "same" "corner"
with pytest.raises(AssertionError):
AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=1)
def test_patch_embed():
B = 2
H = 3
W = 4
C = 3
embed_dims = 10
kernel_size = 3
stride = 1
dummy_input = torch.rand(B, C, H, W)
patch_merge_1 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=1,
norm_cfg=None)
x1, shape = patch_merge_1(dummy_input)
# test out shape
assert x1.shape == (2, 2, 10)
# test outsize is correct
assert shape == (1, 2)
# test L = out_h * out_w
assert shape[0] * shape[1] == x1.shape[1]
B = 2
H = 10
W = 10
C = 3
embed_dims = 10
kernel_size = 5
stride = 2
dummy_input = torch.rand(B, C, H, W)
# test dilation
patch_merge_2 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=None,
)
x2, shape = patch_merge_2(dummy_input)
# test out shape
assert x2.shape == (2, 1, 10)
# test outsize is correct
assert shape == (1, 1)
# test L = out_h * out_w
assert shape[0] * shape[1] == x2.shape[1]
stride = 2
input_size = (10, 10)
dummy_input = torch.rand(B, C, H, W)
# test stride and norm
patch_merge_3 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=dict(type='LN'),
input_size=input_size)
x3, shape = patch_merge_3(dummy_input)
# test out shape
assert x3.shape == (2, 1, 10)
# test outsize is correct
assert shape == (1, 1)
# test L = out_h * out_w
assert shape[0] * shape[1] == x3.shape[1]
# test the init_out_size with nn.Unfold
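    # conv-style output size: (in + 2*pad - dilation*(kernel-1) - 1)//stride
    # + 1; here pad=0, dilation=2, kernel=5, so the effective kernel is 9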
assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 -
1) // 2 + 1
assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 -
1) // 2 + 1
H = 11
W = 12
input_size = (H, W)
dummy_input = torch.rand(B, C, H, W)
# test stride and norm
patch_merge_3 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=dict(type='LN'),
input_size=input_size)
_, shape = patch_merge_3(dummy_input)
# when input_size equal to real input
# the out_size should be equal to `init_out_size`
assert shape == patch_merge_3.init_out_size
# test adap padding
for padding in ('same', 'corner'):
in_c = 2
embed_dims = 3
B = 2
# test stride is 1
input_size = (5, 5)
kernel_size = (5, 5)
stride = (1, 1)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 25, 3)
assert out_size == (5, 5)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (5, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 1, 3)
assert out_size == (1, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (6, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 2, 3)
assert out_size == (2, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test different kernel_size with different stride
input_size = (6, 5)
kernel_size = (6, 2)
stride = (6, 2)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 3, 3)
assert out_size == (1, 3)
assert x_out.size(1) == out_size[0] * out_size[1]
def test_patch_merging():
# Test the model with int padding
in_c = 3
out_c = 4
kernel_size = 3
stride = 3
padding = 1
dilation = 1
bias = False
# test the case `pad_to_stride` is False
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
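    # PatchMerging consumes flattened tokens of shape (B, L, C); `input_size`
    # tells it how to fold L back into an (H, W) map before the strided unfold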
B, L, C = 1, 100, 3
input_size = (10, 10)
x = torch.rand(B, L, C)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (1, 16, 4)
assert out_size == (4, 4)
# assert out size is consistent with real output
assert x_out.size(1) == out_size[0] * out_size[1]
in_c = 4
out_c = 5
kernel_size = 6
stride = 3
padding = 2
dilation = 2
bias = False
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
B, L, C = 1, 100, 4
input_size = (10, 10)
x = torch.rand(B, L, C)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (1, 4, 5)
assert out_size == (2, 2)
# assert out size is consistent with real output
assert x_out.size(1) == out_size[0] * out_size[1]
# Test with adaptive padding
for padding in ('same', 'corner'):
in_c = 2
out_c = 3
B = 2
# test stride is 1
input_size = (5, 5)
kernel_size = (5, 5)
stride = (1, 1)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 25, 3)
assert out_size == (5, 5)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (5, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 1, 3)
assert out_size == (1, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (6, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 2, 3)
assert out_size == (2, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test different kernel_size with different stride
input_size = (6, 5)
kernel_size = (6, 2)
stride = (6, 2)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 3, 3)
assert out_size == (1, 3)
assert x_out.size(1) == out_size[0] * out_size[1]
| 12,979 | 27.095238 | 78 | py |
mmsegmentation | mmsegmentation-master/tests/test_models/test_utils/test_shape_convert.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmseg.models.utils import (nchw2nlc2nchw, nchw_to_nlc, nlc2nchw2nlc,
nlc_to_nchw)
def test_nchw2nlc2nchw():
# Test nchw2nlc2nchw function
shape_nchw = (4, 2, 5, 5)
shape_nlc = (4, 25, 2)
def test_func(x):
assert x.shape == torch.Size(shape_nlc)
return x
x = torch.rand(*shape_nchw)
output = nchw2nlc2nchw(test_func, x)
assert output.shape == torch.Size(shape_nchw)
def test_func2(x, arg):
assert x.shape == torch.Size(shape_nlc)
assert arg == 100
return x
x = torch.rand(*shape_nchw)
output = nchw2nlc2nchw(test_func2, x, arg=100)
assert output.shape == torch.Size(shape_nchw)
def test_func3(x):
assert x.is_contiguous()
assert x.shape == torch.Size(shape_nlc)
return x
x = torch.rand(*shape_nchw)
output = nchw2nlc2nchw(test_func3, x, contiguous=True)
assert output.shape == torch.Size(shape_nchw)
assert output.is_contiguous()
def test_nlc2nchw2nlc():
# Test nlc2nchw2nlc function
shape_nchw = (4, 2, 5, 5)
shape_nlc = (4, 25, 2)
def test_func(x):
assert x.shape == torch.Size(shape_nchw)
return x
x = torch.rand(*shape_nlc)
output = nlc2nchw2nlc(test_func, x, shape_nchw[2:])
assert output.shape == torch.Size(shape_nlc)
def test_func2(x, arg):
assert x.shape == torch.Size(shape_nchw)
assert arg == 100
return x
x = torch.rand(*shape_nlc)
output = nlc2nchw2nlc(test_func2, x, shape_nchw[2:], arg=100)
assert output.shape == torch.Size(shape_nlc)
def test_func3(x):
assert x.is_contiguous()
assert x.shape == torch.Size(shape_nchw)
return x
x = torch.rand(*shape_nlc)
output = nlc2nchw2nlc(test_func3, x, shape_nchw[2:], contiguous=True)
assert output.shape == torch.Size(shape_nlc)
assert output.is_contiguous()
def test_nchw_to_nlc():
# Test nchw_to_nlc function
shape_nchw = (4, 2, 5, 5)
shape_nlc = (4, 25, 2)
x = torch.rand(*shape_nchw)
y = nchw_to_nlc(x)
assert y.shape == torch.Size(shape_nlc)
def test_nlc_to_nchw():
# Test nlc_to_nchw function
shape_nchw = (4, 2, 5, 5)
shape_nlc = (4, 25, 2)
x = torch.rand(*shape_nlc)
y = nlc_to_nchw(x, (5, 5))
assert y.shape == torch.Size(shape_nchw)
| 2,423 | 25.933333 | 73 | py |
mmsegmentation | mmsegmentation-master/tests/test_utils/test_misc.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
from mmseg.utils import find_latest_checkpoint
def test_find_latest_checkpoint():
with tempfile.TemporaryDirectory() as tempdir:
# no checkpoints in the path
path = tempdir
latest = find_latest_checkpoint(path)
assert latest is None
# The path doesn't exist
path = osp.join(tempdir, 'none')
latest = find_latest_checkpoint(path)
assert latest is None
# test when latest.pth exists
with tempfile.TemporaryDirectory() as tempdir:
with open(osp.join(tempdir, 'latest.pth'), 'w') as f:
f.write('latest')
path = tempdir
latest = find_latest_checkpoint(path)
assert latest == osp.join(tempdir, 'latest.pth')
with tempfile.TemporaryDirectory() as tempdir:
        for step in range(1600, 160001, 1600):
            with open(osp.join(tempdir, f'iter_{step}.pth'), 'w') as f:
                f.write(f'iter_{step}.pth')
latest = find_latest_checkpoint(tempdir)
assert latest == osp.join(tempdir, 'iter_160000.pth')
with tempfile.TemporaryDirectory() as tempdir:
for epoch in range(1, 21):
with open(osp.join(tempdir, f'epoch_{epoch}.pth'), 'w') as f:
f.write(f'epoch_{epoch}.pth')
latest = find_latest_checkpoint(tempdir)
assert latest == osp.join(tempdir, 'epoch_20.pth')
| 1,454 | 34.487805 | 73 | py |
mmsegmentation | mmsegmentation-master/tests/test_utils/test_set_env.py | # Copyright (c) OpenMMLab. All rights reserved.
import multiprocessing as mp
import os
import platform
import cv2
import pytest
from mmcv import Config
from mmseg.utils import setup_multi_processes
@pytest.mark.parametrize('workers_per_gpu', (0, 2))
@pytest.mark.parametrize(('valid', 'env_cfg'), [(True,
dict(
mp_start_method='fork',
opencv_num_threads=0,
omp_num_threads=1,
mkl_num_threads=1)),
(False,
dict(
mp_start_method=1,
opencv_num_threads=0.1,
omp_num_threads='s',
mkl_num_threads='1'))])
def test_setup_multi_processes(workers_per_gpu, valid, env_cfg):
# temp save system setting
    sys_start_method = mp.get_start_method(allow_none=True)
sys_cv_threads = cv2.getNumThreads()
# pop and temp save system env vars
sys_omp_threads = os.environ.pop('OMP_NUM_THREADS', default=None)
sys_mkl_threads = os.environ.pop('MKL_NUM_THREADS', default=None)
config = dict(data=dict(workers_per_gpu=workers_per_gpu))
config.update(env_cfg)
cfg = Config(config)
setup_multi_processes(cfg)
# test when cfg is valid and workers_per_gpu > 0
# setup_multi_processes will work
if valid and workers_per_gpu > 0:
# test config without setting env
assert os.getenv('OMP_NUM_THREADS') == str(env_cfg['omp_num_threads'])
assert os.getenv('MKL_NUM_THREADS') == str(env_cfg['mkl_num_threads'])
# when set to 0, the num threads will be 1
        assert cv2.getNumThreads() == (env_cfg['opencv_num_threads']
                                       if env_cfg['opencv_num_threads'] > 0
                                       else 1)
if platform.system() != 'Windows':
assert mp.get_start_method() == env_cfg['mp_start_method']
# revert setting to avoid affecting other programs
        if sys_start_method:
            mp.set_start_method(sys_start_method, force=True)
cv2.setNumThreads(sys_cv_threads)
if sys_omp_threads:
os.environ['OMP_NUM_THREADS'] = sys_omp_threads
else:
os.environ.pop('OMP_NUM_THREADS')
if sys_mkl_threads:
os.environ['MKL_NUM_THREADS'] = sys_mkl_threads
else:
os.environ.pop('MKL_NUM_THREADS')
elif valid and workers_per_gpu == 0:
if platform.system() != 'Windows':
assert mp.get_start_method() == env_cfg['mp_start_method']
        assert cv2.getNumThreads() == (env_cfg['opencv_num_threads']
                                       if env_cfg['opencv_num_threads'] > 0
                                       else 1)
assert 'OMP_NUM_THREADS' not in os.environ
assert 'MKL_NUM_THREADS' not in os.environ
        if sys_start_method:
            mp.set_start_method(sys_start_method, force=True)
cv2.setNumThreads(sys_cv_threads)
if sys_omp_threads:
os.environ['OMP_NUM_THREADS'] = sys_omp_threads
if sys_mkl_threads:
os.environ['MKL_NUM_THREADS'] = sys_mkl_threads
else:
        assert mp.get_start_method() == sys_start_method
assert cv2.getNumThreads() == sys_cv_threads
assert 'OMP_NUM_THREADS' not in os.environ
assert 'MKL_NUM_THREADS' not in os.environ
| 3,616 | 41.05814 | 78 | py |
mmsegmentation | mmsegmentation-master/tests/test_utils/test_util_distribution.py | # Copyright (c) OpenMMLab. All rights reserved.
from unittest.mock import MagicMock, patch
import mmcv
import torch
import torch.nn as nn
from mmcv.parallel import (MMDataParallel, MMDistributedDataParallel,
is_module_wrapper)
from mmseg import digit_version
from mmseg.utils import build_ddp, build_dp
def mock(*args, **kwargs):
pass
class Model(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(2, 2, 1)
def forward(self, x):
return self.conv(x)
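

# the patches below stub out collective-communication primitives so that
# (MM)DistributedDataParallel can be constructed in this single-process
# test without an initialized process group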
@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_build_dp():
model = Model()
assert not is_module_wrapper(model)
mmdp = build_dp(model, 'cpu')
assert isinstance(mmdp, MMDataParallel)
if torch.cuda.is_available():
mmdp = build_dp(model, 'cuda')
assert isinstance(mmdp, MMDataParallel)
if digit_version(mmcv.__version__) >= digit_version('1.5.0'):
from mmcv.device.mlu import MLUDataParallel
from mmcv.utils import IS_MLU_AVAILABLE
if IS_MLU_AVAILABLE:
mludp = build_dp(model, 'mlu')
assert isinstance(mludp, MLUDataParallel)
if digit_version(mmcv.__version__) >= digit_version('1.7.0'):
from mmcv.device.npu import NPUDataParallel
from mmcv.utils import IS_NPU_AVAILABLE
if IS_NPU_AVAILABLE:
            npu_dp = build_dp(model, 'npu')
            assert isinstance(npu_dp, NPUDataParallel)


@patch('torch.distributed._broadcast_coalesced', mock)
@patch('torch.distributed.broadcast', mock)
@patch('torch.nn.parallel.DistributedDataParallel._ddp_init_helper', mock)
def test_build_ddp():
model = Model()
assert not is_module_wrapper(model)
if torch.cuda.is_available():
mmddp = build_ddp(
model, 'cuda', device_ids=[0], process_group=MagicMock())
assert isinstance(mmddp, MMDistributedDataParallel)
if digit_version(mmcv.__version__) >= digit_version('1.5.0'):
from mmcv.device.mlu import MLUDistributedDataParallel
from mmcv.utils import IS_MLU_AVAILABLE
if IS_MLU_AVAILABLE:
mluddp = build_ddp(
model, 'mlu', device_ids=[0], process_group=MagicMock())
assert isinstance(mluddp, MLUDistributedDataParallel)
if digit_version(mmcv.__version__) >= digit_version('1.7.0'):
from mmcv.device.npu import NPUDistributedDataParallel
from mmcv.utils import IS_NPU_AVAILABLE
if IS_NPU_AVAILABLE:
npu_ddp = build_ddp(
model, 'npu', device_ids=[0], process_group=MagicMock())
assert isinstance(npu_ddp, NPUDistributedDataParallel)
| 2,797 | 32.309524 | 74 | py |
mmsegmentation | mmsegmentation-master/tools/analyze_logs.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Modified from https://github.com/open-
mmlab/mmdetection/blob/master/tools/analysis_tools/analyze_logs.py."""
import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import seaborn as sns
def plot_curve(log_dicts, args):
if args.backend is not None:
plt.switch_backend(args.backend)
sns.set_style(args.style)
# if legend is None, use {filename}_{key} as legend
legend = args.legend
if legend is None:
legend = []
for json_log in args.json_logs:
for metric in args.keys:
legend.append(f'{json_log}_{metric}')
assert len(legend) == (len(args.json_logs) * len(args.keys))
metrics = args.keys
num_metrics = len(metrics)
for i, log_dict in enumerate(log_dicts):
epochs = list(log_dict.keys())
for j, metric in enumerate(metrics):
print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
plot_epochs = []
plot_iters = []
plot_values = []
            # Log files may also contain validation lines; the `mode`
            # list is used to collect iteration numbers from training
            # lines only.
for epoch in epochs:
epoch_logs = log_dict[epoch]
if metric not in epoch_logs.keys():
continue
if metric in ['mIoU', 'mAcc', 'aAcc']:
plot_epochs.append(epoch)
plot_values.append(epoch_logs[metric][0])
else:
for idx in range(len(epoch_logs[metric])):
if epoch_logs['mode'][idx] == 'train':
plot_iters.append(epoch_logs['iter'][idx])
plot_values.append(epoch_logs[metric][idx])
ax = plt.gca()
label = legend[i * num_metrics + j]
if metric in ['mIoU', 'mAcc', 'aAcc']:
ax.set_xticks(plot_epochs)
plt.xlabel('epoch')
plt.plot(plot_epochs, plot_values, label=label, marker='o')
else:
plt.xlabel('iter')
plt.plot(plot_iters, plot_values, label=label, linewidth=0.5)
plt.legend()
if args.title is not None:
plt.title(args.title)
if args.out is None:
plt.show()
else:
print(f'save curve to: {args.out}')
plt.savefig(args.out)
plt.cla()
def parse_args():
parser = argparse.ArgumentParser(description='Analyze Json Log')
parser.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser.add_argument(
'--keys',
type=str,
nargs='+',
default=['mIoU'],
help='the metric that you want to plot')
parser.add_argument('--title', type=str, help='title of figure')
parser.add_argument(
'--legend',
type=str,
nargs='+',
default=None,
help='legend of each plot')
parser.add_argument(
'--backend', type=str, default=None, help='backend of plt')
parser.add_argument(
'--style', type=str, default='dark', help='style of plt')
parser.add_argument('--out', type=str, default=None)
args = parser.parse_args()
return args
def load_json_logs(json_logs):
    # Load json_logs and convert them to log_dicts. Each key is an epoch;
    # each value is a sub dict whose keys are the different metrics and
    # whose values are lists of metric values over all iterations.
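    # each line is one JSON record written by the text logger, e.g.
    # (exact fields vary by run):
    # {"mode": "train", "epoch": 1, "iter": 50, "lr": 0.0001, "loss": 0.9}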
log_dicts = [dict() for _ in json_logs]
for json_log, log_dict in zip(json_logs, log_dicts):
with open(json_log, 'r') as log_file:
for line in log_file:
log = json.loads(line.strip())
# skip lines without `epoch` field
if 'epoch' not in log:
continue
epoch = log.pop('epoch')
if epoch not in log_dict:
log_dict[epoch] = defaultdict(list)
for k, v in log.items():
log_dict[epoch][k].append(v)
return log_dicts
def main():
args = parse_args()
json_logs = args.json_logs
for json_log in json_logs:
assert json_log.endswith('.json')
log_dicts = load_json_logs(json_logs)
plot_curve(log_dicts, args)
if __name__ == '__main__':
main()
| 4,459 | 33.573643 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/benchmark.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import time
import mmcv
import numpy as np
import torch
from mmcv import Config
from mmcv.parallel import MMDataParallel
from mmcv.runner import load_checkpoint, wrap_fp16_model
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor
def parse_args():
parser = argparse.ArgumentParser(description='MMSeg benchmark a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
parser.add_argument(
'--work-dir',
help=('if specified, the results will be dumped '
'into the directory as json'))
parser.add_argument('--repeat-times', type=int, default=1)
args = parser.parse_args()
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
if args.work_dir is not None:
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
json_file = osp.join(args.work_dir, f'fps_{timestamp}.json')
else:
# use config filename as default work_dir if cfg.work_dir is None
work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
mmcv.mkdir_or_exist(osp.abspath(work_dir))
json_file = osp.join(work_dir, f'fps_{timestamp}.json')
repeat_times = args.repeat_times
# set cudnn_benchmark
torch.backends.cudnn.benchmark = False
cfg.model.pretrained = None
cfg.data.test.test_mode = True
benchmark_dict = dict(config=args.config, unit='img / s')
overall_fps_list = []
for time_index in range(repeat_times):
print(f'Run {time_index + 1}:')
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=False,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
if 'checkpoint' in args and osp.exists(args.checkpoint):
load_checkpoint(model, args.checkpoint, map_location='cpu')
model = MMDataParallel(model, device_ids=[0])
model.eval()
# the first several iterations may be very slow so skip them
num_warmup = 5
pure_inf_time = 0
total_iters = 200
        # benchmark with 200 images and take the average
for i, data in enumerate(data_loader):
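            # synchronize around the forward pass so that perf_counter
            # measures actual kernel execution time rather than just the
            # asynchronous launch overhead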
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
model(return_loss=False, rescale=True, **data)
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
if i >= num_warmup:
pure_inf_time += elapsed
if (i + 1) % args.log_interval == 0:
fps = (i + 1 - num_warmup) / pure_inf_time
print(f'Done image [{i + 1:<3}/ {total_iters}], '
f'fps: {fps:.2f} img / s')
if (i + 1) == total_iters:
fps = (i + 1 - num_warmup) / pure_inf_time
print(f'Overall fps: {fps:.2f} img / s\n')
benchmark_dict[f'overall_fps_{time_index + 1}'] = round(fps, 2)
overall_fps_list.append(fps)
break
benchmark_dict['average_fps'] = round(np.mean(overall_fps_list), 2)
benchmark_dict['fps_variance'] = round(np.var(overall_fps_list), 4)
print(f'Average fps of {repeat_times} evaluations: '
f'{benchmark_dict["average_fps"]}')
print(f'The variance of {repeat_times} evaluations: '
f'{benchmark_dict["fps_variance"]}')
mmcv.dump(benchmark_dict, json_file, indent=4)
if __name__ == '__main__':
main()
| 4,328 | 34.77686 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/browse_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import warnings
from pathlib import Path
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmseg.datasets.builder import build_dataset
def parse_args():
parser = argparse.ArgumentParser(description='Browse a dataset')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--show-origin',
default=False,
action='store_true',
help='if True, omit all augmentation in pipeline,'
' show origin image and seg map')
parser.add_argument(
'--skip-type',
type=str,
nargs='+',
default=['DefaultFormatBundle', 'Normalize', 'Collect'],
        help='skip some useless pipeline steps; if `show-origin` is true, '
        'all pipeline steps except `Load` will be skipped')
parser.add_argument(
'--output-dir',
default='./output',
type=str,
        help='directory to save the results if there is no display interface')
parser.add_argument('--show', default=False, action='store_true')
parser.add_argument(
'--show-interval',
type=int,
default=999,
help='the interval of show (ms)')
parser.add_argument(
'--opacity',
type=float,
default=0.5,
help='the opacity of semantic map')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def imshow_semantic(img,
seg,
class_names,
palette=None,
win_name='',
show=False,
wait_time=0,
out_file=None,
opacity=0.5):
"""Draw `result` over `img`.
Args:
img (str or Tensor): The image to be displayed.
seg (Tensor): The semantic segmentation results to draw over
`img`.
        class_names (list[str]): Names of each class.
        palette (list[list[int]] | np.ndarray | None): The palette of the
            segmentation map. If None is given, a random palette will be
            generated. Default: None
win_name (str): The window name.
wait_time (int): Value of waitKey param.
Default: 0.
show (bool): Whether to show the image.
Default: False.
out_file (str or None): The filename to write the image.
Default: None.
opacity(float): Opacity of painted segmentation map.
Default 0.5.
Must be in (0, 1] range.
    Returns:
        img (Tensor): The image with the drawn segmentation map. Only
            meaningful when neither `show` nor `out_file` is set.
    """
img = mmcv.imread(img)
img = img.copy()
if palette is None:
palette = np.random.randint(0, 255, size=(len(class_names), 3))
palette = np.array(palette)
assert palette.shape[0] == len(class_names)
assert palette.shape[1] == 3
assert len(palette.shape) == 2
assert 0 < opacity <= 1.0
color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
for label, color in enumerate(palette):
color_seg[seg == label, :] = color
# convert to BGR
color_seg = color_seg[..., ::-1]
img = img * (1 - opacity) + color_seg * opacity
img = img.astype(np.uint8)
# if out_file specified, do not show image in window
if out_file is not None:
show = False
if show:
mmcv.imshow(img, win_name, wait_time)
if out_file is not None:
mmcv.imwrite(img, out_file)
if not (show or out_file):
        warnings.warn('show==False and out_file is not specified, only '
                      'the result image will be returned')
return img
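# A minimal usage sketch of `imshow_semantic` (file names and the number of
# classes are illustrative assumptions, not taken from this repo):
#   seg = np.random.randint(0, 19, size=(512, 1024))
#   imshow_semantic('demo.png', seg, class_names=[str(i) for i in range(19)],
#                   out_file='vis.png', opacity=0.5)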
def _retrieve_data_cfg(_data_cfg, skip_type, show_origin):
if show_origin is True:
# only keep pipeline of Loading data and ann
_data_cfg['pipeline'] = [
x for x in _data_cfg.pipeline if 'Load' in x['type']
]
else:
_data_cfg['pipeline'] = [
x for x in _data_cfg.pipeline if x['type'] not in skip_type
]
def retrieve_data_cfg(config_path, skip_type, cfg_options, show_origin=False):
cfg = Config.fromfile(config_path)
if cfg_options is not None:
cfg.merge_from_dict(cfg_options)
train_data_cfg = cfg.data.train
if isinstance(train_data_cfg, list):
for _data_cfg in train_data_cfg:
while 'dataset' in _data_cfg and _data_cfg[
'type'] != 'MultiImageMixDataset':
_data_cfg = _data_cfg['dataset']
if 'pipeline' in _data_cfg:
_retrieve_data_cfg(_data_cfg, skip_type, show_origin)
else:
raise ValueError
else:
while 'dataset' in train_data_cfg and train_data_cfg[
'type'] != 'MultiImageMixDataset':
train_data_cfg = train_data_cfg['dataset']
_retrieve_data_cfg(train_data_cfg, skip_type, show_origin)
return cfg
def main():
args = parse_args()
cfg = retrieve_data_cfg(args.config, args.skip_type, args.cfg_options,
args.show_origin)
dataset = build_dataset(cfg.data.train)
progress_bar = mmcv.ProgressBar(len(dataset))
for item in dataset:
filename = os.path.join(args.output_dir,
Path(item['filename']).name
) if args.output_dir is not None else None
imshow_semantic(
item['img'],
item['gt_semantic_seg'],
dataset.CLASSES,
dataset.PALETTE,
show=args.show,
wait_time=args.show_interval,
out_file=filename,
opacity=args.opacity,
)
progress_bar.update()
if __name__ == '__main__':
main()
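# A minimal usage sketch (config path is illustrative):
#   python tools/browse_dataset.py configs/some_config.py \
#       --output-dir ./output --opacity 0.5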
| 6,231 | 33.054645 | 78 | py |
mmsegmentation | mmsegmentation-master/tools/confusion_matrix.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import matplotlib.pyplot as plt
import mmcv
import numpy as np
from matplotlib.ticker import MultipleLocator
from mmcv import Config, DictAction
from mmseg.datasets import build_dataset
def parse_args():
parser = argparse.ArgumentParser(
description='Generate confusion matrix from segmentation results')
parser.add_argument('config', help='test config file path')
parser.add_argument(
'prediction_path', help='prediction path where test .pkl result')
parser.add_argument(
'save_dir', help='directory where confusion matrix will be saved')
parser.add_argument(
'--show', action='store_true', help='show confusion matrix')
parser.add_argument(
'--color-theme',
default='winter',
help='theme of the matrix color map')
parser.add_argument(
'--title',
default='Normalized Confusion Matrix',
help='title of the matrix color map')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def calculate_confusion_matrix(dataset, results):
"""Calculate the confusion matrix.
Args:
dataset (Dataset): Test or val dataset.
        results (list[ndarray]): A list of segmentation results, one per image.
"""
n = len(dataset.CLASSES)
confusion_matrix = np.zeros(shape=[n, n])
assert len(dataset) == len(results)
ignore_index = dataset.ignore_index
prog_bar = mmcv.ProgressBar(len(results))
for idx, per_img_res in enumerate(results):
res_segm = per_img_res
gt_segm = dataset.get_gt_seg_map_by_idx(idx).astype(int)
gt_segm, res_segm = gt_segm.flatten(), res_segm.flatten()
to_ignore = gt_segm == ignore_index
gt_segm, res_segm = gt_segm[~to_ignore], res_segm[~to_ignore]
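        # encode each (gt, pred) pair as a single index gt * n + pred so that
        # one bincount over [0, n * n) reshaped to (n, n) yields the
        # per-image confusion matrix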
inds = n * gt_segm + res_segm
mat = np.bincount(inds, minlength=n**2).reshape(n, n)
confusion_matrix += mat
prog_bar.update()
return confusion_matrix
def plot_confusion_matrix(confusion_matrix,
labels,
save_dir=None,
show=True,
title='Normalized Confusion Matrix',
color_theme='winter'):
"""Draw confusion matrix with matplotlib.
Args:
confusion_matrix (ndarray): The confusion matrix.
labels (list[str]): List of class names.
        save_dir (str, optional): If set, save the confusion matrix plot to the
given path. Default: None.
show (bool): Whether to show the plot. Default: True.
title (str): Title of the plot. Default: `Normalized Confusion Matrix`.
color_theme (str): Theme of the matrix color map. Default: `winter`.
"""
# normalize the confusion matrix
per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis]
confusion_matrix = \
confusion_matrix.astype(np.float32) / per_label_sums * 100
num_classes = len(labels)
fig, ax = plt.subplots(
figsize=(2 * num_classes, 2 * num_classes * 0.8), dpi=180)
cmap = plt.get_cmap(color_theme)
im = ax.imshow(confusion_matrix, cmap=cmap)
plt.colorbar(mappable=im, ax=ax)
title_font = {'weight': 'bold', 'size': 12}
ax.set_title(title, fontdict=title_font)
label_font = {'size': 10}
plt.ylabel('Ground Truth Label', fontdict=label_font)
plt.xlabel('Prediction Label', fontdict=label_font)
# draw locator
xmajor_locator = MultipleLocator(1)
xminor_locator = MultipleLocator(0.5)
ax.xaxis.set_major_locator(xmajor_locator)
ax.xaxis.set_minor_locator(xminor_locator)
ymajor_locator = MultipleLocator(1)
yminor_locator = MultipleLocator(0.5)
ax.yaxis.set_major_locator(ymajor_locator)
ax.yaxis.set_minor_locator(yminor_locator)
# draw grid
ax.grid(True, which='minor', linestyle='-')
# draw label
ax.set_xticks(np.arange(num_classes))
ax.set_yticks(np.arange(num_classes))
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
ax.tick_params(
axis='x', bottom=False, top=True, labelbottom=False, labeltop=True)
plt.setp(
ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor')
# draw confusion matrix value
for i in range(num_classes):
for j in range(num_classes):
ax.text(
j,
i,
'{}%'.format(
round(confusion_matrix[i, j], 2
) if not np.isnan(confusion_matrix[i, j]) else -1),
ha='center',
va='center',
color='w',
size=7)
ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1
fig.tight_layout()
if save_dir is not None:
plt.savefig(
os.path.join(save_dir, 'confusion_matrix.png'), format='png')
if show:
plt.show()
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
results = mmcv.load(args.prediction_path)
assert isinstance(results, list)
if isinstance(results[0], np.ndarray):
pass
else:
raise TypeError('invalid type of prediction results')
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
dataset = build_dataset(cfg.data.test)
confusion_matrix = calculate_confusion_matrix(dataset, results)
plot_confusion_matrix(
confusion_matrix,
dataset.CLASSES,
save_dir=args.save_dir,
show=args.show,
title=args.title,
color_theme=args.color_theme)
if __name__ == '__main__':
main()
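# A minimal usage sketch (paths are illustrative); the .pkl file is assumed
# to hold per-image seg maps as np.ndarray, e.g. produced by
# `tools/test.py ... --out results.pkl`:
#   python tools/confusion_matrix.py configs/some_config.py results.pkl \
#       work_dirs/confusion_matrix --show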
| 6,368 | 32.87766 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/deploy_test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import shutil
import warnings
from typing import Any, Iterable
import mmcv
import numpy as np
import torch
from mmcv.parallel import MMDataParallel
from mmcv.runner import get_dist_info
from mmcv.utils import DictAction
from mmseg.apis import single_gpu_test
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models.segmentors.base import BaseSegmentor
from mmseg.ops import resize
class ONNXRuntimeSegmentor(BaseSegmentor):
def __init__(self, onnx_file: str, cfg: Any, device_id: int):
super(ONNXRuntimeSegmentor, self).__init__()
import onnxruntime as ort
# get the custom op path
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, '
                          'you may have to build mmcv with ONNXRuntime '
                          'from source.')
session_options = ort.SessionOptions()
# register custom op for onnxruntime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_file, session_options)
providers = ['CPUExecutionProvider']
options = [{}]
is_cuda_available = ort.get_device() == 'GPU'
if is_cuda_available:
providers.insert(0, 'CUDAExecutionProvider')
options.insert(0, {'device_id': device_id})
sess.set_providers(providers, options)
self.sess = sess
self.device_id = device_id
self.io_binding = sess.io_binding()
self.output_names = [_.name for _ in sess.get_outputs()]
for name in self.output_names:
self.io_binding.bind_output(name)
self.cfg = cfg
self.test_mode = cfg.model.test_cfg.mode
self.is_cuda_available = is_cuda_available
def extract_feat(self, imgs):
raise NotImplementedError('This method is not implemented.')
def encode_decode(self, img, img_metas):
raise NotImplementedError('This method is not implemented.')
def forward_train(self, imgs, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def simple_test(self, img: torch.Tensor, img_meta: Iterable,
**kwargs) -> list:
if not self.is_cuda_available:
img = img.detach().cpu()
elif self.device_id >= 0:
img = img.cuda(self.device_id)
device_type = img.device.type
self.io_binding.bind_input(
name='input',
device_type=device_type,
device_id=self.device_id,
element_type=np.float32,
shape=img.shape,
buffer_ptr=img.data_ptr())
self.sess.run_with_iobinding(self.io_binding)
seg_pred = self.io_binding.copy_outputs_to_cpu()[0]
        # 'whole' test mode might support dynamic reshape
ori_shape = img_meta[0]['ori_shape']
if not (ori_shape[0] == seg_pred.shape[-2]
and ori_shape[1] == seg_pred.shape[-1]):
seg_pred = torch.from_numpy(seg_pred).float()
seg_pred = resize(
seg_pred, size=tuple(ori_shape[:2]), mode='nearest')
seg_pred = seg_pred.long().detach().cpu().numpy()
seg_pred = seg_pred[0]
seg_pred = list(seg_pred)
return seg_pred
def aug_test(self, imgs, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
class TensorRTSegmentor(BaseSegmentor):
def __init__(self, trt_file: str, cfg: Any, device_id: int):
super(TensorRTSegmentor, self).__init__()
from mmcv.tensorrt import TRTWraper, load_tensorrt_plugin
try:
load_tensorrt_plugin()
except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, '
                          'you may have to build mmcv with TensorRT '
                          'from source.')
model = TRTWraper(
trt_file, input_names=['input'], output_names=['output'])
self.model = model
self.device_id = device_id
self.cfg = cfg
self.test_mode = cfg.model.test_cfg.mode
def extract_feat(self, imgs):
raise NotImplementedError('This method is not implemented.')
def encode_decode(self, img, img_metas):
raise NotImplementedError('This method is not implemented.')
def forward_train(self, imgs, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def simple_test(self, img: torch.Tensor, img_meta: Iterable,
**kwargs) -> list:
with torch.cuda.device(self.device_id), torch.no_grad():
seg_pred = self.model({'input': img})['output']
seg_pred = seg_pred.detach().cpu().numpy()
        # 'whole' test mode might support dynamic reshape
ori_shape = img_meta[0]['ori_shape']
if not (ori_shape[0] == seg_pred.shape[-2]
and ori_shape[1] == seg_pred.shape[-1]):
seg_pred = torch.from_numpy(seg_pred).float()
seg_pred = resize(
seg_pred, size=tuple(ori_shape[:2]), mode='nearest')
seg_pred = seg_pred.long().detach().cpu().numpy()
seg_pred = seg_pred[0]
seg_pred = list(seg_pred)
return seg_pred
def aug_test(self, imgs, img_metas, **kwargs):
raise NotImplementedError('This method is not implemented.')
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(
description='mmseg backend test (and eval)')
parser.add_argument('config', help='test config file path')
parser.add_argument('model', help='Input model file')
parser.add_argument(
'--backend',
help='Backend of the model.',
choices=['onnxruntime', 'tensorrt'])
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "mIoU"'
' for generic datasets, and "cityscapes" for Cityscapes')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help="--options is deprecated in favor of --cfg_options' and it will "
'not be supported in version v0.22.0. Override some settings in the '
'used config, the key-value pair in xxx=yyy format will be merged '
'into config file. If the value to be overwritten is a list, it '
'should be like key="[a,b]" or key=a,b It also allows nested '
'list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation '
'marks are necessary and that no white space is allowed.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation')
parser.add_argument(
'--opacity',
type=float,
default=0.5,
help='Opacity of painted segmentation map. In (0, 1] range.')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options. '
'--options will not be supported in version v0.22.0.')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options. '
'--options will not be supported in version v0.22.0.')
args.cfg_options = args.options
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# init distributed env first, since logger depends on the dist info.
distributed = False
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=cfg.data.workers_per_gpu,
dist=distributed,
shuffle=False)
# load onnx config and meta
cfg.model.train_cfg = None
if args.backend == 'onnxruntime':
model = ONNXRuntimeSegmentor(args.model, cfg=cfg, device_id=0)
elif args.backend == 'tensorrt':
model = TensorRTSegmentor(args.model, cfg=cfg, device_id=0)
model.CLASSES = dataset.CLASSES
model.PALETTE = dataset.PALETTE
# clean gpu memory when starting a new evaluation.
torch.cuda.empty_cache()
eval_kwargs = {} if args.eval_options is None else args.eval_options
# Deprecated
efficient_test = eval_kwargs.get('efficient_test', False)
if efficient_test:
warnings.warn(
'``efficient_test=True`` does not have effect in tools/test.py, '
'the evaluation and format results are CPU memory efficient by '
'default')
eval_on_format_results = (
args.eval is not None and 'cityscapes' in args.eval)
if eval_on_format_results:
assert len(args.eval) == 1, 'eval on format results is not ' \
'applicable for metrics other than ' \
'cityscapes'
if args.format_only or eval_on_format_results:
if 'imgfile_prefix' in eval_kwargs:
tmpdir = eval_kwargs['imgfile_prefix']
else:
tmpdir = '.format_cityscapes'
eval_kwargs.setdefault('imgfile_prefix', tmpdir)
mmcv.mkdir_or_exist(tmpdir)
else:
tmpdir = None
model = MMDataParallel(model, device_ids=[0])
results = single_gpu_test(
model,
data_loader,
args.show,
args.show_dir,
False,
args.opacity,
pre_eval=args.eval is not None and not eval_on_format_results,
format_only=args.format_only or eval_on_format_results,
format_args=eval_kwargs)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
warnings.warn(
'The behavior of ``args.out`` has been changed since MMSeg '
'v0.16, the pickled outputs could be seg map as type of '
'np.array, pre-eval results or file paths for '
'``dataset.format_results()``.')
print(f'\nwriting results to {args.out}')
mmcv.dump(results, args.out)
if args.eval:
dataset.evaluate(results, args.eval, **eval_kwargs)
if tmpdir is not None and eval_on_format_results:
# remove tmp dir when cityscapes evaluation
shutil.rmtree(tmpdir)
if __name__ == '__main__':
main()
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This tool will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
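# A minimal usage sketch (paths are illustrative):
#   python tools/deploy_test.py configs/some_config.py model.onnx \
#       --backend onnxruntime --eval mIoU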
| 13,175 | 37.867257 | 78 | py |
mmsegmentation | mmsegmentation-master/tools/dist_test.sh | CONFIG=$1
CHECKPOINT=$2
GPUS=$3
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
PORT=${PORT:-29500}
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch \
--nnodes=$NNODES \
--node_rank=$NODE_RANK \
--master_addr=$MASTER_ADDR \
--nproc_per_node=$GPUS \
--master_port=$PORT \
$(dirname "$0")/test.py \
$CONFIG \
$CHECKPOINT \
--launcher pytorch \
${@:4}
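# Minimal usage sketch (paths are illustrative):
#   ./tools/dist_test.sh configs/some_config.py checkpoint.pth 8 --eval mIoU
# NNODES, NODE_RANK, PORT and MASTER_ADDR can be overridden via environment
# variables for multi-node testing.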
| 458 | 20.857143 | 43 | sh |
mmsegmentation | mmsegmentation-master/tools/dist_train.sh | CONFIG=$1
GPUS=$2
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
PORT=${PORT:-29500}
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch \
--nnodes=$NNODES \
--node_rank=$NODE_RANK \
--master_addr=$MASTER_ADDR \
--nproc_per_node=$GPUS \
--master_port=$PORT \
$(dirname "$0")/train.py \
$CONFIG \
--launcher pytorch ${@:3}
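# Minimal usage sketch (path is illustrative; extra arguments are forwarded
# to tools/train.py):
#   ./tools/dist_train.sh configs/some_config.py 8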
| 421 | 22.444444 | 43 | sh |
mmsegmentation | mmsegmentation-master/tools/get_flops.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from mmcv import Config
from mmcv.cnn import get_model_complexity_info
from mmseg.models import build_segmentor
def parse_args():
parser = argparse.ArgumentParser(
description='Get the FLOPs of a segmentor')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[2048, 1024],
help='input image size')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
input_shape = (3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (3, ) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = Config.fromfile(args.config)
cfg.model.pretrained = None
model = build_segmentor(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg')).cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
        raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.format(
                model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
print('{0}\nInput shape: {1}\nFlops: {2}\nParams: {3}\n{0}'.format(
split_line, input_shape, flops, params))
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
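# A minimal usage sketch (config path is illustrative); `--shape` takes one
# size for a square input or explicit height and width:
#   python tools/get_flops.py configs/some_config.py --shape 512 1024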
| 1,724 | 27.278689 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/imagenets_submit.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import shutil
def parse_args():
parser = argparse.ArgumentParser(description='Inference')
parser.add_argument(
'--imgfile_prefix',
type=str,
required=True,
help='The prefix of output image file')
    parser.add_argument(
        '--method',
        default='example submission',
        help='Method name in the method description file (method.txt).')
    parser.add_argument(
        '--arch',
        metavar='ARCH',
        help='The model architecture in the method description file '
        '(method.txt).')
    parser.add_argument(
        '--train_data',
        default='null',
        help='Training data in the method description file (method.txt).')
    parser.add_argument(
        '--train_scheme',
        default='null',
        help='Training scheme in the method description file (method.txt), '
        'e.g., SSL, Sup, SSL+Sup.')
    parser.add_argument(
        '--link',
        default='null',
        help='Paper/project link in the method description file (method.txt).')
    parser.add_argument(
        '--description',
        default='null',
        help='Method description in the method description file (method.txt).')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
method = 'Method name: {}\n'.format(args.method) + \
'Training data: {}\nTraining scheme: {}\n'.format(
args.train_data, args.train_scheme) + \
'Networks: {}\nPaper/Project link: {}\n'.format(
args.arch, args.link) + \
'Method description: {}'.format(args.description)
with open(os.path.join(args.imgfile_prefix, 'method.txt'), 'w') as f:
f.write(method)
# zip for submission
shutil.make_archive(
args.imgfile_prefix, 'zip', root_dir=args.imgfile_prefix)
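# A minimal usage sketch (values are illustrative):
#   python tools/imagenets_submit.py --imgfile_prefix work_dirs/results \
#       --method 'example submission' --arch 'ViT-B' --train_scheme 'SSL+Sup'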
| 1,864 | 30.610169 | 78 | py |
mmsegmentation | mmsegmentation-master/tools/model_ensemble.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import mmcv
import numpy as np
import torch
from mmcv.parallel import MMDataParallel
from mmcv.parallel.scatter_gather import scatter_kwargs
from mmcv.runner import load_checkpoint, wrap_fp16_model
from PIL import Image
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor
@torch.no_grad()
def main(args):
models = []
gpu_ids = args.gpus
configs = args.config
ckpts = args.checkpoint
cfg = mmcv.Config.fromfile(configs[0])
if args.aug_test:
cfg.data.test.pipeline[1].img_ratios = [
0.5, 0.75, 1.0, 1.25, 1.5, 1.75, 2.0
]
cfg.data.test.pipeline[1].flip = True
else:
cfg.data.test.pipeline[1].img_ratios = [1.0]
cfg.data.test.pipeline[1].flip = False
torch.backends.cudnn.benchmark = True
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=4,
dist=False,
shuffle=False,
)
for idx, (config, ckpt) in enumerate(zip(configs, ckpts)):
cfg = mmcv.Config.fromfile(config)
cfg.model.pretrained = None
cfg.data.test.test_mode = True
model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
if cfg.get('fp16', None):
wrap_fp16_model(model)
load_checkpoint(model, ckpt, map_location='cpu')
torch.cuda.empty_cache()
tmpdir = args.out
mmcv.mkdir_or_exist(tmpdir)
model = MMDataParallel(model, device_ids=[gpu_ids[idx % len(gpu_ids)]])
model.eval()
models.append(model)
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
loader_indices = data_loader.batch_sampler
for batch_indices, data in zip(loader_indices, data_loader):
result = []
for model in models:
x, _ = scatter_kwargs(
inputs=data, kwargs=None, target_gpus=model.device_ids)
if args.aug_test:
logits = model.module.aug_test_logits(**x[0])
else:
logits = model.module.simple_test_logits(**x[0])
result.append(logits)
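        # ensemble by summing per-model logits before the argmax; summing is
        # equivalent to averaging here because argmax is scale invariant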
result_logits = 0
for logit in result:
result_logits += logit
pred = result_logits.argmax(axis=1).squeeze()
img_info = dataset.img_infos[batch_indices[0]]
file_name = os.path.join(
tmpdir, img_info['ann']['seg_map'].split(os.path.sep)[-1])
Image.fromarray(pred.astype(np.uint8)).save(file_name)
prog_bar.update()
def parse_args():
parser = argparse.ArgumentParser(
description='Model Ensemble with logits result')
parser.add_argument(
'--config', type=str, nargs='+', help='ensemble config files path')
parser.add_argument(
'--checkpoint',
type=str,
nargs='+',
help='ensemble checkpoint files path')
parser.add_argument(
'--aug-test',
action='store_true',
        help='if set, ensemble aug-test results; otherwise ensemble '
        'single-scale test results (default)')
parser.add_argument(
'--out', type=str, default='results', help='the dir to save result')
parser.add_argument(
'--gpus', type=int, nargs='+', default=[0], help='id of gpu to use')
args = parser.parse_args()
assert len(args.config) == len(args.checkpoint), \
        f'len(config) must equal len(checkpoint), ' \
        f'but len(config) = {len(args.config)} and ' \
        f'len(checkpoint) = {len(args.checkpoint)}'
assert args.out, "ensemble result out-dir can't be None"
return args
if __name__ == '__main__':
args = parse_args()
main(args)
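# A minimal usage sketch (paths are illustrative; configs and checkpoints are
# paired positionally):
#   python tools/model_ensemble.py --config cfg_a.py cfg_b.py \
#       --checkpoint a.pth b.pth --aug-test --out results --gpus 0 1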
| 3,780 | 29.991803 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/onnx2tensorrt.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import warnings
from typing import Iterable, Optional, Union
import matplotlib.pyplot as plt
import mmcv
import numpy as np
import onnxruntime as ort
import torch
from mmcv.ops import get_onnxruntime_op_path
from mmcv.tensorrt import (TRTWraper, is_tensorrt_plugin_loaded, onnx2trt,
save_trt_engine)
from mmseg.apis.inference import LoadImage
from mmseg.datasets import DATASETS
from mmseg.datasets.pipelines import Compose
def get_GiB(x: int):
"""return x GiB."""
return x * (1 << 30)
def _prepare_input_img(img_path: str,
test_pipeline: Iterable[dict],
shape: Optional[Iterable] = None,
rescale_shape: Optional[Iterable] = None) -> dict:
# build the data pipeline
if shape is not None:
test_pipeline[1]['img_scale'] = (shape[1], shape[0])
test_pipeline[1]['transforms'][0]['keep_ratio'] = False
test_pipeline = [LoadImage()] + test_pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img_path)
data = test_pipeline(data)
imgs = data['img']
img_metas = [i.data for i in data['img_metas']]
if rescale_shape is not None:
for img_meta in img_metas:
img_meta['ori_shape'] = tuple(rescale_shape) + (3, )
mm_inputs = {'imgs': imgs, 'img_metas': img_metas}
return mm_inputs
def _update_input_img(img_list: Iterable, img_meta_list: Iterable):
# update img and its meta list
N = img_list[0].size(0)
img_meta = img_meta_list[0][0]
img_shape = img_meta['img_shape']
ori_shape = img_meta['ori_shape']
pad_shape = img_meta['pad_shape']
new_img_meta_list = [[{
'img_shape':
img_shape,
'ori_shape':
ori_shape,
'pad_shape':
pad_shape,
'filename':
img_meta['filename'],
'scale_factor':
(img_shape[1] / ori_shape[1], img_shape[0] / ori_shape[0]) * 2,
'flip':
False,
} for _ in range(N)]]
return img_list, new_img_meta_list
def show_result_pyplot(img: Union[str, np.ndarray],
result: np.ndarray,
palette: Optional[Iterable] = None,
fig_size: Iterable[int] = (15, 10),
opacity: float = 0.5,
title: str = '',
block: bool = True):
img = mmcv.imread(img)
img = img.copy()
seg = result[0]
seg = mmcv.imresize(seg, img.shape[:2][::-1])
palette = np.array(palette)
assert palette.shape[1] == 3
assert len(palette.shape) == 2
assert 0 < opacity <= 1.0
color_seg = np.zeros((seg.shape[0], seg.shape[1], 3), dtype=np.uint8)
for label, color in enumerate(palette):
color_seg[seg == label, :] = color
# convert to BGR
color_seg = color_seg[..., ::-1]
img = img * (1 - opacity) + color_seg * opacity
img = img.astype(np.uint8)
plt.figure(figsize=fig_size)
plt.imshow(mmcv.bgr2rgb(img))
plt.title(title)
plt.tight_layout()
plt.show(block=block)
def onnx2tensorrt(onnx_file: str,
trt_file: str,
config: dict,
input_config: dict,
fp16: bool = False,
verify: bool = False,
show: bool = False,
dataset: str = 'CityscapesDataset',
workspace_size: int = 1,
verbose: bool = False):
import tensorrt as trt
min_shape = input_config['min_shape']
max_shape = input_config['max_shape']
# create trt engine and wrapper
opt_shape_dict = {'input': [min_shape, min_shape, max_shape]}
max_workspace_size = get_GiB(workspace_size)
trt_engine = onnx2trt(
onnx_file,
opt_shape_dict,
log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
fp16_mode=fp16,
max_workspace_size=max_workspace_size)
save_dir, _ = osp.split(trt_file)
if save_dir:
os.makedirs(save_dir, exist_ok=True)
save_trt_engine(trt_engine, trt_file)
print(f'Successfully created TensorRT engine: {trt_file}')
if verify:
inputs = _prepare_input_img(
input_config['input_path'],
config.data.test.pipeline,
shape=min_shape[2:])
imgs = inputs['imgs']
img_metas = inputs['img_metas']
img_list = [img[None, :] for img in imgs]
img_meta_list = [[img_meta] for img_meta in img_metas]
# update img_meta
img_list, img_meta_list = _update_input_img(img_list, img_meta_list)
if max_shape[0] > 1:
            # concatenate flipped images for batch testing
flip_img_list = [_.flip(-1) for _ in img_list]
img_list = [
torch.cat((ori_img, flip_img), 0)
for ori_img, flip_img in zip(img_list, flip_img_list)
]
# Get results from ONNXRuntime
ort_custom_op_path = get_onnxruntime_op_path()
session_options = ort.SessionOptions()
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_file, session_options)
sess.set_providers(['CPUExecutionProvider'], [{}]) # use cpu mode
onnx_output = sess.run(['output'],
{'input': img_list[0].detach().numpy()})[0][0]
# Get results from TensorRT
trt_model = TRTWraper(trt_file, ['input'], ['output'])
with torch.no_grad():
trt_outputs = trt_model({'input': img_list[0].contiguous().cuda()})
trt_output = trt_outputs['output'][0].cpu().detach().numpy()
if show:
dataset = DATASETS.get(dataset)
assert dataset is not None
palette = dataset.PALETTE
show_result_pyplot(
input_config['input_path'],
(onnx_output[0].astype(np.uint8), ),
palette=palette,
title='ONNXRuntime',
block=False)
show_result_pyplot(
input_config['input_path'], (trt_output[0].astype(np.uint8), ),
palette=palette,
title='TensorRT')
np.testing.assert_allclose(
onnx_output, trt_output, rtol=1e-03, atol=1e-05)
print('TensorRT and ONNXRuntime output all close.')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMSegmentation models from ONNX to TensorRT')
parser.add_argument('config', help='Config file of the model')
parser.add_argument('model', help='Path to the input ONNX model')
parser.add_argument(
'--trt-file', type=str, help='Path to the output TensorRT engine')
parser.add_argument(
'--max-shape',
type=int,
nargs=4,
default=[1, 3, 400, 600],
help='Maximum shape of model input.')
parser.add_argument(
'--min-shape',
type=int,
nargs=4,
default=[1, 3, 400, 600],
help='Minimum shape of model input.')
parser.add_argument('--fp16', action='store_true', help='Enable fp16 mode')
parser.add_argument(
'--workspace-size',
type=int,
default=1,
help='Max workspace size in GiB')
parser.add_argument(
'--input-img', type=str, default='', help='Image for test')
parser.add_argument(
'--show', action='store_true', help='Whether to show output results')
parser.add_argument(
'--dataset',
type=str,
default='CityscapesDataset',
help='Dataset name')
parser.add_argument(
'--verify',
action='store_true',
help='Verify the outputs of ONNXRuntime and TensorRT')
parser.add_argument(
'--verbose',
action='store_true',
        help='Whether to output verbose logging messages while creating '
        'the TensorRT engine.')
args = parser.parse_args()
return args
if __name__ == '__main__':
assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
args = parse_args()
if not args.input_img:
args.input_img = osp.join(osp.dirname(__file__), '../demo/demo.png')
# check arguments
assert osp.exists(args.config), 'Config {} not found.'.format(args.config)
assert osp.exists(args.model), \
'ONNX model {} not found.'.format(args.model)
assert args.workspace_size >= 0, 'Workspace size less than 0.'
assert DATASETS.get(args.dataset) is not None, \
        'Dataset {} not found.'.format(args.dataset)
for max_value, min_value in zip(args.max_shape, args.min_shape):
assert max_value >= min_value, \
            'max_shape should be no smaller than min_shape'
input_config = {
'min_shape': args.min_shape,
'max_shape': args.max_shape,
'input_path': args.input_img
}
cfg = mmcv.Config.fromfile(args.config)
onnx2tensorrt(
args.model,
args.trt_file,
cfg,
input_config,
fp16=args.fp16,
verify=args.verify,
show=args.show,
dataset=args.dataset,
workspace_size=args.workspace_size,
verbose=args.verbose)
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This tool will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
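# A minimal usage sketch (paths are illustrative); shapes are given as NCHW:
#   python tools/onnx2tensorrt.py configs/some_config.py model.onnx \
#       --trt-file model.trt --min-shape 1 3 400 600 \
#       --max-shape 1 3 400 600 --verify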
| 9,866 | 33.024138 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/print_config.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from mmcv import Config, DictAction
from mmseg.apis import init_segmentor
def parse_args():
parser = argparse.ArgumentParser(description='Print the whole config')
parser.add_argument('config', help='config file path')
parser.add_argument(
'--graph', action='store_true', help='print the models graph')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help="--options is deprecated in favor of --cfg_options' and it will "
'not be supported in version v0.22.0. Override some settings in the '
'used config, the key-value pair in xxx=yyy format will be merged '
'into config file. If the value to be overwritten is a list, it '
'should be like key="[a,b]" or key=a,b It also allows nested '
'list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation '
'marks are necessary and that no white space is allowed.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options. '
'--options will not be supported in version v0.22.0.')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options, '
'--options will not be supported in version v0.22.0.')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
print(f'Config:\n{cfg.pretty_text}')
# dump config
cfg.dump('example.py')
# dump models graph
if args.graph:
model = init_segmentor(args.config, device='cpu')
print(f'Model graph:\n{str(model)}')
with open('example-graph.txt', 'w') as f:
f.writelines(str(model))
if __name__ == '__main__':
main()
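# A minimal usage sketch (the config path and the overridden key are
# illustrative):
#   python tools/print_config.py configs/some_config.py \
#       --cfg-options model.backbone.depth=101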
| 2,584 | 35.928571 | 78 | py |
mmsegmentation | mmsegmentation-master/tools/publish_model.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import subprocess
import torch
def parse_args():
parser = argparse.ArgumentParser(
description='Process a checkpoint to be published')
parser.add_argument('in_file', help='input checkpoint filename')
parser.add_argument('out_file', help='output checkpoint filename')
args = parser.parse_args()
return args
def process_checkpoint(in_file, out_file):
checkpoint = torch.load(in_file, map_location='cpu')
# remove optimizer for smaller file size
if 'optimizer' in checkpoint:
del checkpoint['optimizer']
# if it is necessary to remove some sensitive data in checkpoint['meta'],
# add the code here.
torch.save(checkpoint, out_file)
sha = subprocess.check_output(['sha256sum', out_file]).decode()
    # str.rstrip would strip any of the characters '.', 'p', 't', 'h', so
    # remove the suffix explicitly before appending the short checksum
    out_file_name = out_file[:-4] if out_file.endswith('.pth') else out_file
    final_file = out_file_name + '-{}.pth'.format(sha[:8])
subprocess.Popen(['mv', out_file, final_file])
def main():
args = parse_args()
process_checkpoint(args.in_file, args.out_file)
if __name__ == '__main__':
main()
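# A minimal usage sketch (paths are illustrative); the published file gains
# the first 8 characters of its sha256 checksum, e.g. model-ef1356a2.pth:
#   python tools/publish_model.py work_dirs/latest.pth model.pth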
| 1,076 | 28.108108 | 77 | py |
mmsegmentation | mmsegmentation-master/tools/pytorch2onnx.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import warnings
from functools import partial
import mmcv
import numpy as np
import onnxruntime as rt
import torch
import torch._C
import torch.serialization
from mmcv import DictAction
from mmcv.onnx import register_extra_symbolics
from mmcv.runner import load_checkpoint
from torch import nn
from mmseg.apis import show_result_pyplot
from mmseg.apis.inference import LoadImage
from mmseg.datasets.pipelines import Compose
from mmseg.models import build_segmentor
from mmseg.ops import resize
torch.manual_seed(3)
def _convert_batchnorm(module):
module_output = module
if isinstance(module, torch.nn.SyncBatchNorm):
module_output = torch.nn.BatchNorm2d(module.num_features, module.eps,
module.momentum, module.affine,
module.track_running_stats)
if module.affine:
module_output.weight.data = module.weight.data.clone().detach()
module_output.bias.data = module.bias.data.clone().detach()
# keep requires_grad unchanged
module_output.weight.requires_grad = module.weight.requires_grad
module_output.bias.requires_grad = module.bias.requires_grad
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
for name, child in module.named_children():
module_output.add_module(name, _convert_batchnorm(child))
del module
return module_output
def _demo_mm_inputs(input_shape, num_classes):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
segs = rng.randint(
low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_semantic_seg': torch.LongTensor(segs)
}
return mm_inputs
def _prepare_input_img(img_path,
test_pipeline,
shape=None,
rescale_shape=None):
# build the data pipeline
if shape is not None:
test_pipeline[1]['img_scale'] = (shape[1], shape[0])
test_pipeline[1]['transforms'][0]['keep_ratio'] = False
test_pipeline = [LoadImage()] + test_pipeline[1:]
test_pipeline = Compose(test_pipeline)
# prepare data
data = dict(img=img_path)
data = test_pipeline(data)
imgs = data['img']
img_metas = [i.data for i in data['img_metas']]
if rescale_shape is not None:
for img_meta in img_metas:
img_meta['ori_shape'] = tuple(rescale_shape) + (3, )
mm_inputs = {'imgs': imgs, 'img_metas': img_metas}
return mm_inputs
def _update_input_img(img_list, img_meta_list, update_ori_shape=False):
# update img and its meta list
N, C, H, W = img_list[0].shape
img_meta = img_meta_list[0][0]
img_shape = (H, W, C)
if update_ori_shape:
ori_shape = img_shape
else:
ori_shape = img_meta['ori_shape']
pad_shape = img_shape
new_img_meta_list = [[{
'img_shape':
img_shape,
'ori_shape':
ori_shape,
'pad_shape':
pad_shape,
'filename':
img_meta['filename'],
'scale_factor':
(img_shape[1] / ori_shape[1], img_shape[0] / ori_shape[0]) * 2,
'flip':
False,
} for _ in range(N)]]
return img_list, new_img_meta_list
def pytorch2onnx(model,
mm_inputs,
opset_version=11,
show=False,
output_file='tmp.onnx',
verify=False,
dynamic_export=False):
"""Export Pytorch model to ONNX model and verify the outputs are same
between Pytorch and ONNX.
Args:
model (nn.Module): Pytorch model we want to export.
mm_inputs (dict): Contain the input tensors and img_metas information.
opset_version (int): The onnx op version. Default: 11.
show (bool): Whether print the computation graph. Default: False.
output_file (string): The path to where we store the output ONNX model.
Default: `tmp.onnx`.
verify (bool): Whether compare the outputs between Pytorch and ONNX.
Default: False.
dynamic_export (bool): Whether to export ONNX with dynamic axis.
Default: False.
"""
model.cpu().eval()
test_mode = model.test_cfg.mode
if isinstance(model.decode_head, nn.ModuleList):
num_classes = model.decode_head[-1].num_classes
else:
num_classes = model.decode_head.num_classes
imgs = mm_inputs.pop('imgs')
img_metas = mm_inputs.pop('img_metas')
img_list = [img[None, :] for img in imgs]
img_meta_list = [[img_meta] for img_meta in img_metas]
# update img_meta
img_list, img_meta_list = _update_input_img(img_list, img_meta_list)
# replace original forward function
origin_forward = model.forward
model.forward = partial(
model.forward,
img_metas=img_meta_list,
return_loss=False,
rescale=True)
dynamic_axes = None
if dynamic_export:
if test_mode == 'slide':
dynamic_axes = {'input': {0: 'batch'}, 'output': {1: 'batch'}}
else:
dynamic_axes = {
'input': {
0: 'batch',
2: 'height',
3: 'width'
},
'output': {
1: 'batch',
2: 'height',
3: 'width'
}
}
register_extra_symbolics(opset_version)
with torch.no_grad():
torch.onnx.export(
model, (img_list, ),
output_file,
input_names=['input'],
output_names=['output'],
export_params=True,
keep_initializers_as_inputs=False,
verbose=show,
opset_version=opset_version,
dynamic_axes=dynamic_axes)
print(f'Successfully exported ONNX model: {output_file}')
model.forward = origin_forward
if verify:
# check by onnx
import onnx
onnx_model = onnx.load(output_file)
onnx.checker.check_model(onnx_model)
if dynamic_export and test_mode == 'whole':
# scale image for dynamic shape test
img_list = [resize(_, scale_factor=1.5) for _ in img_list]
            # concatenate flipped images for batch testing
flip_img_list = [_.flip(-1) for _ in img_list]
img_list = [
torch.cat((ori_img, flip_img), 0)
for ori_img, flip_img in zip(img_list, flip_img_list)
]
# update img_meta
img_list, img_meta_list = _update_input_img(
img_list, img_meta_list, test_mode == 'whole')
# check the numerical value
# get pytorch output
with torch.no_grad():
pytorch_result = model(img_list, img_meta_list, return_loss=False)
pytorch_result = np.stack(pytorch_result, 0)
# get onnx output
input_all = [node.name for node in onnx_model.graph.input]
input_initializer = [
node.name for node in onnx_model.graph.initializer
]
net_feed_input = list(set(input_all) - set(input_initializer))
assert (len(net_feed_input) == 1)
sess = rt.InferenceSession(output_file)
onnx_result = sess.run(
None, {net_feed_input[0]: img_list[0].detach().numpy()})[0][0]
# show segmentation results
if show:
import os.path as osp
import cv2
img = img_meta_list[0][0]['filename']
if not osp.exists(img):
img = imgs[0][:3, ...].permute(1, 2, 0) * 255
img = img.detach().numpy().astype(np.uint8)
ori_shape = img.shape[:2]
else:
ori_shape = LoadImage()({'img': img})['ori_shape']
# resize onnx_result to ori_shape
onnx_result_ = cv2.resize(onnx_result[0].astype(np.uint8),
(ori_shape[1], ori_shape[0]))
show_result_pyplot(
model,
img, (onnx_result_, ),
palette=model.PALETTE,
block=False,
title='ONNXRuntime',
opacity=0.5)
# resize pytorch_result to ori_shape
pytorch_result_ = cv2.resize(pytorch_result[0].astype(np.uint8),
(ori_shape[1], ori_shape[0]))
show_result_pyplot(
model,
img, (pytorch_result_, ),
title='PyTorch',
palette=model.PALETTE,
opacity=0.5)
# compare results
np.testing.assert_allclose(
pytorch_result.astype(np.float32) / num_classes,
onnx_result.astype(np.float32) / num_classes,
rtol=1e-5,
atol=1e-5,
err_msg='The outputs are different between Pytorch and ONNX')
print('The outputs are same between Pytorch and ONNX')
def parse_args():
parser = argparse.ArgumentParser(description='Convert MMSeg to ONNX')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file', default=None)
parser.add_argument(
'--input-img', type=str, help='Images for input', default=None)
parser.add_argument(
'--show',
action='store_true',
help='show onnx graph and segmentation results')
parser.add_argument(
'--verify', action='store_true', help='verify the onnx model')
parser.add_argument('--output-file', type=str, default='tmp.onnx')
parser.add_argument('--opset-version', type=int, default=11)
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=None,
help='input image height and width.')
parser.add_argument(
'--rescale_shape',
type=int,
nargs='+',
default=None,
help='output image rescale height and width, work for slide mode.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='Override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--dynamic-export',
action='store_true',
help='Whether to export onnx with dynamic axis.')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
cfg = mmcv.Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.model.pretrained = None
if args.shape is None:
img_scale = cfg.test_pipeline[1]['img_scale']
input_shape = (1, 3, img_scale[1], img_scale[0])
elif len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (
1,
3,
) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
test_mode = cfg.model.test_cfg.mode
# build the model and load checkpoint
cfg.model.train_cfg = None
segmentor = build_segmentor(
cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
# convert SyncBN to BN
segmentor = _convert_batchnorm(segmentor)
if args.checkpoint:
checkpoint = load_checkpoint(
segmentor, args.checkpoint, map_location='cpu')
segmentor.CLASSES = checkpoint['meta']['CLASSES']
segmentor.PALETTE = checkpoint['meta']['PALETTE']
    # read the input image or create a dummy input
if args.input_img is not None:
preprocess_shape = (input_shape[2], input_shape[3])
rescale_shape = None
if args.rescale_shape is not None:
rescale_shape = [args.rescale_shape[0], args.rescale_shape[1]]
mm_inputs = _prepare_input_img(
args.input_img,
cfg.data.test.pipeline,
shape=preprocess_shape,
rescale_shape=rescale_shape)
else:
if isinstance(segmentor.decode_head, nn.ModuleList):
num_classes = segmentor.decode_head[-1].num_classes
else:
num_classes = segmentor.decode_head.num_classes
mm_inputs = _demo_mm_inputs(input_shape, num_classes)
# convert model to onnx file
pytorch2onnx(
segmentor,
mm_inputs,
opset_version=args.opset_version,
show=args.show,
output_file=args.output_file,
verify=args.verify,
dynamic_export=args.dynamic_export)
# Following strings of text style are from colorama package
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This tool will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
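# A minimal usage sketch (paths are illustrative):
#   python tools/pytorch2onnx.py configs/some_config.py \
#       --checkpoint checkpoint.pth --output-file model.onnx \
#       --shape 512 512 --verify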
| 14,147 | 33.847291 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/pytorch2torchscript.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
import numpy as np
import torch
import torch._C
import torch.serialization
from mmcv.runner import load_checkpoint
from torch import nn
from mmseg.models import build_segmentor
torch.manual_seed(3)
def digit_version(version_str):
digit_version = []
for x in version_str.split('.'):
if x.isdigit():
digit_version.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_version.append(int(patch_version[0]) - 1)
digit_version.append(int(patch_version[1]))
return digit_version
def check_torch_version():
torch_minimum_version = '1.8.0'
torch_version = digit_version(torch.__version__)
assert (torch_version >= digit_version(torch_minimum_version)), \
        f'Torch=={torch.__version__} is not supported for converting to ' \
        f'torchscript. Please install pytorch>={torch_minimum_version}.'
def _convert_batchnorm(module):
module_output = module
if isinstance(module, torch.nn.SyncBatchNorm):
module_output = torch.nn.BatchNorm2d(module.num_features, module.eps,
module.momentum, module.affine,
module.track_running_stats)
if module.affine:
module_output.weight.data = module.weight.data.clone().detach()
module_output.bias.data = module.bias.data.clone().detach()
# keep requires_grad unchanged
module_output.weight.requires_grad = module.weight.requires_grad
module_output.bias.requires_grad = module.bias.requires_grad
module_output.running_mean = module.running_mean
module_output.running_var = module.running_var
module_output.num_batches_tracked = module.num_batches_tracked
for name, child in module.named_children():
module_output.add_module(name, _convert_batchnorm(child))
del module
return module_output
def _demo_mm_inputs(input_shape, num_classes):
"""Create a superset of inputs needed to run test or train batches.
Args:
input_shape (tuple):
input batch dimensions
num_classes (int):
number of semantic classes
"""
(N, C, H, W) = input_shape
rng = np.random.RandomState(0)
imgs = rng.rand(*input_shape)
segs = rng.randint(
low=0, high=num_classes - 1, size=(N, 1, H, W)).astype(np.uint8)
img_metas = [{
'img_shape': (H, W, C),
'ori_shape': (H, W, C),
'pad_shape': (H, W, C),
'filename': '<demo>.png',
'scale_factor': 1.0,
'flip': False,
} for _ in range(N)]
mm_inputs = {
'imgs': torch.FloatTensor(imgs).requires_grad_(True),
'img_metas': img_metas,
'gt_semantic_seg': torch.LongTensor(segs)
}
return mm_inputs
def pytorch2libtorch(model,
input_shape,
show=False,
output_file='tmp.pt',
verify=False):
"""Export Pytorch model to TorchScript model and verify the outputs are
same between Pytorch and TorchScript.
Args:
model (nn.Module): Pytorch model we want to export.
input_shape (tuple): Use this input shape to construct
the corresponding dummy input and execute the model.
show (bool): Whether print the computation graph. Default: False.
output_file (string): The path to where we store the
output TorchScript model. Default: `tmp.pt`.
verify (bool): Whether compare the outputs between
Pytorch and TorchScript. Default: False.
"""
if isinstance(model.decode_head, nn.ModuleList):
num_classes = model.decode_head[-1].num_classes
else:
num_classes = model.decode_head.num_classes
mm_inputs = _demo_mm_inputs(input_shape, num_classes)
imgs = mm_inputs.pop('imgs')
    # replace the original forward with forward_dummy
model.forward = model.forward_dummy
model.eval()
traced_model = torch.jit.trace(
model,
example_inputs=imgs,
check_trace=verify,
)
if show:
print(traced_model.graph)
traced_model.save(output_file)
print('Successfully exported TorchScript model: {}'.format(output_file))
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMSeg to TorchScript')
parser.add_argument('config', help='test config file path')
parser.add_argument('--checkpoint', help='checkpoint file', default=None)
parser.add_argument(
'--show', action='store_true', help='show TorchScript graph')
parser.add_argument(
'--verify', action='store_true', help='verify the TorchScript model')
parser.add_argument('--output-file', type=str, default='tmp.pt')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[512, 512],
help='input image size (height, width)')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
check_torch_version()
if len(args.shape) == 1:
input_shape = (1, 3, args.shape[0], args.shape[0])
elif len(args.shape) == 2:
input_shape = (
1,
3,
) + tuple(args.shape)
else:
raise ValueError('invalid input shape')
cfg = mmcv.Config.fromfile(args.config)
cfg.model.pretrained = None
# build the model and load checkpoint
cfg.model.train_cfg = None
segmentor = build_segmentor(
cfg.model, train_cfg=None, test_cfg=cfg.get('test_cfg'))
# convert SyncBN to BN
segmentor = _convert_batchnorm(segmentor)
if args.checkpoint:
load_checkpoint(segmentor, args.checkpoint, map_location='cpu')
# convert the PyTorch model to LibTorch model
pytorch2libtorch(
segmentor,
input_shape,
show=args.show,
output_file=args.output_file,
verify=args.verify)
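# A minimal usage sketch (paths are illustrative); requires pytorch>=1.8.0:
#   python tools/pytorch2torchscript.py configs/some_config.py \
#       --checkpoint checkpoint.pth --output-file model.pt --verify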
| 6,057 | 31.569892 | 77 | py |
mmsegmentation | mmsegmentation-master/tools/slurm_test.sh | #!/usr/bin/env bash
set -x
PARTITION=$1
JOB_NAME=$2
CONFIG=$3
CHECKPOINT=$4
GPUS=${GPUS:-4}
GPUS_PER_NODE=${GPUS_PER_NODE:-4}
CPUS_PER_TASK=${CPUS_PER_TASK:-5}
PY_ARGS=${@:5}
SRUN_ARGS=${SRUN_ARGS:-""}
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
srun -p ${PARTITION} \
--job-name=${JOB_NAME} \
--gres=gpu:${GPUS_PER_NODE} \
--ntasks=${GPUS} \
--ntasks-per-node=${GPUS_PER_NODE} \
--cpus-per-task=${CPUS_PER_TASK} \
--kill-on-bad-exit=1 \
${SRUN_ARGS} \
python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS}
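# Minimal usage sketch (partition/job names and paths are illustrative):
#   GPUS=8 ./tools/slurm_test.sh my_partition test_job \
#       configs/some_config.py checkpoint.pth --eval mIoU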
| 566 | 21.68 | 81 | sh |
mmsegmentation | mmsegmentation-master/tools/slurm_train.sh | #!/usr/bin/env bash
set -x
PARTITION=$1
JOB_NAME=$2
CONFIG=$3
GPUS=${GPUS:-4}
GPUS_PER_NODE=${GPUS_PER_NODE:-4}
CPUS_PER_TASK=${CPUS_PER_TASK:-5}
SRUN_ARGS=${SRUN_ARGS:-""}
PY_ARGS=${@:4}
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
srun -p ${PARTITION} \
--job-name=${JOB_NAME} \
--gres=gpu:${GPUS_PER_NODE} \
--ntasks=${GPUS} \
--ntasks-per-node=${GPUS_PER_NODE} \
--cpus-per-task=${CPUS_PER_TASK} \
--kill-on-bad-exit=1 \
${SRUN_ARGS} \
python -u tools/train.py ${CONFIG} --launcher="slurm" ${PY_ARGS}
| 539 | 21.5 | 68 | sh |
mmsegmentation | mmsegmentation-master/tools/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import shutil
import time
import warnings
import mmcv
import torch
from mmcv.cnn.utils import revert_sync_batchnorm
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmcv.utils import DictAction
from mmseg import digit_version
from mmseg.apis import multi_gpu_test, single_gpu_test
from mmseg.datasets import build_dataloader, build_dataset
from mmseg.models import build_segmentor
from mmseg.utils import build_ddp, build_dp, get_device, setup_multi_processes
def parse_args():
parser = argparse.ArgumentParser(
description='mmseg test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
        help=('if specified, the evaluation metric results will be dumped '
'into the directory as json'))
parser.add_argument(
'--aug-test', action='store_true', help='Use Flip and Multi scale aug')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
        help='evaluation metrics, which depend on the dataset, e.g., "mIoU"'
' for generic datasets, and "cityscapes" for Cityscapes')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--gpu-id',
type=int,
default=0,
help='id of gpu to use '
'(only applicable to non-distributed testing)')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu_collect is not specified')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help="--options is deprecated in favor of --cfg_options' and it will "
'not be supported in version v0.22.0. Override some settings in the '
'used config, the key-value pair in xxx=yyy format will be merged '
'into config file. If the value to be overwritten is a list, it '
'should be like key="[a,b]" or key=a,b It also allows nested '
'list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation '
'marks are necessary and that no white space is allowed.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument(
'--opacity',
type=float,
default=0.5,
help='Opacity of painted segmentation map. In (0, 1] range.')
# When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
# will pass the `--local-rank` parameter to `tools/train.py` instead
# of `--local_rank`.
parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options. '
'--options will not be supported in version v0.22.0.')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options. '
'--options will not be supported in version v0.22.0.')
args.cfg_options = args.options
return args
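# Example invocations (config/checkpoint paths are placeholders):
#   single-scale evaluation:
#     python tools/test.py configs/some_config.py work_dirs/latest.pth \
#         --eval mIoU
#   multi-scale + flip evaluation:
#     python tools/test.py configs/some_config.py work_dirs/latest.pth \
#         --eval mIoU --aug-test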
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if args.aug_test:
# hard code index
cfg.data.test.pipeline[1].img_ratios = [
0.5, 0.75, 1.0, 1.25, 1.5, 1.75
]
cfg.data.test.pipeline[1].flip = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
if args.gpu_id is not None:
cfg.gpu_ids = [args.gpu_id]
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
cfg.gpu_ids = [args.gpu_id]
distributed = False
if len(cfg.gpu_ids) > 1:
warnings.warn(f'The gpu-ids is reset from {cfg.gpu_ids} to '
f'{cfg.gpu_ids[0:1]} to avoid potential error in '
'non-distribute testing time.')
cfg.gpu_ids = cfg.gpu_ids[0:1]
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
rank, _ = get_dist_info()
    # work_dir is optional; create it (and the eval json path) only on rank 0
if args.work_dir is not None and rank == 0:
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
if args.aug_test:
json_file = osp.join(args.work_dir,
f'eval_multi_scale_{timestamp}.json')
else:
json_file = osp.join(args.work_dir,
f'eval_single_scale_{timestamp}.json')
elif rank == 0:
work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
mmcv.mkdir_or_exist(osp.abspath(work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
if args.aug_test:
json_file = osp.join(work_dir,
f'eval_multi_scale_{timestamp}.json')
else:
json_file = osp.join(work_dir,
f'eval_single_scale_{timestamp}.json')
# build the dataloader
# TODO: support multiple images per gpu (only minor changes are needed)
dataset = build_dataset(cfg.data.test)
# The default loader config
loader_cfg = dict(
# cfg.gpus will be ignored if distributed
num_gpus=len(cfg.gpu_ids),
dist=distributed,
shuffle=False)
# The overall dataloader settings
loader_cfg.update({
k: v
for k, v in cfg.data.items() if k not in [
'train', 'val', 'test', 'train_dataloader', 'val_dataloader',
'test_dataloader'
]
})
test_loader_cfg = {
**loader_cfg,
'samples_per_gpu': 1,
'shuffle': False, # Not shuffle by default
**cfg.data.get('test_dataloader', {})
}
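    # For a typical single-GPU run the merged test loader config resolves to
    # something like the following (values are illustrative defaults, not
    # read from this exact config):
    #   {'num_gpus': 1, 'dist': False, 'shuffle': False,
    #    'workers_per_gpu': 2, 'samples_per_gpu': 1}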
# build the dataloader
data_loader = build_dataloader(dataset, **test_loader_cfg)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_segmentor(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
print('"CLASSES" not found in meta, use dataset.CLASSES instead')
model.CLASSES = dataset.CLASSES
if 'PALETTE' in checkpoint.get('meta', {}):
model.PALETTE = checkpoint['meta']['PALETTE']
else:
print('"PALETTE" not found in meta, use dataset.PALETTE instead')
model.PALETTE = dataset.PALETTE
# clean gpu memory when starting a new evaluation.
torch.cuda.empty_cache()
eval_kwargs = {} if args.eval_options is None else args.eval_options
# Deprecated
efficient_test = eval_kwargs.get('efficient_test', False)
if efficient_test:
warnings.warn(
'``efficient_test=True`` does not have effect in tools/test.py, '
'the evaluation and format results are CPU memory efficient by '
'default')
eval_on_format_results = (
args.eval is not None and 'cityscapes' in args.eval)
if eval_on_format_results:
assert len(args.eval) == 1, 'eval on format results is not ' \
'applicable for metrics other than ' \
'cityscapes'
if args.format_only or eval_on_format_results:
if 'imgfile_prefix' in eval_kwargs:
tmpdir = eval_kwargs['imgfile_prefix']
else:
tmpdir = '.format_cityscapes'
eval_kwargs.setdefault('imgfile_prefix', tmpdir)
mmcv.mkdir_or_exist(tmpdir)
else:
tmpdir = None
cfg.device = get_device()
if not distributed:
warnings.warn(
'SyncBN is only supported with DDP. To be compatible with DP, '
            'we convert SyncBN to BN. Please use dist_test.sh which can '
'avoid this error.')
if not torch.cuda.is_available():
assert digit_version(mmcv.__version__) >= digit_version('1.4.4'), \
'Please use MMCV >= 1.4.4 for CPU training!'
model = revert_sync_batchnorm(model)
model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)
results = single_gpu_test(
model,
data_loader,
args.show,
args.show_dir,
False,
args.opacity,
pre_eval=args.eval is not None and not eval_on_format_results,
format_only=args.format_only or eval_on_format_results,
format_args=eval_kwargs)
else:
model = build_ddp(
model,
cfg.device,
device_ids=[int(os.environ['LOCAL_RANK'])],
broadcast_buffers=False)
results = multi_gpu_test(
model,
data_loader,
args.tmpdir,
args.gpu_collect,
False,
pre_eval=args.eval is not None and not eval_on_format_results,
format_only=args.format_only or eval_on_format_results,
format_args=eval_kwargs)
rank, _ = get_dist_info()
if rank == 0:
if args.out:
warnings.warn(
'The behavior of ``args.out`` has been changed since MMSeg '
'v0.16, the pickled outputs could be seg map as type of '
'np.array, pre-eval results or file paths for '
'``dataset.format_results()``.')
print(f'\nwriting results to {args.out}')
mmcv.dump(results, args.out)
if args.eval:
eval_kwargs.update(metric=args.eval)
metric = dataset.evaluate(results, **eval_kwargs)
metric_dict = dict(config=args.config, metric=metric)
mmcv.dump(metric_dict, json_file, indent=4)
if tmpdir is not None and eval_on_format_results:
# remove tmp dir when cityscapes evaluation
shutil.rmtree(tmpdir)
if __name__ == '__main__':
main()
| 12,704 | 38.212963 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/train.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
import torch.distributed as dist
from mmcv.cnn.utils import revert_sync_batchnorm
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import Config, DictAction, get_git_hash
from mmseg import __version__
from mmseg.apis import init_random_seed, set_random_seed, train_segmentor
from mmseg.datasets import build_dataset
from mmseg.models import build_segmentor
from mmseg.utils import (collect_env, get_device, get_root_logger,
setup_multi_processes)
def parse_args():
parser = argparse.ArgumentParser(description='Train a segmentor')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--load-from', help='the checkpoint file to load weights from')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='(Deprecated, please use --gpu-id) number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='(Deprecated, please use --gpu-id) ids of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-id',
type=int,
default=0,
help='id of gpu to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--diff_seed',
action='store_true',
help='Whether or not set different seeds for different ranks')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help="--options is deprecated in favor of --cfg_options' and it will "
'not be supported in version v0.22.0. Override some settings in the '
'used config, the key-value pair in xxx=yyy format will be merged '
'into config file. If the value to be overwritten is a list, it '
'should be like key="[a,b]" or key=a,b It also allows nested '
'list/tuple values, e.g. key="[(a,b),(c,d)]" Note that the quotation '
'marks are necessary and that no white space is allowed.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
# When using PyTorch version >= 2.0.0, the `torch.distributed.launch`
# will pass the `--local-rank` parameter to `tools/train.py` instead
# of `--local_rank`.
parser.add_argument('--local_rank', '--local-rank', type=int, default=0)
parser.add_argument(
'--auto-resume',
action='store_true',
help='resume from the latest checkpoint automatically.')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options. '
'--options will not be supported in version v0.22.0.')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options. '
'--options will not be supported in version v0.22.0.')
args.cfg_options = args.options
return args
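# Example launches (config path is a placeholder):
#   single GPU:
#     python tools/train.py configs/some_config.py --work-dir work_dirs/exp1
#   distributed (8 GPUs):
#     bash tools/dist_train.sh configs/some_config.py 8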
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.load_from is not None:
cfg.load_from = args.load_from
if args.resume_from is not None:
cfg.resume_from = args.resume_from
if args.gpus is not None:
cfg.gpu_ids = range(1)
warnings.warn('`--gpus` is deprecated because we only support '
'single GPU mode in non-distributed training. '
'Use `gpus=1` now.')
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids[0:1]
warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
'Because we only support single GPU mode in '
'non-distributed training. Use the first GPU '
'in `gpu_ids` now.')
if args.gpus is None and args.gpu_ids is None:
cfg.gpu_ids = [args.gpu_id]
cfg.auto_resume = args.auto_resume
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# gpu_ids is used to calculate iter when resuming checkpoint
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# set multi-process settings
setup_multi_processes(cfg)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([f'{k}: {v}' for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
# set random seeds
cfg.device = get_device()
seed = init_random_seed(args.seed, device=cfg.device)
seed = seed + dist.get_rank() if args.diff_seed else seed
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
model = build_segmentor(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
    # SyncBN is not supported for DP
if not distributed:
warnings.warn(
'SyncBN is only supported with DDP. To be compatible with DP, '
'we convert SyncBN to BN. Please use dist_train.sh which can '
'avoid this error.')
model = revert_sync_batchnorm(model)
logger.info(model)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.pipeline
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmseg version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmseg_version=f'{__version__}+{get_git_hash()[:7]}',
config=cfg.pretty_text,
CLASSES=datasets[0].CLASSES,
PALETTE=datasets[0].PALETTE)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
# passing checkpoint meta for saving best checkpoint
meta.update(cfg.checkpoint_config.meta)
train_segmentor(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
| 9,480 | 37.54065 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/chase_db1.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import tempfile
import zipfile
import mmcv
CHASE_DB1_LEN = 28 * 3
TRAINING_LEN = 60
def parse_args():
parser = argparse.ArgumentParser(
description='Convert CHASE_DB1 dataset to mmsegmentation format')
parser.add_argument('dataset_path', help='path of CHASEDB1.zip')
parser.add_argument('--tmp_dir', help='path of the temporary directory')
parser.add_argument('-o', '--out_dir', help='output path')
args = parser.parse_args()
return args
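# Example (the zip path below is a placeholder):
#   python tools/convert_datasets/chase_db1.py /path/to/CHASEDB1.zip \
#       --tmp_dir /tmp -o data/CHASE_DB1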
def main():
args = parse_args()
dataset_path = args.dataset_path
if args.out_dir is None:
out_dir = osp.join('data', 'CHASE_DB1')
else:
out_dir = args.out_dir
print('Making directories...')
mmcv.mkdir_or_exist(out_dir)
mmcv.mkdir_or_exist(osp.join(out_dir, 'images'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'training'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'images', 'validation'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'training'))
mmcv.mkdir_or_exist(osp.join(out_dir, 'annotations', 'validation'))
with tempfile.TemporaryDirectory(dir=args.tmp_dir) as tmp_dir:
print('Extracting CHASEDB1.zip...')
zip_file = zipfile.ZipFile(dataset_path)
zip_file.extractall(tmp_dir)
print('Generating training dataset...')
assert len(os.listdir(tmp_dir)) == CHASE_DB1_LEN, \
'len(os.listdir(tmp_dir)) != {}'.format(CHASE_DB1_LEN)
for img_name in sorted(os.listdir(tmp_dir))[:TRAINING_LEN]:
img = mmcv.imread(osp.join(tmp_dir, img_name))
if osp.splitext(img_name)[1] == '.jpg':
mmcv.imwrite(
img,
osp.join(out_dir, 'images', 'training',
osp.splitext(img_name)[0] + '.png'))
else:
                # The annotation image should be divided by 128 because
                # some of the annotation images are not strictly binary.
                # Integer division by 128 acts as a threshold: it is
                # equivalent to '1 if value >= 128 else 0', e.g.
                # 255 // 128 == 1 (vessel) while 3 // 128 == 0
                # (background).
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'training',
osp.splitext(img_name)[0] + '.png'))
for img_name in sorted(os.listdir(tmp_dir))[TRAINING_LEN:]:
img = mmcv.imread(osp.join(tmp_dir, img_name))
if osp.splitext(img_name)[1] == '.jpg':
mmcv.imwrite(
img,
osp.join(out_dir, 'images', 'validation',
osp.splitext(img_name)[0] + '.png'))
else:
mmcv.imwrite(
img[:, :, 0] // 128,
osp.join(out_dir, 'annotations', 'validation',
osp.splitext(img_name)[0] + '.png'))
print('Removing the temporary files...')
print('Done!')
if __name__ == '__main__':
main()
| 3,196 | 34.921348 | 78 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/cityscapes.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import mmcv
from cityscapesscripts.preparation.json2labelImg import json2labelImg
def convert_json_to_label(json_file):
label_file = json_file.replace('_polygons.json', '_labelTrainIds.png')
json2labelImg(json_file, label_file, 'trainIds')
def parse_args():
parser = argparse.ArgumentParser(
description='Convert Cityscapes annotations to TrainIds')
parser.add_argument('cityscapes_path', help='cityscapes data path')
parser.add_argument('--gt-dir', default='gtFine', type=str)
parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument(
'--nproc', default=1, type=int, help='number of process')
args = parser.parse_args()
return args
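# Example (assumes the standard Cityscapes layout under data/cityscapes):
#   python tools/convert_datasets/cityscapes.py data/cityscapes --nproc 8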
def main():
args = parse_args()
cityscapes_path = args.cityscapes_path
out_dir = args.out_dir if args.out_dir else cityscapes_path
mmcv.mkdir_or_exist(out_dir)
gt_dir = osp.join(cityscapes_path, args.gt_dir)
poly_files = []
for poly in mmcv.scandir(gt_dir, '_polygons.json', recursive=True):
poly_file = osp.join(gt_dir, poly)
poly_files.append(poly_file)
if args.nproc > 1:
mmcv.track_parallel_progress(convert_json_to_label, poly_files,
args.nproc)
else:
mmcv.track_progress(convert_json_to_label, poly_files)
split_names = ['train', 'val', 'test']
for split in split_names:
filenames = []
for poly in mmcv.scandir(
osp.join(gt_dir, split), '_polygons.json', recursive=True):
filenames.append(poly.replace('_gtFine_polygons.json', ''))
with open(osp.join(out_dir, f'{split}.txt'), 'w') as f:
f.writelines(f + '\n' for f in filenames)
if __name__ == '__main__':
main()
| 1,857 | 31.596491 | 75 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/coco_stuff10k.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import shutil
from functools import partial
import mmcv
import numpy as np
from PIL import Image
from scipy.io import loadmat
COCO_LEN = 10000
clsID_to_trID = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
11: 11,
13: 12,
14: 13,
15: 14,
16: 15,
17: 16,
18: 17,
19: 18,
20: 19,
21: 20,
22: 21,
23: 22,
24: 23,
25: 24,
27: 25,
28: 26,
31: 27,
32: 28,
33: 29,
34: 30,
35: 31,
36: 32,
37: 33,
38: 34,
39: 35,
40: 36,
41: 37,
42: 38,
43: 39,
44: 40,
46: 41,
47: 42,
48: 43,
49: 44,
50: 45,
51: 46,
52: 47,
53: 48,
54: 49,
55: 50,
56: 51,
57: 52,
58: 53,
59: 54,
60: 55,
61: 56,
62: 57,
63: 58,
64: 59,
65: 60,
67: 61,
70: 62,
72: 63,
73: 64,
74: 65,
75: 66,
76: 67,
77: 68,
78: 69,
79: 70,
80: 71,
81: 72,
82: 73,
84: 74,
85: 75,
86: 76,
87: 77,
88: 78,
89: 79,
90: 80,
92: 81,
93: 82,
94: 83,
95: 84,
96: 85,
97: 86,
98: 87,
99: 88,
100: 89,
101: 90,
102: 91,
103: 92,
104: 93,
105: 94,
106: 95,
107: 96,
108: 97,
109: 98,
110: 99,
111: 100,
112: 101,
113: 102,
114: 103,
115: 104,
116: 105,
117: 106,
118: 107,
119: 108,
120: 109,
121: 110,
122: 111,
123: 112,
124: 113,
125: 114,
126: 115,
127: 116,
128: 117,
129: 118,
130: 119,
131: 120,
132: 121,
133: 122,
134: 123,
135: 124,
136: 125,
137: 126,
138: 127,
139: 128,
140: 129,
141: 130,
142: 131,
143: 132,
144: 133,
145: 134,
146: 135,
147: 136,
148: 137,
149: 138,
150: 139,
151: 140,
152: 141,
153: 142,
154: 143,
155: 144,
156: 145,
157: 146,
158: 147,
159: 148,
160: 149,
161: 150,
162: 151,
163: 152,
164: 153,
165: 154,
166: 155,
167: 156,
168: 157,
169: 158,
170: 159,
171: 160,
172: 161,
173: 162,
174: 163,
175: 164,
176: 165,
177: 166,
178: 167,
179: 168,
180: 169,
181: 170,
182: 171
}
def convert_to_trainID(tuple_path, in_img_dir, in_ann_dir, out_img_dir,
out_mask_dir, is_train):
imgpath, maskpath = tuple_path
shutil.copyfile(
osp.join(in_img_dir, imgpath),
osp.join(out_img_dir, 'train2014', imgpath) if is_train else osp.join(
out_img_dir, 'test2014', imgpath))
annotate = loadmat(osp.join(in_ann_dir, maskpath))
mask = annotate['S'].astype(np.uint8)
mask_copy = mask.copy()
for clsID, trID in clsID_to_trID.items():
mask_copy[mask == clsID] = trID
seg_filename = osp.join(out_mask_dir, 'train2014',
maskpath.split('.')[0] +
'_labelTrainIds.png') if is_train else osp.join(
out_mask_dir, 'test2014',
maskpath.split('.')[0] + '_labelTrainIds.png')
Image.fromarray(mask_copy).save(seg_filename, 'PNG')
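# A vectorized alternative to the per-class loop above (a sketch, not used
# by this script): build a 256-entry look-up table once and remap every
# pixel with a single indexing operation, which avoids one full-image pass
# per class.
#
#   lut = np.arange(256, dtype=np.uint8)
#   for clsID, trID in clsID_to_trID.items():
#       lut[clsID] = trID
#   mask_copy = lut[mask]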
def generate_coco_list(folder):
train_list = osp.join(folder, 'imageLists', 'train.txt')
test_list = osp.join(folder, 'imageLists', 'test.txt')
train_paths = []
test_paths = []
with open(train_list) as f:
for filename in f:
basename = filename.strip()
imgpath = basename + '.jpg'
maskpath = basename + '.mat'
train_paths.append((imgpath, maskpath))
with open(test_list) as f:
for filename in f:
basename = filename.strip()
imgpath = basename + '.jpg'
maskpath = basename + '.mat'
test_paths.append((imgpath, maskpath))
return train_paths, test_paths
def parse_args():
parser = argparse.ArgumentParser(
        description='Convert COCO Stuff 10k annotations to '
        'mmsegmentation format')
parser.add_argument('coco_path', help='coco stuff path')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument(
'--nproc', default=16, type=int, help='number of process')
args = parser.parse_args()
return args
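# Example (assumes COCO Stuff 10k is unpacked under data/coco_stuff10k):
#   python tools/convert_datasets/coco_stuff10k.py data/coco_stuff10k \
#       --nproc 8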
def main():
args = parse_args()
coco_path = args.coco_path
nproc = args.nproc
out_dir = args.out_dir or coco_path
out_img_dir = osp.join(out_dir, 'images')
out_mask_dir = osp.join(out_dir, 'annotations')
mmcv.mkdir_or_exist(osp.join(out_img_dir, 'train2014'))
mmcv.mkdir_or_exist(osp.join(out_img_dir, 'test2014'))
mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'train2014'))
mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'test2014'))
train_list, test_list = generate_coco_list(coco_path)
assert (len(train_list) +
len(test_list)) == COCO_LEN, 'Wrong length of list {} & {}'.format(
len(train_list), len(test_list))
if args.nproc > 1:
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
in_img_dir=osp.join(coco_path, 'images'),
in_ann_dir=osp.join(coco_path, 'annotations'),
out_img_dir=out_img_dir,
out_mask_dir=out_mask_dir,
is_train=True),
train_list,
nproc=nproc)
mmcv.track_parallel_progress(
partial(
convert_to_trainID,
in_img_dir=osp.join(coco_path, 'images'),
in_ann_dir=osp.join(coco_path, 'annotations'),
out_img_dir=out_img_dir,
out_mask_dir=out_mask_dir,
is_train=False),
test_list,
nproc=nproc)
else:
mmcv.track_progress(
partial(
convert_to_trainID,
in_img_dir=osp.join(coco_path, 'images'),
in_ann_dir=osp.join(coco_path, 'annotations'),
out_img_dir=out_img_dir,
out_mask_dir=out_mask_dir,
is_train=True), train_list)
mmcv.track_progress(
partial(
convert_to_trainID,
in_img_dir=osp.join(coco_path, 'images'),
in_ann_dir=osp.join(coco_path, 'annotations'),
out_img_dir=out_img_dir,
out_mask_dir=out_mask_dir,
is_train=False), test_list)
print('Done!')
if __name__ == '__main__':
main()
| 6,716 | 20.808442 | 79 | py |
mmsegmentation | mmsegmentation-master/tools/convert_datasets/coco_stuff164k.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import shutil
from functools import partial
from glob import glob
import mmcv
import numpy as np
from PIL import Image
COCO_LEN = 123287
clsID_to_trID = {
0: 0,
1: 1,
2: 2,
3: 3,
4: 4,
5: 5,
6: 6,
7: 7,
8: 8,
9: 9,
10: 10,
12: 11,
13: 12,
14: 13,
15: 14,
16: 15,
17: 16,
18: 17,
19: 18,
20: 19,
21: 20,
22: 21,
23: 22,
24: 23,
26: 24,
27: 25,
30: 26,
31: 27,
32: 28,
33: 29,
34: 30,
35: 31,
36: 32,
37: 33,
38: 34,
39: 35,
40: 36,
41: 37,
42: 38,
43: 39,
45: 40,
46: 41,
47: 42,
48: 43,
49: 44,
50: 45,
51: 46,
52: 47,
53: 48,
54: 49,
55: 50,
56: 51,
57: 52,
58: 53,
59: 54,
60: 55,
61: 56,
62: 57,
63: 58,
64: 59,
66: 60,
69: 61,
71: 62,
72: 63,
73: 64,
74: 65,
75: 66,
76: 67,
77: 68,
78: 69,
79: 70,
80: 71,
81: 72,
83: 73,
84: 74,
85: 75,
86: 76,
87: 77,
88: 78,
89: 79,
91: 80,
92: 81,
93: 82,
94: 83,
95: 84,
96: 85,
97: 86,
98: 87,
99: 88,
100: 89,
101: 90,
102: 91,
103: 92,
104: 93,
105: 94,
106: 95,
107: 96,
108: 97,
109: 98,
110: 99,
111: 100,
112: 101,
113: 102,
114: 103,
115: 104,
116: 105,
117: 106,
118: 107,
119: 108,
120: 109,
121: 110,
122: 111,
123: 112,
124: 113,
125: 114,
126: 115,
127: 116,
128: 117,
129: 118,
130: 119,
131: 120,
132: 121,
133: 122,
134: 123,
135: 124,
136: 125,
137: 126,
138: 127,
139: 128,
140: 129,
141: 130,
142: 131,
143: 132,
144: 133,
145: 134,
146: 135,
147: 136,
148: 137,
149: 138,
150: 139,
151: 140,
152: 141,
153: 142,
154: 143,
155: 144,
156: 145,
157: 146,
158: 147,
159: 148,
160: 149,
161: 150,
162: 151,
163: 152,
164: 153,
165: 154,
166: 155,
167: 156,
168: 157,
169: 158,
170: 159,
171: 160,
172: 161,
173: 162,
174: 163,
175: 164,
176: 165,
177: 166,
178: 167,
179: 168,
180: 169,
181: 170,
255: 255
}
def convert_to_trainID(maskpath, out_mask_dir, is_train):
mask = np.array(Image.open(maskpath))
mask_copy = mask.copy()
for clsID, trID in clsID_to_trID.items():
mask_copy[mask == clsID] = trID
seg_filename = osp.join(
out_mask_dir, 'train2017',
osp.basename(maskpath).split('.')[0] +
'_labelTrainIds.png') if is_train else osp.join(
out_mask_dir, 'val2017',
osp.basename(maskpath).split('.')[0] + '_labelTrainIds.png')
Image.fromarray(mask_copy).save(seg_filename, 'PNG')
def parse_args():
parser = argparse.ArgumentParser(
        description='Convert COCO Stuff 164k annotations to '
        'mmsegmentation format')
parser.add_argument('coco_path', help='coco stuff path')
parser.add_argument('-o', '--out_dir', help='output path')
parser.add_argument(
'--nproc', default=16, type=int, help='number of process')
args = parser.parse_args()
return args
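# Example (assumes COCO Stuff 164k is unpacked under data/coco_stuff164k):
#   python tools/convert_datasets/coco_stuff164k.py data/coco_stuff164k \
#       --nproc 8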
def main():
args = parse_args()
coco_path = args.coco_path
nproc = args.nproc
out_dir = args.out_dir or coco_path
out_img_dir = osp.join(out_dir, 'images')
out_mask_dir = osp.join(out_dir, 'annotations')
mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'train2017'))
mmcv.mkdir_or_exist(osp.join(out_mask_dir, 'val2017'))
if out_dir != coco_path:
shutil.copytree(osp.join(coco_path, 'images'), out_img_dir)
train_list = glob(osp.join(coco_path, 'annotations', 'train2017', '*.png'))
train_list = [file for file in train_list if '_labelTrainIds' not in file]
test_list = glob(osp.join(coco_path, 'annotations', 'val2017', '*.png'))
test_list = [file for file in test_list if '_labelTrainIds' not in file]
assert (len(train_list) +
len(test_list)) == COCO_LEN, 'Wrong length of list {} & {}'.format(
len(train_list), len(test_list))
if args.nproc > 1:
mmcv.track_parallel_progress(
partial(
convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True),
train_list,
nproc=nproc)
mmcv.track_parallel_progress(
partial(
convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False),
test_list,
nproc=nproc)
else:
mmcv.track_progress(
partial(
convert_to_trainID, out_mask_dir=out_mask_dir, is_train=True),
train_list)
mmcv.track_progress(
partial(
convert_to_trainID, out_mask_dir=out_mask_dir, is_train=False),
test_list)
print('Done!')
if __name__ == '__main__':
main()
| 5,115 | 18.30566 | 79 | py |