import pytest
import torch
from mmcv.ops import DeformConv2dPack
from mmcv.utils.parrots_wrapper import _BatchNorm
from torch.nn.modules import AvgPool2d, GroupNorm

from mmseg.models.backbones import (CGNet, FastSCNN, MobileNetV3, ResNeSt,
                                    ResNet, ResNetV1d, ResNeXt)
from mmseg.models.backbones.cgnet import (ContextGuidedBlock,
                                          GlobalContextExtractor)
from mmseg.models.backbones.resnest import Bottleneck as BottleneckS
from mmseg.models.backbones.resnet import BasicBlock, Bottleneck
from mmseg.models.backbones.resnext import Bottleneck as BottleneckX
from mmseg.models.utils import ResLayer


def is_block(modules):
    """Check if `modules` is a ResNet building block."""
    if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX)):
        return True
    return False


def is_norm(modules):
    """Check if `modules` is a normalization layer."""
    if isinstance(modules, (GroupNorm, _BatchNorm)):
        return True
    return False


def all_zeros(modules):
    """Check if the weight (and bias, if any) of the module is all zero."""
    weight_zero = torch.allclose(modules.weight.data,
                                 torch.zeros_like(modules.weight.data))
    if hasattr(modules, 'bias'):
        bias_zero = torch.allclose(modules.bias.data,
                                   torch.zeros_like(modules.bias.data))
    else:
        bias_zero = True

    return weight_zero and bias_zero


def check_norm_state(modules, train_state):
    """Check if norm layer is in correct train state."""
    for mod in modules:
        if isinstance(mod, _BatchNorm):
            if mod.training != train_state:
                return False
    return True
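

# Note: the forward tests below use 224x224 inputs; ResNet-style stages have
# output strides 4, 8, 16 and 32, so the expected feature maps are 56x56,
# 28x28, 14x14 and 7x7 (224 / 4 = 56, ..., 224 / 32 = 7).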


def test_resnet_basic_block():
    with pytest.raises(AssertionError):
        # BasicBlock does not support DCN
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        BasicBlock(64, 64, dcn=dcn)

    with pytest.raises(AssertionError):
        # BasicBlock does not support plugins
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        BasicBlock(64, 64, plugins=plugins)

    with pytest.raises(AssertionError):
        # BasicBlock does not support plugins
        plugins = [
            dict(
                cfg=dict(
                    type='GeneralizedAttention',
                    spatial_range=-1,
                    num_heads=8,
                    attention_type='0010',
                    kv_stride=2),
                position='after_conv2')
        ]
        BasicBlock(64, 64, plugins=plugins)

    # test BasicBlock with checkpoint forward
    block = BasicBlock(16, 16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 16, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 16, 56, 56])

    # test BasicBlock structure and forward
    block = BasicBlock(64, 64)
    assert block.conv1.in_channels == 64
    assert block.conv1.out_channels == 64
    assert block.conv1.kernel_size == (3, 3)
    assert block.conv2.in_channels == 64
    assert block.conv2.out_channels == 64
    assert block.conv2.kernel_size == (3, 3)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
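

# Bottleneck.expansion is 4, so Bottleneck(inplanes, planes) outputs
# planes * 4 channels; e.g. Bottleneck(64, 16) maps 64 channels back to 64
# and needs no downsample branch.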


def test_resnet_bottleneck():
    with pytest.raises(AssertionError):
        # style must be 'pytorch' or 'caffe'
        Bottleneck(64, 64, style='tensorflow')

    with pytest.raises(AssertionError):
        # plugin position must be 'after_conv1', 'after_conv2'
        # or 'after_conv3'
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv4')
        ]
        Bottleneck(64, 16, plugins=plugins)

    with pytest.raises(AssertionError):
        # duplicate plugins need distinct postfixes
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3'),
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                position='after_conv3')
        ]
        Bottleneck(64, 16, plugins=plugins)

    with pytest.raises(KeyError):
        # plugin type must be registered
        plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
        Bottleneck(64, 16, plugins=plugins)

    # test Bottleneck with checkpoint forward
    block = Bottleneck(64, 16, with_cp=True)
    assert block.with_cp
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # test Bottleneck style: 'pytorch' puts the stride-2 conv on conv2,
    # 'caffe' puts it on conv1
    block = Bottleneck(64, 64, stride=2, style='pytorch')
    assert block.conv1.stride == (1, 1)
    assert block.conv2.stride == (2, 2)
    block = Bottleneck(64, 64, stride=2, style='caffe')
    assert block.conv1.stride == (2, 2)
    assert block.conv2.stride == (1, 1)

    # test Bottleneck DCN: conv_cfg must be None when dcn is given
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
    block = Bottleneck(64, 64, dcn=dcn)
    assert isinstance(block.conv2, DeformConv2dPack)

    # test Bottleneck forward
    block = Bottleneck(64, 16)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # test Bottleneck with 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # test Bottleneck with 1 GeneralizedAttention after conv2
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # test Bottleneck with 1 GeneralizedAttention after conv2,
    # 1 NonLocal2d after conv2, 1 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.gen_attention_block.in_channels == 16
    assert block.nonlocal_block.in_channels == 16
    assert block.context_block.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # test Bottleneck with 1 ContextBlock after conv2,
    # 2 ContextBlock after conv3
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
            position='after_conv3')
    ]
    block = Bottleneck(64, 16, plugins=plugins)
    assert block.context_block1.in_channels == 16
    assert block.context_block2.in_channels == 64
    assert block.context_block3.in_channels == 64
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])
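

# ResLayer(block, inplanes, planes, num_blocks) stacks num_blocks blocks and
# attaches a downsample branch to the first one whenever stride != 1 or
# inplanes != planes * block.expansion.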


def test_resnet_res_layer():
    # test ResLayer of 3 Bottleneck without downsample
    layer = ResLayer(Bottleneck, 64, 16, 3)
    assert len(layer) == 3
    assert layer[0].conv1.in_channels == 64
    assert layer[0].conv1.out_channels == 16
    for i in range(1, len(layer)):
        assert layer[i].conv1.in_channels == 64
        assert layer[i].conv1.out_channels == 16
    for i in range(len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # test ResLayer of 3 Bottleneck with downsample
    layer = ResLayer(Bottleneck, 64, 64, 3)
    assert layer[0].downsample[0].out_channels == 256
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 56, 56])

    # test ResLayer of 3 Bottleneck with stride=2
    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)
    assert layer[0].downsample[0].out_channels == 256
    assert layer[0].downsample[0].stride == (2, 2)
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 28, 28])

    # test ResLayer of 3 Bottleneck with stride=2 and average downsample
    layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)
    assert isinstance(layer[0].downsample[0], AvgPool2d)
    assert layer[0].downsample[1].out_channels == 256
    assert layer[0].downsample[1].stride == (1, 1)
    for i in range(1, len(layer)):
        assert layer[i].downsample is None
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 256, 28, 28])

    # test ResLayer of 3 Bottleneck with dilation=2
    layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2)
    for i in range(len(layer)):
        assert layer[i].conv2.dilation == (2, 2)
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # test ResLayer of 3 Bottleneck with dilation=2, contract_dilation=True
    layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2, contract_dilation=True)
    assert layer[0].conv2.dilation == (1, 1)
    for i in range(1, len(layer)):
        assert layer[i].conv2.dilation == (2, 2)
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])

    # test ResLayer of 3 Bottleneck with dilation=2, multi_grid=(1, 2, 4)
    layer = ResLayer(Bottleneck, 64, 16, 3, dilation=2, multi_grid=(1, 2, 4))
    assert layer[0].conv2.dilation == (1, 1)
    assert layer[1].conv2.dilation == (2, 2)
    assert layer[2].conv2.dilation == (4, 4)
    x = torch.randn(1, 64, 56, 56)
    x_out = layer(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])


def test_resnet_backbone():
    """Test ResNet backbone."""
    with pytest.raises(KeyError):
        # ResNet depth should be in (18, 34, 50, 101, 152)
        ResNet(20)

    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=0)

    with pytest.raises(AssertionError):
        # len(stage_with_dcn) == num_stages
        dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
        ResNet(50, dcn=dcn, stage_with_dcn=(True, ))

    with pytest.raises(AssertionError):
        # len(stages) == num_stages
        plugins = [
            dict(
                cfg=dict(type='ContextBlock', ratio=1. / 16),
                stages=(False, True, True),
                position='after_conv3')
        ]
        ResNet(50, plugins=plugins)

    with pytest.raises(AssertionError):
        # In ResNet: 1 <= num_stages <= 4
        ResNet(50, num_stages=5)

    with pytest.raises(AssertionError):
        # len(strides) == len(dilations) == num_stages
        ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)

    with pytest.raises(TypeError):
        # pretrained must be a str or None
        model = ResNet(50)
        model.init_weights(pretrained=0)

    with pytest.raises(AssertionError):
        # style must be 'pytorch' or 'caffe'
        ResNet(50, style='tensorflow')

    # test norm_eval=True: BN layers stay in eval mode after train()
    model = ResNet(50, norm_eval=True)
    model.init_weights()
    model.train()
    assert check_norm_state(model.modules(), False)

    # test norm_eval=True with pretrained weights
    model = ResNet(depth=50, norm_eval=True)
    model.init_weights('torchvision://resnet50')
    model.train()
    assert check_norm_state(model.modules(), False)

    # test frozen_stages: the stem and the frozen stages stay in eval mode
    # and their parameters do not require grad
    frozen_stages = 1
    model = ResNet(50, frozen_stages=frozen_stages)
    model.init_weights()
    model.train()
    assert model.norm1.training is False
    for layer in [model.conv1, model.norm1]:
        for param in layer.parameters():
            assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, 'layer{}'.format(i))
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # test frozen_stages with a deep stem (ResNetV1d)
    model = ResNetV1d(depth=50, frozen_stages=frozen_stages)
    assert len(model.stem) == 9
    model.init_weights()
    model.train()
    assert check_norm_state(model.stem, False)
    for param in model.stem.parameters():
        assert param.requires_grad is False
    for i in range(1, frozen_stages + 1):
        layer = getattr(model, 'layer{}'.format(i))
        for mod in layer.modules():
            if isinstance(mod, _BatchNorm):
                assert mod.training is False
        for param in layer.parameters():
            assert param.requires_grad is False

    # test ResNet-18 forward
    model = ResNet(18)
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 64, 56, 56])
    assert feat[1].shape == torch.Size([1, 128, 28, 28])
    assert feat[2].shape == torch.Size([1, 256, 14, 14])
    assert feat[3].shape == torch.Size([1, 512, 7, 7])

    # test ResNet-50 with BatchNorm forward
    model = ResNet(50)
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, _BatchNorm)
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # test ResNet-50 with the first three stages out
    model = ResNet(50, out_indices=(0, 1, 2))
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])

    # test ResNet-18 with checkpoint forward
    model = ResNet(18, with_cp=True)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 64, 56, 56])
    assert feat[1].shape == torch.Size([1, 128, 28, 28])
    assert feat[2].shape == torch.Size([1, 256, 14, 14])
    assert feat[3].shape == torch.Size([1, 512, 7, 7])

    # test ResNet-50 with checkpoint forward
    model = ResNet(50, with_cp=True)
    for m in model.modules():
        if is_block(m):
            assert m.with_cp
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # test ResNet-50 with GroupNorm forward
    model = ResNet(
        50, norm_cfg=dict(type='GN', num_groups=32, requires_grad=True))
    for m in model.modules():
        if is_norm(m):
            assert isinstance(m, GroupNorm)
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # test ResNet-50 with plugins: GeneralizedAttention after conv2 in
    # stages 2-4, NonLocal2d after conv2 in all stages, ContextBlock after
    # conv3 in stages 2 and 3
    plugins = [
        dict(
            cfg=dict(
                type='GeneralizedAttention',
                spatial_range=-1,
                num_heads=8,
                attention_type='0010',
                kv_stride=2),
            stages=(False, True, True, True),
            position='after_conv2'),
        dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16),
            stages=(False, True, True, False),
            position='after_conv3')
    ]
    model = ResNet(50, plugins=plugins)
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'gen_attention_block')
            assert m.nonlocal_block.in_channels == 64
    for m in model.layer2.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 128
            assert m.gen_attention_block.in_channels == 128
            assert m.context_block.in_channels == 512

    for m in model.layer3.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 256
            assert m.gen_attention_block.in_channels == 256
            assert m.context_block.in_channels == 1024

    for m in model.layer4.modules():
        if is_block(m):
            assert m.nonlocal_block.in_channels == 512
            assert m.gen_attention_block.in_channels == 512
            assert not hasattr(m, 'context_block')
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # test ResNet-50 with two ContextBlock plugins after conv3 in stages
    # 2 and 3, distinguished by postfix
    plugins = [
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
            stages=(False, True, True, False),
            position='after_conv3'),
        dict(
            cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
            stages=(False, True, True, False),
            position='after_conv3')
    ]

    model = ResNet(50, plugins=plugins)
    for m in model.layer1.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    for m in model.layer2.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 512
            assert m.context_block2.in_channels == 512

    for m in model.layer3.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert m.context_block1.in_channels == 1024
            assert m.context_block2.in_channels == 1024

    for m in model.layer4.modules():
        if is_block(m):
            assert not hasattr(m, 'context_block')
            assert not hasattr(m, 'context_block1')
            assert not hasattr(m, 'context_block2')
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # test zero_init_residual: the last norm of each residual block is
    # zero-initialized
    model = ResNet(50, zero_init_residual=True)
    model.init_weights()
    for m in model.modules():
        if isinstance(m, Bottleneck):
            assert all_zeros(m.norm3)
        elif isinstance(m, BasicBlock):
            assert all_zeros(m.norm2)
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])

    # test ResNetV1d forward
    model = ResNetV1d(depth=50)
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])
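

# ResNeXt width arithmetic: conv2 width = planes * groups * base_width / 64,
# e.g. 64 * 32 * 4 / 64 = 128, which the conv2 asserts below rely on.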


def test_resnext_bottleneck():
    with pytest.raises(AssertionError):
        # style must be 'pytorch' or 'caffe'
        BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')

    # test ResNeXt Bottleneck structure
    block = BottleneckX(
        64, 64, groups=32, base_width=4, stride=2, style='pytorch')
    assert block.conv2.stride == (2, 2)
    assert block.conv2.groups == 32
    assert block.conv2.out_channels == 128

    # test ResNeXt Bottleneck with DCN: conv_cfg must be None when dcn is
    # given
    dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
    with pytest.raises(AssertionError):
        BottleneckX(
            64,
            64,
            groups=32,
            base_width=4,
            dcn=dcn,
            conv_cfg=dict(type='Conv'))
    BottleneckX(64, 64, dcn=dcn)

    # test ResNeXt Bottleneck forward
    block = BottleneckX(64, 16, groups=32, base_width=4)
    x = torch.randn(1, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 64, 56, 56])


def test_resnext_backbone():
    with pytest.raises(KeyError):
        # ResNeXt depth should be in (50, 101, 152)
        ResNeXt(depth=18)

    # test ResNeXt with groups=32, base_width=4
    model = ResNeXt(depth=50, groups=32, base_width=4)
    for m in model.modules():
        if is_block(m):
            assert m.conv2.groups == 32
    model.init_weights()
    model.train()

    imgs = torch.randn(1, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([1, 256, 56, 56])
    assert feat[1].shape == torch.Size([1, 512, 28, 28])
    assert feat[2].shape == torch.Size([1, 1024, 14, 14])
    assert feat[3].shape == torch.Size([1, 2048, 7, 7])
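

# Fast-SCNN returns (higher_res_features, lower_res_features, fusion_output);
# with 512x1024 inputs, output strides 8, 32 and 8 give the 64x128, 16x32
# and 64x128 maps asserted below.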


def test_fastscnn_backbone():
    with pytest.raises(AssertionError):
        # Fast-SCNN channel constraint: global_out_channels (127) must match
        # lower_in_channels (128) of the feature fusion module
        FastSCNN(
            3, (32, 48),
            64, (64, 96, 128), (2, 2, 1),
            global_out_channels=127,
            higher_in_channels=64,
            lower_in_channels=128)

    # test FastSCNN forward
    model = FastSCNN()
    model.init_weights()
    model.train()
    batch_size = 4
    imgs = torch.randn(batch_size, 3, 512, 1024)
    feat = model(imgs)

    assert len(feat) == 3
    # higher-res features
    assert feat[0].shape == torch.Size([batch_size, 64, 64, 128])
    # lower-res features
    assert feat[1].shape == torch.Size([batch_size, 128, 16, 32])
    # FFM output
    assert feat[2].shape == torch.Size([batch_size, 128, 64, 128])


def test_resnest_bottleneck():
    with pytest.raises(AssertionError):
        # style must be 'pytorch' or 'caffe'
        BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')

    # test ResNeSt Bottleneck structure
    block = BottleneckS(
        64, 256, radix=2, reduction_factor=4, stride=2, style='pytorch')
    assert block.avd_layer.stride == 2
    assert block.conv2.channels == 256

    # test ResNeSt Bottleneck forward
    block = BottleneckS(64, 16, radix=2, reduction_factor=4)
    x = torch.randn(2, 64, 56, 56)
    x_out = block(x)
    assert x_out.shape == torch.Size([2, 64, 56, 56])


def test_resnest_backbone():
    with pytest.raises(KeyError):
        # ResNeSt depth should be in (50, 101, 152, 200)
        ResNeSt(depth=18)

    # test ResNeSt with radix=2, reduction_factor=4
    model = ResNeSt(
        depth=50, radix=2, reduction_factor=4, out_indices=(0, 1, 2, 3))
    model.init_weights()
    model.train()

    imgs = torch.randn(2, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 4
    assert feat[0].shape == torch.Size([2, 256, 56, 56])
    assert feat[1].shape == torch.Size([2, 512, 28, 28])
    assert feat[2].shape == torch.Size([2, 1024, 14, 14])
    assert feat[3].shape == torch.Size([2, 2048, 7, 7])
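

# CGNet channel arithmetic (assuming the usual input-injection design):
# stage 1 concatenates the input image, 32 + 3 = 35 channels; stage 2 gives
# 2 * 64 + 3 = 131; the final stage gives 2 * 128 = 256.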


def test_cgnet_GlobalContextExtractor():
    block = GlobalContextExtractor(16, 16, with_cp=True)
    x = torch.randn(2, 16, 64, 64, requires_grad=True)
    x_out = block(x)
    assert x_out.shape == torch.Size([2, 16, 64, 64])


def test_cgnet_context_guided_block():
    with pytest.raises(AssertionError):
        # channels must be large enough for the default reduction (16)
        # of the inner GlobalContextExtractor
        ContextGuidedBlock(8, 8)

    # test cgnet ContextGuidedBlock with checkpoint forward
    block = ContextGuidedBlock(
        16, 16, act_cfg=dict(type='PReLU'), with_cp=True)
    assert block.with_cp
    x = torch.randn(2, 16, 64, 64, requires_grad=True)
    x_out = block(x)
    assert x_out.shape == torch.Size([2, 16, 64, 64])

    # test cgnet ContextGuidedBlock without checkpoint forward
    block = ContextGuidedBlock(32, 32)
    assert not block.with_cp
    x = torch.randn(3, 32, 32, 32)
    x_out = block(x)
    assert x_out.shape == torch.Size([3, 32, 32, 32])

    # test cgnet ContextGuidedBlock with downsample
    block = ContextGuidedBlock(32, 32, downsample=True)
    assert block.conv1x1.conv.in_channels == 32
    assert block.conv1x1.conv.out_channels == 32
    assert block.conv1x1.conv.kernel_size == (3, 3)
    assert block.conv1x1.conv.stride == (2, 2)
    assert block.conv1x1.conv.padding == (1, 1)

    assert block.f_loc.in_channels == 32
    assert block.f_loc.out_channels == 32
    assert block.f_loc.kernel_size == (3, 3)
    assert block.f_loc.stride == (1, 1)
    assert block.f_loc.padding == (1, 1)
    assert block.f_loc.groups == 32
    assert block.f_loc.dilation == (1, 1)
    assert block.f_loc.bias is None

    assert block.f_sur.in_channels == 32
    assert block.f_sur.out_channels == 32
    assert block.f_sur.kernel_size == (3, 3)
    assert block.f_sur.stride == (1, 1)
    assert block.f_sur.padding == (2, 2)
    assert block.f_sur.groups == 32
    assert block.f_sur.dilation == (2, 2)
    assert block.f_sur.bias is None

    assert block.bottleneck.in_channels == 64
    assert block.bottleneck.out_channels == 32
    assert block.bottleneck.kernel_size == (1, 1)
    assert block.bottleneck.stride == (1, 1)
    assert block.bottleneck.bias is None

    x = torch.randn(1, 32, 32, 32)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 32, 16, 16])

    # test cgnet ContextGuidedBlock without downsample
    block = ContextGuidedBlock(32, 32, downsample=False)
    assert block.conv1x1.conv.in_channels == 32
    assert block.conv1x1.conv.out_channels == 16
    assert block.conv1x1.conv.kernel_size == (1, 1)
    assert block.conv1x1.conv.stride == (1, 1)
    assert block.conv1x1.conv.padding == (0, 0)

    assert block.f_loc.in_channels == 16
    assert block.f_loc.out_channels == 16
    assert block.f_loc.kernel_size == (3, 3)
    assert block.f_loc.stride == (1, 1)
    assert block.f_loc.padding == (1, 1)
    assert block.f_loc.groups == 16
    assert block.f_loc.dilation == (1, 1)
    assert block.f_loc.bias is None

    assert block.f_sur.in_channels == 16
    assert block.f_sur.out_channels == 16
    assert block.f_sur.kernel_size == (3, 3)
    assert block.f_sur.stride == (1, 1)
    assert block.f_sur.padding == (2, 2)
    assert block.f_sur.groups == 16
    assert block.f_sur.dilation == (2, 2)
    assert block.f_sur.bias is None

    x = torch.randn(1, 32, 32, 32)
    x_out = block(x)
    assert x_out.shape == torch.Size([1, 32, 32, 32])


def test_cgnet_backbone():
    with pytest.raises(AssertionError):
        # num_channels must be a tuple of 3 elements
        CGNet(num_channels=(32, 64, 128, 256))

    with pytest.raises(AssertionError):
        # num_blocks must be a tuple of 2 elements
        CGNet(num_blocks=(3, 21, 3))

    with pytest.raises(AssertionError):
        # num_blocks must be a tuple, not int
        CGNet(num_blocks=2)

    with pytest.raises(AssertionError):
        # reductions must be a tuple, not int
        CGNet(reductions=16)

    with pytest.raises(AssertionError):
        # each reduction must not exceed the channels of its stage
        CGNet(num_channels=(32, 64, 128), reductions=(64, 129))

    # test CGNet forward
    model = CGNet()
    model.init_weights()
    model.train()

    imgs = torch.randn(2, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size([2, 35, 112, 112])
    assert feat[1].shape == torch.Size([2, 131, 56, 56])
    assert feat[2].shape == torch.Size([2, 256, 28, 28])

    # test CGNet with norm_eval=True and with_cp=True
    model = CGNet(norm_eval=True, with_cp=True)
    with pytest.raises(TypeError):
        # pretrained must be a str or None
        model.init_weights(pretrained=8)
    model.init_weights()
    model.train()

    imgs = torch.randn(2, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == torch.Size([2, 35, 112, 112])
    assert feat[1].shape == torch.Size([2, 131, 56, 56])
    assert feat[2].shape == torch.Size([2, 256, 28, 28])


def test_mobilenet_v3():
    with pytest.raises(AssertionError):
        # arch must be 'small' or 'large'
        MobileNetV3('big')

    with pytest.raises(AssertionError):
        # reduction_factor must be a positive integer
        MobileNetV3(reduction_factor=0)

    with pytest.raises(ValueError):
        # out_indices must be valid layer indices
        MobileNetV3(out_indices=(0, 1, 15))

    with pytest.raises(ValueError):
        # frozen_stages must be a valid stage index
        MobileNetV3(frozen_stages=15)

    with pytest.raises(TypeError):
        # pretrained must be a str or None
        model = MobileNetV3()
        model.init_weights(pretrained=8)

    # test MobileNetV3 with default settings (arch='small')
    model = MobileNetV3()
    model.init_weights()
    model.train()

    imgs = torch.randn(2, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == (2, 16, 112, 112)
    assert feat[1].shape == (2, 16, 56, 56)
    assert feat[2].shape == (2, 576, 28, 28)

    # test MobileNetV3 with arch='large'
    model = MobileNetV3(arch='large', out_indices=(1, 3, 16))
    model.init_weights()
    model.train()

    imgs = torch.randn(2, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == (2, 16, 112, 112)
    assert feat[1].shape == (2, 24, 56, 56)
    assert feat[2].shape == (2, 960, 28, 28)

    # test MobileNetV3 with norm_eval=True, with_cp=True and frozen_stages=5
    model = MobileNetV3(norm_eval=True, with_cp=True, frozen_stages=5)
    with pytest.raises(TypeError):
        # pretrained must be a str or None
        model.init_weights(pretrained=8)
    model.init_weights()
    model.train()

    imgs = torch.randn(2, 3, 224, 224)
    feat = model(imgs)
    assert len(feat) == 3
    assert feat[0].shape == (2, 16, 112, 112)
    assert feat[1].shape == (2, 16, 56, 56)
    assert feat[2].shape == (2, 576, 28, 28)