# File: mmdetection-master/tests/test_models/test_backbones/test_mobilenet_v2.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.mobilenet_v2 import MobileNetV2
from .utils import check_norm_state, is_block, is_norm
def test_mobilenetv2_backbone():
with pytest.raises(ValueError):
        # frozen_stages must be in range(-1, 8)
MobileNetV2(frozen_stages=8)
with pytest.raises(ValueError):
        # out_indices must be in range(0, 8)
MobileNetV2(out_indices=[8])
# Test MobileNetV2 with first stage frozen
frozen_stages = 1
model = MobileNetV2(frozen_stages=frozen_stages)
model.train()
for mod in model.conv1.modules():
for param in mod.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test MobileNetV2 with norm_eval=True
model = MobileNetV2(norm_eval=True)
model.train()
assert check_norm_state(model.modules(), False)
# Test MobileNetV2 forward with widen_factor=1.0
model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 8))
model.train()
assert check_norm_state(model.modules(), True)
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 8
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
assert feat[7].shape == torch.Size((1, 1280, 7, 7))
# Test MobileNetV2 forward with widen_factor=0.5
model = MobileNetV2(widen_factor=0.5, out_indices=range(0, 7))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 8, 112, 112))
assert feat[1].shape == torch.Size((1, 16, 56, 56))
assert feat[2].shape == torch.Size((1, 16, 28, 28))
assert feat[3].shape == torch.Size((1, 32, 14, 14))
assert feat[4].shape == torch.Size((1, 48, 14, 14))
assert feat[5].shape == torch.Size((1, 80, 7, 7))
assert feat[6].shape == torch.Size((1, 160, 7, 7))
# Test MobileNetV2 forward with widen_factor=2.0
model = MobileNetV2(widen_factor=2.0, out_indices=range(0, 8))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert feat[0].shape == torch.Size((1, 32, 112, 112))
assert feat[1].shape == torch.Size((1, 48, 56, 56))
assert feat[2].shape == torch.Size((1, 64, 28, 28))
assert feat[3].shape == torch.Size((1, 128, 14, 14))
assert feat[4].shape == torch.Size((1, 192, 14, 14))
assert feat[5].shape == torch.Size((1, 320, 7, 7))
assert feat[6].shape == torch.Size((1, 640, 7, 7))
assert feat[7].shape == torch.Size((1, 2560, 7, 7))
# Test MobileNetV2 forward with dict(type='ReLU')
model = MobileNetV2(
widen_factor=1.0, act_cfg=dict(type='ReLU'), out_indices=range(0, 7))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
# Test MobileNetV2 with BatchNorm forward
model = MobileNetV2(widen_factor=1.0, out_indices=range(0, 7))
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
# Test MobileNetV2 with GroupNorm forward
model = MobileNetV2(
widen_factor=1.0,
norm_cfg=dict(type='GN', num_groups=2, requires_grad=True),
out_indices=range(0, 7))
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
# Test MobileNetV2 with layers 1, 3, 5 out forward
model = MobileNetV2(widen_factor=1.0, out_indices=(0, 2, 4))
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 32, 28, 28))
assert feat[2].shape == torch.Size((1, 96, 14, 14))
# Test MobileNetV2 with checkpoint forward
model = MobileNetV2(
widen_factor=1.0, with_cp=True, out_indices=range(0, 7))
for m in model.modules():
if is_block(m):
assert m.with_cp
model.train()
imgs = torch.randn(1, 3, 224, 224)
feat = model(imgs)
assert len(feat) == 7
assert feat[0].shape == torch.Size((1, 16, 112, 112))
assert feat[1].shape == torch.Size((1, 24, 56, 56))
assert feat[2].shape == torch.Size((1, 32, 28, 28))
assert feat[3].shape == torch.Size((1, 64, 14, 14))
assert feat[4].shape == torch.Size((1, 96, 14, 14))
assert feat[5].shape == torch.Size((1, 160, 7, 7))
assert feat[6].shape == torch.Size((1, 320, 7, 7))
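# Illustrative usage sketch, not from the original test suite: a minimal
# forward pass outside the test harness, using only the MobileNetV2 API
# exercised above. The out_indices chosen here are an arbitrary assumption
# picking one map each at strides 8, 16 and 32.
def _demo_mobilenetv2_usage():
    backbone = MobileNetV2(widen_factor=1.0, out_indices=(2, 4, 7))
    backbone.eval()
    with torch.no_grad():
        feats = backbone(torch.randn(1, 3, 224, 224))
    # Expected shapes per the assertions above:
    # (1, 32, 28, 28), (1, 96, 14, 14), (1, 1280, 7, 7)
    return [f.shape for f in feats]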

# File: mmdetection-master/tests/test_models/test_backbones/test_pvt.py
import pytest
import torch
from mmdet.models.backbones.pvt import (PVTEncoderLayer,
PyramidVisionTransformer,
PyramidVisionTransformerV2)
def test_pvt_block():
# test PVT structure and forward
block = PVTEncoderLayer(
embed_dims=64, num_heads=4, feedforward_channels=256)
assert block.ffn.embed_dims == 64
assert block.attn.num_heads == 4
assert block.ffn.feedforward_channels == 256
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
def test_pvt():
"""Test PVT backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
PyramidVisionTransformer(pretrained=123)
# test pretrained image size
with pytest.raises(AssertionError):
PyramidVisionTransformer(pretrain_img_size=(224, 224, 224))
# Test absolute position embedding
temp = torch.randn((1, 3, 224, 224))
model = PyramidVisionTransformer(
pretrain_img_size=224, use_abs_pos_embed=True)
model.init_weights()
model(temp)
# Test normal inference
temp = torch.randn((1, 3, 32, 32))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 33, 33))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = PyramidVisionTransformer()
outs = model(temp)
assert outs[0].shape == (1, 64, 28, 34)
assert outs[1].shape == (1, 128, 14, 17)
assert outs[2].shape == (1, 320, 7, 8)
assert outs[3].shape == (1, 512, 3, 4)
def test_pvtv2():
"""Test PVTv2 backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
PyramidVisionTransformerV2(pretrained=123)
# test pretrained image size
with pytest.raises(AssertionError):
PyramidVisionTransformerV2(pretrain_img_size=(224, 224, 224))
# Test normal inference
temp = torch.randn((1, 3, 32, 32))
model = PyramidVisionTransformerV2()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 31, 31))
model = PyramidVisionTransformerV2()
outs = model(temp)
assert outs[0].shape == (1, 64, 8, 8)
assert outs[1].shape == (1, 128, 4, 4)
assert outs[2].shape == (1, 320, 2, 2)
assert outs[3].shape == (1, 512, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = PyramidVisionTransformerV2()
outs = model(temp)
assert outs[0].shape == (1, 64, 28, 35)
assert outs[1].shape == (1, 128, 14, 18)
assert outs[2].shape == (1, 320, 7, 9)
assert outs[3].shape == (1, 512, 4, 5)
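# Illustrative usage sketch, not from the original test suite: a minimal
# forward pass through a default PVTv2 using only the API exercised above.
def _demo_pvtv2_usage():
    model = PyramidVisionTransformerV2()
    model.eval()
    with torch.no_grad():
        outs = model(torch.randn(1, 3, 32, 32))
    # Four pyramid levels with 64, 128, 320 and 512 channels, as asserted
    # in test_pvtv2 above.
    return [o.shape for o in outs]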

# File: mmdetection-master/tests/test_models/test_backbones/test_regnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import RegNet
regnet_test_data = [
('regnetx_400mf',
dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22,
bot_mul=1.0), [32, 64, 160, 384]),
('regnetx_800mf',
dict(w0=56, wa=35.73, wm=2.28, group_w=16, depth=16,
bot_mul=1.0), [64, 128, 288, 672]),
('regnetx_1.6gf',
dict(w0=80, wa=34.01, wm=2.25, group_w=24, depth=18,
bot_mul=1.0), [72, 168, 408, 912]),
('regnetx_3.2gf',
dict(w0=88, wa=26.31, wm=2.25, group_w=48, depth=25,
bot_mul=1.0), [96, 192, 432, 1008]),
('regnetx_4.0gf',
dict(w0=96, wa=38.65, wm=2.43, group_w=40, depth=23,
bot_mul=1.0), [80, 240, 560, 1360]),
('regnetx_6.4gf',
dict(w0=184, wa=60.83, wm=2.07, group_w=56, depth=17,
bot_mul=1.0), [168, 392, 784, 1624]),
('regnetx_8.0gf',
dict(w0=80, wa=49.56, wm=2.88, group_w=120, depth=23,
bot_mul=1.0), [80, 240, 720, 1920]),
('regnetx_12gf',
dict(w0=168, wa=73.36, wm=2.37, group_w=112, depth=19,
bot_mul=1.0), [224, 448, 896, 2240]),
]
@pytest.mark.parametrize('arch_name,arch,out_channels', regnet_test_data)
def test_regnet_backbone(arch_name, arch, out_channels):
with pytest.raises(AssertionError):
        # arch_name must match a predefined RegNet architecture name
RegNet(arch_name + '233')
# Test RegNet with arch_name
model = RegNet(arch_name)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
    # Test RegNet with an explicit arch parameter dict
    model = RegNet(arch)
    model.train()
    feat = model(imgs)
    assert len(feat) == 4
assert feat[0].shape == torch.Size([1, out_channels[0], 8, 8])
assert feat[1].shape == torch.Size([1, out_channels[1], 4, 4])
assert feat[2].shape == torch.Size([1, out_channels[2], 2, 2])
assert feat[3].shape == torch.Size([1, out_channels[3], 1, 1])
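# Illustrative usage sketch, not from the original test suite: RegNet
# accepts either a predefined arch name or an explicit parameter dict,
# mirroring the two code paths tested above.
def _demo_regnet_usage():
    arch = dict(w0=24, wa=24.48, wm=2.54, group_w=16, depth=22, bot_mul=1.0)
    model = RegNet(arch)  # equivalent to RegNet('regnetx_400mf') per the data above
    model.eval()
    with torch.no_grad():
        feat = model(torch.randn(1, 3, 32, 32))
    return [f.shape for f in feat]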

# File: mmdetection-master/tests/test_models/test_backbones/test_renext.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeXt
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from .utils import is_block
def test_renext_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckX(64, 64, groups=32, base_width=4, style='tensorflow')
# Test ResNeXt Bottleneck structure
block = BottleneckX(
64, 64, groups=32, base_width=4, stride=2, style='pytorch')
assert block.conv2.stride == (2, 2)
assert block.conv2.groups == 32
assert block.conv2.out_channels == 128
# Test ResNeXt Bottleneck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
BottleneckX(
64,
64,
groups=32,
base_width=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
BottleneckX(64, 64, dcn=dcn)
# Test ResNeXt Bottleneck forward
block = BottleneckX(64, 16, groups=32, base_width=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResNeXt Bottleneck forward with plugins
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, False, True, True),
position='after_conv2')
]
block = BottleneckX(64, 16, groups=32, base_width=4, plugins=plugins)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnext_backbone():
with pytest.raises(KeyError):
# ResNeXt depth should be in [50, 101, 152]
ResNeXt(depth=18)
# Test ResNeXt with group 32, base_width 4
model = ResNeXt(depth=50, groups=32, base_width=4)
for m in model.modules():
if is_block(m):
assert m.conv2.groups == 32
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 8, 8])
assert feat[1].shape == torch.Size([1, 512, 4, 4])
assert feat[2].shape == torch.Size([1, 1024, 2, 2])
assert feat[3].shape == torch.Size([1, 2048, 1, 1])
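# Illustrative usage sketch, not from the original test suite: a grouped
# ResNeXt-50 (32x4d) forward pass using the constructor arguments
# exercised above.
def _demo_resnext_usage():
    model = ResNeXt(depth=50, groups=32, base_width=4)
    model.eval()
    with torch.no_grad():
        feat = model(torch.randn(1, 3, 32, 32))
    # Standard ResNet-style channel progression: 256, 512, 1024, 2048.
    return [f.shape for f in feat]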

# File: mmdetection-master/tests/test_models/test_backbones/test_res2net.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import Res2Net
from mmdet.models.backbones.res2net import Bottle2neck
from .utils import is_block
def test_res2net_bottle2neck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
Bottle2neck(64, 64, base_width=26, scales=4, style='tensorflow')
with pytest.raises(AssertionError):
# Scale must be larger than 1
Bottle2neck(64, 64, base_width=26, scales=1, style='pytorch')
# Test Res2Net Bottle2neck structure
block = Bottle2neck(
64, 64, base_width=26, stride=2, scales=4, style='pytorch')
assert block.scales == 4
# Test Res2Net Bottle2neck with DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
# conv_cfg must be None if dcn is not None
Bottle2neck(
64,
64,
base_width=26,
scales=4,
dcn=dcn,
conv_cfg=dict(type='Conv'))
Bottle2neck(64, 64, dcn=dcn)
# Test Res2Net Bottle2neck forward
block = Bottle2neck(64, 16, base_width=26, scales=4)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_res2net_backbone():
with pytest.raises(KeyError):
# Res2Net depth should be in [50, 101, 152]
Res2Net(depth=18)
# Test Res2Net with scales 4, base_width 26
model = Res2Net(depth=50, scales=4, base_width=26)
for m in model.modules():
if is_block(m):
assert m.scales == 4
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 256, 8, 8])
assert feat[1].shape == torch.Size([1, 512, 4, 4])
assert feat[2].shape == torch.Size([1, 1024, 2, 2])
assert feat[3].shape == torch.Size([1, 2048, 1, 1])
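# Illustrative usage sketch, not from the original test suite: Res2Net-50
# with the scales=4, base_width=26 configuration used in the test above.
def _demo_res2net_usage():
    model = Res2Net(depth=50, scales=4, base_width=26)
    model.eval()
    with torch.no_grad():
        feat = model(torch.randn(1, 3, 32, 32))
    return [f.shape for f in feat]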

# File: mmdetection-master/tests/test_models/test_backbones/test_resnest.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import ResNeSt
from mmdet.models.backbones.resnest import Bottleneck as BottleneckS
def test_resnest_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
BottleneckS(64, 64, radix=2, reduction_factor=4, style='tensorflow')
# Test ResNeSt Bottleneck structure
block = BottleneckS(
2, 4, radix=2, reduction_factor=4, stride=2, style='pytorch')
assert block.avd_layer.stride == 2
assert block.conv2.channels == 4
# Test ResNeSt Bottleneck forward
block = BottleneckS(16, 4, radix=2, reduction_factor=4)
x = torch.randn(2, 16, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([2, 16, 56, 56])
def test_resnest_backbone():
with pytest.raises(KeyError):
# ResNeSt depth should be in [50, 101, 152, 200]
ResNeSt(depth=18)
# Test ResNeSt with radix 2, reduction_factor 4
model = ResNeSt(
depth=50,
base_channels=4,
radix=2,
reduction_factor=4,
out_indices=(0, 1, 2, 3))
model.train()
imgs = torch.randn(2, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([2, 16, 8, 8])
assert feat[1].shape == torch.Size([2, 32, 4, 4])
assert feat[2].shape == torch.Size([2, 64, 2, 2])
assert feat[3].shape == torch.Size([2, 128, 1, 1])
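# Illustrative usage sketch, not from the original test suite: ResNeSt-50
# with split attention (radix=2), kept small via base_channels exactly as
# in the test above.
def _demo_resnest_usage():
    model = ResNeSt(
        depth=50,
        base_channels=4,
        radix=2,
        reduction_factor=4,
        out_indices=(0, 1, 2, 3))
    model.eval()
    with torch.no_grad():
        feat = model(torch.randn(2, 3, 32, 32))
    return [f.shape for f in feat]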

# File: mmdetection-master/tests/test_models/test_backbones/test_resnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv import assert_params_all_zeros
from mmcv.ops import DeformConv2dPack
from torch.nn.modules import AvgPool2d, GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones import ResNet, ResNetV1d
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.utils import ResLayer, SimplifiedBasicBlock
from .utils import check_norm_state, is_block, is_norm
def test_resnet_basic_block():
with pytest.raises(AssertionError):
# Not implemented yet.
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
BasicBlock(64, 64, dcn=dcn)
with pytest.raises(AssertionError):
# Not implemented yet.
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
BasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
BasicBlock(64, 64, plugins=plugins)
# test BasicBlock structure and forward
block = BasicBlock(64, 64)
assert block.conv1.in_channels == 64
assert block.conv1.out_channels == 64
assert block.conv1.kernel_size == (3, 3)
assert block.conv2.in_channels == 64
assert block.conv2.out_channels == 64
assert block.conv2.kernel_size == (3, 3)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test BasicBlock with checkpoint forward
block = BasicBlock(64, 64, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_bottleneck():
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
Bottleneck(64, 64, style='tensorflow')
with pytest.raises(AssertionError):
# Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv4')
]
Bottleneck(64, 16, plugins=plugins)
with pytest.raises(AssertionError):
# Need to specify different postfix to avoid duplicate plugin name
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
Bottleneck(64, 16, plugins=plugins)
with pytest.raises(KeyError):
# Plugin type is not supported
plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
Bottleneck(64, 16, plugins=plugins)
# Test Bottleneck with checkpoint forward
block = Bottleneck(64, 16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck style
block = Bottleneck(64, 64, stride=2, style='pytorch')
assert block.conv1.stride == (1, 1)
assert block.conv2.stride == (2, 2)
block = Bottleneck(64, 64, stride=2, style='caffe')
assert block.conv1.stride == (2, 2)
assert block.conv2.stride == (1, 1)
# Test Bottleneck DCN
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
with pytest.raises(AssertionError):
Bottleneck(64, 64, dcn=dcn, conv_cfg=dict(type='Conv'))
block = Bottleneck(64, 64, dcn=dcn)
assert isinstance(block.conv2, DeformConv2dPack)
# Test Bottleneck forward
block = Bottleneck(64, 16)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
# after conv2, 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
assert block.nonlocal_block.in_channels == 16
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
# conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
position='after_conv3')
]
block = Bottleneck(64, 16, plugins=plugins)
assert block.context_block1.in_channels == 16
assert block.context_block2.in_channels == 64
assert block.context_block3.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_simplified_basic_block():
with pytest.raises(AssertionError):
# Not implemented yet.
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
SimplifiedBasicBlock(64, 64, dcn=dcn)
with pytest.raises(AssertionError):
# Not implemented yet.
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
SimplifiedBasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
SimplifiedBasicBlock(64, 64, plugins=plugins)
with pytest.raises(AssertionError):
# Not implemented yet
SimplifiedBasicBlock(64, 64, with_cp=True)
# test SimplifiedBasicBlock structure and forward
block = SimplifiedBasicBlock(64, 64)
assert block.conv1.in_channels == 64
assert block.conv1.out_channels == 64
assert block.conv1.kernel_size == (3, 3)
assert block.conv2.in_channels == 64
assert block.conv2.out_channels == 64
assert block.conv2.kernel_size == (3, 3)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# test SimplifiedBasicBlock without norm
block = SimplifiedBasicBlock(64, 64, norm_cfg=None)
assert block.norm1 is None
assert block.norm2 is None
x_out = block(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
def test_resnet_res_layer():
    # Test ResLayer of 3 Bottlenecks without downsample
layer = ResLayer(Bottleneck, 64, 16, 3)
assert len(layer) == 3
assert layer[0].conv1.in_channels == 64
assert layer[0].conv1.out_channels == 16
for i in range(1, len(layer)):
assert layer[i].conv1.in_channels == 64
assert layer[i].conv1.out_channels == 16
for i in range(len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 64, 56, 56])
# Test ResLayer of 3 Bottleneck with downsample
layer = ResLayer(Bottleneck, 64, 64, 3)
assert layer[0].downsample[0].out_channels == 256
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 56, 56])
# Test ResLayer of 3 Bottleneck with stride=2
layer = ResLayer(Bottleneck, 64, 64, 3, stride=2)
assert layer[0].downsample[0].out_channels == 256
assert layer[0].downsample[0].stride == (2, 2)
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 28, 28])
# Test ResLayer of 3 Bottleneck with stride=2 and average downsample
layer = ResLayer(Bottleneck, 64, 64, 3, stride=2, avg_down=True)
assert isinstance(layer[0].downsample[0], AvgPool2d)
assert layer[0].downsample[1].out_channels == 256
assert layer[0].downsample[1].stride == (1, 1)
for i in range(1, len(layer)):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 256, 28, 28])
# Test ResLayer of 3 BasicBlock with stride=2 and downsample_first=False
layer = ResLayer(BasicBlock, 64, 64, 3, stride=2, downsample_first=False)
assert layer[2].downsample[0].out_channels == 64
assert layer[2].downsample[0].stride == (2, 2)
for i in range(len(layer) - 1):
assert layer[i].downsample is None
x = torch.randn(1, 64, 56, 56)
x_out = layer(x)
assert x_out.shape == torch.Size([1, 64, 28, 28])
def test_resnet_stem():
# Test default stem_channels
model = ResNet(50)
assert model.stem_channels == 64
assert model.conv1.out_channels == 64
assert model.norm1.num_features == 64
# Test default stem_channels, with base_channels=3
model = ResNet(50, base_channels=3)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test stem_channels=3
model = ResNet(50, stem_channels=3)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test stem_channels=3, with base_channels=2
model = ResNet(50, stem_channels=3, base_channels=2)
assert model.stem_channels == 3
assert model.conv1.out_channels == 3
assert model.norm1.num_features == 3
assert model.layer1[0].conv1.in_channels == 3
# Test V1d stem_channels
model = ResNetV1d(depth=50, stem_channels=6)
model.train()
assert model.stem[0].out_channels == 3
assert model.stem[1].num_features == 3
assert model.stem[3].out_channels == 3
assert model.stem[4].num_features == 3
assert model.stem[6].out_channels == 6
assert model.stem[7].num_features == 6
assert model.layer1[0].conv1.in_channels == 6
def test_resnet_backbone():
"""Test resnet backbone."""
with pytest.raises(KeyError):
# ResNet depth should be in [18, 34, 50, 101, 152]
ResNet(20)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=0)
with pytest.raises(AssertionError):
# len(stage_with_dcn) == num_stages
dcn = dict(type='DCN', deform_groups=1, fallback_on_stride=False)
ResNet(50, dcn=dcn, stage_with_dcn=(True, ))
with pytest.raises(AssertionError):
# len(stage_with_plugin) == num_stages
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True),
position='after_conv3')
]
ResNet(50, plugins=plugins)
with pytest.raises(AssertionError):
# In ResNet: 1 <= num_stages <= 4
ResNet(50, num_stages=5)
with pytest.raises(AssertionError):
# len(strides) == len(dilations) == num_stages
ResNet(50, strides=(1, ), dilations=(1, 1), num_stages=3)
with pytest.raises(TypeError):
# pretrained must be a string path
model = ResNet(50, pretrained=0)
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
ResNet(50, style='tensorflow')
# Test ResNet50 norm_eval=True
model = ResNet(50, norm_eval=True, base_channels=1)
model.train()
assert check_norm_state(model.modules(), False)
# Test ResNet50 with torchvision pretrained weight
model = ResNet(
depth=50, norm_eval=True, pretrained='torchvision://resnet50')
model.train()
assert check_norm_state(model.modules(), False)
# Test ResNet50 with first stage frozen
frozen_stages = 1
model = ResNet(50, frozen_stages=frozen_stages, base_channels=1)
model.train()
assert model.norm1.training is False
for layer in [model.conv1, model.norm1]:
for param in layer.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test ResNet50V1d with first stage frozen
model = ResNetV1d(depth=50, frozen_stages=frozen_stages, base_channels=2)
assert len(model.stem) == 9
model.train()
assert check_norm_state(model.stem, False)
for param in model.stem.parameters():
assert param.requires_grad is False
for i in range(1, frozen_stages + 1):
layer = getattr(model, f'layer{i}')
for mod in layer.modules():
if isinstance(mod, _BatchNorm):
assert mod.training is False
for param in layer.parameters():
assert param.requires_grad is False
# Test ResNet18 forward
model = ResNet(18)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 64, 8, 8])
assert feat[1].shape == torch.Size([1, 128, 4, 4])
assert feat[2].shape == torch.Size([1, 256, 2, 2])
assert feat[3].shape == torch.Size([1, 512, 1, 1])
# Test ResNet18 with checkpoint forward
model = ResNet(18, with_cp=True)
for m in model.modules():
if is_block(m):
assert m.with_cp
# Test ResNet50 with BatchNorm forward
model = ResNet(50, base_channels=1)
for m in model.modules():
if is_norm(m):
assert isinstance(m, _BatchNorm)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNet50 with layers 1, 2, 3 out forward
model = ResNet(50, out_indices=(0, 1, 2), base_channels=1)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 3
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
# Test ResNet50 with checkpoint forward
model = ResNet(50, with_cp=True, base_channels=1)
for m in model.modules():
if is_block(m):
assert m.with_cp
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNet50 with GroupNorm forward
model = ResNet(
50,
base_channels=4,
norm_cfg=dict(type='GN', num_groups=2, requires_grad=True))
for m in model.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 16, 8, 8])
assert feat[1].shape == torch.Size([1, 32, 4, 4])
assert feat[2].shape == torch.Size([1, 64, 2, 2])
assert feat[3].shape == torch.Size([1, 128, 1, 1])
# Test ResNet50 with 1 GeneralizedAttention after conv2, 1 NonLocal2D
# after conv2, 1 ContextBlock after conv3 in layers 2, 3, 4
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
stages=(False, True, True, True),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
stages=(False, True, True, False),
position='after_conv3')
]
model = ResNet(50, plugins=plugins, base_channels=8)
for m in model.layer1.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'gen_attention_block')
assert m.nonlocal_block.in_channels == 8
for m in model.layer2.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 16
assert m.gen_attention_block.in_channels == 16
assert m.context_block.in_channels == 64
for m in model.layer3.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 32
assert m.gen_attention_block.in_channels == 32
assert m.context_block.in_channels == 128
for m in model.layer4.modules():
if is_block(m):
assert m.nonlocal_block.in_channels == 64
assert m.gen_attention_block.in_channels == 64
assert not hasattr(m, 'context_block')
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 32, 8, 8])
assert feat[1].shape == torch.Size([1, 64, 4, 4])
assert feat[2].shape == torch.Size([1, 128, 2, 2])
assert feat[3].shape == torch.Size([1, 256, 1, 1])
# Test ResNet50 with 1 ContextBlock after conv2, 1 ContextBlock after
# conv3 in layers 2, 3, 4
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
stages=(False, True, True, False),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
stages=(False, True, True, False),
position='after_conv3')
]
model = ResNet(50, plugins=plugins, base_channels=8)
for m in model.layer1.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'context_block1')
assert not hasattr(m, 'context_block2')
for m in model.layer2.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert m.context_block1.in_channels == 64
assert m.context_block2.in_channels == 64
for m in model.layer3.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert m.context_block1.in_channels == 128
assert m.context_block2.in_channels == 128
for m in model.layer4.modules():
if is_block(m):
assert not hasattr(m, 'context_block')
assert not hasattr(m, 'context_block1')
assert not hasattr(m, 'context_block2')
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 32, 8, 8])
assert feat[1].shape == torch.Size([1, 64, 4, 4])
assert feat[2].shape == torch.Size([1, 128, 2, 2])
assert feat[3].shape == torch.Size([1, 256, 1, 1])
# Test ResNet50 zero initialization of residual
model = ResNet(50, zero_init_residual=True, base_channels=1)
model.init_weights()
for m in model.modules():
if isinstance(m, Bottleneck):
assert assert_params_all_zeros(m.norm3)
elif isinstance(m, BasicBlock):
assert assert_params_all_zeros(m.norm2)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 4, 8, 8])
assert feat[1].shape == torch.Size([1, 8, 4, 4])
assert feat[2].shape == torch.Size([1, 16, 2, 2])
assert feat[3].shape == torch.Size([1, 32, 1, 1])
# Test ResNetV1d forward
model = ResNetV1d(depth=50, base_channels=2)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 4
assert feat[0].shape == torch.Size([1, 8, 8, 8])
assert feat[1].shape == torch.Size([1, 16, 4, 4])
assert feat[2].shape == torch.Size([1, 32, 2, 2])
assert feat[3].shape == torch.Size([1, 64, 1, 1])
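# Illustrative usage sketch, not from the original test suite: a typical
# fine-tuning configuration combining the frozen_stages and norm_eval
# behaviours verified above.
def _demo_resnet_finetune_setup():
    model = ResNet(50, frozen_stages=1, norm_eval=True)
    model.train()
    # The stem and stage 1 stay frozen; BN layers stay in eval mode.
    assert all(not p.requires_grad for p in model.conv1.parameters())
    assert check_norm_state(model.modules(), False)
    return model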

# File: mmdetection-master/tests/test_models/test_backbones/test_swin.py
import pytest
import torch
from mmdet.models.backbones.swin import SwinBlock, SwinTransformer
def test_swin_block():
# test SwinBlock structure and forward
block = SwinBlock(embed_dims=64, num_heads=4, feedforward_channels=256)
assert block.ffn.embed_dims == 64
assert block.attn.w_msa.num_heads == 4
assert block.ffn.feedforward_channels == 256
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
# Test BasicBlock with checkpoint forward
block = SwinBlock(
embed_dims=64, num_heads=4, feedforward_channels=256, with_cp=True)
assert block.with_cp
x = torch.randn(1, 56 * 56, 64)
x_out = block(x, (56, 56))
assert x_out.shape == torch.Size([1, 56 * 56, 64])
def test_swin_transformer():
"""Test Swin Transformer backbone."""
with pytest.raises(TypeError):
# Pretrained arg must be str or None.
SwinTransformer(pretrained=123)
with pytest.raises(AssertionError):
        # Swin uses non-overlapping patch embedding, so the stride of the
        # patch embedding must equal the patch size.
SwinTransformer(strides=(2, 2, 2, 2), patch_size=4)
# test pretrained image size
with pytest.raises(AssertionError):
SwinTransformer(pretrain_img_size=(224, 224, 224))
# Test absolute position embedding
temp = torch.randn((1, 3, 224, 224))
model = SwinTransformer(pretrain_img_size=224, use_abs_pos_embed=True)
model.init_weights()
model(temp)
    # Test different input sizes when using absolute position embedding
temp = torch.randn((1, 3, 112, 112))
model(temp)
temp = torch.randn((1, 3, 256, 256))
model(temp)
# Test patch norm
model = SwinTransformer(patch_norm=False)
model(temp)
# Test normal inference
temp = torch.randn((1, 3, 32, 32))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 8, 8)
assert outs[1].shape == (1, 192, 4, 4)
assert outs[2].shape == (1, 384, 2, 2)
assert outs[3].shape == (1, 768, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 31, 31))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 8, 8)
assert outs[1].shape == (1, 192, 4, 4)
assert outs[2].shape == (1, 384, 2, 2)
assert outs[3].shape == (1, 768, 1, 1)
# Test abnormal inference size
temp = torch.randn((1, 3, 112, 137))
model = SwinTransformer()
outs = model(temp)
assert outs[0].shape == (1, 96, 28, 35)
assert outs[1].shape == (1, 192, 14, 18)
assert outs[2].shape == (1, 384, 7, 9)
assert outs[3].shape == (1, 768, 4, 5)
model = SwinTransformer(frozen_stages=4)
model.train()
for p in model.parameters():
assert not p.requires_grad
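# Illustrative usage sketch, not from the original test suite: a default
# SwinTransformer forward pass at the pretraining resolution.
def _demo_swin_usage():
    model = SwinTransformer()
    model.eval()
    with torch.no_grad():
        outs = model(torch.randn(1, 3, 224, 224))
    # Channel progression 96, 192, 384, 768 across the four stages, as
    # asserted above.
    return [o.shape for o in outs]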

# File: mmdetection-master/tests/test_models/test_backbones/test_trident_resnet.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.backbones import TridentResNet
from mmdet.models.backbones.trident_resnet import TridentBottleneck
def test_trident_resnet_bottleneck():
trident_dilations = (1, 2, 3)
test_branch_idx = 1
concat_output = True
trident_build_config = (trident_dilations, test_branch_idx, concat_output)
with pytest.raises(AssertionError):
# Style must be in ['pytorch', 'caffe']
TridentBottleneck(
*trident_build_config, inplanes=64, planes=64, style='tensorflow')
with pytest.raises(AssertionError):
# Allowed positions are 'after_conv1', 'after_conv2', 'after_conv3'
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv4')
]
TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
with pytest.raises(AssertionError):
# Need to specify different postfix to avoid duplicate plugin name
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
with pytest.raises(KeyError):
# Plugin type is not supported
plugins = [dict(cfg=dict(type='WrongPlugin'), position='after_conv3')]
TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
# Test Bottleneck with checkpoint forward
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, with_cp=True)
assert block.with_cp
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck style
block = TridentBottleneck(
*trident_build_config,
inplanes=64,
planes=64,
stride=2,
style='pytorch')
assert block.conv1.stride == (1, 1)
assert block.conv2.stride == (2, 2)
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=64, stride=2, style='caffe')
assert block.conv1.stride == (2, 2)
assert block.conv2.stride == (1, 1)
# Test Bottleneck forward
block = TridentBottleneck(*trident_build_config, inplanes=64, planes=16)
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 GeneralizedAttention after conv2, 1 NonLocal2D
# after conv2, 1 ContextBlock after conv3
plugins = [
dict(
cfg=dict(
type='GeneralizedAttention',
spatial_range=-1,
num_heads=8,
attention_type='0010',
kv_stride=2),
position='after_conv2'),
dict(cfg=dict(type='NonLocal2d'), position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16),
position='after_conv3')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.gen_attention_block.in_channels == 16
assert block.nonlocal_block.in_channels == 16
assert block.context_block.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
# Test Bottleneck with 1 ContextBlock after conv2, 2 ContextBlock after
# conv3
plugins = [
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=1),
position='after_conv2'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=2),
position='after_conv3'),
dict(
cfg=dict(type='ContextBlock', ratio=1. / 16, postfix=3),
position='after_conv3')
]
block = TridentBottleneck(
*trident_build_config, inplanes=64, planes=16, plugins=plugins)
assert block.context_block1.in_channels == 16
assert block.context_block2.in_channels == 64
assert block.context_block3.in_channels == 64
x = torch.randn(1, 64, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size([block.num_branch, 64, 56, 56])
def test_trident_resnet_backbone():
    """Test TridentResNet backbone."""
    tridentresnet_config = dict(
        num_branch=3,
        test_branch_idx=1,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        trident_dilations=(1, 2, 3),
        out_indices=(2, ),
    )
with pytest.raises(AssertionError):
# TridentResNet depth should be in [50, 101, 152]
TridentResNet(18, **tridentresnet_config)
with pytest.raises(AssertionError):
# In TridentResNet: num_stages == 3
TridentResNet(50, num_stages=4, **tridentresnet_config)
model = TridentResNet(50, num_stages=3, **tridentresnet_config)
model.train()
imgs = torch.randn(1, 3, 32, 32)
feat = model(imgs)
assert len(feat) == 1
assert feat[0].shape == torch.Size([3, 1024, 2, 2])
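# Illustrative usage sketch, not from the original test suite: in train
# mode the three trident branches fold into the batch axis, as the final
# assertion above shows.
def _demo_trident_usage():
    model = TridentResNet(
        50,
        num_stages=3,
        num_branch=3,
        test_branch_idx=1,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        trident_dilations=(1, 2, 3),
        out_indices=(2, ))
    model.train()
    feat = model(torch.randn(1, 3, 32, 32))
    return feat[0].shape  # torch.Size([3, 1024, 2, 2])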

# File: mmdetection-master/tests/test_models/test_backbones/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
from torch.nn.modules import GroupNorm
from torch.nn.modules.batchnorm import _BatchNorm
from mmdet.models.backbones.res2net import Bottle2neck
from mmdet.models.backbones.resnet import BasicBlock, Bottleneck
from mmdet.models.backbones.resnext import Bottleneck as BottleneckX
from mmdet.models.utils import SimplifiedBasicBlock
def is_block(modules):
"""Check if is ResNet building block."""
if isinstance(modules, (BasicBlock, Bottleneck, BottleneckX, Bottle2neck,
SimplifiedBasicBlock)):
return True
return False
def is_norm(modules):
"""Check if is one of the norms."""
if isinstance(modules, (GroupNorm, _BatchNorm)):
return True
return False
def check_norm_state(modules, train_state):
"""Check if norm layer is in correct train state."""
for mod in modules:
if isinstance(mod, _BatchNorm):
if mod.training != train_state:
return False
return True
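# Illustrative usage sketch, not from the original helper module: how the
# three predicates above are typically combined in the backbone tests.
def _demo_utils_usage():
    import torch.nn as nn
    net = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8))
    net.eval()
    assert is_norm(net[1])
    assert not is_block(net[0])
    assert check_norm_state(net.modules(), False)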

# File: mmdetection-master/tests/test_models/test_dense_heads/test_anchor_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import AnchorHead
def test_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False))
self = AnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
    # Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
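# Illustrative usage sketch, not from the original test suite: a bare
# forward pass showing that the head emits one (cls_score, bbox_pred)
# pair per feature level; the feature sizes here are arbitrary assumptions
# matching the strides of the default anchor generator.
def _demo_anchor_head_forward():
    head = AnchorHead(num_classes=4, in_channels=1)
    feats = [
        torch.rand(1, 1, 256 // (2**(i + 2)), 256 // (2**(i + 2)))
        for i in range(len(head.anchor_generator.strides))
    ]
    cls_scores, bbox_preds = head.forward(feats)
    assert len(cls_scores) == len(bbox_preds) == len(feats)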

# File: mmdetection-master/tests/test_models/test_dense_heads/test_ascend_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import (AscendAnchorHead, AscendRetinaHead,
AscendSSDHead)
def test_ascend_anchor_head_loss():
"""Tests AscendAnchorHead loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='AscendMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = AscendAnchorHead(num_classes=4, in_channels=1, train_cfg=cfg)
    # Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.prior_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_ascend_retina_head_loss():
"""Tests AscendRetinaHead loss when truth is empty and non-empty."""
img_shape = (800, 1067, 3)
pad_shape = (800, 1088, 3)
num_classes = 80
in_channels = 256
img_metas = [{
'img_shape': img_shape,
'scale_factor': 1,
'pad_shape': pad_shape
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='AscendMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = AscendRetinaHead(
num_classes=num_classes, in_channels=in_channels, train_cfg=cfg)
    # Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, in_channels, pad_shape[0] // strides[0],
pad_shape[1] // strides[1])
for strides in self.prior_generator.strides
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_ascend_ssd_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
img_shape = (320, 320, 3)
pad_shape = (320, 320, 3)
in_channels = (96, 1280, 512, 256, 256, 128)
img_metas = [{
'img_shape': img_shape,
'scale_factor': 1,
'pad_shape': pad_shape
}, {
'img_shape': img_shape,
'scale_factor': 1,
'pad_shape': pad_shape
}]
self = AscendSSDHead(
in_channels=in_channels,
num_classes=80,
use_depthwise=True,
norm_cfg=dict(type='BN', eps=0.001, momentum=0.03),
act_cfg=dict(type='ReLU6'),
init_cfg=dict(type='Normal', layer='Conv2d', std=0.001),
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
strides=[16, 32, 64, 107, 160, 320],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]],
min_sizes=[48, 100, 150, 202, 253, 304],
max_sizes=[100, 150, 202, 253, 304, 320]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
train_cfg=mmcv.Config(
dict(
assigner=dict(
type='AscendMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False)))
    # Anchor head expects multiple levels of features per image
feat = [
torch.rand(2, in_channels[i],
round(pad_shape[0] / self.prior_generator.strides[i][0]),
round(pad_shape[1] / self.prior_generator.strides[i][1]))
for i in range(len(self.prior_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4)), torch.empty((0, 4))]
gt_labels = [torch.LongTensor([]), torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    assert empty_cls_loss.item() >= 0, 'cls loss should be non-negative'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2]), torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
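# Illustrative usage sketch, not from the original test suite: the Ascend
# heads reuse the forward interface of their GPU counterparts (as the loss
# calls above show), so a plain forward pass needs no Ascend-specific
# configuration. Input sizes mirror the retina test above.
def _demo_ascend_retina_forward():
    head = AscendRetinaHead(num_classes=80, in_channels=256)
    feats = [
        torch.rand(1, 256, 800 // stride[0], 1088 // stride[1])
        for stride in head.prior_generator.strides
    ]
    cls_scores, bbox_preds = head.forward(feats)
    assert len(cls_scores) == len(bbox_preds) == len(feats)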

# File: mmdetection-master/tests/test_models/test_dense_heads/test_atss_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import ATSSHead
def test_atss_head_loss():
"""Tests atss head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = ATSSHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, centernesses = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, centernesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_centerness_loss = sum(empty_gt_losses['loss_centerness'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_centerness_loss.item() == 0, (
'there should be no centerness loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, centernesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_centerness_loss = sum(one_gt_losses['loss_centerness'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_centerness_loss.item() > 0, (
'centerness loss should be non-zero')
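# Illustrative usage sketch, not from the original test suite: a bare
# forward pass with the same head configuration as above, showing the
# three per-level outputs (classification, regression, centerness).
def _demo_atss_forward():
    head = ATSSHead(
        num_classes=4,
        in_channels=1,
        anchor_generator=dict(
            type='AnchorGenerator',
            ratios=[1.0],
            octave_base_scale=8,
            scales_per_octave=1,
            strides=[8, 16, 32, 64, 128]))
    feats = [
        torch.rand(1, 1, 256 // s, 256 // s) for s in [4, 8, 16, 32, 64]
    ]
    cls_scores, bbox_preds, centernesses = head.forward(feats)
    assert len(cls_scores) == len(bbox_preds) == len(centernesses) == 5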

# File: mmdetection-master/tests/test_models/test_dense_heads/test_autoassign_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads.autoassign_head import AutoAssignHead
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_autoassign_head_loss():
"""Tests autoassign head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(assigner=None, allowed_border=-1, pos_weight=-1, debug=False))
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, objectnesses = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_pos_loss = empty_gt_losses['loss_pos']
empty_neg_loss = empty_gt_losses['loss_neg']
empty_center_loss = empty_gt_losses['loss_center']
    assert empty_neg_loss.item() > 0, 'neg loss should be non-zero'
    assert empty_pos_loss.item() == 0, (
        'there should be no pos loss when there are no true boxes')
    assert empty_center_loss.item() == 0, (
        'there should be no center loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_pos_loss = one_gt_losses['loss_pos']
onegt_neg_loss = one_gt_losses['loss_neg']
onegt_center_loss = one_gt_losses['loss_center']
    assert onegt_pos_loss.item() > 0, 'pos loss should be non-zero'
    assert onegt_neg_loss.item() > 0, 'neg loss should be non-zero'
    assert onegt_center_loss.item() > 0, 'center loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
self = AutoAssignHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
strides=(4, ))
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
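# A hedged reference for `levels_to_images` (an assumption about its
# semantics inferred from the shape checks above, not the library source):
# each (N, C, H, W) level is flattened to per-image (H*W, C) rows, then the
# levels are concatenated per image.
def _levels_to_images_reference(mlvl_tensor):
    batch_size, channels = mlvl_tensor[0].size(0), mlvl_tensor[0].size(1)
    return [
        torch.cat([
            level[i].permute(1, 2, 0).reshape(-1, channels)
            for level in mlvl_tensor
        ]) for i in range(batch_size)
    ]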
| 3,580 | 37.923913 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_centernet_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import CenterNetHead
def test_center_head_loss():
"""Tests center head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
test_cfg = dict(topK=100, max_per_img=100)
self = CenterNetHead(
num_classes=4, in_channel=1, feat_channel=4, test_cfg=test_cfg)
feat = [torch.rand(1, 1, s, s)]
center_out, wh_out, offset_out = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(center_out, wh_out, offset_out, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
loss_center = empty_gt_losses['loss_center_heatmap']
loss_wh = empty_gt_losses['loss_wh']
loss_offset = empty_gt_losses['loss_offset']
assert loss_center.item() > 0, 'loss_center should be non-zero'
assert loss_wh.item() == 0, (
'there should be no loss_wh when there are no true boxes')
assert loss_offset.item() == 0, (
'there should be no loss_offset when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(center_out, wh_out, offset_out, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
loss_center = one_gt_losses['loss_center_heatmap']
loss_wh = one_gt_losses['loss_wh']
loss_offset = one_gt_losses['loss_offset']
assert loss_center.item() > 0, 'loss_center should be non-zero'
assert loss_wh.item() > 0, 'loss_wh should be non-zero'
assert loss_offset.item() > 0, 'loss_offset should be non-zero'
def test_centernet_head_get_bboxes():
"""Tests center head generating and decoding the heatmap."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': np.array([1., 1., 1., 1.]),
'pad_shape': (s, s, 3),
'batch_input_shape': (s, s),
'border': (0, 0, 0, 0),
'flip': False
}]
test_cfg = ConfigDict(
dict(topk=100, local_maximum_kernel=3, max_per_img=100))
gt_bboxes = [
torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
[10, 20, 100, 240]])
]
gt_labels = [torch.LongTensor([1, 1, 2])]
self = CenterNetHead(
num_classes=4, in_channel=1, feat_channel=4, test_cfg=test_cfg)
self.feat_shape = (1, 1, s // 4, s // 4)
targets, _ = self.get_targets(gt_bboxes, gt_labels, self.feat_shape,
img_metas[0]['pad_shape'])
center_target = targets['center_heatmap_target']
wh_target = targets['wh_target']
offset_target = targets['offset_target']
# make sure assign target right
for i in range(len(gt_bboxes[0])):
bbox, label = gt_bboxes[0][i] / 4, gt_labels[0][i]
ctx, cty = sum(bbox[0::2]) / 2, sum(bbox[1::2]) / 2
int_ctx, int_cty = int(sum(bbox[0::2]) / 2), int(sum(bbox[1::2]) / 2)
w, h = bbox[2] - bbox[0], bbox[3] - bbox[1]
x_off = ctx - int(ctx)
y_off = cty - int(cty)
assert center_target[0, label, int_cty, int_ctx] == 1
assert wh_target[0, 0, int_cty, int_ctx] == w
assert wh_target[0, 1, int_cty, int_ctx] == h
assert offset_target[0, 0, int_cty, int_ctx] == x_off
assert offset_target[0, 1, int_cty, int_ctx] == y_off
# make sure get_bboxes is right
detections = self.get_bboxes([center_target], [wh_target], [offset_target],
img_metas,
rescale=True,
with_nms=False)
out_bboxes = detections[0][0][:3]
out_clses = detections[0][1][:3]
for bbox, cls in zip(out_bboxes, out_clses):
flag = False
for gt_bbox, gt_cls in zip(gt_bboxes[0], gt_labels[0]):
if (bbox[:4] == gt_bbox[:4]).all():
flag = True
assert flag, 'get_bboxes is wrong'
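# A hedged sketch of the CornerNet-style gaussian radius used when the
# targets above splat centers onto the heatmap (illustrative: the library
# implementation takes the minimum over three quadratic overlap cases; this
# solves just one of them for its smaller root).
def _gaussian_radius_case1(det_h, det_w, min_overlap=0.3):
    # one case: r**2 - (h + w) * r + w * h * (1 - o) / (1 + o) = 0
    b = det_h + det_w
    c = det_w * det_h * (1 - min_overlap) / (1 + min_overlap)
    return (b - np.sqrt(b * b - 4 * c)) / 2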
| 4,385 | 39.611111 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_corner_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmdet.models.dense_heads import CornerHead
def test_corner_head_loss():
"""Tests corner head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
self = CornerHead(num_classes=4, in_channels=1)
# Corner head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // 4, s // 4) for _ in range(self.num_feat_levels)
]
tl_heats, br_heats, tl_embs, br_embs, tl_offs, br_offs = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
br_offs, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
empty_det_loss = sum(empty_gt_losses['det_loss'])
empty_push_loss = sum(empty_gt_losses['push_loss'])
empty_pull_loss = sum(empty_gt_losses['pull_loss'])
empty_off_loss = sum(empty_gt_losses['off_loss'])
assert empty_det_loss.item() > 0, 'det loss should be non-zero'
assert empty_push_loss.item() == 0, (
'there should be no push loss when there are no true boxes')
assert empty_pull_loss.item() == 0, (
'there should be no pull loss when there are no true boxes')
assert empty_off_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
br_offs, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
onegt_det_loss = sum(one_gt_losses['det_loss'])
onegt_push_loss = sum(one_gt_losses['push_loss'])
onegt_pull_loss = sum(one_gt_losses['pull_loss'])
onegt_off_loss = sum(one_gt_losses['off_loss'])
assert onegt_det_loss.item() > 0, 'det loss should be non-zero'
assert onegt_push_loss.item() == 0, (
'there should be no push loss when there are only one true box')
assert onegt_pull_loss.item() > 0, 'pull loss should be non-zero'
assert onegt_off_loss.item() > 0, 'off loss should be non-zero'
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874],
[123.6667, 123.8757, 138.6326, 251.8874]]),
]
gt_labels = [torch.LongTensor([2, 3])]
# equalize the corners' embedding value of different objects to make the
# push_loss larger than 0
gt_bboxes_ind = (gt_bboxes[0] // 4).int().tolist()
for tl_emb_feat, br_emb_feat in zip(tl_embs, br_embs):
tl_emb_feat[:, :, gt_bboxes_ind[0][1],
gt_bboxes_ind[0][0]] = tl_emb_feat[:, :,
gt_bboxes_ind[1][1],
gt_bboxes_ind[1][0]]
br_emb_feat[:, :, gt_bboxes_ind[0][3],
gt_bboxes_ind[0][2]] = br_emb_feat[:, :,
gt_bboxes_ind[1][3],
gt_bboxes_ind[1][2]]
two_gt_losses = self.loss(tl_heats, br_heats, tl_embs, br_embs, tl_offs,
br_offs, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
twogt_det_loss = sum(two_gt_losses['det_loss'])
twogt_push_loss = sum(two_gt_losses['push_loss'])
twogt_pull_loss = sum(two_gt_losses['pull_loss'])
twogt_off_loss = sum(two_gt_losses['off_loss'])
assert twogt_det_loss.item() > 0, 'det loss should be non-zero'
assert twogt_push_loss.item() > 0, 'push loss should be non-zero'
assert twogt_pull_loss.item() > 0, 'pull loss should be non-zero'
assert twogt_off_loss.item() > 0, 'off loss should be non-zero'
def test_corner_head_encode_and_decode_heatmap():
"""Tests corner head generating and decoding the heatmap."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3),
'border': (0, 0, 0, 0)
}]
gt_bboxes = [
torch.Tensor([[10, 20, 200, 240], [40, 50, 100, 200],
[10, 20, 200, 240]])
]
gt_labels = [torch.LongTensor([1, 1, 2])]
self = CornerHead(num_classes=4, in_channels=1, corner_emb_channels=1)
feat = [
torch.rand(1, 1, s // 4, s // 4) for _ in range(self.num_feat_levels)
]
targets = self.get_targets(
gt_bboxes,
gt_labels,
feat[0].shape,
img_metas[0]['pad_shape'],
with_corner_emb=self.with_corner_emb)
gt_tl_heatmap = targets['topleft_heatmap']
gt_br_heatmap = targets['bottomright_heatmap']
gt_tl_offset = targets['topleft_offset']
gt_br_offset = targets['bottomright_offset']
embedding = targets['corner_embedding']
[top, left], [bottom, right] = embedding[0][0]
gt_tl_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
gt_br_embedding_heatmap = torch.zeros([1, 1, s // 4, s // 4])
gt_tl_embedding_heatmap[0, 0, top, left] = 1
gt_br_embedding_heatmap[0, 0, bottom, right] = 1
batch_bboxes, batch_scores, batch_clses = self.decode_heatmap(
tl_heat=gt_tl_heatmap,
br_heat=gt_br_heatmap,
tl_off=gt_tl_offset,
br_off=gt_br_offset,
tl_emb=gt_tl_embedding_heatmap,
br_emb=gt_br_embedding_heatmap,
img_meta=img_metas[0],
k=100,
kernel=3,
distance_threshold=0.5)
bboxes = batch_bboxes.view(-1, 4)
scores = batch_scores.view(-1, 1)
clses = batch_clses.view(-1, 1)
idx = scores.argsort(dim=0, descending=True)
bboxes = bboxes[idx].view(-1, 4)
scores = scores[idx].view(-1)
clses = clses[idx].view(-1)
valid_bboxes = bboxes[torch.where(scores > 0.05)]
valid_labels = clses[torch.where(scores > 0.05)]
max_coordinate = valid_bboxes.max()
offsets = valid_labels.to(valid_bboxes) * (max_coordinate + 1)
gt_offsets = gt_labels[0].to(gt_bboxes[0]) * (max_coordinate + 1)
offset_bboxes = valid_bboxes + offsets[:, None]
offset_gtbboxes = gt_bboxes[0] + gt_offsets[:, None]
iou_matrix = bbox_overlaps(offset_bboxes.numpy(), offset_gtbboxes.numpy())
assert (iou_matrix == 1).sum() == 3
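# A hedged sketch of the associative-embedding idea behind the pull/push
# losses exercised above (illustrative scalar version, not the library
# implementation): pull draws each box's two corner embeddings toward their
# mean, push separates the means of different boxes by a unit margin.
def _pull_push_sketch(tl_embs, br_embs):
    means = [(tl + br) / 2 for tl, br in zip(tl_embs, br_embs)]
    pull = sum((tl - m)**2 + (br - m)**2
               for tl, br, m in zip(tl_embs, br_embs, means))
    push = sum(
        max(0, 1 - abs(mi - mj)) for i, mi in enumerate(means)
        for j, mj in enumerate(means) if i < j)
    return pull, push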
| 6,756 | 39.220238 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_ddod_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import DDODHead
def test_ddod_head_loss():
"""Tests ddod head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict( # ATSSAssigner
assigner=dict(type='ATSSAssigner', topk=9, alpha=0.8),
reg_assigner=dict(type='ATSSAssigner', topk=9, alpha=0.5),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = DDODHead(
num_classes=4,
in_channels=1,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
train_cfg=train_cfg,
norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),
loss_iou=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, iou_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_iou_loss = sum(empty_gt_losses['loss_iou'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_iou_loss.item() == 0, (
'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_iou_loss = sum(one_gt_losses['loss_iou'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'
| 2,886 | 38.547945 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_dense_heads_attr.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from terminaltables import AsciiTable
from mmdet.models import dense_heads
from mmdet.models.dense_heads import * # noqa: F401,F403
def test_dense_heads_test_attr():
"""Tests inference methods such as simple_test and aug_test."""
# make list of dense heads
exceptions = ['FeatureAdaption'] # module used in head
all_dense_heads = [m for m in dense_heads.__all__ if m not in exceptions]
# search attributes
check_attributes = [
'simple_test', 'aug_test', 'simple_test_bboxes', 'simple_test_rpn',
'aug_test_rpn'
]
table_header = ['head name'] + check_attributes
table_data = [table_header]
not_found = {k: [] for k in check_attributes}
for target_head_name in all_dense_heads:
target_head = globals()[target_head_name]
target_head_attributes = dir(target_head)
check_results = [target_head_name]
for check_attribute in check_attributes:
found = check_attribute in target_head_attributes
check_results.append(found)
if not found:
not_found[check_attribute].append(target_head_name)
table_data.append(check_results)
table = AsciiTable(table_data)
print()
print(table.table)
# NOTE: this test just checks attributes.
# simple_test of RPN heads will not work now.
assert len(not_found['simple_test']) == 0, \
f'simple_test not found in {not_found["simple_test"]}'
if len(not_found['aug_test']) != 0:
warnings.warn(f'aug_test not found in {not_found["aug_test"]}. '
'Please implement it or raise NotImplementedError.')
| 1,702 | 36.844444 | 77 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_detr_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv import ConfigDict
from mmdet.models.dense_heads import DETRHead
def test_detr_head_loss():
"""Tests transformer head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3),
'batch_input_shape': (s, s)
}]
config = ConfigDict(
dict(
type='DETRHead',
num_classes=80,
in_channels=200,
transformer=dict(
type='Transformer',
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=[
dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1)
],
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm')),
)),
positional_encoding=dict(
type='SinePositionalEncoding', num_feats=128, normalize=True),
loss_cls=dict(
type='CrossEntropyLoss',
bg_cls_weight=0.1,
use_sigmoid=False,
loss_weight=1.0,
class_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=5.0),
loss_iou=dict(type='GIoULoss', loss_weight=2.0)))
self = DETRHead(**config)
self.init_weights()
feat = [torch.rand(1, 200, 10, 10)]
cls_scores, bbox_preds = self.forward(feat, img_metas)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
assert loss.item() > 0, 'cls loss should be non-zero'
elif 'bbox' in key:
assert loss.item(
) == 0, 'there should be no box loss when there are no true boxes'
elif 'iou' in key:
assert loss.item(
) == 0, 'there should be no iou loss when there are no true boxes'
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
for loss in one_gt_losses.values():
assert loss.item(
) > 0, 'cls loss, or box loss, or iou loss should be non-zero'
# test forward_train
self.forward_train(feat, img_metas, gt_bboxes, gt_labels)
# test inference mode
self.get_bboxes(cls_scores, bbox_preds, img_metas, rescale=True)
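# A hedged sketch of the SinePositionalEncoding configured above
# (illustrative reimplementation following the DETR paper, not the mmcv
# module; the `normalize=True` rescaling is omitted for brevity). `mask` is
# a (B, H, W) bool tensor that is True on padded pixels.
def _sine_positional_encoding_sketch(mask, num_feats=128, temperature=10000):
    not_mask = ~mask
    y_embed = not_mask.cumsum(1, dtype=torch.float32)
    x_embed = not_mask.cumsum(2, dtype=torch.float32)
    dim_t = torch.arange(num_feats, dtype=torch.float32)
    dim_t = temperature**(
        2 * torch.div(dim_t, 2, rounding_mode='floor') / num_feats)
    pos_x = x_embed[:, :, :, None] / dim_t
    pos_y = y_embed[:, :, :, None] / dim_t
    pos_x = torch.stack((pos_x[:, :, :, 0::2].sin(),
                         pos_x[:, :, :, 1::2].cos()), dim=4).flatten(3)
    pos_y = torch.stack((pos_y[:, :, :, 0::2].sin(),
                         pos_y[:, :, :, 1::2].cos()), dim=4).flatten(3)
    return torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)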
| 4,130 | 38.342857 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_fcos_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import FCOSHead
def test_fcos_head_loss():
"""Tests fcos head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = FCOSHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, centerness = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, centerness, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, centerness, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
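# A worked sketch of the centerness target from the FCOS paper (the value
# the `centerness` branch above regresses): with l, r, t, b the distances
# from a location to the four box sides,
# centerness = sqrt(min(l, r) / max(l, r) * min(t, b) / max(t, b)).
def _centerness_target_sketch(left, right, top, bottom):
    import math
    return math.sqrt((min(left, right) / max(left, right)) *
                     (min(top, bottom) / max(top, bottom)))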
| 2,406 | 36.030769 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_fsaf_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import FSAFHead
def test_fsaf_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = dict(
reg_decoded_bbox=True,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=1,
scales_per_octave=1,
ratios=[1.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(type='TBLRBBoxCoder', normalizer=4.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0,
reduction='none'),
loss_bbox=dict(
type='IoULoss', eps=1e-6, loss_weight=1.0, reduction='none'))
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='CenterRegionAssigner',
pos_scale=0.2,
neg_scale=0.2,
min_pos_iof=0.01),
allowed_border=-1,
pos_weight=-1,
debug=False))
head = FSAFHead(num_classes=4, in_channels=1, train_cfg=train_cfg, **cfg)
if torch.cuda.is_available():
head.cuda()
# FSAF head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.anchor_generator.strides))
]
cls_scores, bbox_preds = head.forward(feat)
gt_bboxes_ignore = None
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
# Test that empty ground truth encourages the network to predict bkg
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
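# A hedged alternative (not part of the original suite) to the inline
# availability check above: a skip marker makes pytest report the test as
# skipped on CPU-only machines instead of letting it pass vacuously.
import pytest
@pytest.mark.skipif(
    not torch.cuda.is_available(), reason='requires CUDA')
def test_fsaf_head_loss_cuda_marker_sketch():
    test_fsaf_head_loss()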
| 3,097 | 36.325301 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_ga_anchor_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GuidedAnchorHead
def test_ga_anchor_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
ga_assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
ga_sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=-1,
center_ratio=0.2,
ignore_ratio=0.5,
pos_weight=-1,
debug=False))
head = GuidedAnchorHead(num_classes=4, in_channels=4, train_cfg=cfg)
# Anchor head expects a multiple levels of features per image
if torch.cuda.is_available():
head.cuda()
feat = [
torch.rand(1, 4, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.approx_anchor_generator.base_anchors))
]
cls_scores, bbox_preds, shape_preds, loc_preds = head.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
loc_preds, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, shape_preds,
loc_preds, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
| 3,410 | 36.076087 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_gfl_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GFLHead
def test_gfl_head_loss():
"""Tests gfl head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = GFLHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_dfl_loss = sum(empty_gt_losses['loss_dfl'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_dfl_loss.item() == 0, (
'there should be no dfl loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
onegt_dfl_loss = sum(one_gt_losses['loss_dfl'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_dfl_loss.item() > 0, 'dfl loss should be non-zero'
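# A hedged sketch of the "general distribution" decoding behind GFLHead
# (an illustrative reimplementation of the Integral idea, not the library
# module): each side of the box is predicted as a softmax over reg_max + 1
# discrete bins, and the expectation over the bins gives a continuous
# offset; the dfl loss above supervises exactly this distribution.
def _integral_sketch(reg_logits, reg_max=16):
    project = torch.linspace(0, reg_max, reg_max + 1)
    probs = torch.softmax(reg_logits.reshape(-1, reg_max + 1), dim=1)
    return probs.matmul(project)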
| 2,786 | 36.16 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_lad_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import LADHead, lad_head
from mmdet.models.dense_heads.lad_head import levels_to_images
def test_lad_head_loss():
"""Tests lad head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
            components = np.zeros_like(loss, dtype=np.int64)
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
lad_head.skm = mock_skm()
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = LADHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
teacher_model = LADHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
teacher_model.init_weights()
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
outs_teacher = teacher_model(feat)
label_assignment_results = teacher_model.get_label_assignment(
*outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
    outs = self(feat)
empty_gt_losses = self.loss(*outs, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore, label_assignment_results)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
    assert empty_iou_loss.item() == 0, (
        'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
label_assignment_results = teacher_model.get_label_assignment(
*outs_teacher, gt_bboxes, gt_labels, img_metas, gt_bboxes_ignore)
one_gt_losses = self.loss(*outs, gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore, label_assignment_results)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
assert self.with_score_voting
self = LADHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
| 5,294 | 34.3 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_ld_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import GFLHead, LDHead
def test_ld_head_loss():
"""Tests vfnet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9, ignore_iof_thr=0.1),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = LDHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_ld=dict(type='KnowledgeDistillationKLDivLoss', loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]))
teacher_model = GFLHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds = self.forward(feat)
rand_soft_target = teacher_model.forward(feat)[1]
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero, ld loss should
# be non-negative but there should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
empty_ld_loss = sum(empty_gt_losses['loss_ld'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_ld_loss.item() >= 0, 'ld loss should be non-negative'
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
gt_bboxes_ignore = gt_bboxes
# When truth is non-empty but ignored then the cls loss should be nonzero,
# but there should be no box loss.
ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
rand_soft_target, img_metas, gt_bboxes_ignore)
ignore_cls_loss = sum(ignore_gt_losses['loss_cls'])
ignore_box_loss = sum(ignore_gt_losses['loss_bbox'])
assert ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
assert ignore_box_loss.item() == 0, 'gt bbox ignored loss should be zero'
# When truth is non-empty and not ignored then both cls and box loss should
# be nonzero for random inputs
gt_bboxes_ignore = [torch.randn(1, 4)]
not_ignore_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, rand_soft_target, img_metas,
gt_bboxes_ignore)
not_ignore_cls_loss = sum(not_ignore_gt_losses['loss_cls'])
not_ignore_box_loss = sum(not_ignore_gt_losses['loss_bbox'])
assert not_ignore_cls_loss.item() > 0, 'cls loss should be non-zero'
assert not_ignore_box_loss.item(
) > 0, 'gt bbox not ignored loss should be non-zero'
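# A hedged sketch of the KnowledgeDistillationKLDivLoss configured above
# (illustrative; assumes the usual temperature-scaled form with a T**2
# rescaling so gradient magnitudes stay comparable across temperatures):
def _kd_kldiv_sketch(student_logits, teacher_logits, T=10.0):
    import torch.nn.functional as F
    return F.kl_div(
        F.log_softmax(student_logits / T, dim=-1),
        F.softmax(teacher_logits / T, dim=-1),
        reduction='batchmean') * (T * T)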
| 4,605 | 36.754098 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_mask2former_head.py | import numpy as np
import pytest
import torch
from mmcv import ConfigDict
from mmdet.core.mask import BitmapMasks
from mmdet.models.dense_heads import Mask2FormerHead
@pytest.mark.parametrize('num_stuff_classes, \
label_num', [(53, 100), (0, 80)])
def test_mask2former_head_loss(num_stuff_classes, label_num):
"""Tests head loss when truth is empty and non-empty.
    Tests head loss for both panoptic and instance segmentation, and tests
    forward_train and simple_test with masks and with None as
    gt_semantic_seg.
    """
self = _init_model(num_stuff_classes)
img_metas = [{
'batch_input_shape': (128, 160),
'pad_shape': (128, 160, 3),
'img_shape': (126, 160, 3),
'ori_shape': (63, 80, 3)
}, {
'batch_input_shape': (128, 160),
'pad_shape': (128, 160, 3),
'img_shape': (120, 160, 3),
'ori_shape': (60, 80, 3)
}]
feats = [
torch.rand((2, 64 * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
for i in range(4)
]
all_cls_scores, all_mask_preds = self.forward(feats, img_metas)
# Test that empty ground truth encourages the network to predict background
gt_labels_list = [torch.LongTensor([]), torch.LongTensor([])]
gt_masks_list = [
torch.zeros((0, 128, 160)).long(),
torch.zeros((0, 128, 160)).long()
]
empty_gt_losses = self.loss(all_cls_scores, all_mask_preds, gt_labels_list,
gt_masks_list, img_metas)
# When there is no truth, the cls loss should be nonzero but there should
# be no mask loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
assert loss.item() > 0, 'cls loss should be non-zero'
elif 'mask' in key:
assert loss.item(
) == 0, 'there should be no mask loss when there are no true mask'
elif 'dice' in key:
assert loss.item(
) == 0, 'there should be no dice loss when there are no true mask'
# when truth is non-empty then both cls, mask, dice loss should be nonzero
# random inputs
gt_labels_list = [
torch.tensor([10, label_num]).long(),
torch.tensor([label_num, 10]).long()
]
mask1 = torch.zeros((2, 128, 160)).long()
mask1[0, :50] = 1
mask1[1, 50:] = 1
mask2 = torch.zeros((2, 128, 160)).long()
mask2[0, :, :50] = 1
mask2[1, :, 50:] = 1
gt_masks_list = [mask1, mask2]
two_gt_losses = self.loss(all_cls_scores, all_mask_preds, gt_labels_list,
gt_masks_list, img_metas)
for loss in two_gt_losses.values():
assert loss.item() > 0, 'all loss should be non-zero'
# test forward_train
gt_bboxes = None
gt_labels = [
torch.tensor([10]).long(),
torch.tensor([10]).long(),
]
thing_mask1 = np.zeros((1, 128, 160), dtype=np.int32)
thing_mask1[0, :50] = 1
thing_mask2 = np.zeros((1, 128, 160), dtype=np.int32)
thing_mask2[0, :, 50:] = 1
gt_masks = [
BitmapMasks(thing_mask1, 128, 160),
BitmapMasks(thing_mask2, 128, 160),
]
stuff_mask1 = torch.zeros((1, 128, 160)).long()
stuff_mask1[0, :50] = 10
stuff_mask1[0, 50:] = 100
stuff_mask2 = torch.zeros((1, 128, 160)).long()
stuff_mask2[0, :, 50:] = 10
stuff_mask2[0, :, :50] = 100
gt_semantic_seg = [stuff_mask1, stuff_mask2]
self.forward_train(feats, img_metas, gt_bboxes, gt_labels, gt_masks,
gt_semantic_seg)
# test when gt_semantic_seg is None
gt_semantic_seg = None
self.forward_train(feats, img_metas, gt_bboxes, gt_labels, gt_masks,
gt_semantic_seg)
# test inference mode
self.simple_test(feats, img_metas)
def _init_model(num_stuff_classes):
base_channels = 64
num_things_classes = 80
num_classes = num_things_classes + num_stuff_classes
config = ConfigDict(
dict(
type='Mask2FormerHead',
in_channels=[base_channels * 2**i for i in range(4)],
feat_channels=base_channels,
out_channels=base_channels,
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
num_queries=100,
num_transformer_feat_level=3,
pixel_decoder=dict(
type='MSDeformAttnPixelDecoder',
num_outs=3,
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU'),
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=dict(
type='MultiScaleDeformableAttention',
embed_dims=base_channels,
num_heads=8,
num_levels=3,
num_points=4,
im2col_step=64,
dropout=0.0,
batch_first=False,
norm_cfg=None,
init_cfg=None),
ffn_cfgs=dict(
type='FFN',
embed_dims=base_channels,
feedforward_channels=base_channels * 4,
num_fcs=2,
ffn_drop=0.0,
act_cfg=dict(type='ReLU', inplace=True)),
feedforward_channels=base_channels * 4,
ffn_dropout=0.0,
operation_order=('self_attn', 'norm', 'ffn', 'norm')),
init_cfg=None),
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=base_channels // 2,
normalize=True),
init_cfg=None),
enforce_decoder_input_project=False,
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=base_channels // 2,
normalize=True),
transformer_decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=9,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=base_channels,
num_heads=8,
attn_drop=0.0,
proj_drop=0.0,
dropout_layer=None,
batch_first=False),
ffn_cfgs=dict(
embed_dims=base_channels,
feedforward_channels=base_channels * 8,
num_fcs=2,
act_cfg=dict(type='ReLU', inplace=True),
ffn_drop=0.0,
dropout_layer=None,
add_identity=True),
# the following parameter was not used,
# just make current api happy
feedforward_channels=base_channels * 8,
operation_order=('cross_attn', 'norm', 'self_attn', 'norm',
'ffn', 'norm')),
init_cfg=None),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=2.0,
reduction='mean',
class_weight=[1.0] * num_classes + [0.1]),
loss_mask=dict(
type='CrossEntropyLoss',
use_sigmoid=True,
reduction='mean',
loss_weight=5.0),
loss_dice=dict(
type='DiceLoss',
use_sigmoid=True,
activate=True,
reduction='mean',
naive_dice=True,
eps=1.0,
loss_weight=5.0),
train_cfg=dict(
num_points=256,
oversample_ratio=3.0,
importance_sample_ratio=0.75,
assigner=dict(
type='MaskHungarianAssigner',
cls_cost=dict(type='ClassificationCost', weight=2.0),
mask_cost=dict(
type='CrossEntropyLossCost',
weight=5.0,
use_sigmoid=True),
dice_cost=dict(
type='DiceCost', weight=5.0, pred_act=True, eps=1.0)),
sampler=dict(type='MaskPseudoSampler')),
test_cfg=dict(
panoptic_on=True,
semantic_on=False,
instance_on=True,
max_dets_per_image=100,
object_mask_thr=0.8,
iou_thr=0.8)))
self = Mask2FormerHead(**config)
self.init_weights()
return self
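# A hedged sketch of the naive dice loss configured in `loss_dice` above
# (illustrative: `pred` is assumed sigmoid-activated and flattened per
# mask, `target` a binary tensor of the same shape):
def _naive_dice_sketch(pred, target, eps=1.0):
    inter = 2 * (pred * target).sum(-1)
    denom = pred.sum(-1) + target.sum(-1)
    return 1 - (inter + eps) / (denom + eps)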
| 9,110 | 37.605932 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_maskformer_head.py | import numpy as np
import torch
from mmcv import ConfigDict
from mmdet.core.mask import BitmapMasks
from mmdet.models.dense_heads import MaskFormerHead
def test_maskformer_head_loss():
"""Tests head loss when truth is empty and non-empty."""
base_channels = 64
# batch_input_shape = (128, 160)
img_metas = [{
'batch_input_shape': (128, 160),
'pad_shape': (128, 160, 3),
'img_shape': (126, 160, 3),
'ori_shape': (63, 80, 3)
}, {
'batch_input_shape': (128, 160),
'pad_shape': (128, 160, 3),
'img_shape': (120, 160, 3),
'ori_shape': (60, 80, 3)
}]
feats = [
torch.rand((2, 64 * 2**i, 4 * 2**(3 - i), 5 * 2**(3 - i)))
for i in range(4)
]
num_things_classes = 80
num_stuff_classes = 53
num_classes = num_things_classes + num_stuff_classes
config = ConfigDict(
dict(
type='MaskFormerHead',
in_channels=[base_channels * 2**i for i in range(4)],
feat_channels=base_channels,
out_channels=base_channels,
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
num_queries=100,
pixel_decoder=dict(
type='TransformerEncoderPixelDecoder',
norm_cfg=dict(type='GN', num_groups=32),
act_cfg=dict(type='ReLU'),
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=base_channels,
num_heads=8,
attn_drop=0.1,
proj_drop=0.1,
dropout_layer=None,
batch_first=False),
ffn_cfgs=dict(
embed_dims=base_channels,
feedforward_channels=base_channels * 8,
num_fcs=2,
act_cfg=dict(type='ReLU', inplace=True),
ffn_drop=0.1,
dropout_layer=None,
add_identity=True),
operation_order=('self_attn', 'norm', 'ffn', 'norm'),
norm_cfg=dict(type='LN'),
init_cfg=None,
batch_first=False),
init_cfg=None),
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=base_channels // 2,
normalize=True)),
enforce_decoder_input_project=False,
positional_encoding=dict(
type='SinePositionalEncoding',
num_feats=base_channels // 2,
normalize=True),
transformer_decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=base_channels,
num_heads=8,
attn_drop=0.1,
proj_drop=0.1,
dropout_layer=None,
batch_first=False),
ffn_cfgs=dict(
embed_dims=base_channels,
feedforward_channels=base_channels * 8,
num_fcs=2,
act_cfg=dict(type='ReLU', inplace=True),
ffn_drop=0.1,
dropout_layer=None,
add_identity=True),
# the following parameter was not used,
# just make current api happy
feedforward_channels=base_channels * 8,
operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm')),
init_cfg=None),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
loss_weight=1.0,
reduction='mean',
class_weight=[1.0] * num_classes + [0.1]),
loss_mask=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
reduction='mean',
loss_weight=20.0),
loss_dice=dict(
type='DiceLoss',
use_sigmoid=True,
activate=True,
reduction='mean',
naive_dice=True,
eps=1.0,
loss_weight=1.0),
train_cfg=dict(
assigner=dict(
type='MaskHungarianAssigner',
cls_cost=dict(type='ClassificationCost', weight=1.0),
mask_cost=dict(
type='FocalLossCost', weight=20.0, binary_input=True),
dice_cost=dict(
type='DiceCost', weight=1.0, pred_act=True, eps=1.0)),
sampler=dict(type='MaskPseudoSampler')),
test_cfg=dict(object_mask_thr=0.8, iou_thr=0.8)))
self = MaskFormerHead(**config)
self.init_weights()
all_cls_scores, all_mask_preds = self.forward(feats, img_metas)
# Test that empty ground truth encourages the network to predict background
gt_labels_list = [torch.LongTensor([]), torch.LongTensor([])]
gt_masks_list = [
torch.zeros((0, 128, 160)).long(),
torch.zeros((0, 128, 160)).long()
]
empty_gt_losses = self.loss(all_cls_scores, all_mask_preds, gt_labels_list,
gt_masks_list, img_metas)
# When there is no truth, the cls loss should be nonzero but there should
# be no mask loss.
for key, loss in empty_gt_losses.items():
if 'cls' in key:
assert loss.item() > 0, 'cls loss should be non-zero'
elif 'mask' in key:
assert loss.item(
) == 0, 'there should be no mask loss when there are no true mask'
elif 'dice' in key:
assert loss.item(
) == 0, 'there should be no dice loss when there are no true mask'
# when truth is non-empty then both cls, mask, dice loss should be nonzero
# random inputs
gt_labels_list = [
torch.tensor([10, 100]).long(),
torch.tensor([100, 10]).long()
]
mask1 = torch.zeros((2, 128, 160)).long()
mask1[0, :50] = 1
mask1[1, 50:] = 1
mask2 = torch.zeros((2, 128, 160)).long()
mask2[0, :, :50] = 1
mask2[1, :, 50:] = 1
gt_masks_list = [mask1, mask2]
two_gt_losses = self.loss(all_cls_scores, all_mask_preds, gt_labels_list,
gt_masks_list, img_metas)
for loss in two_gt_losses.values():
assert loss.item() > 0, 'all loss should be non-zero'
# test forward_train
gt_bboxes = None
gt_labels = [
torch.tensor([10]).long(),
torch.tensor([10]).long(),
]
thing_mask1 = np.zeros((1, 128, 160), dtype=np.int32)
thing_mask1[0, :50] = 1
thing_mask2 = np.zeros((1, 128, 160), dtype=np.int32)
thing_mask2[0, :, 50:] = 1
gt_masks = [
BitmapMasks(thing_mask1, 128, 160),
BitmapMasks(thing_mask2, 128, 160),
]
stuff_mask1 = torch.zeros((1, 128, 160)).long()
stuff_mask1[0, :50] = 10
stuff_mask1[0, 50:] = 100
stuff_mask2 = torch.zeros((1, 128, 160)).long()
stuff_mask2[0, :, 50:] = 10
stuff_mask2[0, :, :50] = 100
gt_semantic_seg = [stuff_mask1, stuff_mask2]
self.forward_train(feats, img_metas, gt_bboxes, gt_labels, gt_masks,
gt_semantic_seg)
# test inference mode
self.simple_test(feats, img_metas)
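# A hedged sketch of the sigmoid focal loss configured for `loss_mask`
# above (per-element form from the RetinaNet paper; `target` is a float
# tensor of 0s and 1s shaped like `pred`):
def _sigmoid_focal_loss_sketch(pred, target, gamma=2.0, alpha=0.25):
    import torch.nn.functional as F
    prob = pred.sigmoid()
    pt = prob * target + (1 - prob) * (1 - target)
    alpha_t = alpha * target + (1 - alpha) * (1 - target)
    ce = F.binary_cross_entropy_with_logits(pred, target, reduction='none')
    return alpha_t * (1 - pt).pow(gamma) * ce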
| 8,154 | 38.396135 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_paa_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.models.dense_heads import PAAHead, paa_head
from mmdet.models.dense_heads.paa_head import levels_to_images
def test_paa_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
class mock_skm:
def GaussianMixture(self, *args, **kwargs):
return self
def fit(self, loss):
pass
def predict(self, loss):
            components = np.zeros_like(loss, dtype=np.int64)
return components.reshape(-1)
def score_samples(self, loss):
scores = np.random.random(len(loss))
return scores
paa_head.skm = mock_skm()
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.1,
neg_iou_thr=0.1,
min_pos_iou=0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
self.init_weights()
cls_scores, bbox_preds, iou_preds = self(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
empty_iou_loss = empty_gt_losses['loss_iou']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
    assert empty_iou_loss.item() == 0, (
        'there should be no iou loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, iou_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
onegt_iou_loss = one_gt_losses['loss_iou']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
    assert onegt_iou_loss.item() > 0, 'iou loss should be non-zero'
n, c, h, w = 10, 4, 20, 20
mlvl_tensor = [torch.ones(n, c, h, w) for i in range(5)]
results = levels_to_images(mlvl_tensor)
assert len(results) == n
assert results[0].size() == (h * w * 5, c)
assert self.with_score_voting
self = PAAHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8]),
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.3),
loss_centerness=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=0.5))
cls_scores = [torch.ones(2, 4, 5, 5)]
bbox_preds = [torch.ones(2, 4, 5, 5)]
iou_preds = [torch.ones(2, 1, 5, 5)]
cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
rescale = False
self.get_bboxes(
cls_scores, bbox_preds, iou_preds, img_metas, cfg, rescale=rescale)
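# A hedged sketch of the idea the mocked GaussianMixture stands in for
# (an illustrative stand-in, not PAA's actual two-component EM fit):
# anchors whose loss falls in the low-loss mode are kept as positives.
def _gmm_split_sketch(losses):
    losses = np.asarray(losses, dtype=np.float64)
    threshold = (losses.min() + losses.max()) / 2
    return losses < threshold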
| 4,800 | 34.301471 | 79 | py |
mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_pisa_head.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import PISARetinaHead, PISASSDHead
from mmdet.models.roi_heads import PISARoIHead
def test_pisa_retinanet_head_loss():
"""Tests pisa retinanet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
allowed_border=0,
pos_weight=-1,
debug=False))
self = PISARetinaHead(num_classes=4, in_channels=1, train_cfg=cfg)
# Anchor head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
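# A hedged sketch of the classification-aware regression loss (`carl`)
# configured above (illustrative only: the library version also
# renormalizes the weights to preserve the overall loss scale;
# `pos_cls_scores` are each positive sample's predicted scores for its own
# gt class and `pos_reg_loss` the per-sample box losses):
def _carl_sketch(pos_cls_scores, pos_reg_loss, k=1.0, bias=0.2):
    weights = (bias + (1 - bias) * pos_cls_scores).pow(k)
    return (weights * pos_reg_loss).sum()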
def test_pisa_ssd_head_loss():
"""Tests pisa ssd head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False))
ssd_anchor_generator = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
strides=[1],
ratios=([2], ),
basesize_ratio_range=(0.15, 0.9))
self = PISASSDHead(
num_classes=4,
in_channels=(1, ),
train_cfg=cfg,
anchor_generator=ssd_anchor_generator)
# Anchor head expects a multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(len(self.anchor_generator.strides))
]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
    # For SSD with OHEM, an empty gt yields no positive anchors, and hard
    # negatives are mined at a fixed ratio (#pos:#neg = 1:3) relative to the
    # positives, so both the cls loss and the box loss should be zero.
    empty_cls_loss = sum(empty_gt_losses['loss_cls'])
    empty_box_loss = sum(empty_gt_losses['loss_bbox'])
    assert empty_cls_loss.item() == 0, 'cls loss should be zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_loss = sum(one_gt_losses['loss_bbox'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
def test_pisa_roi_head_loss():
"""Tests pisa roi head loss when truth is empty and non-empty."""
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
match_low_quality=True,
ignore_iof_thr=-1),
sampler=dict(
type='ScoreHLRSampler',
num=4,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True,
k=0.5,
bias=0.),
isr=dict(k=2., bias=0.),
carl=dict(k=1., bias=0.2),
allowed_border=0,
pos_weight=-1,
debug=False))
bbox_roi_extractor = dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=0),
out_channels=1,
featmap_strides=[1])
bbox_head = dict(
type='Shared2FCBBoxHead',
in_channels=1,
fc_out_channels=2,
roi_feat_size=7,
num_classes=4,
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2]),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='L1Loss', loss_weight=1.0))
self = PISARoIHead(bbox_roi_extractor, bbox_head, train_cfg=train_cfg)
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
    # Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 1, s // (2**(i + 2)), s // (2**(i + 2)))
for i in range(1)
]
proposal_list = [
torch.Tensor([[22.6667, 22.8757, 238.6326, 151.8874], [0, 3, 5, 7]])
]
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.forward_train(feat, img_metas, proposal_list,
gt_bboxes, gt_labels,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.forward_train(feat, img_metas, proposal_list,
gt_bboxes, gt_labels, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
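
# ScoreHLRSampler is the PISA-specific sampler: instead of drawing negatives
# uniformly, it ranks them by their highest classification score
# (hierarchical local rank) so the hardest negatives are preferred; its k and
# bias arguments control how the sampled negatives are reweighted.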

mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_sabl_retina_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import SABLRetinaHead
def test_sabl_retina_head_loss():
"""Tests anchor head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
cfg = mmcv.Config(
dict(
assigner=dict(
type='ApproxMaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.0,
ignore_iof_thr=-1),
allowed_border=-1,
pos_weight=-1,
debug=False))
head = SABLRetinaHead(
num_classes=4,
in_channels=3,
feat_channels=10,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
train_cfg=cfg)
if torch.cuda.is_available():
head.cuda()
        # Anchor head expects multiple levels of features per image
feat = [
torch.rand(1, 3, s // (2**(i + 2)), s // (2**(i + 2))).cuda()
for i in range(len(head.approx_anchor_generator.base_anchors))
]
cls_scores, bbox_preds = head.forward(feat)
# Test that empty ground truth encourages the network
# to predict background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes,
gt_labels, img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_cls_loss = sum(empty_gt_losses['loss_bbox_cls'])
empty_box_reg_loss = sum(empty_gt_losses['loss_bbox_reg'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_cls_loss.item() == 0, (
'there should be no box cls loss when there are no true boxes')
assert empty_box_reg_loss.item() == 0, (
'there should be no box reg loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should
# be nonzero for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = head.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = sum(one_gt_losses['loss_cls'])
onegt_box_cls_loss = sum(one_gt_losses['loss_bbox_cls'])
onegt_box_reg_loss = sum(one_gt_losses['loss_bbox_reg'])
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_cls_loss.item() > 0, 'box loss cls should be non-zero'
assert onegt_box_reg_loss.item() > 0, 'box loss reg should be non-zero'

mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_solo_head.py
import pytest
import torch
from mmdet.models.dense_heads import (DecoupledSOLOHead,
DecoupledSOLOLightHead, SOLOHead)
def test_solo_head_loss():
"""Tests solo head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
self = SOLOHead(
num_classes=4,
in_channels=1,
num_grids=[40, 36, 24, 16, 12],
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
mask_preds, cls_preds = self.forward(feat)
# Test that empty ground truth encourages the network to
# predict background.
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_masks = [torch.empty((0, 550, 550))]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(
mask_preds,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
empty_mask_loss = empty_gt_losses['loss_mask']
empty_cls_loss = empty_gt_losses['loss_cls']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_mask_loss.item() == 0, (
'there should be no mask loss when there are no true masks')
    # When truth is non-empty then both cls and mask loss should be nonzero
    # for random inputs.
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
gt_masks = [(torch.rand((1, 256, 256)) > 0.5).float()]
one_gt_losses = self.loss(
mask_preds,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
onegt_mask_loss = one_gt_losses['loss_mask']
onegt_cls_loss = one_gt_losses['loss_cls']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
    # When the lengths of num_grids, scale_ranges and num_levels differ.
with pytest.raises(AssertionError):
SOLOHead(
num_classes=4,
in_channels=1,
num_grids=[36, 24, 16, 12],
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
# When input feature length is not equal to num_levels.
with pytest.raises(AssertionError):
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32]
]
self.forward(feat)
def test_desolo_head_loss():
"""Tests solo head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
self = DecoupledSOLOHead(
num_classes=4,
in_channels=1,
num_grids=[40, 36, 24, 16, 12],
loss_mask=dict(
type='DiceLoss', use_sigmoid=True, activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
mask_preds_x, mask_preds_y, cls_preds = self.forward(feat)
# Test that empty ground truth encourages the network to
# predict background.
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_masks = [torch.empty((0, 550, 550))]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(
mask_preds_x,
mask_preds_y,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
empty_mask_loss = empty_gt_losses['loss_mask']
empty_cls_loss = empty_gt_losses['loss_cls']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_mask_loss.item() == 0, (
'there should be no mask loss when there are no true masks')
    # When truth is non-empty then both cls and mask loss should be nonzero
    # for random inputs.
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
gt_masks = [(torch.rand((1, 256, 256)) > 0.5).float()]
one_gt_losses = self.loss(
mask_preds_x,
mask_preds_y,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
onegt_mask_loss = one_gt_losses['loss_mask']
onegt_cls_loss = one_gt_losses['loss_cls']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
    # When the lengths of num_grids, scale_ranges and num_levels differ.
with pytest.raises(AssertionError):
DecoupledSOLOHead(
num_classes=4,
in_channels=1,
num_grids=[36, 24, 16, 12],
loss_mask=dict(
type='DiceLoss',
use_sigmoid=True,
activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
# When input feature length is not equal to num_levels.
with pytest.raises(AssertionError):
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32]
]
self.forward(feat)
def test_desolo_light_head_loss():
"""Tests solo head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
self = DecoupledSOLOLightHead(
num_classes=4,
in_channels=1,
num_grids=[40, 36, 24, 16, 12],
loss_mask=dict(
type='DiceLoss', use_sigmoid=True, activate=False,
loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
mask_preds_x, mask_preds_y, cls_preds = self.forward(feat)
# Test that empty ground truth encourages the network to
# predict background.
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_masks = [torch.empty((0, 550, 550))]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(
mask_preds_x,
mask_preds_y,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
    # When there is no truth, the cls loss should be nonzero but there should
    # be no mask loss.
empty_mask_loss = empty_gt_losses['loss_mask']
empty_cls_loss = empty_gt_losses['loss_cls']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_mask_loss.item() == 0, (
'there should be no mask loss when there are no true masks')
    # When truth is non-empty then both cls and mask loss should be nonzero
    # for random inputs.
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
gt_masks = [(torch.rand((1, 256, 256)) > 0.5).float()]
one_gt_losses = self.loss(
mask_preds_x,
mask_preds_y,
cls_preds,
gt_labels,
gt_masks,
img_metas,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore)
onegt_mask_loss = one_gt_losses['loss_mask']
onegt_cls_loss = one_gt_losses['loss_cls']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
    # When the lengths of num_grids, scale_ranges and num_levels differ.
with pytest.raises(AssertionError):
DecoupledSOLOLightHead(
num_classes=4,
in_channels=1,
num_grids=[36, 24, 16, 12],
loss_mask=dict(type='DiceLoss', use_sigmoid=True, loss_weight=3.0),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0))
# When input feature length is not equal to num_levels.
with pytest.raises(AssertionError):
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32]
]
self.forward(feat)

mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_tood_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import TOODHead
def test_tood_head_loss():
"""Tests paa head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
initial_epoch=4,
initial_assigner=dict(type='ATSSAssigner', topk=9),
assigner=dict(type='TaskAlignedAssigner', topk=13),
alpha=1,
beta=6,
allowed_border=-1,
pos_weight=-1,
debug=False))
test_cfg = mmcv.Config(
dict(
nms_pre=1000,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.6),
max_per_img=100))
    # activated=True below feeds probabilities (not logits) to the losses,
    # which lets the test run on CPU, where the fused sigmoid Focal Loss op
    # is unavailable
self = TOODHead(
num_classes=80,
in_channels=1,
stacked_convs=6,
feat_channels=256,
anchor_type='anchor_free',
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
octave_base_scale=8,
scales_per_octave=1,
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
initial_loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_cls=dict(
type='QualityFocalLoss',
use_sigmoid=True,
activated=True, # use probability instead of logit as input
beta=2.0,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=2.0),
train_cfg=train_cfg,
test_cfg=test_cfg)
self.init_weights()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [8, 16, 32, 64, 128]
]
cls_scores, bbox_preds = self(feat)
# test initial assigner and losses
self.epoch = 0
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
# test task alignment assigner and losses
self.epoch = 10
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert sum(empty_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(empty_box_loss).item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert sum(onegt_cls_loss).item() > 0, 'cls loss should be non-zero'
assert sum(onegt_box_loss).item() > 0, 'box loss should be non-zero'
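
# The bare `self.epoch` assignments above stand in for what an epoch-based
# hook does during real training: TOOD assigns targets with the initial ATSS
# assigner until `initial_epoch` (4 in this cfg) and with the task-aligned
# assigner afterwards.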

mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_vfnet_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import VFNetHead
def test_vfnet_head_loss():
"""Tests vfnet head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(type='ATSSAssigner', topk=9),
allowed_border=-1,
pos_weight=-1,
debug=False))
# since Focal Loss is not supported on CPU
self = VFNetHead(
num_classes=4,
in_channels=1,
train_cfg=train_cfg,
loss_cls=dict(type='VarifocalLoss', use_sigmoid=True, loss_weight=1.0))
if torch.cuda.is_available():
self.cuda()
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size).cuda()
for feat_size in [4, 8, 16, 32, 64]
]
cls_scores, bbox_preds, bbox_preds_refine = self.forward(feat)
# Test that empty ground truth encourages the network to predict
# background
gt_bboxes = [torch.empty((0, 4)).cuda()]
gt_labels = [torch.LongTensor([]).cuda()]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there
# should be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero
# for random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]).cuda(),
]
gt_labels = [torch.LongTensor([2]).cuda()]
one_gt_losses = self.loss(cls_scores, bbox_preds, bbox_preds_refine,
gt_bboxes, gt_labels, img_metas,
gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'

mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_yolact_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import YOLACTHead, YOLACTProtonet, YOLACTSegmHead
def test_yolact_head_loss():
"""Tests yolact head losses when truth is empty and non-empty."""
s = 550
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.4,
min_pos_iou=0.,
ignore_iof_thr=-1,
gt_max_assign_all=False),
smoothl1_beta=1.,
allowed_border=-1,
pos_weight=-1,
neg_pos_ratio=3,
debug=False,
min_gt_box_wh=[4.0, 4.0]))
bbox_head = YOLACTHead(
num_classes=80,
in_channels=256,
feat_channels=256,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=3,
scales_per_octave=1,
base_sizes=[8, 16, 32, 64, 128],
ratios=[0.5, 1.0, 2.0],
strides=[550.0 / x for x in [69, 35, 18, 9, 5]],
centers=[(550 * 0.5 / x, 550 * 0.5 / x)
for x in [69, 35, 18, 9, 5]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]),
loss_cls=dict(
type='CrossEntropyLoss',
use_sigmoid=False,
reduction='none',
loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.5),
num_head_convs=1,
num_protos=32,
use_ohem=True,
train_cfg=train_cfg)
segm_head = YOLACTSegmHead(
in_channels=256,
num_classes=80,
loss_segm=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0))
mask_head = YOLACTProtonet(
num_classes=80,
in_channels=256,
num_protos=32,
max_masks_to_train=100,
loss_mask_weight=6.125)
feat = [
torch.rand(1, 256, feat_size, feat_size)
for feat_size in [69, 35, 18, 9, 5]
]
cls_score, bbox_pred, coeff_pred = bbox_head.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_masks = [torch.empty((0, 550, 550))]
gt_bboxes_ignore = None
empty_gt_losses, sampling_results = bbox_head.loss(
cls_score,
bbox_pred,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = sum(empty_gt_losses['loss_cls'])
empty_box_loss = sum(empty_gt_losses['loss_bbox'])
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# Test segm head and mask head
segm_head_outs = segm_head(feat[0])
empty_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
empty_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes, img_metas,
sampling_results)
# When there is no truth, the segm and mask loss should be zero.
empty_segm_loss = sum(empty_segm_loss['loss_segm'])
empty_mask_loss = sum(empty_mask_loss['loss_mask'])
assert empty_segm_loss.item() == 0, (
'there should be no segm loss when there are no true boxes')
assert empty_mask_loss == 0, (
'there should be no mask loss when there are no true boxes')
# When truth is non-empty then cls, box, mask, segm loss should be
# nonzero for random inputs.
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
gt_masks = [(torch.rand((1, 550, 550)) > 0.5).float()]
one_gt_losses, sampling_results = bbox_head.loss(
cls_score,
bbox_pred,
gt_bboxes,
gt_labels,
img_metas,
gt_bboxes_ignore=gt_bboxes_ignore)
one_gt_cls_loss = sum(one_gt_losses['loss_cls'])
one_gt_box_loss = sum(one_gt_losses['loss_bbox'])
assert one_gt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert one_gt_box_loss.item() > 0, 'box loss should be non-zero'
one_gt_segm_loss = segm_head.loss(segm_head_outs, gt_masks, gt_labels)
mask_pred = mask_head(feat[0], coeff_pred, gt_bboxes, img_metas,
sampling_results)
one_gt_mask_loss = mask_head.loss(mask_pred, gt_masks, gt_bboxes,
img_metas, sampling_results)
one_gt_segm_loss = sum(one_gt_segm_loss['loss_segm'])
one_gt_mask_loss = sum(one_gt_mask_loss['loss_mask'])
assert one_gt_segm_loss.item() > 0, 'segm loss should be non-zero'
assert one_gt_mask_loss.item() > 0, 'mask loss should be non-zero'

mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_yolof_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.dense_heads import YOLOFHead
def test_yolof_head_loss():
"""Tests yolof head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='UniformAssigner',
pos_ignore_thr=0.15,
neg_ignore_thr=0.7),
allowed_border=-1,
pos_weight=-1,
debug=False))
self = YOLOFHead(
num_classes=4,
in_channels=1,
reg_decoded_bbox=True,
train_cfg=train_cfg,
anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[1, 2, 4, 8, 16],
strides=[32]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1., 1., 1., 1.],
add_ctr_clamp=True,
ctr_clamp=32),
loss_cls=dict(
type='FocalLoss',
use_sigmoid=True,
gamma=2.0,
alpha=0.25,
loss_weight=1.0),
loss_bbox=dict(type='GIoULoss', loss_weight=1.0))
feat = [torch.rand(1, 1, s // 32, s // 32)]
cls_scores, bbox_preds = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
gt_bboxes_ignore = None
empty_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
# When there is no truth, the cls loss should be nonzero but there should
# be no box loss.
empty_cls_loss = empty_gt_losses['loss_cls']
empty_box_loss = empty_gt_losses['loss_bbox']
assert empty_cls_loss.item() > 0, 'cls loss should be non-zero'
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, gt_bboxes, gt_labels,
img_metas, gt_bboxes_ignore)
onegt_cls_loss = one_gt_losses['loss_cls']
onegt_box_loss = one_gt_losses['loss_bbox']
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
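
# reg_decoded_bbox=True makes the head decode its predictions back to
# absolute boxes before computing the regression loss, which is required for
# an IoU-based loss such as the GIoULoss configured here.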

mmdetection | mmdetection-master/tests/test_models/test_dense_heads/test_yolox_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.cnn import ConvModule, DepthwiseSeparableConvModule
from mmdet.models.dense_heads import YOLOXHead
def test_yolox_head_loss():
"""Tests yolox head loss when truth is empty and non-empty."""
s = 256
img_metas = [{
'img_shape': (s, s, 3),
'scale_factor': 1,
'pad_shape': (s, s, 3)
}]
train_cfg = mmcv.Config(
dict(
assigner=dict(
type='SimOTAAssigner',
center_radius=2.5,
candidate_topk=10,
iou_weight=3.0,
cls_weight=1.0)))
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=False, train_cfg=train_cfg)
assert not self.use_l1
assert isinstance(self.multi_level_cls_convs[0][0], ConvModule)
feat = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16]
]
cls_scores, bbox_preds, objectnesses = self.forward(feat)
# Test that empty ground truth encourages the network to predict background
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas)
    # When there is no truth, no positives are assigned, so the cls and box
    # losses should be zero while the objectness loss should be nonzero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
assert empty_cls_loss.item() == 0, (
'there should be no cls loss when there are no true boxes')
assert empty_box_loss.item() == 0, (
'there should be no box loss when there are no true boxes')
assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'
# When truth is non-empty then both cls and box loss should be nonzero for
# random inputs
self = YOLOXHead(
num_classes=4, in_channels=1, use_depthwise=True, train_cfg=train_cfg)
assert isinstance(self.multi_level_cls_convs[0][0],
DepthwiseSeparableConvModule)
self.use_l1 = True
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
one_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses, gt_bboxes,
gt_labels, img_metas)
onegt_cls_loss = one_gt_losses['loss_cls'].sum()
onegt_box_loss = one_gt_losses['loss_bbox'].sum()
onegt_obj_loss = one_gt_losses['loss_obj'].sum()
onegt_l1_loss = one_gt_losses['loss_l1'].sum()
assert onegt_cls_loss.item() > 0, 'cls loss should be non-zero'
assert onegt_box_loss.item() > 0, 'box loss should be non-zero'
assert onegt_obj_loss.item() > 0, 'obj loss should be non-zero'
assert onegt_l1_loss.item() > 0, 'l1 loss should be non-zero'
    # Test ground truth out of bounds
gt_bboxes = [torch.Tensor([[s * 4, s * 4, s * 4 + 10, s * 4 + 10]])]
gt_labels = [torch.LongTensor([2])]
empty_gt_losses = self.loss(cls_scores, bbox_preds, objectnesses,
gt_bboxes, gt_labels, img_metas)
    # When the gt_bboxes are out of bounds, the assign results should be
    # empty, so the cls and bbox losses should be zero.
empty_cls_loss = empty_gt_losses['loss_cls'].sum()
empty_box_loss = empty_gt_losses['loss_bbox'].sum()
empty_obj_loss = empty_gt_losses['loss_obj'].sum()
assert empty_cls_loss.item() == 0, (
'there should be no cls loss when gt_bboxes out of bound')
assert empty_box_loss.item() == 0, (
'there should be no box loss when gt_bboxes out of bound')
assert empty_obj_loss.item() > 0, 'objectness loss should be non-zero'

mmdetection | mmdetection-master/tests/test_models/test_roi_heads/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import _dummy_bbox_sampling
__all__ = ['_dummy_bbox_sampling']

mmdetection | mmdetection-master/tests/test_models/test_roi_heads/test_bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pytest
import torch
from mmdet.core import bbox2roi
from mmdet.models.roi_heads.bbox_heads import BBoxHead
from .utils import _dummy_bbox_sampling
def test_bbox_head_loss():
"""Tests bbox head loss when truth is empty and non-empty."""
self = BBoxHead(in_channels=8, roi_feat_size=3)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
target_cfg = mmcv.Config(dict(pos_weight=1))
# Test bbox loss when truth is empty
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
rois = bbox2roi([res.bboxes for res in sampling_results])
dummy_feats = torch.rand(num_sampled, 8 * 3 * 3)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
assert losses.get('loss_bbox', 0) == 0, 'empty gt loss should be zero'
# Test bbox loss when truth is non-empty
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8 * 3 * 3)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
assert losses.get('loss_bbox', 0) > 0, 'box-loss should be non-zero'
@pytest.mark.parametrize('num_sample', [0, 1, 2])
def test_bbox_head_get_bboxes(num_sample):
self = BBoxHead(reg_class_agnostic=True)
num_class = 6
rois = torch.rand((num_sample, 5))
cls_score = torch.rand((num_sample, num_class))
bbox_pred = torch.rand((num_sample, 4))
scale_factor = np.array([2.0, 2.0, 2.0, 2.0])
det_bboxes, det_labels = self.get_bboxes(
rois, cls_score, bbox_pred, None, scale_factor, rescale=True)
if num_sample == 0:
assert len(det_bboxes) == 0 and len(det_labels) == 0
else:
assert det_bboxes.shape == bbox_pred.shape
assert det_labels.shape == cls_score.shape
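
    # With cfg=None, get_bboxes skips score thresholding and NMS and returns
    # the decoded boxes together with the raw per-class scores, hence the
    # exact shape matches above; rescale=True maps the boxes back to the
    # original image scale by dividing by scale_factor.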
def test_refine_boxes():
"""Mirrors the doctest in
``mmdet.models.bbox_heads.bbox_head.BBoxHead.refine_boxes`` but checks for
multiple values of n_roi / n_img."""
self = BBoxHead(reg_class_agnostic=True)
test_settings = [
# Corner case: less rois than images
{
'n_roi': 2,
'n_img': 4,
'rng': 34285940
},
# Corner case: no images
{
'n_roi': 0,
'n_img': 0,
'rng': 52925222
},
# Corner cases: few images / rois
{
'n_roi': 1,
'n_img': 1,
'rng': 1200281
},
{
'n_roi': 2,
'n_img': 1,
'rng': 1200282
},
{
'n_roi': 2,
'n_img': 2,
'rng': 1200283
},
{
'n_roi': 1,
'n_img': 2,
'rng': 1200284
},
# Corner case: no rois few images
{
'n_roi': 0,
'n_img': 1,
'rng': 23955860
},
{
'n_roi': 0,
'n_img': 2,
'rng': 25830516
},
# Corner case: no rois many images
{
'n_roi': 0,
'n_img': 10,
'rng': 671346
},
{
'n_roi': 0,
'n_img': 20,
'rng': 699807
},
        # Corner case: comparable numbers of rois and images
{
'n_roi': 20,
'n_img': 20,
'rng': 1200238
},
{
'n_roi': 10,
'n_img': 20,
'rng': 1200238
},
{
'n_roi': 5,
'n_img': 5,
'rng': 1200238
},
# ----------------------------------
# Common case: more rois than images
{
'n_roi': 100,
'n_img': 1,
'rng': 337156
},
{
'n_roi': 150,
'n_img': 2,
'rng': 275898
},
{
'n_roi': 500,
'n_img': 5,
'rng': 4903221
},
]
for demokw in test_settings:
try:
n_roi = demokw['n_roi']
n_img = demokw['n_img']
rng = demokw['rng']
print(f'Test refine_boxes case: {demokw!r}')
tup = _demodata_refine_boxes(n_roi, n_img, rng=rng)
rois, labels, bbox_preds, pos_is_gts, img_metas = tup
bboxes_list = self.refine_bboxes(rois, labels, bbox_preds,
pos_is_gts, img_metas)
assert len(bboxes_list) == n_img
assert sum(map(len, bboxes_list)) <= n_roi
assert all(b.shape[1] == 4 for b in bboxes_list)
except Exception:
print(f'Test failed with demokw={demokw!r}')
raise
def _demodata_refine_boxes(n_roi, n_img, rng=0):
"""Create random test data for the
``mmdet.models.bbox_heads.bbox_head.BBoxHead.refine_boxes`` method."""
import numpy as np
from mmdet.core.bbox.demodata import ensure_rng, random_boxes
try:
import kwarray
except ImportError:
import pytest
pytest.skip('kwarray is required for this test')
scale = 512
rng = ensure_rng(rng)
img_metas = [{'img_shape': (scale, scale)} for _ in range(n_img)]
# Create rois in the expected format
roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)
if n_img == 0:
assert n_roi == 0, 'cannot have any rois if there are no images'
img_ids = torch.empty((0, ), dtype=torch.long)
roi_boxes = torch.empty((0, 4), dtype=torch.float32)
else:
img_ids = rng.randint(0, n_img, (n_roi, ))
img_ids = torch.from_numpy(img_ids)
rois = torch.cat([img_ids[:, None].float(), roi_boxes], dim=1)
# Create other args
labels = rng.randint(0, 2, (n_roi, ))
labels = torch.from_numpy(labels).long()
bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)
# For each image, pretend random positive boxes are gts
    is_label_pos = (labels.numpy() > 0).astype(int)
lbl_per_img = kwarray.group_items(is_label_pos, img_ids.numpy())
pos_per_img = [sum(lbl_per_img.get(gid, [])) for gid in range(n_img)]
# randomly generate with numpy then sort with torch
_pos_is_gts = [
rng.randint(0, 2, (npos, )).astype(np.uint8) for npos in pos_per_img
]
pos_is_gts = [
torch.from_numpy(p).sort(descending=True)[0] for p in _pos_is_gts
]
return rois, labels, bbox_preds, pos_is_gts, img_metas
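
# Hand-wired usage sketch (mirrors test_refine_boxes; shapes illustrative):
#
#     self = BBoxHead(reg_class_agnostic=True)
#     rois, labels, bbox_preds, pos_is_gts, img_metas = \
#         _demodata_refine_boxes(n_roi=100, n_img=2, rng=0)
#     bboxes_list = self.refine_bboxes(rois, labels, bbox_preds, pos_is_gts,
#                                      img_metas)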

mmdetection | mmdetection-master/tests/test_models/test_roi_heads/test_mask_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.models.roi_heads.mask_heads import (DynamicMaskHead, FCNMaskHead,
MaskIoUHead)
from .utils import _dummy_bbox_sampling
def test_mask_head_loss():
"""Test mask head loss when mask target is empty."""
self = FCNMaskHead(
num_convs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
num_classes=8)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
# create dummy mask
import numpy as np
from mmdet.core import BitmapMasks
dummy_mask = np.random.randint(0, 2, (1, 160, 240), dtype=np.uint8)
gt_masks = [BitmapMasks(dummy_mask, 160, 240)]
# create dummy train_cfg
train_cfg = mmcv.Config(dict(mask_size=12, mask_thr_binary=0.5))
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 8, 6, 6)
mask_pred = self.forward(dummy_feats)
mask_targets = self.get_targets(sampling_results, gt_masks, train_cfg)
pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])
loss_mask = self.loss(mask_pred, mask_targets, pos_labels)
onegt_mask_loss = sum(loss_mask['loss_mask'])
assert onegt_mask_loss.item() > 0, 'mask loss should be non-zero'
# test mask_iou_head
mask_iou_head = MaskIoUHead(
num_convs=1,
num_fcs=1,
roi_feat_size=6,
in_channels=8,
conv_out_channels=8,
fc_out_channels=8,
num_classes=8)
pos_mask_pred = mask_pred[range(mask_pred.size(0)), pos_labels]
mask_iou_pred = mask_iou_head(dummy_feats, pos_mask_pred)
pos_mask_iou_pred = mask_iou_pred[range(mask_iou_pred.size(0)), pos_labels]
mask_iou_targets = mask_iou_head.get_targets(sampling_results, gt_masks,
pos_mask_pred, mask_targets,
train_cfg)
loss_mask_iou = mask_iou_head.loss(pos_mask_iou_pred, mask_iou_targets)
onegt_mask_iou_loss = loss_mask_iou['loss_mask_iou'].sum()
assert onegt_mask_iou_loss.item() >= 0
# test dynamic_mask_head
dummy_proposal_feats = torch.rand(num_sampled, 8)
dynamic_mask_head = DynamicMaskHead(
dynamic_conv_cfg=dict(
type='DynamicConv',
in_channels=8,
feat_channels=8,
out_channels=8,
input_feat_shape=6,
with_proj=False,
act_cfg=dict(type='ReLU', inplace=True),
norm_cfg=dict(type='LN')),
num_convs=1,
num_classes=8,
in_channels=8,
roi_feat_size=6)
mask_pred = dynamic_mask_head(dummy_feats, dummy_proposal_feats)
mask_target = dynamic_mask_head.get_targets(sampling_results, gt_masks,
train_cfg)
loss_mask = dynamic_mask_head.loss(mask_pred, mask_target, pos_labels)
loss_mask = loss_mask['loss_mask'].sum()
assert loss_mask.item() >= 0

mmdetection | mmdetection-master/tests/test_models/test_roi_heads/test_roi_extractor.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.roi_heads.roi_extractors import GenericRoIExtractor
def test_groie():
# test with pre/post
cfg = dict(
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32],
pre_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False,
),
post_cfg=dict(
type='ConvModule',
in_channels=256,
out_channels=256,
kernel_size=5,
padding=2,
inplace=False))
groie = GenericRoIExtractor(**cfg)
feats = (
torch.rand((1, 256, 200, 336)),
torch.rand((1, 256, 100, 168)),
torch.rand((1, 256, 50, 84)),
torch.rand((1, 256, 25, 42)),
)
rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
res = groie(feats, rois)
assert res.shape == torch.Size([1, 256, 7, 7])
# test w.o. pre/post
cfg = dict(
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32])
groie = GenericRoIExtractor(**cfg)
feats = (
torch.rand((1, 256, 200, 336)),
torch.rand((1, 256, 100, 168)),
torch.rand((1, 256, 50, 84)),
torch.rand((1, 256, 25, 42)),
)
rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
res = groie(feats, rois)
assert res.shape == torch.Size([1, 256, 7, 7])
# test w.o. pre/post concat
cfg = dict(
aggregation='concat',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256 * 4,
featmap_strides=[4, 8, 16, 32])
groie = GenericRoIExtractor(**cfg)
feats = (
torch.rand((1, 256, 200, 336)),
torch.rand((1, 256, 100, 168)),
torch.rand((1, 256, 50, 84)),
torch.rand((1, 256, 25, 42)),
)
rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
res = groie(feats, rois)
assert res.shape == torch.Size([1, 1024, 7, 7])
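    # With aggregation='concat' the per-level RoI features are concatenated
    # along the channel dim, so out_channels must equal
    # num_levels * per_level_channels (4 * 256 = 1024 here); the default
    # 'sum' aggregation keeps the channel count unchanged.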
    # test an unsupported aggregation method
with pytest.raises(AssertionError):
cfg = dict(
aggregation='not support',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=1024,
featmap_strides=[4, 8, 16, 32])
_ = GenericRoIExtractor(**cfg)
    # test concat with a mismatched output channel number
cfg = dict(
aggregation='concat',
roi_layer=dict(type='RoIAlign', output_size=7, sampling_ratio=2),
out_channels=256 * 5, # 256*5 != 256*4
featmap_strides=[4, 8, 16, 32])
groie = GenericRoIExtractor(**cfg)
feats = (
torch.rand((1, 256, 200, 336)),
torch.rand((1, 256, 100, 168)),
torch.rand((1, 256, 50, 84)),
torch.rand((1, 256, 25, 42)),
)
rois = torch.tensor([[0.0000, 587.8285, 52.1405, 886.2484, 341.5644]])
    # out_channels does not equal the sum of the per-level feature channels
with pytest.raises(AssertionError):
_ = groie(feats, rois)

mmdetection | mmdetection-master/tests/test_models/test_roi_heads/test_sabl_bbox_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmdet.core import bbox2roi
from mmdet.models.roi_heads.bbox_heads import SABLHead
from .utils import _dummy_bbox_sampling
def test_sabl_bbox_head_loss():
"""Tests bbox head loss when truth is empty and non-empty."""
self = SABLHead(
num_classes=4,
cls_in_channels=3,
reg_in_channels=3,
cls_out_channels=3,
reg_offset_out_channels=3,
reg_cls_out_channels=3,
roi_feat_size=7)
# Dummy proposals
proposal_list = [
torch.Tensor([[23.6667, 23.8757, 228.6326, 153.8874]]),
]
target_cfg = mmcv.Config(dict(pos_weight=1))
# Test bbox loss when truth is empty
gt_bboxes = [torch.empty((0, 4))]
gt_labels = [torch.LongTensor([])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
rois = bbox2roi([res.bboxes for res in sampling_results])
dummy_feats = torch.rand(num_sampled, 3, 7, 7)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
assert losses.get('loss_cls', 0) > 0, 'cls-loss should be non-zero'
assert losses.get('loss_bbox_cls',
0) == 0, 'empty gt bbox-cls-loss should be zero'
assert losses.get('loss_bbox_reg',
0) == 0, 'empty gt bbox-reg-loss should be zero'
# Test bbox loss when truth is non-empty
gt_bboxes = [
torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]]),
]
gt_labels = [torch.LongTensor([2])]
sampling_results = _dummy_bbox_sampling(proposal_list, gt_bboxes,
gt_labels)
rois = bbox2roi([res.bboxes for res in sampling_results])
bbox_targets = self.get_targets(sampling_results, gt_bboxes, gt_labels,
target_cfg)
labels, label_weights, bbox_targets, bbox_weights = bbox_targets
# Create dummy features "extracted" for each sampled bbox
num_sampled = sum(len(res.bboxes) for res in sampling_results)
dummy_feats = torch.rand(num_sampled, 3, 7, 7)
cls_scores, bbox_preds = self.forward(dummy_feats)
losses = self.loss(cls_scores, bbox_preds, rois, labels, label_weights,
bbox_targets, bbox_weights)
    assert losses.get('loss_bbox_cls',
                      0) > 0, 'bbox-cls-loss should be non-zero'
    assert losses.get('loss_bbox_reg',
                      0) > 0, 'bbox-reg-loss should be non-zero'

mmdetection | mmdetection-master/tests/test_models/test_roi_heads/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmdet.core import build_assigner, build_sampler
def _dummy_bbox_sampling(proposal_list, gt_bboxes, gt_labels):
"""Create sample results that can be passed to BBoxHead.get_targets."""
num_imgs = 1
feat = torch.rand(1, 1, 3, 3)
assign_config = dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1)
sampler_config = dict(
type='RandomSampler',
num=512,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=True)
bbox_assigner = build_assigner(assign_config)
bbox_sampler = build_sampler(sampler_config)
gt_bboxes_ignore = [None for _ in range(num_imgs)]
sampling_results = []
for i in range(num_imgs):
assign_result = bbox_assigner.assign(proposal_list[i], gt_bboxes[i],
gt_bboxes_ignore[i], gt_labels[i])
sampling_result = bbox_sampler.sample(
assign_result,
proposal_list[i],
gt_bboxes[i],
gt_labels[i],
feats=feat)
sampling_results.append(sampling_result)
return sampling_results
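
# Minimal usage sketch (values are illustrative):
#
#     proposals = [torch.Tensor([[0., 0., 10., 10.]])]
#     gt_bboxes = [torch.Tensor([[1., 1., 9., 9.]])]
#     gt_labels = [torch.LongTensor([0])]
#     sampling_results = _dummy_bbox_sampling(proposals, gt_bboxes, gt_labels)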

mmdetection | mmdetection-master/tests/test_models/test_seg_heads/test_maskformer_fusion_head.py
import pytest
import torch
from mmcv import ConfigDict
from mmdet.models.seg_heads.panoptic_fusion_heads import MaskFormerFusionHead
def test_maskformer_fusion_head():
img_metas = [
{
'batch_input_shape': (128, 160),
'img_shape': (126, 160, 3),
'ori_shape': (63, 80, 3),
'pad_shape': (128, 160, 3)
},
]
num_things_classes = 80
num_stuff_classes = 53
num_classes = num_things_classes + num_stuff_classes
config = ConfigDict(
type='MaskFormerFusionHead',
num_things_classes=num_things_classes,
num_stuff_classes=num_stuff_classes,
loss_panoptic=None,
test_cfg=dict(
panoptic_on=True,
semantic_on=False,
instance_on=True,
max_per_image=100,
object_mask_thr=0.8,
iou_thr=0.8,
filter_low_score=False),
init_cfg=None)
self = MaskFormerFusionHead(**config)
# test forward_train
assert self.forward_train() == dict()
mask_cls_results = torch.rand((1, 100, num_classes + 1))
mask_pred_results = torch.rand((1, 100, 128, 160))
# test panoptic_postprocess and instance_postprocess
results = self.simple_test(mask_cls_results, mask_pred_results, img_metas)
assert 'ins_results' in results[0] and 'pan_results' in results[0]
# test semantic_postprocess
config.test_cfg.semantic_on = True
with pytest.raises(AssertionError):
self.simple_test(mask_cls_results, mask_pred_results, img_metas)
with pytest.raises(NotImplementedError):
self.semantic_postprocess(mask_cls_results, mask_pred_results)

mmdetection | mmdetection-master/tests/test_models/test_utils/test_brick_wrappers.py
from unittest.mock import patch
import torch
import torch.nn as nn
import torch.nn.functional as F
from mmdet.models.utils import AdaptiveAvgPool2d, adaptive_avg_pool2d
if torch.__version__ != 'parrots':
torch_version = '1.7'
else:
torch_version = 'parrots'
@patch('torch.__version__', torch_version)
def test_adaptive_avg_pool2d():
# Test the empty batch dimension
# Test the two input conditions
x_empty = torch.randn(0, 3, 4, 5)
# 1. tuple[int, int]
wrapper_out = adaptive_avg_pool2d(x_empty, (2, 2))
assert wrapper_out.shape == (0, 3, 2, 2)
# 2. int
wrapper_out = adaptive_avg_pool2d(x_empty, 2)
assert wrapper_out.shape == (0, 3, 2, 2)
    # wrapper op with a normal (non-empty) 4-dim input
x_normal = torch.randn(3, 3, 4, 5)
wrapper_out = adaptive_avg_pool2d(x_normal, (2, 2))
ref_out = F.adaptive_avg_pool2d(x_normal, (2, 2))
assert wrapper_out.shape == (3, 3, 2, 2)
assert torch.equal(wrapper_out, ref_out)
wrapper_out = adaptive_avg_pool2d(x_normal, 2)
ref_out = F.adaptive_avg_pool2d(x_normal, 2)
assert wrapper_out.shape == (3, 3, 2, 2)
assert torch.equal(wrapper_out, ref_out)
@patch('torch.__version__', torch_version)
def test_AdaptiveAvgPool2d():
# Test the empty batch dimension
x_empty = torch.randn(0, 3, 4, 5)
# Test the four input conditions
# 1. tuple[int, int]
wrapper = AdaptiveAvgPool2d((2, 2))
wrapper_out = wrapper(x_empty)
assert wrapper_out.shape == (0, 3, 2, 2)
# 2. int
wrapper = AdaptiveAvgPool2d(2)
wrapper_out = wrapper(x_empty)
assert wrapper_out.shape == (0, 3, 2, 2)
# 3. tuple[None, int]
wrapper = AdaptiveAvgPool2d((None, 2))
wrapper_out = wrapper(x_empty)
assert wrapper_out.shape == (0, 3, 4, 2)
    # 4. tuple[int, None]
wrapper = AdaptiveAvgPool2d((2, None))
wrapper_out = wrapper(x_empty)
assert wrapper_out.shape == (0, 3, 2, 5)
# Test the normal batch dimension
x_normal = torch.randn(3, 3, 4, 5)
wrapper = AdaptiveAvgPool2d((2, 2))
ref = nn.AdaptiveAvgPool2d((2, 2))
wrapper_out = wrapper(x_normal)
ref_out = ref(x_normal)
assert wrapper_out.shape == (3, 3, 2, 2)
assert torch.equal(wrapper_out, ref_out)
wrapper = AdaptiveAvgPool2d(2)
ref = nn.AdaptiveAvgPool2d(2)
wrapper_out = wrapper(x_normal)
ref_out = ref(x_normal)
assert wrapper_out.shape == (3, 3, 2, 2)
assert torch.equal(wrapper_out, ref_out)
wrapper = AdaptiveAvgPool2d((None, 2))
ref = nn.AdaptiveAvgPool2d((None, 2))
wrapper_out = wrapper(x_normal)
ref_out = ref(x_normal)
assert wrapper_out.shape == (3, 3, 4, 2)
assert torch.equal(wrapper_out, ref_out)
wrapper = AdaptiveAvgPool2d((2, None))
ref = nn.AdaptiveAvgPool2d((2, None))
wrapper_out = wrapper(x_normal)
ref_out = ref(x_normal)
assert wrapper_out.shape == (3, 3, 2, 5)
assert torch.equal(wrapper_out, ref_out)
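
# The wrappers exist mainly to support empty-batch inputs on older PyTorch,
# which is what patching torch.__version__ to '1.7' above forces; on newer
# versions they delegate to the builtin op, so outputs must match exactly
# (torch.equal) rather than approximately.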

mmdetection | mmdetection-master/tests/test_models/test_utils/test_conv_upsample.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import ConvUpsample
@pytest.mark.parametrize('num_layers', [0, 1, 2])
def test_conv_upsample(num_layers):
num_upsample = num_layers if num_layers > 0 else 0
num_layers = num_layers if num_layers > 0 else 1
layer = ConvUpsample(
10,
5,
num_layers=num_layers,
num_upsample=num_upsample,
conv_cfg=None,
norm_cfg=None)
size = 5
x = torch.randn((1, 10, size, size))
size = size * pow(2, num_upsample)
x = layer(x)
assert x.shape[-2:] == (size, size)
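
# Each upsample layer doubles the spatial resolution, so with num_upsample=2
# the 5x5 input above must come out as 20x20 (5 * 2**2), which is exactly
# what the shape assertion checks.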

mmdetection | mmdetection-master/tests/test_models/test_utils/test_inverted_residual.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.cnn import is_norm
from torch.nn.modules import GroupNorm
from mmdet.models.utils import InvertedResidual, SELayer
def test_inverted_residual():
with pytest.raises(AssertionError):
# stride must be in [1, 2]
InvertedResidual(16, 16, 32, stride=3)
with pytest.raises(AssertionError):
# se_cfg must be None or dict
InvertedResidual(16, 16, 32, se_cfg=list())
with pytest.raises(AssertionError):
        # in_channels and mid_channels must be the same if
# with_expand_conv is False
InvertedResidual(16, 16, 32, with_expand_conv=False)
# Test InvertedResidual forward, stride=1
block = InvertedResidual(16, 16, 32, stride=1)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert getattr(block, 'se', None) is None
assert block.with_res_shortcut
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward, stride=2
block = InvertedResidual(16, 16, 32, stride=2)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert not block.with_res_shortcut
assert x_out.shape == torch.Size((1, 16, 28, 28))
# Test InvertedResidual forward with se layer
se_cfg = dict(channels=32)
block = InvertedResidual(16, 16, 32, stride=1, se_cfg=se_cfg)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert isinstance(block.se, SELayer)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward, with_expand_conv=False
block = InvertedResidual(32, 16, 32, with_expand_conv=False)
x = torch.randn(1, 32, 56, 56)
x_out = block(x)
assert getattr(block, 'expand_conv', None) is None
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with GroupNorm
block = InvertedResidual(
16, 16, 32, norm_cfg=dict(type='GN', num_groups=2))
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
for m in block.modules():
if is_norm(m):
assert isinstance(m, GroupNorm)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with HSigmoid
block = InvertedResidual(16, 16, 32, act_cfg=dict(type='HSigmoid'))
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert x_out.shape == torch.Size((1, 16, 56, 56))
# Test InvertedResidual forward with checkpoint
block = InvertedResidual(16, 16, 32, with_cp=True)
x = torch.randn(1, 16, 56, 56)
x_out = block(x)
assert block.with_cp
assert x_out.shape == torch.Size((1, 16, 56, 56))

mmdetection | mmdetection-master/tests/test_models/test_utils/test_model_misc.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from torch.autograd import gradcheck
from mmdet.models.utils import interpolate_as, sigmoid_geometric_mean
def test_interpolate_as():
source = torch.rand((1, 5, 4, 4))
target = torch.rand((1, 1, 16, 16))
# Test 4D source and target
result = interpolate_as(source, target)
assert result.shape == torch.Size((1, 5, 16, 16))
# Test 3D target
result = interpolate_as(source, target.squeeze(0))
assert result.shape == torch.Size((1, 5, 16, 16))
# Test 3D source
result = interpolate_as(source.squeeze(0), target)
assert result.shape == torch.Size((5, 16, 16))
# Test type(target) == np.ndarray
target = np.random.rand(16, 16)
result = interpolate_as(source.squeeze(0), target)
assert result.shape == torch.Size((5, 16, 16))
def test_sigmoid_geometric_mean():
x = torch.randn(20, 20, dtype=torch.double, requires_grad=True)
y = torch.randn(20, 20, dtype=torch.double, requires_grad=True)
inputs = (x, y)
test = gradcheck(sigmoid_geometric_mean, inputs, eps=1e-6, atol=1e-4)
assert test
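
# sigmoid_geometric_mean(x, y) evaluates sqrt(sigmoid(x) * sigmoid(y))
# through a custom autograd Function, so double-precision gradcheck is the
# appropriate way to verify its hand-written backward pass.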

mmdetection | mmdetection-master/tests/test_models/test_utils/test_position_encoding.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.models.utils import (LearnedPositionalEncoding,
SinePositionalEncoding)
def test_sine_positional_encoding(num_feats=16, batch_size=2):
# test invalid type of scale
with pytest.raises(AssertionError):
module = SinePositionalEncoding(
num_feats, scale=(3., ), normalize=True)
module = SinePositionalEncoding(num_feats)
h, w = 10, 6
mask = (torch.rand(batch_size, h, w) > 0.5).to(torch.int)
assert not module.normalize
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
# set normalize
module = SinePositionalEncoding(num_feats, normalize=True)
assert module.normalize
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
def test_learned_positional_encoding(num_feats=16,
row_num_embed=10,
col_num_embed=10,
batch_size=2):
module = LearnedPositionalEncoding(num_feats, row_num_embed, col_num_embed)
assert module.row_embed.weight.shape == (row_num_embed, num_feats)
assert module.col_embed.weight.shape == (col_num_embed, num_feats)
h, w = 10, 6
mask = torch.rand(batch_size, h, w) > 0.5
out = module(mask)
assert out.shape == (batch_size, num_feats * 2, h, w)
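
# Both encodings produce 2 * num_feats output channels because independent
# x- and y-direction embeddings of num_feats channels each are concatenated
# at every position, which the shape assertions above rely on.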

mmdetection | mmdetection-master/tests/test_models/test_utils/test_se_layer.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn.functional as F
from mmcv.cnn import constant_init
from mmdet.models.utils import DyReLU, SELayer
def test_se_layer():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal to 2
SELayer(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
SELayer(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test SELayer forward
layer = SELayer(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
def test_dyrelu():
with pytest.raises(AssertionError):
# act_cfg sequence length must equal to 2
DyReLU(channels=32, act_cfg=(dict(type='ReLU'), ))
with pytest.raises(AssertionError):
# act_cfg sequence must be a tuple of dict
DyReLU(channels=32, act_cfg=[dict(type='ReLU'), dict(type='ReLU')])
# Test DyReLU forward
layer = DyReLU(channels=32)
layer.init_weights()
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
assert x_out.shape == torch.Size((1, 32, 10, 10))
# DyReLU should act as standard (static) ReLU
# when eliminating the effect of SE-like module
layer = DyReLU(channels=32)
constant_init(layer.conv2.conv, 0)
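    # With conv2's weights and bias zeroed, its sigmoid-gated output is a
    # constant 0.5, which is assumed to map to coefficients
    # (a1, b1, a2, b2) = (1, 0, 0, 0), so max(a1 * x + b1, a2 * x + b2)
    # collapses to max(x, 0), i.e. a plain ReLU.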
layer.train()
x = torch.randn((1, 32, 10, 10))
x_out = layer(x)
relu_out = F.relu(x)
assert torch.equal(x_out, relu_out)
mmdetection | mmdetection-master/tests/test_models/test_utils/test_transformer.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.utils import ConfigDict
from mmdet.models.utils.transformer import (AdaptivePadding,
DetrTransformerDecoder,
DetrTransformerEncoder, PatchEmbed,
PatchMerging, Transformer)
def test_adaptive_padding():
for padding in ('same', 'corner'):
kernel_size = 16
stride = 16
dilation = 1
input = torch.rand(1, 1, 15, 17)
pool = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
out = pool(input)
# padding to divisible by 16
assert (out.shape[2], out.shape[3]) == (16, 32)
input = torch.rand(1, 1, 16, 17)
out = pool(input)
# padding to divisible by 16
assert (out.shape[2], out.shape[3]) == (16, 32)
kernel_size = (2, 2)
stride = (2, 2)
dilation = (1, 1)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
input = torch.rand(1, 1, 11, 13)
out = adap_pad(input)
# padding to divisible by 2
assert (out.shape[2], out.shape[3]) == (12, 14)
kernel_size = (2, 2)
stride = (10, 10)
dilation = (1, 1)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
input = torch.rand(1, 1, 10, 13)
out = adap_pad(input)
# no padding
assert (out.shape[2], out.shape[3]) == (10, 13)
kernel_size = (11, 11)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
input = torch.rand(1, 1, 11, 13)
out = adap_pad(input)
# all padding
assert (out.shape[2], out.shape[3]) == (21, 21)
        # test padding when the effective (dilated) kernel size is (7, 9)
input = torch.rand(1, 1, 11, 13)
stride = (3, 4)
kernel_size = (4, 5)
dilation = (2, 2)
        # effective kernel: dilation * (kernel_size - 1) + 1 = (7, 9)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
dilation_out = adap_pad(input)
assert (dilation_out.shape[2], dilation_out.shape[3]) == (16, 21)
kernel_size = (7, 9)
dilation = (1, 1)
adap_pad = AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=padding)
kernel79_out = adap_pad(input)
assert (kernel79_out.shape[2], kernel79_out.shape[3]) == (16, 21)
assert kernel79_out.shape == dilation_out.shape
    # only 'same' and 'corner' padding modes are supported
with pytest.raises(AssertionError):
AdaptivePadding(
kernel_size=kernel_size,
stride=stride,
dilation=dilation,
padding=1)
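# The expected shapes above follow the standard 'same'-padding rule: for each
# spatial dim the total pad is
#     pad = max((ceil(in / stride) - 1) * stride
#               + (kernel - 1) * dilation + 1 - in, 0)
# where 'same' splits it evenly over both sides and 'corner' puts it all at
# the bottom/right. E.g. the first case (in=15, kernel=16, stride=16,
# dilation=1) gives pad = 0 * 16 + 15 + 1 - 15 = 1, hence output size 16.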
def test_patch_embed():
B = 2
H = 3
W = 4
C = 3
embed_dims = 10
kernel_size = 3
stride = 1
dummy_input = torch.rand(B, C, H, W)
patch_merge_1 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=1,
norm_cfg=None)
x1, shape = patch_merge_1(dummy_input)
# test out shape
assert x1.shape == (2, 2, 10)
# test outsize is correct
assert shape == (1, 2)
# test L = out_h * out_w
assert shape[0] * shape[1] == x1.shape[1]
B = 2
H = 10
W = 10
C = 3
embed_dims = 10
kernel_size = 5
stride = 2
dummy_input = torch.rand(B, C, H, W)
# test dilation
patch_merge_2 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=None,
)
x2, shape = patch_merge_2(dummy_input)
# test out shape
assert x2.shape == (2, 1, 10)
# test outsize is correct
assert shape == (1, 1)
# test L = out_h * out_w
assert shape[0] * shape[1] == x2.shape[1]
stride = 2
input_size = (10, 10)
dummy_input = torch.rand(B, C, H, W)
# test stride and norm
patch_merge_3 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=dict(type='LN'),
input_size=input_size)
x3, shape = patch_merge_3(dummy_input)
# test out shape
assert x3.shape == (2, 1, 10)
# test outsize is correct
assert shape == (1, 1)
# test L = out_h * out_w
assert shape[0] * shape[1] == x3.shape[1]
# test the init_out_size with nn.Unfold
assert patch_merge_3.init_out_size[1] == (input_size[0] - 2 * 4 -
1) // 2 + 1
assert patch_merge_3.init_out_size[0] == (input_size[0] - 2 * 4 -
1) // 2 + 1
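    # (these match the usual conv output-size formula
    #  out = (in + 2 * pad - dilation * (kernel - 1) - 1) // stride + 1,
    #  here (10 + 0 - 2 * 4 - 1) // 2 + 1 = 1)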
H = 11
W = 12
input_size = (H, W)
dummy_input = torch.rand(B, C, H, W)
# test stride and norm
patch_merge_3 = PatchEmbed(
in_channels=C,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=0,
dilation=2,
norm_cfg=dict(type='LN'),
input_size=input_size)
_, shape = patch_merge_3(dummy_input)
    # when `input_size` equals the real input size,
    # the out_size should be equal to `init_out_size`
assert shape == patch_merge_3.init_out_size
    # test adaptive padding
for padding in ('same', 'corner'):
in_c = 2
embed_dims = 3
B = 2
# test stride is 1
input_size = (5, 5)
kernel_size = (5, 5)
stride = (1, 1)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 25, 3)
assert out_size == (5, 5)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (5, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 1, 3)
assert out_size == (1, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (6, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 2, 3)
assert out_size == (2, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test different kernel_size with different stride
input_size = (6, 5)
kernel_size = (6, 2)
stride = (6, 2)
dilation = 1
bias = False
x = torch.rand(B, in_c, *input_size)
patch_embed = PatchEmbed(
in_channels=in_c,
embed_dims=embed_dims,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_embed(x)
assert x_out.size() == (B, 3, 3)
assert out_size == (1, 3)
assert x_out.size(1) == out_size[0] * out_size[1]
def test_patch_merging():
# Test the model with int padding
in_c = 3
out_c = 4
kernel_size = 3
stride = 3
padding = 1
dilation = 1
bias = False
# test the case `pad_to_stride` is False
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
B, L, C = 1, 100, 3
input_size = (10, 10)
x = torch.rand(B, L, C)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (1, 16, 4)
assert out_size == (4, 4)
# assert out size is consistent with real output
assert x_out.size(1) == out_size[0] * out_size[1]
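    # PatchMerging consumes flattened (B, L, C) tokens: it restores the 2D
    # map, gathers each kernel window via unfold and linearly projects the
    # concatenated window features to `out_channels`, so L shrinks from 100
    # to 16 here (a 10x10 map sampled with kernel 3, stride 3, padding 1).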
in_c = 4
out_c = 5
kernel_size = 6
stride = 3
padding = 2
dilation = 2
bias = False
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
B, L, C = 1, 100, 4
input_size = (10, 10)
x = torch.rand(B, L, C)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (1, 4, 5)
assert out_size == (2, 2)
# assert out size is consistent with real output
assert x_out.size(1) == out_size[0] * out_size[1]
# Test with adaptive padding
for padding in ('same', 'corner'):
in_c = 2
out_c = 3
B = 2
# test stride is 1
input_size = (5, 5)
kernel_size = (5, 5)
stride = (1, 1)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 25, 3)
assert out_size == (5, 5)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (5, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 1, 3)
assert out_size == (1, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test kernel_size == stride
input_size = (6, 5)
kernel_size = (5, 5)
stride = (5, 5)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 2, 3)
assert out_size == (2, 1)
assert x_out.size(1) == out_size[0] * out_size[1]
# test different kernel_size with different stride
input_size = (6, 5)
kernel_size = (6, 2)
stride = (6, 2)
dilation = 1
bias = False
L = input_size[0] * input_size[1]
x = torch.rand(B, L, in_c)
patch_merge = PatchMerging(
in_channels=in_c,
out_channels=out_c,
kernel_size=kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
bias=bias)
x_out, out_size = patch_merge(x, input_size)
assert x_out.size() == (B, 3, 3)
assert out_size == (1, 3)
assert x_out.size(1) == out_size[0] * out_size[1]
def test_detr_transformer_decoder_encoder_layer():
config = ConfigDict(
dict(
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=(
'norm',
'self_attn',
'norm',
'cross_attn',
'norm',
'ffn',
))))
assert DetrTransformerDecoder(**config).layers[0].pre_norm
assert len(DetrTransformerDecoder(**config).layers) == 6
DetrTransformerDecoder(**config)
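    # `pre_norm` is inferred from `operation_order`: an order beginning with
    # 'norm' (as configured above) yields a pre-norm layer, whereas the
    # conventional post-norm order begins with 'self_attn'.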
with pytest.raises(AssertionError):
config = ConfigDict(
dict(
return_intermediate=True,
num_layers=6,
transformerlayers=[
dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm'))
] * 5))
DetrTransformerDecoder(**config)
config = ConfigDict(
dict(
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('norm', 'self_attn', 'norm', 'cross_attn',
'norm', 'ffn', 'norm'))))
with pytest.raises(AssertionError):
# len(operation_order) == 6
DetrTransformerEncoder(**config)
def test_transformer():
config = ConfigDict(
dict(
encoder=dict(
type='DetrTransformerEncoder',
num_layers=6,
transformerlayers=dict(
type='BaseTransformerLayer',
attn_cfgs=[
dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1)
],
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'ffn', 'norm'))),
decoder=dict(
type='DetrTransformerDecoder',
return_intermediate=True,
num_layers=6,
transformerlayers=dict(
type='DetrTransformerDecoderLayer',
attn_cfgs=dict(
type='MultiheadAttention',
embed_dims=256,
num_heads=8,
dropout=0.1),
feedforward_channels=2048,
ffn_dropout=0.1,
operation_order=('self_attn', 'norm', 'cross_attn', 'norm',
'ffn', 'norm')),
)))
transformer = Transformer(**config)
transformer.init_weights()
mmdetection | mmdetection-master/tests/test_onnx/__init__.py
# Copyright (c) OpenMMLab. All rights reserved.
from .utils import ort_validate
__all__ = ['ort_validate']
mmdetection | mmdetection-master/tests/test_onnx/test_head.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from functools import partial
import mmcv
import numpy as np
import pytest
import torch
from mmcv.cnn import Scale
from mmdet import digit_version
from mmdet.models import build_detector
from mmdet.models.dense_heads import (FCOSHead, FSAFHead, RetinaHead, SSDHead,
YOLOV3Head)
from .utils import ort_validate
data_path = osp.join(osp.dirname(__file__), 'data')
if digit_version(torch.__version__) <= digit_version('1.5.0'):
pytest.skip(
'ort backend does not support version below 1.5.0',
allow_module_level=True)
def test_cascade_onnx_export():
config_path = './configs/cascade_rcnn/cascade_rcnn_r50_fpn_1x_coco.py'
cfg = mmcv.Config.fromfile(config_path)
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
with torch.no_grad():
model.forward = partial(model.forward, img_metas=[[dict()]])
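        # Tracing feeds tensor inputs only, so `img_metas` is pre-bound with a
        # dummy dict; the export path is expected to take shape information
        # from the input tensor itself rather than from these metas.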
dynamic_axes = {
'input_img': {
0: 'batch',
2: 'width',
3: 'height'
},
'dets': {
0: 'batch',
1: 'num_dets',
},
'labels': {
0: 'batch',
1: 'num_dets',
},
}
torch.onnx.export(
model, [torch.rand(1, 3, 400, 500)],
'tmp.onnx',
output_names=['dets', 'labels'],
input_names=['input_img'],
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=False,
opset_version=11,
dynamic_axes=dynamic_axes)
def test_faster_onnx_export():
config_path = './configs/faster_rcnn/faster_rcnn_r50_fpn_1x_coco.py'
cfg = mmcv.Config.fromfile(config_path)
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
with torch.no_grad():
model.forward = partial(model.forward, img_metas=[[dict()]])
dynamic_axes = {
'input_img': {
0: 'batch',
2: 'width',
3: 'height'
},
'dets': {
0: 'batch',
1: 'num_dets',
},
'labels': {
0: 'batch',
1: 'num_dets',
},
}
torch.onnx.export(
model, [torch.rand(1, 3, 400, 500)],
'tmp.onnx',
output_names=['dets', 'labels'],
input_names=['input_img'],
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=False,
opset_version=11,
dynamic_axes=dynamic_axes)
def retinanet_config():
"""RetinanNet Head Config."""
head_cfg = dict(
stacked_convs=6,
feat_channels=2,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]))
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
model = RetinaHead(
num_classes=4, in_channels=1, test_cfg=test_cfg, **head_cfg)
model.requires_grad_(False)
return model
def test_retina_head_forward_single():
"""Test RetinaNet Head single forward in torch and onnxruntime env."""
retina_model = retinanet_config()
feat = torch.rand(1, retina_model.in_channels, 32, 32)
# validate the result between the torch and ort
ort_validate(retina_model.forward_single, feat)
def test_retina_head_forward():
"""Test RetinaNet Head forward in torch and onnxruntime env."""
retina_model = retinanet_config()
s = 128
    # RetinaNet head expects multiple levels of features per image
feats = [
torch.rand(1, retina_model.in_channels, s // (2**(i + 2)),
s // (2**(i + 2))) # [32, 16, 8, 4, 2]
for i in range(len(retina_model.prior_generator.strides))
]
ort_validate(retina_model.forward, feats)
def test_retinanet_head_onnx_export():
"""Test RetinaNet Head _get_bboxes() in torch and onnxruntime env."""
retina_model = retinanet_config()
s = 128
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 2)
}]
# The data of retina_head_get_bboxes.pkl contains two parts:
# cls_score(list(Tensor)) and bboxes(list(Tensor)),
# where each torch.Tensor is generated by torch.rand().
# the cls_score's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2).
# the bboxes's size: (1, 36, 32, 32), (1, 36, 16, 16),
# (1, 36, 8, 8), (1, 36, 4, 4), (1, 36, 2, 2)
retina_head_data = 'retina_head_get_bboxes.pkl'
feats = mmcv.load(osp.join(data_path, retina_head_data))
cls_score = feats[:5]
bboxes = feats[5:]
retina_model.onnx_export = partial(
retina_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(retina_model.onnx_export, (cls_score, bboxes))
def yolo_config():
"""YoloV3 Head Config."""
head_cfg = dict(
anchor_generator=dict(
type='YOLOAnchorGenerator',
base_sizes=[[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)]],
strides=[32, 16, 8]),
bbox_coder=dict(type='YOLOBBoxCoder'))
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
conf_thr=0.005,
nms=dict(type='nms', iou_threshold=0.45),
max_per_img=100))
model = YOLOV3Head(
num_classes=4,
in_channels=[1, 1, 1],
out_channels=[16, 8, 4],
test_cfg=test_cfg,
**head_cfg)
model.requires_grad_(False)
# yolov3 need eval()
model.cpu().eval()
return model
def test_yolov3_head_forward():
"""Test Yolov3 head forward() in torch and ort env."""
yolo_model = yolo_config()
    # YOLOv3 head expects multiple levels of features per image
feats = [
torch.rand(1, 1, 64 // (2**(i + 2)), 64 // (2**(i + 2)))
for i in range(len(yolo_model.in_channels))
]
ort_validate(yolo_model.forward, feats)
def test_yolov3_head_onnx_export():
"""Test yolov3 head get_bboxes() in torch and ort env."""
yolo_model = yolo_config()
s = 128
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'img_shape': (s, s, 3),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3)
}]
# The data of yolov3_head_get_bboxes.pkl contains
# a list of torch.Tensor, where each torch.Tensor
# is generated by torch.rand and each tensor size is:
# (1, 27, 32, 32), (1, 27, 16, 16), (1, 27, 8, 8).
yolo_head_data = 'yolov3_head_get_bboxes.pkl'
pred_maps = mmcv.load(osp.join(data_path, yolo_head_data))
yolo_model.onnx_export = partial(
yolo_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(yolo_model.onnx_export, pred_maps)
def fcos_config():
"""FCOS Head Config."""
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
model = FCOSHead(num_classes=4, in_channels=1, test_cfg=test_cfg)
model.requires_grad_(False)
return model
def test_fcos_head_forward_single():
"""Test fcos forward single in torch and ort env."""
fcos_model = fcos_config()
feat = torch.rand(1, fcos_model.in_channels, 32, 32)
fcos_model.forward_single = partial(
fcos_model.forward_single,
scale=Scale(1.0).requires_grad_(False),
stride=(4, ))
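    # `forward_single` takes per-level `scale` and `stride` arguments, so they
    # are pre-bound here to leave a tensor-only signature for ONNX tracing.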
ort_validate(fcos_model.forward_single, feat)
def test_fcos_head_forward():
"""Test fcos forward in mutil-level feature map."""
fcos_model = fcos_config()
s = 128
feats = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
ort_validate(fcos_model.forward, feats)
def test_fcos_head_onnx_export():
"""Test fcos head get_bboxes() in ort."""
fcos_model = fcos_config()
s = 128
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'img_shape': (s, s, 3),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3)
}]
cls_scores = [
torch.rand(1, fcos_model.num_classes, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
bboxes = [
torch.rand(1, 4, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
centerness = [
torch.rand(1, 1, s // feat_size, s // feat_size)
for feat_size in [4, 8, 16, 32, 64]
]
fcos_model.onnx_export = partial(
fcos_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(fcos_model.onnx_export, (cls_scores, bboxes, centerness))
def fsaf_config():
"""FSAF Head Config."""
cfg = dict(
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=1,
scales_per_octave=1,
ratios=[1.0],
strides=[8, 16, 32, 64, 128]))
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
min_bbox_size=0,
score_thr=0.05,
nms=dict(type='nms', iou_threshold=0.5),
max_per_img=100))
model = FSAFHead(num_classes=4, in_channels=1, test_cfg=test_cfg, **cfg)
model.requires_grad_(False)
return model
def test_fsaf_head_forward_single():
"""Test RetinaNet Head forward_single() in torch and onnxruntime env."""
fsaf_model = fsaf_config()
feat = torch.rand(1, fsaf_model.in_channels, 32, 32)
ort_validate(fsaf_model.forward_single, feat)
def test_fsaf_head_forward():
"""Test RetinaNet Head forward in torch and onnxruntime env."""
fsaf_model = fsaf_config()
s = 128
feats = [
torch.rand(1, fsaf_model.in_channels, s // (2**(i + 2)),
s // (2**(i + 2)))
for i in range(len(fsaf_model.anchor_generator.strides))
]
ort_validate(fsaf_model.forward, feats)
def test_fsaf_head_onnx_export():
"""Test RetinaNet Head get_bboxes in torch and onnxruntime env."""
fsaf_model = fsaf_config()
s = 256
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 2)
}]
# The data of fsaf_head_get_bboxes.pkl contains two parts:
# cls_score(list(Tensor)) and bboxes(list(Tensor)),
# where each torch.Tensor is generated by torch.rand().
# the cls_score's size: (1, 4, 64, 64), (1, 4, 32, 32),
# (1, 4, 16, 16), (1, 4, 8, 8), (1, 4, 4, 4).
# the bboxes's size: (1, 4, 64, 64), (1, 4, 32, 32),
# (1, 4, 16, 16), (1, 4, 8, 8), (1, 4, 4, 4).
fsaf_head_data = 'fsaf_head_get_bboxes.pkl'
feats = mmcv.load(osp.join(data_path, fsaf_head_data))
cls_score = feats[:5]
bboxes = feats[5:]
fsaf_model.onnx_export = partial(
fsaf_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(fsaf_model.onnx_export, (cls_score, bboxes))
def ssd_config():
"""SSD Head Config."""
cfg = dict(
anchor_generator=dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[0.1, 0.1, 0.2, 0.2]))
test_cfg = mmcv.Config(
dict(
deploy_nms_pre=0,
nms=dict(type='nms', iou_threshold=0.45),
min_bbox_size=0,
score_thr=0.02,
max_per_img=200))
model = SSDHead(
num_classes=4,
in_channels=(4, 8, 4, 2, 2, 2),
test_cfg=test_cfg,
**cfg)
model.requires_grad_(False)
return model
def test_ssd_head_forward():
"""Test SSD Head forward in torch and onnxruntime env."""
ssd_model = ssd_config()
    featmap_size = [38, 19, 10, 5, 3, 1]
feats = [
torch.rand(1, ssd_model.in_channels[i], featmap_size[i],
featmap_size[i]) for i in range(len(ssd_model.in_channels))
]
ort_validate(ssd_model.forward, feats)
def test_ssd_head_onnx_export():
"""Test SSD Head get_bboxes in torch and onnxruntime env."""
ssd_model = ssd_config()
s = 300
img_metas = [{
'img_shape_for_onnx': torch.Tensor([s, s]),
'scale_factor': np.ones(4),
'pad_shape': (s, s, 3),
'img_shape': (s, s, 2)
}]
# The data of ssd_head_get_bboxes.pkl contains two parts:
# cls_score(list(Tensor)) and bboxes(list(Tensor)),
# where each torch.Tensor is generated by torch.rand().
# the cls_score's size: (1, 20, 38, 38), (1, 30, 19, 19),
# (1, 30, 10, 10), (1, 30, 5, 5), (1, 20, 3, 3), (1, 20, 1, 1).
# the bboxes's size: (1, 16, 38, 38), (1, 24, 19, 19),
# (1, 24, 10, 10), (1, 24, 5, 5), (1, 16, 3, 3), (1, 16, 1, 1).
ssd_head_data = 'ssd_head_get_bboxes.pkl'
feats = mmcv.load(osp.join(data_path, ssd_head_data))
cls_score = feats[:6]
bboxes = feats[6:]
ssd_model.onnx_export = partial(
ssd_model.onnx_export, img_metas=img_metas, with_nms=False)
ort_validate(ssd_model.onnx_export, (cls_score, bboxes))
mmdetection | mmdetection-master/tests/test_onnx/test_neck.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import pytest
import torch
from mmdet import digit_version
from mmdet.models.necks import FPN, YOLOV3Neck
from .utils import ort_validate
if digit_version(torch.__version__) <= digit_version('1.5.0'):
pytest.skip(
'ort backend does not support version below 1.5.0',
allow_module_level=True)
# Control the returned model of fpn_neck_config()
fpn_test_step_names = {
'fpn_normal': 0,
'fpn_wo_extra_convs': 1,
'fpn_lateral_bns': 2,
'fpn_bilinear_upsample': 3,
'fpn_scale_factor': 4,
'fpn_extra_convs_inputs': 5,
'fpn_extra_convs_laterals': 6,
'fpn_extra_convs_outputs': 7,
}
# Control the returned model of yolo_neck_config()
yolo_test_step_names = {'yolo_normal': 0}
data_path = osp.join(osp.dirname(__file__), 'data')
def fpn_neck_config(test_step_name):
"""Return the class containing the corresponding attributes according to
the fpn_test_step_names."""
s = 64
in_channels = [8, 16, 32, 64]
feat_sizes = [s // 2**i for i in range(4)] # [64, 32, 16, 8]
out_channels = 8
feats = [
torch.rand(1, in_channels[i], feat_sizes[i], feat_sizes[i])
for i in range(len(in_channels))
]
if (fpn_test_step_names[test_step_name] == 0):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs=True,
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 1):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs=False,
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 2):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs=True,
no_norm_on_lateral=False,
norm_cfg=dict(type='BN', requires_grad=True),
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 3):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs=True,
upsample_cfg=dict(mode='bilinear', align_corners=True),
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 4):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs=True,
upsample_cfg=dict(scale_factor=2),
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 5):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_input',
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 6):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_lateral',
num_outs=5)
elif (fpn_test_step_names[test_step_name] == 7):
fpn_model = FPN(
in_channels=in_channels,
out_channels=out_channels,
add_extra_convs='on_output',
num_outs=5)
return fpn_model, feats
def yolo_neck_config(test_step_name):
"""Config yolov3 Neck."""
in_channels = [16, 8, 4]
out_channels = [8, 4, 2]
# The data of yolov3_neck.pkl contains a list of
# torch.Tensor, where each torch.Tensor is generated by
# torch.rand and each tensor size is:
# (1, 4, 64, 64), (1, 8, 32, 32), (1, 16, 16, 16).
yolov3_neck_data = 'yolov3_neck.pkl'
feats = mmcv.load(osp.join(data_path, yolov3_neck_data))
if (yolo_test_step_names[test_step_name] == 0):
yolo_model = YOLOV3Neck(
in_channels=in_channels, out_channels=out_channels, num_scales=3)
return yolo_model, feats
def test_fpn_normal():
outs = fpn_neck_config('fpn_normal')
ort_validate(*outs)
def test_fpn_wo_extra_convs():
outs = fpn_neck_config('fpn_wo_extra_convs')
ort_validate(*outs)
def test_fpn_lateral_bns():
outs = fpn_neck_config('fpn_lateral_bns')
ort_validate(*outs)
def test_fpn_bilinear_upsample():
outs = fpn_neck_config('fpn_bilinear_upsample')
ort_validate(*outs)
def test_fpn_scale_factor():
outs = fpn_neck_config('fpn_scale_factor')
ort_validate(*outs)
def test_fpn_extra_convs_inputs():
outs = fpn_neck_config('fpn_extra_convs_inputs')
ort_validate(*outs)
def test_fpn_extra_convs_laterals():
outs = fpn_neck_config('fpn_extra_convs_laterals')
ort_validate(*outs)
def test_fpn_extra_convs_outputs():
outs = fpn_neck_config('fpn_extra_convs_outputs')
ort_validate(*outs)
def test_yolo_normal():
outs = yolo_neck_config('yolo_normal')
ort_validate(*outs)
mmdetection | mmdetection-master/tests/test_onnx/utils.py
# Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import warnings
import numpy as np
import onnx
import onnxruntime as ort
import torch
import torch.nn as nn
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
warnings.warn('If input model has custom op from mmcv, \
you may have to build mmcv with ONNXRuntime from source.')
class WrapFunction(nn.Module):
"""Wrap the function to be tested for torch.onnx.export tracking."""
def __init__(self, wrapped_function):
super(WrapFunction, self).__init__()
self.wrapped_function = wrapped_function
def forward(self, *args, **kwargs):
return self.wrapped_function(*args, **kwargs)
def ort_validate(model, feats, onnx_io='tmp.onnx'):
"""Validate the output of the onnxruntime backend is the same as the output
generated by torch.
Args:
model (nn.Module | function): the function of model or model
to be verified.
feats (tuple(list(torch.Tensor)) | list(torch.Tensor) | torch.Tensor):
the input of model.
onnx_io (str): the name of onnx output file.
"""
# if model is not an instance of nn.Module, then it is a normal
# function and it should be wrapped.
if isinstance(model, nn.Module):
wrap_model = model
else:
wrap_model = WrapFunction(model)
wrap_model.cpu().eval()
with torch.no_grad():
torch.onnx.export(
wrap_model,
feats,
onnx_io,
export_params=True,
keep_initializers_as_inputs=True,
do_constant_folding=True,
verbose=False,
opset_version=11)
if isinstance(feats, tuple):
ort_feats = []
for feat in feats:
ort_feats += feat
else:
ort_feats = feats
# default model name: tmp.onnx
onnx_outputs = get_ort_model_output(ort_feats)
# remove temp file
if osp.exists(onnx_io):
os.remove(onnx_io)
if isinstance(feats, tuple):
torch_outputs = convert_result_list(wrap_model.forward(*feats))
else:
torch_outputs = convert_result_list(wrap_model.forward(feats))
torch_outputs = [
torch_output.detach().numpy() for torch_output in torch_outputs
]
# match torch_outputs and onnx_outputs
for i in range(len(onnx_outputs)):
np.testing.assert_allclose(
torch_outputs[i], onnx_outputs[i], rtol=1e-03, atol=1e-05)
def get_ort_model_output(feat, onnx_io='tmp.onnx'):
"""Run the model in onnxruntime env.
Args:
        feat (list[Tensor] | Tensor): input tensor(s) for the model,
            typically 4D feature maps from torch.rand.
        onnx_io (str): the name of the onnx model file to load.
Returns:
list[np.array]: onnxruntime infer result, each is a np.array
"""
onnx_model = onnx.load(onnx_io)
onnx.checker.check_model(onnx_model)
session_options = ort.SessionOptions()
# register custom op for onnxruntime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_io, session_options)
if isinstance(feat, torch.Tensor):
onnx_outputs = sess.run(None,
{sess.get_inputs()[0].name: feat.numpy()})
else:
onnx_outputs = sess.run(None, {
sess.get_inputs()[i].name: feat[i].numpy()
for i in range(len(feat))
})
return onnx_outputs
def convert_result_list(outputs):
"""Convert the torch forward outputs containing tuple or list to a list
only containing torch.Tensor.
Args:
        outputs (list(Tensor) | tuple(Tensor) | Tensor): the outputs
in torch env, maybe containing nested structures such as list
or tuple.
Returns:
list(Tensor): a list only containing torch.Tensor
"""
# recursive end condition
if isinstance(outputs, torch.Tensor):
return [outputs]
ret = []
for sub in outputs:
ret += convert_result_list(sub)
return ret
mmdetection | mmdetection-master/tests/test_runtime/async_benchmark.py
# Copyright (c) OpenMMLab. All rights reserved.
import asyncio
import os
import shutil
import urllib
import mmcv
import torch
from mmdet.apis import (async_inference_detector, inference_detector,
init_detector)
from mmdet.utils.contextmanagers import concurrent
from mmdet.utils.profiling import profile_time
async def main():
"""Benchmark between async and synchronous inference interfaces.
Sample runs for 20 demo images on K80 GPU, model - mask_rcnn_r50_fpn_1x:
async sync
7981.79 ms 9660.82 ms
8074.52 ms 9660.94 ms
7976.44 ms 9406.83 ms
Async variant takes about 0.83-0.85 of the time of the synchronous
interface.
"""
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(
project_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
checkpoint_file = os.path.join(
project_dir,
'checkpoints/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth')
if not os.path.exists(checkpoint_file):
url = ('https://download.openmmlab.com/mmdetection/v2.0'
'/mask_rcnn/mask_rcnn_r50_fpn_1x_coco'
'/mask_rcnn_r50_fpn_1x_coco_20200205-d4b0c5d6.pth')
print(f'Downloading {url} ...')
local_filename, _ = urllib.request.urlretrieve(url)
os.makedirs(os.path.dirname(checkpoint_file), exist_ok=True)
shutil.move(local_filename, checkpoint_file)
print(f'Saved as {checkpoint_file}')
else:
print(f'Using existing checkpoint {checkpoint_file}')
device = 'cuda:0'
model = init_detector(
config_file, checkpoint=checkpoint_file, device=device)
# queue is used for concurrent inference of multiple images
streamqueue = asyncio.Queue()
# queue size defines concurrency level
streamqueue_size = 4
for _ in range(streamqueue_size):
streamqueue.put_nowait(torch.cuda.Stream(device=device))
# test a single image and show the results
img = mmcv.imread(os.path.join(project_dir, 'demo/demo.jpg'))
# warmup
await async_inference_detector(model, img)
async def detect(img):
async with concurrent(streamqueue):
return await async_inference_detector(model, img)
num_of_images = 20
with profile_time('benchmark', 'async'):
tasks = [
asyncio.create_task(detect(img)) for _ in range(num_of_images)
]
async_results = await asyncio.gather(*tasks)
with torch.cuda.stream(torch.cuda.default_stream()):
with profile_time('benchmark', 'sync'):
sync_results = [
inference_detector(model, img) for _ in range(num_of_images)
]
result_dir = os.path.join(project_dir, 'demo')
model.show_result(
img,
async_results[0],
score_thr=0.5,
show=False,
out_file=os.path.join(result_dir, 'result_async.jpg'))
model.show_result(
img,
sync_results[0],
score_thr=0.5,
show=False,
out_file=os.path.join(result_dir, 'result_sync.jpg'))
if __name__ == '__main__':
asyncio.run(main())
mmdetection | mmdetection-master/tests/test_runtime/test_apis.py
import os
from pathlib import Path
import pytest
from mmdet.apis import init_detector
def test_init_detector():
project_dir = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))
project_dir = os.path.join(project_dir, '..')
config_file = os.path.join(
project_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
# test init_detector with config_file: str and cfg_options
cfg_options = dict(
model=dict(
backbone=dict(
depth=18,
init_cfg=dict(
type='Pretrained', checkpoint='torchvision://resnet18'))))
model = init_detector(config_file, device='cpu', cfg_options=cfg_options)
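    # `cfg_options` is merged into the loaded config before the model is
    # built, so the ResNet-50 backbone from the config file is overridden by
    # the ResNet-18 settings above.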
# test init_detector with :obj:`Path`
config_path_object = Path(config_file)
model = init_detector(config_path_object, device='cpu')
# test init_detector with undesirable type
with pytest.raises(TypeError):
config_list = [config_file]
model = init_detector(config_list) # noqa: F841
mmdetection | mmdetection-master/tests/test_runtime/test_async.py
# Copyright (c) OpenMMLab. All rights reserved.
"""Tests for async interface."""
import asyncio
import os
import sys
import asynctest
import mmcv
import torch
from mmdet.apis import async_inference_detector, init_detector
if sys.version_info >= (3, 7):
from mmdet.utils.contextmanagers import concurrent
class AsyncTestCase(asynctest.TestCase):
use_default_loop = False
forbid_get_event_loop = True
TEST_TIMEOUT = int(os.getenv('ASYNCIO_TEST_TIMEOUT', '30'))
def _run_test_method(self, method):
result = method()
if asyncio.iscoroutine(result):
self.loop.run_until_complete(
asyncio.wait_for(result, timeout=self.TEST_TIMEOUT))
class MaskRCNNDetector:
def __init__(self,
model_config,
checkpoint=None,
streamqueue_size=3,
device='cuda:0'):
self.streamqueue_size = streamqueue_size
self.device = device
# build the model and load checkpoint
self.model = init_detector(
            model_config, checkpoint=checkpoint, device=self.device)
self.streamqueue = None
async def init(self):
self.streamqueue = asyncio.Queue()
for _ in range(self.streamqueue_size):
stream = torch.cuda.Stream(device=self.device)
self.streamqueue.put_nowait(stream)
if sys.version_info >= (3, 7):
async def apredict(self, img):
if isinstance(img, str):
img = mmcv.imread(img)
async with concurrent(self.streamqueue):
result = await async_inference_detector(self.model, img)
return result
class AsyncInferenceTestCase(AsyncTestCase):
if sys.version_info >= (3, 7):
async def test_simple_inference(self):
if not torch.cuda.is_available():
import pytest
pytest.skip('test requires GPU and torch+cuda')
ori_grad_enabled = torch.is_grad_enabled()
root_dir = os.path.dirname(os.path.dirname(__name__))
model_config = os.path.join(
root_dir, 'configs/mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py')
detector = MaskRCNNDetector(model_config)
await detector.init()
img_path = os.path.join(root_dir, 'demo/demo.jpg')
bboxes, _ = await detector.apredict(img_path)
self.assertTrue(bboxes)
# asy inference detector will hack grad_enabled,
# so restore here to avoid it to influence other tests
torch.set_grad_enabled(ori_grad_enabled)
mmdetection | mmdetection-master/tests/test_runtime/test_config.py
# Copyright (c) OpenMMLab. All rights reserved.
from os.path import dirname, exists, join
from unittest.mock import Mock
import pytest
from mmdet.core import BitmapMasks, PolygonMasks
from mmdet.datasets.builder import DATASETS
from mmdet.datasets.utils import NumClassCheckHook
def _get_config_directory():
"""Find the predefined detector config directory."""
try:
# Assume we are running in the source mmdetection repo
repo_dpath = dirname(dirname(__file__))
repo_dpath = join(repo_dpath, '..')
except NameError:
# For IPython development when this __file__ is not defined
import mmdet
repo_dpath = dirname(dirname(mmdet.__file__))
config_dpath = join(repo_dpath, 'configs')
if not exists(config_dpath):
raise Exception('Cannot find config path')
return config_dpath
def _check_numclasscheckhook(detector, config_mod):
dummy_runner = Mock()
dummy_runner.model = detector
def get_dataset_name_classes(dataset):
        # deal with dataset wrappers such as `RepeatDataset`,
        # `ConcatDataset` and `ClassBalancedDataset`
if isinstance(dataset, (list, tuple)):
dataset = dataset[0]
while ('dataset' in dataset):
dataset = dataset['dataset']
# ConcatDataset
if isinstance(dataset, (list, tuple)):
dataset = dataset[0]
return dataset['type'], dataset.get('classes', None)
compatible_check = NumClassCheckHook()
dataset_name, CLASSES = get_dataset_name_classes(
config_mod['data']['train'])
if CLASSES is None:
CLASSES = DATASETS.get(dataset_name).CLASSES
dummy_runner.data_loader.dataset.CLASSES = CLASSES
compatible_check.before_train_epoch(dummy_runner)
dummy_runner.data_loader.dataset.CLASSES = None
compatible_check.before_train_epoch(dummy_runner)
dataset_name, CLASSES = get_dataset_name_classes(config_mod['data']['val'])
if CLASSES is None:
CLASSES = DATASETS.get(dataset_name).CLASSES
dummy_runner.data_loader.dataset.CLASSES = CLASSES
compatible_check.before_val_epoch(dummy_runner)
dummy_runner.data_loader.dataset.CLASSES = None
compatible_check.before_val_epoch(dummy_runner)
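# `get_dataset_name_classes` above unwraps nested wrapper configs, e.g. a
# hypothetical
#     dict(type='RepeatDataset', times=2,
#          dataset=dict(type='ClassBalancedDataset', oversample_thr=1e-3,
#                       dataset=dict(type='CocoDataset', ...)))
# is followed through the inner `dataset` keys down to the concrete dataset.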
def _check_roi_head(config, head):
# check consistency between head_config and roi_head
assert config['type'] == head.__class__.__name__
# check roi_align
bbox_roi_cfg = config.bbox_roi_extractor
bbox_roi_extractor = head.bbox_roi_extractor
_check_roi_extractor(bbox_roi_cfg, bbox_roi_extractor)
# check bbox head infos
bbox_cfg = config.bbox_head
bbox_head = head.bbox_head
_check_bbox_head(bbox_cfg, bbox_head)
if head.with_mask:
# check roi_align
if config.mask_roi_extractor:
mask_roi_cfg = config.mask_roi_extractor
mask_roi_extractor = head.mask_roi_extractor
_check_roi_extractor(mask_roi_cfg, mask_roi_extractor,
bbox_roi_extractor)
# check mask head infos
mask_head = head.mask_head
mask_cfg = config.mask_head
_check_mask_head(mask_cfg, mask_head)
# check arch specific settings, e.g., cascade/htc
if config['type'] in ['CascadeRoIHead', 'HybridTaskCascadeRoIHead']:
assert config.num_stages == len(head.bbox_head)
assert config.num_stages == len(head.bbox_roi_extractor)
if head.with_mask:
assert config.num_stages == len(head.mask_head)
assert config.num_stages == len(head.mask_roi_extractor)
elif config['type'] in ['MaskScoringRoIHead']:
assert (hasattr(head, 'mask_iou_head')
and head.mask_iou_head is not None)
mask_iou_cfg = config.mask_iou_head
mask_iou_head = head.mask_iou_head
assert (mask_iou_cfg.fc_out_channels ==
mask_iou_head.fc_mask_iou.in_features)
elif config['type'] in ['GridRoIHead']:
grid_roi_cfg = config.grid_roi_extractor
grid_roi_extractor = head.grid_roi_extractor
_check_roi_extractor(grid_roi_cfg, grid_roi_extractor,
bbox_roi_extractor)
config.grid_head.grid_points = head.grid_head.grid_points
def _check_roi_extractor(config, roi_extractor, prev_roi_extractor=None):
import torch.nn as nn
# Separate roi_extractor and prev_roi_extractor checks for flexibility
if isinstance(roi_extractor, nn.ModuleList):
roi_extractor = roi_extractor[0]
if prev_roi_extractor and isinstance(prev_roi_extractor, nn.ModuleList):
prev_roi_extractor = prev_roi_extractor[0]
assert (len(config.featmap_strides) == len(roi_extractor.roi_layers))
assert (config.out_channels == roi_extractor.out_channels)
from torch.nn.modules.utils import _pair
assert (_pair(config.roi_layer.output_size) ==
roi_extractor.roi_layers[0].output_size)
if 'use_torchvision' in config.roi_layer:
assert (config.roi_layer.use_torchvision ==
roi_extractor.roi_layers[0].use_torchvision)
elif 'aligned' in config.roi_layer:
assert (
config.roi_layer.aligned == roi_extractor.roi_layers[0].aligned)
if prev_roi_extractor:
assert (roi_extractor.roi_layers[0].aligned ==
prev_roi_extractor.roi_layers[0].aligned)
assert (roi_extractor.roi_layers[0].use_torchvision ==
prev_roi_extractor.roi_layers[0].use_torchvision)
def _check_mask_head(mask_cfg, mask_head):
import torch.nn as nn
if isinstance(mask_cfg, list):
for single_mask_cfg, single_mask_head in zip(mask_cfg, mask_head):
_check_mask_head(single_mask_cfg, single_mask_head)
elif isinstance(mask_head, nn.ModuleList):
for single_mask_head in mask_head:
_check_mask_head(mask_cfg, single_mask_head)
else:
assert mask_cfg['type'] == mask_head.__class__.__name__
assert mask_cfg.in_channels == mask_head.in_channels
class_agnostic = mask_cfg.get('class_agnostic', False)
out_dim = (1 if class_agnostic else mask_cfg.num_classes)
if hasattr(mask_head, 'conv_logits'):
assert (mask_cfg.conv_out_channels ==
mask_head.conv_logits.in_channels)
assert mask_head.conv_logits.out_channels == out_dim
else:
assert mask_cfg.fc_out_channels == mask_head.fc_logits.in_features
assert (mask_head.fc_logits.out_features == out_dim *
mask_head.output_area)
def _check_bbox_head(bbox_cfg, bbox_head):
import torch.nn as nn
if isinstance(bbox_cfg, list):
for single_bbox_cfg, single_bbox_head in zip(bbox_cfg, bbox_head):
_check_bbox_head(single_bbox_cfg, single_bbox_head)
elif isinstance(bbox_head, nn.ModuleList):
for single_bbox_head in bbox_head:
_check_bbox_head(bbox_cfg, single_bbox_head)
else:
assert bbox_cfg['type'] == bbox_head.__class__.__name__
if bbox_cfg['type'] == 'SABLHead':
assert bbox_cfg.cls_in_channels == bbox_head.cls_in_channels
assert bbox_cfg.reg_in_channels == bbox_head.reg_in_channels
cls_out_channels = bbox_cfg.get('cls_out_channels', 1024)
assert (cls_out_channels == bbox_head.fc_cls.in_features)
assert (bbox_cfg.num_classes + 1 == bbox_head.fc_cls.out_features)
elif bbox_cfg['type'] == 'DIIHead':
assert bbox_cfg['num_ffn_fcs'] == bbox_head.ffn.num_fcs
# 3 means FC and LN and Relu
assert bbox_cfg['num_cls_fcs'] == len(bbox_head.cls_fcs) // 3
assert bbox_cfg['num_reg_fcs'] == len(bbox_head.reg_fcs) // 3
assert bbox_cfg['in_channels'] == bbox_head.in_channels
assert bbox_cfg['in_channels'] == bbox_head.fc_cls.in_features
assert bbox_cfg['in_channels'] == bbox_head.fc_reg.in_features
assert bbox_cfg['in_channels'] == bbox_head.attention.embed_dims
assert bbox_cfg[
'feedforward_channels'] == bbox_head.ffn.feedforward_channels
else:
assert bbox_cfg.in_channels == bbox_head.in_channels
with_cls = bbox_cfg.get('with_cls', True)
if with_cls:
fc_out_channels = bbox_cfg.get('fc_out_channels', 2048)
assert (fc_out_channels == bbox_head.fc_cls.in_features)
if bbox_head.custom_cls_channels:
assert (bbox_head.loss_cls.get_cls_channels(
bbox_head.num_classes) == bbox_head.fc_cls.out_features
)
else:
assert (bbox_cfg.num_classes +
1 == bbox_head.fc_cls.out_features)
with_reg = bbox_cfg.get('with_reg', True)
if with_reg:
out_dim = (4 if bbox_cfg.reg_class_agnostic else 4 *
bbox_cfg.num_classes)
assert bbox_head.fc_reg.out_features == out_dim
def _check_anchorhead(config, head):
    # check consistency between head_config and anchor_head
assert config['type'] == head.__class__.__name__
assert config.in_channels == head.in_channels
num_classes = (
config.num_classes -
1 if config.loss_cls.get('use_sigmoid', False) else config.num_classes)
if config['type'] == 'ATSSHead':
assert (config.feat_channels == head.atss_cls.in_channels)
assert (config.feat_channels == head.atss_reg.in_channels)
assert (config.feat_channels == head.atss_centerness.in_channels)
elif config['type'] == 'SABLRetinaHead':
assert (config.feat_channels == head.retina_cls.in_channels)
assert (config.feat_channels == head.retina_bbox_reg.in_channels)
assert (config.feat_channels == head.retina_bbox_cls.in_channels)
else:
assert (config.in_channels == head.conv_cls.in_channels)
assert (config.in_channels == head.conv_reg.in_channels)
assert (head.conv_cls.out_channels == num_classes * head.num_anchors)
assert head.fc_reg.out_channels == 4 * head.num_anchors
# Only tests a representative subset of configurations
# TODO: test pipelines using Albu, current Albu throw None given empty GT
@pytest.mark.parametrize(
'config_rpath',
[
'wider_face/ssd300_wider_face.py',
'pascal_voc/ssd300_voc0712.py',
'pascal_voc/ssd512_voc0712.py',
# 'albu_example/mask_rcnn_r50_fpn_1x.py',
'foveabox/fovea_align_r50_fpn_gn-head_mstrain_640-800_4x4_2x_coco.py',
'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain-poly_1x_coco.py',
'mask_rcnn/mask_rcnn_r50_caffe_fpn_mstrain_1x_coco.py',
'mask_rcnn/mask_rcnn_r50_fpn_fp16_1x_coco.py'
])
def test_config_data_pipeline(config_rpath):
"""Test whether the data pipeline is valid and can process corner cases.
CommandLine:
xdoctest -m tests/test_runtime/
test_config.py test_config_build_data_pipeline
"""
import numpy as np
from mmcv import Config
from mmdet.datasets.pipelines import Compose
config_dpath = _get_config_directory()
print(f'Found config_dpath = {config_dpath}')
def dummy_masks(h, w, num_obj=3, mode='bitmap'):
assert mode in ('polygon', 'bitmap')
if mode == 'bitmap':
masks = np.random.randint(0, 2, (num_obj, h, w), dtype=np.uint8)
masks = BitmapMasks(masks, h, w)
else:
masks = []
for i in range(num_obj):
masks.append([])
masks[-1].append(
np.random.uniform(0, min(h - 1, w - 1), (8 + 4 * i, )))
masks[-1].append(
np.random.uniform(0, min(h - 1, w - 1), (10 + 4 * i, )))
masks = PolygonMasks(masks, h, w)
return masks
config_fpath = join(config_dpath, config_rpath)
cfg = Config.fromfile(config_fpath)
# remove loading pipeline
loading_pipeline = cfg.train_pipeline.pop(0)
loading_ann_pipeline = cfg.train_pipeline.pop(0)
cfg.test_pipeline.pop(0)
train_pipeline = Compose(cfg.train_pipeline)
test_pipeline = Compose(cfg.test_pipeline)
print(f'Building data pipeline, config_fpath = {config_fpath}')
print(f'Test training data pipeline: \n{train_pipeline!r}')
img = np.random.randint(0, 255, size=(888, 666, 3), dtype=np.uint8)
if loading_pipeline.get('to_float32', False):
img = img.astype(np.float32)
mode = 'bitmap' if loading_ann_pipeline.get('poly2mask',
True) else 'polygon'
results = dict(
filename='test_img.png',
ori_filename='test_img.png',
img=img,
img_shape=img.shape,
ori_shape=img.shape,
gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32),
gt_labels=np.array([1], dtype=np.int64),
gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
)
results['img_fields'] = ['img']
results['bbox_fields'] = ['gt_bboxes']
results['mask_fields'] = ['gt_masks']
output_results = train_pipeline(results)
assert output_results is not None
print(f'Test testing data pipeline: \n{test_pipeline!r}')
results = dict(
filename='test_img.png',
ori_filename='test_img.png',
img=img,
img_shape=img.shape,
ori_shape=img.shape,
gt_bboxes=np.array([[35.2, 11.7, 39.7, 15.7]], dtype=np.float32),
gt_labels=np.array([1], dtype=np.int64),
gt_masks=dummy_masks(img.shape[0], img.shape[1], mode=mode),
)
results['img_fields'] = ['img']
results['bbox_fields'] = ['gt_bboxes']
results['mask_fields'] = ['gt_masks']
output_results = test_pipeline(results)
assert output_results is not None
# test empty GT
print('Test empty GT with training data pipeline: '
f'\n{train_pipeline!r}')
results = dict(
filename='test_img.png',
ori_filename='test_img.png',
img=img,
img_shape=img.shape,
ori_shape=img.shape,
gt_bboxes=np.zeros((0, 4), dtype=np.float32),
gt_labels=np.array([], dtype=np.int64),
gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode),
)
results['img_fields'] = ['img']
results['bbox_fields'] = ['gt_bboxes']
results['mask_fields'] = ['gt_masks']
output_results = train_pipeline(results)
assert output_results is not None
print(f'Test empty GT with testing data pipeline: \n{test_pipeline!r}')
results = dict(
filename='test_img.png',
ori_filename='test_img.png',
img=img,
img_shape=img.shape,
ori_shape=img.shape,
gt_bboxes=np.zeros((0, 4), dtype=np.float32),
gt_labels=np.array([], dtype=np.int64),
gt_masks=dummy_masks(img.shape[0], img.shape[1], num_obj=0, mode=mode),
)
results['img_fields'] = ['img']
results['bbox_fields'] = ['gt_bboxes']
results['mask_fields'] = ['gt_masks']
output_results = test_pipeline(results)
assert output_results is not None
mmdetection | mmdetection-master/tests/test_runtime/test_eval_hook.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import unittest.mock as mock
from collections import OrderedDict
from unittest.mock import MagicMock, patch
import pytest
import torch
import torch.nn as nn
from mmcv.runner import EpochBasedRunner, build_optimizer
from mmcv.utils import get_logger
from torch.utils.data import DataLoader, Dataset
from mmdet.core import DistEvalHook, EvalHook
class ExampleDataset(Dataset):
def __init__(self):
self.index = 0
self.eval_result = [0.1, 0.4, 0.3, 0.7, 0.2, 0.05, 0.4, 0.6]
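        # one value is consumed per evaluation epoch: under the default
        # 'greater' rule the best score is 0.7 at epoch 4, and under 'less'
        # it is 0.05 at epoch 6, matching the checkpoints asserted below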
def __getitem__(self, idx):
results = dict(imgs=torch.tensor([1]))
return results
def __len__(self):
return 1
@mock.create_autospec
def evaluate(self, results, logger=None):
pass
class EvalDataset(ExampleDataset):
def evaluate(self, results, logger=None):
mean_ap = self.eval_result[self.index]
output = OrderedDict(mAP=mean_ap, index=self.index, score=mean_ap)
self.index += 1
return output
class ExampleModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Linear(1, 1)
self.test_cfg = None
def forward(self, imgs, rescale=False, return_loss=False):
return imgs
def train_step(self, data_batch, optimizer, **kwargs):
outputs = {
'loss': 0.5,
'log_vars': {
'accuracy': 0.98
},
'num_samples': 1
}
return outputs
@pytest.mark.skipif(
not torch.cuda.is_available(), reason='requires CUDA support')
@patch('mmdet.apis.single_gpu_test', MagicMock)
@patch('mmdet.apis.multi_gpu_test', MagicMock)
@pytest.mark.parametrize('EvalHookCls', (EvalHook, DistEvalHook))
def test_eval_hook(EvalHookCls):
with pytest.raises(TypeError):
# dataloader must be a pytorch DataLoader
test_dataset = ExampleDataset()
data_loader = [
DataLoader(
test_dataset,
batch_size=1,
sampler=None,
                num_workers=0,
shuffle=False)
]
EvalHookCls(data_loader)
with pytest.raises(KeyError):
# rule must be in keys of rule_map
test_dataset = ExampleDataset()
data_loader = DataLoader(
test_dataset,
batch_size=1,
sampler=None,
num_workers=0,
shuffle=False)
EvalHookCls(data_loader, save_best='auto', rule='unsupport')
with pytest.raises(ValueError):
# key_indicator must be valid when rule_map is None
test_dataset = ExampleDataset()
data_loader = DataLoader(
test_dataset,
batch_size=1,
sampler=None,
num_workers=0,
shuffle=False)
EvalHookCls(data_loader, save_best='unsupport')
optimizer_cfg = dict(
type='SGD', lr=0.01, momentum=0.9, weight_decay=0.0001)
test_dataset = ExampleDataset()
loader = DataLoader(test_dataset, batch_size=1)
model = ExampleModel()
optimizer = build_optimizer(model, optimizer_cfg)
data_loader = DataLoader(test_dataset, batch_size=1)
eval_hook = EvalHookCls(data_loader, save_best=None)
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 1)
assert runner.meta is None or 'best_score' not in runner.meta[
'hook_msgs']
assert runner.meta is None or 'best_ckpt' not in runner.meta[
'hook_msgs']
    # when `save_best` is set to 'auto', the first metric will be used.
loader = DataLoader(EvalDataset(), batch_size=1)
model = ExampleModel()
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EvalHookCls(data_loader, interval=1, save_best='auto')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.7
loader = DataLoader(EvalDataset(), batch_size=1)
model = ExampleModel()
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EvalHookCls(data_loader, interval=1, save_best='mAP')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.7
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EvalHookCls(
data_loader, interval=1, save_best='score', rule='greater')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
real_path = osp.join(tmpdir, 'best_score_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.7
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EvalHookCls(data_loader, save_best='mAP', rule='less')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 8)
real_path = osp.join(tmpdir, 'best_mAP_epoch_6.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.05
data_loader = DataLoader(EvalDataset(), batch_size=1)
eval_hook = EvalHookCls(data_loader, save_best='mAP')
with tempfile.TemporaryDirectory() as tmpdir:
logger = get_logger('test_eval')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.run([loader], [('train', 1)], 2)
real_path = osp.join(tmpdir, 'best_mAP_epoch_2.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.4
resume_from = osp.join(tmpdir, 'latest.pth')
loader = DataLoader(ExampleDataset(), batch_size=1)
eval_hook = EvalHookCls(data_loader, save_best='mAP')
runner = EpochBasedRunner(
model=model,
batch_processor=None,
optimizer=optimizer,
work_dir=tmpdir,
logger=logger)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_hook(eval_hook)
runner.resume(resume_from)
runner.run([loader], [('train', 1)], 8)
real_path = osp.join(tmpdir, 'best_mAP_epoch_4.pth')
assert runner.meta['hook_msgs']['best_ckpt'] == osp.realpath(real_path)
assert runner.meta['hook_msgs']['best_score'] == 0.7
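# Resuming from the epoch-2 checkpoint restores runner.meta, so best-score
# tracking continues across the resumed run and the best checkpoint moves
# from (epoch 2, 0.4) to (epoch 4, 0.7) once training reaches 8 epochs.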
| 8,590 | 32.956522 | 79 | py |
mmdetection | mmdetection-master/tests/test_runtime/test_fp16.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.runner import auto_fp16, force_fp32
from mmcv.runner.fp16_utils import cast_tensor_type
def test_cast_tensor_type():
inputs = torch.FloatTensor([5.])
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, torch.Tensor)
assert outputs.dtype == dst_type
inputs = 'tensor'
src_type = str
dst_type = str
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, str)
inputs = np.array([5.])
src_type = np.ndarray
dst_type = np.ndarray
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, np.ndarray)
inputs = dict(
tensor_a=torch.FloatTensor([1.]), tensor_b=torch.FloatTensor([2.]))
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, dict)
assert outputs['tensor_a'].dtype == dst_type
assert outputs['tensor_b'].dtype == dst_type
inputs = [torch.FloatTensor([1.]), torch.FloatTensor([2.])]
src_type = torch.float32
dst_type = torch.int32
outputs = cast_tensor_type(inputs, src_type, dst_type)
assert isinstance(outputs, list)
assert outputs[0].dtype == dst_type
assert outputs[1].dtype == dst_type
inputs = 5
outputs = cast_tensor_type(inputs, None, None)
assert isinstance(outputs, int)
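# cast_tensor_type recurses through dicts and sequences, so nested containers
# are converted as well. A quick sketch (hypothetical data, not part of the
# original suite):
#
#   nested = dict(feats=[torch.ones(1), dict(mask=torch.ones(1))])
#   out = cast_tensor_type(nested, torch.float32, torch.half)
#   assert out['feats'][0].dtype == torch.half
#   assert out['feats'][1]['mask'].dtype == torch.half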
def test_auto_fp16():
with pytest.raises(TypeError):
# ExampleObject is not a subclass of nn.Module
class ExampleObject:
@auto_fp16()
def __call__(self, x):
return x
model = ExampleObject()
input_x = torch.ones(1, dtype=torch.float32)
model(input_x)
# apply to all input args
class ExampleModule(nn.Module):
@auto_fp16()
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
# apply to specified input args
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', ))
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
# apply to optional input args
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', 'y'))
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.float32
# out_fp32=True
class ExampleModule(nn.Module):
@auto_fp16(apply_to=('x', 'y'), out_fp32=True)
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.float32)
input_z = torch.ones(1, dtype=torch.float32)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.float32
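# In real training code the fp16_enabled flag is usually not set by hand:
# mmcv's wrap_fp16_model() converts the model's weights to half precision and
# enables the decorated forward paths. A sketch of typical usage (not part of
# this test):
#
#   from mmcv.runner import wrap_fp16_model
#   model = ExampleModule()
#   wrap_fp16_model(model)  # sets fp16_enabled where the attribute exists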
def test_force_fp32():
with pytest.raises(TypeError):
# ExampleObject is not a subclass of nn.Module
class ExampleObject:
@force_fp32()
def __call__(self, x):
return x
model = ExampleObject()
input_x = torch.ones(1, dtype=torch.float32)
model(input_x)
# apply to all input args
class ExampleModule(nn.Module):
@force_fp32()
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.half)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
# apply to specified input args
class ExampleModule(nn.Module):
@force_fp32(apply_to=('x', ))
def forward(self, x, y):
return x, y
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.half)
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
model.fp16_enabled = True
output_x, output_y = model(input_x, input_y)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y = model(input_x.cuda(), input_y.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.half
# apply to optional input args
class ExampleModule(nn.Module):
@force_fp32(apply_to=('x', 'y'))
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.half)
input_y = torch.ones(1, dtype=torch.half)
input_z = torch.ones(1, dtype=torch.half)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.float32
assert output_z.dtype == torch.half
# out_fp16=True
class ExampleModule(nn.Module):
@force_fp32(apply_to=('x', 'y'), out_fp16=True)
def forward(self, x, y=None, z=None):
return x, y, z
model = ExampleModule()
input_x = torch.ones(1, dtype=torch.float32)
input_y = torch.ones(1, dtype=torch.half)
input_z = torch.ones(1, dtype=torch.half)
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.float32
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
model.fp16_enabled = True
output_x, output_y, output_z = model(input_x, y=input_y, z=input_z)
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
if torch.cuda.is_available():
model.cuda()
output_x, output_y, output_z = model(
input_x.cuda(), y=input_y.cuda(), z=input_z.cuda())
assert output_x.dtype == torch.half
assert output_y.dtype == torch.half
assert output_z.dtype == torch.half
| 9,746 | 31.274834 | 75 | py |
mmdetection | mmdetection-master/tests/test_utils/test_anchor.py | # Copyright (c) OpenMMLab. All rights reserved.
"""
CommandLine:
pytest tests/test_utils/test_anchor.py
xdoctest tests/test_utils/test_anchor.py zero
"""
import pytest
import torch
def test_standard_points_generator():
from mmdet.core.anchor import build_prior_generator
# test init
anchor_generator_cfg = dict(
type='MlvlPointGenerator', strides=[4, 8], offset=0)
anchor_generator = build_prior_generator(anchor_generator_cfg)
assert anchor_generator is not None
assert anchor_generator.num_base_priors == [1, 1]
# test_stride
from mmdet.core.anchor import MlvlPointGenerator
# Square strides
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
mlvl_points_half_stride_generator = MlvlPointGenerator(
strides=[4, 10], offset=0.5)
assert mlvl_points.num_levels == 2
# assert self.num_levels == len(featmap_sizes)
with pytest.raises(AssertionError):
mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device='cpu')
priors = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cpu')
priors_with_stride = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device='cpu')
assert len(priors) == 2
# assert last dimension is (coord_x, coord_y, stride_w, stride_h).
assert priors_with_stride[0].size(1) == 4
assert priors_with_stride[0][0][2] == 4
assert priors_with_stride[0][0][3] == 4
assert priors_with_stride[1][0][2] == 10
assert priors_with_stride[1][0][3] == 10
stride_4_feat_2_2 = priors[0]
assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4
assert stride_4_feat_2_2.size(0) == 4
assert stride_4_feat_2_2.size(1) == 2
stride_10_feat_4_8 = priors[1]
assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10
assert stride_10_feat_4_8.size(0) == 4 * 8
assert stride_10_feat_4_8.size(1) == 2
# assert the offset of 0.5 * stride
priors_half_offset = mlvl_points_half_stride_generator.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cpu')
assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2
assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2
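# Each coordinate is shifted by offset * stride, so for the first point the
# element-wise difference sums to 2 * (0.5 * stride): 4 for stride 4 and 10
# for stride 10.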
if torch.cuda.is_available():
anchor_generator_cfg = dict(
type='MlvlPointGenerator', strides=[4, 8], offset=0)
anchor_generator = build_prior_generator(anchor_generator_cfg)
assert anchor_generator is not None
# Square strides
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
mlvl_points_half_stride_generator = MlvlPointGenerator(
strides=[4, 10], offset=0.5)
assert mlvl_points.num_levels == 2
# assert self.num_levels == len(featmap_sizes)
with pytest.raises(AssertionError):
mlvl_points.grid_priors(featmap_sizes=[(2, 2)], device='cuda')
priors = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cuda')
priors_with_stride = mlvl_points.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], with_stride=True, device='cuda')
assert len(priors) == 2
# assert last dimension is (coord_x, coord_y, stride_w, stride_h).
assert priors_with_stride[0].size(1) == 4
assert priors_with_stride[0][0][2] == 4
assert priors_with_stride[0][0][3] == 4
assert priors_with_stride[1][0][2] == 10
assert priors_with_stride[1][0][3] == 10
stride_4_feat_2_2 = priors[0]
assert (stride_4_feat_2_2[1] - stride_4_feat_2_2[0]).sum() == 4
assert stride_4_feat_2_2.size(0) == 4
assert stride_4_feat_2_2.size(1) == 2
stride_10_feat_4_8 = priors[1]
assert (stride_10_feat_4_8[1] - stride_10_feat_4_8[0]).sum() == 10
assert stride_10_feat_4_8.size(0) == 4 * 8
assert stride_10_feat_4_8.size(1) == 2
# assert the offset of 0.5 * stride
priors_half_offset = mlvl_points_half_stride_generator.grid_priors(
featmap_sizes=[(2, 2), (4, 8)], device='cuda')
assert (priors_half_offset[0][0] - priors[0][0]).sum() == 4 * 0.5 * 2
assert (priors_half_offset[1][0] - priors[1][0]).sum() == 10 * 0.5 * 2
def test_sparse_prior():
from mmdet.core.anchor import MlvlPointGenerator
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
featmap_sizes = [(3, 5), (6, 4)]
grid_anchors = mlvl_points.grid_priors(
featmap_sizes=featmap_sizes, with_stride=False, device='cpu')
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cpu')
assert not sparse_prior.is_cuda
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
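# sparse_priors should agree with grid_priors: with one base prior per
# location, a flat index i on an (h, w) level maps to the grid cell
# (i % w, i // w), scaled by that level's stride (e.g. index 9 on the
# (3, 5) level with stride 4 is the point (16, 4)).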
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cpu')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
from mmdet.core.anchor import AnchorGenerator
mlvl_anchors = AnchorGenerator(
strides=[16, 32], ratios=[1.], scales=[1.], base_sizes=[4, 8])
prior_indexs = torch.Tensor([0, 2, 4, 5, 6, 9]).long()
featmap_sizes = [(3, 5), (6, 4)]
grid_anchors = mlvl_anchors.grid_priors(
featmap_sizes=featmap_sizes, device='cpu')
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cpu')
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cpu')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
# for ssd
from mmdet.core.anchor.anchor_generator import SSDAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = SSDAnchorGenerator(
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32],
ratios=[[2], [2, 3], [2, 3]])
ssd_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
for i in range(len(featmap_sizes)):
sparse_ssd_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cpu')
assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
# for yolo
from mmdet.core.anchor.anchor_generator import YOLOAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = YOLOAnchorGenerator(
strides=[32, 16, 8],
base_sizes=[
[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)],
])
yolo_anchors = anchor_generator.grid_anchors(featmap_sizes, device='cpu')
for i in range(len(featmap_sizes)):
sparse_yolo_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cpu')
assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
if torch.cuda.is_available():
mlvl_points = MlvlPointGenerator(strides=[4, 10], offset=0)
prior_indexs = torch.Tensor([0, 3, 4, 5, 6, 7, 1, 2, 4, 5, 6,
9]).long().cuda()
featmap_sizes = [(6, 8), (6, 4)]
grid_anchors = mlvl_points.grid_priors(
featmap_sizes=featmap_sizes, with_stride=False, device='cuda')
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cuda')
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_points.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cuda')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
assert sparse_prior.is_cuda
mlvl_anchors = AnchorGenerator(
strides=[16, 32],
ratios=[1., 2.5],
scales=[1., 5.],
base_sizes=[4, 8])
prior_indexs = torch.Tensor([4, 5, 6, 7, 0, 2, 50, 4, 5, 6,
9]).long().cuda()
featmap_sizes = [(13, 5), (16, 4)]
grid_anchors = mlvl_anchors.grid_priors(
featmap_sizes=featmap_sizes, device='cuda')
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[0],
level_idx=0,
device='cuda')
assert (sparse_prior == grid_anchors[0][prior_indexs]).all()
sparse_prior = mlvl_anchors.sparse_priors(
prior_idxs=prior_indexs,
featmap_size=featmap_sizes[1],
level_idx=1,
device='cuda')
assert (sparse_prior == grid_anchors[1][prior_indexs]).all()
# for ssd
from mmdet.core.anchor.anchor_generator import SSDAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = SSDAnchorGenerator(
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32],
ratios=[[2], [2, 3], [2, 3]])
ssd_anchors = anchor_generator.grid_anchors(
featmap_sizes, device='cuda')
for i in range(len(featmap_sizes)):
sparse_ssd_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cuda')
assert (sparse_ssd_anchors == ssd_anchors[i][prior_indexs]).all()
# for yolo
from mmdet.core.anchor.anchor_generator import YOLOAnchorGenerator
featmap_sizes = [(38, 38), (19, 19), (10, 10)]
anchor_generator = YOLOAnchorGenerator(
strides=[32, 16, 8],
base_sizes=[
[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)],
])
yolo_anchors = anchor_generator.grid_anchors(
featmap_sizes, device='cuda')
for i in range(len(featmap_sizes)):
sparse_yolo_anchors = anchor_generator.sparse_priors(
prior_idxs=prior_indexs,
level_idx=i,
featmap_size=featmap_sizes[i],
device='cuda')
assert (sparse_yolo_anchors == yolo_anchors[i][prior_indexs]).all()
def test_standard_anchor_generator():
from mmdet.core.anchor import build_anchor_generator
anchor_generator_cfg = dict(
type='AnchorGenerator',
scales=[8],
ratios=[0.5, 1.0, 2.0],
strides=[4, 8])
anchor_generator = build_anchor_generator(anchor_generator_cfg)
assert anchor_generator.num_base_priors == \
anchor_generator.num_base_anchors
assert anchor_generator.num_base_priors == [3, 3]
assert anchor_generator is not None
def test_strides():
from mmdet.core import AnchorGenerator
# Square strides
self = AnchorGenerator([10], [1.], [1.], [10])
anchors = self.grid_anchors([(2, 2)], device='cpu')
expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
[-5., 5., 5., 15.], [5., 5., 15., 15.]])
assert torch.equal(anchors[0], expected_anchors)
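# With the default center_offset of 0, the base anchor (-5, -5, 5, 5) is
# translated by stride-sized steps, so a 2x2 feature map yields centers at
# (0, 0), (10, 0), (0, 10) and (10, 10).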
# Different strides in x and y direction
self = AnchorGenerator([(10, 20)], [1.], [1.], [10])
anchors = self.grid_anchors([(2, 2)], device='cpu')
expected_anchors = torch.tensor([[-5., -5., 5., 5.], [5., -5., 15., 5.],
[-5., 15., 5., 25.], [5., 15., 15., 25.]])
assert torch.equal(anchors[0], expected_anchors)
def test_ssd_anchor_generator():
from mmdet.core.anchor import build_anchor_generator
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
# min_sizes max_sizes must set at the same time
with pytest.raises(AssertionError):
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
min_sizes=[48, 100, 150, 202, 253, 300],
max_sizes=None,
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
build_anchor_generator(anchor_generator_cfg)
# length of min_sizes max_sizes must be the same
with pytest.raises(AssertionError):
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
min_sizes=[48, 100, 150, 202, 253, 300],
max_sizes=[100, 150, 202, 253],
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
build_anchor_generator(anchor_generator_cfg)
# test setting anchor size manually
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
min_sizes=[48, 100, 150, 202, 253, 304],
max_sizes=[100, 150, 202, 253, 304, 320],
strides=[16, 32, 64, 107, 160, 320],
ratios=[[2, 3], [2, 3], [2, 3], [2, 3], [2, 3], [2, 3]])
featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
expected_base_anchors = [
torch.Tensor([[-16.0000, -16.0000, 32.0000, 32.0000],
[-26.6410, -26.6410, 42.6410, 42.6410],
[-25.9411, -8.9706, 41.9411, 24.9706],
[-8.9706, -25.9411, 24.9706, 41.9411],
[-33.5692, -5.8564, 49.5692, 21.8564],
[-5.8564, -33.5692, 21.8564, 49.5692]]),
torch.Tensor([[-34.0000, -34.0000, 66.0000, 66.0000],
[-45.2372, -45.2372, 77.2372, 77.2372],
[-54.7107, -19.3553, 86.7107, 51.3553],
[-19.3553, -54.7107, 51.3553, 86.7107],
[-70.6025, -12.8675, 102.6025, 44.8675],
[-12.8675, -70.6025, 44.8675, 102.6025]]),
torch.Tensor([[-43.0000, -43.0000, 107.0000, 107.0000],
[-55.0345, -55.0345, 119.0345, 119.0345],
[-74.0660, -21.0330, 138.0660, 85.0330],
[-21.0330, -74.0660, 85.0330, 138.0660],
[-97.9038, -11.3013, 161.9038, 75.3013],
[-11.3013, -97.9038, 75.3013, 161.9038]]),
torch.Tensor([[-47.5000, -47.5000, 154.5000, 154.5000],
[-59.5332, -59.5332, 166.5332, 166.5332],
[-89.3356, -17.9178, 196.3356, 124.9178],
[-17.9178, -89.3356, 124.9178, 196.3356],
[-121.4371, -4.8124, 228.4371, 111.8124],
[-4.8124, -121.4371, 111.8124, 228.4371]]),
torch.Tensor([[-46.5000, -46.5000, 206.5000, 206.5000],
[-58.6651, -58.6651, 218.6651, 218.6651],
[-98.8980, -9.4490, 258.8980, 169.4490],
[-9.4490, -98.8980, 169.4490, 258.8980],
[-139.1044, 6.9652, 299.1044, 153.0348],
[6.9652, -139.1044, 153.0348, 299.1044]]),
torch.Tensor([[8.0000, 8.0000, 312.0000, 312.0000],
[4.0513, 4.0513, 315.9487, 315.9487],
[-54.9605, 52.5198, 374.9604, 267.4802],
[52.5198, -54.9605, 267.4802, 374.9604],
[-103.2717, 72.2428, 423.2717, 247.7572],
[72.2428, -103.2717, 247.7572, 423.2717]])
]
base_anchors = anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
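# The values above can be reproduced by hand. For the first level (stride 16,
# min_size 48, max_size 100) the anchor center is stride / 2 = (8, 8) and the
# two square sizes are 48 and sqrt(48 * 100) ~= 69.282:
#   [8 - 24, 8 - 24, 8 + 24, 8 + 24] = [-16, -16, 32, 32]
#   [8 - 34.641, ...] ~= [-26.641, -26.641, 42.641, 42.641]
# Ratio r anchors keep the area: w = 48 * sqrt(r), h = 48 / sqrt(r).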
# check valid flags
expected_valid_pixels = [2400, 600, 150, 54, 24, 6]
multi_level_valid_flags = anchor_generator.valid_flags(
featmap_sizes, (320, 320), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert anchor_generator.num_base_anchors == [6, 6, 6, 6, 6, 6]
# check anchor generation
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 6
# test vgg ssd anchor setting
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
# check base anchors
expected_base_anchors = [
torch.Tensor([[-6.5000, -6.5000, 14.5000, 14.5000],
[-11.3704, -11.3704, 19.3704, 19.3704],
[-10.8492, -3.4246, 18.8492, 11.4246],
[-3.4246, -10.8492, 11.4246, 18.8492]]),
torch.Tensor([[-14.5000, -14.5000, 30.5000, 30.5000],
[-25.3729, -25.3729, 41.3729, 41.3729],
[-23.8198, -7.9099, 39.8198, 23.9099],
[-7.9099, -23.8198, 23.9099, 39.8198],
[-30.9711, -4.9904, 46.9711, 20.9904],
[-4.9904, -30.9711, 20.9904, 46.9711]]),
torch.Tensor([[-33.5000, -33.5000, 65.5000, 65.5000],
[-45.5366, -45.5366, 77.5366, 77.5366],
[-54.0036, -19.0018, 86.0036, 51.0018],
[-19.0018, -54.0036, 51.0018, 86.0036],
[-69.7365, -12.5788, 101.7365, 44.5788],
[-12.5788, -69.7365, 44.5788, 101.7365]]),
torch.Tensor([[-44.5000, -44.5000, 108.5000, 108.5000],
[-56.9817, -56.9817, 120.9817, 120.9817],
[-76.1873, -22.0937, 140.1873, 86.0937],
[-22.0937, -76.1873, 86.0937, 140.1873],
[-100.5019, -12.1673, 164.5019, 76.1673],
[-12.1673, -100.5019, 76.1673, 164.5019]]),
torch.Tensor([[-53.5000, -53.5000, 153.5000, 153.5000],
[-66.2185, -66.2185, 166.2185, 166.2185],
[-96.3711, -23.1855, 196.3711, 123.1855],
[-23.1855, -96.3711, 123.1855, 196.3711]]),
torch.Tensor([[19.5000, 19.5000, 280.5000, 280.5000],
[6.6342, 6.6342, 293.3658, 293.3658],
[-34.5549, 57.7226, 334.5549, 242.2774],
[57.7226, -34.5549, 242.2774, 334.5549]]),
]
base_anchors = anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
# check valid flags
expected_valid_pixels = [5776, 2166, 600, 150, 36, 4]
multi_level_valid_flags = anchor_generator.valid_flags(
featmap_sizes, (300, 300), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert anchor_generator.num_base_anchors == [4, 6, 6, 6, 4, 4]
# check anchor generation
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 6
def test_anchor_generator_with_tuples():
from mmdet.core.anchor import build_anchor_generator
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
anchor_generator_cfg = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[8, 16, 32, 64, 100, 300],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
featmap_sizes = [(38, 38), (19, 19), (10, 10), (5, 5), (3, 3), (1, 1)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
anchor_generator_cfg_tuples = dict(
type='SSDAnchorGenerator',
scale_major=False,
input_size=300,
basesize_ratio_range=(0.15, 0.9),
strides=[(8, 8), (16, 16), (32, 32), (64, 64), (100, 100), (300, 300)],
ratios=[[2], [2, 3], [2, 3], [2, 3], [2], [2]])
anchor_generator_tuples = build_anchor_generator(
anchor_generator_cfg_tuples)
anchors_tuples = anchor_generator_tuples.grid_anchors(
featmap_sizes, device)
for anchor, anchor_tuples in zip(anchors, anchors_tuples):
assert torch.equal(anchor, anchor_tuples)
def test_yolo_anchor_generator():
from mmdet.core.anchor import build_anchor_generator
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
anchor_generator_cfg = dict(
type='YOLOAnchorGenerator',
strides=[32, 16, 8],
base_sizes=[
[(116, 90), (156, 198), (373, 326)],
[(30, 61), (62, 45), (59, 119)],
[(10, 13), (16, 30), (33, 23)],
])
featmap_sizes = [(14, 18), (28, 36), (56, 72)]
anchor_generator = build_anchor_generator(anchor_generator_cfg)
# check base anchors
expected_base_anchors = [
torch.Tensor([[-42.0000, -29.0000, 74.0000, 61.0000],
[-62.0000, -83.0000, 94.0000, 115.0000],
[-170.5000, -147.0000, 202.5000, 179.0000]]),
torch.Tensor([[-7.0000, -22.5000, 23.0000, 38.5000],
[-23.0000, -14.5000, 39.0000, 30.5000],
[-21.5000, -51.5000, 37.5000, 67.5000]]),
torch.Tensor([[-1.0000, -2.5000, 9.0000, 10.5000],
[-4.0000, -11.0000, 12.0000, 19.0000],
[-12.5000, -7.5000, 20.5000, 15.5000]])
]
base_anchors = anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
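# Sanity check: YOLO anchors are centered at stride / 2, so for stride 32 the
# base size (116, 90) gives [16 - 58, 16 - 45, 16 + 58, 16 + 45]
# = [-42, -29, 74, 61], matching the first row above.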
# check number of base anchors for each level
assert anchor_generator.num_base_anchors == [3, 3, 3]
# check anchor generation
anchors = anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 3
def test_retina_anchor():
from mmdet.models import build_head
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
# head configs modified from
# configs/nas_fpn/retinanet_r50_fpn_crop640_50e.py
bbox_head = dict(
type='RetinaSepBNHead',
num_classes=4,
num_ins=5,
in_channels=4,
stacked_convs=1,
feat_channels=4,
anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
bbox_coder=dict(
type='DeltaXYWHBBoxCoder',
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0]))
retina_head = build_head(bbox_head)
assert retina_head.anchor_generator is not None
# use the featmap sizes in NASFPN setting to test retina head
featmap_sizes = [(80, 80), (40, 40), (20, 20), (10, 10), (5, 5)]
# check base anchors
expected_base_anchors = [
torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
[-28.5088, -14.2544, 28.5088, 14.2544],
[-35.9188, -17.9594, 35.9188, 17.9594],
[-16.0000, -16.0000, 16.0000, 16.0000],
[-20.1587, -20.1587, 20.1587, 20.1587],
[-25.3984, -25.3984, 25.3984, 25.3984],
[-11.3137, -22.6274, 11.3137, 22.6274],
[-14.2544, -28.5088, 14.2544, 28.5088],
[-17.9594, -35.9188, 17.9594, 35.9188]]),
torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
[-57.0175, -28.5088, 57.0175, 28.5088],
[-71.8376, -35.9188, 71.8376, 35.9188],
[-32.0000, -32.0000, 32.0000, 32.0000],
[-40.3175, -40.3175, 40.3175, 40.3175],
[-50.7968, -50.7968, 50.7968, 50.7968],
[-22.6274, -45.2548, 22.6274, 45.2548],
[-28.5088, -57.0175, 28.5088, 57.0175],
[-35.9188, -71.8376, 35.9188, 71.8376]]),
torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
[-114.0350, -57.0175, 114.0350, 57.0175],
[-143.6751, -71.8376, 143.6751, 71.8376],
[-64.0000, -64.0000, 64.0000, 64.0000],
[-80.6349, -80.6349, 80.6349, 80.6349],
[-101.5937, -101.5937, 101.5937, 101.5937],
[-45.2548, -90.5097, 45.2548, 90.5097],
[-57.0175, -114.0350, 57.0175, 114.0350],
[-71.8376, -143.6751, 71.8376, 143.6751]]),
torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
[-228.0701, -114.0350, 228.0701, 114.0350],
[-287.3503, -143.6751, 287.3503, 143.6751],
[-128.0000, -128.0000, 128.0000, 128.0000],
[-161.2699, -161.2699, 161.2699, 161.2699],
[-203.1873, -203.1873, 203.1873, 203.1873],
[-90.5097, -181.0193, 90.5097, 181.0193],
[-114.0350, -228.0701, 114.0350, 228.0701],
[-143.6751, -287.3503, 143.6751, 287.3503]]),
torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
[-456.1401, -228.0701, 456.1401, 228.0701],
[-574.7006, -287.3503, 574.7006, 287.3503],
[-256.0000, -256.0000, 256.0000, 256.0000],
[-322.5398, -322.5398, 322.5398, 322.5398],
[-406.3747, -406.3747, 406.3747, 406.3747],
[-181.0193, -362.0387, 181.0193, 362.0387],
[-228.0701, -456.1401, 228.0701, 456.1401],
[-287.3503, -574.7006, 287.3503, 574.7006]])
]
base_anchors = retina_head.anchor_generator.base_anchors
for i, base_anchor in enumerate(base_anchors):
assert base_anchor.allclose(expected_base_anchors[i])
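# These follow from scales_per_octave=3: the scales are 4 * 2**(i / 3) for
# i in (0, 1, 2), i.e. 4, ~5.04 and ~6.35, so at stride 8 the square anchors
# span 32, ~40.32 and ~50.80 pixels centered on the origin (rows 4-6 of the
# first tensor above).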
# check valid flags
expected_valid_pixels = [57600, 14400, 3600, 900, 225]
multi_level_valid_flags = retina_head.anchor_generator.valid_flags(
featmap_sizes, (640, 640), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert retina_head.anchor_generator.num_base_anchors == [9, 9, 9, 9, 9]
# check anchor generation
anchors = retina_head.anchor_generator.grid_anchors(featmap_sizes, device)
assert len(anchors) == 5
def test_guided_anchor():
from mmdet.models import build_head
if torch.cuda.is_available():
device = 'cuda'
else:
device = 'cpu'
# head configs modified from
# configs/guided_anchoring/ga_retinanet_r50_fpn_1x_coco.py
bbox_head = dict(
type='GARetinaHead',
num_classes=8,
in_channels=4,
stacked_convs=1,
feat_channels=4,
approx_anchor_generator=dict(
type='AnchorGenerator',
octave_base_scale=4,
scales_per_octave=3,
ratios=[0.5, 1.0, 2.0],
strides=[8, 16, 32, 64, 128]),
square_anchor_generator=dict(
type='AnchorGenerator',
ratios=[1.0],
scales=[4],
strides=[8, 16, 32, 64, 128]))
ga_retina_head = build_head(bbox_head)
assert ga_retina_head.approx_anchor_generator is not None
# use the featmap sizes in NASFPN setting to test ga_retina_head
featmap_sizes = [(100, 152), (50, 76), (25, 38), (13, 19), (7, 10)]
# check base anchors
expected_approxs = [
torch.Tensor([[-22.6274, -11.3137, 22.6274, 11.3137],
[-28.5088, -14.2544, 28.5088, 14.2544],
[-35.9188, -17.9594, 35.9188, 17.9594],
[-16.0000, -16.0000, 16.0000, 16.0000],
[-20.1587, -20.1587, 20.1587, 20.1587],
[-25.3984, -25.3984, 25.3984, 25.3984],
[-11.3137, -22.6274, 11.3137, 22.6274],
[-14.2544, -28.5088, 14.2544, 28.5088],
[-17.9594, -35.9188, 17.9594, 35.9188]]),
torch.Tensor([[-45.2548, -22.6274, 45.2548, 22.6274],
[-57.0175, -28.5088, 57.0175, 28.5088],
[-71.8376, -35.9188, 71.8376, 35.9188],
[-32.0000, -32.0000, 32.0000, 32.0000],
[-40.3175, -40.3175, 40.3175, 40.3175],
[-50.7968, -50.7968, 50.7968, 50.7968],
[-22.6274, -45.2548, 22.6274, 45.2548],
[-28.5088, -57.0175, 28.5088, 57.0175],
[-35.9188, -71.8376, 35.9188, 71.8376]]),
torch.Tensor([[-90.5097, -45.2548, 90.5097, 45.2548],
[-114.0350, -57.0175, 114.0350, 57.0175],
[-143.6751, -71.8376, 143.6751, 71.8376],
[-64.0000, -64.0000, 64.0000, 64.0000],
[-80.6349, -80.6349, 80.6349, 80.6349],
[-101.5937, -101.5937, 101.5937, 101.5937],
[-45.2548, -90.5097, 45.2548, 90.5097],
[-57.0175, -114.0350, 57.0175, 114.0350],
[-71.8376, -143.6751, 71.8376, 143.6751]]),
torch.Tensor([[-181.0193, -90.5097, 181.0193, 90.5097],
[-228.0701, -114.0350, 228.0701, 114.0350],
[-287.3503, -143.6751, 287.3503, 143.6751],
[-128.0000, -128.0000, 128.0000, 128.0000],
[-161.2699, -161.2699, 161.2699, 161.2699],
[-203.1873, -203.1873, 203.1873, 203.1873],
[-90.5097, -181.0193, 90.5097, 181.0193],
[-114.0350, -228.0701, 114.0350, 228.0701],
[-143.6751, -287.3503, 143.6751, 287.3503]]),
torch.Tensor([[-362.0387, -181.0193, 362.0387, 181.0193],
[-456.1401, -228.0701, 456.1401, 228.0701],
[-574.7006, -287.3503, 574.7006, 287.3503],
[-256.0000, -256.0000, 256.0000, 256.0000],
[-322.5398, -322.5398, 322.5398, 322.5398],
[-406.3747, -406.3747, 406.3747, 406.3747],
[-181.0193, -362.0387, 181.0193, 362.0387],
[-228.0701, -456.1401, 228.0701, 456.1401],
[-287.3503, -574.7006, 287.3503, 574.7006]])
]
approxs = ga_retina_head.approx_anchor_generator.base_anchors
for i, base_anchor in enumerate(approxs):
assert base_anchor.allclose(expected_approxs[i])
# check valid flags
expected_valid_pixels = [136800, 34200, 8550, 2223, 630]
multi_level_valid_flags = ga_retina_head.approx_anchor_generator \
.valid_flags(featmap_sizes, (800, 1216), device)
for i, single_level_valid_flag in enumerate(multi_level_valid_flags):
assert single_level_valid_flag.sum() == expected_valid_pixels[i]
# check number of base anchors for each level
assert ga_retina_head.approx_anchor_generator.num_base_anchors == [
9, 9, 9, 9, 9
]
# check approx generation
squares = ga_retina_head.square_anchor_generator.grid_anchors(
featmap_sizes, device)
assert len(squares) == 5
expected_squares = [
torch.Tensor([[-16., -16., 16., 16.]]),
torch.Tensor([[-32., -32., 32., 32]]),
torch.Tensor([[-64., -64., 64., 64.]]),
torch.Tensor([[-128., -128., 128., 128.]]),
torch.Tensor([[-256., -256., 256., 256.]])
]
squares = ga_retina_head.square_anchor_generator.base_anchors
for i, base_anchor in enumerate(squares):
assert base_anchor.allclose(expected_squares[i])
# square_anchor_generator does not check valid flags
# check number of base anchors for each level
assert (ga_retina_head.square_anchor_generator.num_base_anchors == [
1, 1, 1, 1, 1
])
# check square generation
anchors = ga_retina_head.square_anchor_generator.grid_anchors(
featmap_sizes, device)
assert len(anchors) == 5
| 33,157 | 42.062338 | 79 | py |
mmdetection | mmdetection-master/tests/test_utils/test_assigner.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Tests the Assigner objects.
CommandLine:
pytest tests/test_utils/test_assigner.py
xdoctest tests/test_utils/test_assigner.py zero
"""
import pytest
import torch
from mmdet.core.bbox.assigners import (ApproxMaxIoUAssigner,
AscendMaxIoUAssigner,
CenterRegionAssigner, HungarianAssigner,
MaskHungarianAssigner, MaxIoUAssigner,
PointAssigner, SimOTAAssigner,
TaskAlignedAssigner, UniformAssigner)
def test_max_iou_assigner():
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 4
assert len(assign_result.labels) == 4
expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
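# Convention used by AssignResult.gt_inds throughout these tests: values are
# 1-based indices into gt_bboxes, 0 marks a negative (background) sample and
# -1 marks an ignored sample.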
def test_max_iou_assigner_with_ignore():
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
ignore_wrt_candidates=False,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[30, 32, 40, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
assign_result = self.assign(
bboxes, gt_bboxes, gt_bboxes_ignore=gt_bboxes_ignore)
expected_gt_inds = torch.LongTensor([1, 0, 2, -1])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_empty_gt():
"""Test corner case where an image might have no true detections."""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
assign_result = self.assign(bboxes, gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_max_iou_assigner_with_empty_boxes():
"""Test corner case where a network might predict no boxes."""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
# Test with gt_labels
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 0
assert tuple(assign_result.labels.shape) == (0, )
# Test without gt_labels
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=None)
assert len(assign_result.gt_inds) == 0
assert assign_result.labels is None
def test_max_iou_assigner_with_empty_boxes_and_ignore():
"""Test corner case where a network might predict no boxes and
ignore_iof_thr is on."""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
ignore_iof_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_bboxes_ignore = torch.Tensor([
[30, 30, 40, 40],
])
gt_labels = torch.LongTensor([2, 3])
# Test with gt_labels
assign_result = self.assign(
bboxes,
gt_bboxes,
gt_labels=gt_labels,
gt_bboxes_ignore=gt_bboxes_ignore)
assert len(assign_result.gt_inds) == 0
assert tuple(assign_result.labels.shape) == (0, )
# Test without gt_labels
assign_result = self.assign(
bboxes, gt_bboxes, gt_labels=None, gt_bboxes_ignore=gt_bboxes_ignore)
assert len(assign_result.gt_inds) == 0
assert assign_result.labels is None
def test_max_iou_assigner_with_empty_boxes_and_gt():
"""Test corner case where a network might predict no boxes and no gt."""
self = MaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
assign_result = self.assign(bboxes, gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_point_assigner():
self = PointAssigner()
points = torch.FloatTensor([ # [x, y, stride]
[0, 0, 1],
[10, 10, 1],
[5, 5, 1],
[32, 32, 1],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
assign_result = self.assign(points, gt_bboxes)
expected_gt_inds = torch.LongTensor([1, 2, 1, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_point_assigner_with_empty_gt():
"""Test corner case where an image might have no true detections."""
self = PointAssigner()
points = torch.FloatTensor([ # [x, y, stride]
[0, 0, 1],
[10, 10, 1],
[5, 5, 1],
[32, 32, 1],
])
gt_bboxes = torch.FloatTensor([])
assign_result = self.assign(points, gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_point_assigner_with_empty_boxes_and_gt():
"""Test corner case where an image might predict no points and no gt."""
self = PointAssigner()
points = torch.FloatTensor([])
gt_bboxes = torch.FloatTensor([])
assign_result = self.assign(points, gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_approx_iou_assigner():
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
expected_gt_inds = torch.LongTensor([1, 0, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_approx_iou_assigner_with_empty_gt():
"""Test corner case where an image might have no true detections."""
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([])
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_approx_iou_assigner_with_empty_boxes():
"""Test corner case where an network might predict no boxes."""
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_approx_iou_assigner_with_empty_boxes_and_gt():
"""Test corner case where an network might predict no boxes and no gt."""
self = ApproxMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
bboxes = torch.empty((0, 4))
gt_bboxes = torch.empty((0, 4))
approxs_per_octave = 1
approxs = bboxes
squares = bboxes
assign_result = self.assign(approxs, squares, approxs_per_octave,
gt_bboxes)
assert len(assign_result.gt_inds) == 0
def test_random_assign_result():
"""Test random instantiation of assign result to catch corner cases."""
from mmdet.core.bbox.assigners.assign_result import AssignResult
AssignResult.random()
AssignResult.random(num_gts=0, num_preds=0)
AssignResult.random(num_gts=0, num_preds=3)
AssignResult.random(num_gts=3, num_preds=3)
AssignResult.random(num_gts=0, num_preds=3)
AssignResult.random(num_gts=7, num_preds=7)
AssignResult.random(num_gts=7, num_preds=64)
AssignResult.random(num_gts=24, num_preds=3)
def test_center_region_assigner():
self = CenterRegionAssigner(pos_scale=0.3, neg_scale=1)
bboxes = torch.FloatTensor([[0, 0, 10, 10], [10, 10, 20, 20], [8, 8, 9,
9]])
gt_bboxes = torch.FloatTensor([
[0, 0, 11, 11], # match bboxes[0]
[10, 10, 20, 20], # match bboxes[1]
[4.5, 4.5, 5.5, 5.5], # match bboxes[0] but area is too small
[0, 0, 10, 10], # match bboxes[1] and has a smaller area than gt[0]
])
gt_labels = torch.LongTensor([2, 3, 4, 5])
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 3
assert len(assign_result.labels) == 3
expected_gt_inds = torch.LongTensor([4, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
shadowed_labels = assign_result.get_extra_property('shadowed_labels')
# [8, 8, 9, 9] in the shadowed region of [0, 0, 11, 11] (label: 2)
assert torch.any(shadowed_labels == torch.LongTensor([[2, 2]]))
# [8, 8, 9, 9] in the shadowed region of [0, 0, 10, 10] (label: 5)
assert torch.any(shadowed_labels == torch.LongTensor([[2, 5]]))
# [0, 0, 10, 10] is already assigned to [4.5, 4.5, 5.5, 5.5].
# Therefore, [0, 0, 11, 11] (label: 2) is shadowed
assert torch.any(shadowed_labels == torch.LongTensor([[0, 2]]))
def test_center_region_assigner_with_ignore():
self = CenterRegionAssigner(
pos_scale=0.5,
neg_scale=1,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10], # match bboxes[0]
[10, 10, 20, 20], # match bboxes[1]
])
gt_bboxes_ignore = torch.FloatTensor([
[0, 0, 10, 10], # match bboxes[0]
])
gt_labels = torch.LongTensor([1, 2])
assign_result = self.assign(
bboxes,
gt_bboxes,
gt_bboxes_ignore=gt_bboxes_ignore,
gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 2
assert len(assign_result.labels) == 2
expected_gt_inds = torch.LongTensor([-1, 2])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_center_region_assigner_with_empty_bboxes():
self = CenterRegionAssigner(
pos_scale=0.5,
neg_scale=1,
)
bboxes = torch.empty((0, 4)).float()
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 10], # match bboxes[0]
[10, 10, 20, 20], # match bboxes[1]
])
gt_labels = torch.LongTensor([1, 2])
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert assign_result.gt_inds is None or assign_result.gt_inds.numel() == 0
assert assign_result.labels is None or assign_result.labels.numel() == 0
def test_center_region_assigner_with_empty_gts():
self = CenterRegionAssigner(
pos_scale=0.5,
neg_scale=1,
)
bboxes = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
])
gt_bboxes = torch.empty((0, 4)).float()
gt_labels = torch.empty((0, )).long()
assign_result = self.assign(bboxes, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 2
expected_gt_inds = torch.LongTensor([0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_hungarian_match_assigner():
self = HungarianAssigner()
assert self.iou_cost.iou_mode == 'giou'
# test no gt bboxes
bbox_pred = torch.rand((10, 4))
cls_pred = torch.rand((10, 81))
gt_bboxes = torch.empty((0, 4)).float()
gt_labels = torch.empty((0, )).long()
img_meta = dict(img_shape=(10, 8, 3))
assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
img_meta)
assert torch.all(assign_result.gt_inds == 0)
assert torch.all(assign_result.labels == -1)
# test with gt bboxes
gt_bboxes = torch.FloatTensor([[0, 0, 5, 7], [3, 5, 7, 8]])
gt_labels = torch.LongTensor([1, 20])
assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_bboxes.size(0)
assert (assign_result.labels > -1).sum() == gt_bboxes.size(0)
# test iou mode
self = HungarianAssigner(
iou_cost=dict(type='IoUCost', iou_mode='iou', weight=1.0))
assert self.iou_cost.iou_mode == 'iou'
assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_bboxes.size(0)
assert (assign_result.labels > -1).sum() == gt_bboxes.size(0)
# test focal loss mode
self = HungarianAssigner(
iou_cost=dict(type='IoUCost', iou_mode='giou', weight=1.0),
cls_cost=dict(type='FocalLossCost', weight=1.))
assert self.iou_cost.iou_mode == 'giou'
assign_result = self.assign(bbox_pred, cls_pred, gt_bboxes, gt_labels,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_bboxes.size(0)
assert (assign_result.labels > -1).sum() == gt_bboxes.size(0)
def test_uniform_assigner():
self = UniformAssigner(0.15, 0.7, 1)
pred_bbox = torch.FloatTensor([
[1, 1, 12, 8],
[4, 4, 20, 20],
[1, 5, 15, 15],
[30, 5, 32, 42],
])
anchor = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
assign_result = self.assign(
pred_bbox, anchor, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 4
assert len(assign_result.labels) == 4
expected_gt_inds = torch.LongTensor([-1, 0, 2, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_uniform_assigner_with_empty_gt():
"""Test corner case where an image might have no true detections."""
self = UniformAssigner(0.15, 0.7, 1)
pred_bbox = torch.FloatTensor([
[1, 1, 12, 8],
[4, 4, 20, 20],
[1, 5, 15, 15],
[30, 5, 32, 42],
])
anchor = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.empty(0, 4)
assign_result = self.assign(pred_bbox, anchor, gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_uniform_assigner_with_empty_boxes():
"""Test corner case where a network might predict no boxes."""
self = UniformAssigner(0.15, 0.7, 1)
pred_bbox = torch.empty((0, 4))
anchor = torch.empty((0, 4))
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([2, 3])
# Test with gt_labels
assign_result = self.assign(
pred_bbox, anchor, gt_bboxes, gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 0
assert tuple(assign_result.labels.shape) == (0, )
# Test without gt_labels
assign_result = self.assign(pred_bbox, anchor, gt_bboxes, gt_labels=None)
assert len(assign_result.gt_inds) == 0
def test_sim_ota_assigner():
self = SimOTAAssigner(
center_radius=2.5, candidate_topk=1, iou_weight=3.0, cls_weight=1.0)
pred_scores = torch.FloatTensor([[0.2], [0.8]])
priors = torch.Tensor([[0, 12, 23, 34], [4, 5, 6, 7]])
decoded_bboxes = torch.Tensor([[[30, 40, 50, 60]], [[4, 5, 6, 7]]])
gt_bboxes = torch.Tensor([[23.6667, 23.8757, 238.6326, 151.8874]])
gt_labels = torch.LongTensor([2])
assign_result = self.assign(pred_scores, priors, decoded_bboxes, gt_bboxes,
gt_labels)
expected_gt_inds = torch.LongTensor([0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_task_aligned_assigner():
with pytest.raises(AssertionError):
TaskAlignedAssigner(topk=0)
self = TaskAlignedAssigner(topk=13)
pred_score = torch.FloatTensor([[0.1, 0.2], [0.2, 0.3], [0.3, 0.4],
[0.4, 0.5]])
pred_bbox = torch.FloatTensor([
[1, 1, 12, 8],
[4, 4, 20, 20],
[1, 5, 15, 15],
[30, 5, 32, 42],
])
anchor = torch.FloatTensor([
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
])
gt_bboxes = torch.FloatTensor([
[0, 0, 10, 9],
[0, 10, 10, 19],
])
gt_labels = torch.LongTensor([0, 1])
assign_result = self.assign(
pred_score,
pred_bbox,
anchor,
gt_bboxes=gt_bboxes,
gt_labels=gt_labels)
assert len(assign_result.gt_inds) == 4
assert len(assign_result.labels) == 4
# test empty gt
gt_bboxes = torch.empty(0, 4)
gt_labels = torch.empty(0, 2)
assign_result = self.assign(
pred_score, pred_bbox, anchor, gt_bboxes=gt_bboxes)
expected_gt_inds = torch.LongTensor([0, 0, 0, 0])
assert torch.all(assign_result.gt_inds == expected_gt_inds)
def test_mask_hungarian_match_assigner():
# test no gt masks
assigner_cfg = dict(
cls_cost=dict(type='ClassificationCost', weight=1.0),
mask_cost=dict(type='FocalLossCost', weight=20.0, binary_input=True),
dice_cost=dict(type='DiceCost', weight=1.0, pred_act=True, eps=1.0))
self = MaskHungarianAssigner(**assigner_cfg)
cls_pred = torch.rand((10, 133))
mask_pred = torch.rand((10, 50, 50))
gt_labels = torch.empty((0, )).long()
gt_masks = torch.empty((0, 50, 50)).float()
img_meta = None
assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
img_meta)
assert torch.all(assign_result.gt_inds == 0)
assert torch.all(assign_result.labels == -1)
# test with gt masks of naive_dice is True
gt_labels = torch.LongTensor([10, 100])
gt_masks = torch.zeros((2, 50, 50)).long()
gt_masks[0, :25] = 1
gt_masks[1, 25:] = 1
assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
assert (assign_result.labels > -1).sum() == gt_labels.size(0)
# test with cls mode
assigner_cfg = dict(
cls_cost=dict(type='ClassificationCost', weight=1.0),
mask_cost=dict(type='FocalLossCost', weight=0.0, binary_input=True),
dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True, eps=1.0))
self = MaskHungarianAssigner(**assigner_cfg)
assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
assert (assign_result.labels > -1).sum() == gt_labels.size(0)
# test with mask focal mode
assigner_cfg = dict(
cls_cost=dict(type='ClassificationCost', weight=0.0),
mask_cost=dict(type='FocalLossCost', weight=1.0, binary_input=True),
dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True, eps=1.0))
self = MaskHungarianAssigner(**assigner_cfg)
assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
assert (assign_result.labels > -1).sum() == gt_labels.size(0)
# test with mask dice mode
assigner_cfg = dict(
cls_cost=dict(type='ClassificationCost', weight=0.0),
mask_cost=dict(type='FocalLossCost', weight=0.0, binary_input=True),
dice_cost=dict(type='DiceCost', weight=1.0, pred_act=True, eps=1.0))
self = MaskHungarianAssigner(**assigner_cfg)
assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
assert (assign_result.labels > -1).sum() == gt_labels.size(0)
# test with mask dice mode that naive_dice is False
assigner_cfg = dict(
cls_cost=dict(type='ClassificationCost', weight=0.0),
mask_cost=dict(type='FocalLossCost', weight=0.0, binary_input=True),
dice_cost=dict(
type='DiceCost',
weight=1.0,
pred_act=True,
eps=1.0,
naive_dice=False))
self = MaskHungarianAssigner(**assigner_cfg)
assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
assert (assign_result.labels > -1).sum() == gt_labels.size(0)
# test with mask bce mode
assigner_cfg = dict(
cls_cost=dict(type='ClassificationCost', weight=0.0),
mask_cost=dict(
type='CrossEntropyLossCost', weight=1.0, use_sigmoid=True),
dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True, eps=1.0))
self = MaskHungarianAssigner(**assigner_cfg)
assign_result = self.assign(cls_pred, mask_pred, gt_labels, gt_masks,
img_meta)
assert torch.all(assign_result.gt_inds > -1)
assert (assign_result.gt_inds > 0).sum() == gt_labels.size(0)
assert (assign_result.labels > -1).sum() == gt_labels.size(0)
# test with ce mode of CrossEntropyLossCost which is not supported yet
assigner_cfg = dict(
cls_cost=dict(type='ClassificationCost', weight=0.0),
mask_cost=dict(
type='CrossEntropyLossCost', weight=1.0, use_sigmoid=False),
dice_cost=dict(type='DiceCost', weight=0.0, pred_act=True, eps=1.0))
with pytest.raises(AssertionError):
self = MaskHungarianAssigner(**assigner_cfg)
def test_ascend_max_iou_assigner():
self = AscendMaxIoUAssigner(
pos_iou_thr=0.5,
neg_iou_thr=0.5,
)
batch_bboxes = torch.FloatTensor([[
[0, 0, 10, 10],
[10, 10, 20, 20],
[5, 5, 15, 15],
[32, 32, 38, 42],
]])
batch_gt_bboxes = torch.FloatTensor([[
[0, 0, 10, 9],
[0, 10, 10, 19],
]])
batch_gt_labels = torch.LongTensor([[2, 3]])
batch_bboxes_ignore_mask = torch.IntTensor([[1, 1, 1, 1]])
assign_result = self.assign(
batch_bboxes,
batch_gt_bboxes,
batch_gt_labels=batch_gt_labels,
batch_bboxes_ignore_mask=batch_bboxes_ignore_mask)
expected_batch_pos_mask = torch.IntTensor([1, 0, 1, 0])
expected_batch_anchor_gt_indes = torch.IntTensor([0, 0, 1, 0])
expected_batch_anchor_gt_labels = torch.IntTensor([2, 0, 3, 0])
assert torch.all(assign_result.batch_pos_mask == expected_batch_pos_mask)
assert torch.all(
assign_result.batch_anchor_gt_indes *
assign_result.batch_pos_mask == expected_batch_anchor_gt_indes)
assert torch.all(
assign_result.batch_anchor_gt_labels *
assign_result.batch_pos_mask == expected_batch_anchor_gt_labels)
| 24,349 | 33.736091 | 79 | py |
mmdetection | mmdetection-master/tests/test_utils/test_coder.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmdet.core.bbox.coder import (DeltaXYWHBBoxCoder, DistancePointBBoxCoder,
TBLRBBoxCoder, YOLOBBoxCoder)
def test_yolo_bbox_coder():
coder = YOLOBBoxCoder()
bboxes = torch.Tensor([[-42., -29., 74., 61.], [-10., -29., 106., 61.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
pred_bboxes = torch.Tensor([[0.4709, 0.6152, 0.1690, -0.4056],
[0.5399, 0.6653, 0.1162, -0.4162],
[0.4654, 0.6618, 0.1548, -0.4301],
[0.4786, 0.6197, 0.1896, -0.4479]])
grid_size = 32
expected_decode_bboxes = torch.Tensor(
[[-53.6102, -10.3096, 83.7478, 49.6824],
[-15.8700, -8.3901, 114.4236, 50.9693],
[11.1822, -8.0924, 146.6034, 50.4476],
[41.2068, -8.9232, 181.4236, 48.5840]])
assert expected_decode_bboxes.allclose(
coder.decode(bboxes, pred_bboxes, grid_size))
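# Decode rule (anchor width w, height h, center (cx, cy), grid of
# `grid_size` pixels):
#   cx' = (dx - 0.5) * grid_size + cx,   w' = exp(dw) * w   (same for y / h)
# e.g. row 1: w = 116 and exp(0.1690) * 116 ~= 137.36
# = 83.7478 - (-53.6102).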
def test_delta_bbox_coder():
coder = DeltaXYWHBBoxCoder()
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 1.0000, 1.0000],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((32, 32)))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(32, 32))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32)])[0]
assert out.allclose(batch_out)
    # test when the max_shape list length does not match the batch size
with pytest.raises(AssertionError):
coder.decode(
batch_rois, batch_deltas, max_shape=[(32, 32), (32, 32), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
# test add_ctr_clamp
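    # with add_ctr_clamp=True the center shift in pixels (dx * aw, dy * ah)
    # is clamped to at most ctr_clamp (a trick used by YOLOF), so the large
    # offsets in the first row below get truncated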
coder = DeltaXYWHBBoxCoder(add_ctr_clamp=True, ctr_clamp=2)
rois = torch.Tensor([[0., 0., 6., 6.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[1., 1., 2., 2.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.0000, 0.0000, 27.1672, 27.1672],
[0.1409, 0.1409, 2.8591, 2.8591],
[0.0000, 0.3161, 4.1945, 0.6839],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert expected_decode_bboxes.allclose(out, atol=1e-04)
def test_tblr_bbox_coder():
coder = TBLRBBoxCoder(normalizer=15.)
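    # TBLR background (hedged): deltas are (top, bottom, left, right)
    # distances from the prior center to the four borders, scaled by the
    # normalizer (15 here) and, by default, by the prior's width/height;
    # decoding multiplies those factors back before clipping to max_shape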
rois = torch.Tensor([[0., 0., 1., 1.], [0., 0., 1., 1.], [0., 0., 1., 1.],
[5., 5., 5., 5.]])
deltas = torch.Tensor([[0., 0., 0., 0.], [1., 1., 1., 1.],
[0., 0., 2., -1.], [0.7, -1.9, -0.5, 0.3]])
expected_decode_bboxes = torch.Tensor([[0.5000, 0.5000, 0.5000, 0.5000],
[0.0000, 0.0000, 12.0000, 13.0000],
[0.0000, 0.5000, 0.0000, 0.5000],
[5.0000, 5.0000, 5.0000, 5.0000]])
out = coder.decode(rois, deltas, max_shape=(13, 12))
assert expected_decode_bboxes.allclose(out)
out = coder.decode(rois, deltas, max_shape=torch.Tensor((13, 12)))
assert expected_decode_bboxes.allclose(out)
batch_rois = rois.unsqueeze(0).repeat(2, 1, 1)
batch_deltas = deltas.unsqueeze(0).repeat(2, 1, 1)
batch_out = coder.decode(batch_rois, batch_deltas, max_shape=(13, 12))[0]
assert out.allclose(batch_out)
batch_out = coder.decode(
batch_rois, batch_deltas, max_shape=[(13, 12), (13, 12)])[0]
assert out.allclose(batch_out)
    # test when the max_shape list length does not match the batch size
with pytest.raises(AssertionError):
coder.decode(batch_rois, batch_deltas, max_shape=[(13, 12)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = coder.decode(rois, deltas, max_shape=(32, 32))
assert rois.shape == out.shape
def test_distance_point_bbox_coder():
coder = DistancePointBBoxCoder()
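    # encode() turns each (point, gt box) pair into (left, top, right,
    # bottom) distances from the point to the box borders, clamped into
    # [0, max_dis - eps]; decode() reverses this and clips to max_shape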
points = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], [29., 170.]])
gt_bboxes = torch.Tensor([[74., 61., 75., 62.], [0., 104., 0., 112.],
[100., 90., 100., 120.], [0., 120., 100., 120.]])
expected_distance = torch.Tensor([[0., 0., 1., 1.], [0., 2., 29., 6.],
[38., 0., 0., 50.], [29., 50., 50., 0.]])
out_distance = coder.encode(points, gt_bboxes, max_dis=50, eps=0)
assert expected_distance.allclose(out_distance)
distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
out_bbox = coder.decode(points, distance, max_shape=(120, 100))
assert gt_bboxes.allclose(out_bbox)
| 5,779 | 44.15625 | 79 | py |
mmdetection | mmdetection-master/tests/test_utils/test_compat_config.py | import pytest
from mmcv import ConfigDict
from mmdet.utils.compat_config import (compat_imgs_per_gpu, compat_loader_args,
compat_runner_args)
def test_compat_runner_args():
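    # compat_runner_args migrates legacy configs: a bare `total_epochs` key
    # becomes runner=dict(type='EpochBasedRunner', max_epochs=total_epochs),
    # with a single deprecation warning emitted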
cfg = ConfigDict(dict(total_epochs=12))
with pytest.warns(None) as record:
cfg = compat_runner_args(cfg)
assert len(record) == 1
assert 'runner' in record.list[0].message.args[0]
assert 'runner' in cfg
assert cfg.runner.type == 'EpochBasedRunner'
assert cfg.runner.max_epochs == cfg.total_epochs
def test_compat_loader_args():
cfg = ConfigDict(dict(data=dict(val=dict(), test=dict(), train=dict())))
cfg = compat_loader_args(cfg)
# auto fill loader args
assert 'val_dataloader' in cfg.data
assert 'train_dataloader' in cfg.data
assert 'test_dataloader' in cfg.data
cfg = ConfigDict(
dict(
data=dict(
samples_per_gpu=1,
persistent_workers=True,
workers_per_gpu=1,
val=dict(samples_per_gpu=3),
test=dict(samples_per_gpu=2),
train=dict())))
cfg = compat_loader_args(cfg)
assert cfg.data.train_dataloader.workers_per_gpu == 1
assert cfg.data.train_dataloader.samples_per_gpu == 1
assert cfg.data.train_dataloader.persistent_workers
assert cfg.data.val_dataloader.workers_per_gpu == 1
assert cfg.data.val_dataloader.samples_per_gpu == 3
assert cfg.data.test_dataloader.workers_per_gpu == 1
assert cfg.data.test_dataloader.samples_per_gpu == 2
    # test the case where the test set is a list of datasets
cfg = ConfigDict(
dict(
data=dict(
samples_per_gpu=1,
persistent_workers=True,
workers_per_gpu=1,
val=dict(samples_per_gpu=3),
test=[dict(samples_per_gpu=2),
dict(samples_per_gpu=3)],
train=dict())))
cfg = compat_loader_args(cfg)
assert cfg.data.test_dataloader.samples_per_gpu == 3
    # assert that loader args can not be set in two places at once
cfg = ConfigDict(
dict(
data=dict(
samples_per_gpu=1,
persistent_workers=True,
workers_per_gpu=1,
val=dict(samples_per_gpu=3),
test=dict(samples_per_gpu=2),
train=dict(),
train_dataloader=dict(samples_per_gpu=2))))
# samples_per_gpu can not be set in `train_dataloader`
# and data field at the same time
with pytest.raises(AssertionError):
compat_loader_args(cfg)
cfg = ConfigDict(
dict(
data=dict(
samples_per_gpu=1,
persistent_workers=True,
workers_per_gpu=1,
val=dict(samples_per_gpu=3),
test=dict(samples_per_gpu=2),
train=dict(),
val_dataloader=dict(samples_per_gpu=2))))
# samples_per_gpu can not be set in `val_dataloader`
# and data field at the same time
with pytest.raises(AssertionError):
compat_loader_args(cfg)
cfg = ConfigDict(
dict(
data=dict(
samples_per_gpu=1,
persistent_workers=True,
workers_per_gpu=1,
val=dict(samples_per_gpu=3),
test=dict(samples_per_gpu=2),
test_dataloader=dict(samples_per_gpu=2))))
# samples_per_gpu can not be set in `test_dataloader`
# and data field at the same time
with pytest.raises(AssertionError):
compat_loader_args(cfg)
def test_compat_imgs_per_gpu():
cfg = ConfigDict(
dict(
data=dict(
imgs_per_gpu=1,
samples_per_gpu=2,
val=dict(),
test=dict(),
train=dict())))
cfg = compat_imgs_per_gpu(cfg)
assert cfg.data.samples_per_gpu == cfg.data.imgs_per_gpu
| 3,947 | 33.034483 | 79 | py |
mmdetection | mmdetection-master/tests/test_utils/test_general_data.py | import copy
import numpy as np
import pytest
import torch
from mmdet.core import GeneralData, InstanceData
def _equal(a, b):
if isinstance(a, (torch.Tensor, np.ndarray)):
return (a == b).all()
else:
return a == b
def test_general_data():
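    # GeneralData mixes dict-style and attribute-style access; meta_info
    # fields become effectively write-once after they are set, while data
    # fields can be freely added, overwritten and deleted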
# test init
meta_info = dict(
img_size=[256, 256],
path='dadfaff',
scale_factor=np.array([1.5, 1.5]),
img_shape=torch.rand(4))
data = dict(
bboxes=torch.rand(4, 4),
labels=torch.rand(4),
masks=np.random.rand(4, 2, 2))
instance_data = GeneralData(meta_info=meta_info)
assert 'img_size' in instance_data
assert instance_data.img_size == [256, 256]
assert instance_data['img_size'] == [256, 256]
assert 'path' in instance_data
assert instance_data.path == 'dadfaff'
# test nice_repr
repr_instance_data = instance_data.new(data=data)
nice_repr = str(repr_instance_data)
for line in nice_repr.split('\n'):
if 'masks' in line:
assert 'shape' in line
assert '(4, 2, 2)' in line
if 'bboxes' in line:
assert 'shape' in line
assert 'torch.Size([4, 4])' in line
if 'path' in line:
assert 'dadfaff' in line
if 'scale_factor' in line:
assert '[1.5 1.5]' in line
instance_data = GeneralData(
meta_info=meta_info, data=dict(bboxes=torch.rand(5)))
assert 'bboxes' in instance_data
assert len(instance_data.bboxes) == 5
# data should be a dict
with pytest.raises(AssertionError):
GeneralData(data=1)
# test set data
instance_data = GeneralData()
instance_data.set_data(data)
assert 'bboxes' in instance_data
assert len(instance_data.bboxes) == 4
assert 'masks' in instance_data
assert len(instance_data.masks) == 4
# data should be a dict
with pytest.raises(AssertionError):
instance_data.set_data(data=1)
# test set_meta
instance_data = GeneralData()
instance_data.set_meta_info(meta_info)
assert 'img_size' in instance_data
assert instance_data.img_size == [256, 256]
assert instance_data['img_size'] == [256, 256]
assert 'path' in instance_data
assert instance_data.path == 'dadfaff'
    # overwriting meta info with identical values is allowed
instance_data.set_meta_info(meta_info)
# meta should be a dict
with pytest.raises(AssertionError):
instance_data.set_meta_info(meta_info='fjhka')
# attribute in `_meta_info_field` is immutable once initialized
instance_data.set_meta_info(meta_info)
# meta should be immutable
with pytest.raises(KeyError):
instance_data.set_meta_info(dict(img_size=[254, 251]))
with pytest.raises(KeyError):
duplicate_meta_info = copy.deepcopy(meta_info)
duplicate_meta_info['path'] = 'dada'
instance_data.set_meta_info(duplicate_meta_info)
with pytest.raises(KeyError):
duplicate_meta_info = copy.deepcopy(meta_info)
duplicate_meta_info['scale_factor'] = np.array([1.5, 1.6])
instance_data.set_meta_info(duplicate_meta_info)
# test new_instance_data
instance_data = GeneralData(meta_info)
new_instance_data = instance_data.new()
for k, v in instance_data.meta_info_items():
assert k in new_instance_data
        assert _equal(v, new_instance_data[k])
instance_data = GeneralData(meta_info, data=data)
temp_meta = copy.deepcopy(meta_info)
temp_data = copy.deepcopy(data)
temp_data['time'] = '12212'
temp_meta['img_norm'] = np.random.random(3)
new_instance_data = instance_data.new(meta_info=temp_meta, data=temp_data)
for k, v in new_instance_data.meta_info_items():
if k in instance_data:
            assert _equal(v, instance_data[k])
else:
assert _equal(v, temp_meta[k])
assert k == 'img_norm'
for k, v in new_instance_data.items():
if k in instance_data:
            assert _equal(v, instance_data[k])
else:
assert k == 'time'
assert _equal(v, temp_data[k])
# test keys
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
assert 'bboxes' in instance_data.keys()
instance_data.b = 10
assert 'b' in instance_data
# test meta keys
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
assert 'path' in instance_data.meta_info_keys()
assert len(instance_data.meta_info_keys()) == len(meta_info)
instance_data.set_meta_info(dict(workdir='fafaf'))
assert 'workdir' in instance_data
assert len(instance_data.meta_info_keys()) == len(meta_info) + 1
# test values
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
assert 10 in instance_data.values()
assert len(instance_data.values()) == 1
# test meta values
instance_data = GeneralData(meta_info, data=dict(bboxes=10))
# torch 1.3 eq() can not compare str and tensor
from mmdet import digit_version
if digit_version(torch.__version__) >= [1, 4]:
assert 'dadfaff' in instance_data.meta_info_values()
assert len(instance_data.meta_info_values()) == len(meta_info)
# test items
instance_data = GeneralData(data=data)
for k, v in instance_data.items():
assert k in data
assert _equal(v, data[k])
# test meta_info_items
instance_data = GeneralData(meta_info=meta_info)
for k, v in instance_data.meta_info_items():
assert k in meta_info
assert _equal(v, meta_info[k])
# test __setattr__
new_instance_data = GeneralData(data=data)
new_instance_data.mask = torch.rand(3, 4, 5)
new_instance_data.bboxes = torch.rand(2, 4)
assert 'mask' in new_instance_data
assert len(new_instance_data.mask) == 3
assert len(new_instance_data.bboxes) == 2
# test instance_data_field has been updated
assert 'mask' in new_instance_data._data_fields
assert 'bboxes' in new_instance_data._data_fields
for k in data:
assert k in new_instance_data._data_fields
    # '_meta_info_field' and '_data_fields' are immutable.
with pytest.raises(AttributeError):
new_instance_data._data_fields = None
with pytest.raises(AttributeError):
new_instance_data._meta_info_fields = None
with pytest.raises(AttributeError):
del new_instance_data._data_fields
with pytest.raises(AttributeError):
del new_instance_data._meta_info_fields
# key in _meta_info_field is immutable
new_instance_data.set_meta_info(meta_info)
with pytest.raises(KeyError):
del new_instance_data.img_size
with pytest.raises(KeyError):
del new_instance_data.scale_factor
for k in new_instance_data.meta_info_keys():
with pytest.raises(AttributeError):
new_instance_data[k] = None
# test __delattr__
    # test that a key can be removed from the data field
assert 'mask' in new_instance_data._data_fields
assert 'mask' in new_instance_data.keys()
assert 'mask' in new_instance_data
assert hasattr(new_instance_data, 'mask')
del new_instance_data.mask
assert 'mask' not in new_instance_data.keys()
assert 'mask' not in new_instance_data
assert 'mask' not in new_instance_data._data_fields
assert not hasattr(new_instance_data, 'mask')
    # test __delitem__
new_instance_data.mask = torch.rand(1, 2, 3)
assert 'mask' in new_instance_data._data_fields
assert 'mask' in new_instance_data
assert hasattr(new_instance_data, 'mask')
del new_instance_data['mask']
assert 'mask' not in new_instance_data
assert 'mask' not in new_instance_data._data_fields
assert 'mask' not in new_instance_data
assert not hasattr(new_instance_data, 'mask')
# test __setitem__
new_instance_data['mask'] = torch.rand(1, 2, 3)
assert 'mask' in new_instance_data._data_fields
assert 'mask' in new_instance_data.keys()
assert hasattr(new_instance_data, 'mask')
# test data_fields has been updated
assert 'mask' in new_instance_data.keys()
assert 'mask' in new_instance_data._data_fields
    # '_meta_info_field' and '_data_fields' are immutable.
with pytest.raises(AttributeError):
del new_instance_data['_data_fields']
with pytest.raises(AttributeError):
del new_instance_data['_meta_info_field']
# test __getitem__
    assert new_instance_data.mask is new_instance_data['mask']
# test get
assert new_instance_data.get('mask') is new_instance_data.mask
assert new_instance_data.get('none_attribute', None) is None
assert new_instance_data.get('none_attribute', 1) == 1
# test pop
mask = new_instance_data.mask
assert new_instance_data.pop('mask') is mask
assert new_instance_data.pop('mask', None) is None
assert new_instance_data.pop('mask', 1) == 1
    # '_meta_info_field' and '_data_fields' are immutable.
with pytest.raises(KeyError):
new_instance_data.pop('_data_fields')
with pytest.raises(KeyError):
new_instance_data.pop('_meta_info_field')
# attribute in `_meta_info_field` is immutable
with pytest.raises(KeyError):
new_instance_data.pop('img_size')
    # test pop attribute in instance_data_field
new_instance_data['mask'] = torch.rand(1, 2, 3)
new_instance_data.pop('mask')
# test data_field has been updated
assert 'mask' not in new_instance_data
assert 'mask' not in new_instance_data._data_fields
assert 'mask' not in new_instance_data
    # test keys
new_instance_data.mask = torch.ones(1, 2, 3)
    assert 'mask' in new_instance_data.keys()
has_flag = False
for key in new_instance_data.keys():
if key == 'mask':
has_flag = True
assert has_flag
# test values
assert len(list(new_instance_data.keys())) == len(
list(new_instance_data.values()))
mask = new_instance_data.mask
has_flag = False
for value in new_instance_data.values():
if value is mask:
has_flag = True
assert has_flag
# test items
assert len(list(new_instance_data.keys())) == len(
list(new_instance_data.items()))
mask = new_instance_data.mask
has_flag = False
for key, value in new_instance_data.items():
if value is mask:
assert key == 'mask'
has_flag = True
assert has_flag
# test device
new_instance_data = GeneralData()
if torch.cuda.is_available():
newnew_instance_data = new_instance_data.new()
devices = ('cpu', 'cuda')
for i in range(10):
device = devices[i % 2]
newnew_instance_data[f'{i}'] = torch.rand(1, 2, 3, device=device)
newnew_instance_data = newnew_instance_data.cpu()
for value in newnew_instance_data.values():
assert not value.is_cuda
newnew_instance_data = new_instance_data.new()
devices = ('cuda', 'cpu')
for i in range(10):
device = devices[i % 2]
newnew_instance_data[f'{i}'] = torch.rand(1, 2, 3, device=device)
newnew_instance_data = newnew_instance_data.cuda()
for value in newnew_instance_data.values():
assert value.is_cuda
# test to
double_instance_data = instance_data.new()
double_instance_data.long = torch.LongTensor(1, 2, 3, 4)
double_instance_data.bool = torch.BoolTensor(1, 2, 3, 4)
double_instance_data = instance_data.to(torch.double)
for k, v in double_instance_data.items():
if isinstance(v, torch.Tensor):
assert v.dtype is torch.double
# test .cpu() .cuda()
if torch.cuda.is_available():
cpu_instance_data = double_instance_data.new()
cpu_instance_data.mask = torch.rand(1)
cuda_tensor = torch.rand(1, 2, 3).cuda()
cuda_instance_data = cpu_instance_data.to(cuda_tensor.device)
for value in cuda_instance_data.values():
assert value.is_cuda
cpu_instance_data = cuda_instance_data.cpu()
for value in cpu_instance_data.values():
assert not value.is_cuda
cuda_instance_data = cpu_instance_data.cuda()
for value in cuda_instance_data.values():
assert value.is_cuda
# test detach
grad_instance_data = double_instance_data.new()
grad_instance_data.mask = torch.rand(2, requires_grad=True)
grad_instance_data.mask_1 = torch.rand(2, requires_grad=True)
detach_instance_data = grad_instance_data.detach()
for value in detach_instance_data.values():
assert not value.requires_grad
# test numpy
tensor_instance_data = double_instance_data.new()
tensor_instance_data.mask = torch.rand(2, requires_grad=True)
tensor_instance_data.mask_1 = torch.rand(2, requires_grad=True)
numpy_instance_data = tensor_instance_data.numpy()
for value in numpy_instance_data.values():
assert isinstance(value, np.ndarray)
if torch.cuda.is_available():
tensor_instance_data = double_instance_data.new()
tensor_instance_data.mask = torch.rand(2)
tensor_instance_data.mask_1 = torch.rand(2)
tensor_instance_data = tensor_instance_data.cuda()
numpy_instance_data = tensor_instance_data.numpy()
for value in numpy_instance_data.values():
assert isinstance(value, np.ndarray)
instance_data['_c'] = 10000
    assert instance_data.get('dad', None) is None
assert hasattr(instance_data, '_c')
del instance_data['_c']
assert not hasattr(instance_data, '_c')
instance_data.a = 1000
instance_data['a'] = 2000
assert instance_data['a'] == 2000
assert instance_data.a == 2000
assert instance_data.get('a') == instance_data['a'] == instance_data.a
instance_data._meta = 1000
assert '_meta' in instance_data.keys()
if torch.cuda.is_available():
instance_data.bbox = torch.ones(2, 3, 4, 5).cuda()
instance_data.score = torch.ones(2, 3, 4, 4)
else:
instance_data.bbox = torch.ones(2, 3, 4, 5)
assert len(instance_data.new().keys()) == 0
with pytest.raises(AttributeError):
instance_data.img_size = 100
for k, v in instance_data.items():
if k == 'bbox':
assert isinstance(v, torch.Tensor)
assert 'a' in instance_data
instance_data.pop('a')
assert 'a' not in instance_data
cpu_instance_data = instance_data.cpu()
for k, v in cpu_instance_data.items():
if isinstance(v, torch.Tensor):
assert not v.is_cuda
assert isinstance(cpu_instance_data.numpy().bbox, np.ndarray)
if torch.cuda.is_available():
        cuda_results = instance_data.cuda()
        for k, v in cuda_results.items():
if isinstance(v, torch.Tensor):
assert v.is_cuda
def test_instance_data():
meta_info = dict(
img_size=(256, 256),
path='dadfaff',
scale_factor=np.array([1.5, 1.5, 1, 1]))
data = dict(
bboxes=torch.rand(4, 4),
masks=torch.rand(4, 2, 2),
labels=np.random.rand(4),
size=[(i, i) for i in range(4)])
# test init
instance_data = InstanceData(meta_info)
assert 'path' in instance_data
instance_data = InstanceData(meta_info, data=data)
assert len(instance_data) == 4
instance_data.set_data(data)
assert len(instance_data) == 4
meta_info = copy.deepcopy(meta_info)
meta_info['img_name'] = 'flag'
    # test new_instance_data
new_instance_data = instance_data.new(meta_info=meta_info)
for k, v in new_instance_data.meta_info_items():
if k in instance_data:
            assert _equal(v, instance_data[k])
else:
assert _equal(v, meta_info[k])
assert k == 'img_name'
# meta info is immutable
with pytest.raises(KeyError):
meta_info = copy.deepcopy(meta_info)
meta_info['path'] = 'fdasfdsd'
instance_data.new(meta_info=meta_info)
# data fields should have same length
with pytest.raises(AssertionError):
temp_data = copy.deepcopy(data)
temp_data['bboxes'] = torch.rand(5, 4)
instance_data.new(data=temp_data)
temp_data = copy.deepcopy(data)
temp_data['scores'] = torch.rand(4)
new_instance_data = instance_data.new(data=temp_data)
for k, v in new_instance_data.items():
if k in instance_data:
            assert _equal(v, instance_data[k])
else:
assert k == 'scores'
assert _equal(v, temp_data[k])
instance_data = instance_data.new()
# test __setattr__
    # '_meta_info_field' and '_data_fields' are immutable.
with pytest.raises(AttributeError):
instance_data._data_fields = dict()
with pytest.raises(AttributeError):
instance_data._data_fields = dict()
    # all attributes in the data field should be one of
    # (torch.Tensor, np.ndarray, list)
with pytest.raises(AssertionError):
instance_data.a = 1000
    # all data fields should have the same length
new_instance_data = instance_data.new()
new_instance_data.det_bbox = torch.rand(100, 4)
new_instance_data.det_label = torch.arange(100)
with pytest.raises(AssertionError):
new_instance_data.scores = torch.rand(101, 1)
new_instance_data.none = [None] * 100
with pytest.raises(AssertionError):
new_instance_data.scores = [None] * 101
new_instance_data.numpy_det = np.random.random([100, 1])
with pytest.raises(AssertionError):
new_instance_data.scores = np.random.random([101, 1])
    # valid indices: str, slice, int, torch.LongTensor, torch.BoolTensor
item = torch.Tensor([1, 2, 3, 4])
with pytest.raises(AssertionError):
new_instance_data[item]
    assert len(new_instance_data[item.long()]) == 4
    # when the index is a bool tensor, its length must equal the
    # length of the values stored in the data fields
with pytest.raises(AssertionError):
new_instance_data[item.bool()]
for i in range(len(new_instance_data)):
assert new_instance_data[i].det_label == i
assert len(new_instance_data[i]) == 1
    # the index should be within [0, len(instance_data) - 1]
with pytest.raises(IndexError):
new_instance_data[101]
    # indexing an empty InstanceData should raise an error
new_new_instance_data = new_instance_data.new()
with pytest.raises(AssertionError):
new_new_instance_data[0]
    # data fields only accept tensor/ndarray/list values, so assigning
    # a plain tuple raises an error
    with pytest.raises(AssertionError):
        instance_data.img_size_dummy = meta_info['img_size']
# test slice
    ten_results = new_instance_data[:10]
    assert len(ten_results) == 10
    for v in ten_results.values():
        assert len(v) == 10
    # test LongTensor
long_tensor = torch.randint(100, (50, ))
long_index_instance_data = new_instance_data[long_tensor]
assert len(long_index_instance_data) == len(long_tensor)
for key, value in long_index_instance_data.items():
if not isinstance(value, list):
assert (long_index_instance_data[key] == new_instance_data[key]
[long_tensor]).all()
else:
            assert len(value) == len(long_tensor)
# test bool tensor
bool_tensor = torch.rand(100) > 0.5
bool_index_instance_data = new_instance_data[bool_tensor]
assert len(bool_index_instance_data) == bool_tensor.sum()
for key, value in bool_index_instance_data.items():
if not isinstance(value, list):
assert (bool_index_instance_data[key] == new_instance_data[key]
[bool_tensor]).all()
else:
assert len(value) == bool_tensor.sum()
num_instance = 1000
instance_data_list = []
# assert len(instance_lists) > 0
with pytest.raises(AssertionError):
instance_data.cat(instance_data_list)
for _ in range(2):
instance_data['bbox'] = torch.rand(num_instance, 4)
instance_data['label'] = torch.rand(num_instance, 1)
instance_data['mask'] = torch.rand(num_instance, 224, 224)
instance_data['instances_infos'] = [1] * num_instance
instance_data['cpu_bbox'] = np.random.random((num_instance, 4))
if torch.cuda.is_available():
instance_data.cuda_tensor = torch.rand(num_instance).cuda()
assert instance_data.cuda_tensor.is_cuda
cuda_instance_data = instance_data.cuda()
assert cuda_instance_data.cuda_tensor.is_cuda
assert len(instance_data[0]) == 1
with pytest.raises(IndexError):
return instance_data[num_instance + 1]
with pytest.raises(AssertionError):
instance_data.centerness = torch.rand(num_instance + 1, 1)
mask_tensor = torch.rand(num_instance) > 0.5
length = mask_tensor.sum()
assert len(instance_data[mask_tensor]) == length
index_tensor = torch.LongTensor([1, 5, 8, 110, 399])
length = len(index_tensor)
assert len(instance_data[index_tensor]) == length
instance_data_list.append(instance_data)
    cat_results = InstanceData.cat(instance_data_list)
    assert len(cat_results) == num_instance * 2
instances = InstanceData(data=dict(bboxes=torch.rand(4, 4)))
# cat only single instance
assert len(InstanceData.cat([instances])) == 4
| 21,205 | 34.820946 | 78 | py |
mmdetection | mmdetection-master/tests/test_utils/test_hook.py | # Copyright (c) OpenMMLab. All rights reserved.
import logging
import shutil
import sys
import tempfile
from unittest.mock import MagicMock, Mock, call, patch
import numpy as np
import pytest
import torch
import torch.nn as nn
from mmcv.runner import (CheckpointHook, IterTimerHook, PaviLoggerHook,
build_runner)
from torch.nn.init import constant_
from torch.utils.data import DataLoader, Dataset
from mmdet.core.hook import ExpMomentumEMAHook, YOLOXLrUpdaterHook
from mmdet.core.hook.sync_norm_hook import SyncNormHook
from mmdet.core.hook.sync_random_size_hook import SyncRandomSizeHook
def _build_demo_runner_without_hook(runner_type='EpochBasedRunner',
max_epochs=1,
max_iters=None,
                                    multi_optimizers=False):
class Model(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(2, 1)
self.conv = nn.Conv2d(3, 3, 3)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
model = Model()
    if multi_optimizers:
optimizer = {
'model1':
torch.optim.SGD(model.linear.parameters(), lr=0.02, momentum=0.95),
'model2':
torch.optim.SGD(model.conv.parameters(), lr=0.01, momentum=0.9),
}
else:
optimizer = torch.optim.SGD(model.parameters(), lr=0.02, momentum=0.95)
tmp_dir = tempfile.mkdtemp()
runner = build_runner(
dict(type=runner_type),
default_args=dict(
model=model,
work_dir=tmp_dir,
optimizer=optimizer,
logger=logging.getLogger(),
max_epochs=max_epochs,
max_iters=max_iters))
return runner
def _build_demo_runner(runner_type='EpochBasedRunner',
max_epochs=1,
max_iters=None,
                       multi_optimizers=False):
log_config = dict(
interval=1, hooks=[
dict(type='TextLoggerHook'),
])
runner = _build_demo_runner_without_hook(runner_type, max_epochs,
                                             max_iters, multi_optimizers)
runner.register_checkpoint_hook(dict(interval=1))
runner.register_logger_hooks(log_config)
return runner
@pytest.mark.parametrize('multi_optimizers', (True, False))
def test_yolox_lrupdater_hook(multi_optimizers):
"""xdoctest -m tests/test_hooks.py test_cosine_runner_hook."""
    # instantiate once just to make sure the constructor runs
YOLOXLrUpdaterHook(0, min_lr_ratio=0.05)
sys.modules['pavi'] = MagicMock()
loader = DataLoader(torch.ones((10, 2)))
    runner = _build_demo_runner(multi_optimizers=multi_optimizers)
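    # schedule under test (hedged): exponential warmup for the first
    # warmup_iters epochs (warmup_by_epoch=True), then a cosine-style decay
    # towards base_lr * min_lr_ratio, with the lr held at that floor for
    # the final num_last_epochs epochs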
hook_cfg = dict(
type='YOLOXLrUpdaterHook',
warmup='exp',
by_epoch=False,
warmup_by_epoch=True,
warmup_ratio=1,
warmup_iters=5, # 5 epoch
num_last_epochs=15,
min_lr_ratio=0.05)
runner.register_hook_from_cfg(hook_cfg)
runner.register_hook_from_cfg(dict(type='IterTimerHook'))
runner.register_hook(IterTimerHook())
# add pavi hook
hook = PaviLoggerHook(interval=1, add_graph=False, add_last_ckpt=True)
runner.register_hook(hook)
runner.run([loader], [('train', 1)])
shutil.rmtree(runner.work_dir)
# TODO: use a more elegant way to check values
assert hasattr(hook, 'writer')
    if multi_optimizers:
calls = [
call(
'train', {
'learning_rate/model1': 8.000000000000001e-06,
'learning_rate/model2': 4.000000000000001e-06,
'momentum/model1': 0.95,
'momentum/model2': 0.9
}, 1),
call(
'train', {
'learning_rate/model1': 0.00039200000000000004,
'learning_rate/model2': 0.00019600000000000002,
'momentum/model1': 0.95,
'momentum/model2': 0.9
}, 7),
call(
'train', {
'learning_rate/model1': 0.0008000000000000001,
'learning_rate/model2': 0.0004000000000000001,
'momentum/model1': 0.95,
'momentum/model2': 0.9
}, 10)
]
else:
calls = [
call('train', {
'learning_rate': 8.000000000000001e-06,
'momentum': 0.95
}, 1),
call('train', {
'learning_rate': 0.00039200000000000004,
'momentum': 0.95
}, 7),
call('train', {
'learning_rate': 0.0008000000000000001,
'momentum': 0.95
}, 10)
]
hook.writer.add_scalars.assert_has_calls(calls, any_order=True)
def test_ema_hook():
"""xdoctest -m tests/test_hooks.py test_ema_hook."""
class DemoModel(nn.Module):
def __init__(self):
super().__init__()
self.conv = nn.Conv2d(
in_channels=1,
out_channels=2,
kernel_size=1,
padding=1,
bias=True)
self.bn = nn.BatchNorm2d(2)
self._init_weight()
def _init_weight(self):
constant_(self.conv.weight, 0)
constant_(self.conv.bias, 0)
constant_(self.bn.weight, 0)
constant_(self.bn.bias, 0)
def forward(self, x):
return self.bn(self.conv(x)).sum()
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def val_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
loader = DataLoader(torch.ones((1, 1, 1, 1)))
runner = _build_demo_runner()
demo_model = DemoModel()
runner.model = demo_model
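    # ExpMomentumEMAHook background (hedged): the effective EMA momentum
    # ramps as roughly
    #   m_t = (1 - momentum) * exp(-(1 + t) / total_iter) + momentum
    # and skip_buffers=True averages only parameters, leaving BN buffers
    # untouched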
ema_hook = ExpMomentumEMAHook(
momentum=0.0002,
total_iter=1,
skip_buffers=True,
interval=2,
resume_from=None)
checkpointhook = CheckpointHook(interval=1, by_epoch=True)
runner.register_hook(ema_hook, priority='HIGHEST')
runner.register_hook(checkpointhook)
runner.run([loader, loader], [('train', 1), ('val', 1)])
checkpoint = torch.load(f'{runner.work_dir}/epoch_1.pth')
    num_ema_params = 0
    for name, value in checkpoint['state_dict'].items():
        if 'ema' in name:
            num_ema_params += 1
            value.fill_(1)
    assert num_ema_params == 4
torch.save(checkpoint, f'{runner.work_dir}/epoch_1.pth')
work_dir = runner.work_dir
resume_ema_hook = ExpMomentumEMAHook(
momentum=0.5,
total_iter=10,
skip_buffers=True,
interval=1,
resume_from=f'{work_dir}/epoch_1.pth')
runner = _build_demo_runner(max_epochs=2)
runner.model = demo_model
runner.register_hook(resume_ema_hook, priority='HIGHEST')
checkpointhook = CheckpointHook(interval=1, by_epoch=True)
runner.register_hook(checkpointhook)
runner.run([loader, loader], [('train', 1), ('val', 1)])
checkpoint = torch.load(f'{runner.work_dir}/epoch_2.pth')
    num_ema_params = 0
    desired_output = [0.9094, 0.9094]
    for name, value in checkpoint['state_dict'].items():
        if 'ema' in name:
            num_ema_params += 1
            assert value.sum() == 2
        else:
            if ('weight' in name) or ('bias' in name):
                np.allclose(value.data.cpu().numpy().reshape(-1),
                            desired_output, 1e-4)
    assert num_ema_params == 4
shutil.rmtree(runner.work_dir)
shutil.rmtree(work_dir)
def test_sync_norm_hook():
    # instantiate once just to make sure the constructor runs
SyncNormHook()
loader = DataLoader(torch.ones((5, 2)))
runner = _build_demo_runner()
runner.register_hook_from_cfg(dict(type='SyncNormHook'))
runner.run([loader, loader], [('train', 1), ('val', 1)])
shutil.rmtree(runner.work_dir)
def test_sync_random_size_hook():
# Only used to prevent program errors
SyncRandomSizeHook()
class DemoDataset(Dataset):
def __getitem__(self, item):
return torch.ones(2)
def __len__(self):
return 5
def update_dynamic_scale(self, dynamic_scale):
pass
loader = DataLoader(DemoDataset())
runner = _build_demo_runner()
runner.register_hook_from_cfg(
dict(type='SyncRandomSizeHook', device='cpu'))
runner.run([loader, loader], [('train', 1), ('val', 1)])
shutil.rmtree(runner.work_dir)
if torch.cuda.is_available():
runner = _build_demo_runner()
runner.register_hook_from_cfg(
dict(type='SyncRandomSizeHook', device='cuda'))
runner.run([loader, loader], [('train', 1), ('val', 1)])
shutil.rmtree(runner.work_dir)
@pytest.mark.parametrize('set_loss', [
dict(set_loss_nan=False, set_loss_inf=False),
dict(set_loss_nan=True, set_loss_inf=False),
dict(set_loss_nan=False, set_loss_inf=True)
])
def test_check_invalid_loss_hook(set_loss):
# Check whether loss is valid during training.
class DemoModel(nn.Module):
def __init__(self, set_loss_nan=False, set_loss_inf=False):
super().__init__()
self.set_loss_nan = set_loss_nan
self.set_loss_inf = set_loss_inf
self.linear = nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
if self.set_loss_nan:
return dict(loss=torch.tensor(float('nan')))
elif self.set_loss_inf:
return dict(loss=torch.tensor(float('inf')))
else:
return dict(loss=self(x))
loader = DataLoader(torch.ones((5, 2)))
runner = _build_demo_runner()
demo_model = DemoModel(**set_loss)
runner.model = demo_model
runner.register_hook_from_cfg(
dict(type='CheckInvalidLossHook', interval=1))
if not set_loss['set_loss_nan'] \
and not set_loss['set_loss_inf']:
# check loss is valid
runner.run([loader], [('train', 1)])
else:
# check loss is nan or inf
with pytest.raises(AssertionError):
runner.run([loader], [('train', 1)])
shutil.rmtree(runner.work_dir)
def test_set_epoch_info_hook():
"""Test SetEpochInfoHook."""
class DemoModel(nn.Module):
def __init__(self):
super().__init__()
self.epoch = 0
self.linear = nn.Linear(2, 1)
def forward(self, x):
return self.linear(x)
def train_step(self, x, optimizer, **kwargs):
return dict(loss=self(x))
def set_epoch(self, epoch):
self.epoch = epoch
loader = DataLoader(torch.ones((5, 2)))
runner = _build_demo_runner(max_epochs=3)
demo_model = DemoModel()
runner.model = demo_model
runner.register_hook_from_cfg(dict(type='SetEpochInfoHook'))
runner.run([loader], [('train', 1)])
assert demo_model.epoch == 2
def test_memory_profiler_hook():
from collections import namedtuple
# test ImportError without psutil and memory_profiler
with pytest.raises(ImportError):
from mmdet.core.hook import MemoryProfilerHook
MemoryProfilerHook(1)
# test ImportError without memory_profiler
sys.modules['psutil'] = MagicMock()
with pytest.raises(ImportError):
from mmdet.core.hook import MemoryProfilerHook
MemoryProfilerHook(1)
sys.modules['memory_profiler'] = MagicMock()
def _mock_virtual_memory():
virtual_memory_type = namedtuple(
'virtual_memory', ['total', 'available', 'percent', 'used'])
return virtual_memory_type(
total=270109085696,
available=250416816128,
percent=7.3,
used=17840881664)
def _mock_swap_memory():
swap_memory_type = namedtuple('swap_memory', [
'total',
'used',
'percent',
])
return swap_memory_type(total=8589930496, used=0, percent=0.0)
def _mock_memory_usage():
return [40.22265625]
mock_virtual_memory = Mock(return_value=_mock_virtual_memory())
mock_swap_memory = Mock(return_value=_mock_swap_memory())
mock_memory_usage = Mock(return_value=_mock_memory_usage())
@patch('psutil.swap_memory', mock_swap_memory)
@patch('psutil.virtual_memory', mock_virtual_memory)
@patch('memory_profiler.memory_usage', mock_memory_usage)
def _test_memory_profiler_hook():
from mmdet.core.hook import MemoryProfilerHook
hook = MemoryProfilerHook(1)
runner = _build_demo_runner()
        assert not mock_memory_usage.called
        assert not mock_swap_memory.called
        assert not mock_virtual_memory.called
        hook.after_iter(runner)
        assert mock_memory_usage.called
        assert mock_swap_memory.called
        assert mock_virtual_memory.called
_test_memory_profiler_hook()
| 13,257 | 30.870192 | 79 | py |
mmdetection | mmdetection-master/tests/test_utils/test_layer_decay_optimizer_constructor.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule
from mmdet.core.optimizers import LearningRateDecayOptimizerConstructor
base_lr = 1
decay_rate = 2
base_wd = 0.05
weight_decay = 0.05
expected_stage_wise_lr_wd_convnext = [{
'weight_decay': 0.0,
'lr_scale': 128
}, {
'weight_decay': 0.0,
'lr_scale': 1
}, {
'weight_decay': 0.05,
'lr_scale': 64
}, {
'weight_decay': 0.0,
'lr_scale': 64
}, {
'weight_decay': 0.05,
'lr_scale': 32
}, {
'weight_decay': 0.0,
'lr_scale': 32
}, {
'weight_decay': 0.05,
'lr_scale': 16
}, {
'weight_decay': 0.0,
'lr_scale': 16
}, {
'weight_decay': 0.05,
'lr_scale': 8
}, {
'weight_decay': 0.0,
'lr_scale': 8
}, {
'weight_decay': 0.05,
'lr_scale': 128
}, {
'weight_decay': 0.05,
'lr_scale': 1
}]
expected_layer_wise_lr_wd_convnext = [{
'weight_decay': 0.0,
'lr_scale': 128
}, {
'weight_decay': 0.0,
'lr_scale': 1
}, {
'weight_decay': 0.05,
'lr_scale': 64
}, {
'weight_decay': 0.0,
'lr_scale': 64
}, {
'weight_decay': 0.05,
'lr_scale': 32
}, {
'weight_decay': 0.0,
'lr_scale': 32
}, {
'weight_decay': 0.05,
'lr_scale': 16
}, {
'weight_decay': 0.0,
'lr_scale': 16
}, {
'weight_decay': 0.05,
'lr_scale': 2
}, {
'weight_decay': 0.0,
'lr_scale': 2
}, {
'weight_decay': 0.05,
'lr_scale': 128
}, {
'weight_decay': 0.05,
'lr_scale': 1
}]
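# how the expected tables above arise (sketch): each parameter group gets
# lr_scale = decay_rate ** (num_max_layer - layer_id), so with decay_rate=2
# and num_layers=6 (plus stem/head groups) the scales are powers of two up
# to 128; weight_decay is zeroed for norm layers, biases and special tokens
# such as cls_token / pos_embed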
class ToyConvNeXt(nn.Module):
def __init__(self):
super().__init__()
self.stages = nn.ModuleList()
for i in range(4):
stage = nn.Sequential(ConvModule(3, 4, kernel_size=1, bias=True))
self.stages.append(stage)
self.norm0 = nn.BatchNorm2d(2)
        # add some variables to improve unit test coverage
self.cls_token = nn.Parameter(torch.ones(1))
self.mask_token = nn.Parameter(torch.ones(1))
self.pos_embed = nn.Parameter(torch.ones(1))
self.stem_norm = nn.Parameter(torch.ones(1))
self.downsample_norm0 = nn.BatchNorm2d(2)
self.downsample_norm1 = nn.BatchNorm2d(2)
self.downsample_norm2 = nn.BatchNorm2d(2)
self.lin = nn.Parameter(torch.ones(1))
self.lin.requires_grad = False
self.downsample_layers = nn.ModuleList()
for _ in range(4):
stage = nn.Sequential(nn.Conv2d(3, 4, kernel_size=1, bias=True))
self.downsample_layers.append(stage)
class ToyDetector(nn.Module):
def __init__(self, backbone):
super().__init__()
self.backbone = backbone
self.head = nn.Conv2d(2, 2, kernel_size=1, groups=2)
class PseudoDataParallel(nn.Module):
def __init__(self, model):
super().__init__()
self.module = model
def check_optimizer_lr_wd(optimizer, gt_lr_wd):
assert isinstance(optimizer, torch.optim.AdamW)
assert optimizer.defaults['lr'] == base_lr
assert optimizer.defaults['weight_decay'] == base_wd
param_groups = optimizer.param_groups
print(param_groups)
assert len(param_groups) == len(gt_lr_wd)
for i, param_dict in enumerate(param_groups):
assert param_dict['weight_decay'] == gt_lr_wd[i]['weight_decay']
assert param_dict['lr_scale'] == gt_lr_wd[i]['lr_scale']
assert param_dict['lr_scale'] == param_dict['lr']
def test_learning_rate_decay_optimizer_constructor():
    # Test lr and wd for ConvNeXt
backbone = ToyConvNeXt()
model = PseudoDataParallel(ToyDetector(backbone))
optimizer_cfg = dict(
type='AdamW', lr=base_lr, betas=(0.9, 0.999), weight_decay=0.05)
# stagewise decay
stagewise_paramwise_cfg = dict(
decay_rate=decay_rate, decay_type='stage_wise', num_layers=6)
optim_constructor = LearningRateDecayOptimizerConstructor(
optimizer_cfg, stagewise_paramwise_cfg)
optimizer = optim_constructor(model)
check_optimizer_lr_wd(optimizer, expected_stage_wise_lr_wd_convnext)
# layerwise decay
layerwise_paramwise_cfg = dict(
decay_rate=decay_rate, decay_type='layer_wise', num_layers=6)
optim_constructor = LearningRateDecayOptimizerConstructor(
optimizer_cfg, layerwise_paramwise_cfg)
optimizer = optim_constructor(model)
check_optimizer_lr_wd(optimizer, expected_layer_wise_lr_wd_convnext)
| 4,383 | 25.569697 | 77 | py |
mmdetection | mmdetection-master/tests/test_utils/test_logger.py | # Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmdet.utils import get_caller_name, log_img_scale
def callee_func():
caller_name = get_caller_name()
return caller_name
class CallerClassForTest:
def __init__(self):
self.caller_name = callee_func()
def test_get_caller_name():
# test the case that caller is a function
caller_name = callee_func()
assert caller_name == 'test_get_caller_name'
# test the case that caller is a method in a class
caller_class = CallerClassForTest()
assert caller_class.caller_name == 'CallerClassForTest.__init__'
def test_log_img_scale():
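    # behaviour exercised below: log_img_scale logs an image scale tuple and
    # returns True when it actually logged; shape_order declares whether the
    # tuple is (h, w) or (w, h), and skip_square=True skips square scales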
img_scale = (800, 1333)
done_logging = log_img_scale(img_scale)
assert done_logging
img_scale = (1333, 800)
done_logging = log_img_scale(img_scale, shape_order='wh')
assert done_logging
with pytest.raises(ValueError):
img_scale = (1333, 800)
done_logging = log_img_scale(img_scale, shape_order='xywh')
img_scale = (640, 640)
done_logging = log_img_scale(img_scale, skip_square=False)
assert done_logging
img_scale = (640, 640)
done_logging = log_img_scale(img_scale, skip_square=True)
assert not done_logging
| 1,223 | 24.5 | 68 | py |
mmdetection | mmdetection-master/tests/test_utils/test_masks.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmdet.core import BitmapMasks, PolygonMasks, mask2bbox
def dummy_raw_bitmap_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (H, W) or (N, H, W)
    Returns:
ndarray: dummy mask
"""
return np.random.randint(0, 2, size, dtype=np.uint8)
def dummy_raw_polygon_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (N, H, W)
    Returns:
list[list[ndarray]]: dummy mask
"""
num_obj, height, width = size
polygons = []
for _ in range(num_obj):
num_points = np.random.randint(5) * 2 + 6
polygons.append([np.random.uniform(0, min(height, width), num_points)])
return polygons
def dummy_bboxes(num, max_height, max_width):
x1y1 = np.random.randint(0, min(max_height // 2, max_width // 2), (num, 2))
wh = np.random.randint(0, min(max_height // 2, max_width // 2), (num, 2))
x2y2 = x1y1 + wh
return np.concatenate([x1y1, x2y2], axis=1).squeeze().astype(np.float32)
def test_bitmap_mask_init():
# init with empty ndarray masks
raw_masks = np.empty((0, 28, 28), dtype=np.uint8)
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 0
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with empty list masks
raw_masks = []
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 0
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with ndarray masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 3
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with list masks contain 3 instances
raw_masks = [dummy_raw_bitmap_masks((28, 28)) for _ in range(3)]
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert len(bitmap_masks) == 3
assert bitmap_masks.height == 28
assert bitmap_masks.width == 28
# init with raw masks of unsupported type
with pytest.raises(AssertionError):
raw_masks = [[dummy_raw_bitmap_masks((28, 28))]]
BitmapMasks(raw_masks, 28, 28)
def test_bitmap_mask_rescale():
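    # note (hedged): BitmapMasks.rescale computes the new size with
    # mmcv-style rescaling that keeps the aspect ratio, which is why a
    # 28x28 mask rescaled towards (56, 72) comes out 56x56 below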
# rescale with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
rescaled_masks = bitmap_masks.rescale((56, 72))
assert len(rescaled_masks) == 0
assert rescaled_masks.height == 56
assert rescaled_masks.width == 56
    # rescale with bitmap masks containing 1 instance
raw_masks = np.array([[[1, 0, 0, 0], [0, 1, 0, 1]]])
bitmap_masks = BitmapMasks(raw_masks, 2, 4)
rescaled_masks = bitmap_masks.rescale((8, 8))
assert len(rescaled_masks) == 1
assert rescaled_masks.height == 4
assert rescaled_masks.width == 8
truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 1, 1], [0, 0, 1, 1, 0, 0, 1, 1]]])
assert (rescaled_masks.masks == truth).all()
def test_bitmap_mask_resize():
# resize with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
resized_masks = bitmap_masks.resize((56, 72))
assert len(resized_masks) == 0
assert resized_masks.height == 56
assert resized_masks.width == 72
    # resize with bitmap masks containing 1 instance
raw_masks = np.diag(np.ones(4, dtype=np.uint8))[np.newaxis, ...]
bitmap_masks = BitmapMasks(raw_masks, 4, 4)
resized_masks = bitmap_masks.resize((8, 8))
assert len(resized_masks) == 1
assert resized_masks.height == 8
assert resized_masks.width == 8
truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 0, 0, 1, 1]]])
assert (resized_masks.masks == truth).all()
# resize to non-square
raw_masks = np.diag(np.ones(4, dtype=np.uint8))[np.newaxis, ...]
bitmap_masks = BitmapMasks(raw_masks, 4, 4)
resized_masks = bitmap_masks.resize((4, 8))
assert len(resized_masks) == 1
assert resized_masks.height == 4
assert resized_masks.width == 8
truth = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 1, 1]]])
assert (resized_masks.masks == truth).all()
def test_bitmap_mask_get_bboxes():
    # get bboxes of empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
bboxes = bitmap_masks.get_bboxes()
assert len(bboxes) == 0
    # get bboxes of bitmap masks containing 1 instance
raw_masks = np.array([[[0, 0, 0, 0, 0, 0, 0, 0], [0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0,
0]]])
bitmap_masks = BitmapMasks(raw_masks, 8, 8)
bboxes = bitmap_masks.get_bboxes()
assert len(bboxes) == 1
truth = np.array([[1, 1, 6, 6]])
assert (bboxes == truth).all()
    # get bboxes of a non-square bitmap mask
raw_masks = np.array([[[1, 1, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0,
0]]])
bitmap_masks = BitmapMasks(raw_masks, 4, 8)
bboxes = bitmap_masks.get_bboxes()
truth = np.array([[0, 0, 6, 3]])
assert (bboxes == truth).all()
def test_bitmap_mask_flip():
# flip with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='horizontal')
assert len(flipped_masks) == 0
assert flipped_masks.height == 28
assert flipped_masks.width == 28
# horizontally flip with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='horizontal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='horizontal')
assert flipped_masks.masks.shape == (3, 28, 28)
assert (bitmap_masks.masks == flipped_flipped_masks.masks).all()
assert (flipped_masks.masks == raw_masks[:, :, ::-1]).all()
# vertically flip with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='vertical')
flipped_flipped_masks = flipped_masks.flip(flip_direction='vertical')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert (bitmap_masks.masks == flipped_flipped_masks.masks).all()
assert (flipped_masks.masks == raw_masks[:, ::-1, :]).all()
# diagonal flip with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
flipped_masks = bitmap_masks.flip(flip_direction='diagonal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='diagonal')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert (bitmap_masks.masks == flipped_flipped_masks.masks).all()
assert (flipped_masks.masks == raw_masks[:, ::-1, ::-1]).all()
def test_bitmap_mask_pad():
# pad with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
padded_masks = bitmap_masks.pad((56, 56))
assert len(padded_masks) == 0
assert padded_masks.height == 56
assert padded_masks.width == 56
# pad with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
padded_masks = bitmap_masks.pad((56, 56))
assert len(padded_masks) == 3
assert padded_masks.height == 56
assert padded_masks.width == 56
assert (padded_masks.masks[:, 28:, 28:] == 0).all()
def test_bitmap_mask_crop():
# crop with empty bitmap masks
    dummy_bbox = np.array([0, 10, 10, 27], dtype=int)
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_masks = bitmap_masks.crop(dummy_bbox)
assert len(cropped_masks) == 0
assert cropped_masks.height == 17
assert cropped_masks.width == 10
# crop with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_masks = bitmap_masks.crop(dummy_bbox)
assert len(cropped_masks) == 3
assert cropped_masks.height == 17
assert cropped_masks.width == 10
x1, y1, x2, y2 = dummy_bbox
assert (cropped_masks.masks == raw_masks[:, y1:y2, x1:x2]).all()
# crop with invalid bbox
with pytest.raises(AssertionError):
dummy_bbox = dummy_bboxes(2, 28, 28)
bitmap_masks.crop(dummy_bbox)
def test_bitmap_mask_crop_and_resize():
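    # crop_and_resize mirrors RoI mask-target generation: each box in
    # dummy_bbox crops the source mask picked by the matching entry of
    # inds, and every crop is resized to the fixed (56, 56) output shape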
dummy_bbox = dummy_bboxes(5, 28, 28)
inds = np.random.randint(0, 3, (5, ))
# crop and resize with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_resized_masks = bitmap_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 0
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
# crop and resize with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
cropped_resized_masks = bitmap_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 5
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
def test_bitmap_mask_expand():
# expand with empty bitmap masks
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
expanded_masks = bitmap_masks.expand(56, 56, 12, 14)
assert len(expanded_masks) == 0
assert expanded_masks.height == 56
assert expanded_masks.width == 56
# expand with bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
expanded_masks = bitmap_masks.expand(56, 56, 12, 14)
assert len(expanded_masks) == 3
assert expanded_masks.height == 56
assert expanded_masks.width == 56
assert (expanded_masks.masks[:, :12, :14] == 0).all()
assert (expanded_masks.masks[:, 12 + 28:, 14 + 28:] == 0).all()
def test_bitmap_mask_area():
# area of empty bitmap mask
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert bitmap_masks.areas.sum() == 0
# area of bitmap masks contain 3 instances
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
areas = bitmap_masks.areas
assert len(areas) == 3
assert (areas == raw_masks.sum((1, 2))).all()
def test_bitmap_mask_to_ndarray():
# empty bitmap masks to ndarray
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
ndarray_masks = bitmap_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (0, 28, 28)
# bitmap masks contain 3 instances to ndarray
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
ndarray_masks = bitmap_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (3, 28, 28)
assert (ndarray_masks == raw_masks).all()
def test_bitmap_mask_to_tensor():
# empty bitmap masks to tensor
raw_masks = dummy_raw_bitmap_masks((0, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
tensor_masks = bitmap_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (0, 28, 28)
# bitmap masks contain 3 instances to tensor
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
tensor_masks = bitmap_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (3, 28, 28)
assert (tensor_masks.numpy() == raw_masks).all()
def test_bitmap_mask_index():
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
assert (bitmap_masks[0].masks == raw_masks[0]).all()
assert (bitmap_masks[range(2)].masks == raw_masks[range(2)]).all()
def test_bitmap_mask_iter():
raw_masks = dummy_raw_bitmap_masks((3, 28, 28))
bitmap_masks = BitmapMasks(raw_masks, 28, 28)
for i, bitmap_mask in enumerate(bitmap_masks):
assert bitmap_mask.shape == (28, 28)
assert (bitmap_mask == raw_masks[i]).all()
def test_polygon_mask_init():
# init with empty masks
raw_masks = []
    polygon_masks = PolygonMasks(raw_masks, 28, 28)
assert len(polygon_masks) == 0
assert polygon_masks.height == 28
assert polygon_masks.width == 28
# init with masks contain 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
assert isinstance(polygon_masks.masks, list)
assert isinstance(polygon_masks.masks[0], list)
assert isinstance(polygon_masks.masks[0][0], np.ndarray)
assert len(polygon_masks) == 3
assert polygon_masks.height == 28
assert polygon_masks.width == 28
assert polygon_masks.to_ndarray().shape == (3, 28, 28)
# init with raw masks of unsupported type
with pytest.raises(AssertionError):
raw_masks = [[[]]]
PolygonMasks(raw_masks, 28, 28)
raw_masks = [dummy_raw_polygon_masks((3, 28, 28))]
PolygonMasks(raw_masks, 28, 28)
def test_polygon_mask_rescale():
# rescale with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
rescaled_masks = polygon_masks.rescale((56, 72))
assert len(rescaled_masks) == 0
assert rescaled_masks.height == 56
assert rescaled_masks.width == 56
assert rescaled_masks.to_ndarray().shape == (0, 56, 56)
    # rescale with polygon masks containing 1 instance
    raw_masks = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=float)]]
polygon_masks = PolygonMasks(raw_masks, 5, 5)
rescaled_masks = polygon_masks.rescale((12, 10))
assert len(rescaled_masks) == 1
assert rescaled_masks.height == 10
assert rescaled_masks.width == 10
assert rescaled_masks.to_ndarray().shape == (1, 10, 10)
truth = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
np.uint8)
assert (rescaled_masks.to_ndarray() == truth).all()
def test_polygon_mask_resize():
# resize with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
resized_masks = polygon_masks.resize((56, 72))
assert len(resized_masks) == 0
assert resized_masks.height == 56
assert resized_masks.width == 72
assert resized_masks.to_ndarray().shape == (0, 56, 72)
assert len(resized_masks.get_bboxes()) == 0
# resize with polygon masks contain 1 instance 1 part
    raw_masks1 = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=float)]]
polygon_masks1 = PolygonMasks(raw_masks1, 5, 5)
resized_masks1 = polygon_masks1.resize((10, 10))
assert len(resized_masks1) == 1
assert resized_masks1.height == 10
assert resized_masks1.width == 10
assert resized_masks1.to_ndarray().shape == (1, 10, 10)
truth1 = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]],
np.uint8)
assert (resized_masks1.to_ndarray() == truth1).all()
bboxes = resized_masks1.get_bboxes()
bbox_truth = np.array([[2, 2, 8, 8]])
assert (bboxes == bbox_truth).all()
# resize with polygon masks contain 1 instance 2 part
raw_masks2 = [[
np.array([0., 0., 1., 0., 1., 1.]),
np.array([1., 1., 2., 1., 2., 2., 1., 2.])
]]
polygon_masks2 = PolygonMasks(raw_masks2, 3, 3)
resized_masks2 = polygon_masks2.resize((6, 6))
assert len(resized_masks2) == 1
assert resized_masks2.height == 6
assert resized_masks2.width == 6
assert resized_masks2.to_ndarray().shape == (1, 6, 6)
truth2 = np.array(
[[0, 1, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], np.uint8)
assert (resized_masks2.to_ndarray() == truth2).all()
# resize with polygon masks contain 2 instances
raw_masks3 = [raw_masks1[0], raw_masks2[0]]
polygon_masks3 = PolygonMasks(raw_masks3, 5, 5)
resized_masks3 = polygon_masks3.resize((10, 10))
assert len(resized_masks3) == 2
assert resized_masks3.height == 10
assert resized_masks3.width == 10
assert resized_masks3.to_ndarray().shape == (2, 10, 10)
truth3 = np.stack([truth1, np.pad(truth2, ((0, 4), (0, 4)), 'constant')])
assert (resized_masks3.to_ndarray() == truth3).all()
# resize to non-square
    raw_masks4 = [[np.array([1, 1, 3, 1, 4, 3, 2, 4, 1, 3], dtype=np.float64)]]
polygon_masks4 = PolygonMasks(raw_masks4, 5, 5)
resized_masks4 = polygon_masks4.resize((5, 10))
assert len(resized_masks4) == 1
assert resized_masks4.height == 5
assert resized_masks4.width == 10
assert resized_masks4.to_ndarray().shape == (1, 5, 10)
truth4 = np.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0], [0, 0, 0, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], np.uint8)
assert (resized_masks4.to_ndarray() == truth4).all()
def test_polygon_mask_flip():
# flip with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='horizontal')
assert len(flipped_masks) == 0
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (0, 28, 28)
    # TODO: fix flip correctness checking after v2.0_coord is merged
    # horizontally flip with polygon masks containing 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='horizontal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='horizontal')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (3, 28, 28)
assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray()
).all()
    # vertically flip with polygon masks containing 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='vertical')
flipped_flipped_masks = flipped_masks.flip(flip_direction='vertical')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (3, 28, 28)
assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray()
).all()
    # diagonally flip with polygon masks containing 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
flipped_masks = polygon_masks.flip(flip_direction='diagonal')
flipped_flipped_masks = flipped_masks.flip(flip_direction='diagonal')
assert len(flipped_masks) == 3
assert flipped_masks.height == 28
assert flipped_masks.width == 28
assert flipped_masks.to_ndarray().shape == (3, 28, 28)
assert (polygon_masks.to_ndarray() == flipped_flipped_masks.to_ndarray()
).all()
def test_polygon_mask_crop():
    dummy_bbox = np.array([0, 10, 10, 27], dtype=np.int64)
# crop with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
cropped_masks = polygon_masks.crop(dummy_bbox)
assert len(cropped_masks) == 0
assert cropped_masks.height == 17
assert cropped_masks.width == 10
assert cropped_masks.to_ndarray().shape == (0, 17, 10)
    # crop with polygon masks containing 1 instance
raw_masks = [[np.array([1., 3., 5., 1., 5., 6., 1, 6])]]
polygon_masks = PolygonMasks(raw_masks, 7, 7)
bbox = np.array([0, 0, 3, 4])
cropped_masks = polygon_masks.crop(bbox)
assert len(cropped_masks) == 1
assert cropped_masks.height == 4
assert cropped_masks.width == 3
assert cropped_masks.to_ndarray().shape == (1, 4, 3)
truth = np.array([[0, 0, 0], [0, 0, 0], [0, 0, 1], [0, 1, 1]])
assert (cropped_masks.to_ndarray() == truth).all()
# crop with invalid bbox
with pytest.raises(AssertionError):
dummy_bbox = dummy_bboxes(2, 28, 28)
polygon_masks.crop(dummy_bbox)
def test_polygon_mask_pad():
# pad with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
padded_masks = polygon_masks.pad((56, 56))
assert len(padded_masks) == 0
assert padded_masks.height == 56
assert padded_masks.width == 56
assert padded_masks.to_ndarray().shape == (0, 56, 56)
    # pad with polygon masks containing 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
padded_masks = polygon_masks.pad((56, 56))
assert len(padded_masks) == 3
assert padded_masks.height == 56
assert padded_masks.width == 56
assert padded_masks.to_ndarray().shape == (3, 56, 56)
assert (padded_masks.to_ndarray()[:, 28:, 28:] == 0).all()
def test_polygon_mask_expand():
with pytest.raises(NotImplementedError):
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
polygon_masks.expand(56, 56, 10, 17)
def test_polygon_mask_crop_and_resize():
dummy_bbox = dummy_bboxes(5, 28, 28)
inds = np.random.randint(0, 3, (5, ))
# crop and resize with empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
cropped_resized_masks = polygon_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 0
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
assert cropped_resized_masks.to_ndarray().shape == (0, 56, 56)
    # crop and resize with polygon masks containing 3 instances
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
cropped_resized_masks = polygon_masks.crop_and_resize(
dummy_bbox, (56, 56), inds)
assert len(cropped_resized_masks) == 5
assert cropped_resized_masks.height == 56
assert cropped_resized_masks.width == 56
assert cropped_resized_masks.to_ndarray().shape == (5, 56, 56)
def test_polygon_mask_area():
# area of empty polygon masks
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
assert polygon_masks.areas.sum() == 0
    # area of polygon masks containing 1 instance
    # here we construct a case where the gap between the bitmap area and
    # the polygon area is minor
raw_masks = [[np.array([1, 1, 5, 1, 3, 4])]]
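    # for reference, the shoelace formula on vertices (1, 1), (5, 1), (3, 4)
    # gives 0.5 * |1 * (1 - 4) + 5 * (4 - 1) + 3 * (1 - 1)| = 6.0, which is
    # the value `areas` is expected to return for this triangle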
polygon_masks = PolygonMasks(raw_masks, 6, 6)
polygon_area = polygon_masks.areas
bitmap_area = polygon_masks.to_bitmap().areas
assert len(polygon_area) == 1
assert np.isclose(polygon_area, bitmap_area).all()
def test_polygon_mask_to_bitmap():
    # polygon masks containing 3 instances to bitmap
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
bitmap_masks = polygon_masks.to_bitmap()
assert (polygon_masks.to_ndarray() == bitmap_masks.to_ndarray()).all()
def test_polygon_mask_to_ndarray():
# empty polygon masks to ndarray
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
ndarray_masks = polygon_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (0, 28, 28)
    # polygon masks containing 3 instances to ndarray
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
ndarray_masks = polygon_masks.to_ndarray()
assert isinstance(ndarray_masks, np.ndarray)
assert ndarray_masks.shape == (3, 28, 28)
def test_polygon_to_tensor():
# empty polygon masks to tensor
raw_masks = dummy_raw_polygon_masks((0, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
tensor_masks = polygon_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (0, 28, 28)
    # polygon masks containing 3 instances to tensor
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
tensor_masks = polygon_masks.to_tensor(dtype=torch.uint8, device='cpu')
assert isinstance(tensor_masks, torch.Tensor)
assert tensor_masks.shape == (3, 28, 28)
assert (tensor_masks.numpy() == polygon_masks.to_ndarray()).all()
def test_polygon_mask_index():
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
# index by integer
polygon_masks[0]
# index by list
polygon_masks[[0, 1]]
# index by ndarray
polygon_masks[np.asarray([0, 1])]
with pytest.raises(ValueError):
# invalid index
polygon_masks[torch.Tensor([1, 2])]
def test_polygon_mask_iter():
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
for i, polygon_mask in enumerate(polygon_masks):
assert np.equal(polygon_mask, raw_masks[i]).all()
def test_mask2bbox():
# no instance
masks = torch.zeros((1, 20, 15), dtype=torch.bool)
bboxes_empty_gt = torch.tensor([[0, 0, 0, 0]]).float()
bboxes = mask2bbox(masks)
assert torch.allclose(bboxes_empty_gt.float(), bboxes)
# the entire mask is an instance
bboxes_full_gt = torch.tensor([[0, 0, 15, 20]]).float()
masks = torch.ones((1, 20, 15), dtype=torch.bool)
bboxes = mask2bbox(masks)
assert torch.allclose(bboxes_full_gt, bboxes)
# a pentagon-shaped instance
bboxes_gt = torch.tensor([[2, 2, 7, 6]]).float()
masks = torch.zeros((1, 20, 15), dtype=torch.bool)
masks[0, 2, 4] = True
masks[0, 3, 3:6] = True
masks[0, 4, 2:7] = True
masks[0, 5, 2:7] = True
bboxes = mask2bbox(masks)
assert torch.allclose(bboxes_gt, bboxes)
| 28,276 | 38.603641 | 79 | py |
mmdetection | mmdetection-master/tests/test_utils/test_memory.py | import numpy as np
import pytest
import torch
from mmdet.utils import AvoidOOM
from mmdet.utils.memory import cast_tensor_type
def test_avoidoom():
tensor = torch.from_numpy(np.random.random((20, 20)))
if torch.cuda.is_available():
tensor = tensor.cuda()
# get default result
default_result = torch.mm(tensor, tensor.transpose(1, 0))
        # when no OOM error occurs
AvoidCudaOOM = AvoidOOM()
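        # retry_if_cuda_oom wraps the callable so that, on a CUDA OOM, the
        # call is retried first with fp16 inputs and then, if `to_cpu` is
        # enabled (the default), on the CPU, casting the outputs back to
        # the source dtype and device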
result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,
tensor.transpose(
1, 0))
assert default_result.device == result.device and \
default_result.dtype == result.dtype and \
torch.equal(default_result, result)
# calculate with fp16 and convert back to source type
AvoidCudaOOM = AvoidOOM(test=True)
result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,
tensor.transpose(
1, 0))
assert default_result.device == result.device and \
default_result.dtype == result.dtype and \
torch.allclose(default_result, result, 1e-3)
# calculate on cpu and convert back to source device
AvoidCudaOOM = AvoidOOM(test=True)
result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,
tensor.transpose(
1, 0))
assert result.dtype == default_result.dtype and \
result.device == default_result.device and \
torch.allclose(default_result, result)
        # do not calculate on cpu; the outputs will be the same as the inputs
AvoidCudaOOM = AvoidOOM(test=True, to_cpu=False)
result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,
tensor.transpose(
1, 0))
assert result.dtype == default_result.dtype and \
result.device == default_result.device
else:
default_result = torch.mm(tensor, tensor.transpose(1, 0))
AvoidCudaOOM = AvoidOOM()
result = AvoidCudaOOM.retry_if_cuda_oom(torch.mm)(tensor,
tensor.transpose(
1, 0))
assert default_result.device == result.device and \
default_result.dtype == result.dtype and \
torch.equal(default_result, result)
def test_cast_tensor_type():
inputs = torch.rand(10)
if torch.cuda.is_available():
inputs = inputs.cuda()
with pytest.raises(AssertionError):
cast_tensor_type(inputs, src_type=None, dst_type=None)
# input is a float
out = cast_tensor_type(10., dst_type=torch.half)
assert out == 10. and isinstance(out, float)
# convert Tensor to fp16 and re-convert to fp32
fp16_out = cast_tensor_type(inputs, dst_type=torch.half)
assert fp16_out.dtype == torch.half
fp32_out = cast_tensor_type(fp16_out, dst_type=torch.float32)
assert fp32_out.dtype == torch.float32
# input is a list
list_input = [inputs, inputs]
list_outs = cast_tensor_type(list_input, dst_type=torch.half)
assert len(list_outs) == len(list_input) and \
isinstance(list_outs, list)
for out in list_outs:
assert out.dtype == torch.half
# input is a dict
dict_input = {'test1': inputs, 'test2': inputs}
dict_outs = cast_tensor_type(dict_input, dst_type=torch.half)
assert len(dict_outs) == len(dict_input) and \
isinstance(dict_outs, dict)
# convert the input tensor to CPU and re-convert to GPU
if torch.cuda.is_available():
cpu_device = torch.empty(0).device
gpu_device = inputs.device
cpu_out = cast_tensor_type(inputs, dst_type=cpu_device)
assert cpu_out.device == cpu_device
gpu_out = cast_tensor_type(inputs, dst_type=gpu_device)
assert gpu_out.device == gpu_device
| 4,261 | 42.050505 | 75 | py |
mmdetection | mmdetection-master/tests/test_utils/test_misc.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import tempfile
import numpy as np
import pytest
import torch
from mmdet.core.bbox import distance2bbox
from mmdet.core.mask.structures import BitmapMasks, PolygonMasks
from mmdet.core.utils import (center_of_mass, filter_scores_and_topk,
flip_tensor, mask2ndarray, select_single_mlvl)
from mmdet.utils import find_latest_checkpoint
def dummy_raw_polygon_masks(size):
"""
Args:
size (tuple): expected shape of dummy masks, (N, H, W)
Return:
list[list[ndarray]]: dummy mask
"""
num_obj, height, width = size
polygons = []
for _ in range(num_obj):
num_points = np.random.randint(5) * 2 + 6
polygons.append([np.random.uniform(0, min(height, width), num_points)])
return polygons
def test_mask2ndarray():
raw_masks = np.ones((3, 28, 28))
bitmap_mask = BitmapMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(bitmap_mask)
assert np.allclose(raw_masks, output_mask)
raw_masks = dummy_raw_polygon_masks((3, 28, 28))
polygon_masks = PolygonMasks(raw_masks, 28, 28)
output_mask = mask2ndarray(polygon_masks)
assert output_mask.shape == (3, 28, 28)
raw_masks = np.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
raw_masks = torch.ones((3, 28, 28))
output_mask = mask2ndarray(raw_masks)
assert np.allclose(raw_masks, output_mask)
# test unsupported type
raw_masks = []
with pytest.raises(TypeError):
output_mask = mask2ndarray(raw_masks)
def test_distance2bbox():
point = torch.Tensor([[74., 61.], [-29., 106.], [138., 61.], [29., 170.]])
distance = torch.Tensor([[0., 0, 1., 1.], [1., 2., 10., 6.],
[22., -29., 138., 61.], [54., -29., 170., 61.]])
expected_decode_bboxes = torch.Tensor([[74., 61., 75., 62.],
[0., 104., 0., 112.],
[100., 90., 100., 120.],
[0., 120., 100., 120.]])
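    # distance2bbox decodes per-point (left, top, right, bottom) offsets as
    # (px - l, py - t, px + r, py + b), clamped to max_shape; e.g. the first
    # point (74, 61) with distance (0, 0, 1, 1) decodes to (74, 61, 75, 62)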
out_bbox = distance2bbox(point, distance, max_shape=(120, 100))
assert expected_decode_bboxes.allclose(out_bbox)
out = distance2bbox(point, distance, max_shape=torch.Tensor((120, 100)))
assert expected_decode_bboxes.allclose(out)
batch_point = point.unsqueeze(0).repeat(2, 1, 1)
batch_distance = distance.unsqueeze(0).repeat(2, 1, 1)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(
batch_point, batch_distance, max_shape=[(120, 100), (120, 100)])[0]
assert out.allclose(batch_out)
batch_out = distance2bbox(point, batch_distance, max_shape=(120, 100))[0]
assert out.allclose(batch_out)
# test max_shape is not equal to batch
with pytest.raises(AssertionError):
distance2bbox(
batch_point,
batch_distance,
max_shape=[(120, 100), (120, 100), (32, 32)])
rois = torch.zeros((0, 4))
deltas = torch.zeros((0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
rois = torch.zeros((2, 0, 4))
deltas = torch.zeros((2, 0, 4))
out = distance2bbox(rois, deltas, max_shape=(120, 100))
assert rois.shape == out.shape
@pytest.mark.parametrize('mask', [
torch.ones((28, 28)),
torch.zeros((28, 28)),
torch.rand(28, 28) > 0.5,
torch.tensor([[0, 0, 0, 0], [0, 1, 1, 0], [0, 1, 1, 0], [0, 0, 0, 0]])
])
def test_center_of_mass(mask):
center_h, center_w = center_of_mass(mask)
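    # center_of_mass returns the mask-weighted mean of the (row, col)
    # coordinates; in the 4x4 case the foreground covers rows/cols 1-2,
    # so both coordinates average to 1.5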
if mask.shape[0] == 4:
assert center_h == 1.5
assert center_w == 1.5
assert isinstance(center_h, torch.Tensor) \
and isinstance(center_w, torch.Tensor)
assert 0 <= center_h <= 28 \
and 0 <= center_w <= 28
def test_flip_tensor():
img = np.random.random((1, 3, 10, 10))
src_tensor = torch.from_numpy(img)
# test flip_direction parameter error
with pytest.raises(AssertionError):
flip_tensor(src_tensor, 'flip')
# test tensor dimension
with pytest.raises(AssertionError):
flip_tensor(src_tensor[0], 'vertical')
    hflip_tensor = flip_tensor(src_tensor, 'horizontal')
    # a horizontal flip mirrors the last (width) axis
    expected_hflip_tensor = torch.from_numpy(img[..., ::-1].copy())
    assert expected_hflip_tensor.allclose(hflip_tensor)
    vflip_tensor = flip_tensor(src_tensor, 'vertical')
    # a vertical flip mirrors the height axis
    expected_vflip_tensor = torch.from_numpy(img[..., ::-1, :].copy())
    assert expected_vflip_tensor.allclose(vflip_tensor)
    diag_flip_tensor = flip_tensor(src_tensor, 'diagonal')
    expected_diag_flip_tensor = torch.from_numpy(img[..., ::-1, ::-1].copy())
    assert expected_diag_flip_tensor.allclose(diag_flip_tensor)
def test_select_single_mlvl():
mlvl_tensors = [torch.rand(2, 1, 10, 10)] * 5
mlvl_tensor_list = select_single_mlvl(mlvl_tensors, 1)
assert len(mlvl_tensor_list) == 5 and mlvl_tensor_list[0].ndim == 3
def test_filter_scores_and_topk():
score = torch.tensor([[0.1, 0.3, 0.2], [0.12, 0.7, 0.9], [0.02, 0.8, 0.08],
[0.4, 0.1, 0.08]])
bbox_pred = torch.tensor([[0.2, 0.3], [0.4, 0.7], [0.1, 0.1], [0.5, 0.1]])
score_thr = 0.15
nms_pre = 4
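    # filter_scores_and_topk flattens the per-anchor class scores, drops
    # entries not above score_thr and keeps the nms_pre highest; `labels`
    # are the surviving class indices, `keep_idxs` the originating anchor
    # indices, which also gather any extra tensors passed via `results`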
# test results type error
with pytest.raises(NotImplementedError):
filter_scores_and_topk(score, score_thr, nms_pre, (score, ))
filtered_results = filter_scores_and_topk(
score, score_thr, nms_pre, results=dict(bbox_pred=bbox_pred))
filtered_score, labels, keep_idxs, results = filtered_results
assert filtered_score.allclose(torch.tensor([0.9, 0.8, 0.7, 0.4]))
assert labels.allclose(torch.tensor([2, 1, 1, 0]))
assert keep_idxs.allclose(torch.tensor([1, 2, 1, 3]))
assert results['bbox_pred'].allclose(
torch.tensor([[0.4, 0.7], [0.1, 0.1], [0.4, 0.7], [0.5, 0.1]]))
def test_find_latest_checkpoint():
with tempfile.TemporaryDirectory() as tmpdir:
path = tmpdir
latest = find_latest_checkpoint(path)
# There are no checkpoints in the path.
assert latest is None
path = osp.join(tmpdir, 'none')
latest = find_latest_checkpoint(path)
# The path does not exist.
assert latest is None
with tempfile.TemporaryDirectory() as tmpdir:
with open(osp.join(tmpdir, 'latest.pth'), 'w') as f:
f.write('latest')
path = tmpdir
latest = find_latest_checkpoint(path)
assert latest == osp.join(tmpdir, 'latest.pth')
with tempfile.TemporaryDirectory() as tmpdir:
with open(osp.join(tmpdir, 'iter_4000.pth'), 'w') as f:
f.write('iter_4000')
with open(osp.join(tmpdir, 'iter_8000.pth'), 'w') as f:
f.write('iter_8000')
path = tmpdir
latest = find_latest_checkpoint(path)
assert latest == osp.join(tmpdir, 'iter_8000.pth')
with tempfile.TemporaryDirectory() as tmpdir:
with open(osp.join(tmpdir, 'epoch_1.pth'), 'w') as f:
f.write('epoch_1')
with open(osp.join(tmpdir, 'epoch_2.pth'), 'w') as f:
f.write('epoch_2')
path = tmpdir
latest = find_latest_checkpoint(path)
assert latest == osp.join(tmpdir, 'epoch_2.pth')
| 7,361 | 34.912195 | 79 | py |
mmdetection | mmdetection-master/tests/test_utils/test_nms.py | import pytest
import torch
from mmdet.core.post_processing import mask_matrix_nms
def _create_mask(N, h, w):
masks = torch.rand((N, h, w)) > 0.5
labels = torch.rand(N)
scores = torch.rand(N)
return masks, labels, scores
def test_nms_input_errors():
with pytest.raises(AssertionError):
mask_matrix_nms(
torch.rand((10, 28, 28)), torch.rand(11), torch.rand(11))
with pytest.raises(AssertionError):
masks = torch.rand((10, 28, 28))
mask_matrix_nms(
masks,
torch.rand(11),
torch.rand(11),
mask_area=masks.sum((1, 2)).float()[:8])
with pytest.raises(NotImplementedError):
mask_matrix_nms(
torch.rand((10, 28, 28)),
torch.rand(10),
torch.rand(10),
kernel='None')
    # test empty results
masks, labels, scores = _create_mask(0, 28, 28)
score, label, mask, keep_ind = \
mask_matrix_nms(masks, labels, scores)
assert len(score) == len(label) == \
len(mask) == len(keep_ind) == 0
    # do not use filter_thr, nms_pre or max_num
masks, labels, scores = _create_mask(1000, 28, 28)
score, label, mask, keep_ind = \
mask_matrix_nms(masks, labels, scores)
assert len(score) == len(label) == \
len(mask) == len(keep_ind) == 1000
# only use nms_pre
score, label, mask, keep_ind = \
mask_matrix_nms(masks, labels, scores, nms_pre=500)
assert len(score) == len(label) == \
len(mask) == len(keep_ind) == 500
# use max_num
score, label, mask, keep_ind = \
mask_matrix_nms(masks, labels, scores,
nms_pre=500, max_num=100)
assert len(score) == len(label) == \
len(mask) == len(keep_ind) == 100
masks, labels, _ = _create_mask(1, 28, 28)
scores = torch.Tensor([1.0])
masks = masks.expand(1000, 28, 28)
labels = labels.expand(1000)
scores = scores.expand(1000)
    # assert that scores are decayed and that filter_thr takes effect:
    # given identical masks and labels with all scores equal to 1,
    # the first score stays at 1 while the others decay
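    # (sketch of the gaussian-kernel decay used here: each score is scaled
    # by the minimum over higher-scoring masks j of
    # exp(-sigma * (iou_ij**2 - max_iou_j**2)), so a fully overlapping
    # duplicate drops below filter_thr=0.5 and is removed)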
score, label, mask, keep_ind = \
mask_matrix_nms(masks,
labels,
scores,
nms_pre=500,
max_num=100,
kernel='gaussian',
sigma=2.0,
filter_thr=0.5)
assert len(score) == 1
assert score[0] == 1
| 2,528 | 32.276316 | 69 | py |
mmdetection | mmdetection-master/tests/test_utils/test_replace_cfg_vals.py | import os.path as osp
import tempfile
from copy import deepcopy
import pytest
from mmcv.utils import Config
from mmdet.utils import replace_cfg_vals
def test_replace_cfg_vals():
temp_file = tempfile.NamedTemporaryFile()
cfg_path = f'{temp_file.name}.py'
with open(cfg_path, 'w') as f:
f.write('configs')
ori_cfg_dict = dict()
ori_cfg_dict['cfg_name'] = osp.basename(temp_file.name)
ori_cfg_dict['work_dir'] = 'work_dirs/${cfg_name}/${percent}/${fold}'
ori_cfg_dict['percent'] = 5
ori_cfg_dict['fold'] = 1
ori_cfg_dict['model_wrapper'] = dict(
type='SoftTeacher', detector='${model}')
ori_cfg_dict['model'] = dict(
type='FasterRCNN',
backbone=dict(type='ResNet'),
neck=dict(type='FPN'),
rpn_head=dict(type='RPNHead'),
roi_head=dict(type='StandardRoIHead'),
train_cfg=dict(
rpn=dict(
assigner=dict(type='MaxIoUAssigner'),
sampler=dict(type='RandomSampler'),
),
rpn_proposal=dict(nms=dict(type='nms', iou_threshold=0.7)),
rcnn=dict(
assigner=dict(type='MaxIoUAssigner'),
sampler=dict(type='RandomSampler'),
),
),
test_cfg=dict(
rpn=dict(nms=dict(type='nms', iou_threshold=0.7)),
rcnn=dict(nms=dict(type='nms', iou_threshold=0.5)),
),
)
ori_cfg_dict['iou_threshold'] = dict(
rpn_proposal_nms='${model.train_cfg.rpn_proposal.nms.iou_threshold}',
test_rpn_nms='${model.test_cfg.rpn.nms.iou_threshold}',
test_rcnn_nms='${model.test_cfg.rcnn.nms.iou_threshold}',
)
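    # replace_cfg_vals expands each `${dotted.key}` placeholder with the
    # value stored at that key in the config, so a nested reference such as
    # `${model.train_cfg.rpn_proposal.nms.iou_threshold}` resolves to 0.7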
ori_cfg_dict['str'] = 'Hello, world!'
ori_cfg_dict['dict'] = {'Hello': 'world!'}
ori_cfg_dict['list'] = [
'Hello, world!',
]
ori_cfg_dict['tuple'] = ('Hello, world!', )
ori_cfg_dict['test_str'] = 'xxx${str}xxx'
ori_cfg = Config(ori_cfg_dict, filename=cfg_path)
updated_cfg = replace_cfg_vals(deepcopy(ori_cfg))
assert updated_cfg.work_dir \
== f'work_dirs/{osp.basename(temp_file.name)}/5/1'
assert updated_cfg.model.detector == ori_cfg.model
assert updated_cfg.iou_threshold.rpn_proposal_nms \
== ori_cfg.model.train_cfg.rpn_proposal.nms.iou_threshold
assert updated_cfg.test_str == 'xxxHello, world!xxx'
ori_cfg_dict['test_dict'] = 'xxx${dict}xxx'
ori_cfg_dict['test_list'] = 'xxx${list}xxx'
ori_cfg_dict['test_tuple'] = 'xxx${tuple}xxx'
with pytest.raises(AssertionError):
cfg = deepcopy(ori_cfg)
cfg['test_dict'] = 'xxx${dict}xxx'
updated_cfg = replace_cfg_vals(cfg)
with pytest.raises(AssertionError):
cfg = deepcopy(ori_cfg)
cfg['test_list'] = 'xxx${list}xxx'
updated_cfg = replace_cfg_vals(cfg)
with pytest.raises(AssertionError):
cfg = deepcopy(ori_cfg)
cfg['test_tuple'] = 'xxx${tuple}xxx'
updated_cfg = replace_cfg_vals(cfg)
| 2,980 | 34.488095 | 77 | py |
mmdetection | mmdetection-master/tests/test_utils/test_setup_env.py | # Copyright (c) OpenMMLab. All rights reserved.
import multiprocessing as mp
import os
import platform
import cv2
from mmcv import Config
from mmdet.utils import setup_multi_processes
def test_setup_multi_processes():
# temp save system setting
    sys_start_method = mp.get_start_method(allow_none=True)
sys_cv_threads = cv2.getNumThreads()
# pop and temp save system env vars
sys_omp_threads = os.environ.pop('OMP_NUM_THREADS', default=None)
sys_mkl_threads = os.environ.pop('MKL_NUM_THREADS', default=None)
# test config without setting env
config = dict(data=dict(workers_per_gpu=2))
cfg = Config(config)
setup_multi_processes(cfg)
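    # with multiple dataloader workers and no explicit env vars,
    # setup_multi_processes caps OMP/MKL at a single thread and disables
    # OpenCV's own threading to avoid CPU oversubscription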
assert os.getenv('OMP_NUM_THREADS') == '1'
assert os.getenv('MKL_NUM_THREADS') == '1'
# when set to 0, the num threads will be 1
assert cv2.getNumThreads() == 1
if platform.system() != 'Windows':
assert mp.get_start_method() == 'fork'
# test num workers <= 1
os.environ.pop('OMP_NUM_THREADS')
os.environ.pop('MKL_NUM_THREADS')
config = dict(data=dict(workers_per_gpu=0))
cfg = Config(config)
setup_multi_processes(cfg)
assert 'OMP_NUM_THREADS' not in os.environ
assert 'MKL_NUM_THREADS' not in os.environ
# test manually set env var
os.environ['OMP_NUM_THREADS'] = '4'
config = dict(data=dict(workers_per_gpu=2))
cfg = Config(config)
setup_multi_processes(cfg)
assert os.getenv('OMP_NUM_THREADS') == '4'
# test manually set opencv threads and mp start method
config = dict(
data=dict(workers_per_gpu=2),
opencv_num_threads=4,
mp_start_method='spawn')
cfg = Config(config)
setup_multi_processes(cfg)
assert cv2.getNumThreads() == 4
assert mp.get_start_method() == 'spawn'
# revert setting to avoid affecting other programs
    if sys_start_method:
        mp.set_start_method(sys_start_method, force=True)
cv2.setNumThreads(sys_cv_threads)
if sys_omp_threads:
os.environ['OMP_NUM_THREADS'] = sys_omp_threads
else:
os.environ.pop('OMP_NUM_THREADS')
if sys_mkl_threads:
os.environ['MKL_NUM_THREADS'] = sys_mkl_threads
else:
os.environ.pop('MKL_NUM_THREADS')
| 2,221 | 31.202899 | 69 | py |
mmdetection | mmdetection-master/tests/test_utils/test_split_batch.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from copy import deepcopy
import mmcv
import numpy as np
import torch
from mmdet.utils import split_batch
def test_split_batch():
img_root = osp.join(osp.dirname(__file__), '../data/color.jpg')
img = mmcv.imread(img_root, 'color')
h, w, _ = img.shape
gt_bboxes = np.array([[0.2 * w, 0.2 * h, 0.4 * w, 0.4 * h],
[0.6 * w, 0.6 * h, 0.8 * w, 0.8 * h]],
dtype=np.float32)
    gt_labels = np.ones(gt_bboxes.shape[0], dtype=np.int64)
img = torch.tensor(img).permute(2, 0, 1)
meta = dict()
meta['filename'] = img_root
meta['ori_shape'] = img.shape
meta['img_shape'] = img.shape
meta['img_norm_cfg'] = {
'mean': np.array([103.53, 116.28, 123.675], dtype=np.float32),
'std': np.array([1., 1., 1.], dtype=np.float32),
'to_rgb': False
}
meta['pad_shape'] = img.shape
# For example, tag include sup, unsup_teacher and unsup_student,
# in order to distinguish the difference between the three groups of data,
# the scale_factor of sup is [0.5, 0.5, 0.5, 0.5]
# the scale_factor of unsup_teacher is [1.0, 1.0, 1.0, 1.0]
# the scale_factor of unsup_student is [2.0, 2.0, 2.0, 2.0]
imgs = img.unsqueeze(0).repeat(9, 1, 1, 1)
img_metas = []
tags = [
'sup', 'unsup_teacher', 'unsup_student', 'unsup_teacher',
'unsup_student', 'unsup_teacher', 'unsup_student', 'unsup_teacher',
'unsup_student'
]
for tag in tags:
img_meta = deepcopy(meta)
if tag == 'sup':
img_meta['scale_factor'] = [0.5, 0.5, 0.5, 0.5]
img_meta['tag'] = 'sup'
elif tag == 'unsup_teacher':
img_meta['scale_factor'] = [1.0, 1.0, 1.0, 1.0]
img_meta['tag'] = 'unsup_teacher'
elif tag == 'unsup_student':
img_meta['scale_factor'] = [2.0, 2.0, 2.0, 2.0]
img_meta['tag'] = 'unsup_student'
else:
continue
img_metas.append(img_meta)
kwargs = dict()
kwargs['gt_bboxes'] = [torch.tensor(gt_bboxes)] + [torch.zeros(0, 4)] * 8
    kwargs['gt_labels'] = [torch.tensor(gt_labels)] + [torch.zeros(0, )] * 8
data_groups = split_batch(imgs, img_metas, kwargs)
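    # split_batch regroups imgs, img_metas and every entry in kwargs by the
    # 'tag' field of each image's meta, returning one sub-batch per tag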
assert set(data_groups.keys()) == set(tags)
assert data_groups['sup']['img'].shape == (1, 3, h, w)
assert data_groups['unsup_teacher']['img'].shape == (4, 3, h, w)
assert data_groups['unsup_student']['img'].shape == (4, 3, h, w)
# the scale_factor of sup is [0.5, 0.5, 0.5, 0.5]
assert data_groups['sup']['img_metas'][0]['scale_factor'] == [
0.5, 0.5, 0.5, 0.5
]
# the scale_factor of unsup_teacher is [1.0, 1.0, 1.0, 1.0]
assert data_groups['unsup_teacher']['img_metas'][0]['scale_factor'] == [
1.0, 1.0, 1.0, 1.0
]
assert data_groups['unsup_teacher']['img_metas'][1]['scale_factor'] == [
1.0, 1.0, 1.0, 1.0
]
assert data_groups['unsup_teacher']['img_metas'][2]['scale_factor'] == [
1.0, 1.0, 1.0, 1.0
]
assert data_groups['unsup_teacher']['img_metas'][3]['scale_factor'] == [
1.0, 1.0, 1.0, 1.0
]
# the scale_factor of unsup_student is [2.0, 2.0, 2.0, 2.0]
assert data_groups['unsup_student']['img_metas'][0]['scale_factor'] == [
2.0, 2.0, 2.0, 2.0
]
assert data_groups['unsup_student']['img_metas'][1]['scale_factor'] == [
2.0, 2.0, 2.0, 2.0
]
assert data_groups['unsup_student']['img_metas'][2]['scale_factor'] == [
2.0, 2.0, 2.0, 2.0
]
assert data_groups['unsup_student']['img_metas'][3]['scale_factor'] == [
2.0, 2.0, 2.0, 2.0
]
| 3,704 | 37.59375 | 78 | py |
mmdetection | mmdetection-master/tests/test_utils/test_version.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmdet import digit_version
def test_version_check():
assert digit_version('1.0.5') > digit_version('1.0.5rc0')
assert digit_version('1.0.5') > digit_version('1.0.4rc0')
assert digit_version('1.0.5') > digit_version('1.0rc0')
assert digit_version('1.0.0') > digit_version('0.6.2')
assert digit_version('1.0.0') > digit_version('0.2.16')
assert digit_version('1.0.5rc0') > digit_version('1.0.0rc0')
assert digit_version('1.0.0rc1') > digit_version('1.0.0rc0')
assert digit_version('1.0.0rc2') > digit_version('1.0.0rc0')
assert digit_version('1.0.0rc2') > digit_version('1.0.0rc1')
assert digit_version('1.0.1rc1') > digit_version('1.0.0rc1')
assert digit_version('1.0.0') > digit_version('1.0.0rc1')
| 798 | 46 | 64 | py |
mmdetection | mmdetection-master/tests/test_utils/test_visualization.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import tempfile
import mmcv
import numpy as np
import pytest
import torch
from mmdet.core import visualization as vis
from mmdet.datasets import (CityscapesDataset, CocoDataset,
CocoPanopticDataset, VOCDataset)
def test_color():
assert vis.color_val_matplotlib(mmcv.Color.blue) == (0., 0., 1.)
assert vis.color_val_matplotlib('green') == (0., 1., 0.)
assert vis.color_val_matplotlib((1, 2, 3)) == (3 / 255, 2 / 255, 1 / 255)
assert vis.color_val_matplotlib(100) == (100 / 255, 100 / 255, 100 / 255)
    assert vis.color_val_matplotlib(np.zeros(3, dtype=int)) == (0., 0., 0.)
    # a plain Python list is not an accepted color type
with pytest.raises(TypeError):
vis.color_val_matplotlib([255, 255, 255])
# forbid float
with pytest.raises(TypeError):
vis.color_val_matplotlib(1.0)
# overflowed
with pytest.raises(AssertionError):
vis.color_val_matplotlib((0, 0, 500))
def test_imshow_det_bboxes():
tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
'image.jpg')
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
out_image = vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape
assert not np.allclose(image, out_image)
os.remove(tmp_filename)
# test grayscale images
image = np.ones((10, 10), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
out_image = vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape[:2]
os.remove(tmp_filename)
    # test empty bboxes of shape (0, 4) and labels of shape (0, )
image = np.ones((10, 10, 3), np.uint8)
bbox = np.ones((0, 4))
label = np.ones((0, ))
vis.imshow_det_bboxes(
image, bbox, label, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test mask
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
segms = np.random.random((2, 10, 10)) > 0.5
segms = np.array(segms, np.int32)
vis.imshow_det_bboxes(
image, bbox, label, segms, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test tensor mask type error
with pytest.raises(AttributeError):
segms = torch.tensor(segms)
vis.imshow_det_bboxes(image, bbox, label, segms, show=False)
def test_imshow_gt_det_bboxes():
tmp_filename = osp.join(tempfile.gettempdir(), 'det_bboxes_image',
'image.jpg')
image = np.ones((10, 10, 3), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
annotation = dict(gt_bboxes=bbox, gt_labels=label)
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
result = [det_result]
out_image = vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
assert image.shape == out_image.shape
assert not np.allclose(image, out_image)
os.remove(tmp_filename)
# test grayscale images
image = np.ones((10, 10), np.uint8)
bbox = np.array([[2, 1, 3, 3], [3, 4, 6, 6]])
label = np.array([0, 1])
annotation = dict(gt_bboxes=bbox, gt_labels=label)
det_result = np.array([[2, 1, 3, 3, 0], [3, 4, 6, 6, 1]])
result = [det_result]
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test numpy mask
gt_mask = np.ones((2, 10, 10))
annotation['gt_masks'] = gt_mask
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test tensor mask
gt_mask = torch.ones((2, 10, 10))
annotation['gt_masks'] = gt_mask
vis.imshow_gt_det_bboxes(
image, annotation, result, out_file=tmp_filename, show=False)
assert osp.isfile(tmp_filename)
os.remove(tmp_filename)
# test unsupported type
annotation['gt_masks'] = []
with pytest.raises(TypeError):
vis.imshow_gt_det_bboxes(image, annotation, result, show=False)
def test_palette():
assert vis.palette_val([(1, 2, 3)])[0] == (1 / 255, 2 / 255, 3 / 255)
# test list
palette = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
palette_ = vis.get_palette(palette, 3)
for color, color_ in zip(palette, palette_):
assert color == color_
# test tuple
palette = vis.get_palette((1, 2, 3), 3)
assert len(palette) == 3
for color in palette:
assert color == (1, 2, 3)
# test color str
palette = vis.get_palette('red', 3)
assert len(palette) == 3
for color in palette:
assert color == (255, 0, 0)
# test dataset str
palette = vis.get_palette('coco', len(CocoDataset.CLASSES))
assert len(palette) == len(CocoDataset.CLASSES)
assert palette[0] == (220, 20, 60)
palette = vis.get_palette('coco', len(CocoPanopticDataset.CLASSES))
assert len(palette) == len(CocoPanopticDataset.CLASSES)
assert palette[-1] == (250, 141, 255)
palette = vis.get_palette('voc', len(VOCDataset.CLASSES))
assert len(palette) == len(VOCDataset.CLASSES)
assert palette[0] == (106, 0, 228)
palette = vis.get_palette('citys', len(CityscapesDataset.CLASSES))
assert len(palette) == len(CityscapesDataset.CLASSES)
assert palette[0] == (220, 20, 60)
# test random
palette1 = vis.get_palette('random', 3)
palette2 = vis.get_palette(None, 3)
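    # both 'random' and None fall back to a seeded random palette, so the
    # two calls are expected to return identical colors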
for color1, color2 in zip(palette1, palette2):
assert isinstance(color1, tuple)
assert isinstance(color2, tuple)
assert color1 == color2
| 6,080 | 33.948276 | 78 | py |
mmdetection | mmdetection-master/tools/dist_test.sh | #!/usr/bin/env bash
CONFIG=$1
CHECKPOINT=$2
GPUS=$3
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
PORT=${PORT:-29500}
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch \
--nnodes=$NNODES \
--node_rank=$NODE_RANK \
--master_addr=$MASTER_ADDR \
--nproc_per_node=$GPUS \
--master_port=$PORT \
$(dirname "$0")/test.py \
$CONFIG \
$CHECKPOINT \
--launcher pytorch \
${@:4}
| 479 | 19.869565 | 43 | sh |
mmdetection | mmdetection-master/tools/dist_train.sh | #!/usr/bin/env bash
CONFIG=$1
GPUS=$2
NNODES=${NNODES:-1}
NODE_RANK=${NODE_RANK:-0}
PORT=${PORT:-29500}
MASTER_ADDR=${MASTER_ADDR:-"127.0.0.1"}
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
python -m torch.distributed.launch \
--nnodes=$NNODES \
--node_rank=$NODE_RANK \
--master_addr=$MASTER_ADDR \
--nproc_per_node=$GPUS \
--master_port=$PORT \
$(dirname "$0")/train.py \
$CONFIG \
--seed 0 \
--launcher pytorch ${@:3}
| 457 | 20.809524 | 43 | sh |
mmdetection | mmdetection-master/tools/slurm_test.sh | #!/usr/bin/env bash
set -x
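# Illustrative usage (partition, job name and paths are placeholders):
#   GPUS=16 ./tools/slurm_test.sh my_partition test_job config.py ckpt.pth \
#       --eval bbox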
PARTITION=$1
JOB_NAME=$2
CONFIG=$3
CHECKPOINT=$4
GPUS=${GPUS:-8}
GPUS_PER_NODE=${GPUS_PER_NODE:-8}
CPUS_PER_TASK=${CPUS_PER_TASK:-5}
PY_ARGS=${@:5}
SRUN_ARGS=${SRUN_ARGS:-""}
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
srun -p ${PARTITION} \
--job-name=${JOB_NAME} \
--gres=gpu:${GPUS_PER_NODE} \
--ntasks=${GPUS} \
--ntasks-per-node=${GPUS_PER_NODE} \
--cpus-per-task=${CPUS_PER_TASK} \
--kill-on-bad-exit=1 \
${SRUN_ARGS} \
python -u tools/test.py ${CONFIG} ${CHECKPOINT} --launcher="slurm" ${PY_ARGS}
| 566 | 21.68 | 81 | sh |
mmdetection | mmdetection-master/tools/slurm_train.sh | #!/usr/bin/env bash
set -x
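# Illustrative usage (partition, job name and paths are placeholders):
#   GPUS=16 ./tools/slurm_train.sh my_partition train_job config.py \
#       work_dirs/exp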
PARTITION=$1
JOB_NAME=$2
CONFIG=$3
WORK_DIR=$4
GPUS=${GPUS:-8}
GPUS_PER_NODE=${GPUS_PER_NODE:-8}
CPUS_PER_TASK=${CPUS_PER_TASK:-5}
SRUN_ARGS=${SRUN_ARGS:-""}
PY_ARGS=${@:5}
PYTHONPATH="$(dirname $0)/..":$PYTHONPATH \
srun -p ${PARTITION} \
--job-name=${JOB_NAME} \
--gres=gpu:${GPUS_PER_NODE} \
--ntasks=${GPUS} \
--ntasks-per-node=${GPUS_PER_NODE} \
--cpus-per-task=${CPUS_PER_TASK} \
--kill-on-bad-exit=1 \
${SRUN_ARGS} \
python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" ${PY_ARGS}
| 574 | 22 | 91 | sh |
mmdetection | mmdetection-master/tools/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from mmdet.apis import multi_gpu_test, single_gpu_test
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
from mmdet.utils import (build_ddp, build_dp, compat_cfg, get_device,
replace_cfg_vals, rfnext_init_model,
setup_multi_processes, update_data_root)
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet test (and eval) a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--work-dir',
help='the directory to save the file containing evaluation metrics')
parser.add_argument('--out', help='output result file in pickle format')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
help='Whether to fuse conv and bn, this will slightly increase'
'the inference speed')
parser.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='(Deprecated, please use --gpu-id) ids of gpus to use '
'(only applicable to non-distributed training)')
parser.add_argument(
'--gpu-id',
type=int,
default=0,
help='id of gpu to use '
'(only applicable to non-distributed testing)')
parser.add_argument(
'--format-only',
action='store_true',
help='Format the output results without perform evaluation. It is'
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
help='evaluation metrics, which depends on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument(
'--gpu-collect',
action='store_true',
help='whether to use gpu to collect results.')
parser.add_argument(
'--tmpdir',
help='tmp directory used for collecting results from multiple '
'workers, available when gpu-collect is not specified')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function (deprecate), '
'change to --eval-options instead.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.eval_options:
raise ValueError(
'--options and --eval-options cannot be both '
'specified, --options is deprecated in favor of --eval-options')
if args.options:
warnings.warn('--options is deprecated in favor of --eval-options')
args.eval_options = args.options
return args
def main():
args = parse_args()
assert args.out or args.eval or args.format_only or args.show \
or args.show_dir, \
('Please specify at least one operation (save/eval/format/show the '
'results / save the results) with the argument "--out", "--eval"'
', "--format-only", "--show" or "--show-dir"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg = compat_cfg(cfg)
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
if 'pretrained' in cfg.model:
cfg.model.pretrained = None
elif 'init_cfg' in cfg.model.backbone:
cfg.model.backbone.init_cfg = None
if cfg.model.get('neck'):
if isinstance(cfg.model.neck, list):
for neck_cfg in cfg.model.neck:
if neck_cfg.get('rfp_backbone'):
if neck_cfg.rfp_backbone.get('pretrained'):
neck_cfg.rfp_backbone.pretrained = None
elif cfg.model.neck.get('rfp_backbone'):
if cfg.model.neck.rfp_backbone.get('pretrained'):
cfg.model.neck.rfp_backbone.pretrained = None
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids[0:1]
warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
'Because we only support single GPU mode in '
'non-distributed testing. Use the first GPU '
'in `gpu_ids` now.')
else:
cfg.gpu_ids = [args.gpu_id]
cfg.device = get_device()
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
test_dataloader_default_args = dict(
samples_per_gpu=1, workers_per_gpu=2, dist=distributed, shuffle=False)
# in case the test dataset is concatenated
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(
cfg.data.test.pipeline)
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
if cfg.data.test_dataloader.get('samples_per_gpu', 1) > 1:
for ds_cfg in cfg.data.test:
ds_cfg.pipeline = replace_ImageToTensor(ds_cfg.pipeline)
test_loader_cfg = {
**test_dataloader_default_args,
**cfg.data.get('test_dataloader', {})
}
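    # later keys win in the dict merge, so any `test_dataloader` settings in
    # the config override the defaults above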
rank, _ = get_dist_info()
    # the work_dir is optional; only rank 0 creates it and dumps metrics
if args.work_dir is not None and rank == 0:
mmcv.mkdir_or_exist(osp.abspath(args.work_dir))
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
json_file = osp.join(args.work_dir, f'eval_{timestamp}.json')
# build the dataloader
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(dataset, **test_loader_cfg)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
# init rfnext if 'RFSearchHook' is defined in cfg
rfnext_init_model(model, cfg=cfg)
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is None and cfg.get('device', None) == 'npu':
fp16_cfg = dict(loss_scale='dynamic')
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
if args.fuse_conv_bn:
model = fuse_conv_bn(model)
    # old versions did not save class info in checkpoints, this workaround is
    # for backward compatibility
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = build_dp(model, cfg.device, device_ids=cfg.gpu_ids)
outputs = single_gpu_test(model, data_loader, args.show, args.show_dir,
args.show_score_thr)
else:
model = build_ddp(
model,
cfg.device,
device_ids=[int(os.environ['LOCAL_RANK'])],
broadcast_buffers=False)
        # In multi_gpu_test, if tmpdir is None, some tensors
        # are initialized on cuda by default, with no way to choose the
        # device. Init a tmpdir here to avoid errors on npu.
if cfg.device == 'npu' and args.tmpdir is None:
args.tmpdir = './npu_tmpdir'
outputs = multi_gpu_test(
model, data_loader, args.tmpdir, args.gpu_collect
or cfg.evaluation.get('gpu_collect', False))
rank, _ = get_dist_info()
if rank == 0:
if args.out:
print(f'\nwriting results to {args.out}')
mmcv.dump(outputs, args.out)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
# hard-code way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule', 'dynamic_intervals'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
metric = dataset.evaluate(outputs, **eval_kwargs)
print(metric)
metric_dict = dict(config=args.config, metric=metric)
if args.work_dir is not None and rank == 0:
mmcv.dump(metric_dict, json_file)
if __name__ == '__main__':
main()
| 10,997 | 37.320557 | 79 | py |
mmdetection | mmdetection-master/tools/train.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import time
import warnings
import mmcv
import torch
import torch.distributed as dist
from mmcv import Config, DictAction
from mmcv.runner import get_dist_info, init_dist
from mmcv.utils import get_git_hash
from mmdet import __version__
from mmdet.apis import init_random_seed, set_random_seed, train_detector
from mmdet.datasets import build_dataset
from mmdet.models import build_detector
from mmdet.utils import (collect_env, get_device, get_root_logger,
replace_cfg_vals, rfnext_init_model,
setup_multi_processes, update_data_root)
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument('--work-dir', help='the dir to save logs and models')
parser.add_argument(
'--resume-from', help='the checkpoint file to resume from')
parser.add_argument(
'--auto-resume',
action='store_true',
help='resume from the latest checkpoint automatically')
parser.add_argument(
'--no-validate',
action='store_true',
help='whether not to evaluate the checkpoint during training')
group_gpus = parser.add_mutually_exclusive_group()
group_gpus.add_argument(
'--gpus',
type=int,
help='(Deprecated, please use --gpu-id) number of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-ids',
type=int,
nargs='+',
help='(Deprecated, please use --gpu-id) ids of gpus to use '
'(only applicable to non-distributed training)')
group_gpus.add_argument(
'--gpu-id',
type=int,
default=0,
help='id of gpu to use '
'(only applicable to non-distributed training)')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--diff-seed',
action='store_true',
help='Whether or not set different seeds for different ranks')
parser.add_argument(
'--deterministic',
action='store_true',
help='whether to set deterministic options for CUDNN backend.')
parser.add_argument(
'--options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file (deprecate), '
'change to --cfg-options instead.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--auto-scale-lr',
action='store_true',
help='enable automatically scaling LR.')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
if args.options and args.cfg_options:
raise ValueError(
'--options and --cfg-options cannot be both '
'specified, --options is deprecated in favor of --cfg-options')
if args.options:
warnings.warn('--options is deprecated in favor of --cfg-options')
args.cfg_options = args.options
return args
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
if args.auto_scale_lr:
if 'auto_scale_lr' in cfg and \
'enable' in cfg.auto_scale_lr and \
'base_batch_size' in cfg.auto_scale_lr:
cfg.auto_scale_lr.enable = True
else:
            warnings.warn('Cannot find "auto_scale_lr" or '
                          '"auto_scale_lr.enable" or '
                          '"auto_scale_lr.base_batch_size" in your'
                          ' configuration file. Please update all the '
                          'configuration files to mmdet >= 2.24.1.')
# set multi-process settings
setup_multi_processes(cfg)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
# work_dir is determined in this priority: CLI > segment in file > filename
if args.work_dir is not None:
# update configs according to CLI args if args.work_dir is not None
cfg.work_dir = args.work_dir
elif cfg.get('work_dir', None) is None:
# use config filename as default work_dir if cfg.work_dir is None
cfg.work_dir = osp.join('./work_dirs',
osp.splitext(osp.basename(args.config))[0])
if args.resume_from is not None:
cfg.resume_from = args.resume_from
cfg.auto_resume = args.auto_resume
if args.gpus is not None:
cfg.gpu_ids = range(1)
warnings.warn('`--gpus` is deprecated because we only support '
'single GPU mode in non-distributed training. '
'Use `gpus=1` now.')
if args.gpu_ids is not None:
cfg.gpu_ids = args.gpu_ids[0:1]
warnings.warn('`--gpu-ids` is deprecated, please use `--gpu-id`. '
'Because we only support single GPU mode in '
'non-distributed training. Use the first GPU '
'in `gpu_ids` now.')
if args.gpus is None and args.gpu_ids is None:
cfg.gpu_ids = [args.gpu_id]
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# re-set gpu_ids with distributed training mode
_, world_size = get_dist_info()
cfg.gpu_ids = range(world_size)
# create work_dir
mmcv.mkdir_or_exist(osp.abspath(cfg.work_dir))
# dump config
cfg.dump(osp.join(cfg.work_dir, osp.basename(args.config)))
# init the logger before other steps
timestamp = time.strftime('%Y%m%d_%H%M%S', time.localtime())
log_file = osp.join(cfg.work_dir, f'{timestamp}.log')
logger = get_root_logger(log_file=log_file, log_level=cfg.log_level)
# init the meta dict to record some important information such as
# environment info and seed, which will be logged
meta = dict()
# log env info
env_info_dict = collect_env()
env_info = '\n'.join([(f'{k}: {v}') for k, v in env_info_dict.items()])
dash_line = '-' * 60 + '\n'
logger.info('Environment info:\n' + dash_line + env_info + '\n' +
dash_line)
meta['env_info'] = env_info
meta['config'] = cfg.pretty_text
# log some basic info
logger.info(f'Distributed training: {distributed}')
logger.info(f'Config:\n{cfg.pretty_text}')
cfg.device = get_device()
# set random seeds
seed = init_random_seed(args.seed, device=cfg.device)
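    # with --diff-seed each rank offsets the seed by its rank so that random
    # ops such as augmentation differ across GPUs yet stay reproducible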
seed = seed + dist.get_rank() if args.diff_seed else seed
logger.info(f'Set random seed to {seed}, '
f'deterministic: {args.deterministic}')
set_random_seed(seed, deterministic=args.deterministic)
cfg.seed = seed
meta['seed'] = seed
meta['exp_name'] = osp.basename(args.config)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
model.init_weights()
# init rfnext if 'RFSearchHook' is defined in cfg
rfnext_init_model(model, cfg=cfg)
datasets = [build_dataset(cfg.data.train)]
if len(cfg.workflow) == 2:
assert 'val' in [mode for (mode, _) in cfg.workflow]
val_dataset = copy.deepcopy(cfg.data.val)
val_dataset.pipeline = cfg.data.train.get(
'pipeline', cfg.data.train.dataset.get('pipeline'))
datasets.append(build_dataset(val_dataset))
if cfg.checkpoint_config is not None:
# save mmdet version, config file content and class names in
# checkpoints as meta data
cfg.checkpoint_config.meta = dict(
mmdet_version=__version__ + get_git_hash()[:7],
CLASSES=datasets[0].CLASSES)
# add an attribute for visualization convenience
model.CLASSES = datasets[0].CLASSES
train_detector(
model,
datasets,
cfg,
distributed=distributed,
validate=(not args.no_validate),
timestamp=timestamp,
meta=meta)
if __name__ == '__main__':
main()
| 9,260 | 36.342742 | 79 | py |
mmdetection | mmdetection-master/tools/analysis_tools/analyze_logs.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import json
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
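# Example invocations (log paths below are placeholders):
#   python tools/analysis_tools/analyze_logs.py plot_curve log.json \
#       --keys loss_cls loss_bbox
#   python tools/analysis_tools/analyze_logs.py cal_train_time log.json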
def cal_train_time(log_dicts, args):
for i, log_dict in enumerate(log_dicts):
print(f'{"-" * 5}Analyze train time of {args.json_logs[i]}{"-" * 5}')
all_times = []
for epoch in log_dict.keys():
if args.include_outliers:
all_times.append(log_dict[epoch]['time'])
else:
all_times.append(log_dict[epoch]['time'][1:])
if not all_times:
            raise KeyError(
                'Please reduce the log interval in the config so that '
                'interval is less than iterations of one epoch.')
all_times = np.array(all_times)
epoch_ave_time = all_times.mean(-1)
slowest_epoch = epoch_ave_time.argmax()
fastest_epoch = epoch_ave_time.argmin()
std_over_epoch = epoch_ave_time.std()
print(f'slowest epoch {slowest_epoch + 1}, '
f'average time is {epoch_ave_time[slowest_epoch]:.4f}')
print(f'fastest epoch {fastest_epoch + 1}, '
f'average time is {epoch_ave_time[fastest_epoch]:.4f}')
print(f'time std over epochs is {std_over_epoch:.4f}')
print(f'average iter time: {np.mean(all_times):.4f} s/iter')
print()
def plot_curve(log_dicts, args):
if args.backend is not None:
plt.switch_backend(args.backend)
sns.set_style(args.style)
# if legend is None, use {filename}_{key} as legend
legend = args.legend
if legend is None:
legend = []
for json_log in args.json_logs:
for metric in args.keys:
legend.append(f'{json_log}_{metric}')
assert len(legend) == (len(args.json_logs) * len(args.keys))
metrics = args.keys
num_metrics = len(metrics)
for i, log_dict in enumerate(log_dicts):
epochs = list(log_dict.keys())
for j, metric in enumerate(metrics):
print(f'plot curve of {args.json_logs[i]}, metric is {metric}')
if metric not in log_dict[epochs[int(args.eval_interval) - 1]]:
if 'mAP' in metric:
raise KeyError(
f'{args.json_logs[i]} does not contain metric '
f'{metric}. Please check if "--no-validate" is '
'specified when you trained the model.')
raise KeyError(
f'{args.json_logs[i]} does not contain metric {metric}. '
'Please reduce the log interval in the config so that '
'interval is less than iterations of one epoch.')
if 'mAP' in metric:
xs = []
ys = []
for epoch in epochs:
ys += log_dict[epoch][metric]
if 'val' in log_dict[epoch]['mode']:
xs.append(epoch)
plt.xlabel('epoch')
plt.plot(xs, ys, label=legend[i * num_metrics + j], marker='o')
else:
xs = []
ys = []
num_iters_per_epoch = log_dict[epochs[0]]['iter'][-2]
for epoch in epochs:
iters = log_dict[epoch]['iter']
if log_dict[epoch]['mode'][-1] == 'val':
iters = iters[:-1]
xs.append(
np.array(iters) + (epoch - 1) * num_iters_per_epoch)
ys.append(np.array(log_dict[epoch][metric][:len(iters)]))
xs = np.concatenate(xs)
ys = np.concatenate(ys)
plt.xlabel('iter')
plt.plot(
xs, ys, label=legend[i * num_metrics + j], linewidth=0.5)
plt.legend()
if args.title is not None:
plt.title(args.title)
if args.out is None:
plt.show()
else:
print(f'save curve to: {args.out}')
plt.savefig(args.out)
plt.cla()
def add_plot_parser(subparsers):
parser_plt = subparsers.add_parser(
'plot_curve', help='parser for plotting curves')
parser_plt.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_plt.add_argument(
'--keys',
type=str,
nargs='+',
default=['bbox_mAP'],
help='the metric that you want to plot')
parser_plt.add_argument(
'--start-epoch',
type=str,
default='1',
help='the epoch that you want to start')
parser_plt.add_argument(
'--eval-interval',
type=str,
default='1',
help='the eval interval when training')
parser_plt.add_argument('--title', type=str, help='title of figure')
parser_plt.add_argument(
'--legend',
type=str,
nargs='+',
default=None,
help='legend of each plot')
parser_plt.add_argument(
'--backend', type=str, default=None, help='backend of plt')
parser_plt.add_argument(
'--style', type=str, default='dark', help='style of plt')
parser_plt.add_argument('--out', type=str, default=None)
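# Illustrative invocation (added note, not part of the original file; the log
# path and output name are hypothetical):
#   python tools/analysis_tools/analyze_logs.py plot_curve \
#       work_dirs/my_run/20220101_000000.log.json \
#       --keys bbox_mAP loss --legend map loss --out curves.png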
def add_time_parser(subparsers):
parser_time = subparsers.add_parser(
'cal_train_time',
help='parser for computing the average time per training iteration')
parser_time.add_argument(
'json_logs',
type=str,
nargs='+',
help='path of train log in json format')
parser_time.add_argument(
'--include-outliers',
action='store_true',
help='include the first value of every epoch when computing '
'the average time')
def parse_args():
parser = argparse.ArgumentParser(description='Analyze Json Log')
# currently only support plot curve and calculate average train time
subparsers = parser.add_subparsers(dest='task', help='task parser')
add_plot_parser(subparsers)
add_time_parser(subparsers)
args = parser.parse_args()
return args
def load_json_logs(json_logs):
# load and convert json_logs to log_dict, key is epoch, value is a sub dict
# keys of sub dict is different metrics, e.g. memory, bbox_mAP
# value of sub dict is a list of corresponding values of all iterations
log_dicts = [dict() for _ in json_logs]
for json_log, log_dict in zip(json_logs, log_dicts):
with open(json_log, 'r') as log_file:
for i, line in enumerate(log_file):
log = json.loads(line.strip())
# skip the first training info line
if i == 0:
continue
# skip lines without `epoch` field
if 'epoch' not in log:
continue
epoch = log.pop('epoch')
if epoch not in log_dict:
log_dict[epoch] = defaultdict(list)
for k, v in log.items():
log_dict[epoch][k].append(v)
return log_dicts
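# Sketch of the returned structure (descriptive note, not in the original
# file; values are hypothetical):
#   log_dicts[0] == {
#       1: {'mode': [...], 'iter': [...], 'loss': [...], 'time': [...]},
#       2: {...},  # one defaultdict(list) per epoch, one entry per log line
#   }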
def main():
args = parse_args()
json_logs = args.json_logs
for json_log in json_logs:
assert json_log.endswith('.json')
log_dicts = load_json_logs(json_logs)
eval(args.task)(log_dicts, args)
if __name__ == '__main__':
main()
| 7,359 | 34.902439 | 79 | py |
mmdetection | mmdetection-master/tools/analysis_tools/analyze_results.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
from multiprocessing import Pool
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.evaluation import eval_map
from mmdet.core.visualization import imshow_gt_det_bboxes
from mmdet.datasets import build_dataset, get_loading_pipeline
from mmdet.datasets.api_wrappers import pq_compute_single_core
from mmdet.utils import replace_cfg_vals, update_data_root
def bbox_map_eval(det_result, annotation, nproc=4):
"""Evaluate mAP of single image det result.
Args:
det_result (list[list]): [[cls1_det, cls2_det, ...], ...].
The outer list indicates images, and the inner list indicates
per-class detected bboxes.
annotation (dict): Ground truth annotations where keys of
annotations are:
- bboxes: numpy array of shape (n, 4)
- labels: numpy array of shape (n, )
- bboxes_ignore (optional): numpy array of shape (k, 4)
- labels_ignore (optional): numpy array of shape (k, )
nproc (int): Processes used for computing mAP.
Default: 4.
Returns:
float: mAP
"""
# use only bbox det result
if isinstance(det_result, tuple):
bbox_det_result = [det_result[0]]
else:
bbox_det_result = [det_result]
# mAP
iou_thrs = np.linspace(
.5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
processes = []
workers = Pool(processes=nproc)
for thr in iou_thrs:
p = workers.apply_async(eval_map, (bbox_det_result, [annotation]), {
'iou_thr': thr,
'logger': 'silent',
'nproc': 1
})
processes.append(p)
workers.close()
workers.join()
mean_aps = []
for p in processes:
mean_aps.append(p.get()[0])
return sum(mean_aps) / len(mean_aps)
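# Minimal usage sketch (hypothetical data, not part of the original file):
#   ann = dict(bboxes=np.array([[10., 10., 50., 50.]]),
#              labels=np.array([0]))
#   det = [np.array([[12., 11., 49., 52., 0.9]])]  # one (n, 5) array per class
#   bbox_map_eval(det, ann)  # averages AP over IoU 0.50:0.05:0.95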
class ResultVisualizer:
"""Display and save evaluation results.
Args:
        show (bool): Whether to show the image. Default: False.
wait_time (float): Value of waitKey param. Default: 0.
score_thr (float): Minimum score of bboxes to be shown.
Default: 0.
        overlay_gt_pred (bool): Whether to plot gts and predictions on the
            same image. If False, predictions and gts will be plotted on two
            separate images that are concatenated vertically: the upper image
            shows the ground truth and the lower image shows the prediction
            result. Default: False.
"""
def __init__(self,
show=False,
wait_time=0,
score_thr=0,
overlay_gt_pred=False):
self.show = show
self.wait_time = wait_time
self.score_thr = score_thr
self.overlay_gt_pred = overlay_gt_pred
def _save_image_gts_results(self,
dataset,
results,
performances,
out_dir=None):
"""Display or save image with groung truths and predictions from a
model.
Args:
dataset (Dataset): A PyTorch dataset.
results (list): Object detection or panoptic segmentation
results from test results pkl file.
            performances (list[tuple]): A list of (index, performance)
                pairs: samples' indices in the dataset and the model's
                performance on them.
            out_dir (str, optional): The directory to save images.
                Default: None.
"""
mmcv.mkdir_or_exist(out_dir)
for performance_info in performances:
index, performance = performance_info
data_info = dataset.prepare_train_img(index)
# calc save file path
filename = data_info['filename']
if data_info['img_prefix'] is not None:
filename = osp.join(data_info['img_prefix'], filename)
else:
filename = data_info['filename']
fname, name = osp.splitext(osp.basename(filename))
save_filename = fname + '_' + str(round(performance, 3)) + name
out_file = osp.join(out_dir, save_filename)
imshow_gt_det_bboxes(
data_info['img'],
data_info,
results[index],
dataset.CLASSES,
gt_bbox_color=dataset.PALETTE,
gt_text_color=(200, 200, 200),
gt_mask_color=dataset.PALETTE,
det_bbox_color=dataset.PALETTE,
det_text_color=(200, 200, 200),
det_mask_color=dataset.PALETTE,
show=self.show,
score_thr=self.score_thr,
wait_time=self.wait_time,
out_file=out_file,
overlay_gt_pred=self.overlay_gt_pred)
def evaluate_and_show(self,
dataset,
results,
topk=20,
show_dir='work_dir'):
"""Evaluate and show results.
Args:
dataset (Dataset): A PyTorch dataset.
results (list): Object detection or panoptic segmentation
results from test results pkl file.
topk (int): Number of the highest topk and
lowest topk after evaluation index sorting. Default: 20.
            show_dir (str, optional): The directory where images are saved.
                Default: 'work_dir'.
"""
assert topk > 0
if (topk * 2) > len(dataset):
topk = len(dataset) // 2
if isinstance(results[0], dict):
good_samples, bad_samples = self.panoptic_evaluate(
dataset, results, topk=topk)
elif isinstance(results[0], list):
good_samples, bad_samples = self.detection_evaluate(
dataset, results, topk=topk)
elif isinstance(results[0], tuple):
results_ = [result[0] for result in results]
good_samples, bad_samples = self.detection_evaluate(
dataset, results_, topk=topk)
else:
            raise TypeError(
                'The format of result is not supported yet. '
                'Currently, dict for panoptic segmentation and list '
                'or tuple for object detection are supported.')
good_dir = osp.abspath(osp.join(show_dir, 'good'))
bad_dir = osp.abspath(osp.join(show_dir, 'bad'))
self._save_image_gts_results(dataset, results, good_samples, good_dir)
self._save_image_gts_results(dataset, results, bad_samples, bad_dir)
def detection_evaluate(self, dataset, results, topk=20, eval_fn=None):
"""Evaluation for object detection.
Args:
dataset (Dataset): A PyTorch dataset.
results (list): Object detection results from test
results pkl file.
topk (int): Number of the highest topk and
lowest topk after evaluation index sorting. Default: 20.
eval_fn (callable, optional): Eval function, Default: None.
Returns:
            tuple: A tuple containing good samples and bad samples.
                good_mAPs (list[tuple]): Good samples' indices in the
                    dataset and the model's performance on them.
                bad_mAPs (list[tuple]): Bad samples' indices in the
                    dataset and the model's performance on them.
"""
if eval_fn is None:
eval_fn = bbox_map_eval
else:
assert callable(eval_fn)
prog_bar = mmcv.ProgressBar(len(results))
_mAPs = {}
        for i, result in enumerate(results):
# self.dataset[i] should not call directly
# because there is a risk of mismatch
data_info = dataset.prepare_train_img(i)
mAP = eval_fn(result, data_info['ann_info'])
_mAPs[i] = mAP
prog_bar.update()
# descending select topk image
_mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1]))
good_mAPs = _mAPs[-topk:]
bad_mAPs = _mAPs[:topk]
return good_mAPs, bad_mAPs
def panoptic_evaluate(self, dataset, results, topk=20):
"""Evaluation for panoptic segmentation.
Args:
dataset (Dataset): A PyTorch dataset.
results (list): Panoptic segmentation results from test
results pkl file.
topk (int): Number of the highest topk and
lowest topk after evaluation index sorting. Default: 20.
Returns:
            tuple: A tuple containing good samples and bad samples.
                good_pqs (list[tuple]): Good samples' indices in the
                    dataset and the model's performance on them.
                bad_pqs (list[tuple]): Bad samples' indices in the
                    dataset and the model's performance on them.
"""
# image to annotations
gt_json = dataset.coco.img_ann_map
result_files, tmp_dir = dataset.format_results(results)
pred_json = mmcv.load(result_files['panoptic'])['annotations']
pred_folder = osp.join(tmp_dir.name, 'panoptic')
gt_folder = dataset.seg_prefix
pqs = {}
prog_bar = mmcv.ProgressBar(len(results))
for i in range(len(results)):
data_info = dataset.prepare_train_img(i)
image_id = data_info['img_info']['id']
gt_ann = {
'image_id': image_id,
'segments_info': gt_json[image_id],
'file_name': data_info['img_info']['segm_file']
}
pred_ann = pred_json[i]
pq_stat = pq_compute_single_core(
i, [(gt_ann, pred_ann)],
gt_folder,
pred_folder,
dataset.categories,
dataset.file_client,
print_log=False)
pq_results, classwise_results = pq_stat.pq_average(
dataset.categories, isthing=None)
pqs[i] = pq_results['pq']
prog_bar.update()
if tmp_dir is not None:
tmp_dir.cleanup()
# descending select topk image
pqs = list(sorted(pqs.items(), key=lambda kv: kv[1]))
good_pqs = pqs[-topk:]
bad_pqs = pqs[:topk]
return good_pqs, bad_pqs
def parse_args():
parser = argparse.ArgumentParser(
description='MMDet eval image prediction result for each')
parser.add_argument('config', help='test config file path')
parser.add_argument(
        'prediction_path', help='path of the test result file (pkl format)')
parser.add_argument(
'show_dir', help='directory where painted images will be saved')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--wait-time',
type=float,
default=0,
help='the interval of show (s), 0 is block')
parser.add_argument(
'--topk',
default=20,
type=int,
        help='number of the highest-topk and lowest-topk samples '
        'to save after index sorting')
parser.add_argument(
'--show-score-thr',
type=float,
default=0,
help='score threshold (default: 0.)')
parser.add_argument(
'--overlay-gt-pred',
action='store_true',
help='whether to plot gts and predictions on the same image.'
'If False, predictions and gts will be plotted on two same'
'image which will be concatenated in vertical direction.'
'The image above is drawn with gt, and the image below is'
'drawn with the prediction result.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
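# Example command line (hedged sketch; config and result paths are
# hypothetical):
#   python tools/analysis_tools/analyze_results.py my_cfg.py results.pkl \
#       show_dir/ --topk 30 --show-score-thr 0.3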
def main():
args = parse_args()
mmcv.check_file_exist(args.prediction_path)
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
cfg.data.test.pop('samples_per_gpu', 0)
if cfg.data.train.type in ('MultiImageMixDataset', 'ClassBalancedDataset',
'RepeatDataset', 'ConcatDataset'):
cfg.data.test.pipeline = get_loading_pipeline(
cfg.data.train.dataset.pipeline)
else:
cfg.data.test.pipeline = get_loading_pipeline(cfg.data.train.pipeline)
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.prediction_path)
result_visualizer = ResultVisualizer(args.show, args.wait_time,
args.show_score_thr,
args.overlay_gt_pred)
result_visualizer.evaluate_and_show(
dataset, outputs, topk=args.topk, show_dir=args.show_dir)
if __name__ == '__main__':
main()
| 13,742 | 36.143243 | 78 | py |
mmdetection | mmdetection-master/tools/analysis_tools/benchmark.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import time
import torch
from mmcv import Config, DictAction
from mmcv.cnn import fuse_conv_bn
from mmcv.parallel import MMDistributedDataParallel
from mmcv.runner import init_dist, load_checkpoint, wrap_fp16_model
from mmdet.datasets import (build_dataloader, build_dataset,
replace_ImageToTensor)
from mmdet.models import build_detector
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='MMDet benchmark a model')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument(
'--repeat-num',
type=int,
default=1,
help='number of repeat times of measurement for averaging the results')
parser.add_argument(
'--max-iter', type=int, default=2000, help='num of max iter')
parser.add_argument(
'--log-interval', type=int, default=50, help='interval of logging')
parser.add_argument(
'--fuse-conv-bn',
action='store_true',
        help='Whether to fuse conv and bn, this will slightly increase '
'the inference speed')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def measure_inference_speed(cfg, checkpoint, max_iter, log_interval,
is_fuse_conv_bn):
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
# build the dataloader
samples_per_gpu = cfg.data.test.pop('samples_per_gpu', 1)
if samples_per_gpu > 1:
# Replace 'ImageToTensor' to 'DefaultFormatBundle'
cfg.data.test.pipeline = replace_ImageToTensor(cfg.data.test.pipeline)
dataset = build_dataset(cfg.data.test)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
# Because multiple processes will occupy additional CPU resources,
# FPS statistics will be more unstable when workers_per_gpu is not 0.
# It is reasonable to set workers_per_gpu to 0.
workers_per_gpu=0,
dist=True,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
load_checkpoint(model, checkpoint, map_location='cpu')
if is_fuse_conv_bn:
model = fuse_conv_bn(model)
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
model.eval()
# the first several iterations may be very slow so skip them
num_warmup = 5
pure_inf_time = 0
fps = 0
    # benchmark with 2000 images and take the average
for i, data in enumerate(data_loader):
torch.cuda.synchronize()
start_time = time.perf_counter()
with torch.no_grad():
model(return_loss=False, rescale=True, **data)
torch.cuda.synchronize()
elapsed = time.perf_counter() - start_time
if i >= num_warmup:
pure_inf_time += elapsed
if (i + 1) % log_interval == 0:
fps = (i + 1 - num_warmup) / pure_inf_time
print(
f'Done image [{i + 1:<3}/ {max_iter}], '
f'fps: {fps:.1f} img / s, '
f'times per image: {1000 / fps:.1f} ms / img',
flush=True)
if (i + 1) == max_iter:
fps = (i + 1 - num_warmup) / pure_inf_time
print(
f'Overall fps: {fps:.1f} img / s, '
f'times per image: {1000 / fps:.1f} ms / img',
flush=True)
break
return fps
def repeat_measure_inference_speed(cfg,
checkpoint,
max_iter,
log_interval,
is_fuse_conv_bn,
repeat_num=1):
assert repeat_num >= 1
fps_list = []
for _ in range(repeat_num):
        # use a fresh copy of the config for each repeated measurement
cp_cfg = copy.deepcopy(cfg)
fps_list.append(
measure_inference_speed(cp_cfg, checkpoint, max_iter, log_interval,
is_fuse_conv_bn))
if repeat_num > 1:
fps_list_ = [round(fps, 1) for fps in fps_list]
        times_per_image_list_ = [round(1000 / fps, 1) for fps in fps_list]
        mean_fps_ = sum(fps_list_) / len(fps_list_)
        mean_times_per_image_ = sum(times_per_image_list_) / len(
            times_per_image_list_)
        print(
            f'Overall fps: {fps_list_}[{mean_fps_:.1f}] img / s, '
            f'times per image: '
            f'{times_per_image_list_}[{mean_times_per_image_:.1f}] ms / img',
flush=True)
return fps_list
return fps_list[0]
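# Example command line (hedged sketch; the config/checkpoint paths are
# hypothetical, and a distributed launcher is required by this script):
#   python -m torch.distributed.launch --nproc_per_node=1 \
#       tools/analysis_tools/benchmark.py my_cfg.py my_ckpt.pth \
#       --launcher pytorch --repeat-num 3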
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
if args.launcher == 'none':
raise NotImplementedError('Only supports distributed mode')
else:
init_dist(args.launcher, **cfg.dist_params)
repeat_measure_inference_speed(cfg, args.checkpoint, args.max_iter,
args.log_interval, args.fuse_conv_bn,
args.repeat_num)
if __name__ == '__main__':
main()
| 6,638 | 32.872449 | 79 | py |
mmdetection | mmdetection-master/tools/analysis_tools/coco_error_analysis.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
from argparse import ArgumentParser
from multiprocessing import Pool
import matplotlib.pyplot as plt
import numpy as np
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
def makeplot(rs, ps, outDir, class_name, iou_type):
cs = np.vstack([
np.ones((2, 3)),
np.array([0.31, 0.51, 0.74]),
np.array([0.75, 0.31, 0.30]),
np.array([0.36, 0.90, 0.38]),
np.array([0.50, 0.39, 0.64]),
np.array([1, 0.6, 0]),
])
areaNames = ['allarea', 'small', 'medium', 'large']
types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']
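    # Reading guide for the curve types (added summary of the standard COCO
    # error analysis; hedged, not from the original file): C75/C50 are PR at
    # IoU 0.75/0.50, Loc uses IoU 0.10 (localization errors forgiven), Sim
    # additionally removes supercategory confusions, Oth removes all class
    # confusions, BG removes background false positives, and FN fills the
    # remaining gap to perfect recall.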
for i in range(len(areaNames)):
area_ps = ps[..., i, 0]
figure_title = iou_type + '-' + class_name + '-' + areaNames[i]
aps = [ps_.mean() for ps_ in area_ps]
ps_curve = [
ps_.mean(axis=1) if ps_.ndim > 1 else ps_ for ps_ in area_ps
]
ps_curve.insert(0, np.zeros(ps_curve[0].shape))
fig = plt.figure()
ax = plt.subplot(111)
for k in range(len(types)):
ax.plot(rs, ps_curve[k + 1], color=[0, 0, 0], linewidth=0.5)
ax.fill_between(
rs,
ps_curve[k],
ps_curve[k + 1],
color=cs[k],
label=str(f'[{aps[k]:.3f}]' + types[k]),
)
plt.xlabel('recall')
plt.ylabel('precision')
plt.xlim(0, 1.0)
plt.ylim(0, 1.0)
plt.title(figure_title)
plt.legend()
# plt.show()
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def autolabel(ax, rects):
"""Attach a text label above each bar in *rects*, displaying its height."""
for rect in rects:
height = rect.get_height()
if height > 0 and height <= 1: # for percent values
text_label = '{:2.0f}'.format(height * 100)
else:
text_label = '{:2.0f}'.format(height)
ax.annotate(
text_label,
xy=(rect.get_x() + rect.get_width() / 2, height),
xytext=(0, 3), # 3 points vertical offset
textcoords='offset points',
ha='center',
va='bottom',
fontsize='x-small',
)
def makebarplot(rs, ps, outDir, class_name, iou_type):
areaNames = ['allarea', 'small', 'medium', 'large']
types = ['C75', 'C50', 'Loc', 'Sim', 'Oth', 'BG', 'FN']
fig, ax = plt.subplots()
x = np.arange(len(areaNames)) # the areaNames locations
width = 0.60 # the width of the bars
rects_list = []
figure_title = iou_type + '-' + class_name + '-' + 'ap bar plot'
for i in range(len(types) - 1):
type_ps = ps[i, ..., 0]
aps = [ps_.mean() for ps_ in type_ps.T]
rects_list.append(
ax.bar(
x - width / 2 + (i + 1) * width / len(types),
aps,
width / len(types),
label=types[i],
))
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Mean Average Precision (mAP)')
ax.set_title(figure_title)
ax.set_xticks(x)
ax.set_xticklabels(areaNames)
ax.legend()
# Add score texts over bars
for rects in rects_list:
autolabel(ax, rects)
# Save plot
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def get_gt_area_group_numbers(cocoEval):
areaRng = cocoEval.params.areaRng
areaRngStr = [str(aRng) for aRng in areaRng]
areaRngLbl = cocoEval.params.areaRngLbl
areaRngStr2areaRngLbl = dict(zip(areaRngStr, areaRngLbl))
areaRngLbl2Number = dict.fromkeys(areaRngLbl, 0)
for evalImg in cocoEval.evalImgs:
if evalImg:
for gtIgnore in evalImg['gtIgnore']:
if not gtIgnore:
aRngLbl = areaRngStr2areaRngLbl[str(evalImg['aRng'])]
areaRngLbl2Number[aRngLbl] += 1
return areaRngLbl2Number
def make_gt_area_group_numbers_plot(cocoEval, outDir, verbose=True):
areaRngLbl2Number = get_gt_area_group_numbers(cocoEval)
areaRngLbl = areaRngLbl2Number.keys()
if verbose:
print('number of annotations per area group:', areaRngLbl2Number)
# Init figure
fig, ax = plt.subplots()
x = np.arange(len(areaRngLbl)) # the areaNames locations
width = 0.60 # the width of the bars
figure_title = 'number of annotations per area group'
rects = ax.bar(x, areaRngLbl2Number.values(), width)
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_ylabel('Number of annotations')
ax.set_title(figure_title)
ax.set_xticks(x)
ax.set_xticklabels(areaRngLbl)
# Add score texts over bars
autolabel(ax, rects)
# Save plot
fig.tight_layout()
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def make_gt_area_histogram_plot(cocoEval, outDir):
n_bins = 100
areas = [ann['area'] for ann in cocoEval.cocoGt.anns.values()]
# init figure
figure_title = 'gt annotation areas histogram plot'
fig, ax = plt.subplots()
# Set the number of bins
ax.hist(np.sqrt(areas), bins=n_bins)
# Add some text for labels, title and custom x-axis tick labels, etc.
ax.set_xlabel('Squareroot Area')
ax.set_ylabel('Number of annotations')
ax.set_title(figure_title)
# Save plot
fig.tight_layout()
fig.savefig(outDir + f'/{figure_title}.png')
plt.close(fig)
def analyze_individual_category(k,
cocoDt,
cocoGt,
catId,
iou_type,
areas=None):
nm = cocoGt.loadCats(catId)[0]
print(f'--------------analyzing {k + 1}-{nm["name"]}---------------')
ps_ = {}
dt = copy.deepcopy(cocoDt)
nm = cocoGt.loadCats(catId)[0]
imgIds = cocoGt.getImgIds()
dt_anns = dt.dataset['annotations']
select_dt_anns = []
for ann in dt_anns:
if ann['category_id'] == catId:
select_dt_anns.append(ann)
dt.dataset['annotations'] = select_dt_anns
dt.createIndex()
# compute precision but ignore superclass confusion
gt = copy.deepcopy(cocoGt)
child_catIds = gt.getCatIds(supNms=[nm['supercategory']])
for idx, ann in enumerate(gt.dataset['annotations']):
if ann['category_id'] in child_catIds and ann['category_id'] != catId:
gt.dataset['annotations'][idx]['ignore'] = 1
gt.dataset['annotations'][idx]['iscrowd'] = 1
gt.dataset['annotations'][idx]['category_id'] = catId
cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.maxDets = [100]
cocoEval.params.iouThrs = [0.1]
cocoEval.params.useCats = 1
if areas:
cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
[areas[0], areas[1]], [areas[1], areas[2]]]
cocoEval.evaluate()
cocoEval.accumulate()
ps_supercategory = cocoEval.eval['precision'][0, :, k, :, :]
ps_['ps_supercategory'] = ps_supercategory
# compute precision but ignore any class confusion
gt = copy.deepcopy(cocoGt)
for idx, ann in enumerate(gt.dataset['annotations']):
if ann['category_id'] != catId:
gt.dataset['annotations'][idx]['ignore'] = 1
gt.dataset['annotations'][idx]['iscrowd'] = 1
gt.dataset['annotations'][idx]['category_id'] = catId
cocoEval = COCOeval(gt, copy.deepcopy(dt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.maxDets = [100]
cocoEval.params.iouThrs = [0.1]
cocoEval.params.useCats = 1
if areas:
cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
[areas[0], areas[1]], [areas[1], areas[2]]]
cocoEval.evaluate()
cocoEval.accumulate()
ps_allcategory = cocoEval.eval['precision'][0, :, k, :, :]
ps_['ps_allcategory'] = ps_allcategory
return k, ps_
def analyze_results(res_file,
ann_file,
res_types,
out_dir,
extraplots=None,
areas=None):
for res_type in res_types:
assert res_type in ['bbox', 'segm']
if areas:
        assert len(areas) == 3, ('3 integers should be specified as areas, '
                                 'representing 3 area regions')
directory = os.path.dirname(out_dir + '/')
if not os.path.exists(directory):
print(f'-------------create {out_dir}-----------------')
os.makedirs(directory)
cocoGt = COCO(ann_file)
cocoDt = cocoGt.loadRes(res_file)
imgIds = cocoGt.getImgIds()
for res_type in res_types:
res_out_dir = out_dir + '/' + res_type + '/'
res_directory = os.path.dirname(res_out_dir)
if not os.path.exists(res_directory):
print(f'-------------create {res_out_dir}-----------------')
os.makedirs(res_directory)
iou_type = res_type
cocoEval = COCOeval(
copy.deepcopy(cocoGt), copy.deepcopy(cocoDt), iou_type)
cocoEval.params.imgIds = imgIds
cocoEval.params.iouThrs = [0.75, 0.5, 0.1]
cocoEval.params.maxDets = [100]
if areas:
cocoEval.params.areaRng = [[0**2, areas[2]], [0**2, areas[0]],
[areas[0], areas[1]],
[areas[1], areas[2]]]
cocoEval.evaluate()
cocoEval.accumulate()
ps = cocoEval.eval['precision']
ps = np.vstack([ps, np.zeros((4, *ps.shape[1:]))])
catIds = cocoGt.getCatIds()
recThrs = cocoEval.params.recThrs
with Pool(processes=48) as pool:
args = [(k, cocoDt, cocoGt, catId, iou_type, areas)
for k, catId in enumerate(catIds)]
analyze_results = pool.starmap(analyze_individual_category, args)
for k, catId in enumerate(catIds):
nm = cocoGt.loadCats(catId)[0]
print(f'--------------saving {k + 1}-{nm["name"]}---------------')
analyze_result = analyze_results[k]
assert k == analyze_result[0]
ps_supercategory = analyze_result[1]['ps_supercategory']
ps_allcategory = analyze_result[1]['ps_allcategory']
# compute precision but ignore superclass confusion
ps[3, :, k, :, :] = ps_supercategory
# compute precision but ignore any class confusion
ps[4, :, k, :, :] = ps_allcategory
# fill in background and false negative errors and plot
ps[ps == -1] = 0
ps[5, :, k, :, :] = ps[4, :, k, :, :] > 0
ps[6, :, k, :, :] = 1.0
makeplot(recThrs, ps[:, :, k], res_out_dir, nm['name'], iou_type)
if extraplots:
makebarplot(recThrs, ps[:, :, k], res_out_dir, nm['name'],
iou_type)
makeplot(recThrs, ps, res_out_dir, 'allclass', iou_type)
if extraplots:
makebarplot(recThrs, ps, res_out_dir, 'allclass', iou_type)
make_gt_area_group_numbers_plot(
cocoEval=cocoEval, outDir=res_out_dir, verbose=True)
make_gt_area_histogram_plot(cocoEval=cocoEval, outDir=res_out_dir)
def main():
parser = ArgumentParser(description='COCO Error Analysis Tool')
parser.add_argument('result', help='result file (json format) path')
parser.add_argument('out_dir', help='dir to save analyze result images')
parser.add_argument(
'--ann',
default='data/coco/annotations/instances_val2017.json',
help='annotation file path')
parser.add_argument(
'--types', type=str, nargs='+', default=['bbox'], help='result types')
parser.add_argument(
'--extraplots',
action='store_true',
help='export extra bar/stat plots')
parser.add_argument(
'--areas',
type=int,
nargs='+',
default=[1024, 9216, 10000000000],
help='area regions')
args = parser.parse_args()
analyze_results(
args.result,
args.ann,
args.types,
out_dir=args.out_dir,
extraplots=args.extraplots,
areas=args.areas)
if __name__ == '__main__':
main()
| 12,389 | 35.441176 | 79 | py |
mmdetection | mmdetection-master/tools/analysis_tools/coco_occluded_separated_recall.py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser
import mmcv
from mmcv.utils import print_log
from mmdet.datasets import OccludedSeparatedCocoDataset
def main():
parser = ArgumentParser(
description='Compute recall of COCO occluded and separated masks '
'presented in paper https://arxiv.org/abs/2210.10046.')
parser.add_argument('result', help='result file (pkl format) path')
parser.add_argument('--out', help='file path to save evaluation results')
parser.add_argument(
'--score-thr',
type=float,
default=0.3,
help='Score threshold for the recall calculation. Defaults to 0.3')
parser.add_argument(
'--iou-thr',
type=float,
default=0.75,
help='IoU threshold for the recall calculation. Defaults to 0.75.')
parser.add_argument(
'--ann',
default='data/coco/annotations/instances_val2017.json',
help='coco annotation file path')
args = parser.parse_args()
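    # Example command line (hedged sketch; the result path is hypothetical):
    #   python tools/analysis_tools/coco_occluded_separated_recall.py \
    #       results.pkl --out occluded_separated_recall.json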
results = mmcv.load(args.result)
assert isinstance(results[0], tuple), \
'The results must be predicted by instance segmentation model.'
dataset = OccludedSeparatedCocoDataset(
ann_file=args.ann, pipeline=[], test_mode=True)
metric_res = dataset.evaluate(results)
if args.out is not None:
mmcv.dump(metric_res, args.out)
print_log(f'Evaluation results have been saved to {args.out}.')
if __name__ == '__main__':
main()
| 1,505 | 32.466667 | 77 | py |
mmdetection | mmdetection-master/tools/analysis_tools/confusion_matrix.py | import argparse
import os
import matplotlib.pyplot as plt
import mmcv
import numpy as np
from matplotlib.ticker import MultipleLocator
from mmcv import Config, DictAction
from mmcv.ops import nms
from mmdet.core.evaluation.bbox_overlaps import bbox_overlaps
from mmdet.datasets import build_dataset
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(
description='Generate confusion matrix from detection results')
parser.add_argument('config', help='test config file path')
parser.add_argument(
        'prediction_path', help='path of the test result file (.pkl format)')
parser.add_argument(
'save_dir', help='directory where confusion matrix will be saved')
parser.add_argument(
'--show', action='store_true', help='show confusion matrix')
parser.add_argument(
'--color-theme',
default='plasma',
help='theme of the matrix color map')
parser.add_argument(
'--score-thr',
type=float,
default=0.3,
help='score threshold to filter detection bboxes')
parser.add_argument(
'--tp-iou-thr',
type=float,
default=0.5,
help='IoU threshold to be considered as matched')
parser.add_argument(
'--nms-iou-thr',
type=float,
default=None,
        help='nms IoU threshold, only applied when users want to change the '
'nms IoU threshold.')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
return args
def calculate_confusion_matrix(dataset,
results,
score_thr=0,
nms_iou_thr=None,
tp_iou_thr=0.5):
"""Calculate the confusion matrix.
Args:
dataset (Dataset): Test or val dataset.
results (list[ndarray]): A list of detection results in each image.
score_thr (float|optional): Score threshold to filter bboxes.
Default: 0.
        nms_iou_thr (float|optional): nms IoU threshold. The detections have
            already been processed by nms inside the detector, so this is
            only applied when users want to change the nms IoU threshold.
            Default: None.
tp_iou_thr (float|optional): IoU threshold to be considered as matched.
Default: 0.5.
"""
num_classes = len(dataset.CLASSES)
confusion_matrix = np.zeros(shape=[num_classes + 1, num_classes + 1])
assert len(dataset) == len(results)
prog_bar = mmcv.ProgressBar(len(results))
for idx, per_img_res in enumerate(results):
if isinstance(per_img_res, tuple):
res_bboxes, _ = per_img_res
else:
res_bboxes = per_img_res
ann = dataset.get_ann_info(idx)
gt_bboxes = ann['bboxes']
labels = ann['labels']
analyze_per_img_dets(confusion_matrix, gt_bboxes, labels, res_bboxes,
score_thr, tp_iou_thr, nms_iou_thr)
prog_bar.update()
return confusion_matrix
def analyze_per_img_dets(confusion_matrix,
gt_bboxes,
gt_labels,
result,
score_thr=0,
tp_iou_thr=0.5,
nms_iou_thr=None):
"""Analyze detection results on each image.
Args:
confusion_matrix (ndarray): The confusion matrix,
has shape (num_classes + 1, num_classes + 1).
gt_bboxes (ndarray): Ground truth bboxes, has shape (num_gt, 4).
gt_labels (ndarray): Ground truth labels, has shape (num_gt).
result (ndarray): Detection results, has shape
(num_classes, num_bboxes, 5).
score_thr (float): Score threshold to filter bboxes.
Default: 0.
tp_iou_thr (float): IoU threshold to be considered as matched.
Default: 0.5.
        nms_iou_thr (float|optional): nms IoU threshold. The detections have
            already been processed by nms inside the detector, so this is
            only applied when users want to change the nms IoU threshold.
            Default: None.
"""
true_positives = np.zeros_like(gt_labels)
for det_label, det_bboxes in enumerate(result):
if nms_iou_thr:
det_bboxes, _ = nms(
det_bboxes[:, :4],
det_bboxes[:, -1],
nms_iou_thr,
score_threshold=score_thr)
ious = bbox_overlaps(det_bboxes[:, :4], gt_bboxes)
for i, det_bbox in enumerate(det_bboxes):
score = det_bbox[4]
det_match = 0
if score >= score_thr:
for j, gt_label in enumerate(gt_labels):
if ious[i, j] >= tp_iou_thr:
det_match += 1
if gt_label == det_label:
true_positives[j] += 1 # TP
confusion_matrix[gt_label, det_label] += 1
if det_match == 0: # BG FP
confusion_matrix[-1, det_label] += 1
for num_tp, gt_label in zip(true_positives, gt_labels):
if num_tp == 0: # FN
confusion_matrix[gt_label, -1] += 1
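# Layout note (added for clarity, not in the original file): rows of the
# matrix index the ground-truth class and columns the predicted class; the
# extra last column collects false negatives per gt class and the extra last
# row collects background false positives per predicted class.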
def plot_confusion_matrix(confusion_matrix,
labels,
save_dir=None,
show=True,
title='Normalized Confusion Matrix',
color_theme='plasma'):
"""Draw confusion matrix with matplotlib.
Args:
confusion_matrix (ndarray): The confusion matrix.
labels (list[str]): List of class names.
save_dir (str|optional): If set, save the confusion matrix plot to the
given path. Default: None.
show (bool): Whether to show the plot. Default: True.
title (str): Title of the plot. Default: `Normalized Confusion Matrix`.
color_theme (str): Theme of the matrix color map. Default: `plasma`.
"""
# normalize the confusion matrix
per_label_sums = confusion_matrix.sum(axis=1)[:, np.newaxis]
confusion_matrix = \
confusion_matrix.astype(np.float32) / per_label_sums * 100
num_classes = len(labels)
fig, ax = plt.subplots(
figsize=(0.5 * num_classes, 0.5 * num_classes * 0.8), dpi=180)
cmap = plt.get_cmap(color_theme)
im = ax.imshow(confusion_matrix, cmap=cmap)
plt.colorbar(mappable=im, ax=ax)
title_font = {'weight': 'bold', 'size': 12}
ax.set_title(title, fontdict=title_font)
label_font = {'size': 10}
plt.ylabel('Ground Truth Label', fontdict=label_font)
plt.xlabel('Prediction Label', fontdict=label_font)
# draw locator
xmajor_locator = MultipleLocator(1)
xminor_locator = MultipleLocator(0.5)
ax.xaxis.set_major_locator(xmajor_locator)
ax.xaxis.set_minor_locator(xminor_locator)
ymajor_locator = MultipleLocator(1)
yminor_locator = MultipleLocator(0.5)
ax.yaxis.set_major_locator(ymajor_locator)
ax.yaxis.set_minor_locator(yminor_locator)
# draw grid
ax.grid(True, which='minor', linestyle='-')
# draw label
ax.set_xticks(np.arange(num_classes))
ax.set_yticks(np.arange(num_classes))
ax.set_xticklabels(labels)
ax.set_yticklabels(labels)
ax.tick_params(
axis='x', bottom=False, top=True, labelbottom=False, labeltop=True)
plt.setp(
ax.get_xticklabels(), rotation=45, ha='left', rotation_mode='anchor')
    # draw confusion matrix values
for i in range(num_classes):
for j in range(num_classes):
ax.text(
j,
i,
'{}%'.format(
int(confusion_matrix[
i,
j]) if not np.isnan(confusion_matrix[i, j]) else -1),
ha='center',
va='center',
color='w',
size=7)
ax.set_ylim(len(confusion_matrix) - 0.5, -0.5) # matplotlib>3.1.1
fig.tight_layout()
if save_dir is not None:
plt.savefig(
os.path.join(save_dir, 'confusion_matrix.png'), format='png')
if show:
plt.show()
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
results = mmcv.load(args.prediction_path)
assert isinstance(results, list)
if isinstance(results[0], list):
pass
elif isinstance(results[0], tuple):
results = [result[0] for result in results]
else:
raise TypeError('invalid type of prediction results')
if isinstance(cfg.data.test, dict):
cfg.data.test.test_mode = True
elif isinstance(cfg.data.test, list):
for ds_cfg in cfg.data.test:
ds_cfg.test_mode = True
dataset = build_dataset(cfg.data.test)
confusion_matrix = calculate_confusion_matrix(dataset, results,
args.score_thr,
args.nms_iou_thr,
args.tp_iou_thr)
plot_confusion_matrix(
confusion_matrix,
dataset.CLASSES + ('background', ),
save_dir=args.save_dir,
show=args.show,
color_theme=args.color_theme)
if __name__ == '__main__':
main()
| 9,981 | 35.430657 | 79 | py |
mmdetection | mmdetection-master/tools/analysis_tools/eval_metric.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import mmcv
from mmcv import Config, DictAction
from mmdet.datasets import build_dataset
from mmdet.utils import replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Evaluate metric of the '
'results saved in pkl format')
parser.add_argument('config', help='Config of the model')
parser.add_argument('pkl_results', help='Results in pickle format')
parser.add_argument(
'--format-only',
action='store_true',
        help='Format the output results without performing evaluation. It is '
'useful when you want to format the result to a specific format and '
'submit it to the test server')
parser.add_argument(
'--eval',
type=str,
nargs='+',
        help='Evaluation metrics, which depend on the dataset, e.g., "bbox",'
' "segm", "proposal" for COCO, and "mAP", "recall" for PASCAL VOC')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--eval-options',
nargs='+',
action=DictAction,
help='custom options for evaluation, the key-value pair in xxx=yyy '
'format will be kwargs for dataset.evaluate() function')
args = parser.parse_args()
return args
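# Example command line (hedged sketch; the config and pkl paths are
# hypothetical):
#   python tools/analysis_tools/eval_metric.py my_cfg.py results.pkl \
#       --eval bbox segm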
def main():
args = parse_args()
cfg = Config.fromfile(args.config)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
assert args.eval or args.format_only, (
'Please specify at least one operation (eval/format the results) with '
'the argument "--eval", "--format-only"')
if args.eval and args.format_only:
raise ValueError('--eval and --format_only cannot be both specified')
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
cfg.data.test.test_mode = True
dataset = build_dataset(cfg.data.test)
outputs = mmcv.load(args.pkl_results)
kwargs = {} if args.eval_options is None else args.eval_options
if args.format_only:
dataset.format_results(outputs, **kwargs)
if args.eval:
eval_kwargs = cfg.get('evaluation', {}).copy()
        # hard-coded way to remove EvalHook args
for key in [
'interval', 'tmpdir', 'start', 'gpu_collect', 'save_best',
'rule'
]:
eval_kwargs.pop(key, None)
eval_kwargs.update(dict(metric=args.eval, **kwargs))
print(dataset.evaluate(outputs, **eval_kwargs))
if __name__ == '__main__':
main()
| 3,141 | 34.303371 | 79 | py |
mmdetection | mmdetection-master/tools/analysis_tools/get_flops.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import numpy as np
import torch
from mmcv import Config, DictAction
from mmdet.models import build_detector
try:
from mmcv.cnn import get_model_complexity_info
except ImportError:
raise ImportError('Please upgrade mmcv to >0.6.2')
def parse_args():
parser = argparse.ArgumentParser(description='Train a detector')
parser.add_argument('config', help='train config file path')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[1280, 800],
help='input image size')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
parser.add_argument(
'--size-divisor',
type=int,
default=32,
        help='Pad the input image to the minimum size that is divisible '
        'by size_divisor; -1 means do not pad the image.')
args = parser.parse_args()
return args
def main():
args = parse_args()
if len(args.shape) == 1:
h = w = args.shape[0]
elif len(args.shape) == 2:
h, w = args.shape
else:
raise ValueError('invalid input shape')
ori_shape = (3, h, w)
divisor = args.size_divisor
if divisor > 0:
h = int(np.ceil(h / divisor)) * divisor
w = int(np.ceil(w / divisor)) * divisor
input_shape = (3, h, w)
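    # Arithmetic sketch (illustrative, not in the original file): with
    # --shape 1333 800 and the default divisor 32, h becomes
    # ceil(1333 / 32) * 32 = 1344 while w stays 800, so FLOPs are reported
    # for a (3, 1344, 800) input.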
cfg = Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
model = build_detector(
cfg.model,
train_cfg=cfg.get('train_cfg'),
test_cfg=cfg.get('test_cfg'))
if torch.cuda.is_available():
model.cuda()
model.eval()
if hasattr(model, 'forward_dummy'):
model.forward = model.forward_dummy
else:
raise NotImplementedError(
            'FLOPs counter is currently not supported with {}'.
format(model.__class__.__name__))
flops, params = get_model_complexity_info(model, input_shape)
split_line = '=' * 30
if divisor > 0 and \
input_shape != ori_shape:
        print(f'{split_line}\nThe size divisor changed the input shape '
              f'from {ori_shape} to {input_shape}\n')
print(f'{split_line}\nInput shape: {input_shape}\n'
f'Flops: {flops}\nParams: {params}\n{split_line}')
print('!!!Please be cautious if you use the results in papers. '
'You may need to check if all ops are supported and verify that the '
'flops computation is correct.')
if __name__ == '__main__':
main()
| 2,992 | 29.540816 | 79 | py |
mmdetection | mmdetection-master/tools/analysis_tools/optimize_anchors.py | # Copyright (c) OpenMMLab. All rights reserved.
"""Optimize anchor settings on a specific dataset.
This script provides two method to optimize YOLO anchors including k-means
anchor cluster and differential evolution. You can use ``--algorithm k-means``
and ``--algorithm differential_evolution`` to switch two method.
Example:
Use k-means anchor cluster::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm k-means --input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
Use differential evolution to optimize anchors::
python tools/analysis_tools/optimize_anchors.py ${CONFIG} \
--algorithm differential_evolution \
--input-shape ${INPUT_SHAPE [WIDTH HEIGHT]} \
--output-dir ${OUTPUT_DIR}
"""
import argparse
import os.path as osp
import mmcv
import numpy as np
import torch
from mmcv import Config
from scipy.optimize import differential_evolution
from mmdet.core import bbox_cxcywh_to_xyxy, bbox_overlaps, bbox_xyxy_to_cxcywh
from mmdet.datasets import build_dataset
from mmdet.utils import get_root_logger, replace_cfg_vals, update_data_root
def parse_args():
parser = argparse.ArgumentParser(description='Optimize anchor parameters.')
parser.add_argument('config', help='Train config file path.')
parser.add_argument(
'--device', default='cuda:0', help='Device used for calculating.')
parser.add_argument(
'--input-shape',
type=int,
nargs='+',
default=[608, 608],
help='input image size')
parser.add_argument(
'--algorithm',
default='differential_evolution',
        help='Algorithm used for anchor optimizing. '
'Support k-means and differential_evolution for YOLO.')
parser.add_argument(
'--iters',
default=1000,
type=int,
help='Maximum iterations for optimizer.')
parser.add_argument(
'--output-dir',
default=None,
type=str,
help='Path to save anchor optimize result.')
args = parser.parse_args()
return args
class BaseAnchorOptimizer:
"""Base class for anchor optimizer.
Args:
dataset (obj:`Dataset`): Dataset object.
input_shape (list[int]): Input image shape of the model.
Format in [width, height].
logger (obj:`logging.Logger`): The logger for logging.
device (str, optional): Device used for calculating.
Default: 'cuda:0'
out_dir (str, optional): Path to save anchor optimize result.
Default: None
"""
def __init__(self,
dataset,
input_shape,
logger,
device='cuda:0',
out_dir=None):
self.dataset = dataset
self.input_shape = input_shape
self.logger = logger
self.device = device
self.out_dir = out_dir
bbox_whs, img_shapes = self.get_whs_and_shapes()
ratios = img_shapes.max(1, keepdims=True) / np.array([input_shape])
# resize to input shape
self.bbox_whs = bbox_whs / ratios
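        # Note (added for clarity): ``ratios`` holds, per image, the longer
        # image side divided by each input dimension, so dividing the bbox
        # (w, h) pairs by it roughly mimics the keep-ratio resize applied
        # during training.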
def get_whs_and_shapes(self):
"""Get widths and heights of bboxes and shapes of images.
Returns:
tuple[np.ndarray]: Array of bbox shapes and array of image
shapes with shape (num_bboxes, 2) in [width, height] format.
"""
self.logger.info('Collecting bboxes from annotation...')
bbox_whs = []
img_shapes = []
prog_bar = mmcv.ProgressBar(len(self.dataset))
for idx in range(len(self.dataset)):
ann = self.dataset.get_ann_info(idx)
data_info = self.dataset.data_infos[idx]
img_shape = np.array([data_info['width'], data_info['height']])
gt_bboxes = ann['bboxes']
for bbox in gt_bboxes:
wh = bbox[2:4] - bbox[0:2]
img_shapes.append(img_shape)
bbox_whs.append(wh)
prog_bar.update()
print('\n')
bbox_whs = np.array(bbox_whs)
img_shapes = np.array(img_shapes)
self.logger.info(f'Collected {bbox_whs.shape[0]} bboxes.')
return bbox_whs, img_shapes
def get_zero_center_bbox_tensor(self):
"""Get a tensor of bboxes centered at (0, 0).
Returns:
Tensor: Tensor of bboxes with shape (num_bboxes, 4)
in [xmin, ymin, xmax, ymax] format.
"""
whs = torch.from_numpy(self.bbox_whs).to(
self.device, dtype=torch.float32)
bboxes = bbox_cxcywh_to_xyxy(
torch.cat([torch.zeros_like(whs), whs], dim=1))
return bboxes
def optimize(self):
raise NotImplementedError
def save_result(self, anchors, path=None):
anchor_results = []
for w, h in anchors:
anchor_results.append([round(w), round(h)])
self.logger.info(f'Anchor optimize result:{anchor_results}')
if path:
json_path = osp.join(path, 'anchor_optimize_result.json')
mmcv.dump(anchor_results, json_path)
self.logger.info(f'Result saved in {json_path}')
class YOLOKMeansAnchorOptimizer(BaseAnchorOptimizer):
r"""YOLO anchor optimizer using k-means. Code refer to `AlexeyAB/darknet.
<https://github.com/AlexeyAB/darknet/blob/master/src/detector.c>`_.
Args:
num_anchors (int) : Number of anchors.
iters (int): Maximum iterations for k-means.
"""
def __init__(self, num_anchors, iters, **kwargs):
super(YOLOKMeansAnchorOptimizer, self).__init__(**kwargs)
self.num_anchors = num_anchors
self.iters = iters
def optimize(self):
anchors = self.kmeans_anchors()
self.save_result(anchors, self.out_dir)
def kmeans_anchors(self):
self.logger.info(
f'Start cluster {self.num_anchors} YOLO anchors with K-means...')
bboxes = self.get_zero_center_bbox_tensor()
cluster_center_idx = torch.randint(
0, bboxes.shape[0], (self.num_anchors, )).to(self.device)
assignments = torch.zeros((bboxes.shape[0], )).to(self.device)
cluster_centers = bboxes[cluster_center_idx]
if self.num_anchors == 1:
cluster_centers = self.kmeans_maximization(bboxes, assignments,
cluster_centers)
anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
return anchors
prog_bar = mmcv.ProgressBar(self.iters)
for i in range(self.iters):
converged, assignments = self.kmeans_expectation(
bboxes, assignments, cluster_centers)
if converged:
self.logger.info(f'K-means process has converged at iter {i}.')
break
cluster_centers = self.kmeans_maximization(bboxes, assignments,
cluster_centers)
prog_bar.update()
print('\n')
avg_iou = bbox_overlaps(bboxes,
cluster_centers).max(1)[0].mean().item()
anchors = bbox_xyxy_to_cxcywh(cluster_centers)[:, 2:].cpu().numpy()
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
        self.logger.info(f'Anchor clustering finished. Average IoU: {avg_iou}')
return anchors
def kmeans_maximization(self, bboxes, assignments, centers):
"""Maximization part of EM algorithm(Expectation-Maximization)"""
new_centers = torch.zeros_like(centers)
for i in range(centers.shape[0]):
mask = (assignments == i)
if mask.sum():
new_centers[i, :] = bboxes[mask].mean(0)
return new_centers
def kmeans_expectation(self, bboxes, assignments, centers):
"""Expectation part of EM algorithm(Expectation-Maximization)"""
ious = bbox_overlaps(bboxes, centers)
closest = ious.argmax(1)
converged = (closest == assignments).all()
return converged, closest
class YOLODEAnchorOptimizer(BaseAnchorOptimizer):
"""YOLO anchor optimizer using differential evolution algorithm.
Args:
num_anchors (int) : Number of anchors.
        iters (int): Maximum iterations for differential evolution.
strategy (str): The differential evolution strategy to use.
Should be one of:
- 'best1bin'
- 'best1exp'
- 'rand1exp'
- 'randtobest1exp'
- 'currenttobest1exp'
- 'best2exp'
- 'rand2exp'
- 'randtobest1bin'
- 'currenttobest1bin'
- 'best2bin'
- 'rand2bin'
- 'rand1bin'
Default: 'best1bin'.
population_size (int): Total population size of evolution algorithm.
Default: 15.
        convergence_thr (float): Tolerance for convergence. The optimization
            stops when ``np.std(pop) <= abs(convergence_thr)
            + convergence_thr * np.abs(np.mean(population_energies))``.
            Default: 0.0001.
mutation (tuple[float]): Range of dithering randomly changes the
mutation constant. Default: (0.5, 1).
recombination (float): Recombination constant of crossover probability.
Default: 0.7.
"""
def __init__(self,
num_anchors,
iters,
strategy='best1bin',
population_size=15,
convergence_thr=0.0001,
mutation=(0.5, 1),
recombination=0.7,
**kwargs):
super(YOLODEAnchorOptimizer, self).__init__(**kwargs)
self.num_anchors = num_anchors
self.iters = iters
self.strategy = strategy
self.population_size = population_size
self.convergence_thr = convergence_thr
self.mutation = mutation
self.recombination = recombination
def optimize(self):
anchors = self.differential_evolution()
self.save_result(anchors, self.out_dir)
def differential_evolution(self):
bboxes = self.get_zero_center_bbox_tensor()
bounds = []
for i in range(self.num_anchors):
bounds.extend([(0, self.input_shape[0]), (0, self.input_shape[1])])
result = differential_evolution(
func=self.avg_iou_cost,
bounds=bounds,
args=(bboxes, ),
strategy=self.strategy,
maxiter=self.iters,
popsize=self.population_size,
tol=self.convergence_thr,
mutation=self.mutation,
recombination=self.recombination,
updating='immediate',
disp=True)
self.logger.info(
            f'Anchor evolution finished. Average IoU: {1 - result.fun}')
anchors = [(w, h) for w, h in zip(result.x[::2], result.x[1::2])]
anchors = sorted(anchors, key=lambda x: x[0] * x[1])
return anchors
@staticmethod
def avg_iou_cost(anchor_params, bboxes):
assert len(anchor_params) % 2 == 0
anchor_whs = torch.tensor(
[[w, h]
for w, h in zip(anchor_params[::2], anchor_params[1::2])]).to(
bboxes.device, dtype=bboxes.dtype)
anchor_boxes = bbox_cxcywh_to_xyxy(
torch.cat([torch.zeros_like(anchor_whs), anchor_whs], dim=1))
ious = bbox_overlaps(bboxes, anchor_boxes)
max_ious, _ = ious.max(1)
cost = 1 - max_ious.mean().item()
return cost
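# Worked sketch (hypothetical numbers, not in the original file): for
# anchor_params = [w1, h1, w2, h2] two zero-centered anchor boxes are built;
# if the best-matching anchor reaches an average IoU of 0.6 over all gt
# boxes, the returned cost is 1 - 0.6 = 0.4, which differential_evolution
# then minimizes.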
def main():
logger = get_root_logger()
args = parse_args()
cfg = args.config
cfg = Config.fromfile(cfg)
# replace the ${key} with the value of cfg.key
cfg = replace_cfg_vals(cfg)
# update data root according to MMDET_DATASETS
update_data_root(cfg)
input_shape = args.input_shape
assert len(input_shape) == 2
anchor_type = cfg.model.bbox_head.anchor_generator.type
assert anchor_type == 'YOLOAnchorGenerator', \
        f'Only YOLOAnchorGenerator can be optimized, but got {anchor_type}.'
base_sizes = cfg.model.bbox_head.anchor_generator.base_sizes
num_anchors = sum([len(sizes) for sizes in base_sizes])
train_data_cfg = cfg.data.train
while 'dataset' in train_data_cfg:
train_data_cfg = train_data_cfg['dataset']
dataset = build_dataset(train_data_cfg)
if args.algorithm == 'k-means':
optimizer = YOLOKMeansAnchorOptimizer(
dataset=dataset,
input_shape=input_shape,
device=args.device,
num_anchors=num_anchors,
iters=args.iters,
logger=logger,
out_dir=args.output_dir)
elif args.algorithm == 'differential_evolution':
optimizer = YOLODEAnchorOptimizer(
dataset=dataset,
input_shape=input_shape,
device=args.device,
num_anchors=num_anchors,
iters=args.iters,
logger=logger,
out_dir=args.output_dir)
else:
raise NotImplementedError(
            f'Only k-means and differential_evolution are supported, '
            f'but got {args.algorithm}')
optimizer.optimize()
if __name__ == '__main__':
main()
| 13,359 | 34.437666 | 79 | py |
mmdetection | mmdetection-master/tools/analysis_tools/robustness_eval.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from argparse import ArgumentParser
import mmcv
import numpy as np
def print_coco_results(results):
def _print(result, ap=1, iouThr=None, areaRng='all', maxDets=100):
titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
typeStr = '(AP)' if ap == 1 else '(AR)'
iouStr = '0.50:0.95' \
if iouThr is None else f'{iouThr:0.2f}'
iStr = f' {titleStr:<18} {typeStr} @[ IoU={iouStr:<9} | '
iStr += f'area={areaRng:>6s} | maxDets={maxDets:>3d} ] = {result:0.3f}'
print(iStr)
    # _print only reports to stdout; no values need to be collected
    _print(results[0], 1)
    _print(results[1], 1, iouThr=.5)
    _print(results[2], 1, iouThr=.75)
    _print(results[3], 1, areaRng='small')
    _print(results[4], 1, areaRng='medium')
    _print(results[5], 1, areaRng='large')
    _print(results[6], 0, maxDets=1)
    _print(results[7], 0, maxDets=10)
    _print(results[8], 0)
    _print(results[9], 0, areaRng='small')
    _print(results[10], 0, areaRng='medium')
    _print(results[11], 0, areaRng='large')
def get_coco_style_results(filename,
task='bbox',
metric=None,
prints='mPC',
aggregate='benchmark'):
assert aggregate in ['benchmark', 'all']
if prints == 'all':
prints = ['P', 'mPC', 'rPC']
elif isinstance(prints, str):
prints = [prints]
for p in prints:
assert p in ['P', 'mPC', 'rPC']
if metric is None:
metrics = [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100',
'ARs', 'ARm', 'ARl'
]
elif isinstance(metric, list):
metrics = metric
else:
metrics = [metric]
for metric_name in metrics:
assert metric_name in [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10', 'AR100',
'ARs', 'ARm', 'ARl'
]
eval_output = mmcv.load(filename)
num_distortions = len(list(eval_output.keys()))
results = np.zeros((num_distortions, 6, len(metrics)), dtype='float32')
for corr_i, distortion in enumerate(eval_output):
for severity in eval_output[distortion]:
for metric_j, metric_name in enumerate(metrics):
mAP = eval_output[distortion][severity][task][metric_name]
results[corr_i, severity, metric_j] = mAP
P = results[0, 0, :]
if aggregate == 'benchmark':
mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
else:
mPC = np.mean(results[:, 1:, :], axis=(0, 1))
rPC = mPC / P
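    # Robustness-benchmark convention: P is performance on clean data
    # (severity 0), mPC averages over corruption severities 1-5, and
    # rPC = mPC / P measures performance relative to clean data.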
print(f'\nmodel: {osp.basename(filename)}')
if metric is None:
if 'P' in prints:
print(f'Performance on Clean Data [P] ({task})')
print_coco_results(P)
if 'mPC' in prints:
print(f'Mean Performance under Corruption [mPC] ({task})')
print_coco_results(mPC)
if 'rPC' in prints:
print(f'Relative Performance under Corruption [rPC] ({task})')
print_coco_results(rPC)
else:
if 'P' in prints:
print(f'Performance on Clean Data [P] ({task})')
for metric_i, metric_name in enumerate(metrics):
print(f'{metric_name:5} = {P[metric_i]:0.3f}')
if 'mPC' in prints:
print(f'Mean Performance under Corruption [mPC] ({task})')
for metric_i, metric_name in enumerate(metrics):
print(f'{metric_name:5} = {mPC[metric_i]:0.3f}')
if 'rPC' in prints:
print(f'Relative Performance under Corruption [rPC] ({task})')
for metric_i, metric_name in enumerate(metrics):
print(f'{metric_name:5} => {rPC[metric_i] * 100:0.1f} %')
return results
def get_voc_style_results(filename, prints='mPC', aggregate='benchmark'):
assert aggregate in ['benchmark', 'all']
if prints == 'all':
prints = ['P', 'mPC', 'rPC']
elif isinstance(prints, str):
prints = [prints]
for p in prints:
assert p in ['P', 'mPC', 'rPC']
eval_output = mmcv.load(filename)
    num_distortions = len(eval_output)
results = np.zeros((num_distortions, 6, 20), dtype='float32')
for i, distortion in enumerate(eval_output):
for severity in eval_output[distortion]:
mAP = [
eval_output[distortion][severity][j]['ap']
for j in range(len(eval_output[distortion][severity]))
]
results[i, severity, :] = mAP
P = results[0, 0, :]
if aggregate == 'benchmark':
mPC = np.mean(results[:15, 1:, :], axis=(0, 1))
else:
mPC = np.mean(results[:, 1:, :], axis=(0, 1))
rPC = mPC / P
print(f'\nmodel: {osp.basename(filename)}')
if 'P' in prints:
print(f'Performance on Clean Data [P] in AP50 = {np.mean(P):0.3f}')
if 'mPC' in prints:
print('Mean Performance under Corruption [mPC] in AP50 = '
f'{np.mean(mPC):0.3f}')
if 'rPC' in prints:
print('Relative Performance under Corruption [rPC] in % = '
f'{np.mean(rPC) * 100:0.1f}')
return np.mean(results, axis=2, keepdims=True)
def get_results(filename,
dataset='coco',
task='bbox',
metric=None,
prints='mPC',
aggregate='benchmark'):
assert dataset in ['coco', 'voc', 'cityscapes']
if dataset in ['coco', 'cityscapes']:
results = get_coco_style_results(
filename,
task=task,
metric=metric,
prints=prints,
aggregate=aggregate)
elif dataset == 'voc':
if task != 'bbox':
print('Only bbox analysis is supported for Pascal VOC')
print('Will report bbox results\n')
if metric not in [None, ['AP'], ['AP50']]:
print('Only the AP50 metric is supported for Pascal VOC')
print('Will report AP50 metric\n')
results = get_voc_style_results(
filename, prints=prints, aggregate=aggregate)
return results
def get_distortions_from_file(filename):
eval_output = mmcv.load(filename)
return get_distortions_from_results(eval_output)
def get_distortions_from_results(eval_output):
distortions = []
    for distortion in eval_output:
        distortions.append(distortion.replace('_', ' '))
return distortions
def main():
parser = ArgumentParser(description='Corruption Result Analysis')
parser.add_argument('filename', help='result file path')
parser.add_argument(
'--dataset',
type=str,
choices=['coco', 'voc', 'cityscapes'],
default='coco',
help='dataset type')
parser.add_argument(
'--task',
type=str,
nargs='+',
choices=['bbox', 'segm'],
default=['bbox'],
help='task to report')
parser.add_argument(
'--metric',
nargs='+',
choices=[
None, 'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
'AR100', 'ARs', 'ARm', 'ARl'
],
default=None,
help='metric to report')
parser.add_argument(
'--prints',
type=str,
nargs='+',
choices=['P', 'mPC', 'rPC'],
default='mPC',
help='corruption benchmark metric to print')
parser.add_argument(
'--aggregate',
type=str,
choices=['all', 'benchmark'],
default='benchmark',
help='aggregate all results or only those \
for benchmark corruptions')
args = parser.parse_args()
for task in args.task:
get_results(
args.filename,
dataset=args.dataset,
task=task,
metric=args.metric,
prints=args.prints,
aggregate=args.aggregate)
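# Illustrative usage (the result file is the '*_results' dump written by the
# companion test_robustness.py tool; file names are assumptions):
#   python robustness_eval.py ${RESULT_FILE} --dataset coco --prints P mPC rPC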
if __name__ == '__main__':
main()
| 8,112 | 31.194444 | 79 | py |
mmdetection | mmdetection-master/tools/analysis_tools/test_robustness.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import copy
import os
import os.path as osp
import mmcv
import torch
from mmcv import DictAction
from mmcv.parallel import MMDataParallel, MMDistributedDataParallel
from mmcv.runner import (get_dist_info, init_dist, load_checkpoint,
wrap_fp16_model)
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from mmdet import datasets
from mmdet.apis import multi_gpu_test, set_random_seed, single_gpu_test
from mmdet.core import eval_map
from mmdet.datasets import build_dataloader, build_dataset
from mmdet.models import build_detector
from tools.analysis_tools.robustness_eval import get_results
def coco_eval_with_return(result_files,
result_types,
coco,
max_dets=(100, 300, 1000)):
for res_type in result_types:
assert res_type in ['proposal', 'bbox', 'segm', 'keypoints']
if mmcv.is_str(coco):
coco = COCO(coco)
assert isinstance(coco, COCO)
eval_results = {}
for res_type in result_types:
result_file = result_files[res_type]
assert result_file.endswith('.json')
coco_dets = coco.loadRes(result_file)
img_ids = coco.getImgIds()
iou_type = 'bbox' if res_type == 'proposal' else res_type
cocoEval = COCOeval(coco, coco_dets, iou_type)
cocoEval.params.imgIds = img_ids
if res_type == 'proposal':
cocoEval.params.useCats = 0
cocoEval.params.maxDets = list(max_dets)
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
if res_type == 'segm' or res_type == 'bbox':
metric_names = [
'AP', 'AP50', 'AP75', 'APs', 'APm', 'APl', 'AR1', 'AR10',
'AR100', 'ARs', 'ARm', 'ARl'
]
eval_results[res_type] = {
metric_names[i]: cocoEval.stats[i]
for i in range(len(metric_names))
}
else:
eval_results[res_type] = cocoEval.stats
return eval_results
def voc_eval_with_return(result_file,
dataset,
iou_thr=0.5,
logger='print',
only_ap=True):
det_results = mmcv.load(result_file)
annotations = [dataset.get_ann_info(i) for i in range(len(dataset))]
if hasattr(dataset, 'year') and dataset.year == 2007:
dataset_name = 'voc07'
else:
dataset_name = dataset.CLASSES
mean_ap, eval_results = eval_map(
det_results,
annotations,
scale_ranges=None,
iou_thr=iou_thr,
dataset=dataset_name,
logger=logger)
if only_ap:
eval_results = [{
'ap': eval_results[i]['ap']
} for i in range(len(eval_results))]
return mean_ap, eval_results
def parse_args():
parser = argparse.ArgumentParser(description='MMDet test detector')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('--out', help='output result file')
parser.add_argument(
'--corruptions',
type=str,
nargs='+',
default='benchmark',
choices=[
'all', 'benchmark', 'noise', 'blur', 'weather', 'digital',
'holdout', 'None', 'gaussian_noise', 'shot_noise', 'impulse_noise',
'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur', 'snow',
'frost', 'fog', 'brightness', 'contrast', 'elastic_transform',
'pixelate', 'jpeg_compression', 'speckle_noise', 'gaussian_blur',
'spatter', 'saturate'
],
help='corruptions')
parser.add_argument(
'--severities',
type=int,
nargs='+',
default=[0, 1, 2, 3, 4, 5],
help='corruption severity levels')
parser.add_argument(
'--eval',
type=str,
nargs='+',
choices=['proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'],
help='eval types')
parser.add_argument(
'--iou-thr',
type=float,
default=0.5,
help='IoU threshold for pascal voc evaluation')
parser.add_argument(
'--summaries',
type=bool,
default=False,
help='Print summaries for every corruption and severity')
parser.add_argument(
'--workers', type=int, default=32, help='workers per gpu')
parser.add_argument('--show', action='store_true', help='show results')
parser.add_argument(
'--show-dir', help='directory where painted images will be saved')
parser.add_argument(
'--show-score-thr',
type=float,
default=0.3,
help='score threshold (default: 0.3)')
parser.add_argument('--tmpdir', help='tmp dir for writing some results')
parser.add_argument('--seed', type=int, default=None, help='random seed')
parser.add_argument(
'--launcher',
choices=['none', 'pytorch', 'slurm', 'mpi'],
default='none',
help='job launcher')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument(
'--final-prints',
type=str,
nargs='+',
choices=['P', 'mPC', 'rPC'],
default='mPC',
help='corruption benchmark metric to print at the end')
parser.add_argument(
'--final-prints-aggregate',
type=str,
choices=['all', 'benchmark'],
default='benchmark',
help='aggregate all results or only those for benchmark corruptions')
parser.add_argument(
'--cfg-options',
nargs='+',
action=DictAction,
help='override some settings in the used config, the key-value pair '
'in xxx=yyy format will be merged into config file. If the value to '
'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
'Note that the quotation marks are necessary and that no white space '
'is allowed.')
args = parser.parse_args()
if 'LOCAL_RANK' not in os.environ:
os.environ['LOCAL_RANK'] = str(args.local_rank)
return args
def main():
args = parse_args()
assert args.out or args.show or args.show_dir, \
('Please specify at least one operation (save or show the results) '
         'with the argument "--out", "--show" or "--show-dir"')
if args.out is not None and not args.out.endswith(('.pkl', '.pickle')):
raise ValueError('The output file must be a pkl file.')
cfg = mmcv.Config.fromfile(args.config)
if args.cfg_options is not None:
cfg.merge_from_dict(args.cfg_options)
# set cudnn_benchmark
if cfg.get('cudnn_benchmark', False):
torch.backends.cudnn.benchmark = True
cfg.model.pretrained = None
cfg.data.test.test_mode = True
if args.workers == 0:
args.workers = cfg.data.workers_per_gpu
# init distributed env first, since logger depends on the dist info.
if args.launcher == 'none':
distributed = False
else:
distributed = True
init_dist(args.launcher, **cfg.dist_params)
# set random seeds
if args.seed is not None:
set_random_seed(args.seed)
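    # Map the requested corruption group(s) onto the concrete corruption
    # names (as defined by the imagecorruptions benchmark).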
if 'all' in args.corruptions:
corruptions = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression', 'speckle_noise', 'gaussian_blur', 'spatter',
'saturate'
]
elif 'benchmark' in args.corruptions:
corruptions = [
'gaussian_noise', 'shot_noise', 'impulse_noise', 'defocus_blur',
'glass_blur', 'motion_blur', 'zoom_blur', 'snow', 'frost', 'fog',
'brightness', 'contrast', 'elastic_transform', 'pixelate',
'jpeg_compression'
]
elif 'noise' in args.corruptions:
corruptions = ['gaussian_noise', 'shot_noise', 'impulse_noise']
elif 'blur' in args.corruptions:
corruptions = [
'defocus_blur', 'glass_blur', 'motion_blur', 'zoom_blur'
]
elif 'weather' in args.corruptions:
corruptions = ['snow', 'frost', 'fog', 'brightness']
elif 'digital' in args.corruptions:
corruptions = [
'contrast', 'elastic_transform', 'pixelate', 'jpeg_compression'
]
elif 'holdout' in args.corruptions:
corruptions = ['speckle_noise', 'gaussian_blur', 'spatter', 'saturate']
elif 'None' in args.corruptions:
corruptions = ['None']
args.severities = [0]
else:
corruptions = args.corruptions
rank, _ = get_dist_info()
aggregated_results = {}
for corr_i, corruption in enumerate(corruptions):
aggregated_results[corruption] = {}
for sev_i, corruption_severity in enumerate(args.severities):
# evaluate severity 0 (= no corruption) only once
if corr_i > 0 and corruption_severity == 0:
aggregated_results[corruption][0] = \
aggregated_results[corruptions[0]][0]
continue
test_data_cfg = copy.deepcopy(cfg.data.test)
# assign corruption and severity
if corruption_severity > 0:
corruption_trans = dict(
type='Corrupt',
corruption=corruption,
severity=corruption_severity)
# TODO: hard coded "1", we assume that the first step is
# loading images, which needs to be fixed in the future
test_data_cfg['pipeline'].insert(1, corruption_trans)
# print info
print(f'\nTesting {corruption} at severity {corruption_severity}')
# build the dataloader
# TODO: support multiple images per gpu
# (only minor changes are needed)
dataset = build_dataset(test_data_cfg)
data_loader = build_dataloader(
dataset,
samples_per_gpu=1,
workers_per_gpu=args.workers,
dist=distributed,
shuffle=False)
# build the model and load checkpoint
cfg.model.train_cfg = None
model = build_detector(cfg.model, test_cfg=cfg.get('test_cfg'))
fp16_cfg = cfg.get('fp16', None)
if fp16_cfg is not None:
wrap_fp16_model(model)
checkpoint = load_checkpoint(
model, args.checkpoint, map_location='cpu')
# old versions did not save class info in checkpoints,
            # this workaround is for backward compatibility
if 'CLASSES' in checkpoint.get('meta', {}):
model.CLASSES = checkpoint['meta']['CLASSES']
else:
model.CLASSES = dataset.CLASSES
if not distributed:
model = MMDataParallel(model, device_ids=[0])
show_dir = args.show_dir
if show_dir is not None:
show_dir = osp.join(show_dir, corruption)
show_dir = osp.join(show_dir, str(corruption_severity))
if not osp.exists(show_dir):
                        os.makedirs(show_dir)
outputs = single_gpu_test(model, data_loader, args.show,
show_dir, args.show_score_thr)
else:
model = MMDistributedDataParallel(
model.cuda(),
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False)
outputs = multi_gpu_test(model, data_loader, args.tmpdir)
if args.out and rank == 0:
eval_results_filename = (
osp.splitext(args.out)[0] + '_results' +
osp.splitext(args.out)[1])
mmcv.dump(outputs, args.out)
eval_types = args.eval
if cfg.dataset_type == 'VOCDataset':
if eval_types:
for eval_type in eval_types:
if eval_type == 'bbox':
test_dataset = mmcv.runner.obj_from_dict(
cfg.data.test, datasets)
logger = 'print' if args.summaries else None
mean_ap, eval_results = \
voc_eval_with_return(
args.out, test_dataset,
args.iou_thr, logger)
aggregated_results[corruption][
corruption_severity] = eval_results
else:
                            print('\nOnly "bbox" evaluation '
                                  'is supported for pascal voc')
else:
if eval_types:
print(f'Starting evaluate {" and ".join(eval_types)}')
if eval_types == ['proposal_fast']:
result_file = args.out
else:
if not isinstance(outputs[0], dict):
result_files = dataset.results2json(
outputs, args.out)
else:
for name in outputs[0]:
print(f'\nEvaluating {name}')
outputs_ = [out[name] for out in outputs]
                            result_file = args.out + f'.{name}'
result_files = dataset.results2json(
outputs_, result_file)
eval_results = coco_eval_with_return(
result_files, eval_types, dataset.coco)
aggregated_results[corruption][
corruption_severity] = eval_results
else:
print('\nNo task was selected for evaluation;'
'\nUse --eval to select a task')
# save results after each evaluation
mmcv.dump(aggregated_results, eval_results_filename)
    if args.out and rank == 0:
# print final results
print('\nAggregated results:')
prints = args.final_prints
aggregate = args.final_prints_aggregate
if cfg.dataset_type == 'VOCDataset':
get_results(
eval_results_filename,
dataset='voc',
prints=prints,
aggregate=aggregate)
else:
get_results(
eval_results_filename,
dataset='coco',
prints=prints,
aggregate=aggregate)
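# Illustrative usage (argument values are examples only):
#   python test_robustness.py ${CONFIG} ${CHECKPOINT} --out results.pkl \
#       --corruptions benchmark --eval bbox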
if __name__ == '__main__':
main()
| 15,222 | 38.234536 | 79 | py |
mmdetection | mmdetection-master/tools/dataset_converters/cityscapes.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import glob
import os.path as osp
import cityscapesscripts.helpers.labels as CSLabels
import mmcv
import numpy as np
import pycocotools.mask as maskUtils
def collect_files(img_dir, gt_dir):
suffix = 'leftImg8bit.png'
files = []
for img_file in glob.glob(osp.join(img_dir, '**/*.png')):
assert img_file.endswith(suffix), img_file
inst_file = gt_dir + img_file[
len(img_dir):-len(suffix)] + 'gtFine_instanceIds.png'
# Note that labelIds are not converted to trainId for seg map
segm_file = gt_dir + img_file[
len(img_dir):-len(suffix)] + 'gtFine_labelIds.png'
files.append((img_file, inst_file, segm_file))
assert len(files), f'No images found in {img_dir}'
print(f'Loaded {len(files)} images from {img_dir}')
return files
def collect_annotations(files, nproc=1):
print('Loading annotation images')
if nproc > 1:
images = mmcv.track_parallel_progress(
load_img_info, files, nproc=nproc)
else:
images = mmcv.track_progress(load_img_info, files)
return images
def load_img_info(files):
img_file, inst_file, segm_file = files
inst_img = mmcv.imread(inst_file, 'unchanged')
# ids < 24 are stuff labels (filtering them first is about 5% faster)
unique_inst_ids = np.unique(inst_img[inst_img >= 24])
anno_info = []
for inst_id in unique_inst_ids:
# For non-crowd annotations, inst_id // 1000 is the label_id
# Crowd annotations have <1000 instance ids
label_id = inst_id // 1000 if inst_id >= 1000 else inst_id
label = CSLabels.id2label[label_id]
if not label.hasInstances or label.ignoreInEval:
continue
category_id = label.id
iscrowd = int(inst_id < 1000)
mask = np.asarray(inst_img == inst_id, dtype=np.uint8, order='F')
mask_rle = maskUtils.encode(mask[:, :, None])[0]
area = maskUtils.area(mask_rle)
# convert to COCO style XYWH format
bbox = maskUtils.toBbox(mask_rle)
# for json encoding
mask_rle['counts'] = mask_rle['counts'].decode()
anno = dict(
iscrowd=iscrowd,
category_id=category_id,
bbox=bbox.tolist(),
area=area.tolist(),
segmentation=mask_rle)
anno_info.append(anno)
video_name = osp.basename(osp.dirname(img_file))
img_info = dict(
# remove img_prefix for filename
file_name=osp.join(video_name, osp.basename(img_file)),
height=inst_img.shape[0],
width=inst_img.shape[1],
anno_info=anno_info,
segm_file=osp.join(video_name, osp.basename(segm_file)))
return img_info
def cvt_annotations(image_infos, out_json_name):
out_json = dict()
img_id = 0
ann_id = 0
out_json['images'] = []
out_json['categories'] = []
out_json['annotations'] = []
for image_info in image_infos:
image_info['id'] = img_id
anno_infos = image_info.pop('anno_info')
out_json['images'].append(image_info)
for anno_info in anno_infos:
anno_info['image_id'] = img_id
anno_info['id'] = ann_id
out_json['annotations'].append(anno_info)
ann_id += 1
img_id += 1
for label in CSLabels.labels:
if label.hasInstances and not label.ignoreInEval:
cat = dict(id=label.id, name=label.name)
out_json['categories'].append(cat)
if len(out_json['annotations']) == 0:
out_json.pop('annotations')
mmcv.dump(out_json, out_json_name)
return out_json
def parse_args():
parser = argparse.ArgumentParser(
description='Convert Cityscapes annotations to COCO format')
parser.add_argument('cityscapes_path', help='cityscapes data path')
parser.add_argument('--img-dir', default='leftImg8bit', type=str)
parser.add_argument('--gt-dir', default='gtFine', type=str)
parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument(
'--nproc', default=1, type=int, help='number of process')
args = parser.parse_args()
return args
def main():
args = parse_args()
cityscapes_path = args.cityscapes_path
out_dir = args.out_dir if args.out_dir else cityscapes_path
mmcv.mkdir_or_exist(out_dir)
img_dir = osp.join(cityscapes_path, args.img_dir)
gt_dir = osp.join(cityscapes_path, args.gt_dir)
set_name = dict(
train='instancesonly_filtered_gtFine_train.json',
val='instancesonly_filtered_gtFine_val.json',
test='instancesonly_filtered_gtFine_test.json')
for split, json_name in set_name.items():
print(f'Converting {split} into {json_name}')
with mmcv.Timer(
print_tmpl='It took {}s to convert Cityscapes annotation'):
files = collect_files(
osp.join(img_dir, split), osp.join(gt_dir, split))
image_infos = collect_annotations(files, nproc=args.nproc)
cvt_annotations(image_infos, osp.join(out_dir, json_name))
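# Illustrative usage (assumes the official Cityscapes directory layout):
#   python cityscapes.py ./data/cityscapes --nproc 8 \
#       -o ./data/cityscapes/annotations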
if __name__ == '__main__':
main()
| 5,172 | 32.810458 | 75 | py |
mmdetection | mmdetection-master/tools/dataset_converters/images2coco.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import mmcv
from PIL import Image
def parse_args():
parser = argparse.ArgumentParser(
description='Convert images to coco format without annotations')
parser.add_argument('img_path', help='The root path of images')
    parser.add_argument(
        'classes',
        type=str,
        help='The text file that lists one class name per line')
parser.add_argument(
'out',
type=str,
        help='The output annotation json file name. The file is saved to an '
        '"annotations" directory created alongside img_path')
parser.add_argument(
'-e',
'--exclude-extensions',
type=str,
nargs='+',
help='The suffix of images to be excluded, such as "png" and "bmp"')
args = parser.parse_args()
return args
def collect_image_infos(path, exclude_extensions=None):
img_infos = []
images_generator = mmcv.scandir(path, recursive=True)
for image_path in mmcv.track_iter_progress(list(images_generator)):
        # str.endswith expects a str or a tuple of str, so convert the list
        # of excluded suffixes coming from argparse before matching.
        if exclude_extensions is None or not image_path.lower().endswith(
                tuple(exclude_extensions)):
image_path = os.path.join(path, image_path)
img_pillow = Image.open(image_path)
img_info = {
'filename': image_path,
'width': img_pillow.width,
'height': img_pillow.height,
}
img_infos.append(img_info)
return img_infos
def cvt_to_coco_json(img_infos, classes):
image_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
for category_id, name in enumerate(classes):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for img_dict in img_infos:
file_name = img_dict['filename']
assert file_name not in image_set
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(img_dict['height'])
image_item['width'] = int(img_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
image_id += 1
return coco
def main():
args = parse_args()
    assert args.out.endswith(
        'json'), 'The output file name must have a "json" suffix'
# 1 load image list info
img_infos = collect_image_infos(args.img_path, args.exclude_extensions)
# 2 convert to coco format data
classes = mmcv.list_from_file(args.classes)
coco_info = cvt_to_coco_json(img_infos, classes)
# 3 dump
save_dir = os.path.join(args.img_path, '..', 'annotations')
mmcv.mkdir_or_exist(save_dir)
save_path = os.path.join(save_dir, args.out)
mmcv.dump(coco_info, save_path)
print(f'save json file: {save_path}')
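# Illustrative usage (file names are assumptions):
#   python images2coco.py ${IMG_ROOT} classes.txt unlabeled_coco.json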
if __name__ == '__main__':
main()
| 3,109 | 29.490196 | 77 | py |
mmdetection | mmdetection-master/tools/dataset_converters/pascal_voc.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os.path as osp
import xml.etree.ElementTree as ET
import mmcv
import numpy as np
from mmdet.core import voc_classes
label_ids = {name: i for i, name in enumerate(voc_classes())}
def parse_xml(args):
xml_path, img_path = args
tree = ET.parse(xml_path)
root = tree.getroot()
size = root.find('size')
w = int(size.find('width').text)
h = int(size.find('height').text)
bboxes = []
labels = []
bboxes_ignore = []
labels_ignore = []
for obj in root.findall('object'):
name = obj.find('name').text
label = label_ids[name]
difficult = int(obj.find('difficult').text)
bnd_box = obj.find('bndbox')
bbox = [
int(bnd_box.find('xmin').text),
int(bnd_box.find('ymin').text),
int(bnd_box.find('xmax').text),
int(bnd_box.find('ymax').text)
]
if difficult:
bboxes_ignore.append(bbox)
labels_ignore.append(label)
else:
bboxes.append(bbox)
labels.append(label)
if not bboxes:
bboxes = np.zeros((0, 4))
labels = np.zeros((0, ))
else:
bboxes = np.array(bboxes, ndmin=2) - 1
labels = np.array(labels)
if not bboxes_ignore:
bboxes_ignore = np.zeros((0, 4))
labels_ignore = np.zeros((0, ))
else:
bboxes_ignore = np.array(bboxes_ignore, ndmin=2) - 1
labels_ignore = np.array(labels_ignore)
annotation = {
'filename': img_path,
'width': w,
'height': h,
'ann': {
'bboxes': bboxes.astype(np.float32),
'labels': labels.astype(np.int64),
'bboxes_ignore': bboxes_ignore.astype(np.float32),
'labels_ignore': labels_ignore.astype(np.int64)
}
}
return annotation
def cvt_annotations(devkit_path, years, split, out_file):
if not isinstance(years, list):
years = [years]
annotations = []
for year in years:
filelist = osp.join(devkit_path,
f'VOC{year}/ImageSets/Main/{split}.txt')
if not osp.isfile(filelist):
print(f'filelist does not exist: {filelist}, '
f'skip voc{year} {split}')
return
img_names = mmcv.list_from_file(filelist)
xml_paths = [
osp.join(devkit_path, f'VOC{year}/Annotations/{img_name}.xml')
for img_name in img_names
]
img_paths = [
f'VOC{year}/JPEGImages/{img_name}.jpg' for img_name in img_names
]
part_annotations = mmcv.track_progress(parse_xml,
list(zip(xml_paths, img_paths)))
annotations.extend(part_annotations)
if out_file.endswith('json'):
annotations = cvt_to_coco_json(annotations)
mmcv.dump(annotations, out_file)
return annotations
def cvt_to_coco_json(annotations):
image_id = 0
annotation_id = 0
coco = dict()
coco['images'] = []
coco['type'] = 'instance'
coco['categories'] = []
coco['annotations'] = []
image_set = set()
def addAnnItem(annotation_id, image_id, category_id, bbox, difficult_flag):
annotation_item = dict()
annotation_item['segmentation'] = []
seg = []
# bbox[] is x1,y1,x2,y2
# left_top
seg.append(int(bbox[0]))
seg.append(int(bbox[1]))
# left_bottom
seg.append(int(bbox[0]))
seg.append(int(bbox[3]))
# right_bottom
seg.append(int(bbox[2]))
seg.append(int(bbox[3]))
# right_top
seg.append(int(bbox[2]))
seg.append(int(bbox[1]))
annotation_item['segmentation'].append(seg)
xywh = np.array(
[bbox[0], bbox[1], bbox[2] - bbox[0], bbox[3] - bbox[1]])
annotation_item['area'] = int(xywh[2] * xywh[3])
if difficult_flag == 1:
annotation_item['ignore'] = 0
annotation_item['iscrowd'] = 1
else:
annotation_item['ignore'] = 0
annotation_item['iscrowd'] = 0
annotation_item['image_id'] = int(image_id)
annotation_item['bbox'] = xywh.astype(int).tolist()
annotation_item['category_id'] = int(category_id)
annotation_item['id'] = int(annotation_id)
coco['annotations'].append(annotation_item)
return annotation_id + 1
for category_id, name in enumerate(voc_classes()):
category_item = dict()
category_item['supercategory'] = str('none')
category_item['id'] = int(category_id)
category_item['name'] = str(name)
coco['categories'].append(category_item)
for ann_dict in annotations:
file_name = ann_dict['filename']
ann = ann_dict['ann']
assert file_name not in image_set
image_item = dict()
image_item['id'] = int(image_id)
image_item['file_name'] = str(file_name)
image_item['height'] = int(ann_dict['height'])
image_item['width'] = int(ann_dict['width'])
coco['images'].append(image_item)
image_set.add(file_name)
bboxes = ann['bboxes'][:, :4]
labels = ann['labels']
for bbox_id in range(len(bboxes)):
bbox = bboxes[bbox_id]
label = labels[bbox_id]
annotation_id = addAnnItem(
annotation_id, image_id, label, bbox, difficult_flag=0)
bboxes_ignore = ann['bboxes_ignore'][:, :4]
labels_ignore = ann['labels_ignore']
for bbox_id in range(len(bboxes_ignore)):
bbox = bboxes_ignore[bbox_id]
label = labels_ignore[bbox_id]
annotation_id = addAnnItem(
annotation_id, image_id, label, bbox, difficult_flag=1)
image_id += 1
return coco
def parse_args():
parser = argparse.ArgumentParser(
description='Convert PASCAL VOC annotations to mmdetection format')
parser.add_argument('devkit_path', help='pascal voc devkit path')
parser.add_argument('-o', '--out-dir', help='output path')
parser.add_argument(
'--out-format',
default='pkl',
choices=('pkl', 'coco'),
help='output format, "coco" indicates coco annotation format')
args = parser.parse_args()
return args
def main():
args = parse_args()
devkit_path = args.devkit_path
out_dir = args.out_dir if args.out_dir else devkit_path
mmcv.mkdir_or_exist(out_dir)
years = []
if osp.isdir(osp.join(devkit_path, 'VOC2007')):
years.append('2007')
if osp.isdir(osp.join(devkit_path, 'VOC2012')):
years.append('2012')
if '2007' in years and '2012' in years:
years.append(['2007', '2012'])
if not years:
raise IOError(f'The devkit path {devkit_path} contains neither '
'"VOC2007" nor "VOC2012" subfolder')
out_fmt = f'.{args.out_format}'
if args.out_format == 'coco':
out_fmt = '.json'
for year in years:
if year == '2007':
prefix = 'voc07'
elif year == '2012':
prefix = 'voc12'
elif year == ['2007', '2012']:
prefix = 'voc0712'
for split in ['train', 'val', 'trainval']:
dataset_name = prefix + '_' + split
print(f'processing {dataset_name} ...')
cvt_annotations(devkit_path, year, split,
osp.join(out_dir, dataset_name + out_fmt))
if not isinstance(year, list):
dataset_name = prefix + '_test'
print(f'processing {dataset_name} ...')
cvt_annotations(devkit_path, year, 'test',
osp.join(out_dir, dataset_name + out_fmt))
print('Done!')
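# Illustrative usage (assumes the standard VOCdevkit layout):
#   python pascal_voc.py ./data/VOCdevkit -o ./data/coco --out-format coco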
if __name__ == '__main__':
main()
| 7,841 | 31.94958 | 79 | py |
mmdetection | mmdetection-master/tools/deployment/mmdet2torchserve.py | # Copyright (c) OpenMMLab. All rights reserved.
from argparse import ArgumentParser, Namespace
from pathlib import Path
from tempfile import TemporaryDirectory
import mmcv
try:
from model_archiver.model_packaging import package_model
from model_archiver.model_packaging_utils import ModelExportUtils
except ImportError:
package_model = None
def mmdet2torchserve(
config_file: str,
checkpoint_file: str,
output_folder: str,
model_name: str,
model_version: str = '1.0',
force: bool = False,
):
"""Converts MMDetection model (config + checkpoint) to TorchServe `.mar`.
Args:
config_file:
In MMDetection config format.
The contents vary for each task repository.
checkpoint_file:
In MMDetection checkpoint format.
The contents vary for each task repository.
output_folder:
Folder where `{model_name}.mar` will be created.
The file created will be in TorchServe archive format.
model_name:
If not None, used for naming the `{model_name}.mar` file
that will be created under `output_folder`.
If None, `{Path(checkpoint_file).stem}` will be used.
model_version:
Model's version.
force:
If True, if there is an existing `{model_name}.mar`
file under `output_folder` it will be overwritten.
"""
mmcv.mkdir_or_exist(output_folder)
config = mmcv.Config.fromfile(config_file)
with TemporaryDirectory() as tmpdir:
config.dump(f'{tmpdir}/config.py')
args = Namespace(
**{
'model_file': f'{tmpdir}/config.py',
'serialized_file': checkpoint_file,
'handler': f'{Path(__file__).parent}/mmdet_handler.py',
'model_name': model_name or Path(checkpoint_file).stem,
'version': model_version,
'export_path': output_folder,
'force': force,
'requirements_file': None,
'extra_files': None,
'runtime': 'python',
'archive_format': 'default'
})
manifest = ModelExportUtils.generate_manifest_json(args)
package_model(args, manifest)
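# Sketch of serving the resulting archive (TorchServe invocation shown for
# illustration; additional flags may be required in practice):
#   torchserve --start --model-store ${OUTPUT_FOLDER} --models ${MODEL_NAME}.mar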
def parse_args():
parser = ArgumentParser(
description='Convert MMDetection models to TorchServe `.mar` format.')
parser.add_argument('config', type=str, help='config file path')
parser.add_argument('checkpoint', type=str, help='checkpoint file path')
parser.add_argument(
'--output-folder',
type=str,
required=True,
help='Folder where `{model_name}.mar` will be created.')
parser.add_argument(
'--model-name',
type=str,
default=None,
help='If not None, used for naming the `{model_name}.mar`'
'file that will be created under `output_folder`.'
'If None, `{Path(checkpoint_file).stem}` will be used.')
parser.add_argument(
'--model-version',
type=str,
default='1.0',
help='Number used for versioning.')
parser.add_argument(
'-f',
'--force',
action='store_true',
help='overwrite the existing `{model_name}.mar`')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
if package_model is None:
raise ImportError('`torch-model-archiver` is required.'
'Try: pip install torch-model-archiver')
mmdet2torchserve(args.config, args.checkpoint, args.output_folder,
args.model_name, args.model_version, args.force)
| 3,693 | 32.279279 | 78 | py |
mmdetection | mmdetection-master/tools/deployment/mmdet_handler.py | # Copyright (c) OpenMMLab. All rights reserved.
import base64
import os
import mmcv
import torch
from ts.torch_handler.base_handler import BaseHandler
from mmdet.apis import inference_detector, init_detector
class MMdetHandler(BaseHandler):
threshold = 0.5
def initialize(self, context):
properties = context.system_properties
self.map_location = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.device = torch.device(
            self.map_location + ':' + str(properties.get('gpu_id'))
            if torch.cuda.is_available() else self.map_location)
self.manifest = context.manifest
model_dir = properties.get('model_dir')
serialized_file = self.manifest['model']['serializedFile']
checkpoint = os.path.join(model_dir, serialized_file)
self.config_file = os.path.join(model_dir, 'config.py')
self.model = init_detector(self.config_file, checkpoint, self.device)
self.initialized = True
def preprocess(self, data):
images = []
for row in data:
image = row.get('data') or row.get('body')
if isinstance(image, str):
image = base64.b64decode(image)
image = mmcv.imfrombytes(image)
images.append(image)
return images
def inference(self, data, *args, **kwargs):
results = inference_detector(self.model, data)
return results
def postprocess(self, data):
# Format output following the example ObjectDetectionHandler format
output = []
for image_index, image_result in enumerate(data):
output.append([])
if isinstance(image_result, tuple):
bbox_result, segm_result = image_result
if isinstance(segm_result, tuple):
segm_result = segm_result[0] # ms rcnn
else:
bbox_result, segm_result = image_result, None
for class_index, class_result in enumerate(bbox_result):
class_name = self.model.CLASSES[class_index]
for bbox in class_result:
bbox_coords = bbox[:-1].tolist()
score = float(bbox[-1])
if score >= self.threshold:
output[image_index].append({
'class_name': class_name,
'bbox': bbox_coords,
'score': score
})
return output
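# Note: TorchServe drives this handler through BaseHandler.handle(), calling
# initialize() once at model load time and then preprocess -> inference ->
# postprocess for every request batch.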
| 2,560 | 34.569444 | 79 | py |
mmdetection | mmdetection-master/tools/deployment/onnx2tensorrt.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import os.path as osp
import warnings
import numpy as np
import onnx
import torch
from mmcv import Config
from mmcv.tensorrt import is_tensorrt_plugin_loaded, onnx2trt, save_trt_engine
from mmdet.core.export import preprocess_example_input
from mmdet.core.export.model_wrappers import (ONNXRuntimeDetector,
TensorRTDetector)
from mmdet.datasets import DATASETS
def get_GiB(x: int):
"""return x GiB."""
return x * (1 << 30)
def onnx2tensorrt(onnx_file,
trt_file,
input_config,
verify=False,
show=False,
workspace_size=1,
verbose=False):
import tensorrt as trt
onnx_model = onnx.load(onnx_file)
max_shape = input_config['max_shape']
min_shape = input_config['min_shape']
opt_shape = input_config['opt_shape']
fp16_mode = False
# create trt engine and wrapper
opt_shape_dict = {'input': [min_shape, opt_shape, max_shape]}
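    # The (min, opt, max) triple defines a TensorRT optimization profile, so
    # the built engine accepts dynamic input sizes within this range.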
max_workspace_size = get_GiB(workspace_size)
trt_engine = onnx2trt(
onnx_model,
opt_shape_dict,
log_level=trt.Logger.VERBOSE if verbose else trt.Logger.ERROR,
fp16_mode=fp16_mode,
max_workspace_size=max_workspace_size)
save_dir, _ = osp.split(trt_file)
if save_dir:
os.makedirs(save_dir, exist_ok=True)
save_trt_engine(trt_engine, trt_file)
print(f'Successfully created TensorRT engine: {trt_file}')
if verify:
# prepare input
one_img, one_meta = preprocess_example_input(input_config)
img_list, img_meta_list = [one_img], [[one_meta]]
img_list = [_.cuda().contiguous() for _ in img_list]
# wrap ONNX and TensorRT model
onnx_model = ONNXRuntimeDetector(onnx_file, CLASSES, device_id=0)
trt_model = TensorRTDetector(trt_file, CLASSES, device_id=0)
# inference with wrapped model
with torch.no_grad():
onnx_results = onnx_model(
img_list, img_metas=img_meta_list, return_loss=False)[0]
trt_results = trt_model(
img_list, img_metas=img_meta_list, return_loss=False)[0]
if show:
out_file_ort, out_file_trt = None, None
else:
out_file_ort, out_file_trt = 'show-ort.png', 'show-trt.png'
show_img = one_meta['show_img']
score_thr = 0.3
onnx_model.show_result(
show_img,
onnx_results,
score_thr=score_thr,
show=True,
win_name='ONNXRuntime',
out_file=out_file_ort)
trt_model.show_result(
show_img,
trt_results,
score_thr=score_thr,
show=True,
win_name='TensorRT',
out_file=out_file_trt)
with_mask = trt_model.with_masks
# compare a part of result
if with_mask:
compare_pairs = list(zip(onnx_results, trt_results))
else:
compare_pairs = [(onnx_results, trt_results)]
        err_msg = 'The numerical values are different between ONNXRuntime' + \
            ' and TensorRT, but it does not necessarily mean the' + \
            ' TensorRT engine is problematic.'
        # check the numerical value
        for onnx_res, trt_res in compare_pairs:
            for o_res, t_res in zip(onnx_res, trt_res):
                np.testing.assert_allclose(
                    o_res, t_res, rtol=1e-03, atol=1e-05, err_msg=err_msg)
        print('The numerical values are the same between '
              'ONNXRuntime and TensorRT')
def parse_normalize_cfg(test_pipeline):
transforms = None
for pipeline in test_pipeline:
if 'transforms' in pipeline:
transforms = pipeline['transforms']
break
assert transforms is not None, 'Failed to find `transforms`'
norm_config_li = [_ for _ in transforms if _['type'] == 'Normalize']
assert len(norm_config_li) == 1, '`norm_config` should only have one'
norm_config = norm_config_li[0]
return norm_config
def parse_args():
parser = argparse.ArgumentParser(
description='Convert MMDetection models from ONNX to TensorRT')
parser.add_argument('config', help='test config file path')
parser.add_argument('model', help='Filename of input ONNX model')
parser.add_argument(
'--trt-file',
type=str,
default='tmp.trt',
help='Filename of output TensorRT engine')
parser.add_argument(
'--input-img', type=str, default='', help='Image for test')
parser.add_argument(
'--show', action='store_true', help='Whether to show output results')
parser.add_argument(
'--dataset',
type=str,
default='coco',
help='Dataset name. This argument is deprecated and will be \
removed in future releases.')
parser.add_argument(
'--verify',
action='store_true',
help='Verify the outputs of ONNXRuntime and TensorRT')
parser.add_argument(
'--verbose',
action='store_true',
help='Whether to verbose logging messages while creating \
TensorRT engine. Defaults to False.')
parser.add_argument(
'--to-rgb',
action='store_false',
help='Feed model with RGB or BGR image. Default is RGB. This \
argument is deprecated and will be removed in future releases.')
parser.add_argument(
'--shape',
type=int,
nargs='+',
default=[400, 600],
help='Input size of the model')
parser.add_argument(
'--mean',
type=float,
nargs='+',
default=[123.675, 116.28, 103.53],
help='Mean value used for preprocess input data. This argument \
is deprecated and will be removed in future releases.')
parser.add_argument(
'--std',
type=float,
nargs='+',
default=[58.395, 57.12, 57.375],
help='Variance value used for preprocess input data. \
This argument is deprecated and will be removed in future releases.')
parser.add_argument(
'--min-shape',
type=int,
nargs='+',
default=None,
help='Minimum input size of the model in TensorRT')
parser.add_argument(
'--max-shape',
type=int,
nargs='+',
default=None,
help='Maximum input size of the model in TensorRT')
parser.add_argument(
'--workspace-size',
type=int,
default=1,
help='Max workspace size in GiB')
args = parser.parse_args()
return args
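# Illustrative usage (shape and file names are assumptions):
#   python onnx2tensorrt.py ${CONFIG} model.onnx --trt-file model.trt \
#       --shape 608 608 --workspace-size 2 --verify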
if __name__ == '__main__':
assert is_tensorrt_plugin_loaded(), 'TensorRT plugin should be compiled.'
args = parse_args()
    warnings.warn(
        'Arguments like `--to-rgb`, `--mean`, `--std`, `--dataset` are '
        'parsed directly from the config file; they are deprecated and '
        'will be removed in future releases.')
if not args.input_img:
args.input_img = osp.join(osp.dirname(__file__), '../../demo/demo.jpg')
cfg = Config.fromfile(args.config)
def parse_shape(shape):
if len(shape) == 1:
shape = (1, 3, shape[0], shape[0])
        elif len(shape) == 2:
shape = (1, 3) + tuple(shape)
else:
raise ValueError('invalid input shape')
return shape
if args.shape:
input_shape = parse_shape(args.shape)
else:
img_scale = cfg.test_pipeline[1]['img_scale']
input_shape = (1, 3, img_scale[1], img_scale[0])
if not args.max_shape:
max_shape = input_shape
else:
max_shape = parse_shape(args.max_shape)
if not args.min_shape:
min_shape = input_shape
else:
min_shape = parse_shape(args.min_shape)
dataset = DATASETS.get(cfg.data.test['type'])
assert (dataset is not None)
CLASSES = dataset.CLASSES
normalize_cfg = parse_normalize_cfg(cfg.test_pipeline)
input_config = {
'min_shape': min_shape,
'opt_shape': input_shape,
'max_shape': max_shape,
'input_shape': input_shape,
'input_path': args.input_img,
'normalize_cfg': normalize_cfg
}
# Create TensorRT engine
onnx2tensorrt(
args.model,
args.trt_file,
input_config,
verify=args.verify,
show=args.show,
workspace_size=args.workspace_size,
verbose=args.verbose)
    # The following ANSI escape codes match the text styles from colorama
bright_style, reset_style = '\x1b[1m', '\x1b[0m'
red_text, blue_text = '\x1b[31m', '\x1b[34m'
white_background = '\x1b[107m'
msg = white_background + bright_style + red_text
msg += 'DeprecationWarning: This tool will be deprecated in future. '
msg += blue_text + 'Welcome to use the unified model deployment toolbox '
msg += 'MMDeploy: https://github.com/open-mmlab/mmdeploy'
msg += reset_style
warnings.warn(msg)
| 9,035 | 32.842697 | 79 | py |