# File: BasicVSR_PlusPlus-master/tests/test_models/test_components/test_refiners/test_matting_refiners.py
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmedit.models import PlainRefiner


def assert_dict_keys_equal(dictionary, target_keys):
"""Check if the keys of the dictionary is equal to the target key set."""
assert isinstance(dictionary, dict)
assert set(dictionary.keys()) == set(target_keys)


def assert_tensor_with_shape(tensor, shape):
""""Check if the shape of the tensor is equal to the target shape."""
assert isinstance(tensor, torch.Tensor)
assert tensor.shape == shape


def test_plain_refiner():
"""Test PlainRefiner."""
model = PlainRefiner()
model.init_weights()
model.train()
merged, alpha, trimap, raw_alpha = _demo_inputs_pair()
prediction = model(torch.cat([merged, raw_alpha.sigmoid()], 1), raw_alpha)
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))
# test forward with gpu
if torch.cuda.is_available():
model = PlainRefiner()
model.init_weights()
model.train()
model.cuda()
merged, alpha, trimap, raw_alpha = _demo_inputs_pair(cuda=True)
prediction = model(
torch.cat([merged, raw_alpha.sigmoid()], 1), raw_alpha)
assert_tensor_with_shape(prediction, torch.Size([1, 1, 64, 64]))


def _demo_inputs_pair(img_shape=(64, 64), batch_size=1, cuda=False):
"""
Create a superset of inputs needed to run refiner.
Args:
img_shape (tuple): shape of the input image.
batch_size (int): batch size of the input batch.
cuda (bool): whether transfer input into gpu.
"""
color_shape = (batch_size, 3, img_shape[0], img_shape[1])
gray_shape = (batch_size, 1, img_shape[0], img_shape[1])
merged = torch.from_numpy(np.random.random(color_shape).astype(np.float32))
alpha = torch.from_numpy(np.random.random(gray_shape).astype(np.float32))
trimap = torch.from_numpy(np.random.random(gray_shape).astype(np.float32))
raw_alpha = torch.from_numpy(
np.random.random(gray_shape).astype(np.float32))
if cuda:
merged = merged.cuda()
alpha = alpha.cuda()
trimap = trimap.cuda()
raw_alpha = raw_alpha.cuda()
return merged, alpha, trimap, raw_alpha
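

# --- Usage sketch (added for illustration; not part of the original test) ---
# A minimal end-to-end call of PlainRefiner in eval mode, reusing the demo
# inputs above. The call mirrors test_plain_refiner(); treat this as a hedged
# sketch rather than official mmedit usage, and the helper name is ours.
def _demo_plain_refiner_inference():
    refiner = PlainRefiner()
    refiner.init_weights()
    refiner.eval()
    merged, _, _, raw_alpha = _demo_inputs_pair()
    with torch.no_grad():
        refined = refiner(
            torch.cat([merged, raw_alpha.sigmoid()], 1), raw_alpha)
    # Same shape as asserted in test_plain_refiner().
    assert refined.shape == torch.Size([1, 1, 64, 64])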


# File: BasicVSR_PlusPlus-master/tests/test_models/test_components/test_refiners/test_deepfill_refiner.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.models import (ContextualAttentionNeck, DeepFillDecoder,
DeepFillEncoder, DeepFillRefiner, GLDilationNeck)


def test_deepfill_refiner():
refiner = DeepFillRefiner()
x = torch.rand((2, 5, 256, 256))
mask = x.new_ones((2, 1, 256, 256))
mask[..., 30:100, 40:100] = 0.
res, offset = refiner(x, mask)
assert res.shape == (2, 3, 256, 256)
assert offset.shape == (2, 32, 32, 32, 32)
# check model architecture
assert isinstance(refiner.encoder_attention, DeepFillEncoder)
assert isinstance(refiner.encoder_conv, DeepFillEncoder)
assert isinstance(refiner.contextual_attention_neck,
ContextualAttentionNeck)
assert isinstance(refiner.decoder, DeepFillDecoder)
assert isinstance(refiner.dilation_neck, GLDilationNeck)
if torch.cuda.is_available():
refiner = DeepFillRefiner().cuda()
x = torch.rand((2, 5, 256, 256)).cuda()
res, offset = refiner(x, mask.cuda())
assert res.shape == (2, 3, 256, 256)
assert offset.shape == (2, 32, 32, 32, 32)
# check model architecture
assert isinstance(refiner.encoder_attention, DeepFillEncoder)
assert isinstance(refiner.encoder_conv, DeepFillEncoder)
assert isinstance(refiner.contextual_attention_neck,
ContextualAttentionNeck)
assert isinstance(refiner.decoder, DeepFillDecoder)
assert isinstance(refiner.dilation_neck, GLDilationNeck)
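

# --- Usage sketch (added for illustration; not part of the original test) ---
# A single batch-1 forward pass of DeepFillRefiner on CPU. The 5-channel input
# and the ones-with-zero-hole mask mirror the test above; the exact channel
# semantics are defined by the surrounding inpaintor and are only assumed here.
def _demo_refiner_forward():
    refiner = DeepFillRefiner()
    x = torch.rand((1, 5, 256, 256))
    mask = x.new_ones((1, 1, 256, 256))
    mask[..., 64:128, 64:128] = 0.
    with torch.no_grad():
        res, offset = refiner(x, mask)
    assert res.shape == (1, 3, 256, 256)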


# File: BasicVSR_PlusPlus-master/tests/test_models/test_components/test_discriminators/test_unet_disc.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.components import UNetDiscriminatorWithSpectralNorm


def test_unet_disc_with_spectral_norm():
# cpu
disc = UNetDiscriminatorWithSpectralNorm(in_channels=3)
img = torch.randn(1, 3, 16, 16)
disc(img)
with pytest.raises(TypeError):
# pretrained must be a string path
disc.init_weights(pretrained=233)
# cuda
if torch.cuda.is_available():
disc = disc.cuda()
img = img.cuda()
disc(img)
with pytest.raises(TypeError):
# pretrained must be a string path
disc.init_weights(pretrained=233)
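

# --- Usage sketch (added for illustration; not part of the original test) ---
# The test above only checks that the forward pass runs. This sketch prints
# the output shape instead of asserting it, since the output layout is an
# assumption here rather than something the test pins down.
def _demo_unet_disc_forward():
    disc = UNetDiscriminatorWithSpectralNorm(in_channels=3)
    disc.eval()
    with torch.no_grad():
        pred = disc(torch.randn(1, 3, 16, 16))
    print(pred.shape)  # typically a per-pixel realness map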


# File: BasicVSR_PlusPlus-master/tests/test_models/test_components/test_discriminators/test_light_cnn.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.builder import build_component
from mmedit.models.components.discriminators.light_cnn import MaxFeature


def test_max_feature():
# cpu
conv2d = MaxFeature(16, 16, filter_type='conv2d')
x1 = torch.rand(3, 16, 16, 16)
y1 = conv2d(x1)
assert y1.shape == (3, 16, 16, 16)
linear = MaxFeature(16, 16, filter_type='linear')
x2 = torch.rand(3, 16)
y2 = linear(x2)
assert y2.shape == (3, 16)
# gpu
if torch.cuda.is_available():
x1 = x1.cuda()
x2 = x2.cuda()
conv2d = conv2d.cuda()
linear = linear.cuda()
y1 = conv2d(x1)
assert y1.shape == (3, 16, 16, 16)
y2 = linear(x2)
assert y2.shape == (3, 16)
# filter_type should be conv2d or linear
with pytest.raises(ValueError):
MaxFeature(12, 12, filter_type='conv1d')


def test_light_cnn():
cfg = dict(type='LightCNN', in_channels=3)
net = build_component(cfg)
net.init_weights(pretrained=None)
# cpu
inputs = torch.rand((2, 3, 128, 128))
output = net(inputs)
assert output.shape == (2, 1)
# gpu
if torch.cuda.is_available():
net.init_weights(pretrained=None)
net = net.cuda()
output = net(inputs.cuda())
assert output.shape == (2, 1)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])
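

# --- Concept sketch (added for illustration; not part of the original tests) ---
# Max-Feature-Map (MFM), the operation behind MaxFeature, can be paraphrased
# in plain torch: produce 2 * out_channels features, split them in half along
# the channel dim, and keep the element-wise maximum. This follows the
# published LightCNN operator, not the exact mmedit implementation.
class _TinyMFM(torch.nn.Module):

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = torch.nn.Conv2d(
            in_channels, 2 * out_channels, kernel_size=3, padding=1)

    def forward(self, x):
        first, second = torch.chunk(self.conv(x), 2, dim=1)
        return torch.max(first, second)


# _TinyMFM(16, 16)(torch.rand(3, 16, 16, 16)).shape == (3, 16, 16, 16)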


# File: BasicVSR_PlusPlus-master/tests/test_models/test_components/test_discriminators/test_discriminators.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pytest
import torch
from mmedit.models import build_component


def test_ttsr_dict():
cfg = dict(type='TTSRDiscriminator', in_channels=3, in_size=160)
net = build_component(cfg)
net.init_weights(pretrained=None)
# cpu
inputs = torch.rand((2, 3, 160, 160))
output = net(inputs)
assert output.shape == (2, 1)
# gpu
if torch.cuda.is_available():
net.init_weights(pretrained=None)
net = net.cuda()
output = net(inputs.cuda())
assert output.shape == (2, 1)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])


def test_patch_discriminator():
# color, BN
cfg = dict(
type='PatchDiscriminator',
in_channels=3,
base_channels=64,
num_conv=3,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='normal', gain=0.02))
net = build_component(cfg)
net.init_weights(pretrained=None)
# cpu
input_shape = (1, 3, 64, 64)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 1, 6, 6)
# gpu
if torch.cuda.is_available():
net.init_weights(pretrained=None)
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 1, 6, 6)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])
# gray, IN
cfg = dict(
type='PatchDiscriminator',
in_channels=1,
base_channels=64,
num_conv=3,
norm_cfg=dict(type='IN'),
init_cfg=dict(type='normal', gain=0.02))
net = build_component(cfg)
net.init_weights(pretrained=None)
# cpu
input_shape = (1, 1, 64, 64)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 1, 6, 6)
# gpu
if torch.cuda.is_available():
net.init_weights(pretrained=None)
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 1, 6, 6)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])
# test norm_cfg assertions
bad_cfg = copy.deepcopy(cfg)
bad_cfg['norm_cfg'] = None
with pytest.raises(AssertionError):
_ = build_component(bad_cfg)
bad_cfg['norm_cfg'] = dict(tp='BN')
with pytest.raises(AssertionError):
_ = build_component(bad_cfg)


def test_smpatch_discriminator():
# color, BN
cfg = dict(
type='SoftMaskPatchDiscriminator',
in_channels=3,
base_channels=64,
num_conv=3,
with_spectral_norm=True)
net = build_component(cfg)
net.init_weights(pretrained=None)
# cpu
input_shape = (1, 3, 64, 64)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 1, 6, 6)
# gpu
if torch.cuda.is_available():
net.init_weights(pretrained=None)
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 1, 6, 6)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])
# gray, IN
cfg = dict(
type='SoftMaskPatchDiscriminator',
in_channels=1,
base_channels=64,
num_conv=3,
with_spectral_norm=True)
net = build_component(cfg)
net.init_weights(pretrained=None)
# cpu
input_shape = (1, 1, 64, 64)
img = _demo_inputs(input_shape)
output = net(img)
assert output.shape == (1, 1, 6, 6)
# gpu
if torch.cuda.is_available():
net.init_weights(pretrained=None)
net = net.cuda()
output = net(img.cuda())
assert output.shape == (1, 1, 6, 6)
# pretrained should be str or None
with pytest.raises(TypeError):
net.init_weights(pretrained=[1])


def _demo_inputs(input_shape=(1, 3, 64, 64)):
"""Create a superset of inputs needed to run backbone.
Args:
input_shape (tuple): input batch dimensions.
Default: (1, 3, 64, 64).
Returns:
imgs: (Tensor): Images in FloatTensor with desired shapes.
"""
imgs = np.random.random(input_shape)
imgs = torch.FloatTensor(imgs)
return imgs
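

# --- Usage sketch (added for illustration; not part of the original tests) ---
# PatchDiscriminator is fully convolutional, so the output patch map scales
# with the input; the (1, 1, 6, 6) shape asserted above is specific to 64x64
# inputs. This sketch feeds a larger image and prints the resulting map size
# (assumed behaviour, not asserted by the tests).
def _demo_patch_disc_output_size():
    cfg = dict(
        type='PatchDiscriminator',
        in_channels=3,
        base_channels=64,
        num_conv=3,
        norm_cfg=dict(type='BN'),
        init_cfg=dict(type='normal', gain=0.02))
    net = build_component(cfg)
    net.eval()
    with torch.no_grad():
        pred = net(_demo_inputs((1, 3, 128, 128)))
    print(pred.shape)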


# File: BasicVSR_PlusPlus-master/tests/test_models/test_components/test_discriminators/test_deepfill_disc.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmedit.models.components import (DeepFillv1Discriminators,
MultiLayerDiscriminator)


def test_deepfillv1_disc():
model_config = dict(
global_disc_cfg=dict(
type='MultiLayerDiscriminator',
in_channels=3,
max_channels=256,
fc_in_channels=256 * 16 * 16,
fc_out_channels=1,
num_convs=4,
norm_cfg=None,
act_cfg=dict(type='ELU'),
out_act_cfg=dict(type='LeakyReLU', negative_slope=0.2)),
local_disc_cfg=dict(
type='MultiLayerDiscriminator',
in_channels=3,
max_channels=512,
fc_in_channels=512 * 8 * 8,
fc_out_channels=1,
num_convs=4,
norm_cfg=None,
act_cfg=dict(type='ELU'),
out_act_cfg=dict(type='LeakyReLU', negative_slope=0.2)))
disc = DeepFillv1Discriminators(**model_config)
disc.init_weights()
global_x = torch.rand((2, 3, 256, 256))
local_x = torch.rand((2, 3, 128, 128))
global_pred, local_pred = disc((global_x, local_x))
assert global_pred.shape == (2, 1)
assert local_pred.shape == (2, 1)
assert isinstance(disc.global_disc, MultiLayerDiscriminator)
assert isinstance(disc.local_disc, MultiLayerDiscriminator)
with pytest.raises(TypeError):
disc.init_weights(model_config)
if torch.cuda.is_available():
disc = DeepFillv1Discriminators(**model_config).cuda()
disc.init_weights()
global_x = torch.rand((2, 3, 256, 256)).cuda()
local_x = torch.rand((2, 3, 128, 128)).cuda()
global_pred, local_pred = disc((global_x, local_x))
assert global_pred.shape == (2, 1)
assert local_pred.shape == (2, 1)
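

# --- Usage sketch (added for illustration; not part of the original test) ---
# The discriminator consumes a (global, local) pair. In the real pipeline the
# local patch comes from the inpainting bbox; here it is simply a fixed
# 128x128 crop of the global image (a hypothetical stand-in).
def _demo_global_local_pair():
    global_x = torch.rand((2, 3, 256, 256))
    local_x = global_x[..., 64:192, 64:192]
    # usage: global_pred, local_pred = disc((global_x, local_x))
    return global_x, local_x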


# File: BasicVSR_PlusPlus-master/tests/test_models/test_components/test_discriminators/test_multi_layer_disc.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
import torch.nn as nn
from mmedit.models.components import MultiLayerDiscriminator


def test_multi_layer_disc():
with pytest.raises(AssertionError):
# fc_in_channels must be greater than 0
multi_disc = MultiLayerDiscriminator(
3, 236, fc_in_channels=-100, out_act_cfg=None)
with pytest.raises(TypeError):
# stride_list must be a tuple of int with length of 1 or
# length of num_conv
multi_disc = MultiLayerDiscriminator(
3, 256, num_convs=3, stride_list=(1, 2))
input_g = torch.randn(1, 3, 256, 256)
# test multi-layer discriminators without fc layer
multi_disc = MultiLayerDiscriminator(
in_channels=3, max_channels=256, fc_in_channels=None)
multi_disc.init_weights()
disc_pred = multi_disc(input_g)
assert disc_pred.shape == (1, 256, 8, 8)
multi_disc = MultiLayerDiscriminator(
in_channels=3, max_channels=256, fc_in_channels=100)
assert isinstance(multi_disc.fc.activate, nn.ReLU)
multi_disc = MultiLayerDiscriminator(3, 236, fc_in_channels=None)
assert multi_disc.with_out_act
assert not multi_disc.with_fc
assert isinstance(multi_disc.conv5.activate, nn.ReLU)
multi_disc = MultiLayerDiscriminator(
3, 236, fc_in_channels=None, out_act_cfg=None)
assert not multi_disc.conv5.with_activation
with pytest.raises(TypeError):
multi_disc.init_weights(pretrained=dict(igccc=4396))
input_g = torch.randn(1, 3, 16, 16)
multi_disc = MultiLayerDiscriminator(
in_channels=3,
max_channels=256,
num_convs=2,
fc_in_channels=4 * 4 * 128,
fc_out_channels=10,
with_spectral_norm=True)
multi_disc.init_weights()
disc_pred = multi_disc(input_g)
assert disc_pred.shape == (1, 10)
assert multi_disc.conv1.with_spectral_norm
assert multi_disc.conv2.with_spectral_norm
assert hasattr(multi_disc.fc.linear, 'weight_orig')
num_convs = 3
multi_disc = MultiLayerDiscriminator(
in_channels=64,
max_channels=512,
num_convs=num_convs,
kernel_size=4,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='LeakyReLU', negative_slope=0.2),
out_act_cfg=dict(type='ReLU'),
with_input_norm=False,
with_out_convs=True)
# check input conv
assert not multi_disc.conv1.with_norm
assert isinstance(multi_disc.conv1.activate, nn.LeakyReLU)
assert multi_disc.conv1.stride == (2, 2)
# check intermediate conv
for i in range(1, num_convs):
assert getattr(multi_disc, f'conv{i + 1}').with_norm
assert isinstance(
getattr(multi_disc, f'conv{i + 1}').activate, nn.LeakyReLU)
assert getattr(multi_disc, f'conv{i + 1}').stride == (2, 2)
# check out_conv
assert multi_disc.conv4.with_norm
assert multi_disc.conv4.with_activation
assert multi_disc.conv4.stride == (1, 1)
assert not multi_disc.conv5.with_norm
assert not multi_disc.conv5.with_activation
assert multi_disc.conv5.stride == (1, 1)
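

# --- Note (added for illustration; not part of the original test) ---
# The fc_in_channels=4 * 4 * 128 value used above follows from the conv
# stack: each of the num_convs=2 stride-2 convs halves the 16x16 input while
# the channel count doubles from the base towards max_channels. A sketch of
# that arithmetic (a base of 64 channels is assumed, matching the behaviour
# implied by the test):
def _fc_in_channels(input_size=16, num_convs=2, base_channels=64):
    spatial = input_size // (2 ** num_convs)  # 16 -> 4
    channels = base_channels * (2 ** (num_convs - 1))  # 64 -> 128
    return spatial * spatial * channels


assert _fc_in_channels() == 4 * 4 * 128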


# File: BasicVSR_PlusPlus-master/tests/test_models/test_inpaintors/test_deepfill_inpaintor.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
import tempfile
import pytest
import torch
from mmcv import Config
from mmedit.core import build_optimizers
from mmedit.models import DeepFillv1Inpaintor


def test_two_stage_inpaintor():
model = dict(
disc_input_with_mask=True,
encdec=dict(type='DeepFillEncoderDecoder'),
disc=dict(
type='DeepFillv1Discriminators',
global_disc_cfg=dict(
type='MultiLayerDiscriminator',
in_channels=3,
max_channels=256,
fc_in_channels=256 * 16 * 16,
fc_out_channels=1,
num_convs=4,
norm_cfg=None,
act_cfg=dict(type='ELU'),
out_act_cfg=dict(type='LeakyReLU', negative_slope=0.2)),
local_disc_cfg=dict(
type='MultiLayerDiscriminator',
in_channels=3,
max_channels=512,
fc_in_channels=512 * 8 * 8,
fc_out_channels=1,
num_convs=4,
norm_cfg=None,
act_cfg=dict(type='ELU'),
out_act_cfg=dict(type='LeakyReLU', negative_slope=0.2))),
stage1_loss_type=('loss_l1_hole', 'loss_l1_valid'),
stage2_loss_type=('loss_l1_hole', 'loss_l1_valid', 'loss_gan'),
loss_gan=dict(
type='GANLoss',
gan_type='hinge',
loss_weight=1,
),
loss_l1_hole=dict(
type='L1Loss',
loss_weight=1.0,
),
loss_l1_valid=dict(
type='L1Loss',
loss_weight=1.0,
),
pretrained=None)
train_cfg = Config(dict(disc_step=1, local_size=(128, 128)))
test_cfg = Config(dict(metrics=['l1']))
tsinpaintor = DeepFillv1Inpaintor(
**model, train_cfg=train_cfg, test_cfg=test_cfg)
# check architecture
assert tsinpaintor.stage1_loss_type == ('loss_l1_hole', 'loss_l1_valid')
assert tsinpaintor.stage2_loss_type == ('loss_l1_hole', 'loss_l1_valid',
'loss_gan')
assert tsinpaintor.with_l1_hole_loss
assert tsinpaintor.with_l1_valid_loss
assert not tsinpaintor.with_composed_percep_loss
assert not tsinpaintor.with_out_percep_loss
assert tsinpaintor.with_gan
if torch.cuda.is_available():
# prepare data
gt_img = torch.rand((2, 3, 256, 256)).cuda()
mask = torch.zeros((2, 1, 256, 256)).cuda()
mask[..., 50:180, 60:170] = 1.
masked_img = gt_img * (1. - mask)
bbox_tensor = torch.tensor([[50, 60, 110, 110], [50, 60, 110,
110]]).cuda()
data_batch = dict(
gt_img=gt_img,
mask=mask,
masked_img=masked_img,
mask_bbox=bbox_tensor)
# prepare model and optimizer
tsinpaintor.cuda()
optimizers_config = dict(
generator=dict(type='Adam', lr=0.0001),
disc=dict(type='Adam', lr=0.0001))
optims = build_optimizers(tsinpaintor, optimizers_config)
# check train_step with standard deepfillv2 model
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss_global' in log_vars
assert 'stage1_loss_l1_hole' in log_vars
assert 'stage1_loss_l1_valid' in log_vars
assert 'stage2_loss_l1_hole' in log_vars
assert 'stage2_loss_l1_valid' in log_vars
assert 'stage1_fake_res' in outputs['results']
assert 'stage2_fake_res' in outputs['results']
assert outputs['results']['stage1_fake_res'].size() == (2, 3, 256, 256)
# check train step w/o disc step
tsinpaintor.train_cfg.disc_step = 0
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss_global' not in log_vars
assert 'stage1_loss_l1_hole' in log_vars
assert 'stage1_loss_l1_valid' in log_vars
assert 'stage2_loss_l1_hole' in log_vars
assert 'stage2_loss_l1_valid' in log_vars
assert 'stage1_fake_res' in outputs['results']
assert 'stage2_fake_res' in outputs['results']
assert outputs['results']['stage1_fake_res'].size() == (2, 3, 256, 256)
tsinpaintor.train_cfg.disc_step = 1
# check train step w/ multiple disc step
tsinpaintor.train_cfg.disc_step = 5
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss_global' in log_vars
assert 'stage1_loss_l1_hole' not in log_vars
assert outputs['results']['fake_res'].size() == (2, 3, 256, 256)
tsinpaintor.train_cfg.disc_step = 1
# test forward test w/o save image
outputs = tsinpaintor.forward_test(
masked_img[0:1], mask[0:1], gt_img=gt_img[0:1, ...])
assert 'eval_result' in outputs
assert outputs['eval_result']['l1'] > 0
# test forward test w/o eval metrics
tsinpaintor.test_cfg = dict()
tsinpaintor.eval_with_metrics = False
outputs = tsinpaintor.forward_test(masked_img[0:1], mask[0:1])
for key in [
'stage1_fake_res', 'stage2_fake_res', 'fake_res', 'fake_img'
]:
assert outputs[key].size() == (1, 3, 256, 256)
# test forward test w/ save image
with tempfile.TemporaryDirectory() as tmpdir:
outputs = tsinpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')])
assert os.path.exists(os.path.join(tmpdir, 'igccc_4396.png'))
# test forward test w/ save image w/ gt_img
with tempfile.TemporaryDirectory() as tmpdir:
outputs = tsinpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
assert os.path.exists(os.path.join(tmpdir, 'igccc.png'))
with pytest.raises(AssertionError):
outputs = tsinpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
gt_img=gt_img[0:1, ...])
with pytest.raises(AssertionError):
outputs = tsinpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=None,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
# check train_step with not implemented loss type
with pytest.raises(NotImplementedError):
model_ = copy.deepcopy(model)
model_['stage1_loss_type'] = ('igccc', )
tsinpaintor = DeepFillv1Inpaintor(
**model_, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
outputs = tsinpaintor.train_step(data_batch, optims)
# test input w/o ones and disc input w/o mask
model_ = dict(
disc_input_with_mask=False,
input_with_ones=False,
encdec=dict(
type='DeepFillEncoderDecoder',
stage1=dict(
type='GLEncoderDecoder',
encoder=dict(type='DeepFillEncoder', in_channels=4),
decoder=dict(type='DeepFillDecoder', in_channels=128),
dilation_neck=dict(
type='GLDilationNeck',
in_channels=128,
act_cfg=dict(type='ELU'))),
stage2=dict(
type='DeepFillRefiner',
encoder_attention=dict(
type='DeepFillEncoder',
encoder_type='stage2_attention',
in_channels=4),
encoder_conv=dict(
type='DeepFillEncoder',
encoder_type='stage2_conv',
in_channels=4)),
),
disc=dict(
type='DeepFillv1Discriminators',
global_disc_cfg=dict(
type='MultiLayerDiscriminator',
in_channels=3,
max_channels=256,
fc_in_channels=256 * 16 * 16,
fc_out_channels=1,
num_convs=4,
norm_cfg=None,
act_cfg=dict(type='ELU'),
out_act_cfg=dict(type='LeakyReLU', negative_slope=0.2)),
local_disc_cfg=dict(
type='MultiLayerDiscriminator',
in_channels=3,
max_channels=512,
fc_in_channels=512 * 8 * 8,
fc_out_channels=1,
num_convs=4,
norm_cfg=None,
act_cfg=dict(type='ELU'),
out_act_cfg=dict(type='LeakyReLU', negative_slope=0.2))),
stage1_loss_type=('loss_l1_hole', 'loss_l1_valid'),
stage2_loss_type=('loss_l1_hole', 'loss_l1_valid', 'loss_gan'),
loss_gan=dict(
type='GANLoss',
gan_type='hinge',
loss_weight=1,
),
loss_l1_hole=dict(
type='L1Loss',
loss_weight=1.0,
),
loss_gp=dict(type='GradientPenaltyLoss', loss_weight=10.),
loss_tv=dict(
type='MaskedTVLoss',
loss_weight=0.1,
),
loss_l1_valid=dict(
type='L1Loss',
loss_weight=1.0,
),
loss_disc_shift=dict(type='DiscShiftLoss'),
pretrained=None)
tsinpaintor = DeepFillv1Inpaintor(
**model_, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss_global' in log_vars
assert 'stage1_loss_l1_hole' in log_vars
assert 'stage1_loss_l1_valid' in log_vars
assert 'stage2_loss_l1_hole' in log_vars
assert 'stage2_loss_l1_valid' in log_vars
assert 'stage1_fake_res' in outputs['results']
assert 'stage2_fake_res' in outputs['results']
assert outputs['results']['stage1_fake_res'].size() == (2, 3, 256, 256)
outputs = tsinpaintor.forward_test(
masked_img[0:1], mask[0:1], gt_img=gt_img[0:1, ...])
assert 'eval_result' in outputs
assert outputs['eval_result']['l1'] > 0
# test w/o stage1 loss
model_ = copy.deepcopy(model)
model_['stage1_loss_type'] = None
tsinpaintor = DeepFillv1Inpaintor(
**model_, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss_global' in log_vars
assert 'stage1_loss_l1_hole' not in log_vars
assert 'stage1_loss_l1_valid' not in log_vars
assert 'stage2_loss_l1_hole' in log_vars
assert 'stage2_loss_l1_valid' in log_vars
assert 'stage1_fake_res' in outputs['results']
assert 'stage2_fake_res' in outputs['results']
assert outputs['results']['stage1_fake_res'].size() == (2, 3, 256, 256)
# test w/o stage2 loss
model_ = copy.deepcopy(model)
model_['stage2_loss_type'] = None
tsinpaintor = DeepFillv1Inpaintor(
**model_, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss_global' in log_vars
assert 'stage1_loss_l1_hole' in log_vars
assert 'stage1_loss_l1_valid' in log_vars
assert 'stage2_loss_l1_hole' not in log_vars
assert 'stage2_loss_l1_valid' not in log_vars
assert 'stage1_fake_res' in outputs['results']
assert 'stage2_fake_res' in outputs['results']
assert outputs['results']['stage1_fake_res'].size() == (2, 3, 256, 256)


# File: BasicVSR_PlusPlus-master/tests/test_models/test_inpaintors/test_gl_inpaintor.py
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv import Config
from mmedit.models import build_model


def test_gl_inpaintor():
cfg = Config.fromfile('tests/data/inpaintor_config/gl_test.py')
gl = build_model(cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
assert gl.__class__.__name__ == 'GLInpaintor'
if torch.cuda.is_available():
gt_img = torch.randn(1, 3, 256, 256)
mask = torch.zeros_like(gt_img)[:, 0:1, ...]
mask[..., 100:210, 100:210] = 1.
masked_img = gt_img * (1. - mask)
mask_bbox = torch.tensor([[100, 100, 110, 110]])
gl.cuda()
data_batch = dict(
gt_img=gt_img.cuda(),
mask=mask.cuda(),
masked_img=masked_img.cuda(),
mask_bbox=mask_bbox.cuda())
optim_g = torch.optim.SGD(gl.generator.parameters(), lr=0.1)
optim_d = torch.optim.SGD(gl.disc.parameters(), lr=0.1)
optim_dict = dict(generator=optim_g, disc=optim_d)
for i in range(5):
outputs = gl.train_step(data_batch, optim_dict)
if i <= 2:
assert 'loss_l1_hole' in outputs['log_vars']
assert 'fake_loss' not in outputs['log_vars']
assert 'real_loss' not in outputs['log_vars']
assert 'loss_g_fake' not in outputs['log_vars']
elif i == 3:
assert 'loss_l1_hole' not in outputs['log_vars']
assert 'fake_loss' in outputs['log_vars']
assert 'real_loss' in outputs['log_vars']
assert 'loss_g_fake' not in outputs['log_vars']
else:
assert 'loss_l1_hole' in outputs['log_vars']
assert 'fake_loss' in outputs['log_vars']
assert 'real_loss' in outputs['log_vars']
assert 'loss_g_fake' in outputs['log_vars']
gl_dirty = build_model(
cfg.model_dirty, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
gl_dirty.cuda()
res, loss = gl_dirty.generator_loss(gt_img, gt_img, gt_img, data_batch)
assert len(loss) == 0
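

# --- Note (added for illustration; not part of the original test) ---
# The loop above encodes the three Global&Local training phases configured in
# gl_test.py: a generator-only warm-up, a discriminator-only phase, then joint
# training. The expected log keys per phase, paraphrased solely from the
# assertions above:
_EXPECTED_GL_LOG_KEYS = {
    'generator_only': {'loss_l1_hole'},
    'disc_only': {'fake_loss', 'real_loss'},
    'joint': {'loss_l1_hole', 'fake_loss', 'real_loss', 'loss_g_fake'},
}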


# File: BasicVSR_PlusPlus-master/tests/test_models/test_inpaintors/test_pconv_inpaintor.py
# Copyright (c) OpenMMLab. All rights reserved.
import os
import tempfile
from unittest.mock import patch
import pytest
import torch
from mmcv import Config
from mmedit.models import build_model
from mmedit.models.losses import PerceptualVGG


@patch.object(PerceptualVGG, 'init_weights')
def test_pconv_inpaintor(init_weights):
cfg = Config.fromfile(
'tests/data/inpaintor_config/pconv_inpaintor_test.py')
if torch.cuda.is_available():
pconv_inpaintor = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
assert pconv_inpaintor.__class__.__name__ == 'PConvInpaintor'
pconv_inpaintor.cuda()
gt_img = torch.randn((1, 3, 256, 256)).cuda()
mask = torch.zeros_like(gt_img)
mask[..., 50:160, 100:210] = 1.
masked_img = gt_img * (1. - mask)
data_batch = dict(gt_img=gt_img, mask=mask, masked_img=masked_img)
optim_g = torch.optim.SGD(
pconv_inpaintor.generator.parameters(), lr=0.1)
optim_dict = dict(generator=optim_g)
outputs = pconv_inpaintor.train_step(data_batch, optim_dict)
assert outputs['results']['fake_res'].shape == (1, 3, 256, 256)
assert outputs['results']['final_mask'].shape == (1, 3, 256, 256)
assert 'loss_l1_hole' in outputs['log_vars']
assert 'loss_l1_valid' in outputs['log_vars']
assert 'loss_tv' in outputs['log_vars']
# test forward dummy
res = pconv_inpaintor.forward_dummy(
torch.cat([masked_img, mask], dim=1))
assert res.shape == (1, 3, 256, 256)
# test forward test w/o save image
outputs = pconv_inpaintor.forward_test(
masked_img[0:1], mask[0:1], gt_img=gt_img[0:1, ...])
assert 'eval_result' in outputs
assert outputs['eval_result']['l1'] > 0
assert outputs['eval_result']['psnr'] > 0
assert outputs['eval_result']['ssim'] > 0
# test forward test w/o eval metrics
pconv_inpaintor.test_cfg = dict()
pconv_inpaintor.eval_with_metrics = False
outputs = pconv_inpaintor.forward_test(masked_img[0:1], mask[0:1])
for key in ['fake_res', 'fake_img']:
assert outputs[key].size() == (1, 3, 256, 256)
# test forward test w/ save image
with tempfile.TemporaryDirectory() as tmpdir:
outputs = pconv_inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')])
assert os.path.exists(os.path.join(tmpdir, 'igccc_4396.png'))
# test forward test w/ save image w/ gt_img
with tempfile.TemporaryDirectory() as tmpdir:
outputs = pconv_inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
assert os.path.exists(os.path.join(tmpdir, 'igccc.png'))
with pytest.raises(AssertionError):
outputs = pconv_inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
gt_img=gt_img[0:1, ...])
with pytest.raises(AssertionError):
outputs = pconv_inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=None,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
# reset mock to clear some memory usage
init_weights.reset_mock()
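

# --- Usage sketch (added for illustration; not part of the original test) ---
# forward_dummy() takes the masked image concatenated with the mask, as in
# the GPU branch above. This CPU variant assumes the PConv generator also
# runs without CUDA, which the test does not exercise; note that building the
# full model may initialise perceptual VGG weights that the test mocks away.
def _demo_pconv_forward_dummy():
    cfg = Config.fromfile(
        'tests/data/inpaintor_config/pconv_inpaintor_test.py')
    inpaintor = build_model(
        cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
    gt_img = torch.randn((1, 3, 256, 256))
    mask = torch.zeros_like(gt_img)
    mask[..., 50:160, 100:210] = 1.
    masked_img = gt_img * (1. - mask)
    with torch.no_grad():
        res = inpaintor.forward_dummy(torch.cat([masked_img, mask], dim=1))
    assert res.shape == (1, 3, 256, 256)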


# File: BasicVSR_PlusPlus-master/tests/test_models/test_inpaintors/test_two_stage_inpaintor.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
import tempfile
import pytest
import torch
from mmcv import Config
from mmedit.core import build_optimizers
from mmedit.models import TwoStageInpaintor


def test_two_stage_inpaintor():
model = dict(
disc_input_with_mask=True,
encdec=dict(
type='DeepFillEncoderDecoder',
stage1=dict(
type='GLEncoderDecoder',
encoder=dict(
type='DeepFillEncoder',
conv_type='gated_conv',
channel_factor=0.75),
decoder=dict(
type='DeepFillDecoder',
conv_type='gated_conv',
in_channels=96,
channel_factor=0.75),
dilation_neck=dict(
type='GLDilationNeck',
in_channels=96,
conv_type='gated_conv',
act_cfg=dict(type='ELU'))),
stage2=dict(
type='DeepFillRefiner',
encoder_attention=dict(
type='DeepFillEncoder',
encoder_type='stage2_attention',
conv_type='gated_conv',
channel_factor=0.75),
encoder_conv=dict(
type='DeepFillEncoder',
encoder_type='stage2_conv',
conv_type='gated_conv',
channel_factor=0.75),
dilation_neck=dict(
type='GLDilationNeck',
in_channels=96,
conv_type='gated_conv',
act_cfg=dict(type='ELU')),
contextual_attention=dict(
type='ContextualAttentionNeck',
in_channels=96,
conv_type='gated_conv'),
decoder=dict(
type='DeepFillDecoder',
in_channels=192,
conv_type='gated_conv'))),
disc=dict(
type='MultiLayerDiscriminator',
in_channels=4,
max_channels=256,
fc_in_channels=256 * 4 * 4,
fc_out_channels=1,
num_convs=6,
norm_cfg=None,
act_cfg=dict(type='ELU'),
out_act_cfg=dict(type='LeakyReLU', negative_slope=0.2),
with_spectral_norm=True,
),
stage1_loss_type=('loss_l1_hole', 'loss_l1_valid'),
stage2_loss_type=('loss_l1_hole', 'loss_l1_valid', 'loss_gan'),
loss_gan=dict(
type='GANLoss',
gan_type='hinge',
loss_weight=1,
),
loss_l1_hole=dict(
type='L1Loss',
loss_weight=1.0,
),
loss_l1_valid=dict(
type='L1Loss',
loss_weight=1.0,
),
pretrained=None)
train_cfg = Config(dict(disc_step=1))
test_cfg = Config(dict(metrics=['l1', 'psnr', 'ssim']))
tsinpaintor = TwoStageInpaintor(
**model, train_cfg=train_cfg, test_cfg=test_cfg)
# check architecture
assert tsinpaintor.stage1_loss_type == ('loss_l1_hole', 'loss_l1_valid')
assert tsinpaintor.stage2_loss_type == ('loss_l1_hole', 'loss_l1_valid',
'loss_gan')
assert tsinpaintor.with_l1_hole_loss
assert tsinpaintor.with_l1_valid_loss
assert not tsinpaintor.with_composed_percep_loss
assert not tsinpaintor.with_out_percep_loss
assert tsinpaintor.with_gan
if torch.cuda.is_available():
# prepare data
gt_img = torch.rand((2, 3, 256, 256)).cuda()
mask = torch.zeros((2, 1, 256, 256)).cuda()
mask[..., 50:180, 60:170] = 1.
masked_img = gt_img * (1. - mask)
data_batch = dict(gt_img=gt_img, mask=mask, masked_img=masked_img)
# prepare model and optimizer
tsinpaintor.cuda()
optimizers_config = dict(
generator=dict(type='Adam', lr=0.0001),
disc=dict(type='Adam', lr=0.0001))
optims = build_optimizers(tsinpaintor, optimizers_config)
# check train_step with standard deepfillv2 model
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss' in log_vars
assert 'stage1_loss_l1_hole' in log_vars
assert 'stage1_loss_l1_valid' in log_vars
assert 'stage2_loss_l1_hole' in log_vars
assert 'stage2_loss_l1_valid' in log_vars
assert 'stage1_fake_res' in outputs['results']
assert 'stage2_fake_res' in outputs['results']
assert outputs['results']['stage1_fake_res'].size() == (2, 3, 256, 256)
# check train step w/o disc step
tsinpaintor.train_cfg.disc_step = 0
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss' not in log_vars
assert 'stage1_loss_l1_hole' in log_vars
assert 'stage1_loss_l1_valid' in log_vars
assert 'stage2_loss_l1_hole' in log_vars
assert 'stage2_loss_l1_valid' in log_vars
assert 'stage1_fake_res' in outputs['results']
assert 'stage2_fake_res' in outputs['results']
assert outputs['results']['stage1_fake_res'].size() == (2, 3, 256, 256)
tsinpaintor.train_cfg.disc_step = 1
# check train step w/ multiple disc step
tsinpaintor.train_cfg.disc_step = 5
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss' in log_vars
assert 'stage1_loss_l1_hole' not in log_vars
assert outputs['results']['fake_res'].size() == (2, 3, 256, 256)
tsinpaintor.train_cfg.disc_step = 1
# test forward test w/o save image
outputs = tsinpaintor.forward_test(
masked_img[0:1], mask[0:1], gt_img=gt_img[0:1, ...])
assert 'eval_result' in outputs
assert outputs['eval_result']['l1'] > 0
assert outputs['eval_result']['psnr'] > 0
assert outputs['eval_result']['ssim'] > 0
# test forward test w/o eval metrics
tsinpaintor.test_cfg = dict()
tsinpaintor.eval_with_metrics = False
outputs = tsinpaintor.forward_test(masked_img[0:1], mask[0:1])
for key in [
'stage1_fake_res', 'stage2_fake_res', 'fake_res', 'fake_img'
]:
assert outputs[key].size() == (1, 3, 256, 256)
# test forward test w/ save image
with tempfile.TemporaryDirectory() as tmpdir:
outputs = tsinpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')])
assert os.path.exists(os.path.join(tmpdir, 'igccc_4396.png'))
# test forward test w/ save image w/ gt_img
with tempfile.TemporaryDirectory() as tmpdir:
outputs = tsinpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
assert os.path.exists(os.path.join(tmpdir, 'igccc.png'))
with pytest.raises(AssertionError):
outputs = tsinpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
gt_img=gt_img[0:1, ...])
with pytest.raises(AssertionError):
outputs = tsinpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=None,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
# check train_step with not implemented loss type
with pytest.raises(NotImplementedError):
model_ = copy.deepcopy(model)
model_['stage1_loss_type'] = ('igccc', )
tsinpaintor = TwoStageInpaintor(
**model_, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
outputs = tsinpaintor.train_step(data_batch, optims)
# test input w/o ones and disc input w/o mask
model_ = dict(
disc_input_with_mask=False,
input_with_ones=False,
encdec=dict(
type='DeepFillEncoderDecoder',
stage1=dict(
type='GLEncoderDecoder',
encoder=dict(
type='DeepFillEncoder',
in_channels=4,
conv_type='gated_conv',
channel_factor=0.75),
decoder=dict(
type='DeepFillDecoder',
conv_type='gated_conv',
in_channels=96,
channel_factor=0.75),
dilation_neck=dict(
type='GLDilationNeck',
in_channels=96,
conv_type='gated_conv',
act_cfg=dict(type='ELU'))),
stage2=dict(
type='DeepFillRefiner',
encoder_attention=dict(
type='DeepFillEncoder',
in_channels=4,
encoder_type='stage2_attention',
conv_type='gated_conv',
channel_factor=0.75),
encoder_conv=dict(
type='DeepFillEncoder',
in_channels=4,
encoder_type='stage2_conv',
conv_type='gated_conv',
channel_factor=0.75),
dilation_neck=dict(
type='GLDilationNeck',
in_channels=96,
conv_type='gated_conv',
act_cfg=dict(type='ELU')),
contextual_attention=dict(
type='ContextualAttentionNeck',
in_channels=96,
conv_type='gated_conv'),
decoder=dict(
type='DeepFillDecoder',
in_channels=192,
conv_type='gated_conv'))),
disc=dict(
type='MultiLayerDiscriminator',
in_channels=3,
max_channels=256,
fc_in_channels=256 * 4 * 4,
fc_out_channels=1,
num_convs=6,
norm_cfg=None,
act_cfg=dict(type='ELU'),
out_act_cfg=dict(type='LeakyReLU', negative_slope=0.2),
with_spectral_norm=True,
),
stage1_loss_type=('loss_l1_hole', 'loss_l1_valid'),
stage2_loss_type=('loss_l1_hole', 'loss_l1_valid', 'loss_gan'),
loss_gan=dict(
type='GANLoss',
gan_type='hinge',
loss_weight=1,
),
loss_l1_hole=dict(
type='L1Loss',
loss_weight=1.0,
),
loss_gp=dict(type='GradientPenaltyLoss', loss_weight=10.),
loss_tv=dict(
type='MaskedTVLoss',
loss_weight=0.1,
),
loss_l1_valid=dict(
type='L1Loss',
loss_weight=1.0,
),
pretrained=None)
tsinpaintor = TwoStageInpaintor(
**model_, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss' in log_vars
assert 'stage1_loss_l1_hole' in log_vars
assert 'stage1_loss_l1_valid' in log_vars
assert 'stage2_loss_l1_hole' in log_vars
assert 'stage2_loss_l1_valid' in log_vars
assert 'stage1_fake_res' in outputs['results']
assert 'stage2_fake_res' in outputs['results']
assert outputs['results']['stage1_fake_res'].size() == (2, 3, 256, 256)
outputs = tsinpaintor.forward_test(
masked_img[0:1], mask[0:1], gt_img=gt_img[0:1, ...])
assert 'eval_result' in outputs
assert outputs['eval_result']['l1'] > 0
# test w/o stage1 loss
model_ = copy.deepcopy(model)
model_['stage1_loss_type'] = None
tsinpaintor = TwoStageInpaintor(
**model_, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss' in log_vars
assert 'stage1_loss_l1_hole' not in log_vars
assert 'stage1_loss_l1_valid' not in log_vars
assert 'stage2_loss_l1_hole' in log_vars
assert 'stage2_loss_l1_valid' in log_vars
assert 'stage1_fake_res' in outputs['results']
assert 'stage2_fake_res' in outputs['results']
assert outputs['results']['stage1_fake_res'].size() == (2, 3, 256, 256)
# test w/o stage2 loss
model_ = copy.deepcopy(model)
model_['stage2_loss_type'] = None
tsinpaintor = TwoStageInpaintor(
**model_, train_cfg=train_cfg, test_cfg=test_cfg).cuda()
outputs = tsinpaintor.train_step(data_batch, optims)
assert outputs['num_samples'] == 2
log_vars = outputs['log_vars']
assert 'real_loss' in log_vars
assert 'stage1_loss_l1_hole' in log_vars
assert 'stage1_loss_l1_valid' in log_vars
assert 'stage2_loss_l1_hole' not in log_vars
assert 'stage2_loss_l1_valid' not in log_vars
assert 'stage1_fake_res' in outputs['results']
assert 'stage2_fake_res' in outputs['results']
assert outputs['results']['stage1_fake_res'].size() == (2, 3, 256, 256)


# File: BasicVSR_PlusPlus-master/tests/test_models/test_inpaintors/test_one_stage_inpaintor.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
import tempfile
from unittest.mock import patch
import pytest
import torch
from mmcv import Config
from mmedit.models import build_model
from mmedit.models.backbones import GLEncoderDecoder


def test_one_stage_inpaintor():
cfg = Config.fromfile('tests/data/inpaintor_config/one_stage_gl.py')
# mock perceptual loss for test speed
cfg.model.loss_composed_percep = None
inpaintor = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
# modify attributes for mocking
inpaintor.with_composed_percep_loss = True
inpaintor.loss_percep = None
# test attributes
assert inpaintor.__class__.__name__ == 'OneStageInpaintor'
assert isinstance(inpaintor.generator, GLEncoderDecoder)
assert inpaintor.with_l1_hole_loss
assert inpaintor.with_l1_valid_loss
assert inpaintor.with_tv_loss
assert inpaintor.with_composed_percep_loss
assert inpaintor.with_out_percep_loss
assert inpaintor.with_gan
assert inpaintor.with_gp_loss
assert inpaintor.with_disc_shift_loss
assert inpaintor.is_train
assert inpaintor.train_cfg['disc_step'] == 1
assert inpaintor.disc_step_count == 0
with patch.object(
inpaintor, 'loss_percep', return_value=(torch.tensor(1.0), None)):
input_x = torch.randn(1, 3, 256, 256)
with pytest.raises(NotImplementedError):
inpaintor.forward_train(input_x)
if torch.cuda.is_available():
gt_img = torch.randn(1, 3, 256, 256).cuda()
mask = torch.zeros_like(gt_img)[:, 0:1, ...]
mask[..., 20:100, 100:120] = 1.
masked_img = gt_img * (1. - mask)
inpaintor.cuda()
data_batch = dict(gt_img=gt_img, mask=mask, masked_img=masked_img)
output = inpaintor.forward_test(**data_batch)
assert 'eval_result' in output
output = inpaintor.val_step(data_batch)
assert 'eval_result' in output
optim_g = torch.optim.SGD(inpaintor.generator.parameters(), lr=0.1)
optim_d = torch.optim.SGD(inpaintor.disc.parameters(), lr=0.1)
optim_dict = dict(generator=optim_g, disc=optim_d)
outputs = inpaintor.train_step(data_batch, optim_dict)
assert outputs['num_samples'] == 1
results = outputs['results']
assert results['fake_res'].shape == (1, 3, 256, 256)
assert 'loss_l1_hole' in outputs['log_vars']
assert 'loss_l1_valid' in outputs['log_vars']
assert 'loss_composed_percep' in outputs['log_vars']
assert 'loss_composed_style' not in outputs['log_vars']
assert 'loss_out_percep' in outputs['log_vars']
assert 'loss_out_style' not in outputs['log_vars']
assert 'loss_tv' in outputs['log_vars']
assert 'fake_loss' in outputs['log_vars']
assert 'real_loss' in outputs['log_vars']
assert 'loss_g_fake' in outputs['log_vars']
# test forward dummy
res = inpaintor.forward_dummy(torch.cat([masked_img, mask], dim=1))
assert res.shape == (1, 3, 256, 256)
# test forward test w/o save image
outputs = inpaintor.forward_test(
masked_img[0:1], mask[0:1], gt_img=gt_img[0:1, ...])
assert 'eval_result' in outputs
assert outputs['eval_result']['l1'] > 0
assert outputs['eval_result']['psnr'] > 0
assert outputs['eval_result']['ssim'] > 0
# test forward test w/o eval metrics
inpaintor.test_cfg = dict()
inpaintor.eval_with_metrics = False
outputs = inpaintor.forward_test(masked_img[0:1], mask[0:1])
for key in ['fake_res', 'fake_img']:
assert outputs[key].size() == (1, 3, 256, 256)
# test forward test w/ save image
with tempfile.TemporaryDirectory() as tmpdir:
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')])
assert os.path.exists(os.path.join(tmpdir, 'igccc_4396.png'))
# test forward test w/ save image w/ gt_img
with tempfile.TemporaryDirectory() as tmpdir:
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
assert os.path.exists(os.path.join(tmpdir, 'igccc.png'))
with pytest.raises(AssertionError):
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
gt_img=gt_img[0:1, ...])
with pytest.raises(AssertionError):
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=None,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
cfg_ = copy.deepcopy(cfg)
cfg_.train_cfg.disc_step = 2
inpaintor = build_model(
cfg_.model, train_cfg=cfg_.train_cfg, test_cfg=cfg_.test_cfg)
inpaintor.cuda()
assert inpaintor.train_cfg.disc_step == 2
outputs = inpaintor.train_step(data_batch, optim_dict)
assert 'loss_l1_hole' not in outputs['log_vars']


# File: BasicVSR_PlusPlus-master/tests/test_models/test_inpaintors/test_aot_inpaintor.py
# Copyright (c) OpenMMLab. All rights reserved.
import copy
import os
import tempfile
import pytest
import torch
from mmcv import Config
from mmedit.models import build_model
from mmedit.models.backbones.encoder_decoders import AOTEncoderDecoder


def test_aot_inpaintor():
cfg = Config.fromfile('tests/data/inpaintor_config/aot_test.py')
inpaintor = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
# test attributes
assert inpaintor.__class__.__name__ == 'AOTInpaintor'
assert isinstance(inpaintor.generator, AOTEncoderDecoder)
assert inpaintor.with_l1_valid_loss
assert inpaintor.with_composed_percep_loss
assert inpaintor.with_out_percep_loss
assert inpaintor.with_gan
assert inpaintor.is_train
assert inpaintor.train_cfg['disc_step'] == 1
assert inpaintor.disc_step_count == 0
input_x = torch.randn(1, 3, 256, 256)
with pytest.raises(NotImplementedError):
inpaintor.forward_train(input_x)
gt_img = torch.randn(1, 3, 256, 256)
mask = torch.zeros_like(gt_img)[:, 0:1, ...]
mask[..., 20:100, 100:120] = 1.
masked_img = gt_img * (1. - mask) + mask
data_batch = dict(gt_img=gt_img, mask=mask, masked_img=masked_img)
output = inpaintor.forward_test(**data_batch)
assert 'eval_results' in output
output = inpaintor.val_step(data_batch)
assert 'eval_results' in output
optim_g = torch.optim.SGD(inpaintor.generator.parameters(), lr=0.1)
optim_d = torch.optim.SGD(inpaintor.disc.parameters(), lr=0.1)
optim_dict = dict(generator=optim_g, disc=optim_d)
outputs = inpaintor.train_step(data_batch, optim_dict)
assert outputs['num_samples'] == 1
results = outputs['results']
assert results['fake_res'].shape == (1, 3, 256, 256)
assert 'loss_l1_valid' in outputs['log_vars']
assert 'loss_out_percep' in outputs['log_vars']
assert 'disc_losses' in outputs['log_vars']
assert 'loss_g_fake' in outputs['log_vars']
# test forward test w/o save image
outputs = inpaintor.forward_test(
masked_img[0:1], mask[0:1], gt_img=gt_img[0:1, ...])
assert 'eval_results' in outputs
assert outputs['eval_results']['l1'] > 0
assert outputs['eval_results']['psnr'] > 0
assert outputs['eval_results']['ssim'] > 0
# test forward test w/o eval metrics
inpaintor.test_cfg = dict()
inpaintor.eval_with_metrics = False
outputs = inpaintor.forward_test(masked_img[0:1], mask[0:1])
for key in ['fake_res', 'fake_img']:
assert outputs[key].size() == (1, 3, 256, 256)
# test forward test w/ save image
with tempfile.TemporaryDirectory() as tmpdir:
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')])
assert os.path.exists(os.path.join(tmpdir, 'igccc_4396.png'))
# test forward test w/ save image w/ gt_img
with tempfile.TemporaryDirectory() as tmpdir:
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
assert os.path.exists(os.path.join(tmpdir, 'igccc.png'))
with pytest.raises(AssertionError):
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
gt_img=gt_img[0:1, ...])
with pytest.raises(AssertionError):
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=None,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
cfg_ = copy.deepcopy(cfg)
cfg_.train_cfg.disc_step = 2
inpaintor = build_model(
cfg_.model, train_cfg=cfg_.train_cfg, test_cfg=cfg_.test_cfg)
assert inpaintor.train_cfg.disc_step == 2
outputs = inpaintor.train_step(data_batch, optim_dict)
assert 'loss_l1_hole' not in outputs['log_vars']
# Test on GPU
if torch.cuda.is_available():
gt_img = torch.randn(1, 3, 256, 256).cuda()
mask = torch.zeros_like(gt_img)[:, 0:1, ...]
mask[..., 20:100, 100:120] = 1.
masked_img = gt_img * (1. - mask) + mask
inpaintor.cuda()
data_batch = dict(gt_img=gt_img, mask=mask, masked_img=masked_img)
output = inpaintor.forward_test(**data_batch)
assert 'eval_results' in output
output = inpaintor.val_step(data_batch)
assert 'eval_results' in output
optim_g = torch.optim.SGD(inpaintor.generator.parameters(), lr=0.1)
optim_d = torch.optim.SGD(inpaintor.disc.parameters(), lr=0.1)
optim_dict = dict(generator=optim_g, disc=optim_d)
outputs = inpaintor.train_step(data_batch, optim_dict)
assert outputs['num_samples'] == 1
results = outputs['results']
assert results['fake_res'].shape == (1, 3, 256, 256)
assert 'loss_l1_valid' in outputs['log_vars']
assert 'loss_out_percep' in outputs['log_vars']
assert 'disc_losses' in outputs['log_vars']
assert 'loss_g_fake' in outputs['log_vars']
# test forward test w/o save image
outputs = inpaintor.forward_test(
masked_img[0:1], mask[0:1], gt_img=gt_img[0:1, ...])
assert 'eval_results' in outputs
assert outputs['eval_results']['l1'] > 0
assert outputs['eval_results']['psnr'] > 0
assert outputs['eval_results']['ssim'] > 0
# test forward test w/o eval metrics
inpaintor.test_cfg = dict()
inpaintor.eval_with_metrics = False
outputs = inpaintor.forward_test(masked_img[0:1], mask[0:1])
for key in ['fake_res', 'fake_img']:
assert outputs[key].size() == (1, 3, 256, 256)
# test forward test w/ save image
with tempfile.TemporaryDirectory() as tmpdir:
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')])
assert os.path.exists(os.path.join(tmpdir, 'igccc_4396.png'))
# test forward test w/ save image w/ gt_img
with tempfile.TemporaryDirectory() as tmpdir:
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
assert os.path.exists(os.path.join(tmpdir, 'igccc.png'))
with pytest.raises(AssertionError):
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=tmpdir,
iteration=4396,
gt_img=gt_img[0:1, ...])
with pytest.raises(AssertionError):
outputs = inpaintor.forward_test(
masked_img[0:1],
mask[0:1],
save_image=True,
save_path=None,
iteration=4396,
meta=[dict(gt_img_path='igccc.png')],
gt_img=gt_img[0:1, ...])
cfg_ = copy.deepcopy(cfg)
cfg_.train_cfg.disc_step = 2
inpaintor = build_model(
cfg_.model, train_cfg=cfg_.train_cfg, test_cfg=cfg_.test_cfg)
inpaintor.cuda()
assert inpaintor.train_cfg.disc_step == 2
outputs = inpaintor.train_step(data_batch, optim_dict)
assert 'loss_l1_hole' not in outputs['log_vars']
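

# --- Note (added for illustration; not part of the original test) ---
# Unlike the GL/PConv tests, this file fills the hole with ones
# (masked_img = gt_img * (1. - mask) + mask) rather than zeros. The two
# conventions side by side, on toy tensors:
def _demo_masking_conventions():
    gt = torch.rand(1, 3, 8, 8)
    mask = torch.zeros(1, 1, 8, 8)
    mask[..., 2:6, 2:6] = 1.
    masked_zeros = gt * (1. - mask)  # GL / PConv style: hole -> 0
    masked_ones = gt * (1. - mask) + mask  # AOT style: hole -> 1
    return masked_zeros, masked_ones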


# File: BasicVSR_PlusPlus-master/tests/test_data/test_datasets/test_vfi_dataset.py
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
from mmcv.utils.testing import assert_dict_has_keys
from mmedit.datasets import BaseVFIDataset, build_dataset


class TestVFIDataset:
pipeline = [
dict(type='LoadImageFromFileList', io_backend='disk', key='inputs'),
dict(type='LoadImageFromFile', io_backend='disk', key='target'),
dict(type='FramesToTensor', keys=['inputs']),
dict(type='ImageToTensor', keys=['target']),
]
folder = 'tests/data/vimeo90k'
ann_file = 'tests/data/vimeo90k/vfi_ann.txt'

    def test_base_vfi_dataset(self):
dataset = BaseVFIDataset(self.pipeline, self.folder, self.ann_file)
dataset.__init__(self.pipeline, self.folder, self.ann_file)
dataset.load_annotations()
assert dataset.folder == self.folder
assert dataset.ann_file == self.ann_file
setattr(dataset, 'data_infos', [
dict(
inputs_path=[
'tests/data/vimeo90k/00001/0266/im1.png',
'tests/data/vimeo90k/00001/0266/im3.png'
],
target_path='tests/data/vimeo90k/00001/0266/im2.png',
key='00001/0266')
])
data = dataset.__getitem__(0)
        assert assert_dict_has_keys(data, ['folder', 'ann_file'])
results = [dict(eval_result=dict(psnr=1.1, ssim=0.3))]
eval_result = dataset.evaluate(results)
        assert assert_dict_has_keys(eval_result, ['psnr', 'ssim'])
with pytest.raises(TypeError):
dataset.evaluate(results[0])
with pytest.raises(AssertionError):
dataset.evaluate(results + results)

    def test_vfi_vimeo90k_dataset(self):
dataset_cfg = dict(
type='VFIVimeo90KDataset',
folder=self.folder,
ann_file=self.ann_file,
pipeline=self.pipeline)
dataset = build_dataset(dataset_cfg)
data_infos = dataset.data_infos[0]
        assert assert_dict_has_keys(data_infos,
                                    ['inputs_path', 'target_path', 'key'])


def test_vfi_dataset():
test_ = TestVFIDataset()
test_.test_base_vfi_dataset()
test_.test_vfi_vimeo90k_dataset()
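

# --- Concept sketch (added for illustration; not part of the original tests) ---
# evaluate() is exercised above with a single-sample result list; over N
# samples it averages each metric across the per-sample eval_result dicts.
# A paraphrase of that averaging (behaviour inferred from the assertions here
# and in test_sr_dataset.py, not copied from the mmedit implementation):
def _average_eval_results(results):
    keys = results[0]['eval_result'].keys()
    return {
        key: sum(res['eval_result'][key] for res in results) / len(results)
        for key in keys
    }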


# File: BasicVSR_PlusPlus-master/tests/test_data/test_datasets/test_sr_dataset.py
# Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from pathlib import Path
from unittest.mock import patch
import numpy as np
import pytest
from mmcv.utils.testing import assert_dict_has_keys
from mmedit.datasets import (BaseSRDataset, SRAnnotationDataset,
SRFacialLandmarkDataset, SRFolderDataset,
SRFolderGTDataset, SRFolderMultipleGTDataset,
SRFolderRefDataset, SRFolderVideoDataset,
SRLmdbDataset, SRREDSDataset,
SRREDSMultipleGTDataset, SRTestMultipleGTDataset,
SRVid4Dataset, SRVimeo90KDataset,
SRVimeo90KMultipleGTDataset)


def mock_open(*args, **kwargs):
"""unittest.mock_open wrapper.
unittest.mock_open doesn't support iteration. Wrap it to fix this bug.
Reference: https://stackoverflow.com/a/41656192
"""
import unittest
f_open = unittest.mock.mock_open(*args, **kwargs)
f_open.return_value.__iter__ = lambda self: iter(self.readline, '')
return f_open


class TestSRDatasets:

    @classmethod
def setup_class(cls):
cls.data_prefix = Path(__file__).parent.parent.parent / 'data'

    def test_base_super_resolution_dataset(self):

        class ToyDataset(BaseSRDataset):
"""Toy dataset for testing SRDataset."""

            def __init__(self, pipeline, test_mode=False):
super().__init__(pipeline, test_mode)

            def load_annotations(self):
pass

            def __len__(self):
return 2

        toy_dataset = ToyDataset(pipeline=[])
file_paths = [
osp.join('gt', 'baboon.png'),
osp.join('lq', 'baboon_x4.png')
]
file_paths = [str(self.data_prefix / v) for v in file_paths]
result = toy_dataset.scan_folder(self.data_prefix)
assert set(file_paths).issubset(set(result))
result = toy_dataset.scan_folder(str(self.data_prefix))
assert set(file_paths).issubset(set(result))
with pytest.raises(TypeError):
toy_dataset.scan_folder(123)
# test evaluate function
results = [{
'eval_result': {
'PSNR': 20,
'SSIM': 0.6
}
}, {
'eval_result': {
'PSNR': 30,
'SSIM': 0.8
}
}]
with pytest.raises(TypeError):
# results must be a list
toy_dataset.evaluate(results=5)
with pytest.raises(AssertionError):
# The length of results should be equal to the dataset len
toy_dataset.evaluate(results=[results[0]])
eval_result = toy_dataset.evaluate(results=results)
assert eval_result == {'PSNR': 25, 'SSIM': 0.7}
with pytest.raises(AssertionError):
results = [{
'eval_result': {
'PSNR': 20,
'SSIM': 0.6
}
}, {
'eval_result': {
'PSNR': 30
}
}]
# Length of evaluation result should be the same as the dataset len
toy_dataset.evaluate(results=results)

    def test_sr_annotation_dataset(self):
# setup
anno_file_path = self.data_prefix / 'train.txt'
sr_pipeline = [
dict(type='LoadImageFromFile', io_backend='disk', key='lq'),
dict(type='LoadImageFromFile', io_backend='disk', key='gt'),
dict(type='PairedRandomCrop', gt_patch_size=128),
dict(type='ImageToTensor', keys=['lq', 'gt'])
]
target_keys = [
'lq_path', 'gt_path', 'scale', 'lq', 'lq_ori_shape', 'gt',
'gt_ori_shape'
]
# input path is Path object
sr_annotation_dataset = SRAnnotationDataset(
lq_folder=self.data_prefix / 'lq',
gt_folder=self.data_prefix / 'gt',
ann_file=anno_file_path,
pipeline=sr_pipeline,
scale=4,
filename_tmpl='{}_x4')
data_infos = sr_annotation_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(self.data_prefix / 'lq' / 'baboon_x4.png'),
gt_path=str(self.data_prefix / 'gt' / 'baboon.png'))
]
result = sr_annotation_dataset[0]
assert (len(sr_annotation_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
# input path is str
sr_annotation_dataset = SRAnnotationDataset(
lq_folder=str(self.data_prefix / 'lq'),
gt_folder=str(self.data_prefix / 'gt'),
ann_file=str(anno_file_path),
pipeline=sr_pipeline,
scale=4,
filename_tmpl='{}_x4')
data_infos = sr_annotation_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(self.data_prefix / 'lq' / 'baboon_x4.png'),
gt_path=str(self.data_prefix / 'gt' / 'baboon.png'))
]
result = sr_annotation_dataset[0]
assert (len(sr_annotation_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)

    def test_sr_folder_dataset(self):
# setup
sr_pipeline = [
dict(type='LoadImageFromFile', io_backend='disk', key='lq'),
dict(type='LoadImageFromFile', io_backend='disk', key='gt'),
dict(type='PairedRandomCrop', gt_patch_size=128),
dict(type='ImageToTensor', keys=['lq', 'gt'])
]
target_keys = ['lq_path', 'gt_path', 'scale', 'lq', 'gt']
lq_folder = self.data_prefix / 'lq'
gt_folder = self.data_prefix / 'gt'
filename_tmpl = '{}_x4'
# input path is Path object
sr_folder_dataset = SRFolderDataset(
lq_folder=lq_folder,
gt_folder=gt_folder,
pipeline=sr_pipeline,
scale=4,
filename_tmpl=filename_tmpl)
data_infos = sr_folder_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(lq_folder / 'baboon_x4.png'),
gt_path=str(gt_folder / 'baboon.png'))
]
result = sr_folder_dataset[0]
assert (len(sr_folder_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
# input path is str
sr_folder_dataset = SRFolderDataset(
lq_folder=str(lq_folder),
gt_folder=str(gt_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl=filename_tmpl)
data_infos = sr_folder_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(lq_folder / 'baboon_x4.png'),
gt_path=str(gt_folder / 'baboon.png'))
]
result = sr_folder_dataset[0]
assert (len(sr_folder_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
def test_sr_folder_gt_dataset(self):
# setup
sr_pipeline = [
dict(type='LoadImageFromFile', io_backend='disk', key='gt'),
dict(type='ImageToTensor', keys=['gt'])
]
target_keys = ['gt_path', 'gt']
gt_folder = self.data_prefix / 'gt'
filename_tmpl = '{}_x4'
# input path is Path object
sr_folder_dataset = SRFolderGTDataset(
gt_folder=gt_folder,
pipeline=sr_pipeline,
scale=4,
filename_tmpl=filename_tmpl)
data_infos = sr_folder_dataset.data_infos
assert data_infos == [dict(gt_path=str(gt_folder / 'baboon.png'))]
result = sr_folder_dataset[0]
assert (len(sr_folder_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
# input path is str
sr_folder_dataset = SRFolderGTDataset(
gt_folder=str(gt_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl=filename_tmpl)
data_infos = sr_folder_dataset.data_infos
assert data_infos == [dict(gt_path=str(gt_folder / 'baboon.png'))]
result = sr_folder_dataset[0]
assert (len(sr_folder_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
def test_sr_folder_ref_dataset(self):
# setup
sr_pipeline = [
dict(type='LoadImageFromFile', io_backend='disk', key='lq'),
dict(type='LoadImageFromFile', io_backend='disk', key='gt'),
dict(type='LoadImageFromFile', io_backend='disk', key='ref'),
dict(type='PairedRandomCrop', gt_patch_size=128),
dict(type='ImageToTensor', keys=['lq', 'gt', 'ref'])
]
target_keys = [
'lq_path', 'gt_path', 'ref_path', 'scale', 'lq', 'gt', 'ref'
]
lq_folder = self.data_prefix / 'lq'
gt_folder = self.data_prefix / 'gt'
ref_folder = self.data_prefix / 'gt'
filename_tmpl = '{}_x4'
# input path is Path object
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=lq_folder,
gt_folder=gt_folder,
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
data_infos = sr_folder_ref_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(lq_folder / 'baboon_x4.png'),
gt_path=str(gt_folder / 'baboon.png'),
ref_path=str(ref_folder / 'baboon.png'))
]
result = sr_folder_ref_dataset[0]
assert len(sr_folder_ref_dataset) == 1
assert assert_dict_has_keys(result, target_keys)
# input path is str
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=str(lq_folder),
gt_folder=str(gt_folder),
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
data_infos = sr_folder_ref_dataset.data_infos
assert data_infos == [
dict(
lq_path=str(lq_folder / 'baboon_x4.png'),
gt_path=str(gt_folder / 'baboon.png'),
ref_path=str(ref_folder / 'baboon.png'))
]
result = sr_folder_ref_dataset[0]
assert len(sr_folder_ref_dataset) == 1
assert assert_dict_has_keys(result, target_keys)
with pytest.raises(AssertionError):
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=str(lq_folder),
gt_folder=str(self.data_prefix / 'image'), # fake gt_folder
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
with pytest.raises(AssertionError):
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=str(self.data_prefix / 'image'), # fake lq_folder
gt_folder=str(gt_folder),
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
with pytest.raises(AssertionError):
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=str(lq_folder),
gt_folder=str(self.data_prefix / 'bg'), # fake gt_folder
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
with pytest.raises(AssertionError):
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=str(self.data_prefix / 'bg'), # fake lq_folder
gt_folder=str(gt_folder),
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
with pytest.raises(AssertionError):
sr_folder_ref_dataset = SRFolderRefDataset(
lq_folder=None,
gt_folder=None,
ref_folder=str(ref_folder),
pipeline=sr_pipeline,
scale=4,
filename_tmpl_lq=filename_tmpl)
def test_sr_landmark_dataset(self):
# setup
sr_pipeline = [
dict(
type='LoadImageFromFile',
io_backend='disk',
key='gt',
flag='color',
channel_order='rgb',
backend='cv2')
]
target_keys = ['gt_path', 'bbox', 'shape', 'landmark']
gt_folder = self.data_prefix / 'face'
ann_file = self.data_prefix / 'facemark_ann.npy'
# input path is Path object
sr_landmark_dataset = SRFacialLandmarkDataset(
gt_folder=gt_folder,
ann_file=ann_file,
pipeline=sr_pipeline,
scale=4)
data_infos = sr_landmark_dataset.data_infos
assert len(data_infos) == 1
result = sr_landmark_dataset[0]
assert len(sr_landmark_dataset) == 1
assert assert_dict_has_keys(result, target_keys)
# input path is str
sr_landmark_dataset = SRFacialLandmarkDataset(
gt_folder=str(gt_folder),
ann_file=str(ann_file),
pipeline=sr_pipeline,
scale=4)
data_infos = sr_landmark_dataset.data_infos
assert len(data_infos) == 1
result = sr_landmark_dataset[0]
assert len(sr_landmark_dataset) == 1
assert assert_dict_has_keys(result, target_keys)
def test_sr_lmdb_dataset(self):
# setup
lq_lmdb_folder = self.data_prefix / 'lq.lmdb'
sr_pipeline = [
dict(
type='LoadImageFromFile',
io_backend='lmdb',
key='lq',
db_path=lq_lmdb_folder),
dict(
type='LoadImageFromFile',
io_backend='lmdb',
key='gt',
db_path=lq_lmdb_folder),
dict(type='ImageToTensor', keys=['lq', 'gt'])
]
target_keys = [
'lq_path', 'gt_path', 'scale', 'lq', 'lq_ori_shape', 'gt',
'gt_ori_shape'
]
# input path is Path object
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=lq_lmdb_folder,
gt_folder=lq_lmdb_folder, # fake gt_folder
pipeline=sr_pipeline,
scale=1)
data_infos = sr_lmdb_dataset.data_infos
assert data_infos == [dict(lq_path='baboon', gt_path='baboon')]
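        # With an LMDB backend, paths are LMDB keys (no file extension).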
result = sr_lmdb_dataset[0]
assert (len(sr_lmdb_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
# input path is str
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=str(lq_lmdb_folder),
            gt_folder=str(lq_lmdb_folder),  # fake gt_folder
pipeline=sr_pipeline,
scale=1)
data_infos = sr_lmdb_dataset.data_infos
assert data_infos == [dict(lq_path='baboon', gt_path='baboon')]
result = sr_lmdb_dataset[0]
assert (len(sr_lmdb_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
with pytest.raises(ValueError):
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=self.data_prefix, # normal folder
gt_folder=lq_lmdb_folder, # fake gt_folder
pipeline=sr_pipeline,
scale=1)
with pytest.raises(ValueError):
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=str(self.data_prefix), # normal folder
gt_folder=lq_lmdb_folder, # fake gt_folder
pipeline=sr_pipeline,
scale=1)
with pytest.raises(ValueError):
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=lq_lmdb_folder,
gt_folder=self.data_prefix, # normal folder
pipeline=sr_pipeline,
scale=1)
with pytest.raises(ValueError):
sr_lmdb_dataset = SRLmdbDataset(
lq_folder=lq_lmdb_folder,
gt_folder=str(self.data_prefix), # normal folder
pipeline=sr_pipeline,
scale=1)
def test_reds_dataset():
root_path = Path(__file__).parent.parent.parent / 'data'
txt_content = ('000/00000001.png (720, 1280, 3)\n'
'001/00000001.png (720, 1280, 3)\n'
'250/00000001.png (720, 1280, 3)\n')
mocked_open_function = mock_open(read_data=txt_content)
with patch('builtins.open', mocked_open_function):
# official val partition
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='official',
test_mode=False)
assert reds_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key=osp.join('000', '00000001'),
max_frame_num=100,
num_input_frames=5),
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key=osp.join('001', '00000001'),
max_frame_num=100,
num_input_frames=5)
]
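        # The 'official' partition reserves clips 240-269 for validation,
        # so of the annotated clips only '000' and '001' remain for training.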
# REDS4 val partition
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='REDS4',
test_mode=False)
assert reds_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key=osp.join('001', '00000001'),
max_frame_num=100,
num_input_frames=5),
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key=osp.join('250', '00000001'),
max_frame_num=100,
num_input_frames=5)
]
with pytest.raises(ValueError):
            # wrong val_partition
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='wrong_val_partition',
test_mode=False)
with pytest.raises(AssertionError):
            # num_input_frames should be an odd number
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=6,
pipeline=[],
scale=4,
val_partition='wrong_val_partition',
test_mode=False)
# test mode
# official val partition
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='official',
test_mode=True)
assert reds_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key=osp.join('250', '00000001'),
max_frame_num=100,
num_input_frames=5)
]
# REDS4 val partition
reds_dataset = SRREDSDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='REDS4',
test_mode=True)
assert reds_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key=osp.join('000', '00000001'),
max_frame_num=100,
num_input_frames=5)
]
def test_vimeo90k_dataset():
root_path = Path(__file__).parent.parent.parent / 'data'
txt_content = ('00001/0266 (256, 448, 3)\n00002/0268 (256, 448, 3)\n')
mocked_open_function = mock_open(read_data=txt_content)
lq_paths_1 = [
str(root_path / '00001' / '0266' / f'im{v}.png') for v in range(1, 8)
]
gt_paths_1 = [str(root_path / '00001' / '0266' / 'im4.png')]
lq_paths_2 = [
str(root_path / '00002' / '0268' / f'im{v}.png') for v in range(1, 8)
]
gt_paths_2 = [str(root_path / '00002' / '0268' / 'im4.png')]
with patch('builtins.open', mocked_open_function):
vimeo90k_dataset = SRVimeo90KDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=7,
pipeline=[],
scale=4,
test_mode=False)
assert vimeo90k_dataset.data_infos == [
dict(
lq_path=lq_paths_1,
gt_path=gt_paths_1,
key=osp.join('00001', '0266')),
dict(
lq_path=lq_paths_2,
gt_path=gt_paths_2,
key=osp.join('00002', '0268'))
]
with pytest.raises(AssertionError):
            # num_input_frames should be an odd number
vimeo90k_dataset = SRVimeo90KDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=6,
pipeline=[],
scale=4,
test_mode=False)
def test_vid4_dataset():
root_path = Path(__file__).parent.parent.parent / 'data'
txt_content = ('calendar 1 (320,480,3)\ncity 2 (320,480,3)\n')
mocked_open_function = mock_open(read_data=txt_content)
with patch('builtins.open', mocked_open_function):
vid4_dataset = SRVid4Dataset(
lq_folder=root_path / 'lq',
gt_folder=root_path / 'gt',
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
test_mode=False,
metric_average_mode='clip',
filename_tmpl='{:08d}')
assert vid4_dataset.data_infos == [
dict(
lq_path=str(root_path / 'lq'),
gt_path=str(root_path / 'gt'),
key=osp.join('calendar', '00000000'),
num_input_frames=5,
max_frame_num=1),
dict(
lq_path=str(root_path / 'lq'),
gt_path=str(root_path / 'gt'),
key=osp.join('city', '00000000'),
num_input_frames=5,
max_frame_num=2),
dict(
lq_path=str(root_path / 'lq'),
gt_path=str(root_path / 'gt'),
key=osp.join('city', '00000001'),
num_input_frames=5,
max_frame_num=2),
]
# test evaluate function ('clip' mode)
results = [{
'eval_result': {
'PSNR': 21,
'SSIM': 0.75
}
}, {
'eval_result': {
'PSNR': 22,
'SSIM': 0.8
}
}, {
'eval_result': {
'PSNR': 24,
'SSIM': 0.9
}
}]
eval_result = vid4_dataset.evaluate(results)
np.testing.assert_almost_equal(eval_result['PSNR'], 22)
np.testing.assert_almost_equal(eval_result['SSIM'], 0.8)
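        # 'clip' mode averages within each clip first: calendar -> 21,
        # city -> (22 + 24) / 2 = 23; then across clips: (21 + 23) / 2 = 22.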
# test evaluate function ('all' mode)
vid4_dataset = SRVid4Dataset(
lq_folder=root_path / 'lq',
gt_folder=root_path / 'gt',
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
test_mode=False,
metric_average_mode='all',
filename_tmpl='{:08d}')
eval_result = vid4_dataset.evaluate(results)
np.testing.assert_almost_equal(eval_result['PSNR'], 22.3333333)
np.testing.assert_almost_equal(eval_result['SSIM'], 0.81666666)
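        # 'all' mode averages over every frame instead:
        # (21 + 22 + 24) / 3 = 22.33..., (0.75 + 0.8 + 0.9) / 3 = 0.8166...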
with pytest.raises(AssertionError):
            # num_input_frames should be an odd number
SRVid4Dataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=6,
pipeline=[],
scale=4,
test_mode=False)
with pytest.raises(ValueError):
            # metric_average_mode can only be either 'clip' or 'all'
SRVid4Dataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
num_input_frames=5,
pipeline=[],
scale=4,
metric_average_mode='abc',
test_mode=False)
with pytest.raises(TypeError):
# results must be a list
vid4_dataset.evaluate(results=5)
with pytest.raises(AssertionError):
# The length of results should be equal to the dataset len
vid4_dataset.evaluate(results=[results[0]])
def test_sr_reds_multiple_gt_dataset():
root_path = Path(__file__).parent.parent.parent / 'data'
# official val partition
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=15,
pipeline=[],
scale=4,
val_partition='official',
test_mode=False)
assert len(reds_dataset.data_infos) == 240 # 240 training clips
assert reds_dataset.data_infos[0] == dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='000',
sequence_length=100,
num_input_frames=15)
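    # Multiple-GT datasets index whole clips rather than centre frames;
    # sequence_length is the clip length (100 frames per REDS clip).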
# REDS4 val partition
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=20,
pipeline=[],
scale=4,
val_partition='REDS4',
test_mode=False)
assert len(reds_dataset.data_infos) == 266 # 266 training clips
assert reds_dataset.data_infos[0] == dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='001',
sequence_length=100,
        num_input_frames=20)  # clip '000' has been removed by REDS4
with pytest.raises(ValueError):
        # wrong val_partition
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='wrong_val_partition',
test_mode=False)
# test mode
# official val partition
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='official',
test_mode=True)
assert len(reds_dataset.data_infos) == 30 # 30 test clips
assert reds_dataset.data_infos[0] == dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='240',
sequence_length=100,
num_input_frames=5)
# REDS4 val partition
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='REDS4',
test_mode=True)
assert len(reds_dataset.data_infos) == 4 # 4 test clips
assert reds_dataset.data_infos[1] == dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='011',
sequence_length=100,
num_input_frames=5)
# REDS4 val partition (repeat > 1)
reds_dataset = SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='REDS4',
repeat=2,
test_mode=True)
    assert len(reds_dataset.data_infos) == 8  # 4 test clips x 2 repeats
assert reds_dataset.data_infos[5] == dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='011',
sequence_length=100,
num_input_frames=5)
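    # With repeat=2 the 4 clips are listed twice, so index 5 maps back to
    # clip index 1 ('011').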
# REDS4 val partition (repeat != int)
with pytest.raises(TypeError):
SRREDSMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
val_partition='REDS4',
repeat=1.5,
test_mode=True)
def test_sr_vimeo90k_multiple_gt_dataset():
root_path = Path(__file__).parent.parent.parent / 'data' / 'vimeo90k'
txt_content = ('00001/0266 (256,448,3)\n')
mocked_open_function = mock_open(read_data=txt_content)
num_input_frames = 5
lq_paths = [
str(root_path / '00001' / '0266' / f'im{v}.png')
for v in range(1, num_input_frames + 1)
]
gt_paths = [
str(root_path / '00001' / '0266' / f'im{v}.png')
for v in range(1, num_input_frames + 1)
]
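    # In the multiple-GT setting every input frame is supervised, so the
    # lq and gt paths coincide here.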
with patch('builtins.open', mocked_open_function):
vimeo90k_dataset = SRVimeo90KMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
ann_file='fake_ann_file',
pipeline=[],
scale=4,
num_input_frames=num_input_frames,
test_mode=False)
assert vimeo90k_dataset.data_infos == [
dict(
lq_path=lq_paths,
gt_path=gt_paths,
key=osp.join('00001', '0266'))
]
def test_sr_test_multiple_gt_dataset():
root_path = Path(
__file__).parent.parent.parent / 'data' / 'test_multiple_gt'
test_dataset = SRTestMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
pipeline=[],
scale=4,
test_mode=True)
assert test_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='sequence_1',
sequence_length=2),
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='sequence_2',
sequence_length=1)
]
def test_sr_folder_multiple_gt_dataset():
root_path = Path(
__file__).parent.parent.parent / 'data' / 'test_multiple_gt'
# test without num_input_frames
test_dataset = SRFolderMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
pipeline=[],
scale=4,
test_mode=True)
assert test_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='sequence_1',
num_input_frames=2,
sequence_length=2),
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='sequence_2',
num_input_frames=1,
sequence_length=1)
]
# test with num_input_frames
test_dataset = SRFolderMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
pipeline=[],
scale=4,
num_input_frames=1,
test_mode=True)
assert test_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='sequence_1',
num_input_frames=1,
sequence_length=2),
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='sequence_2',
num_input_frames=1,
sequence_length=1)
]
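    # An explicit num_input_frames caps how many frames are drawn from each
    # sequence; sequence_length still records the full length.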
# with annotation file (without num_input_frames)
txt_content = ('sequence_1 2\n')
mocked_open_function = mock_open(read_data=txt_content)
with patch('builtins.open', mocked_open_function):
# test without num_input_frames
test_dataset = SRFolderMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
pipeline=[],
scale=4,
ann_file='fake_ann_file',
test_mode=True)
assert test_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='sequence_1',
num_input_frames=2,
sequence_length=2),
]
# with annotation file (with num_input_frames)
test_dataset = SRFolderMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
pipeline=[],
scale=4,
ann_file='fake_ann_file',
num_input_frames=1,
test_mode=True)
assert test_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key='sequence_1',
num_input_frames=1,
sequence_length=2),
]
# num_input_frames must be a positive integer
with pytest.raises(ValueError):
SRFolderMultipleGTDataset(
lq_folder=root_path,
gt_folder=root_path,
pipeline=[],
scale=4,
num_input_frames=-1,
test_mode=True)
def test_sr_folder_video_dataset():
root_path = Path(
__file__).parent.parent.parent / 'data' / 'test_multiple_gt'
test_dataset = SRFolderVideoDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
test_mode=True)
assert test_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key=osp.join('sequence_1', '00000000'),
num_input_frames=5,
max_frame_num=2),
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key=osp.join('sequence_1', '00000001'),
num_input_frames=5,
max_frame_num=2),
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key=osp.join('sequence_2', '00000000'),
num_input_frames=5,
max_frame_num=1),
]
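    # SRFolderVideoDataset indexes every frame with an 8-digit key;
    # max_frame_num is the number of frames in that sequence.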
# with annotation file
txt_content = ('sequence_1/00000000 2\n')
mocked_open_function = mock_open(read_data=txt_content)
with patch('builtins.open', mocked_open_function):
test_dataset = SRFolderVideoDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
ann_file='fake_ann_file',
test_mode=True)
assert test_dataset.data_infos == [
dict(
lq_path=str(root_path),
gt_path=str(root_path),
key=osp.join('sequence_1', '00000000'),
num_input_frames=5,
max_frame_num=2),
]
# test evaluate function ('clip' mode)
test_dataset = SRFolderVideoDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
metric_average_mode='clip',
test_mode=True)
results = [{
'eval_result': {
'PSNR': 21,
'SSIM': 0.75
}
}, {
'eval_result': {
'PSNR': 23,
'SSIM': 0.85
}
}, {
'eval_result': {
'PSNR': 24,
'SSIM': 0.9
}
}]
eval_result = test_dataset.evaluate(results)
np.testing.assert_almost_equal(eval_result['PSNR'], 23)
np.testing.assert_almost_equal(eval_result['SSIM'], 0.85)
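    # 'clip' mode: sequence_1 -> (21 + 23) / 2 = 22, sequence_2 -> 24,
    # then (22 + 24) / 2 = 23 (and likewise 0.85 for SSIM).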
# test evaluate function ('all' mode)
test_dataset = SRFolderVideoDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
metric_average_mode='all',
test_mode=True)
eval_result = test_dataset.evaluate(results)
np.testing.assert_almost_equal(eval_result['PSNR'], 22.6666666)
np.testing.assert_almost_equal(eval_result['SSIM'], 0.83333333)
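    # 'all' mode averages over every frame:
    # (21 + 23 + 24) / 3 = 22.66..., (0.75 + 0.85 + 0.9) / 3 = 0.8333...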
    # num_input_frames should be an odd number
with pytest.raises(AssertionError):
SRFolderVideoDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=6,
pipeline=[],
scale=4,
test_mode=True)
    # metric_average_mode can only be either 'clip' or 'all'
with pytest.raises(ValueError):
SRFolderVideoDataset(
lq_folder=root_path,
gt_folder=root_path,
num_input_frames=5,
pipeline=[],
scale=4,
metric_average_mode='abc',
test_mode=False)
# results must be a list
with pytest.raises(TypeError):
test_dataset.evaluate(results=5)
# The length of results should be equal to the dataset len
with pytest.raises(AssertionError):
test_dataset.evaluate(results=[results[0]])
| 37,364 | 32.541293 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_datasets/test_matting_datasets.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from pathlib import Path
import numpy as np
import pytest
from mmedit.datasets import AdobeComp1kDataset
class TestMattingDatasets:
@classmethod
def setup_class(cls):
# create para for creating a dataset.
cls.data_prefix = Path(__file__).parent.parent.parent / 'data'
cls.ann_file = osp.join(cls.data_prefix, 'test_list.json')
cls.pipeline = [
dict(type='LoadImageFromFile', key='alpha', flag='grayscale')
]
def test_comp1k_dataset(self):
comp1k_dataset = AdobeComp1kDataset(self.ann_file, self.pipeline,
self.data_prefix)
first_data = comp1k_dataset[0]
assert 'alpha' in first_data
assert isinstance(first_data['alpha'], np.ndarray)
assert first_data['alpha'].shape == (552, 800)
def test_comp1k_evaluate(self):
comp1k_dataset = AdobeComp1kDataset(self.ann_file, self.pipeline,
self.data_prefix)
with pytest.raises(TypeError):
comp1k_dataset.evaluate('Not a list object')
results = [{
'pred_alpha': None,
'eval_result': {
'SAD': 26,
'MSE': 0.006
}
}, {
'pred_alpha': None,
'eval_result': {
'SAD': 24,
'MSE': 0.004
}
}]
eval_result = comp1k_dataset.evaluate(results)
assert set(eval_result.keys()) == set(['SAD', 'MSE'])
assert eval_result['SAD'] == 25
assert eval_result['MSE'] == 0.005
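        # Metrics are averaged over results: SAD (26 + 24) / 2 = 25,
        # MSE (0.006 + 0.004) / 2 = 0.005.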
| 1,685 | 29.107143 | 73 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_datasets/test_generation_datasets.py | # Copyright (c) OpenMMLab. All rights reserved.
from pathlib import Path
import pytest
from mmcv.utils.testing import assert_dict_has_keys
from mmedit.datasets import (BaseGenerationDataset, GenerationPairedDataset,
GenerationUnpairedDataset)
class TestGenerationDatasets:
@classmethod
def setup_class(cls):
cls.data_prefix = Path(__file__).parent.parent.parent / 'data'
def test_base_generation_dataset(self):
class ToyDataset(BaseGenerationDataset):
"""Toy dataset for testing Generation Dataset."""
def load_annotations(self):
pass
toy_dataset = ToyDataset(pipeline=[])
file_paths = [
'paired/test/3.jpg', 'paired/train/1.jpg', 'paired/train/2.jpg'
]
file_paths = [str(self.data_prefix / v) for v in file_paths]
# test scan_folder
result = toy_dataset.scan_folder(self.data_prefix)
assert set(file_paths).issubset(set(result))
result = toy_dataset.scan_folder(str(self.data_prefix))
assert set(file_paths).issubset(set(result))
with pytest.raises(TypeError):
toy_dataset.scan_folder(123)
# test evaluate
toy_dataset.data_infos = file_paths
with pytest.raises(TypeError):
_ = toy_dataset.evaluate(1)
test_results = [dict(saved_flag=True), dict(saved_flag=True)]
with pytest.raises(AssertionError):
_ = toy_dataset.evaluate(test_results)
test_results = [
dict(saved_flag=True),
dict(saved_flag=True),
dict(saved_flag=False)
]
eval_result = toy_dataset.evaluate(test_results)
assert eval_result['val_saved_number'] == 2
def test_generation_paired_dataset(self):
# setup
img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
pipeline = [
dict(
type='LoadPairedImageFromFile',
io_backend='disk',
key='pair',
flag='color'),
dict(
type='Resize',
keys=['img_a', 'img_b'],
scale=(286, 286),
interpolation='bicubic'),
dict(
type='FixedCrop',
keys=['img_a', 'img_b'],
crop_size=(256, 256)),
dict(type='Flip', keys=['img_a', 'img_b'], direction='horizontal'),
dict(type='RescaleToZeroOne', keys=['img_a', 'img_b']),
dict(
type='Normalize',
keys=['img_a', 'img_b'],
to_rgb=True,
**img_norm_cfg),
dict(type='ImageToTensor', keys=['img_a', 'img_b']),
dict(
type='Collect',
keys=['img_a', 'img_b'],
meta_keys=['img_a_path', 'img_b_path'])
]
target_keys = ['img_a', 'img_b', 'meta']
target_meta_keys = ['img_a_path', 'img_b_path']
pair_folder = self.data_prefix / 'paired'
# input path is Path object
generation_paried_dataset = GenerationPairedDataset(
dataroot=pair_folder, pipeline=pipeline, test_mode=True)
data_infos = generation_paried_dataset.data_infos
assert data_infos == [
dict(pair_path=str(pair_folder / 'test' / '3.jpg'))
]
result = generation_paried_dataset[0]
assert (len(generation_paried_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(pair_folder / 'test' /
'3.jpg'))
assert (result['meta'].data['img_b_path'] == str(pair_folder / 'test' /
'3.jpg'))
# input path is str
generation_paried_dataset = GenerationPairedDataset(
dataroot=str(pair_folder), pipeline=pipeline, test_mode=True)
data_infos = generation_paried_dataset.data_infos
assert data_infos == [
dict(pair_path=str(pair_folder / 'test' / '3.jpg'))
]
result = generation_paried_dataset[0]
assert (len(generation_paried_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(pair_folder / 'test' /
'3.jpg'))
assert (result['meta'].data['img_b_path'] == str(pair_folder / 'test' /
'3.jpg'))
# test_mode = False
generation_paried_dataset = GenerationPairedDataset(
dataroot=str(pair_folder), pipeline=pipeline, test_mode=False)
data_infos = generation_paried_dataset.data_infos
assert data_infos == [
dict(pair_path=str(pair_folder / 'train' / '1.jpg')),
dict(pair_path=str(pair_folder / 'train' / '2.jpg'))
]
assert (len(generation_paried_dataset) == 2)
result = generation_paried_dataset[0]
assert assert_dict_has_keys(result, target_keys)
assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(pair_folder /
'train' / '1.jpg'))
assert (result['meta'].data['img_b_path'] == str(pair_folder /
'train' / '1.jpg'))
result = generation_paried_dataset[1]
assert assert_dict_has_keys(result, target_keys)
assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(pair_folder /
'train' / '2.jpg'))
assert (result['meta'].data['img_b_path'] == str(pair_folder /
'train' / '2.jpg'))
def test_generation_unpaired_dataset(self):
# setup
img_norm_cfg = dict(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
pipeline = [
dict(
type='LoadImageFromFile',
io_backend='disk',
key='img_a',
flag='color'),
dict(
type='LoadImageFromFile',
io_backend='disk',
key='img_b',
flag='color'),
dict(
type='Resize',
keys=['img_a', 'img_b'],
scale=(286, 286),
interpolation='bicubic'),
dict(
type='Crop',
keys=['img_a', 'img_b'],
crop_size=(256, 256),
random_crop=True),
dict(type='Flip', keys=['img_a'], direction='horizontal'),
dict(type='Flip', keys=['img_b'], direction='horizontal'),
dict(type='RescaleToZeroOne', keys=['img_a', 'img_b']),
dict(
type='Normalize',
keys=['img_a', 'img_b'],
to_rgb=True,
**img_norm_cfg),
dict(type='ImageToTensor', keys=['img_a', 'img_b']),
dict(
type='Collect',
keys=['img_a', 'img_b'],
meta_keys=['img_a_path', 'img_b_path'])
]
target_keys = ['img_a', 'img_b', 'meta']
target_meta_keys = ['img_a_path', 'img_b_path']
unpair_folder = self.data_prefix / 'unpaired'
# input path is Path object
generation_unpaired_dataset = GenerationUnpairedDataset(
dataroot=unpair_folder, pipeline=pipeline, test_mode=True)
data_infos_a = generation_unpaired_dataset.data_infos_a
data_infos_b = generation_unpaired_dataset.data_infos_b
assert data_infos_a == [
dict(path=str(unpair_folder / 'testA' / '5.jpg'))
]
assert data_infos_b == [
dict(path=str(unpair_folder / 'testB' / '6.jpg'))
]
result = generation_unpaired_dataset[0]
assert (len(generation_unpaired_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(unpair_folder /
'testA' / '5.jpg'))
assert (result['meta'].data['img_b_path'] == str(unpair_folder /
'testB' / '6.jpg'))
# input path is str
generation_unpaired_dataset = GenerationUnpairedDataset(
dataroot=str(unpair_folder), pipeline=pipeline, test_mode=True)
data_infos_a = generation_unpaired_dataset.data_infos_a
data_infos_b = generation_unpaired_dataset.data_infos_b
assert data_infos_a == [
dict(path=str(unpair_folder / 'testA' / '5.jpg'))
]
assert data_infos_b == [
dict(path=str(unpair_folder / 'testB' / '6.jpg'))
]
result = generation_unpaired_dataset[0]
assert (len(generation_unpaired_dataset) == 1)
assert assert_dict_has_keys(result, target_keys)
assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(unpair_folder /
'testA' / '5.jpg'))
assert (result['meta'].data['img_b_path'] == str(unpair_folder /
'testB' / '6.jpg'))
# test_mode = False
generation_unpaired_dataset = GenerationUnpairedDataset(
dataroot=str(unpair_folder), pipeline=pipeline, test_mode=False)
data_infos_a = generation_unpaired_dataset.data_infos_a
data_infos_b = generation_unpaired_dataset.data_infos_b
assert data_infos_a == [
dict(path=str(unpair_folder / 'trainA' / '1.jpg')),
dict(path=str(unpair_folder / 'trainA' / '2.jpg'))
]
assert data_infos_b == [
dict(path=str(unpair_folder / 'trainB' / '3.jpg')),
dict(path=str(unpair_folder / 'trainB' / '4.jpg'))
]
assert (len(generation_unpaired_dataset) == 2)
img_b_paths = [
str(unpair_folder / 'trainB' / '3.jpg'),
str(unpair_folder / 'trainB' / '4.jpg')
]
result = generation_unpaired_dataset[0]
assert assert_dict_has_keys(result, target_keys)
assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(unpair_folder /
'trainA' / '1.jpg'))
assert result['meta'].data['img_b_path'] in img_b_paths
result = generation_unpaired_dataset[1]
assert assert_dict_has_keys(result, target_keys)
assert assert_dict_has_keys(result['meta'].data, target_meta_keys)
assert (result['meta'].data['img_a_path'] == str(unpair_folder /
'trainA' / '2.jpg'))
assert result['meta'].data['img_b_path'] in img_b_paths
| 11,413 | 43.24031 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_datasets/test_repeat_dataset.py | # Copyright (c) OpenMMLab. All rights reserved.
from torch.utils.data import Dataset
from mmedit.datasets import RepeatDataset
def test_repeat_dataset():
class ToyDataset(Dataset):
def __init__(self):
super().__init__()
self.members = [1, 2, 3, 4, 5]
def __len__(self):
return len(self.members)
def __getitem__(self, idx):
return self.members[idx % 5]
toy_dataset = ToyDataset()
repeat_dataset = RepeatDataset(toy_dataset, 2)
assert len(repeat_dataset) == 10
assert repeat_dataset[2] == 3
assert repeat_dataset[8] == 4
| 623 | 23 | 50 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_formating.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmedit.datasets.pipelines import (Collect, FormatTrimap, GetMaskedImage,
ImageToTensor, ToTensor)
from mmedit.datasets.pipelines.formating import FramesToTensor
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
def test_to_tensor():
to_tensor = ToTensor(['str'])
with pytest.raises(TypeError):
results = dict(str='0')
to_tensor(results)
target_keys = ['tensor', 'numpy', 'sequence', 'int', 'float']
to_tensor = ToTensor(target_keys)
ori_results = dict(
tensor=torch.randn(2, 3),
numpy=np.random.randn(2, 3),
sequence=list(range(10)),
int=1,
float=0.1)
results = to_tensor(ori_results)
assert check_keys_contain(results.keys(), target_keys)
for key in target_keys:
assert isinstance(results[key], torch.Tensor)
assert torch.equal(results[key].data, ori_results[key])
# Add an additional key which is not in keys.
ori_results = dict(
tensor=torch.randn(2, 3),
numpy=np.random.randn(2, 3),
sequence=list(range(10)),
int=1,
float=0.1,
str='test')
results = to_tensor(ori_results)
assert check_keys_contain(results.keys(), target_keys)
for key in target_keys:
assert isinstance(results[key], torch.Tensor)
assert torch.equal(results[key].data, ori_results[key])
assert repr(
to_tensor) == to_tensor.__class__.__name__ + f'(keys={target_keys})'
def test_image_to_tensor():
ori_results = dict(img=np.random.randn(256, 256, 3))
keys = ['img']
    to_float32 = True  # ImageToTensor casts to float32 by default
    image_to_tensor = ImageToTensor(keys)
results = image_to_tensor(ori_results)
assert results['img'].shape == torch.Size([3, 256, 256])
assert isinstance(results['img'], torch.Tensor)
assert torch.equal(results['img'].data, ori_results['img'])
assert results['img'].dtype == torch.float32
ori_results = dict(img=np.random.randint(256, size=(256, 256)))
keys = ['img']
to_float32 = True
image_to_tensor = ImageToTensor(keys)
results = image_to_tensor(ori_results)
assert results['img'].shape == torch.Size([1, 256, 256])
assert isinstance(results['img'], torch.Tensor)
assert torch.equal(results['img'].data, ori_results['img'])
assert results['img'].dtype == torch.float32
assert repr(image_to_tensor) == (
image_to_tensor.__class__.__name__ +
f'(keys={keys}, to_float32={to_float32})')
def test_frames_to_tensor():
with pytest.raises(TypeError):
# results[key] should be a list
ori_results = dict(img=np.random.randn(12, 12, 3))
FramesToTensor(['img'])(ori_results)
ori_results = dict(
img=[np.random.randn(12, 12, 3),
np.random.randn(12, 12, 3)])
keys = ['img']
frames_to_tensor = FramesToTensor(keys, to_float32=False)
results = frames_to_tensor(ori_results)
assert results['img'].shape == torch.Size([2, 3, 12, 12])
assert isinstance(results['img'], torch.Tensor)
assert torch.equal(results['img'].data[0, ...], ori_results['img'][0])
assert torch.equal(results['img'].data[1, ...], ori_results['img'][1])
assert results['img'].dtype == torch.float64
ori_results = dict(
img=[np.random.randn(12, 12, 3),
np.random.randn(12, 12, 3)])
frames_to_tensor = FramesToTensor(keys, to_float32=True)
results = frames_to_tensor(ori_results)
assert results['img'].shape == torch.Size([2, 3, 12, 12])
assert isinstance(results['img'], torch.Tensor)
assert torch.equal(results['img'].data[0, ...], ori_results['img'][0])
assert torch.equal(results['img'].data[1, ...], ori_results['img'][1])
assert results['img'].dtype == torch.float32
ori_results = dict(img=[np.random.randn(12, 12), np.random.randn(12, 12)])
frames_to_tensor = FramesToTensor(keys, to_float32=True)
results = frames_to_tensor(ori_results)
assert results['img'].shape == torch.Size([2, 1, 12, 12])
assert isinstance(results['img'], torch.Tensor)
assert torch.equal(results['img'].data[0, ...], ori_results['img'][0])
assert torch.equal(results['img'].data[1, ...], ori_results['img'][1])
assert results['img'].dtype == torch.float32
def test_masked_img():
img = np.random.rand(4, 4, 1).astype(np.float32)
mask = np.zeros((4, 4, 1), dtype=np.float32)
mask[1, 1] = 1
results = dict(gt_img=img, mask=mask)
get_masked_img = GetMaskedImage()
results = get_masked_img(results)
masked_img = img * (1. - mask)
assert np.array_equal(results['masked_img'], masked_img)
name_ = repr(get_masked_img)
class_name = get_masked_img.__class__.__name__
assert name_ == class_name + "(img_name='gt_img', mask_name='mask')"
def test_format_trimap():
ori_trimap = np.random.randint(3, size=(64, 64))
ori_trimap[ori_trimap == 1] = 128
ori_trimap[ori_trimap == 2] = 255
from mmcv.parallel import DataContainer
ori_result = dict(
trimap=torch.from_numpy(ori_trimap.copy()), meta=DataContainer({}))
format_trimap = FormatTrimap(to_onehot=False)
results = format_trimap(ori_result)
result_trimap = results['trimap']
assert result_trimap.shape == (1, 64, 64)
assert ((result_trimap.numpy() == 0) == (ori_trimap == 0)).all()
assert ((result_trimap.numpy() == 1) == (ori_trimap == 128)).all()
assert ((result_trimap.numpy() == 2) == (ori_trimap == 255)).all()
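    # Without one-hot encoding, trimap values {0, 128, 255} are remapped to
    # class indices {0, 1, 2}; with to_onehot=True each class gets a channel.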
ori_result = dict(
trimap=torch.from_numpy(ori_trimap.copy()), meta=DataContainer({}))
format_trimap = FormatTrimap(to_onehot=True)
results = format_trimap(ori_result)
result_trimap = results['trimap']
assert result_trimap.shape == (3, 64, 64)
assert ((result_trimap[0, ...].numpy() == 1) == (ori_trimap == 0)).all()
assert ((result_trimap[1, ...].numpy() == 1) == (ori_trimap == 128)).all()
assert ((result_trimap[2, ...].numpy() == 1) == (ori_trimap == 255)).all()
assert repr(format_trimap) == format_trimap.__class__.__name__ + (
'(to_onehot=True)')
def test_collect():
inputs = dict(
img=np.random.randn(256, 256, 3),
label=[1],
img_name='test_image.png',
ori_shape=(256, 256, 3),
img_shape=(256, 256, 3),
pad_shape=(256, 256, 3),
flip_direction='vertical',
img_norm_cfg=dict(to_bgr=False))
keys = ['img', 'label']
meta_keys = ['img_shape', 'img_name', 'ori_shape']
collect = Collect(keys, meta_keys=meta_keys)
results = collect(inputs)
assert set(list(results.keys())) == set(['img', 'label', 'meta'])
inputs.pop('img')
assert set(results['meta'].data.keys()) == set(meta_keys)
for key in results['meta'].data:
assert results['meta'].data[key] == inputs[key]
assert repr(collect) == (
collect.__class__.__name__ +
f'(keys={keys}, meta_keys={collect.meta_keys})')
| 7,142 | 36.793651 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_normalization.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmedit.datasets.pipelines import Normalize, RescaleToZeroOne
class TestAugmentations:
@staticmethod
def assert_img_equal(img, ref_img, ratio_thr=0.999):
"""Check if img and ref_img are matched approximately."""
assert img.shape == ref_img.shape
assert img.dtype == ref_img.dtype
area = ref_img.shape[-1] * ref_img.shape[-2]
diff = np.abs(img.astype('int32') - ref_img.astype('int32'))
assert np.sum(diff <= 1) / float(area) > ratio_thr
@staticmethod
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
def check_normalize(self, origin_img, result_img, norm_cfg):
"""Check if the origin_img are normalized correctly into result_img
in a given norm_cfg."""
target_img = result_img.copy()
target_img *= norm_cfg['std'][None, None, :]
target_img += norm_cfg['mean'][None, None, :]
if norm_cfg['to_rgb']:
target_img = target_img[:, ::-1, ...].copy()
self.assert_img_equal(origin_img, target_img)
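        # The check inverts the normalization (x * std + mean) before
        # comparing against the original image.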
def test_normalize(self):
with pytest.raises(TypeError):
Normalize(['alpha'], dict(mean=[123.675, 116.28, 103.53]),
[58.395, 57.12, 57.375])
with pytest.raises(TypeError):
Normalize(['alpha'], [123.675, 116.28, 103.53],
dict(std=[58.395, 57.12, 57.375]))
target_keys = ['merged', 'img_norm_cfg']
merged = np.random.rand(240, 320, 3).astype(np.float32)
results = dict(merged=merged)
config = dict(
keys=['merged'],
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=False)
normalize = Normalize(**config)
normalize_results = normalize(results)
assert self.check_keys_contain(normalize_results.keys(), target_keys)
self.check_normalize(merged, normalize_results['merged'],
normalize_results['img_norm_cfg'])
merged = np.random.rand(240, 320, 3).astype(np.float32)
results = dict(merged=merged)
config = dict(
keys=['merged'],
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
normalize = Normalize(**config)
normalize_results = normalize(results)
assert self.check_keys_contain(normalize_results.keys(), target_keys)
self.check_normalize(merged, normalize_results['merged'],
normalize_results['img_norm_cfg'])
assert normalize.__repr__() == (
normalize.__class__.__name__ +
f"(keys={ ['merged']}, mean={np.array([123.675, 116.28, 103.53])},"
f' std={np.array([58.395, 57.12, 57.375])}, to_rgb=True)')
# input is an image list
merged = np.random.rand(240, 320, 3).astype(np.float32)
merged_2 = np.random.rand(240, 320, 3).astype(np.float32)
results = dict(merged=[merged, merged_2])
config = dict(
keys=['merged'],
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=False)
normalize = Normalize(**config)
normalize_results = normalize(results)
assert self.check_keys_contain(normalize_results.keys(), target_keys)
self.check_normalize(merged, normalize_results['merged'][0],
normalize_results['img_norm_cfg'])
self.check_normalize(merged_2, normalize_results['merged'][1],
normalize_results['img_norm_cfg'])
merged = np.random.rand(240, 320, 3).astype(np.float32)
merged_2 = np.random.rand(240, 320, 3).astype(np.float32)
results = dict(merged=[merged, merged_2])
config = dict(
keys=['merged'],
mean=[123.675, 116.28, 103.53],
std=[58.395, 57.12, 57.375],
to_rgb=True)
normalize = Normalize(**config)
normalize_results = normalize(results)
assert self.check_keys_contain(normalize_results.keys(), target_keys)
self.check_normalize(merged, normalize_results['merged'][0],
normalize_results['img_norm_cfg'])
self.check_normalize(merged_2, normalize_results['merged'][1],
normalize_results['img_norm_cfg'])
def test_rescale_to_zero_one(self):
target_keys = ['alpha']
alpha = np.random.rand(240, 320).astype(np.float32)
results = dict(alpha=alpha)
rescale_to_zero_one = RescaleToZeroOne(keys=['alpha'])
rescale_to_zero_one_results = rescale_to_zero_one(results)
assert self.check_keys_contain(rescale_to_zero_one_results.keys(),
target_keys)
assert rescale_to_zero_one_results['alpha'].shape == (240, 320)
np.testing.assert_almost_equal(rescale_to_zero_one_results['alpha'],
alpha / 255.)
assert repr(rescale_to_zero_one) == (
rescale_to_zero_one.__class__.__name__ + f"(keys={['alpha']})")
# input is image list
alpha = np.random.rand(240, 320).astype(np.float32)
alpha_2 = np.random.rand(240, 320).astype(np.float32)
results = dict(alpha=[alpha, alpha_2])
rescale_to_zero_one = RescaleToZeroOne(keys=['alpha'])
rescale_to_zero_one_results = rescale_to_zero_one(results)
assert rescale_to_zero_one_results['alpha'][0].shape == (240, 320)
assert rescale_to_zero_one_results['alpha'][1].shape == (240, 320)
np.testing.assert_almost_equal(rescale_to_zero_one_results['alpha'][0],
alpha / 255.)
np.testing.assert_almost_equal(rescale_to_zero_one_results['alpha'][1],
alpha_2 / 255.)
| 6,083 | 43.735294 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_random_degradations.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmedit.datasets.pipelines import (DegradationsWithShuffle, RandomBlur,
RandomJPEGCompression, RandomNoise,
RandomResize, RandomVideoCompression)
def test_random_noise():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# Gaussian noise
model = RandomNoise(
params=dict(
noise_type=['gaussian'],
noise_prob=[1],
gaussian_sigma=[0, 50],
gaussian_gray_noise_prob=1),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
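    # gaussian_gray_noise_prob=1 forces channel-shared (gray) noise.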
# Poisson noise
model = RandomNoise(
params=dict(
noise_type=['poisson'],
noise_prob=[1],
poisson_scale=[0, 1],
poisson_gray_noise_prob=1),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# skip degradations with prob < 1
params = dict(
noise_type=['gaussian'],
noise_prob=[1],
gaussian_sigma=[0, 50],
gaussian_gray_noise_prob=1,
prob=0)
model = RandomNoise(params=params, keys=['lq'])
assert model(results) == results
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_random_jpeg_compression():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomJPEGCompression(params=dict(quality=[5, 50]), keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# skip degradations with prob < 1
params = dict(quality=[5, 50], prob=0)
model = RandomJPEGCompression(params=params, keys=['lq'])
assert model(results) == results
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_random_video_compression():
results = {}
results['lq'] = [np.ones((8, 8, 3)).astype(np.float32)] * 5
model = RandomVideoCompression(
params=dict(
codec=['libx264', 'h264', 'mpeg4'],
codec_prob=[1 / 3., 1 / 3., 1 / 3.],
bitrate=[1e4, 1e5]),
keys=['lq'])
results = model(results)
assert results['lq'][0].shape == (8, 8, 3)
assert len(results['lq']) == 5
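    # Compression round-trips the clip through an ffmpeg codec, so frame
    # count and spatial shape are preserved while the content degrades.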
# skip degradations with prob < 1
params = dict(
codec=['libx264', 'h264', 'mpeg4'],
codec_prob=[1 / 3., 1 / 3., 1 / 3.],
bitrate=[1e4, 1e5],
prob=0)
model = RandomVideoCompression(params=params, keys=['lq'])
assert model(results) == results
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_random_resize():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# upscale
model = RandomResize(
params=dict(
resize_mode_prob=[1, 0, 0],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.]),
keys=['lq'])
results = model(results)
assert results['lq'].shape[0] >= 8 and results['lq'].shape[1] >= 8
# downscale
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 1, 0],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.]),
keys=['lq'])
results = model(results)
assert results['lq'].shape[0] <= 8 and results['lq'].shape[1] <= 8
# keep size
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.]),
keys=['lq'])
results = model(results)
assert results['lq'].shape[0] == 8 and results['lq'].shape[1] == 8
# given target_size
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
target_size=(16, 32)),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (16, 32, 3)
# step_size > 0
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
resize_step=0.05),
keys=['lq'])
results = model(results)
# is_size_even is True
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
model = RandomResize(
params=dict(
resize_mode_prob=[0, 1, 0],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
resize_step=0.05,
is_size_even=True),
keys=['lq'])
results = model(results)
assert results['lq'].shape[0] % 2 == 0
assert results['lq'].shape[1] % 2 == 0
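    # is_size_even=True constrains the resized height and width to be even.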
# skip degradation
model = RandomResize(
params=dict(
resize_mode_prob=[1, 0, 0],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
prob=0),
keys=['lq'])
assert model(results) == results
with pytest.raises(NotImplementedError):
params = dict(
resize_mode_prob=[1],
resize_scale=[1],
resize_opt=['abc'],
resize_prob=[1])
model = RandomResize(params=params, keys=['lq'])
results = model(results)
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_random_blur():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# isotropic Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['iso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# anisotropic Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['aniso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# isotropic generalized Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['generalized_iso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# anisotropic generalized Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['generalized_aniso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# isotropic plateau Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['plateau_iso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# anisotropic plateau Gaussian
model = RandomBlur(
params=dict(
kernel_size=[41],
kernel_list=['plateau_aniso'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# sinc (kernel size < 13)
model = RandomBlur(
params=dict(
kernel_size=[11],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# sinc (kernel size >= 13)
model = RandomBlur(
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# sinc (given omega)
model = RandomBlur(
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
omega=[0.1, 0.1]),
keys=['lq'])
results = model(results)
assert results['lq'].shape == (8, 8, 3)
# skip degradation
params = dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
prob=0)
model = RandomBlur(params=params, keys=['lq'])
assert model(results) == results
assert repr(model) == model.__class__.__name__ + f'(params={params}, ' \
+ "keys=['lq'])"
def test_degradations_with_shuffle():
results = {}
results['lq'] = np.ones((8, 8, 3)).astype(np.float32)
# shuffle all
model = DegradationsWithShuffle(
degradations=[
dict(
type='RandomBlur',
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
omega=[0.1, 0.1])),
dict(
type='RandomResize',
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
target_size=(16, 16))),
[
dict(
type='RandomJPEGCompression',
params=dict(quality=[5, 10])),
dict(
type='RandomJPEGCompression',
params=dict(quality=[15, 20]))
]
],
keys=['lq'],
shuffle_idx=None)
model(results)
# shuffle last 2
degradations = [
dict(
type='RandomBlur',
params=dict(
kernel_size=[15],
kernel_list=['sinc'],
kernel_prob=[1],
sigma_x=[0.2, 10],
sigma_y=[0.2, 10],
rotate_angle=[-3.1416, 3.1416],
omega=[0.1, 0.1])),
dict(
type='RandomResize',
params=dict(
resize_mode_prob=[0, 0, 1],
resize_scale=[0.5, 1.5],
resize_opt=['bilinear', 'area', 'bicubic'],
resize_prob=[1 / 3., 1 / 3., 1 / 3.],
target_size=(16, 16))),
[
dict(type='RandomJPEGCompression', params=dict(quality=[5, 10])),
dict(type='RandomJPEGCompression', params=dict(quality=[15, 20]))
]
]
model = DegradationsWithShuffle(
degradations=degradations, keys=['lq'], shuffle_idx=(1, 2))
model(results)
assert repr(model) == model.__class__.__name__ \
+ f'(degradations={degradations}, ' \
+ "keys=['lq'], " \
+ 'shuffle_idx=(1, 2))'
| 12,524 | 29.849754 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_generate_assistant.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmedit.datasets.pipelines import (GenerateCoordinateAndCell,
GenerateHeatmap)
def test_generate_heatmap():
inputs = dict(landmark=[(1, 2), (3, 4)])
generate_heatmap = GenerateHeatmap('landmark', 4, 16)
results = generate_heatmap(inputs)
assert set(list(results.keys())) == set(['landmark', 'heatmap'])
assert results['heatmap'][:, :, 0].shape == (16, 16)
assert repr(generate_heatmap) == (
f'{generate_heatmap.__class__.__name__}, '
f'keypoint={generate_heatmap.keypoint}, '
f'ori_size={generate_heatmap.ori_size}, '
f'target_size={generate_heatmap.target_size}, '
f'sigma={generate_heatmap.sigma}')
generate_heatmap = GenerateHeatmap('landmark', (4, 5), (16, 17))
results = generate_heatmap(inputs)
assert set(list(results.keys())) == set(['landmark', 'heatmap'])
assert results['heatmap'][:, :, 0].shape == (17, 16)
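    # Sizes are given as (w, h) while numpy arrays are (h, w), hence the
    # transposed heatmap shape.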
def test_generate_coordinate_and_cell():
tensor1 = torch.randn((3, 64, 48))
inputs1 = dict(lq=tensor1)
coordinate1 = GenerateCoordinateAndCell(scale=3.1, target_size=(128, 96))
results1 = coordinate1(inputs1)
assert set(list(results1.keys())) == set(['lq', 'coord', 'cell'])
assert repr(coordinate1) == (
coordinate1.__class__.__name__ +
f'sample_quantity={coordinate1.sample_quantity}, ' +
f'scale={coordinate1.scale}, ' +
f'target_size={coordinate1.target_size}')
tensor2 = torch.randn((3, 64, 48))
inputs2 = dict(gt=tensor2)
coordinate2 = GenerateCoordinateAndCell(
sample_quantity=64 * 48, scale=3.1, target_size=(128, 96))
results2 = coordinate2(inputs2)
assert set(list(results2.keys())) == set(['gt', 'coord', 'cell'])
assert results2['gt'].shape == (64 * 48, 3)
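    # When sample_quantity is set and 'gt' exists, gt is flattened to
    # (sample_quantity, C) so each sampled coordinate pairs with a pixel.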
inputs3 = dict()
coordinate3 = GenerateCoordinateAndCell(
sample_quantity=64 * 48, scale=3.1, target_size=(128, 96))
results3 = coordinate3(inputs3)
assert set(list(results3.keys())) == set(['coord', 'cell'])
| 2,102 | 39.442308 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_matlab_resize.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmedit.datasets.pipelines import MATLABLikeResize
def test_matlab_like_resize():
results = {}
# give scale
results['lq'] = np.ones((16, 16, 3))
imresize = MATLABLikeResize(keys=['lq'], scale=0.25)
results = imresize(results)
assert results['lq'].shape == (4, 4, 3)
# give scale
results['lq'] = np.ones((16, 16, 3))
imresize = MATLABLikeResize(keys=['lq'], output_shape=(6, 6))
results = imresize(results)
assert results['lq'].shape == (6, 6, 3)
# kernel must equal 'bicubic'
with pytest.raises(ValueError):
MATLABLikeResize(keys=['lq'], kernel='abc')
# kernel_width must equal 4.0
with pytest.raises(ValueError):
MATLABLikeResize(keys=['lq'], kernel_width=10)
# scale and output_shape cannot be both None
with pytest.raises(ValueError):
MATLABLikeResize(keys=['lq'])
assert repr(imresize) == imresize.__class__.__name__ \
+ "(keys=['lq'], scale=None, output_shape=(6, 6), " \
+ 'kernel=bicubic, kernel_width=4.0)'
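# A minimal usage sketch (names are illustrative, not from this test):
#   resize = MATLABLikeResize(keys=['img'], scale=0.5)
#   out = resize(dict(img=np.zeros((32, 32, 3))))  # out['img'] -> (16, 16, 3)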
| 1,121 | 28.526316 | 65 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_trimap.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import cv2
import numpy as np
import pytest
from mmedit.datasets.pipelines import (CompositeFg, GenerateSeg,
GenerateSoftSeg, GenerateTrimap,
GenerateTrimapWithDistTransform,
MergeFgAndBg, PerturbBg,
TransformTrimap)
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
def generate_ref_trimap(alpha, kernel_size, iterations, random):
"""Check if a trimap's value is correct."""
if isinstance(kernel_size, int):
kernel_size = kernel_size, kernel_size + 1
if isinstance(iterations, int):
iterations = iterations, iterations + 1
if random:
min_kernel, max_kernel = kernel_size
kernel_num = max_kernel - min_kernel
erode_ksize = min_kernel + np.random.randint(kernel_num)
erode_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(erode_ksize, erode_ksize))
dilate_ksize = min_kernel + np.random.randint(kernel_num)
dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(dilate_ksize, dilate_ksize))
min_iteration, max_iteration = iterations
erode_iter = np.random.randint(min_iteration, max_iteration)
dilate_iter = np.random.randint(min_iteration, max_iteration)
else:
erode_ksize, dilate_ksize = kernel_size
erode_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(erode_ksize, erode_ksize))
dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(dilate_ksize, dilate_ksize))
erode_iter, dilate_iter = iterations
h, w = alpha.shape
# erode
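    # Reference erosion: pad with the image maximum so the padded border can
    # never win the masked minimum, then slide the elliptical kernel and take
    # the minimum over its non-zero entries.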
erode_kh = erode_kw = erode_ksize
eroded = np.zeros_like(alpha)
src = alpha
pad = ((erode_kh // 2, (erode_kh - 1) // 2), (erode_kw // 2,
(erode_kw - 1) // 2))
for _ in range(erode_iter):
src = np.pad(src, pad, 'constant', constant_values=np.max(src))
for i in range(h):
for j in range(w):
target = src[i:i + erode_kh, j:j + erode_kw]
eroded[i, j] = np.min(
(target * erode_kernel)[erode_kernel == 1])
src = eroded
# dilate
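    # Reference dilation mirrors the erosion above: pad with the image
    # minimum so the padded border can never win the masked maximum.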
dilate_kh = dilate_kw = dilate_ksize
dilated = np.zeros_like(alpha)
src = alpha
pad = ((dilate_kh // 2, (dilate_kh - 1) // 2), (dilate_kw // 2,
(dilate_kw - 1) // 2))
for _ in range(dilate_iter):
src = np.pad(src, pad, constant_values=np.min(src))
for i in range(h):
for j in range(w):
target = src[i:i + dilate_kh, j:j + dilate_kw]
dilated[i, j] = np.max(
(target * dilate_kernel)[dilate_kernel == 1])
src = dilated
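    # Trimap convention: 255 = definite foreground (survives erosion),
    # 0 = definite background (still zero after dilation), 128 = unknown band.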
ref_trimap = np.zeros_like(alpha)
ref_trimap.fill(128)
ref_trimap[eroded >= 255] = 255
ref_trimap[dilated <= 0] = 0
return ref_trimap
def test_merge_fg_and_bg():
target_keys = ['fg', 'bg', 'alpha', 'merged']
fg = np.random.randn(32, 32, 3)
bg = np.random.randn(32, 32, 3)
alpha = np.random.randn(32, 32)
results = dict(fg=fg, bg=bg, alpha=alpha)
merge_fg_and_bg = MergeFgAndBg()
merge_fg_and_bg_results = merge_fg_and_bg(results)
assert check_keys_contain(merge_fg_and_bg_results.keys(), target_keys)
assert merge_fg_and_bg_results['merged'].shape == fg.shape
def test_generate_trimap():
with pytest.raises(ValueError):
# kernel_size must be an int or a tuple of 2 int
GenerateTrimap(1.5)
with pytest.raises(ValueError):
# kernel_size must be an int or a tuple of 2 int
GenerateTrimap((3, 3, 3))
with pytest.raises(ValueError):
# iterations must be an int or a tuple of 2 int
GenerateTrimap(3, iterations=1.5)
with pytest.raises(ValueError):
# iterations must be an int or a tuple of 2 int
GenerateTrimap(3, iterations=(3, 3, 3))
target_keys = ['alpha', 'trimap']
# check random mode
kernel_size = (3, 5)
iterations = (3, 5)
random = True
alpha = np.random.randn(32, 32)
results = dict(alpha=alpha)
generate_trimap = GenerateTrimap(kernel_size, iterations, random)
np.random.seed(123)
generate_trimap_results = generate_trimap(results)
trimap = generate_trimap_results['trimap']
assert check_keys_contain(generate_trimap_results.keys(), target_keys)
assert trimap.shape == alpha.shape
np.random.seed(123)
ref_trimap = generate_ref_trimap(alpha, kernel_size, iterations, random)
assert (trimap == ref_trimap).all()
# check non-random mode
kernel_size = (3, 5)
iterations = (5, 3)
random = False
generate_trimap = GenerateTrimap(kernel_size, iterations, random)
generate_trimap_results = generate_trimap(results)
trimap = generate_trimap_results['trimap']
assert check_keys_contain(generate_trimap_results.keys(), target_keys)
assert trimap.shape == alpha.shape
ref_trimap = generate_ref_trimap(alpha, kernel_size, iterations, random)
assert (trimap == ref_trimap).all()
# check repr string
kernel_size = 1
iterations = 1
generate_trimap = GenerateTrimap(kernel_size, iterations)
kernels = [
cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
(kernel_size, kernel_size))
]
assert repr(generate_trimap) == (
generate_trimap.__class__.__name__ +
f'(kernels={kernels}, iterations={(iterations, iterations + 1)}, '
f'random=True)')
def test_generate_trimap_with_dist_transform():
with pytest.raises(ValueError):
        # dist_thr must be a float that is greater than 1
GenerateTrimapWithDistTransform(dist_thr=-1)
target_keys = ['alpha', 'trimap']
alpha = np.random.randint(0, 256, (32, 32))
alpha[:8, :8] = 0
alpha[-8:, -8:] = 255
results = dict(alpha=alpha)
generate_trimap = GenerateTrimapWithDistTransform(dist_thr=3, random=False)
generate_trimap_results = generate_trimap(results)
trimap = generate_trimap_results['trimap']
assert check_keys_contain(generate_trimap_results.keys(), target_keys)
assert trimap.shape == alpha.shape
alpha = np.random.randint(0, 256, (32, 32))
results = dict(alpha=alpha)
generate_trimap = GenerateTrimapWithDistTransform(dist_thr=3, random=True)
generate_trimap_results = generate_trimap(results)
trimap = generate_trimap_results['trimap']
assert check_keys_contain(generate_trimap_results.keys(), target_keys)
assert trimap.shape == alpha.shape
assert repr(generate_trimap) == (
generate_trimap.__class__.__name__ + '(dist_thr=3, random=True)')
def test_composite_fg():
target_keys = ['alpha', 'fg', 'bg']
np.random.seed(0)
fg = np.random.rand(32, 32, 3).astype(np.float32)
bg = np.random.rand(32, 32, 3).astype(np.float32)
alpha = np.random.rand(32, 32).astype(np.float32)
results = dict(alpha=alpha, fg=fg, bg=bg)
# use merged dir as fake fg dir, trimap dir as fake alpha dir for unittest
composite_fg = CompositeFg([
f'tests{os.sep}data{os.sep}fg', f'tests{os.sep}data{os.sep}merged'
], [f'tests{os.sep}data{os.sep}alpha', f'tests{os.sep}data{os.sep}trimap'])
correct_fg_list = [
f'tests{os.sep}data{os.sep}fg{os.sep}GT05.jpg',
f'tests{os.sep}data{os.sep}merged{os.sep}GT05.jpg'
]
correct_alpha_list = [
f'tests{os.sep}data{os.sep}alpha{os.sep}GT05.jpg',
f'tests{os.sep}data{os.sep}trimap{os.sep}GT05.png'
]
assert composite_fg.fg_list == correct_fg_list
assert composite_fg.alpha_list == correct_alpha_list
composite_fg_results = composite_fg(results)
assert check_keys_contain(composite_fg_results.keys(), target_keys)
assert composite_fg_results['fg'].shape == (32, 32, 3)
fg = np.random.rand(32, 32, 3).astype(np.float32)
bg = np.random.rand(32, 32, 3).astype(np.float32)
alpha = np.random.rand(32, 32).astype(np.float32)
results = dict(alpha=alpha, fg=fg, bg=bg)
composite_fg = CompositeFg(
f'tests{os.sep}data{os.sep}fg',
f'tests{os.sep}data{os.sep}alpha',
interpolation='bilinear')
composite_fg_results = composite_fg(results)
assert check_keys_contain(composite_fg_results.keys(), target_keys)
assert composite_fg_results['fg'].shape == (32, 32, 3)
strs = (f"(fg_dirs=['tests{os.sep}data{os.sep}fg'], "
f"alpha_dirs=['tests{os.sep}data{os.sep}alpha'], "
"interpolation='bilinear')")
assert repr(composite_fg) == composite_fg.__class__.__name__ + \
strs.replace('\\', '\\\\')
def test_generate_seg():
with pytest.raises(ValueError):
# crop area should not exceed the image size
img = np.random.rand(32, 32, 3)
GenerateSeg._crop_hole(img, (0, 0), (64, 64))
target_keys = ['alpha', 'trimap', 'seg', 'num_holes']
alpha = np.random.randint(0, 255, (32, 32))
trimap = np.zeros_like(alpha)
trimap[(alpha > 0) & (alpha < 255)] = 128
trimap[alpha == 255] = 255
results = dict(alpha=alpha, trimap=trimap)
generate_seg = GenerateSeg()
generate_seg_results = generate_seg(results)
assert check_keys_contain(generate_seg_results.keys(), target_keys)
assert generate_seg_results['seg'].shape == alpha.shape
assert isinstance(generate_seg_results['num_holes'], int)
assert generate_seg_results['num_holes'] < 3
# check repr string and the default setting
assert repr(generate_seg) == generate_seg.__class__.__name__ + (
'(kernel_size=5, erode_iter_range=(10, 20), '
'dilate_iter_range=(15, 30), num_holes_range=(0, 3), '
'hole_sizes=[(15, 15), (25, 25), (35, 35), (45, 45)], '
'blur_ksizes=[(21, 21), (31, 31), (41, 41)]')
def test_perturb_bg():
with pytest.raises(ValueError):
        # gamma_ratio must be a float in [0, 1]
PerturbBg(-0.5)
with pytest.raises(ValueError):
        # gamma_ratio must be a float in [0, 1]
PerturbBg(1.1)
target_keys = ['bg', 'noisy_bg']
# set a random seed to make sure the test goes through every branch
np.random.seed(123)
img_shape = (32, 32, 3)
results = dict(bg=np.random.randint(0, 255, img_shape))
perturb_bg = PerturbBg(0.6)
perturb_bg_results = perturb_bg(results)
assert check_keys_contain(perturb_bg_results.keys(), target_keys)
assert perturb_bg_results['noisy_bg'].shape == img_shape
img_shape = (32, 32, 3)
results = dict(bg=np.random.randint(0, 255, img_shape))
perturb_bg = PerturbBg(0.6)
perturb_bg_results = perturb_bg(results)
assert check_keys_contain(perturb_bg_results.keys(), target_keys)
assert perturb_bg_results['noisy_bg'].shape == img_shape
repr_str = perturb_bg.__class__.__name__ + '(gamma_ratio=0.6)'
assert repr(perturb_bg) == repr_str
def test_generate_soft_seg():
with pytest.raises(TypeError):
# fg_thr must be a float
GenerateSoftSeg(fg_thr=[0.2])
with pytest.raises(TypeError):
# border_width must be an int
GenerateSoftSeg(border_width=25.)
with pytest.raises(TypeError):
# erode_ksize must be an int
GenerateSoftSeg(erode_ksize=5.)
with pytest.raises(TypeError):
# dilate_ksize must be an int
GenerateSoftSeg(dilate_ksize=5.)
with pytest.raises(TypeError):
# erode_iter_range must be a tuple of 2 int
GenerateSoftSeg(erode_iter_range=(3, 5, 7))
with pytest.raises(TypeError):
# dilate_iter_range must be a tuple of 2 int
GenerateSoftSeg(dilate_iter_range=(3, 5, 7))
with pytest.raises(TypeError):
# blur_ksizes must be a list of tuple
GenerateSoftSeg(blur_ksizes=[21, 21])
target_keys = ['seg', 'soft_seg']
seg = np.random.randint(0, 255, (512, 512))
results = dict(seg=seg)
generate_soft_seg = GenerateSoftSeg(
erode_ksize=3,
dilate_ksize=3,
erode_iter_range=(1, 2),
dilate_iter_range=(1, 2),
blur_ksizes=[(11, 11)])
generate_soft_seg_results = generate_soft_seg(results)
assert check_keys_contain(generate_soft_seg_results.keys(), target_keys)
assert generate_soft_seg_results['soft_seg'].shape == seg.shape
repr_str = generate_soft_seg.__class__.__name__ + (
'(fg_thr=0.2, border_width=25, erode_ksize=3, dilate_ksize=3, '
'erode_iter_range=(1, 2), dilate_iter_range=(1, 2), '
'blur_ksizes=[(11, 11)])')
assert repr(generate_soft_seg) == repr_str
def test_transform_trimap():
results = dict()
transform = TransformTrimap()
target_keys = ['trimap', 'transformed_trimap']
with pytest.raises(KeyError):
results_transformed = transform(results)
with pytest.raises(AssertionError):
dummy_trimap = np.zeros((100, 100, 1), dtype=np.uint8)
results['trimap'] = dummy_trimap
results_transformed = transform(results)
results = dict()
# generate dummy trimap with shape (100,100)
dummy_trimap = np.zeros((100, 100), dtype=np.uint8)
dummy_trimap[:50, :50] = 255
results['trimap'] = dummy_trimap
results_transformed = transform(results)
assert check_keys_contain(results_transformed.keys(), target_keys)
assert results_transformed['trimap'].shape == dummy_trimap.shape
assert results_transformed[
'transformed_trimap'].shape[:2] == dummy_trimap.shape
repr_str = transform.__class__.__name__
assert repr(transform) == repr_str
| 13,973 | 37.180328 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_pipeline_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from mmedit.datasets.pipelines.utils import (adjust_gamma, dtype_range,
make_coord)
def test_adjust_gamma():
"""Test Gamma Correction
    Adapted from
# https://github.com/scikit-image/scikit-image/blob/7e4840bd9439d1dfb6beaf549998452c99f97fdd/skimage/exposure/tests/test_exposure.py#L534 # noqa
"""
# Check that the shape is maintained.
img = np.ones([1, 1])
result = adjust_gamma(img, 1.5)
assert img.shape == result.shape
# Same image should be returned for gamma equal to one.
image = np.random.uniform(0, 255, (8, 8))
result = adjust_gamma(image, 1)
np.testing.assert_array_equal(result, image)
# White image should be returned for gamma equal to zero.
image = np.random.uniform(0, 255, (8, 8))
result = adjust_gamma(image, 0)
dtype = image.dtype.type
np.testing.assert_array_equal(result, dtype_range[dtype][1])
# Verifying the output with expected results for gamma
# correction with gamma equal to half.
image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
expected = np.array([[0, 31, 45, 55, 63, 71, 78, 84],
[90, 95, 100, 105, 110, 115, 119, 123],
[127, 131, 135, 139, 142, 146, 149, 153],
[156, 159, 162, 165, 168, 171, 174, 177],
[180, 183, 186, 188, 191, 194, 196, 199],
[201, 204, 206, 209, 211, 214, 216, 218],
[221, 223, 225, 228, 230, 232, 234, 236],
[238, 241, 243, 245, 247, 249, 251, 253]],
dtype=np.uint8)
result = adjust_gamma(image, 0.5)
np.testing.assert_array_equal(result, expected)
# Verifying the output with expected results for gamma
# correction with gamma equal to two.
image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
expected = np.array([[0, 0, 0, 0, 1, 1, 2, 3], [4, 5, 6, 7, 9, 10, 12, 14],
[16, 18, 20, 22, 25, 27, 30, 33],
[36, 39, 42, 45, 49, 52, 56, 60],
[64, 68, 72, 76, 81, 85, 90, 95],
[100, 105, 110, 116, 121, 127, 132, 138],
[144, 150, 156, 163, 169, 176, 182, 189],
[196, 203, 211, 218, 225, 233, 241, 249]],
dtype=np.uint8)
result = adjust_gamma(image, 2)
np.testing.assert_array_equal(result, expected)
# Test invalid image input
image = np.arange(0, 255, 4, np.uint8).reshape((8, 8))
with pytest.raises(ValueError):
adjust_gamma(image, -1)
def test_make_coord():
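    # make_coord returns evenly spaced coordinates (pixel centers) of an
    # (h, w) grid; `ranges` rescales each axis (default (-1, 1)) and
    # flatten=False keeps the (h, w, 2) layout instead of (h * w, 2).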
h, w = 20, 30
coord = make_coord((h, w), ranges=((10, 20), (-5, 5)))
    assert isinstance(coord, torch.Tensor)
assert coord.shape == (h * w, 2)
coord = make_coord((h, w), flatten=False)
    assert isinstance(coord, torch.Tensor)
assert coord.shape == (h, w, 2)
if __name__ == '__main__':
    test_make_coord()
| 3,075 | 36.512195 | 149 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_loading.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
from pathlib import Path
import mmcv
import numpy as np
import pytest
from mmedit.datasets.pipelines import (GetSpatialDiscountMask,
LoadImageFromFile,
LoadImageFromFileList, LoadMask,
LoadPairedImageFromFile,
RandomLoadResizeBg)
def test_load_image_from_file():
path_baboon = Path(
__file__).parent.parent.parent / 'data' / 'gt' / 'baboon.png'
img_baboon = mmcv.imread(str(path_baboon), flag='color')
path_baboon_x4 = Path(
__file__).parent.parent.parent / 'data' / 'lq' / 'baboon_x4.png'
img_baboon_x4 = mmcv.imread(str(path_baboon_x4), flag='color')
# read gt image
# input path is Path object
results = dict(gt_path=path_baboon)
config = dict(io_backend='disk', key='gt')
image_loader = LoadImageFromFile(**config)
results = image_loader(results)
assert results['gt'].shape == (480, 500, 3)
np.testing.assert_almost_equal(results['gt'], img_baboon)
assert results['gt_path'] == str(path_baboon)
# input path is str
results = dict(gt_path=str(path_baboon))
results = image_loader(results)
assert results['gt'].shape == (480, 500, 3)
np.testing.assert_almost_equal(results['gt'], img_baboon)
assert results['gt_path'] == str(path_baboon)
# read lq image
# input path is Path object
results = dict(lq_path=path_baboon_x4)
config = dict(io_backend='disk', key='lq')
image_loader = LoadImageFromFile(**config)
results = image_loader(results)
assert results['lq'].shape == (120, 125, 3)
np.testing.assert_almost_equal(results['lq'], img_baboon_x4)
assert results['lq_path'] == str(path_baboon_x4)
# input path is str
results = dict(lq_path=str(path_baboon_x4))
results = image_loader(results)
assert results['lq'].shape == (120, 125, 3)
np.testing.assert_almost_equal(results['lq'], img_baboon_x4)
assert results['lq_path'] == str(path_baboon_x4)
assert repr(image_loader) == (
image_loader.__class__.__name__ +
('(io_backend=disk, key=lq, '
'flag=color, save_original_img=False, channel_order=bgr, '
'use_cache=False)'))
results = dict(lq_path=path_baboon_x4)
config = dict(
io_backend='disk', key='lq', flag='grayscale', save_original_img=True)
image_loader = LoadImageFromFile(**config)
results = image_loader(results)
assert results['lq'].shape == (120, 125)
assert results['lq_ori_shape'] == (120, 125)
np.testing.assert_almost_equal(results['ori_lq'], results['lq'])
assert id(results['ori_lq']) != id(results['lq'])
# test: use_cache
results = dict(gt_path=path_baboon)
config = dict(io_backend='disk', key='gt', use_cache=True)
image_loader = LoadImageFromFile(**config)
assert image_loader.cache is None
assert repr(image_loader) == (
image_loader.__class__.__name__ +
('(io_backend=disk, key=gt, '
'flag=color, save_original_img=False, channel_order=bgr, '
'use_cache=True)'))
results = image_loader(results)
assert image_loader.cache is not None
assert str(path_baboon) in image_loader.cache
assert results['gt'].shape == (480, 500, 3)
assert results['gt_path'] == str(path_baboon)
np.testing.assert_almost_equal(results['gt'], img_baboon)
# convert to y-channel (bgr2y)
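    # convert_to='y' keeps only the luma (Y) channel of YCbCr, the channel
    # commonly used when computing PSNR/SSIM for restoration models.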
results = dict(gt_path=path_baboon)
config = dict(io_backend='disk', key='gt', convert_to='y')
image_loader = LoadImageFromFile(**config)
results = image_loader(results)
assert results['gt'].shape == (480, 500, 1)
img_baboon_y = mmcv.bgr2ycbcr(img_baboon, y_only=True)
img_baboon_y = np.expand_dims(img_baboon_y, axis=2)
np.testing.assert_almost_equal(results['gt'], img_baboon_y)
assert results['gt_path'] == str(path_baboon)
# convert to y-channel (rgb2y)
results = dict(gt_path=path_baboon)
config = dict(
io_backend='disk', key='gt', channel_order='rgb', convert_to='y')
image_loader = LoadImageFromFile(**config)
results = image_loader(results)
assert results['gt'].shape == (480, 500, 1)
img_baboon_y = mmcv.bgr2ycbcr(img_baboon, y_only=True)
img_baboon_y = np.expand_dims(img_baboon_y, axis=2)
np.testing.assert_almost_equal(results['gt'], img_baboon_y)
assert results['gt_path'] == str(path_baboon)
# convert to y-channel (ValueError)
results = dict(gt_path=path_baboon)
config = dict(io_backend='disk', key='gt', convert_to='abc')
image_loader = LoadImageFromFile(**config)
with pytest.raises(ValueError):
results = image_loader(results)
def test_load_image_from_file_list():
path_baboon = Path(
__file__).parent.parent.parent / 'data' / 'gt' / 'baboon.png'
img_baboon = mmcv.imread(str(path_baboon), flag='color')
path_baboon_x4 = Path(
__file__).parent.parent.parent / 'data' / 'lq' / 'baboon_x4.png'
img_baboon_x4 = mmcv.imread(str(path_baboon_x4), flag='color')
# input path is Path object
results = dict(lq_path=[path_baboon_x4, path_baboon])
config = dict(io_backend='disk', key='lq')
image_loader = LoadImageFromFileList(**config)
results = image_loader(results)
np.testing.assert_almost_equal(results['lq'][0], img_baboon_x4)
np.testing.assert_almost_equal(results['lq'][1], img_baboon)
assert results['lq_ori_shape'] == [(120, 125, 3), (480, 500, 3)]
assert results['lq_path'] == [str(path_baboon_x4), str(path_baboon)]
# input path is str
results = dict(lq_path=[str(path_baboon_x4), str(path_baboon)])
config = dict(io_backend='disk', key='lq')
image_loader = LoadImageFromFileList(**config)
results = image_loader(results)
np.testing.assert_almost_equal(results['lq'][0], img_baboon_x4)
np.testing.assert_almost_equal(results['lq'][1], img_baboon)
assert results['lq_path'] == [str(path_baboon_x4), str(path_baboon)]
# save ori_img
results = dict(lq_path=[path_baboon_x4])
config = dict(io_backend='disk', key='lq', save_original_img=True)
image_loader = LoadImageFromFileList(**config)
results = image_loader(results)
np.testing.assert_almost_equal(results['lq'][0], img_baboon_x4)
assert results['lq_ori_shape'] == [(120, 125, 3)]
assert results['lq_path'] == [str(path_baboon_x4)]
np.testing.assert_almost_equal(results['ori_lq'][0], img_baboon_x4)
with pytest.raises(TypeError):
# filepath should be list
results = dict(lq_path=path_baboon_x4)
image_loader(results)
# convert to y-channel (bgr2y)
results = dict(lq_path=[str(path_baboon_x4), str(path_baboon)])
config = dict(io_backend='disk', key='lq', convert_to='y')
image_loader = LoadImageFromFileList(**config)
results = image_loader(results)
img_baboon_x4_y = mmcv.bgr2ycbcr(img_baboon_x4, y_only=True)
img_baboon_y = mmcv.bgr2ycbcr(img_baboon, y_only=True)
img_baboon_x4_y = np.expand_dims(img_baboon_x4_y, axis=2)
img_baboon_y = np.expand_dims(img_baboon_y, axis=2)
np.testing.assert_almost_equal(results['lq'][0], img_baboon_x4_y)
np.testing.assert_almost_equal(results['lq'][1], img_baboon_y)
assert results['lq_path'] == [str(path_baboon_x4), str(path_baboon)]
# convert to y-channel (rgb2y)
results = dict(lq_path=[str(path_baboon_x4), str(path_baboon)])
config = dict(
io_backend='disk', key='lq', channel_order='rgb', convert_to='y')
image_loader = LoadImageFromFileList(**config)
results = image_loader(results)
np.testing.assert_almost_equal(results['lq'][0], img_baboon_x4_y)
np.testing.assert_almost_equal(results['lq'][1], img_baboon_y)
assert results['lq_path'] == [str(path_baboon_x4), str(path_baboon)]
# convert to y-channel (ValueError)
results = dict(lq_path=[str(path_baboon_x4), str(path_baboon)])
config = dict(io_backend='disk', key='lq', convert_to='abc')
image_loader = LoadImageFromFileList(**config)
with pytest.raises(ValueError):
results = image_loader(results)
class TestMattingLoading:
@staticmethod
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
@classmethod
def setup_class(cls):
data_prefix = 'tests/data'
ann_file = osp.join(data_prefix, 'test_list.json')
data_infos = mmcv.load(ann_file)
cls.results = dict()
for data_info in data_infos:
for key in data_info:
cls.results[key] = osp.join(data_prefix, data_info[key])
def test_random_load_bg(self):
target_keys = ['bg']
results = dict(fg=np.random.rand(128, 128))
random_load_bg = RandomLoadResizeBg('tests/data/bg')
for _ in range(2):
random_load_bg_results = random_load_bg(results)
assert self.check_keys_contain(random_load_bg_results.keys(),
target_keys)
assert isinstance(random_load_bg_results['bg'], np.ndarray)
assert random_load_bg_results['bg'].shape == (128, 128, 3)
assert repr(random_load_bg) == random_load_bg.__class__.__name__ + (
"(bg_dir='tests/data/bg')")
class TestInpaintLoading:
@classmethod
def setup_class(cls):
cls.img_path = Path(__file__).parent.parent.parent.joinpath(
'data/image/test.png')
cls.results = dict(img_info=dict(filename=cls.img_path))
def test_load_mask(self):
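        # LoadMask supports several mask sources: 'set' samples from a fixed
        # file list, 'ff' draws free-form brush strokes, 'irregular' draws
        # irregular holes, 'bbox' samples a rectangular hole, and 'file'
        # reads the mask given by results['mask_path'].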
# test mask mode: set
mask_config = dict(
mask_list_file='./tests/data/mask_list.txt',
prefix='./tests/data',
io_backend='disk',
flag='unchanged',
file_client_kwargs=dict())
set_loader = LoadMask('set', mask_config)
class_name = set_loader.__class__.__name__
assert repr(set_loader) == class_name + "(mask_mode='set')"
for _ in range(2):
results = dict()
results = set_loader(results)
gt_mask = mmcv.imread(
'./tests/data/mask/test.png', flag='unchanged')
assert np.array_equal(results['mask'], gt_mask[..., 0:1] / 255.)
mask_config = dict(
mask_list_file='./tests/data/mask_list_single_ch.txt',
prefix='./tests/data',
io_backend='disk',
flag='unchanged',
file_client_kwargs=dict())
# test mask mode: set with input as single channel image
set_loader = LoadMask('set', mask_config)
results = dict()
results = set_loader(results)
gt_mask = mmcv.imread(
'./tests/data/mask/test_single_ch.png', flag='unchanged')
gt_mask = np.expand_dims(gt_mask, axis=2)
assert np.array_equal(results['mask'], gt_mask[..., 0:1] / 255.)
# test mask mode: ff
mask_config = dict(
img_shape=(256, 256),
num_vertices=(4, 12),
mean_angle=1.2,
angle_range=0.4,
brush_width=(12, 40))
ff_loader = LoadMask('ff', mask_config)
results = dict()
results = ff_loader(results)
assert results['mask'].shape == (256, 256, 1)
# test mask mode: irregular holes
mask_config = dict(
img_shape=(256, 256),
num_vertices=(4, 12),
max_angle=4.,
length_range=(10, 100),
brush_width=(10, 40),
area_ratio_range=(0.15, 0.5))
irregular_loader = LoadMask('irregular', mask_config)
results = dict()
results = irregular_loader(results)
assert results['mask'].shape == (256, 256, 1)
# test mask mode: bbox
mask_config = dict(img_shape=(256, 256), max_bbox_shape=128)
bbox_loader = LoadMask('bbox', mask_config)
results = dict()
results = bbox_loader(results)
assert results['mask'].shape == (256, 256, 1)
# test mask mode: file
mask_loader = LoadMask('file')
mask = mask_loader(
dict(mask_path='./tests/data/mask/test_single_ch.png'))
assert mask['mask'].shape == (256, 256, 1)
with pytest.raises(NotImplementedError):
loader = LoadMask('xxxx', mask_config)
results = loader(results)
class TestGenerationLoading:
@staticmethod
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
@classmethod
def setup_class(cls):
cls.pair_path = osp.join('tests', 'data/paired/train/1.jpg')
cls.results = dict(pair_path=cls.pair_path)
cls.pair_img = mmcv.imread(str(cls.pair_path), flag='color')
w = cls.pair_img.shape[1]
new_w = w // 2
cls.img_a = cls.pair_img[:, :new_w, :]
cls.img_b = cls.pair_img[:, new_w:, :]
cls.pair_shape = cls.pair_img.shape
cls.img_shape = cls.img_a.shape
cls.pair_shape_gray = (256, 512, 1)
cls.img_shape_gray = (256, 256, 1)
def test_load_paired_image_from_file(self):
# RGB
target_keys = [
'pair_path', 'pair', 'pair_ori_shape', 'img_a_path', 'img_a',
'img_a_ori_shape', 'img_b_path', 'img_b', 'img_b_ori_shape'
]
config = dict(io_backend='disk', key='pair', flag='color')
results = copy.deepcopy(self.results)
load_paired_image_from_file = LoadPairedImageFromFile(**config)
results = load_paired_image_from_file(results)
assert self.check_keys_contain(results.keys(), target_keys)
assert results['pair'].shape == self.pair_shape
assert results['pair_ori_shape'] == self.pair_shape
np.testing.assert_equal(results['pair'], self.pair_img)
assert results['pair_path'] == self.pair_path
assert results['img_a'].shape == self.img_shape
assert results['img_a_ori_shape'] == self.img_shape
np.testing.assert_equal(results['img_a'], self.img_a)
assert results['img_a_path'] == self.pair_path
assert results['img_b'].shape == self.img_shape
assert results['img_b_ori_shape'] == self.img_shape
np.testing.assert_equal(results['img_b'], self.img_b)
assert results['img_b_path'] == self.pair_path
# Grayscale & save_original_img
target_keys = [
'pair_path', 'pair', 'pair_ori_shape', 'ori_pair', 'img_a_path',
'img_a', 'img_a_ori_shape', 'ori_img_a', 'img_b_path', 'img_b',
'img_b_ori_shape', 'ori_img_b'
]
config = dict(
io_backend='disk',
key='pair',
flag='grayscale',
save_original_img=True)
results = copy.deepcopy(self.results)
load_paired_image_from_file = LoadPairedImageFromFile(**config)
results = load_paired_image_from_file(results)
assert self.check_keys_contain(results.keys(), target_keys)
assert results['pair'].shape == self.pair_shape_gray
assert results['pair_ori_shape'] == self.pair_shape_gray
np.testing.assert_equal(results['pair'], results['ori_pair'])
assert id(results['ori_pair']) != id(results['pair'])
assert results['pair_path'] == self.pair_path
assert results['img_a'].shape == self.img_shape_gray
assert results['img_a_ori_shape'] == self.img_shape_gray
np.testing.assert_equal(results['img_a'], results['ori_img_a'])
assert id(results['ori_img_a']) != id(results['img_a'])
assert results['img_a_path'] == self.pair_path
assert results['img_b'].shape == self.img_shape_gray
assert results['img_b_ori_shape'] == self.img_shape_gray
np.testing.assert_equal(results['img_b'], results['ori_img_b'])
assert id(results['ori_img_b']) != id(results['img_b'])
assert results['img_b_path'] == self.pair_path
def test_dct_mask():
mask = np.zeros((64, 64, 1))
mask[20:40, 20:40] = 1.
mask_bbox = [20, 20, 20, 20]
results = dict(mask=mask, mask_bbox=mask_bbox)
dct_mask = GetSpatialDiscountMask()
results = dct_mask(results)
assert 'discount_mask' in results
assert results['discount_mask'].shape == (64, 64, 1)
mask_height = mask_width = 20
gamma = 0.99
beta = 1.5
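    # Reference for GetSpatialDiscountMask (cf. DeepFill's spatially
    # discounted reconstruction loss): each in-hole pixel is weighted by
    # max(gamma**(beta * di), gamma**(beta * dj)), where di/dj are its
    # distances to the nearest hole border along each axis.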
mask_values = np.ones((mask_width, mask_height, 1))
for i in range(mask_width):
for j in range(mask_height):
mask_values[i,
j] = max(gamma**(min(i, mask_width - i - 1) * beta),
gamma**(min(j, mask_height - j - 1) * beta))
dct_mask_test = np.zeros_like(mask)
dct_mask_test[20:40, 20:40, ...] = mask_values
np.testing.assert_almost_equal(dct_mask_test, results['discount_mask'])
repr_str = dct_mask.__class__.__name__ + (f'(gamma={dct_mask.gamma}, '
f'beta={dct_mask.beta})')
assert repr_str == repr(dct_mask)
| 17,240 | 39.952494 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_compose.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmedit.datasets.pipelines import Compose, ImageToTensor
def check_keys_equal(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys) == set(result_keys)
def test_compose():
with pytest.raises(TypeError):
Compose('LoadAlpha')
target_keys = ['img', 'meta']
img = np.random.randn(256, 256, 3)
results = dict(img=img, abandoned_key=None, img_name='test_image.png')
test_pipeline = [
dict(type='Collect', keys=['img'], meta_keys=['img_name']),
dict(type='ImageToTensor', keys=['img'])
]
compose = Compose(test_pipeline)
compose_results = compose(results)
assert check_keys_equal(compose_results.keys(), target_keys)
assert check_keys_equal(compose_results['meta'].data.keys(), ['img_name'])
results = None
image_to_tensor = ImageToTensor(keys=[])
test_pipeline = [image_to_tensor]
compose = Compose(test_pipeline)
compose_results = compose(results)
assert compose_results is None
assert repr(compose) == (
compose.__class__.__name__ + f'(\n {image_to_tensor}\n)')
| 1,222 | 30.358974 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_random_down_sampling.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
from mmedit.datasets.pipelines import RandomDownSampling
def test_random_down_sampling():
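    # RandomDownSampling draws a random scale from [scale_min, scale_max] and
    # downsamples 'gt' into 'lq', recording the chosen 'scale'; when
    # patch_size is set, a random patch is cropped first so the lq side has a
    # fixed spatial size.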
img1 = np.uint8(np.random.randn(480, 640, 3) * 255)
inputs1 = dict(gt=img1)
down_sampling1 = RandomDownSampling(
scale_min=1, scale_max=4, patch_size=None)
results1 = down_sampling1(inputs1)
assert set(list(results1.keys())) == set(['gt', 'lq', 'scale'])
assert repr(down_sampling1) == (
down_sampling1.__class__.__name__ +
f' scale_min={down_sampling1.scale_min}, ' +
f'scale_max={down_sampling1.scale_max}, ' +
f'patch_size={down_sampling1.patch_size}, ' +
f'interpolation={down_sampling1.interpolation}, ' +
f'backend={down_sampling1.backend}')
img2 = np.uint8(np.random.randn(480, 640, 3) * 255)
inputs2 = dict(gt=img2)
down_sampling2 = RandomDownSampling(
scale_min=1, scale_max=4, patch_size=48)
results2 = down_sampling2(inputs2)
assert set(list(results2.keys())) == set(['gt', 'lq', 'scale'])
assert repr(down_sampling2) == (
down_sampling2.__class__.__name__ +
f' scale_min={down_sampling2.scale_min}, ' +
f'scale_max={down_sampling2.scale_max}, ' +
f'patch_size={down_sampling2.patch_size}, ' +
f'interpolation={down_sampling2.interpolation}, ' +
f'backend={down_sampling2.backend}')
| 1,415 | 39.457143 | 67 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_augmentation.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import os.path as osp
import numpy as np
import pytest
import torch
# yapf: disable
from mmedit.datasets.pipelines import (BinarizeImage, ColorJitter, CopyValues,
Flip, GenerateFrameIndices,
GenerateFrameIndiceswithPadding,
GenerateSegmentIndices, MirrorSequence,
Pad, Quantize, RandomAffine,
RandomJitter, RandomMaskDilation,
RandomTransposeHW, Resize,
TemporalReverse, UnsharpMasking)
from mmedit.datasets.pipelines.augmentation import RandomRotation
class TestAugmentations:
@classmethod
def setup_class(cls):
cls.results = dict()
cls.img_gt = np.random.rand(256, 128, 3).astype(np.float32)
cls.img_lq = np.random.rand(64, 32, 3).astype(np.float32)
cls.results = dict(
lq=cls.img_lq,
gt=cls.img_gt,
scale=4,
lq_path='fake_lq_path',
gt_path='fake_gt_path')
cls.results['img'] = np.random.rand(256, 256, 3).astype(np.float32)
cls.results['mask'] = np.random.rand(256, 256, 1).astype(np.float32)
cls.results['img_tensor'] = torch.rand((3, 256, 256))
cls.results['mask_tensor'] = torch.zeros((1, 256, 256))
cls.results['mask_tensor'][:, 50:150, 40:140] = 1.
@staticmethod
def assert_img_equal(img, ref_img, ratio_thr=0.999):
"""Check if img and ref_img are matched approximately."""
assert img.shape == ref_img.shape
assert img.dtype == ref_img.dtype
area = ref_img.shape[-1] * ref_img.shape[-2]
diff = np.abs(img.astype('int32') - ref_img.astype('int32'))
assert np.sum(diff <= 1) / float(area) > ratio_thr
@staticmethod
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
@staticmethod
def check_flip(origin_img, result_img, flip_type):
"""Check if the origin_img are flipped correctly into result_img
in different flip_types"""
h, w, c = origin_img.shape
if flip_type == 'horizontal':
for i in range(h):
for j in range(w):
for k in range(c):
if result_img[i, j, k] != origin_img[i, w - 1 - j, k]:
return False
else:
for i in range(h):
for j in range(w):
for k in range(c):
if result_img[i, j, k] != origin_img[h - 1 - i, j, k]:
return False
return True
def test_binarize(self):
mask_ = np.zeros((5, 5, 1))
mask_[2, 2, :] = 0.6
gt_mask = mask_.copy()
gt_mask[2, 2, :] = 1.
results = dict(mask=mask_.copy())
binarize = BinarizeImage(['mask'], 0.5, to_int=False)
results = binarize(results)
assert np.array_equal(results['mask'], gt_mask.astype(np.float32))
results = dict(mask=mask_.copy())
binarize = BinarizeImage(['mask'], 0.5, to_int=True)
results = binarize(results)
assert np.array_equal(results['mask'], gt_mask.astype(np.int32))
assert str(binarize) == (
binarize.__class__.__name__ +
f"(keys={['mask']}, binary_thr=0.5, to_int=True)")
def test_flip(self):
results = copy.deepcopy(self.results)
with pytest.raises(ValueError):
Flip(keys=['lq', 'gt'], direction='vertically')
# horizontal
np.random.seed(1)
target_keys = ['lq', 'gt', 'flip', 'flip_direction']
flip = Flip(keys=['lq', 'gt'], flip_ratio=1, direction='horizontal')
results = flip(results)
assert self.check_keys_contain(results.keys(), target_keys)
assert self.check_flip(self.img_lq, results['lq'],
results['flip_direction'])
assert self.check_flip(self.img_gt, results['gt'],
results['flip_direction'])
assert results['lq'].shape == self.img_lq.shape
assert results['gt'].shape == self.img_gt.shape
# vertical
results = copy.deepcopy(self.results)
flip = Flip(keys=['lq', 'gt'], flip_ratio=1, direction='vertical')
results = flip(results)
assert self.check_keys_contain(results.keys(), target_keys)
assert self.check_flip(self.img_lq, results['lq'],
results['flip_direction'])
assert self.check_flip(self.img_gt, results['gt'],
results['flip_direction'])
assert results['lq'].shape == self.img_lq.shape
assert results['gt'].shape == self.img_gt.shape
assert repr(flip) == flip.__class__.__name__ + (
f"(keys={['lq', 'gt']}, flip_ratio=1, "
f"direction={results['flip_direction']})")
# flip a list
# horizontal
flip = Flip(keys=['lq', 'gt'], flip_ratio=1, direction='horizontal')
results = dict(
lq=[self.img_lq, np.copy(self.img_lq)],
gt=[self.img_gt, np.copy(self.img_gt)],
scale=4,
lq_path='fake_lq_path',
gt_path='fake_gt_path')
flip_rlt = flip(copy.deepcopy(results))
assert self.check_keys_contain(flip_rlt.keys(), target_keys)
assert self.check_flip(self.img_lq, flip_rlt['lq'][0],
flip_rlt['flip_direction'])
assert self.check_flip(self.img_gt, flip_rlt['gt'][0],
flip_rlt['flip_direction'])
np.testing.assert_almost_equal(flip_rlt['gt'][0], flip_rlt['gt'][1])
np.testing.assert_almost_equal(flip_rlt['lq'][0], flip_rlt['lq'][1])
# vertical
flip = Flip(keys=['lq', 'gt'], flip_ratio=1, direction='vertical')
flip_rlt = flip(copy.deepcopy(results))
assert self.check_keys_contain(flip_rlt.keys(), target_keys)
assert self.check_flip(self.img_lq, flip_rlt['lq'][0],
flip_rlt['flip_direction'])
assert self.check_flip(self.img_gt, flip_rlt['gt'][0],
flip_rlt['flip_direction'])
np.testing.assert_almost_equal(flip_rlt['gt'][0], flip_rlt['gt'][1])
np.testing.assert_almost_equal(flip_rlt['lq'][0], flip_rlt['lq'][1])
# no flip
flip = Flip(keys=['lq', 'gt'], flip_ratio=0, direction='vertical')
results = flip(copy.deepcopy(results))
assert self.check_keys_contain(results.keys(), target_keys)
np.testing.assert_almost_equal(results['gt'][0], self.img_gt)
np.testing.assert_almost_equal(results['lq'][0], self.img_lq)
np.testing.assert_almost_equal(results['gt'][0], results['gt'][1])
np.testing.assert_almost_equal(results['lq'][0], results['lq'][1])
def test_pad(self):
target_keys = ['alpha']
alpha = np.random.rand(319, 321).astype(np.float32)
results = dict(alpha=alpha)
pad = Pad(keys=['alpha'], ds_factor=32, mode='constant')
pad_results = pad(results)
assert self.check_keys_contain(pad_results.keys(), target_keys)
assert pad_results['alpha'].shape == (320, 352)
assert self.check_pad(alpha, results['alpha'], 'constant')
alpha = np.random.rand(319, 321).astype(np.float32)
results = dict(alpha=alpha)
pad = Pad(keys=['alpha'], ds_factor=32, mode='reflect')
pad_results = pad(results)
assert self.check_keys_contain(pad_results.keys(), target_keys)
assert pad_results['alpha'].shape == (320, 352)
assert self.check_pad(alpha, results['alpha'], 'reflect')
alpha = np.random.rand(320, 320).astype(np.float32)
results = dict(alpha=alpha)
pad = Pad(keys=['alpha'], ds_factor=32, mode='reflect')
pad_results = pad(results)
assert self.check_keys_contain(pad_results.keys(), target_keys)
assert pad_results['alpha'].shape == (320, 320)
assert self.check_pad(alpha, results['alpha'], 'reflect')
assert repr(pad) == pad.__class__.__name__ + (
f"(keys={['alpha']}, ds_factor=32, mode={'reflect'})")
@staticmethod
def check_pad(origin_img, result_img, mode, ds_factor=32):
"""Check if the origin_img is padded correctly.
Supported modes for checking are 'constant' (with 'constant_values' of
0) and 'reflect'.
Supported images should be 2 dimensional.
"""
if mode not in ['constant', 'reflect']:
raise NotImplementedError(
f'Pad checking of mode {mode} is not implemented.')
assert len(origin_img.shape) == 2, 'Image should be 2 dimensional.'
h, w = origin_img.shape
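        # Pad is expected to round h/w up to the nearest multiple of
        # ds_factor and to pad only the bottom/right borders with np.pad
        # semantics; under 'reflect' (edge excluded), padded row h + k
        # mirrors source row h - 2 - k, hence the 2 * h - 2 - i indexing
        # below.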
        new_h = ds_factor * ((h - 1) // ds_factor + 1)
        new_w = ds_factor * ((w - 1) // ds_factor + 1)
# check the bottom rectangle
for i in range(h, new_h):
for j in range(0, w):
                if mode == 'reflect':
                    target = origin_img[2 * h - 2 - i, j]
                else:
                    target = 0
if result_img[i, j] != target:
return False
# check the right rectangle
for i in range(0, h):
for j in range(w, new_w):
                if mode == 'reflect':
                    target = origin_img[i, 2 * w - 2 - j]
                else:
                    target = 0
if result_img[i, j] != target:
return False
# check the bottom right rectangle
for i in range(h, new_h):
for j in range(w, new_w):
                if mode == 'reflect':
                    target = origin_img[2 * h - 2 - i, 2 * w - 2 - j]
                else:
                    target = 0
if result_img[i, j] != target:
return False
return True
def test_random_affine(self):
with pytest.raises(AssertionError):
RandomAffine(None, -1)
with pytest.raises(AssertionError):
RandomAffine(None, 0, translate='Not a tuple')
with pytest.raises(AssertionError):
RandomAffine(None, 0, translate=(0, 0, 0))
with pytest.raises(AssertionError):
RandomAffine(None, 0, translate=(0, 2))
with pytest.raises(AssertionError):
RandomAffine(None, 0, scale='Not a tuple')
with pytest.raises(AssertionError):
RandomAffine(None, 0, scale=(0.8, 1., 1.2))
with pytest.raises(AssertionError):
RandomAffine(None, 0, scale=(-0.8, 1.))
with pytest.raises(AssertionError):
RandomAffine(None, 0, shear=-1)
with pytest.raises(AssertionError):
RandomAffine(None, 0, shear=(0, 1, 2))
with pytest.raises(AssertionError):
RandomAffine(None, 0, flip_ratio='Not a float')
target_keys = ['fg', 'alpha']
# Test identical transformation
alpha = np.random.rand(4, 4).astype(np.float32)
fg = np.random.rand(4, 4).astype(np.float32)
results = dict(alpha=alpha, fg=fg)
random_affine = RandomAffine(['fg', 'alpha'],
degrees=0, flip_ratio=0.0)
random_affine_results = random_affine(results)
assert np.allclose(alpha, random_affine_results['alpha'])
assert np.allclose(fg, random_affine_results['fg'])
# Test flip in both direction
alpha = np.random.rand(4, 4).astype(np.float32)
fg = np.random.rand(4, 4).astype(np.float32)
results = dict(alpha=alpha, fg=fg)
random_affine = RandomAffine(['fg', 'alpha'],
degrees=0, flip_ratio=1.0)
random_affine_results = random_affine(results)
assert np.allclose(alpha[::-1, ::-1], random_affine_results['alpha'])
assert np.allclose(fg[::-1, ::-1], random_affine_results['fg'])
# test random affine with different valid setting combinations
# only shape are tested
alpha = np.random.rand(240, 320).astype(np.float32)
fg = np.random.rand(240, 320).astype(np.float32)
results = dict(alpha=alpha, fg=fg)
random_affine = RandomAffine(['fg', 'alpha'],
degrees=30,
translate=(0, 1),
shear=(10, 20),
flip_ratio=0.5)
random_affine_results = random_affine(results)
assert self.check_keys_contain(random_affine_results.keys(),
target_keys)
assert random_affine_results['fg'].shape == (240, 320)
assert random_affine_results['alpha'].shape == (240, 320)
alpha = np.random.rand(240, 320).astype(np.float32)
fg = np.random.rand(240, 320).astype(np.float32)
results = dict(alpha=alpha, fg=fg)
random_affine = RandomAffine(['fg', 'alpha'],
degrees=(-30, 30),
scale=(0.8, 1.25),
shear=10,
flip_ratio=0.5)
random_affine_results = random_affine(results)
assert self.check_keys_contain(random_affine_results.keys(),
target_keys)
assert random_affine_results['fg'].shape == (240, 320)
assert random_affine_results['alpha'].shape == (240, 320)
alpha = np.random.rand(240, 320).astype(np.float32)
fg = np.random.rand(240, 320).astype(np.float32)
results = dict(alpha=alpha, fg=fg)
random_affine = RandomAffine(['fg', 'alpha'], degrees=30)
random_affine_results = random_affine(results)
assert self.check_keys_contain(random_affine_results.keys(),
target_keys)
assert random_affine_results['fg'].shape == (240, 320)
assert random_affine_results['alpha'].shape == (240, 320)
assert repr(random_affine) == random_affine.__class__.__name__ + (
f'(keys={target_keys}, degrees={(-30, 30)}, '
f'translate={None}, scale={None}, '
f'shear={None}, flip_ratio={0})')
def test_random_jitter(self):
with pytest.raises(AssertionError):
RandomJitter(-40)
with pytest.raises(AssertionError):
RandomJitter((-40, 40, 40))
target_keys = ['fg']
fg = np.random.rand(240, 320, 3).astype(np.float32)
alpha = np.random.rand(240, 320).astype(np.float32)
results = dict(fg=fg.copy(), alpha=alpha)
random_jitter = RandomJitter(40)
random_jitter_results = random_jitter(results)
assert self.check_keys_contain(random_jitter_results.keys(),
target_keys)
assert random_jitter_results['fg'].shape == (240, 320, 3)
fg = np.random.rand(240, 320, 3).astype(np.float32)
alpha = np.random.rand(240, 320).astype(np.float32)
results = dict(fg=fg.copy(), alpha=alpha)
random_jitter = RandomJitter((-50, 50))
random_jitter_results = random_jitter(results)
assert self.check_keys_contain(random_jitter_results.keys(),
target_keys)
assert random_jitter_results['fg'].shape == (240, 320, 3)
assert repr(random_jitter) == random_jitter.__class__.__name__ + (
'hue_range=(-50, 50)')
def test_color_jitter(self):
results = copy.deepcopy(self.results)
results['gt'] = (results['gt'] * 255).astype(np.uint8)
target_keys = ['gt']
color_jitter = ColorJitter(
keys=['gt'], brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5)
color_jitter_results = color_jitter(results)
assert self.check_keys_contain(color_jitter_results.keys(),
target_keys)
assert color_jitter_results['gt'].shape == self.img_gt.shape
assert repr(color_jitter) == color_jitter.__class__.__name__ + (
f"(keys=['gt'], to_rgb=False)")
@staticmethod
def check_transposehw(origin_img, result_img):
"""Check if the origin_imgs are transposed correctly"""
h, w, c = origin_img.shape
for i in range(c):
for j in range(h):
for k in range(w):
if result_img[k, j, i] != origin_img[j, k, i]: # noqa:E501
return False
return True
def test_transposehw(self):
results = self.results.copy()
target_keys = ['lq', 'gt', 'transpose']
transposehw = RandomTransposeHW(keys=['lq', 'gt'], transpose_ratio=1)
results = transposehw(results)
assert self.check_keys_contain(results.keys(), target_keys)
assert self.check_transposehw(self.img_lq, results['lq'])
assert self.check_transposehw(self.img_gt, results['gt'])
assert results['lq'].shape == (32, 64, 3)
assert results['gt'].shape == (128, 256, 3)
assert repr(transposehw) == transposehw.__class__.__name__ + (
f"(keys={['lq', 'gt']}, transpose_ratio=1)")
# for image list
ori_results = dict(
lq=[self.img_lq, np.copy(self.img_lq)],
gt=[self.img_gt, np.copy(self.img_gt)],
scale=4,
lq_path='fake_lq_path',
gt_path='fake_gt_path')
target_keys = ['lq', 'gt', 'transpose']
transposehw = RandomTransposeHW(keys=['lq', 'gt'], transpose_ratio=1)
results = transposehw(ori_results.copy())
assert self.check_keys_contain(results.keys(), target_keys)
assert self.check_transposehw(self.img_lq, results['lq'][0])
assert self.check_transposehw(self.img_gt, results['gt'][1])
np.testing.assert_almost_equal(results['gt'][0], results['gt'][1])
np.testing.assert_almost_equal(results['lq'][0], results['lq'][1])
# no transpose
target_keys = ['lq', 'gt', 'transpose']
transposehw = RandomTransposeHW(keys=['lq', 'gt'], transpose_ratio=0)
results = transposehw(ori_results.copy())
assert self.check_keys_contain(results.keys(), target_keys)
np.testing.assert_almost_equal(results['gt'][0], self.img_gt)
np.testing.assert_almost_equal(results['lq'][0], self.img_lq)
np.testing.assert_almost_equal(results['gt'][0], results['gt'][1])
np.testing.assert_almost_equal(results['lq'][0], results['lq'][1])
def test_random_dilation(self):
mask = np.zeros((3, 3, 1), dtype=np.float32)
mask[1, 1] = 1
gt_mask = np.ones_like(mask)
results = dict(mask=mask.copy())
dilation = RandomMaskDilation(['mask'],
binary_thr=0.5,
kernel_min=3,
kernel_max=3)
results = dilation(results)
assert np.array_equal(results['mask'], gt_mask)
assert results['mask_dilate_kernel_size'] == 3
assert str(dilation) == (
dilation.__class__.__name__ +
f"(keys={['mask']}, kernel_min=3, kernel_max=3)")
def test_resize(self):
with pytest.raises(AssertionError):
Resize([], scale=0.5)
with pytest.raises(AssertionError):
Resize(['gt_img'], size_factor=32, scale=0.5)
with pytest.raises(AssertionError):
Resize(['gt_img'], size_factor=32, keep_ratio=True)
with pytest.raises(AssertionError):
Resize(['gt_img'], max_size=32, size_factor=None)
with pytest.raises(ValueError):
Resize(['gt_img'], scale=-0.5)
with pytest.raises(TypeError):
Resize(['gt_img'], (0.4, 0.2))
with pytest.raises(TypeError):
Resize(['gt_img'], dict(test=None))
target_keys = ['alpha']
alpha = np.random.rand(240, 320).astype(np.float32)
results = dict(alpha=alpha)
resize = Resize(keys=['alpha'], size_factor=32, max_size=None)
resize_results = resize(results)
assert self.check_keys_contain(resize_results.keys(), target_keys)
assert resize_results['alpha'].shape == (224, 320, 1)
resize = Resize(keys=['alpha'], size_factor=32, max_size=320)
resize_results = resize(results)
assert self.check_keys_contain(resize_results.keys(), target_keys)
assert resize_results['alpha'].shape == (224, 320, 1)
resize = Resize(keys=['alpha'], size_factor=32, max_size=200)
resize_results = resize(results)
assert self.check_keys_contain(resize_results.keys(), target_keys)
assert resize_results['alpha'].shape == (192, 192, 1)
resize = Resize(['gt_img'], (-1, 200))
assert resize.scale == (np.inf, 200)
results = dict(gt_img=self.results['img'].copy())
resize_keep_ratio = Resize(['gt_img'], scale=0.5, keep_ratio=True)
results = resize_keep_ratio(results)
assert results['gt_img'].shape[:2] == (128, 128)
assert results['scale_factor'] == 0.5
results = dict(gt_img=self.results['img'].copy())
resize_keep_ratio = Resize(['gt_img'],
scale=(128, 128),
keep_ratio=False)
results = resize_keep_ratio(results)
assert results['gt_img'].shape[:2] == (128, 128)
# test input with shape (256, 256)
results = dict(gt_img=self.results['img'][..., 0].copy(), alpha=alpha)
resize = Resize(['gt_img', 'alpha'],
scale=(128, 128),
keep_ratio=False,
output_keys=['lq_img', 'beta'])
results = resize(results)
assert results['gt_img'].shape == (256, 256)
assert results['lq_img'].shape == (128, 128, 1)
assert results['alpha'].shape == (240, 320)
assert results['beta'].shape == (128, 128, 1)
name_ = str(resize_keep_ratio)
assert name_ == resize_keep_ratio.__class__.__name__ + (
"(keys=['gt_img'], output_keys=['gt_img'], "
'scale=(128, 128), '
f'keep_ratio={False}, size_factor=None, '
'max_size=None, interpolation=bilinear)')
def test_random_rotation(self):
with pytest.raises(ValueError):
RandomRotation(None, degrees=-10.0)
with pytest.raises(TypeError):
RandomRotation(None, degrees=('0.0', '45.0'))
target_keys = ['degrees']
results = copy.deepcopy(self.results)
random_rotation = RandomRotation(['img'], degrees=(0, 45))
random_rotation_results = random_rotation(results)
assert self.check_keys_contain(
random_rotation_results.keys(), target_keys)
assert random_rotation_results['img'].shape == (256, 256, 3)
assert random_rotation_results['degrees'] == (0, 45)
assert repr(random_rotation) == random_rotation.__class__.__name__ + (
"(keys=['img'], degrees=(0, 45))")
# test single degree integer
random_rotation = RandomRotation(['img'], degrees=45)
random_rotation_results = random_rotation(results)
assert self.check_keys_contain(
random_rotation_results.keys(), target_keys)
assert random_rotation_results['img'].shape == (256, 256, 3)
assert random_rotation_results['degrees'] == (-45, 45)
# test image dim == 2
grey_scale_img = np.random.rand(256, 256).astype(np.float32)
results = dict(img=grey_scale_img.copy())
random_rotation = RandomRotation(['img'], degrees=(0, 45))
random_rotation_results = random_rotation(results)
assert self.check_keys_contain(
random_rotation_results.keys(), target_keys)
assert random_rotation_results['img'].shape == (256, 256, 1)
def test_frame_index_generation_with_padding(self):
with pytest.raises(ValueError):
# Wrong padding mode
GenerateFrameIndiceswithPadding(padding='fake')
results = dict(
lq_path='fake_lq_root',
gt_path='fake_gt_root',
key=osp.join('000', '00000000'),
max_frame_num=100,
num_input_frames=5)
target_keys = ['lq_path', 'gt_path', 'key']
replicate_idx = [0, 0, 0, 1, 2]
reflection_idx = [2, 1, 0, 1, 2]
reflection_circle_idx = [4, 3, 0, 1, 2]
circle_idx = [3, 4, 0, 1, 2]
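        # For the first frame (index 0) with a 5-frame window, the raw
        # neighbor indices are [-2, -1, 0, 1, 2]: 'replicate' clamps to the
        # edge, 'reflection' mirrors across it, and 'reflection_circle' /
        # 'circle' borrow the out-of-range frames from the other end of the
        # window, matching the index lists above.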
# replicate
lq_paths = [osp.join('fake_lq_root', '000',
f'{v:08d}.png') for v in replicate_idx]
gt_paths = [osp.join('fake_gt_root', '000', '00000000.png')]
frame_index_generator = GenerateFrameIndiceswithPadding(
padding='replicate')
rlt = frame_index_generator(copy.deepcopy(results))
assert self.check_keys_contain(rlt.keys(), target_keys)
assert rlt['lq_path'] == lq_paths
assert rlt['gt_path'] == gt_paths
# reflection
lq_paths = [osp.join('fake_lq_root', '000',
f'{v:08d}.png') for v in reflection_idx]
frame_index_generator = GenerateFrameIndiceswithPadding(
padding='reflection')
rlt = frame_index_generator(copy.deepcopy(results))
assert rlt['lq_path'] == lq_paths
assert rlt['gt_path'] == gt_paths
# reflection_circle
lq_paths = [
osp.join('fake_lq_root', '000',
f'{v:08d}.png') for v in reflection_circle_idx
]
frame_index_generator = GenerateFrameIndiceswithPadding(
padding='reflection_circle')
rlt = frame_index_generator(copy.deepcopy(results))
assert rlt['lq_path'] == lq_paths
assert rlt['gt_path'] == gt_paths
# circle
lq_paths = [osp.join('fake_lq_root', '000',
f'{v:08d}.png') for v in circle_idx]
frame_index_generator = GenerateFrameIndiceswithPadding(
padding='circle')
rlt = frame_index_generator(copy.deepcopy(results))
assert rlt['lq_path'] == lq_paths
assert rlt['gt_path'] == gt_paths
results = dict(
lq_path='fake_lq_root',
gt_path='fake_gt_root',
key=osp.join('000', '00000099'),
max_frame_num=100,
num_input_frames=5)
target_keys = ['lq_path', 'gt_path', 'key']
replicate_idx = [97, 98, 99, 99, 99]
reflection_idx = [97, 98, 99, 98, 97]
reflection_circle_idx = [97, 98, 99, 96, 95]
circle_idx = [97, 98, 99, 95, 96]
# replicate
lq_paths = [osp.join('fake_lq_root', '000',
f'{v:08d}.png') for v in replicate_idx]
gt_paths = [osp.join('fake_gt_root', '000', '00000099.png')]
frame_index_generator = GenerateFrameIndiceswithPadding(
padding='replicate')
rlt = frame_index_generator(copy.deepcopy(results))
assert self.check_keys_contain(rlt.keys(), target_keys)
assert rlt['lq_path'] == lq_paths
assert rlt['gt_path'] == gt_paths
# reflection
lq_paths = [osp.join('fake_lq_root', '000',
f'{v:08d}.png') for v in reflection_idx]
frame_index_generator = GenerateFrameIndiceswithPadding(
padding='reflection')
rlt = frame_index_generator(copy.deepcopy(results))
assert rlt['lq_path'] == lq_paths
assert rlt['gt_path'] == gt_paths
# reflection_circle
lq_paths = [
osp.join('fake_lq_root', '000',
f'{v:08d}.png') for v in reflection_circle_idx
]
frame_index_generator = GenerateFrameIndiceswithPadding(
padding='reflection_circle')
rlt = frame_index_generator(copy.deepcopy(results))
assert rlt['lq_path'] == lq_paths
assert rlt['gt_path'] == gt_paths
# circle
lq_paths = [osp.join('fake_lq_root', '000',
f'{v:08d}.png') for v in circle_idx]
frame_index_generator = GenerateFrameIndiceswithPadding(
padding='circle')
rlt = frame_index_generator(copy.deepcopy(results))
assert rlt['lq_path'] == lq_paths
assert rlt['gt_path'] == gt_paths
name_ = repr(frame_index_generator)
assert name_ == frame_index_generator.__class__.__name__ + (
"(padding='circle')")
def test_frame_index_generator(self):
results = dict(
lq_path='fake_lq_root',
gt_path='fake_gt_root',
key=osp.join('000', '00000010'),
num_input_frames=3)
target_keys = ['lq_path', 'gt_path', 'key', 'interval']
frame_index_generator = GenerateFrameIndices(
interval_list=[1], frames_per_clip=99)
rlt = frame_index_generator(copy.deepcopy(results))
assert self.check_keys_contain(rlt.keys(), target_keys)
name_ = repr(frame_index_generator)
assert name_ == frame_index_generator.__class__.__name__ + (
'(interval_list=[1], frames_per_clip=99)')
# index out of range
frame_index_generator = GenerateFrameIndices(interval_list=[10])
rlt = frame_index_generator(copy.deepcopy(results))
assert self.check_keys_contain(rlt.keys(), target_keys)
# index out of range
results['key'] = osp.join('000', '00000099')
frame_index_generator = GenerateFrameIndices(interval_list=[2, 3])
rlt = frame_index_generator(copy.deepcopy(results))
assert self.check_keys_contain(rlt.keys(), target_keys)
def test_temporal_reverse(self):
img_lq1 = np.random.rand(4, 4, 3).astype(np.float32)
img_lq2 = np.random.rand(4, 4, 3).astype(np.float32)
img_gt = np.random.rand(8, 8, 3).astype(np.float32)
results = dict(lq=[img_lq1, img_lq2], gt=[img_gt])
target_keys = ['lq', 'gt', 'reverse']
temporal_reverse = TemporalReverse(keys=['lq', 'gt'], reverse_ratio=1)
results = temporal_reverse(results)
assert self.check_keys_contain(results.keys(), target_keys)
np.testing.assert_almost_equal(results['lq'][0], img_lq2)
np.testing.assert_almost_equal(results['lq'][1], img_lq1)
np.testing.assert_almost_equal(results['gt'][0], img_gt)
assert repr(
temporal_reverse) == temporal_reverse.__class__.__name__ + (
f"(keys={['lq', 'gt']}, reverse_ratio=1)")
results = dict(lq=[img_lq1, img_lq2], gt=[img_gt])
temporal_reverse = TemporalReverse(keys=['lq', 'gt'], reverse_ratio=0)
results = temporal_reverse(results)
assert self.check_keys_contain(results.keys(), target_keys)
np.testing.assert_almost_equal(results['lq'][0], img_lq1)
np.testing.assert_almost_equal(results['lq'][1], img_lq2)
np.testing.assert_almost_equal(results['gt'][0], img_gt)
def test_frame_index_generation_for_recurrent(self):
results = dict(
lq_path='fake_lq_root',
gt_path='fake_gt_root',
key='000',
num_input_frames=10,
sequence_length=100)
target_keys = [
'lq_path', 'gt_path', 'key', 'interval', 'num_input_frames',
'sequence_length'
]
frame_index_generator = GenerateSegmentIndices(interval_list=[1, 5, 9])
rlt = frame_index_generator(copy.deepcopy(results))
assert self.check_keys_contain(rlt.keys(), target_keys)
name_ = repr(frame_index_generator)
assert name_ == frame_index_generator.__class__.__name__ + (
'(interval_list=[1, 5, 9])')
# interval too large
results = dict(
lq_path='fake_lq_root',
gt_path='fake_gt_root',
key='000',
num_input_frames=11,
sequence_length=100)
frame_index_generator = GenerateSegmentIndices(interval_list=[10])
with pytest.raises(ValueError):
frame_index_generator(copy.deepcopy(results))
def test_mirror_sequence(self):
lqs = [np.random.rand(4, 4, 3) for _ in range(0, 5)]
gts = [np.random.rand(16, 16, 3) for _ in range(0, 5)]
target_keys = ['lq', 'gt']
mirror_sequence = MirrorSequence(keys=['lq', 'gt'])
results = dict(lq=lqs, gt=gts)
results = mirror_sequence(results)
assert self.check_keys_contain(results.keys(), target_keys)
for i in range(0, 5):
np.testing.assert_almost_equal(results['lq'][i],
results['lq'][-i - 1])
np.testing.assert_almost_equal(results['gt'][i],
results['gt'][-i - 1])
assert repr(mirror_sequence) == mirror_sequence.__class__.__name__ + (
"(keys=['lq', 'gt'])")
        # each key should contain a list of np.ndarray
with pytest.raises(TypeError):
results = dict(lq=0, gt=gts)
mirror_sequence(results)
def test_quantize(self):
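        # Together, the three cases below imply Quantize is equivalent to
        # this sketch: quantized = np.round(np.clip(x, 0, 1) * 255) / 255.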
results = {}
# clip (>1)
results['gt'] = 1.1 * np.ones((1, 1, 3)).astype(np.float32)
model = Quantize(keys=['gt'])
assert np.array_equal(
model(results)['gt'],
np.ones((1, 1, 3)).astype(np.float32))
# clip (<0)
results['gt'] = -0.1 * np.ones((1, 1, 3)).astype(np.float32)
model = Quantize(keys=['gt'])
assert np.array_equal(
model(results)['gt'],
np.zeros((1, 1, 3)).astype(np.float32))
# round
results['gt'] = (1 / 255. + 1e-8) * np.ones(
(1, 1, 3)).astype(np.float32)
model = Quantize(keys=['gt'])
assert np.array_equal(
model(results)['gt'], (1 / 255.) * np.ones(
(1, 1, 3)).astype(np.float32))
def test_copy_value(self):
with pytest.raises(AssertionError):
CopyValues(src_keys='gt', dst_keys='lq')
with pytest.raises(ValueError):
CopyValues(src_keys=['gt', 'mask'], dst_keys=['lq'])
results = {}
results['gt'] = np.zeros((1)).astype(np.float32)
copy_ = CopyValues(src_keys=['gt'], dst_keys=['lq'])
assert np.array_equal(copy_(results)['lq'], results['gt'])
assert repr(copy_) == copy_.__class__.__name__ + (
f"(src_keys=['gt'])"
f"(dst_keys=['lq'])")
def test_unsharp_masking(self):
results = {}
unsharp_masking = UnsharpMasking(
kernel_size=15, sigma=0, weight=0.5, threshold=10, keys=['gt'])
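        # These parameters map onto the classic unsharp-mask recipe (an
        # assumption, not checked against the mmedit implementation):
        #   residual = img - gaussian_blur(img, kernel_size, sigma)
        #   out = img + weight * residual, applied where |residual| > threshold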
# single image
results['gt'] = np.zeros((8, 8, 3)).astype(np.float32)
results = unsharp_masking(results)
assert isinstance(results['gt_unsharp'], np.ndarray)
# sequence of images
results['gt'] = [np.zeros((8, 8, 3)).astype(np.float32)] * 2
results = unsharp_masking(results)
assert isinstance(results['gt_unsharp'], list)
assert repr(unsharp_masking) == unsharp_masking.__class__.__name__ + (
"(keys=['gt'], kernel_size=15, sigma=0, weight=0.5, threshold=10)")
# kernel_size must be odd
with pytest.raises(ValueError):
unsharp_masking = UnsharpMasking(
kernel_size=10, sigma=0, weight=0.5, threshold=10, keys=['gt'])
| 35,573 | 41.860241 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_data/test_pipelines/test_crop.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import numpy as np
import pytest
from mmedit.datasets.pipelines import (Crop, CropAroundCenter, CropAroundFg,
CropAroundUnknown, CropLike, FixedCrop,
ModCrop, PairedRandomCrop,
RandomResizedCrop)
class TestAugmentations:
@classmethod
def setup_class(cls):
cls.results = dict()
cls.img_gt = np.random.rand(256, 128, 3).astype(np.float32)
cls.img_lq = np.random.rand(64, 32, 3).astype(np.float32)
cls.results = dict(
lq=cls.img_lq,
gt=cls.img_gt,
scale=4,
lq_path='fake_lq_path',
gt_path='fake_gt_path')
cls.results['img'] = np.random.rand(256, 256, 3).astype(np.float32)
cls.results['img_a'] = np.random.rand(286, 286, 3).astype(np.float32)
cls.results['img_b'] = np.random.rand(286, 286, 3).astype(np.float32)
@staticmethod
def check_crop(result_img_shape, result_bbox):
        """Check whether result_bbox corresponds to result_img_shape."""
        crop_w = result_bbox[2] - result_bbox[0]
        crop_h = result_bbox[3] - result_bbox[1]
crop_shape = (crop_h, crop_w)
return result_img_shape == crop_shape
@staticmethod
def check_crop_around_semi(alpha):
return ((alpha > 0) & (alpha < 255)).any()
@staticmethod
def check_keys_contain(result_keys, target_keys):
"""Check if all elements in target_keys is in result_keys."""
return set(target_keys).issubset(set(result_keys))
def test_crop(self):
with pytest.raises(TypeError):
Crop(['img'], (0.23, 0.1))
# test center crop
results = copy.deepcopy(self.results)
center_crop = Crop(['img'], crop_size=(128, 128), random_crop=False)
results = center_crop(results)
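        # img_crop_bbox is [x_offset, y_offset, crop_w, crop_h]: a centered
        # 128x128 crop of a 256x256 image starts at offset (64, 64).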
assert results['img_crop_bbox'] == [64, 64, 128, 128]
assert np.array_equal(self.results['img'][64:192, 64:192, :],
results['img'])
# test random crop
results = copy.deepcopy(self.results)
random_crop = Crop(['img'], crop_size=(128, 128), random_crop=True)
results = random_crop(results)
assert 0 <= results['img_crop_bbox'][0] <= 128
assert 0 <= results['img_crop_bbox'][1] <= 128
assert results['img_crop_bbox'][2] == 128
assert results['img_crop_bbox'][3] == 128
# test random crop for larger size than the original shape
results = copy.deepcopy(self.results)
random_crop = Crop(['img'], crop_size=(512, 512), random_crop=True)
results = random_crop(results)
assert np.array_equal(self.results['img'], results['img'])
assert str(random_crop) == (
random_crop.__class__.__name__ +
"keys=['img'], crop_size=(512, 512), random_crop=True")
# test center crop for size larger than original shape
results = copy.deepcopy(self.results)
center_crop = Crop(['img'],
crop_size=(512, 512),
random_crop=False,
is_pad_zeros=True)
gt_pad = np.pad(
self.results['img'], ((128, 128), (128, 128), (0, 0)),
mode='constant',
constant_values=0)
results = center_crop(results)
assert results['img_crop_bbox'] == [128, 128, 512, 512]
assert np.array_equal(gt_pad, results['img'])
def test_random_resized_crop(self):
with pytest.raises(TypeError):
RandomResizedCrop(['img'], crop_size=(0.23, 0.1))
with pytest.raises(TypeError):
RandomResizedCrop(['img'], crop_size=(128, 128), scale=(1, 1))
with pytest.raises(TypeError):
RandomResizedCrop(['img'],
crop_size=(128, 128),
scale=(0.5, 0.5),
ratio=(1, 2))
# test random crop
results = copy.deepcopy(self.results)
random_resized_crop = RandomResizedCrop(['img'], crop_size=(128, 128))
results = random_resized_crop(results)
assert 0 <= results['img_crop_bbox'][0] <= 256
assert 0 <= results['img_crop_bbox'][1] <= 256
assert results['img_crop_bbox'][2] <= 256
assert results['img_crop_bbox'][3] <= 256
assert results['img'].shape == (128, 128, 3)
# test random crop with integer crop size
results = copy.deepcopy(self.results)
random_resized_crop = RandomResizedCrop(['img'], crop_size=128)
results = random_resized_crop(results)
assert 0 <= results['img_crop_bbox'][0] <= 256
assert 0 <= results['img_crop_bbox'][1] <= 256
assert results['img_crop_bbox'][2] <= 256
assert results['img_crop_bbox'][3] <= 256
assert results['img'].shape == (128, 128, 3)
assert str(random_resized_crop) == (
random_resized_crop.__class__.__name__ +
"(keys=['img'], crop_size=(128, 128), scale=(0.08, 1.0), "
f'ratio={(3. / 4., 4. / 3.)}, interpolation=bilinear)')
# test random crop for larger size than the original shape
results = copy.deepcopy(self.results)
random_resized_crop = RandomResizedCrop(['img'], crop_size=(512, 512))
results = random_resized_crop(results)
assert results['img'].shape == (512, 512, 3)
assert str(random_resized_crop) == (
random_resized_crop.__class__.__name__ +
"(keys=['img'], crop_size=(512, 512), scale=(0.08, 1.0), "
f'ratio={(3. / 4., 4. / 3.)}, interpolation=bilinear)')
# test center crop for in_ratio < min(self.ratio)
results = copy.deepcopy(self.results)
center_crop = RandomResizedCrop(['img'],
crop_size=(128, 128),
ratio=(100.0, 200.0))
results = center_crop(results)
assert results['img_crop_bbox'] == [126, 0, 256, 3]
assert results['img'].shape == (128, 128, 3)
# test center crop for in_ratio > max(self.ratio)
results = copy.deepcopy(self.results)
center_crop = RandomResizedCrop(['img'],
crop_size=(128, 128),
ratio=(0.01, 0.02))
results = center_crop(results)
assert results['img_crop_bbox'] == [0, 125, 5, 256]
assert results['img'].shape == (128, 128, 3)
def test_fixed_crop(self):
with pytest.raises(TypeError):
FixedCrop(['img_a', 'img_b'], (0.23, 0.1))
with pytest.raises(TypeError):
FixedCrop(['img_a', 'img_b'], (256, 256), (0, 0.1))
# test shape consistency
results = copy.deepcopy(self.results)
fixed_crop = FixedCrop(['img_a', 'img'], crop_size=(128, 128))
with pytest.raises(ValueError):
results = fixed_crop(results)
# test sequence
results = copy.deepcopy(self.results)
results['img_a'] = [results['img_a'], results['img_a']]
results['img_b'] = [results['img_b'], results['img_b']]
fixed_crop = FixedCrop(['img_a', 'img_b'], crop_size=(128, 128))
results = fixed_crop(results)
for img in results['img_a']:
assert img.shape == (128, 128, 3)
for img in results['img_b']:
assert img.shape == (128, 128, 3)
# test given pos crop
results = copy.deepcopy(self.results)
given_pos_crop = FixedCrop(['img_a', 'img_b'],
crop_size=(256, 256),
crop_pos=(1, 1))
results = given_pos_crop(results)
assert results['img_a_crop_bbox'] == [1, 1, 256, 256]
assert results['img_b_crop_bbox'] == [1, 1, 256, 256]
assert np.array_equal(self.results['img_a'][1:257, 1:257, :],
results['img_a'])
assert np.array_equal(self.results['img_b'][1:257, 1:257, :],
results['img_b'])
# test given pos crop if pos > suitable pos
results = copy.deepcopy(self.results)
given_pos_crop = FixedCrop(['img_a', 'img_b'],
crop_size=(256, 256),
crop_pos=(280, 280))
results = given_pos_crop(results)
assert results['img_a_crop_bbox'] == [280, 280, 6, 6]
assert results['img_b_crop_bbox'] == [280, 280, 6, 6]
assert np.array_equal(self.results['img_a'][280:, 280:, :],
results['img_a'])
assert np.array_equal(self.results['img_b'][280:, 280:, :],
results['img_b'])
assert str(given_pos_crop) == (
given_pos_crop.__class__.__name__ +
"keys=['img_a', 'img_b'], crop_size=(256, 256), " +
'crop_pos=(280, 280)')
# test random initialized fixed crop
results = copy.deepcopy(self.results)
random_fixed_crop = FixedCrop(['img_a', 'img_b'],
crop_size=(256, 256),
crop_pos=None)
results = random_fixed_crop(results)
assert 0 <= results['img_a_crop_bbox'][0] <= 30
assert 0 <= results['img_a_crop_bbox'][1] <= 30
assert results['img_a_crop_bbox'][2] == 256
assert results['img_a_crop_bbox'][3] == 256
x_offset, y_offset, crop_w, crop_h = results['img_a_crop_bbox']
assert x_offset == results['img_b_crop_bbox'][0]
assert y_offset == results['img_b_crop_bbox'][1]
assert crop_w == results['img_b_crop_bbox'][2]
assert crop_h == results['img_b_crop_bbox'][3]
assert np.array_equal(
self.results['img_a'][y_offset:y_offset + crop_h,
x_offset:x_offset + crop_w, :],
results['img_a'])
assert np.array_equal(
self.results['img_b'][y_offset:y_offset + crop_h,
x_offset:x_offset + crop_w, :],
results['img_b'])
        # test given pos crop for larger size than the original shape
results = copy.deepcopy(self.results)
given_pos_crop = FixedCrop(['img_a', 'img_b'],
crop_size=(512, 512),
crop_pos=(1, 1))
results = given_pos_crop(results)
assert results['img_a_crop_bbox'] == [1, 1, 285, 285]
assert results['img_b_crop_bbox'] == [1, 1, 285, 285]
assert np.array_equal(self.results['img_a'][1:, 1:, :],
results['img_a'])
assert np.array_equal(self.results['img_b'][1:, 1:, :],
results['img_b'])
assert str(given_pos_crop) == (
given_pos_crop.__class__.__name__ +
"keys=['img_a', 'img_b'], crop_size=(512, 512), crop_pos=(1, 1)")
        # test random initialized fixed crop for larger size
# than the original shape
results = copy.deepcopy(self.results)
random_fixed_crop = FixedCrop(['img_a', 'img_b'],
crop_size=(512, 512),
crop_pos=None)
results = random_fixed_crop(results)
assert results['img_a_crop_bbox'] == [0, 0, 286, 286]
assert results['img_b_crop_bbox'] == [0, 0, 286, 286]
assert np.array_equal(self.results['img_a'], results['img_a'])
assert np.array_equal(self.results['img_b'], results['img_b'])
assert str(random_fixed_crop) == (
random_fixed_crop.__class__.__name__ +
"keys=['img_a', 'img_b'], crop_size=(512, 512), crop_pos=None")
def test_crop_around_center(self):
with pytest.raises(TypeError):
CropAroundCenter(320.)
with pytest.raises(AssertionError):
CropAroundCenter((320, 320, 320))
target_keys = ['fg', 'bg', 'alpha', 'trimap', 'crop_bbox']
fg = np.random.rand(240, 320, 3)
bg = np.random.rand(240, 320, 3)
trimap = np.random.rand(240, 320)
alpha = np.random.rand(240, 320)
# make sure there would be semi-transparent area
trimap[128, 128] = 128
results = dict(fg=fg, bg=bg, trimap=trimap, alpha=alpha)
crop_around_center = CropAroundCenter(crop_size=320)
crop_around_center_results = crop_around_center(results)
assert self.check_keys_contain(crop_around_center_results.keys(),
target_keys)
assert self.check_crop(crop_around_center_results['alpha'].shape,
crop_around_center_results['crop_bbox'])
assert self.check_crop_around_semi(crop_around_center_results['alpha'])
# make sure there would be semi-transparent area
trimap[:, :] = 128
results = dict(fg=fg, bg=bg, trimap=trimap, alpha=alpha)
crop_around_center = CropAroundCenter(crop_size=200)
crop_around_center_results = crop_around_center(results)
assert self.check_keys_contain(crop_around_center_results.keys(),
target_keys)
assert self.check_crop(crop_around_center_results['alpha'].shape,
crop_around_center_results['crop_bbox'])
assert self.check_crop_around_semi(crop_around_center_results['alpha'])
repr_str = crop_around_center.__class__.__name__ + (
f'(crop_size={(200, 200)})')
assert repr(crop_around_center) == repr_str
def test_crop_around_unknown(self):
with pytest.raises(ValueError):
# keys must contain 'alpha'
CropAroundUnknown(['fg', 'bg'], [320])
with pytest.raises(TypeError):
# crop_size must be a list
CropAroundUnknown(['alpha'], 320)
with pytest.raises(TypeError):
# crop_size must be a list of int
CropAroundUnknown(['alpha'], [320.])
with pytest.raises(ValueError):
# unknown_source must be either 'alpha' or 'trimap'
CropAroundUnknown(['alpha', 'fg'], [320], unknown_source='fg')
with pytest.raises(ValueError):
# if unknown_source is 'trimap', then keys must contain it
CropAroundUnknown(['alpha', 'fg'], [320], unknown_source='trimap')
keys = ['fg', 'bg', 'merged', 'alpha', 'trimap', 'ori_merged']
target_keys = [
'fg', 'bg', 'merged', 'alpha', 'trimap', 'ori_merged', 'crop_bbox'
]
# test cropping using trimap to decide unknown area
fg = np.random.rand(240, 320, 3)
bg = np.random.rand(240, 320, 3)
merged = np.random.rand(240, 320, 3)
ori_merged = merged.copy()
alpha = np.zeros((240, 320))
# make sure there would be unknown area
alpha[:16, -16:] = 128
trimap = np.zeros_like(alpha)
trimap[alpha > 0] = 128
trimap[alpha == 255] = 255
results = dict(
fg=fg,
bg=bg,
merged=merged,
ori_merged=ori_merged,
alpha=alpha,
trimap=trimap)
crop_around_semi_trans = CropAroundUnknown(
keys, crop_sizes=[320], unknown_source='trimap')
crop_around_semi_trans_results = crop_around_semi_trans(results)
assert self.check_keys_contain(crop_around_semi_trans_results.keys(),
target_keys)
assert self.check_crop(crop_around_semi_trans_results['alpha'].shape,
crop_around_semi_trans_results['crop_bbox'])
assert self.check_crop_around_semi(
crop_around_semi_trans_results['alpha'])
keys = ['fg', 'bg', 'merged', 'alpha', 'ori_merged']
target_keys = [
'fg', 'bg', 'merged', 'alpha', 'ori_merged', 'crop_bbox'
]
# test cropping using alpha to decide unknown area
fg = np.random.rand(240, 320, 3)
bg = np.random.rand(240, 320, 3)
merged = np.random.rand(240, 320, 3)
ori_merged = merged.copy()
alpha = np.random.rand(240, 320)
# make sure there would be unknown area
alpha[120:160, 120:160] = 128
results = dict(
fg=fg, bg=bg, merged=merged, ori_merged=ori_merged, alpha=alpha)
crop_around_semi_trans = CropAroundUnknown(
keys, crop_sizes=[160], unknown_source='alpha')
crop_around_semi_trans_results = crop_around_semi_trans(results)
assert self.check_keys_contain(crop_around_semi_trans_results.keys(),
target_keys)
assert self.check_crop(crop_around_semi_trans_results['alpha'].shape,
crop_around_semi_trans_results['crop_bbox'])
assert self.check_crop_around_semi(
crop_around_semi_trans_results['alpha'])
# test cropping when there is no unknown area
fg = np.random.rand(240, 320, 3)
bg = np.random.rand(240, 320, 3)
merged = np.random.rand(240, 320, 3)
ori_merged = merged.copy()
alpha = np.zeros((240, 320))
results = dict(
fg=fg, bg=bg, merged=merged, ori_merged=ori_merged, alpha=alpha)
crop_around_semi_trans = CropAroundUnknown(
keys, crop_sizes=[240], unknown_source='alpha')
crop_around_semi_trans_results = crop_around_semi_trans(results)
assert self.check_keys_contain(crop_around_semi_trans_results.keys(),
target_keys)
assert self.check_crop(crop_around_semi_trans_results['alpha'].shape,
crop_around_semi_trans_results['crop_bbox'])
repr_str = (
crop_around_semi_trans.__class__.__name__ +
f"(keys={keys}, crop_sizes={[(240, 240)]}, unknown_source='alpha',"
" interpolations=['bilinear', 'bilinear', 'bilinear', 'bilinear', "
"'bilinear'])")
assert crop_around_semi_trans.__repr__() == repr_str
def test_crop_around_fg(self):
with pytest.raises(ValueError):
# keys must contain 'seg'
CropAroundFg(['fg', 'bg'])
with pytest.raises(TypeError):
# bd_ratio_range must be a tuple of 2 float
CropAroundFg(['seg', 'merged'], bd_ratio_range=0.1)
keys = ['bg', 'merged', 'seg']
target_keys = ['bg', 'merged', 'seg', 'crop_bbox']
bg = np.random.rand(60, 60, 3)
merged = np.random.rand(60, 60, 3)
seg = np.random.rand(60, 60)
results = dict(bg=bg, merged=merged, seg=seg)
crop_around_fg = CropAroundFg(keys)
crop_around_fg_results = crop_around_fg(results)
assert self.check_keys_contain(crop_around_fg_results.keys(),
target_keys)
assert self.check_crop(crop_around_fg_results['seg'].shape,
crop_around_fg_results['crop_bbox'])
crop_around_fg = CropAroundFg(keys, test_mode=True)
crop_around_fg_results = crop_around_fg(results)
result_img_shape = crop_around_fg_results['seg'].shape
assert self.check_keys_contain(crop_around_fg_results.keys(),
target_keys)
assert self.check_crop(result_img_shape,
crop_around_fg_results['crop_bbox'])
# it should be a square in test mode
assert result_img_shape[0] == result_img_shape[1]
def test_modcrop(self):
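        # ModCrop trims height/width down to the nearest multiple of `scale`,
        # so a (257, 258) image with scale=4 is cropped to (256, 256).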
# color image
results = dict(gt=np.random.randn(257, 258, 3), scale=4)
modcrop = ModCrop()
results = modcrop(results)
assert results['gt'].shape == (256, 256, 3)
# gray image
results = dict(gt=np.random.randn(257, 258), scale=4)
results = modcrop(results)
assert results['gt'].shape == (256, 256)
# Wrong img ndim
with pytest.raises(ValueError):
results = dict(gt=np.random.randn(1, 257, 258, 3), scale=4)
results = modcrop(results)
def test_paired_random_crop(self):
results = self.results.copy()
pairedrandomcrop = PairedRandomCrop(128)
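        # gt_patch_size=128 with scale=4 implies a paired 32x32 LQ patch,
        # taken at the corresponding (scaled) location in the LQ image.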
results = pairedrandomcrop(results)
assert results['gt'].shape == (128, 128, 3)
assert results['lq'].shape == (32, 32, 3)
        # Scale mismatch: GT (h, w) is not a {scale}-multiple of LQ's.
with pytest.raises(ValueError):
results = dict(
gt=np.random.randn(128, 128, 3),
lq=np.random.randn(64, 64, 3),
scale=4,
gt_path='fake_gt_path',
lq_path='fake_lq_path')
results = pairedrandomcrop(results)
# LQ (h, w) is smaller than patch size.
with pytest.raises(ValueError):
results = dict(
gt=np.random.randn(32, 32, 3),
lq=np.random.randn(8, 8, 3),
scale=4,
gt_path='fake_gt_path',
lq_path='fake_lq_path')
results = pairedrandomcrop(results)
assert repr(pairedrandomcrop) == (
pairedrandomcrop.__class__.__name__ + '(gt_patch_size=128)')
# for image list
results = dict(
lq=[self.img_lq, self.img_lq],
gt=[self.img_gt, self.img_gt],
scale=4,
lq_path='fake_lq_path',
gt_path='fake_gt_path')
pairedrandomcrop = PairedRandomCrop(128)
results = pairedrandomcrop(results)
for v in results['gt']:
assert v.shape == (128, 128, 3)
for v in results['lq']:
assert v.shape == (32, 32, 3)
np.testing.assert_almost_equal(results['gt'][0], results['gt'][1])
np.testing.assert_almost_equal(results['lq'][0], results['lq'][1])
def test_crop_like():
img = np.uint8(np.random.randn(480, 640, 3) * 255)
img_ref = np.uint8(np.random.randn(512, 512, 3) * 255)
inputs = dict(gt=img, ref=img_ref)
crop_like = CropLike(target_key='gt', reference_key='ref')
results = crop_like(inputs)
assert set(list(results.keys())) == set(['gt', 'ref'])
assert repr(crop_like) == (
crop_like.__class__.__name__ +
f' target_key={crop_like.target_key}, ' +
f'reference_key={crop_like.reference_key}')
assert results['gt'].shape == (512, 512, 3)
sum_diff = np.sum(abs(results['gt'][:480, :512] - img[:480, :512]))
assert sum_diff < 1e-6
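    # CropLike matches 'gt' to the reference's spatial size: the overlap is
    # copied verbatim (hence the zero diff on [:480, :512]) and the extra
    # columns are cropped; missing rows are presumably zero-padded.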
inputs = dict(gt=img, ref=img_ref[:, :, 0])
crop_like = CropLike(target_key='gt', reference_key='ref')
results = crop_like(inputs)
assert set(list(results.keys())) == set(['gt', 'ref'])
assert results['gt'].shape == (512, 512, 3)
sum_diff = np.sum(abs(results['gt'][:480, :512] - img[:480, :512]))
assert sum_diff < 1e-6
inputs = dict(gt=img[:, :, 0], ref=img_ref)
crop_like = CropLike(target_key='gt', reference_key='ref')
results = crop_like(inputs)
assert set(list(results.keys())) == set(['gt', 'ref'])
assert results['gt'].shape == (512, 512)
sum_diff = np.sum(abs(results['gt'][:480, :512] - img[:480, :512, 0]))
assert sum_diff < 1e-6
| 23,399 | 43.067797 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_utils/test_tensor2img.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
import torch
from torchvision.utils import make_grid
from mmedit.core import tensor2img
def test_tensor2img():
tensor_4d_1 = torch.FloatTensor(2, 3, 4, 4).uniform_(0, 1)
tensor_4d_2 = torch.FloatTensor(1, 3, 4, 4).uniform_(0, 1)
tensor_4d_3 = torch.FloatTensor(3, 1, 4, 4).uniform_(0, 1)
tensor_4d_4 = torch.FloatTensor(1, 1, 4, 4).uniform_(0, 1)
tensor_3d_1 = torch.FloatTensor(3, 4, 4).uniform_(0, 1)
tensor_3d_2 = torch.FloatTensor(3, 6, 6).uniform_(0, 1)
tensor_3d_3 = torch.FloatTensor(1, 6, 6).uniform_(0, 1)
tensor_2d = torch.FloatTensor(4, 4).uniform_(0, 1)
with pytest.raises(TypeError):
# input is not a tensor
tensor2img(4)
with pytest.raises(TypeError):
# input is not a list of tensors
tensor2img([tensor_3d_1, 4])
with pytest.raises(ValueError):
# unsupported 5D tensor
tensor2img(torch.FloatTensor(2, 2, 3, 4, 4).uniform_(0, 1))
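    # tensor2img outputs BGR by default, hence the [2, 1, 0] channel flip
    # applied below when building the expected arrays from RGB tensors.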
# 4d
rlt = tensor2img(tensor_4d_1, out_type=np.uint8, min_max=(0, 1))
assert rlt.dtype == np.uint8
tensor_4d_1_np = make_grid(tensor_4d_1, nrow=1, normalize=False).numpy()
tensor_4d_1_np = np.transpose(tensor_4d_1_np[[2, 1, 0], :, :], (1, 2, 0))
np.testing.assert_almost_equal(rlt, (tensor_4d_1_np * 255).round())
rlt = tensor2img(tensor_4d_2, out_type=np.uint8, min_max=(0, 1))
assert rlt.dtype == np.uint8
tensor_4d_2_np = tensor_4d_2.squeeze().numpy()
tensor_4d_2_np = np.transpose(tensor_4d_2_np[[2, 1, 0], :, :], (1, 2, 0))
np.testing.assert_almost_equal(rlt, (tensor_4d_2_np * 255).round())
rlt = tensor2img(tensor_4d_3, out_type=np.uint8, min_max=(0, 1))
assert rlt.dtype == np.uint8
tensor_4d_3_np = make_grid(tensor_4d_3, nrow=1, normalize=False).numpy()
tensor_4d_3_np = np.transpose(tensor_4d_3_np[[2, 1, 0], :, :], (1, 2, 0))
np.testing.assert_almost_equal(rlt, (tensor_4d_3_np * 255).round())
rlt = tensor2img(tensor_4d_4, out_type=np.uint8, min_max=(0, 1))
assert rlt.dtype == np.uint8
tensor_4d_4_np = tensor_4d_4.squeeze().numpy()
np.testing.assert_almost_equal(rlt, (tensor_4d_4_np * 255).round())
# 3d
rlt = tensor2img([tensor_3d_1, tensor_3d_2],
out_type=np.uint8,
min_max=(0, 1))
assert rlt[0].dtype == np.uint8
tensor_3d_1_np = tensor_3d_1.numpy()
tensor_3d_1_np = np.transpose(tensor_3d_1_np[[2, 1, 0], :, :], (1, 2, 0))
tensor_3d_2_np = tensor_3d_2.numpy()
tensor_3d_2_np = np.transpose(tensor_3d_2_np[[2, 1, 0], :, :], (1, 2, 0))
np.testing.assert_almost_equal(rlt[0], (tensor_3d_1_np * 255).round())
np.testing.assert_almost_equal(rlt[1], (tensor_3d_2_np * 255).round())
rlt = tensor2img(tensor_3d_3, out_type=np.uint8, min_max=(0, 1))
assert rlt.dtype == np.uint8
tensor_3d_3_np = tensor_3d_3.squeeze().numpy()
np.testing.assert_almost_equal(rlt, (tensor_3d_3_np * 255).round())
# 2d
rlt = tensor2img(tensor_2d, out_type=np.uint8, min_max=(0, 1))
assert rlt.dtype == np.uint8
tensor_2d_np = tensor_2d.numpy()
np.testing.assert_almost_equal(rlt, (tensor_2d_np * 255).round())
rlt = tensor2img(tensor_2d, out_type=np.float32, min_max=(0, 1))
assert rlt.dtype == np.float32
np.testing.assert_almost_equal(rlt, tensor_2d_np)
rlt = tensor2img(tensor_2d, out_type=np.float32, min_max=(0.1, 0.5))
assert rlt.dtype == np.float32
tensor_2d_np = (np.clip(tensor_2d_np, 0.1, 0.5) - 0.1) / 0.4
np.testing.assert_almost_equal(rlt, tensor_2d_np)
| 3,609 | 41.97619 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_utils/test_pix2pix.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
from unittest.mock import patch
import mmcv
import pytest
import torch
from mmcv.parallel import DataContainer as DC
from mmcv.runner import obj_from_dict
from mmedit.models import build_model
from mmedit.models.backbones import UnetGenerator
from mmedit.models.components import PatchDiscriminator
from mmedit.models.losses import GANLoss, L1Loss
def test_pix2pix():
model_cfg = dict(
type='Pix2Pix',
generator=dict(
type='UnetGenerator',
in_channels=3,
out_channels=3,
num_down=8,
base_channels=64,
norm_cfg=dict(type='BN'),
use_dropout=True,
init_cfg=dict(type='normal', gain=0.02)),
discriminator=dict(
type='PatchDiscriminator',
in_channels=6,
base_channels=64,
num_conv=3,
norm_cfg=dict(type='BN'),
init_cfg=dict(type='normal', gain=0.02)),
gan_loss=dict(
type='GANLoss',
gan_type='vanilla',
real_label_val=1.0,
fake_label_val=0,
loss_weight=1.0),
pixel_loss=dict(type='L1Loss', loss_weight=100.0, reduction='mean'))
train_cfg = None
test_cfg = None
# build synthesizer
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test checking gan loss cannot be None
with pytest.raises(AssertionError):
bad_model_cfg = copy.deepcopy(model_cfg)
bad_model_cfg['gan_loss'] = None
_ = build_model(bad_model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert synthesizer.__class__.__name__ == 'Pix2Pix'
assert isinstance(synthesizer.generator, UnetGenerator)
assert isinstance(synthesizer.discriminator, PatchDiscriminator)
assert isinstance(synthesizer.gan_loss, GANLoss)
assert isinstance(synthesizer.pixel_loss, L1Loss)
assert synthesizer.train_cfg is None
assert synthesizer.test_cfg is None
# prepare data
inputs = torch.rand(1, 3, 256, 256)
targets = torch.rand(1, 3, 256, 256)
data_batch = {'img_a': inputs, 'img_b': targets}
img_meta = {}
img_meta['img_a_path'] = 'img_a_path'
img_meta['img_b_path'] = 'img_b_path'
data_batch['meta'] = [img_meta]
# prepare optimizer
optim_cfg = dict(type='Adam', lr=2e-4, betas=(0.5, 0.999))
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminator').parameters()))
}
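    # obj_from_dict instantiates the class named in optim_cfg
    # (torch.optim.Adam here); generator and discriminator get separate
    # optimizers because the GAN train_step updates them independently.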
# test forward_dummy
with torch.no_grad():
output = synthesizer.forward_dummy(data_batch['img_a'])
assert torch.is_tensor(output)
assert output.size() == (1, 3, 256, 256)
# test forward_test
with torch.no_grad():
outputs = synthesizer(inputs, targets, [img_meta], test_mode=True)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert outputs['fake_b'].size() == (1, 3, 256, 256)
# val_step
with torch.no_grad():
outputs = synthesizer.val_step(data_batch)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert outputs['fake_b'].size() == (1, 3, 256, 256)
# test forward_train
outputs = synthesizer(inputs, targets, [img_meta], test_mode=False)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert outputs['fake_b'].size() == (1, 3, 256, 256)
# test train_step
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
for v in [
'loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_pixel'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
synthesizer = synthesizer.cuda()
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(
params=getattr(synthesizer, 'discriminator').parameters()))
}
data_batch_cuda = copy.deepcopy(data_batch)
data_batch_cuda['img_a'] = inputs.cuda()
data_batch_cuda['img_b'] = targets.cuda()
data_batch_cuda['meta'] = [DC(img_meta, cpu_only=True).data]
# forward_test
with torch.no_grad():
outputs = synthesizer(
data_batch_cuda['img_a'],
data_batch_cuda['img_b'],
data_batch_cuda['meta'],
test_mode=True)
assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'].cpu())
assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'].cpu())
assert torch.is_tensor(outputs['fake_b'])
assert outputs['fake_b'].size() == (1, 3, 256, 256)
# val_step
with torch.no_grad():
outputs = synthesizer.val_step(data_batch_cuda)
assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'].cpu())
assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'].cpu())
assert torch.is_tensor(outputs['fake_b'])
assert outputs['fake_b'].size() == (1, 3, 256, 256)
# test forward_train
outputs = synthesizer(
data_batch_cuda['img_a'],
data_batch_cuda['img_b'],
data_batch_cuda['meta'],
test_mode=False)
assert torch.equal(outputs['real_a'], data_batch_cuda['img_a'])
assert torch.equal(outputs['real_b'], data_batch_cuda['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert outputs['fake_b'].size() == (1, 3, 256, 256)
# train_step
outputs = synthesizer.train_step(data_batch_cuda, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
for v in [
'loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g',
'loss_pixel'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'],
data_batch_cuda['img_a'].cpu())
assert torch.equal(outputs['results']['real_b'],
data_batch_cuda['img_b'].cpu())
assert torch.is_tensor(outputs['results']['fake_b'])
assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)
# test disc_steps and disc_init_steps
data_batch['img_a'] = inputs.cpu()
data_batch['img_b'] = targets.cpu()
train_cfg = dict(disc_steps=2, disc_init_steps=2)
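    # disc_init_steps=2: only the discriminator is updated for the first two
    # iterations; disc_steps=2: afterwards the generator is updated once per
    # two steps. The loops below check exactly this schedule.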
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminator').parameters()))
}
# iter 0, 1
for i in range(2):
assert synthesizer.step_counter == i
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
assert outputs['log_vars'].get('loss_gan_g') is None
assert outputs['log_vars'].get('loss_pixel') is None
for v in ['loss_gan_d_fake', 'loss_gan_d_real']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)
assert synthesizer.step_counter == i + 1
# iter 2, 3, 4, 5
for i in range(2, 6):
assert synthesizer.step_counter == i
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
log_check_list = [
'loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_pixel'
]
if i % 2 == 1:
assert outputs['log_vars'].get('loss_gan_g') is None
assert outputs['log_vars'].get('loss_pixel') is None
log_check_list.remove('loss_gan_g')
log_check_list.remove('loss_pixel')
for v in log_check_list:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)
assert synthesizer.step_counter == i + 1
# test without pixel loss
model_cfg_ = copy.deepcopy(model_cfg)
model_cfg_.pop('pixel_loss')
synthesizer = build_model(model_cfg_, train_cfg=None, test_cfg=None)
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminator').parameters()))
}
data_batch['img_a'] = inputs.cpu()
data_batch['img_b'] = targets.cpu()
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
assert outputs['log_vars'].get('loss_pixel') is None
for v in ['loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g']:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_a'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)
# test b2a translation
data_batch['img_a'] = inputs.cpu()
data_batch['img_b'] = targets.cpu()
train_cfg = dict(direction='b2a')
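    # With direction='b2a', img_b is treated as the source domain, so the
    # outputs' real_a/real_b are swapped relative to the input dict, as the
    # asserts below verify.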
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
optimizer = {
'generator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'generator').parameters())),
'discriminator':
obj_from_dict(
optim_cfg, torch.optim,
dict(params=getattr(synthesizer, 'discriminator').parameters()))
}
assert synthesizer.step_counter == 0
outputs = synthesizer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['results'], dict)
for v in [
'loss_gan_d_fake', 'loss_gan_d_real', 'loss_gan_g', 'loss_pixel'
]:
assert isinstance(outputs['log_vars'][v], float)
assert outputs['num_samples'] == 1
assert torch.equal(outputs['results']['real_a'], data_batch['img_b'])
assert torch.equal(outputs['results']['real_b'], data_batch['img_a'])
assert torch.is_tensor(outputs['results']['fake_b'])
assert outputs['results']['fake_b'].size() == (1, 3, 256, 256)
assert synthesizer.step_counter == 1
# test save image
# show input
train_cfg = None
test_cfg = dict(show_input=True)
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with patch.object(mmcv, 'imwrite', return_value=True):
# test save path not None Assertion
with pytest.raises(AssertionError):
with torch.no_grad():
_ = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True)
# iteration is None
with torch.no_grad():
outputs = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True,
save_path='save_path')
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert outputs['fake_b'].size() == (1, 3, 256, 256)
assert outputs['saved_flag']
# iteration is not None
with torch.no_grad():
outputs = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True,
save_path='save_path',
iteration=1000)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert outputs['fake_b'].size() == (1, 3, 256, 256)
assert outputs['saved_flag']
# not show input
train_cfg = None
test_cfg = dict(show_input=False)
synthesizer = build_model(
model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
with patch.object(mmcv, 'imwrite', return_value=True):
# test save path not None Assertion
with pytest.raises(AssertionError):
with torch.no_grad():
_ = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True)
# iteration is None
with torch.no_grad():
outputs = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True,
save_path='save_path')
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert outputs['fake_b'].size() == (1, 3, 256, 256)
assert outputs['saved_flag']
# iteration is not None
with torch.no_grad():
outputs = synthesizer(
inputs,
targets, [img_meta],
test_mode=True,
save_image=True,
save_path='save_path',
iteration=1000)
assert torch.equal(outputs['real_a'], data_batch['img_a'])
assert torch.equal(outputs['real_b'], data_batch['img_b'])
assert torch.is_tensor(outputs['fake_b'])
assert outputs['fake_b'].size() == (1, 3, 256, 256)
assert outputs['saved_flag']
| 16,144 | 38.864198 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_utils/test_setup_env.py | # Copyright (c) OpenMMLab. All rights reserved.
import multiprocessing as mp
import os
import platform
import cv2
from mmcv import Config
from mmedit.utils import setup_multi_processes
def test_setup_multi_processes():
    # temporarily save system settings
    sys_start_method = mp.get_start_method(allow_none=True)
sys_cv_threads = cv2.getNumThreads()
# pop and temp save system env vars
sys_omp_threads = os.environ.pop('OMP_NUM_THREADS', default=None)
sys_mkl_threads = os.environ.pop('MKL_NUM_THREADS', default=None)
# test config without setting env
config = dict(data=dict(workers_per_gpu=2))
cfg = Config(config)
setup_multi_processes(cfg)
assert os.getenv('OMP_NUM_THREADS') == '1'
assert os.getenv('MKL_NUM_THREADS') == '1'
# when set to 0, the num threads will be 1
assert cv2.getNumThreads() == 1
if platform.system() != 'Windows':
assert mp.get_start_method() == 'fork'
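    # i.e. with multiple dataloader workers and no explicit settings,
    # setup_multi_processes pins OMP/MKL to a single thread, limits OpenCV
    # to one thread, and (on non-Windows) selects the 'fork' start method.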
# test num workers <= 1
os.environ.pop('OMP_NUM_THREADS')
os.environ.pop('MKL_NUM_THREADS')
config = dict(data=dict(workers_per_gpu=0))
cfg = Config(config)
setup_multi_processes(cfg)
assert 'OMP_NUM_THREADS' not in os.environ
assert 'MKL_NUM_THREADS' not in os.environ
# test manually set env var
os.environ['OMP_NUM_THREADS'] = '4'
config = dict(data=dict(workers_per_gpu=2))
cfg = Config(config)
setup_multi_processes(cfg)
assert os.getenv('OMP_NUM_THREADS') == '4'
# test manually set opencv threads and mp start method
config = dict(
data=dict(workers_per_gpu=2),
opencv_num_threads=4,
mp_start_method='spawn')
cfg = Config(config)
setup_multi_processes(cfg)
assert cv2.getNumThreads() == 4
assert mp.get_start_method() == 'spawn'
# revert setting to avoid affecting other programs
    if sys_start_method:
        mp.set_start_method(sys_start_method, force=True)
cv2.setNumThreads(sys_cv_threads)
if sys_omp_threads:
os.environ['OMP_NUM_THREADS'] = sys_omp_threads
else:
os.environ.pop('OMP_NUM_THREADS')
if sys_mkl_threads:
os.environ['MKL_NUM_THREADS'] = sys_mkl_threads
else:
os.environ.pop('MKL_NUM_THREADS')
| 2,222 | 31.217391 | 69 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_utils/test_mask_generation.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmedit.core.mask import (bbox2mask, brush_stroke_mask, get_irregular_mask,
random_bbox)
def test_bbox_mask():
# default config for random bbox mask
cfg = dict(
img_shape=(256, 256),
max_bbox_shape=100,
max_bbox_delta=10,
min_margin=10)
bbox = random_bbox(**cfg)
mask_bbox = bbox2mask(cfg['img_shape'], bbox)
assert mask_bbox.shape == (256, 256, 1)
zero_area = np.sum((mask_bbox == 0).astype(np.uint8))
ones_area = np.sum((mask_bbox == 1).astype(np.uint8))
assert zero_area + ones_area == 256 * 256
assert mask_bbox.dtype == np.uint8
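    # random_bbox samples a box inside img_shape while keeping min_margin
    # from the borders; bbox2mask rasterizes it into a single-channel uint8
    # mask of ones on a zero background, hence zero_area + ones_area == H * W.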
with pytest.raises(ValueError):
cfg_ = cfg.copy()
cfg_['max_bbox_shape'] = 300
bbox = random_bbox(**cfg_)
with pytest.raises(ValueError):
cfg_ = cfg.copy()
cfg_['max_bbox_delta'] = 300
bbox = random_bbox(**cfg_)
with pytest.raises(ValueError):
cfg_ = cfg.copy()
cfg_['max_bbox_shape'] = 254
bbox = random_bbox(**cfg_)
cfg_ = cfg.copy()
cfg_['max_bbox_delta'] = 1
bbox = random_bbox(**cfg_)
mask_bbox = bbox2mask(cfg['img_shape'], bbox)
assert mask_bbox.shape == (256, 256, 1)
def test_free_form_mask():
img_shape = (256, 256, 3)
for _ in range(10):
mask = brush_stroke_mask(img_shape)
assert mask.shape == (256, 256, 1)
img_shape = (256, 256, 3)
mask = brush_stroke_mask(img_shape, num_vertices=8)
assert mask.shape == (256, 256, 1)
zero_area = np.sum((mask == 0).astype(np.uint8))
ones_area = np.sum((mask == 1).astype(np.uint8))
assert zero_area + ones_area == 256 * 256
assert mask.dtype == np.uint8
img_shape = (256, 256, 3)
mask = brush_stroke_mask(img_shape, brush_width=10)
assert mask.shape == (256, 256, 1)
with pytest.raises(TypeError):
mask = brush_stroke_mask(img_shape, num_vertices=dict())
with pytest.raises(TypeError):
mask = brush_stroke_mask(img_shape, brush_width=dict())
def test_irregular_mask():
img_shape = (256, 256)
for _ in range(10):
mask = get_irregular_mask(img_shape)
assert mask.shape == (256, 256, 1)
assert 0.15 < (np.sum(mask) / (img_shape[0] * img_shape[1])) < 0.50
zero_area = np.sum((mask == 0).astype(np.uint8))
ones_area = np.sum((mask == 1).astype(np.uint8))
assert zero_area + ones_area == 256 * 256
assert mask.dtype == np.uint8
with pytest.raises(TypeError):
mask = get_irregular_mask(img_shape, brush_width=dict())
with pytest.raises(TypeError):
mask = get_irregular_mask(img_shape, length_range=dict())
with pytest.raises(TypeError):
mask = get_irregular_mask(img_shape, num_vertices=dict())
mask = get_irregular_mask(img_shape, brush_width=10)
assert mask.shape == (256, 256, 1)
mask = get_irregular_mask(img_shape, length_range=10)
assert mask.shape == (256, 256, 1)
mask = get_irregular_mask(img_shape, num_vertices=10)
assert mask.shape == (256, 256, 1)
| 3,158 | 30.59 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_utils/test_modify_args.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
from unittest.mock import patch
from mmedit.utils import modify_args
def test_modify_args():
def _parse_args():
parser = argparse.ArgumentParser(description='Generation demo')
parser.add_argument('--config-path', help='test config file path')
args = parser.parse_args()
return args
with patch('argparse._sys.argv', ['test.py', '--config_path=config.py']):
modify_args()
args = _parse_args()
assert args.config_path == 'config.py'
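        # i.e. modify_args rewrites underscore-style options on the command
        # line ('--config_path') into the dash-style names argparse expects
        # ('--config-path') before parsing.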
| 563 | 27.2 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_utils/test_onnx_wraper.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import mmcv
import numpy as np
import pytest
import torch
from packaging import version
from mmedit.models import build_model
@pytest.mark.skipif(torch.__version__ == 'parrots', reason='skip parrots.')
@pytest.mark.skipif(
version.parse(torch.__version__) < version.parse('1.4.0'),
reason='skip if torch=1.3.x')
def test_restorer_wrapper():
try:
import onnxruntime as ort
from mmedit.core.export.wrappers import (ONNXRuntimeEditing,
ONNXRuntimeRestorer)
except ImportError:
pytest.skip('ONNXRuntime is not available.')
onnx_path = 'tmp.onnx'
scale = 4
train_cfg = None
test_cfg = None
cfg = dict(
model=dict(
type='BasicRestorer',
generator=dict(
type='SRCNN',
channels=(3, 4, 2, 3),
kernel_sizes=(9, 1, 5),
upscale_factor=scale),
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean')),
train_cfg=train_cfg,
test_cfg=test_cfg)
cfg = mmcv.Config(cfg)
pytorch_model = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
# prepare data
inputs = torch.rand(1, 3, 2, 2)
targets = torch.rand(1, 3, 8, 8)
data_batch = {'lq': inputs, 'gt': targets}
pytorch_model.forward = pytorch_model.forward_dummy
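    # torch.onnx.export traces `forward`, so it is swapped for forward_dummy,
    # which takes a single input tensor instead of the usual data dict.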
with torch.no_grad():
torch.onnx.export(
pytorch_model,
inputs,
onnx_path,
input_names=['input'],
output_names=['output'],
export_params=True,
keep_initializers_as_inputs=False,
verbose=False,
opset_version=11)
wrap_model = ONNXRuntimeEditing(onnx_path, cfg, 0)
    os.remove(onnx_path)  # clean up the temporary export, as the mattor test does
assert isinstance(wrap_model.wrapper, ONNXRuntimeRestorer)
if ort.get_device() == 'GPU':
data_batch = {'lq': inputs.cuda(), 'gt': targets.cuda()}
with torch.no_grad():
outputs = wrap_model(**data_batch, test_mode=True)
assert isinstance(outputs, dict)
assert 'output' in outputs
output = outputs['output']
assert isinstance(output, torch.Tensor)
assert output.shape == targets.shape
@pytest.mark.skipif(torch.__version__ == 'parrots', reason='skip parrots.')
@pytest.mark.skipif(
version.parse(torch.__version__) < version.parse('1.4.0'),
reason='skip if torch=1.3.x')
def test_mattor_wrapper():
try:
import onnxruntime as ort
from mmedit.core.export.wrappers import (ONNXRuntimeEditing,
ONNXRuntimeMattor)
except ImportError:
pytest.skip('ONNXRuntime is not available.')
onnx_path = 'tmp.onnx'
train_cfg = None
test_cfg = dict(refine=False, metrics=['SAD', 'MSE', 'GRAD', 'CONN'])
cfg = dict(
model=dict(
type='DIM',
backbone=dict(
type='SimpleEncoderDecoder',
encoder=dict(type='VGG16', in_channels=4),
decoder=dict(type='PlainDecoder')),
pretrained='open-mmlab://mmedit/vgg16',
loss_alpha=dict(type='CharbonnierLoss', loss_weight=0.5),
loss_comp=dict(type='CharbonnierCompLoss', loss_weight=0.5)),
train_cfg=train_cfg,
test_cfg=test_cfg)
cfg = mmcv.Config(cfg)
pytorch_model = build_model(
cfg.model, train_cfg=cfg.train_cfg, test_cfg=cfg.test_cfg)
img_shape = (32, 32)
merged = torch.rand(1, 3, img_shape[1], img_shape[0])
trimap = torch.rand(1, 1, img_shape[1], img_shape[0])
data_batch = {'merged': merged, 'trimap': trimap}
inputs = torch.cat([merged, trimap], dim=1)
pytorch_model.forward = pytorch_model.forward_dummy
with torch.no_grad():
torch.onnx.export(
pytorch_model,
inputs,
onnx_path,
input_names=['input'],
output_names=['output'],
export_params=True,
keep_initializers_as_inputs=False,
verbose=False,
opset_version=11)
wrap_model = ONNXRuntimeEditing(onnx_path, cfg, 0)
os.remove(onnx_path)
assert isinstance(wrap_model.wrapper, ONNXRuntimeMattor)
if ort.get_device() == 'GPU':
merged = merged.cuda()
trimap = trimap.cuda()
data_batch = {'merged': merged, 'trimap': trimap}
ori_alpha = np.random.random(img_shape).astype(np.float32)
ori_trimap = np.random.randint(256, size=img_shape).astype(np.float32)
data_batch['meta'] = [
dict(
ori_alpha=ori_alpha,
ori_trimap=ori_trimap,
merged_ori_shape=img_shape)
]
with torch.no_grad():
outputs = wrap_model(**data_batch, test_mode=True)
assert isinstance(outputs, dict)
assert 'pred_alpha' in outputs
pred_alpha = outputs['pred_alpha']
assert isinstance(pred_alpha, np.ndarray)
assert pred_alpha.shape == img_shape
| 5,043 | 30.924051 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/data/inpaintor_config/one_stage_gl.py | # Copyright (c) OpenMMLab. All rights reserved.
model = dict(
type='OneStageInpaintor',
encdec=dict(
type='GLEncoderDecoder',
encoder=dict(type='GLEncoder'),
decoder=dict(type='GLDecoder'),
dilation_neck=dict(type='GLDilationNeck')),
disc=dict(
type='MultiLayerDiscriminator',
in_channels=3,
max_channels=512,
fc_in_channels=512 * 4 * 4,
fc_out_channels=1024,
num_convs=6,
norm_cfg=dict(type='BN'),
),
loss_gan=dict(
type='GANLoss',
gan_type='vanilla',
loss_weight=0.001,
),
loss_gp=dict(
type='GradientPenaltyLoss',
loss_weight=1.,
),
loss_disc_shift=dict(type='DiscShiftLoss', loss_weight=0.001),
loss_composed_percep=dict(
type='PerceptualLoss',
layer_weights={'0': 1.},
perceptual_weight=0.1,
style_weight=0,
),
loss_out_percep=True,
loss_l1_hole=dict(type='L1Loss', loss_weight=1.0),
loss_l1_valid=dict(type='L1Loss', loss_weight=1.0),
loss_tv=dict(type='MaskedTVLoss', loss_weight=0.01),
pretrained=None)
train_cfg = dict(disc_step=1)
test_cfg = dict(metrics=['l1', 'psnr', 'ssim'])
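# A minimal usage sketch (an assumption, mirroring how the unit tests in this
# repo consume model configs):
#   import mmcv
#   from mmedit.models import build_model
#   cfg = mmcv.Config.fromfile('tests/data/inpaintor_config/one_stage_gl.py')
#   inpaintor = build_model(cfg.model, train_cfg=cfg.train_cfg,
#                           test_cfg=cfg.test_cfg)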
| 1,214 | 27.928571 | 66 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/data/inpaintor_config/gl_test.py | # Copyright (c) OpenMMLab. All rights reserved.
global_disc_cfg = dict(
in_channels=3,
max_channels=512,
fc_in_channels=512 * 4 * 4,
fc_out_channels=1024,
num_convs=6,
norm_cfg=dict(type='BN'))
local_disc_cfg = dict(
in_channels=3,
max_channels=512,
fc_in_channels=512 * 4 * 4,
fc_out_channels=1024,
num_convs=5,
norm_cfg=dict(type='BN'))
model = dict(
type='GLInpaintor',
encdec=dict(
type='GLEncoderDecoder',
encoder=dict(type='GLEncoder'),
decoder=dict(type='GLDecoder'),
dilation_neck=dict(type='GLDilationNeck')),
disc=dict(
type='GLDiscs',
global_disc_cfg=global_disc_cfg,
local_disc_cfg=local_disc_cfg),
loss_gan=dict(
type='GANLoss',
gan_type='vanilla',
loss_weight=0.001,
),
loss_l1_hole=dict(type='L1Loss', loss_weight=1.0),
loss_l1_valid=dict(type='L1Loss', loss_weight=1.0),
pretrained=None)
train_cfg = dict(
disc_step=1, start_iter=0, iter_tc=2, iter_td=3, local_size=(128, 128))
test_cfg = dict()
model_dirty = dict(
type='GLInpaintor',
encdec=dict(
type='GLEncoderDecoder',
encoder=dict(type='GLEncoder'),
decoder=dict(type='GLDecoder'),
dilation_neck=dict(type='GLDilationNeck')),
disc=dict(
type='GLDiscs',
global_disc_cfg=global_disc_cfg,
local_disc_cfg=local_disc_cfg),
loss_gan=None,
loss_l1_hole=None,
loss_l1_valid=None,
pretrained=None)
| 1,512 | 26.017857 | 75 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/data/inpaintor_config/aot_test.py | # Copyright (c) OpenMMLab. All rights reserved.
model = dict(
type='AOTInpaintor',
encdec=dict(
type='AOTEncoderDecoder',
encoder=dict(type='AOTEncoder'),
decoder=dict(type='AOTDecoder'),
dilation_neck=dict(type='AOTBlockNeck')),
disc=dict(
type='SoftMaskPatchDiscriminator',
in_channels=3,
base_channels=64,
num_conv=3,
with_spectral_norm=True,
),
loss_gan=dict(
type='GANLoss',
gan_type='vanilla',
loss_weight=0.01,
),
loss_composed_percep=dict(
type='PerceptualLoss',
layer_weights={'0': 1.},
perceptual_weight=0.1,
style_weight=0,
),
loss_out_percep=True,
loss_l1_valid=dict(type='L1Loss', loss_weight=1.0),
pretrained=None)
train_cfg = dict(disc_step=1)
test_cfg = dict(metrics=['l1', 'psnr', 'ssim'])
| 880 | 25.69697 | 55 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/data/inpaintor_config/pconv_inpaintor_test.py | # Copyright (c) OpenMMLab. All rights reserved.
model = dict(
type='PConvInpaintor',
encdec=dict(
type='PConvEncoderDecoder',
encoder=dict(type='PConvEncoder'),
decoder=dict(type='PConvDecoder')),
loss_l1_hole=dict(type='L1Loss', loss_weight=1.0),
loss_l1_valid=dict(type='L1Loss', loss_weight=1.0),
loss_composed_percep=dict(
type='PerceptualLoss',
layer_weights={'0': 1.},
perceptual_weight=0.1,
style_weight=0),
loss_out_percep=True,
loss_tv=dict(type='MaskedTVLoss', loss_weight=0.01),
pretrained=None)
train_cfg = dict(disc_step=0)
test_cfg = dict(metrics=['l1', 'psnr', 'ssim'])
| 674 | 31.142857 | 56 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/tests/test_metrics/test_metrics.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import pytest
from mmedit.core.evaluation.metrics import (connectivity, gradient_error, mse,
niqe, psnr, reorder_image, sad,
ssim)
def test_reorder_image():
img_hw = np.ones((32, 32))
img_hwc = np.ones((32, 32, 3))
img_chw = np.ones((3, 32, 32))
with pytest.raises(ValueError):
reorder_image(img_hw, 'HH')
output = reorder_image(img_hw)
assert output.shape == (32, 32, 1)
output = reorder_image(img_hwc)
assert output.shape == (32, 32, 3)
output = reorder_image(img_chw, input_order='CHW')
assert output.shape == (32, 32, 3)
def test_calculate_psnr():
img_hw_1 = np.ones((32, 32))
img_hwc_1 = np.ones((32, 32, 3))
img_chw_1 = np.ones((3, 32, 32))
img_hw_2 = np.ones((32, 32)) * 2
img_hwc_2 = np.ones((32, 32, 3)) * 2
img_chw_2 = np.ones((3, 32, 32)) * 2
with pytest.raises(ValueError):
psnr(img_hw_1, img_hw_2, crop_border=0, input_order='HH')
with pytest.raises(ValueError):
psnr(img_hw_1, img_hw_2, crop_border=0, convert_to='ABC')
psnr_result = psnr(img_hw_1, img_hw_2, crop_border=0)
np.testing.assert_almost_equal(psnr_result, 48.1308036)
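    # Sanity check of the constant: the asserted values imply a peak of 255,
    # and a uniform difference of 1 gives MSE == 1, so
    # PSNR = 10 * log10(255**2 / 1) = 20 * log10(255).
    np.testing.assert_almost_equal(20 * np.log10(255), 48.1308036)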
psnr_result = psnr(img_hwc_1, img_hwc_2, crop_border=0, input_order='HWC')
np.testing.assert_almost_equal(psnr_result, 48.1308036)
psnr_result = psnr(img_chw_1, img_chw_2, crop_border=0, input_order='CHW')
np.testing.assert_almost_equal(psnr_result, 48.1308036)
psnr_result = psnr(img_hw_1, img_hw_2, crop_border=2)
np.testing.assert_almost_equal(psnr_result, 48.1308036)
psnr_result = psnr(img_hwc_1, img_hwc_2, crop_border=3, input_order='HWC')
np.testing.assert_almost_equal(psnr_result, 48.1308036)
psnr_result = psnr(img_chw_1, img_chw_2, crop_border=4, input_order='CHW')
np.testing.assert_almost_equal(psnr_result, 48.1308036)
psnr_result = psnr(img_hwc_1, img_hwc_2, crop_border=0, convert_to=None)
np.testing.assert_almost_equal(psnr_result, 48.1308036)
psnr_result = psnr(img_hwc_1, img_hwc_2, crop_border=0, convert_to='Y')
np.testing.assert_almost_equal(psnr_result, 49.4527218)
# test float inf
psnr_result = psnr(img_hw_1, img_hw_1, crop_border=0)
assert psnr_result == float('inf')
# test uint8
img_hw_1 = np.zeros((32, 32), dtype=np.uint8)
img_hw_2 = np.ones((32, 32), dtype=np.uint8) * 255
psnr_result = psnr(img_hw_1, img_hw_2, crop_border=0)
assert psnr_result == 0
def test_calculate_ssim():
img_hw_1 = np.ones((32, 32))
img_hwc_1 = np.ones((32, 32, 3))
img_chw_1 = np.ones((3, 32, 32))
img_hw_2 = np.ones((32, 32)) * 2
img_hwc_2 = np.ones((32, 32, 3)) * 2
img_chw_2 = np.ones((3, 32, 32)) * 2
with pytest.raises(ValueError):
ssim(img_hw_1, img_hw_2, crop_border=0, input_order='HH')
with pytest.raises(ValueError):
ssim(img_hw_1, img_hw_2, crop_border=0, input_order='ABC')
ssim_result = ssim(img_hw_1, img_hw_2, crop_border=0)
np.testing.assert_almost_equal(ssim_result, 0.9130623)
ssim_result = ssim(img_hwc_1, img_hwc_2, crop_border=0, input_order='HWC')
np.testing.assert_almost_equal(ssim_result, 0.9130623)
ssim_result = ssim(img_chw_1, img_chw_2, crop_border=0, input_order='CHW')
np.testing.assert_almost_equal(ssim_result, 0.9130623)
ssim_result = ssim(img_hw_1, img_hw_2, crop_border=2)
np.testing.assert_almost_equal(ssim_result, 0.9130623)
ssim_result = ssim(img_hwc_1, img_hwc_2, crop_border=3, input_order='HWC')
np.testing.assert_almost_equal(ssim_result, 0.9130623)
ssim_result = ssim(img_chw_1, img_chw_2, crop_border=4, input_order='CHW')
np.testing.assert_almost_equal(ssim_result, 0.9130623)
ssim_result = ssim(img_hwc_1, img_hwc_2, crop_border=0, convert_to=None)
np.testing.assert_almost_equal(ssim_result, 0.9130623)
ssim_result = ssim(img_hwc_1, img_hwc_2, crop_border=0, convert_to='Y')
np.testing.assert_almost_equal(ssim_result, 0.9987801)
def test_calculate_niqe():
img = mmcv.imread('tests/data/gt/baboon.png')
result = niqe(img[:, :, 0], crop_border=0, input_order='HW')
np.testing.assert_almost_equal(result, 5.62525, decimal=5)
result = niqe(img, crop_border=0, input_order='HWC', convert_to='y')
np.testing.assert_almost_equal(result, 5.72957, decimal=5)
result = niqe(img, crop_border=0, input_order='HWC', convert_to='gray')
np.testing.assert_almost_equal(result, 5.73154, decimal=5)
result = niqe(
img.transpose(2, 0, 1),
crop_border=0,
input_order='CHW',
convert_to='y')
np.testing.assert_almost_equal(result, 5.72957, decimal=5)
result = niqe(
img.transpose(2, 0, 1),
crop_border=0,
input_order='CHW',
convert_to='gray')
np.testing.assert_almost_equal(result, 5.73154, decimal=5)
result = niqe(img[:, :, 0], crop_border=6, input_order='HW')
np.testing.assert_almost_equal(result, 5.82981, decimal=5)
result = niqe(img, crop_border=6, input_order='HWC', convert_to='y')
np.testing.assert_almost_equal(result, 6.10074, decimal=5)
result = niqe(
img.transpose(2, 0, 1),
crop_border=6,
input_order='CHW',
convert_to='y')
np.testing.assert_almost_equal(result, 6.10074, decimal=5)
def test_sad():
alpha = np.ones((32, 32)) * 255
pred_alpha = np.zeros((32, 32))
trimap = np.zeros((32, 32))
trimap[:16, :16] = 128
trimap[16:, 16:] = 255
with pytest.raises(AssertionError):
# pred_alpha should be masked by trimap before evaluation
sad(alpha, trimap, pred_alpha)
with pytest.raises(ValueError):
# input should all be two dimensional
sad(alpha[..., None], trimap, pred_alpha)
# mask pred_alpha
pred_alpha[trimap == 0] = 0
pred_alpha[trimap == 255] = 255
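    # After masking, pred_alpha agrees with alpha only on the 256 px where
    # trimap == 255; the other 768 px differ by 255 each. The asserted value
    # matches sum(|alpha - pred|) / 255 / 1000 = 768 / 1000 = 0.768, i.e. SAD
    # appears to be reported in thousands of normalized pixels.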
sad_result = sad(alpha, trimap, pred_alpha)
np.testing.assert_almost_equal(sad_result, 0.768)
def test_mse():
alpha = np.ones((32, 32)) * 255
pred_alpha = np.zeros((32, 32))
trimap = np.zeros((32, 32))
trimap[:16, :16] = 128
trimap[16:, 16:] = 255
with pytest.raises(AssertionError):
# pred_alpha should be masked by trimap before evaluation
mse(alpha, trimap, pred_alpha)
with pytest.raises(ValueError):
        # inputs should all be two-dimensional
mse(alpha[..., None], trimap, pred_alpha)
# mask pred_alpha
pred_alpha[trimap == 0] = 0
pred_alpha[trimap == 255] = 255
mse_result = mse(alpha, trimap, pred_alpha)
np.testing.assert_almost_equal(mse_result, 3.0)
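    # Expected value, assuming the squared error is summed over the whole
    # image but normalized by the number of unknown (trimap == 128) pixels:
    # 768 pixels each contribute a normalized error of 1, and 768 / 256 = 3.0.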
def test_gradient_error():
"""Test gradient error for evaluating predicted alpha matte."""
alpha = np.ones((32, 32)) * 255
pred_alpha = np.zeros((32, 32))
trimap = np.zeros((32, 32))
trimap[:16, :16] = 128
trimap[16:, 16:] = 255
with pytest.raises(ValueError):
# pred_alpha should be masked by trimap before evaluation
gradient_error(alpha, trimap, pred_alpha)
with pytest.raises(ValueError):
        # inputs should all be two-dimensional
gradient_error(alpha[..., None], trimap, pred_alpha)
# mask pred_alpha
pred_alpha[trimap == 0] = 0
pred_alpha[trimap == 255] = 255
gradient_result = gradient_error(alpha, trimap, pred_alpha)
np.testing.assert_almost_equal(gradient_result, 0.0028887)
def test_connectivity():
"""Test connectivity error for evaluating predicted alpha matte."""
alpha = np.ones((32, 32)) * 255
pred_alpha = np.zeros((32, 32))
trimap = np.zeros((32, 32))
trimap[:16, :16] = 128
trimap[16:, 16:] = 255
with pytest.raises(ValueError):
# pred_alpha should be masked by trimap before evaluation
connectivity(alpha, trimap, pred_alpha)
with pytest.raises(ValueError):
        # inputs should all be two-dimensional
connectivity(alpha[..., None], trimap, pred_alpha)
# mask pred_alpha
pred_alpha[trimap == 0] = 0
pred_alpha[trimap == 255] = 255
connectivity_result = connectivity(alpha, trimap, pred_alpha)
np.testing.assert_almost_equal(connectivity_result, 0.256)
| 8,279 | 34.536481 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/demo/restoration_video_demo.py | # Copyright (c) OpenMMLab. All rights reserved.
import argparse
import os
import cv2
import mmcv
import numpy as np
import torch
from mmedit.apis import init_model, restoration_video_inference
from mmedit.core import tensor2img
from mmedit.utils import modify_args
VIDEO_EXTENSIONS = ('.mp4', '.mov')
def parse_args():
modify_args()
parser = argparse.ArgumentParser(description='Restoration demo')
parser.add_argument('config', help='test config file path')
parser.add_argument('checkpoint', help='checkpoint file')
parser.add_argument('input_dir', help='directory of the input video')
parser.add_argument('output_dir', help='directory of the output video')
parser.add_argument(
'--start-idx',
type=int,
default=0,
help='index corresponds to the first frame of the sequence')
parser.add_argument(
'--filename-tmpl',
default='{:08d}.png',
help='template of the file names')
parser.add_argument(
'--window-size',
type=int,
default=0,
help='window size if sliding-window framework is used')
parser.add_argument(
'--max-seq-len',
type=int,
default=None,
help='maximum sequence length if recurrent framework is used')
parser.add_argument('--device', type=int, default=0, help='CUDA device id')
args = parser.parse_args()
return args
def main():
""" Demo for video restoration models.
    Note that we accept a video as input/output when 'input_dir'/'output_dir'
    is set to the path of a video file. However, saving to a video introduces
    compression, which lowers the visual quality. To preserve the full output
    quality, save the results as separate images (.png).
"""
args = parse_args()
model = init_model(
args.config, args.checkpoint, device=torch.device('cuda', args.device))
output = restoration_video_inference(model, args.input_dir,
args.window_size, args.start_idx,
args.filename_tmpl, args.max_seq_len)
file_extension = os.path.splitext(args.output_dir)[1]
if file_extension in VIDEO_EXTENSIONS: # save as video
h, w = output.shape[-2:]
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
video_writer = cv2.VideoWriter(args.output_dir, fourcc, 25, (w, h))
        for i in range(output.size(1)):
img = tensor2img(output[:, i, :, :, :])
video_writer.write(img.astype(np.uint8))
cv2.destroyAllWindows()
video_writer.release()
else:
for i in range(args.start_idx, args.start_idx + output.size(1)):
output_i = output[:, i - args.start_idx, :, :, :]
output_i = tensor2img(output_i)
save_path_i = f'{args.output_dir}/{args.filename_tmpl.format(i)}'
mmcv.imwrite(output_i, save_path_i)
if __name__ == '__main__':
main()
| 2,938 | 32.781609 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/configs/basicvsr_plusplus_ntire_vsr.py | exp_name = 'basicvsr_plusplus_c128n25_600k_ntire_vsr'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlus',
mid_channels=128,
num_blocks=25,
is_low_res_input=True,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth',
cpu_cache_length=100),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'),
ensemble=dict(type='SpatialTemporalEnsemble', is_temporal_ensemble=False),
)
# model training and testing settings
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=0)
# dataset settings
test_dataset_type = 'SRFolderMultipleGTDataset'
test_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='MirrorSequence', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='MirrorSequence', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
test=dict(
type=test_dataset_type,
lq_folder='data/test/LQ',
gt_folder='data/test/GT',
pipeline=test_pipeline,
scale=4,
test_mode=True),
)
| 2,031 | 29.328358 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/configs/basicvsr_plusplus_vimeo90k_bd.py | exp_name = 'basicvsr_plusplus_c64n7_4x2_300k_vimeo90k_bd'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlus',
mid_channels=64,
num_blocks=7,
is_low_res_input=True,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth'),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))
# model training and testing settings
train_cfg = dict(fix_iter=-1)
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=0, convert_to='y')
# dataset settings
train_dataset_type = 'SRVimeo90KMultipleGTDataset'
val_dataset_type = 'SRFolderMultipleGTDataset'
train_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='PairedRandomCrop', gt_patch_size=256),
dict(
type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
direction='horizontal'),
dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
dict(type='MirrorSequence', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])
]
val_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
test_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='MirrorSequence', keys=['lq']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
workers_per_gpu=6,
train_dataloader=dict(samples_per_gpu=2, drop_last=True), # 4 gpus
val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
# train
train=dict(
type='RepeatDataset',
times=1000,
dataset=dict(
type=train_dataset_type,
lq_folder='data/vimeo90k/BDx4',
gt_folder='data/vimeo90k/GT',
ann_file='data/vimeo90k/meta_info_Vimeo90K_train_GT.txt',
pipeline=train_pipeline,
scale=4,
test_mode=False)),
# val
val=dict(
type=val_dataset_type,
lq_folder='data/Vid4/BDx4',
gt_folder='data/Vid4/GT',
pipeline=val_pipeline,
scale=4,
test_mode=True),
# test
test=dict(
type=val_dataset_type,
lq_folder='data/test/LQ',
gt_folder='data/test/GT',
pipeline=test_pipeline,
scale=1,
test_mode=True),
)
# optimizer
optimizers = dict(
generator=dict(
type='Adam',
lr=1e-4,
betas=(0.9, 0.99),
paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.25)})))
# learning policy
total_iters = 300000
lr_config = dict(
policy='CosineRestart',
by_epoch=False,
periods=[300000],
restart_weights=[1],
min_lr=1e-7)
checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
# remove gpu_collect=True in non-distributed training
evaluation = dict(interval=5000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook'),
])
visual_config = None
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/{exp_name}'
load_from = 'experiments/basicvsr_plusplus_c64n7_8x1_600k_reds4/iter_600000.pth' # noqa
resume_from = None
workflow = [('train', 1)]
| 4,942 | 28.422619 | 88 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/configs/basicvsr_plusplus_reds4.py | exp_name = 'basicvsr_plusplus_c64n7_8x1_600k_reds4'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlus',
mid_channels=64,
num_blocks=7,
is_low_res_input=True,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth'),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))
# model training and testing settings
train_cfg = dict(fix_iter=5000)
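# Note (an assumption based on the BasicVSR training scheme): fix_iter freezes
# the pretrained SPyNet flow estimator for the first 5000 iterations, after
# which it is fine-tuned at a reduced learning rate (lr_mult=0.25 below).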
test_cfg = dict(metrics=['PSNR'], crop_border=0)
# dataset settings
train_dataset_type = 'SRREDSMultipleGTDataset'
val_dataset_type = 'SRREDSMultipleGTDataset'
test_dataset_type = 'SRFolderMultipleGTDataset'
train_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='PairedRandomCrop', gt_patch_size=256),
dict(
type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
direction='horizontal'),
dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])
]
test_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
workers_per_gpu=6,
train_dataloader=dict(samples_per_gpu=1, drop_last=True), # 8 gpus
val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
# train
train=dict(
type='RepeatDataset',
times=1000,
dataset=dict(
type=train_dataset_type,
lq_folder='data/REDS/train_sharp_bicubic/X4',
gt_folder='data/REDS/train_sharp',
num_input_frames=30,
pipeline=train_pipeline,
scale=4,
val_partition='REDS4',
test_mode=False)),
# val
val=dict(
type=val_dataset_type,
lq_folder='data/REDS/train_sharp_bicubic/X4',
gt_folder='data/REDS/train_sharp',
num_input_frames=100,
pipeline=test_pipeline,
scale=4,
val_partition='REDS4',
repeat=2,
test_mode=True),
# test
test=dict(
type=test_dataset_type,
lq_folder='data/test/LQ',
gt_folder='data/test/GT',
pipeline=test_pipeline,
scale=1,
test_mode=True),
)
# optimizer
optimizers = dict(
generator=dict(
type='Adam',
lr=1e-4,
betas=(0.9, 0.99),
paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.25)})))
# learning policy
total_iters = 600000
lr_config = dict(
policy='CosineRestart',
by_epoch=False,
periods=[600000],
restart_weights=[1],
min_lr=1e-7)
checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
# remove gpu_collect=True in non-distributed training
evaluation = dict(interval=5000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook'),
])
visual_config = None
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/{exp_name}'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
| 4,519 | 28.350649 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/configs/basicvsr_plusplus_deblur_dvd.py | exp_name = 'basicvsr_plusplus_deblur_dvd'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlus',
mid_channels=64,
num_blocks=15,
is_low_res_input=False,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth',
cpu_cache_length=100),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))
# model training and testing settings
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=0)
# dataset settings
train_dataset_type = 'SRFolderMultipleGTDataset'
val_dataset_type = 'SRFolderMultipleGTDataset'
train_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
filename_tmpl='{:05d}.jpg'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='PairedRandomCrop', gt_patch_size=256),
dict(
type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
direction='horizontal'),
dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])
]
test_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
filename_tmpl='{:05d}.jpg'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
filename_tmpl='{:05d}.jpg'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
workers_per_gpu=6,
train_dataloader=dict(
samples_per_gpu=1, drop_last=True, persistent_workers=False), # 8 gpus
val_dataloader=dict(samples_per_gpu=1, persistent_workers=False),
test_dataloader=dict(
samples_per_gpu=1, workers_per_gpu=1, persistent_workers=False),
# train
train=dict(
type='RepeatDataset',
times=1000,
dataset=dict(
type=train_dataset_type,
lq_folder='data/DVD/quantitative_datasets/LQ',
gt_folder='data/DVD/quantitative_datasets/GT',
num_input_frames=30,
pipeline=train_pipeline,
scale=1,
ann_file='data/DVD_train.txt',
test_mode=False)),
# val
val=dict(
type=val_dataset_type,
lq_folder='data/DVD/quantitative_datasets/LQ',
gt_folder='data/DVD/quantitative_datasets/GT',
pipeline=test_pipeline,
scale=1,
ann_file='data/DVD_test.txt',
test_mode=True),
# test
test=dict(
type=val_dataset_type,
lq_folder='data/test/LQ',
gt_folder='data/test/GT',
pipeline=test_pipeline,
scale=1,
test_mode=True),
)
# optimizer
optimizers = dict(
generator=dict(
type='Adam',
lr=1e-4,
betas=(0.9, 0.99),
paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.25)})))
# learning policy
total_iters = 600000
lr_config = dict(
policy='CosineRestart',
by_epoch=False,
periods=[600000],
restart_weights=[1],
min_lr=1e-7)
checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
# remove gpu_collect=True in non-distributed training
evaluation = dict(interval=5000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
dict(type='TensorboardLoggerHook'),
])
visual_config = None
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/{exp_name}'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
| 4,751 | 27.97561 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/configs/basicvsr_plusplus_denoise.py | exp_name = 'basicvsr_plusplus_denoise'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlus',
mid_channels=64,
num_blocks=15,
is_low_res_input=False,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth',
cpu_cache_length=100),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))
# model training and testing settings
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=0)
# dataset settings
train_dataset_type = 'SRFolderMultipleGTDataset'
val_dataset_type = 'SRFolderMultipleGTDataset'
train_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
filename_tmpl='{:05d}.jpg'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='FixedCrop', keys=['gt'], crop_size=(256, 256)),
dict(type='RescaleToZeroOne', keys=['gt']),
dict(type='Flip', keys=['gt'], flip_ratio=0.5, direction='horizontal'),
dict(type='Flip', keys=['gt'], flip_ratio=0.5, direction='vertical'),
dict(type='RandomTransposeHW', keys=['gt'], transpose_ratio=0.5),
dict(type='CopyValues', src_keys=['gt'], dst_keys=['lq']),
dict(
type='RandomNoise',
params=dict(
noise_type=['gaussian'],
noise_prob=[1],
gaussian_sigma=[0, 50],
gaussian_gray_noise_prob=0,
),
keys=['lq'],
),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
test_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
filename_tmpl='{:05d}.png'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['gt']),
dict(type='CopyValues', src_keys=['gt'], dst_keys=['lq']),
dict(
type='RandomNoise',
params=dict(
noise_type=['gaussian'],
noise_prob=[1],
gaussian_sigma=[50, 50], # change to your desired noise level
gaussian_gray_noise_prob=0,
),
keys=['lq'],
),
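    # With equal endpoints, gaussian_sigma=[50, 50] pins the test noise level
    # at sigma = 50, whereas the training pipeline above uses [0, 50], which
    # (assuming RandomNoise samples uniformly from the given range) covers all
    # levels up to 50.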
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
filename_tmpl='{:05d}.jpg'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
workers_per_gpu=6,
train_dataloader=dict(
samples_per_gpu=1, drop_last=True, persistent_workers=False), # 8 gpus
val_dataloader=dict(samples_per_gpu=1, persistent_workers=False),
test_dataloader=dict(
samples_per_gpu=1, workers_per_gpu=1, persistent_workers=False),
# train
train=dict(
type='RepeatDataset',
times=1000,
dataset=dict(
type=train_dataset_type,
lq_folder='data/DAVIS/JPEGImages/480p',
gt_folder='data/DAVIS/JPEGImages/480p',
num_input_frames=25,
pipeline=train_pipeline,
scale=1,
test_mode=False)),
# val
val=dict(
type=val_dataset_type,
lq_folder='data/Set8',
gt_folder='data/Set8',
pipeline=test_pipeline,
scale=1,
test_mode=True),
test=dict(
type=val_dataset_type,
lq_folder='data/test/LQ',
gt_folder='data/test/GT',
pipeline=test_pipeline,
scale=1,
test_mode=True),
)
# optimizer
optimizers = dict(
generator=dict(
type='Adam',
lr=1e-4,
betas=(0.9, 0.99),
paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.25)})))
# learning policy
total_iters = 600000
lr_config = dict(
policy='CosineRestart',
by_epoch=False,
periods=[600000],
restart_weights=[1],
min_lr=1e-7)
checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
# remove gpu_collect=True in non-distributed training
evaluation = dict(interval=5000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
dict(type='TensorboardLoggerHook'),
])
visual_config = None
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./experiments/{exp_name}'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
| 4,987 | 27.666667 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/configs/basicvsr_plusplus_ntire_decompress_track2.py | exp_name = 'basicvsr_plusplus_c128n25_600k_ntire_decompress_track2'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlus',
mid_channels=128,
num_blocks=25,
is_low_res_input=False,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth',
cpu_cache_length=100),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'),
ensemble=dict(type='SpatialTemporalEnsemble', is_temporal_ensemble=False),
)
# model training and testing settings
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=0)
# dataset settings
test_dataset_type = 'SRFolderMultipleGTDataset'
test_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
start_idx=1,
filename_tmpl='{:03d}.png'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
test=dict(
type=test_dataset_type,
lq_folder='data/test/LQ',
gt_folder='data/test/GT',
pipeline=test_pipeline,
scale=4,
test_mode=True),
)
| 2,022 | 28.318841 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/configs/basicvsr_plusplus_ntire_decompress_track3.py | exp_name = 'basicvsr_plusplus_c128n25_600k_ntire_decompress_track3'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlus',
mid_channels=128,
num_blocks=25,
is_low_res_input=False,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth',
cpu_cache_length=100),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'),
ensemble=dict(type='SpatialTemporalEnsemble', is_temporal_ensemble=False),
)
# model training and testing settings
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=0)
# dataset settings
test_dataset_type = 'SRFolderMultipleGTDataset'
test_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
start_idx=1,
filename_tmpl='{:03d}.png'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
test=dict(
type=test_dataset_type,
lq_folder='data/test/LQ',
gt_folder='data/test/GT',
pipeline=test_pipeline,
scale=4,
test_mode=True),
)
| 2,022 | 28.318841 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/configs/basicvsr_plusplus_vimeo90k_bi.py | exp_name = 'basicvsr_plusplus_c64n7_4x2_300k_vimeo90k_bi'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlus',
mid_channels=64,
num_blocks=7,
is_low_res_input=True,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth'),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))
# model training and testing settings
train_cfg = dict(fix_iter=-1)
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=0, convert_to='y')
# dataset settings
train_dataset_type = 'SRVimeo90KMultipleGTDataset'
val_dataset_type = 'SRFolderMultipleGTDataset'
train_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='PairedRandomCrop', gt_patch_size=256),
dict(
type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
direction='horizontal'),
dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
dict(type='MirrorSequence', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])
]
val_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
test_pipeline = [
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='MirrorSequence', keys=['lq']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
workers_per_gpu=6,
train_dataloader=dict(samples_per_gpu=2, drop_last=True), # 4 gpus
val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
# train
train=dict(
type='RepeatDataset',
times=1000,
dataset=dict(
type=train_dataset_type,
lq_folder='data/vimeo90k/BIx4',
gt_folder='data/vimeo90k/GT',
ann_file='data/vimeo90k/meta_info_Vimeo90K_train_GT.txt',
pipeline=train_pipeline,
scale=4,
test_mode=False)),
# val
val=dict(
type=val_dataset_type,
lq_folder='data/Vid4/BIx4',
gt_folder='data/Vid4/GT',
pipeline=val_pipeline,
scale=4,
test_mode=True),
# test
test=dict(
type=val_dataset_type,
lq_folder='data/test/LQ',
gt_folder='data/test/GT',
pipeline=test_pipeline,
scale=1,
test_mode=True),
)
# optimizer
optimizers = dict(
generator=dict(
type='Adam',
lr=1e-4,
betas=(0.9, 0.99),
paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.25)})))
# learning policy
total_iters = 300000
lr_config = dict(
policy='CosineRestart',
by_epoch=False,
periods=[300000],
restart_weights=[1],
min_lr=1e-7)
checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
# remove gpu_collect=True in non-distributed training
evaluation = dict(interval=5000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook'),
])
visual_config = None
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/{exp_name}'
load_from = 'experiments/basicvsr_plusplus_c64n7_8x1_600k_reds4/iter_600000.pth' # noqa
resume_from = None
workflow = [('train', 1)]
| 4,942 | 28.422619 | 88 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/configs/basicvsr_plusplus_deblur_gopro.py | exp_name = 'basicvsr_plusplus_deblur_gopro'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlus',
mid_channels=64,
num_blocks=15,
is_low_res_input=False,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth',
cpu_cache_length=100),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))
# model training and testing settings
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=0)
# dataset settings
train_dataset_type = 'SRFolderMultipleGTDataset'
val_dataset_type = 'SRFolderMultipleGTDataset'
train_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
filename_tmpl='{:05d}.png'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='PairedRandomCrop', gt_patch_size=256),
dict(
type='Flip', keys=['lq', 'gt'], flip_ratio=0.5,
direction='horizontal'),
dict(type='Flip', keys=['lq', 'gt'], flip_ratio=0.5, direction='vertical'),
dict(type='RandomTransposeHW', keys=['lq', 'gt'], transpose_ratio=0.5),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(type='Collect', keys=['lq', 'gt'], meta_keys=['lq_path', 'gt_path'])
]
test_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
filename_tmpl='{:05d}.png'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
filename_tmpl='{:05d}.png'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
workers_per_gpu=6,
train_dataloader=dict(
samples_per_gpu=1, drop_last=True, persistent_workers=False), # 8 gpus
val_dataloader=dict(samples_per_gpu=1, persistent_workers=False),
test_dataloader=dict(
samples_per_gpu=1, workers_per_gpu=1, persistent_workers=False),
# train
train=dict(
type='RepeatDataset',
times=1000,
dataset=dict(
type=train_dataset_type,
lq_folder='data/GoPro/train/blur',
gt_folder='data/GoPro/train/GT',
num_input_frames=30,
pipeline=train_pipeline,
scale=1,
ann_file='data/GoPro_train.txt',
test_mode=False)),
# val
val=dict(
type=val_dataset_type,
lq_folder='data/GoPro/test/blur',
gt_folder='data/GoPro/test/GT',
pipeline=test_pipeline,
scale=1,
ann_file='data/GoPro_test.txt',
test_mode=True),
# test
test=dict(
type=val_dataset_type,
lq_folder='data/test/LQ',
gt_folder='data/test/GT',
pipeline=test_pipeline,
scale=1,
test_mode=True),
)
# optimizer
optimizers = dict(
generator=dict(
type='Adam',
lr=1e-4,
betas=(0.9, 0.99),
paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.25)})))
# learning policy
total_iters = 200000
lr_config = dict(
policy='CosineRestart',
by_epoch=False,
periods=[200000],
restart_weights=[1],
min_lr=1e-7)
checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
# remove gpu_collect=True in non distributed training
evaluation = dict(interval=5000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
dict(type='TensorboardLoggerHook'),
])
visual_config = None
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./experiments/{exp_name}'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
| 4,705 | 27.695122 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/configs/basicvsr_plusplus_ntire_decompress_track1.py | exp_name = 'basicvsr_plusplus_c128n25_600k_ntire_decompress_track1'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlus',
mid_channels=128,
num_blocks=25,
is_low_res_input=False,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth',
cpu_cache_length=100),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'),
ensemble=dict(type='SpatialTemporalEnsemble', is_temporal_ensemble=False),
)
# model training and testing settings
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=0)
# dataset settings
test_dataset_type = 'SRFolderMultipleGTDataset'
test_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
start_idx=1,
filename_tmpl='{:03d}.png'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(type='GenerateSegmentIndices', interval_list=[1]),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
test=dict(
type=test_dataset_type,
lq_folder='data/test/LQ',
gt_folder='data/test/GT',
pipeline=test_pipeline,
scale=4,
test_mode=True),
)
| 2,022 | 28.318841 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/version.py | # Copyright (c) Open-MMLab. All rights reserved.
__version__ = '0.14.0'
def parse_version_info(version_str):
ver_info = []
for x in version_str.split('.'):
if x.isdigit():
ver_info.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
ver_info.append(int(patch_version[0]))
ver_info.append(f'rc{patch_version[1]}')
return tuple(ver_info)
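# Worked examples of the parsing above:
#   parse_version_info('0.14.0') -> (0, 14, 0)
#   parse_version_info('1.0rc1') -> (1, 0, 'rc1')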
version_info = parse_version_info(__version__)
| 482 | 24.421053 | 52 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
from .version import __version__, version_info
try:
from mmcv.utils import digit_version
except ImportError:
def digit_version(version_str):
digit_ver = []
for x in version_str.split('.'):
if x.isdigit():
digit_ver.append(int(x))
elif x.find('rc') != -1:
patch_version = x.split('rc')
digit_ver.append(int(patch_version[0]) - 1)
digit_ver.append(int(patch_version[1]))
return digit_ver
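# Fallback behavior, e.g. digit_version('1.3.13') -> [1, 3, 13] and
# digit_version('1.6rc1') -> [1, 5, 1], so a release candidate compares lower
# than the corresponding release.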
MMCV_MIN = '1.3.13'
MMCV_MAX = '1.6'
mmcv_min_version = digit_version(MMCV_MIN)
mmcv_max_version = digit_version(MMCV_MAX)
mmcv_version = digit_version(mmcv.__version__)
assert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \
f'mmcv=={mmcv.__version__} is used but incompatible. ' \
f'Please install mmcv-full>={mmcv_min_version}, <={mmcv_max_version}.'
__all__ = ['__version__', 'version_info']
| 988 | 27.257143 | 74 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/test.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import pickle
import shutil
import tempfile
import mmcv
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
def single_gpu_test(model,
data_loader,
save_image=False,
save_path=None,
iteration=None):
"""Test model with a single gpu.
    This method tests the model with a single gpu and displays a test
    progress bar.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
save_image (bool): Whether save image. Default: False.
save_path (str): The path to save image. Default: None.
iteration (int): Iteration number. It is used for the save image name.
Default: None.
Returns:
list: The prediction results.
"""
if save_image and save_path is None:
raise ValueError(
"When 'save_image' is True, you should also set 'save_path'.")
model.eval()
results = []
dataset = data_loader.dataset
prog_bar = mmcv.ProgressBar(len(dataset))
for data in data_loader:
with torch.no_grad():
result = model(
test_mode=True,
save_image=save_image,
save_path=save_path,
iteration=iteration,
**data)
results.append(result)
# get batch size
for _, v in data.items():
if isinstance(v, torch.Tensor):
batch_size = v.size(0)
break
for _ in range(batch_size):
prog_bar.update()
return results
def multi_gpu_test(model,
data_loader,
tmpdir=None,
gpu_collect=False,
save_image=False,
save_path=None,
iteration=None,
empty_cache=False):
"""Test model with multiple gpus.
    This method tests the model with multiple gpus and collects the results
    under two different modes: gpu and cpu. By setting 'gpu_collect=True',
    it encodes results to gpu tensors and uses gpu communication for results
    collection. In cpu mode, it saves the results on different gpus to
    'tmpdir' and collects them by the rank 0 worker.
Args:
model (nn.Module): Model to be tested.
data_loader (nn.Dataloader): Pytorch data loader.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode.
gpu_collect (bool): Option to use either gpu or cpu to collect results.
save_image (bool): Whether save image. Default: False.
save_path (str): The path to save image. Default: None.
iteration (int): Iteration number. It is used for the save image name.
Default: None.
empty_cache (bool): empty cache in every iteration. Default: False.
Returns:
list: The prediction results.
"""
if save_image and save_path is None:
raise ValueError(
"When 'save_image' is True, you should also set 'save_path'.")
model.eval()
results = []
dataset = data_loader.dataset
rank, world_size = get_dist_info()
if rank == 0:
prog_bar = mmcv.ProgressBar(len(dataset))
for data in data_loader:
with torch.no_grad():
result = model(
test_mode=True,
save_image=save_image,
save_path=save_path,
iteration=iteration,
**data)
results.append(result)
if empty_cache:
torch.cuda.empty_cache()
if rank == 0:
# get batch size
for _, v in data.items():
if isinstance(v, torch.Tensor):
batch_size = v.size(0)
break
for _ in range(batch_size * world_size):
prog_bar.update()
# collect results from all ranks
if gpu_collect:
results = collect_results_gpu(results, len(dataset))
else:
results = collect_results_cpu(results, len(dataset), tmpdir)
return results
def collect_results_cpu(result_part, size, tmpdir=None):
"""Collect results in cpu mode.
    It saves the results on different gpus to 'tmpdir', where they are
    collected by the rank 0 worker.
Args:
result_part (list): Results to be collected
size (int): Result size.
tmpdir (str): Path of directory to save the temporary results from
different gpus under cpu mode. Default: None
Returns:
list: Ordered results.
"""
rank, world_size = get_dist_info()
# create a tmp dir if it is not specified
if tmpdir is None:
MAX_LEN = 512
# 32 is whitespace
dir_tensor = torch.full((MAX_LEN, ),
32,
dtype=torch.uint8,
device='cuda')
if rank == 0:
mmcv.mkdir_or_exist('.dist_test')
tmpdir = tempfile.mkdtemp(dir='.dist_test')
tmpdir = torch.tensor(
bytearray(tmpdir.encode()), dtype=torch.uint8, device='cuda')
dir_tensor[:len(tmpdir)] = tmpdir
dist.broadcast(dir_tensor, 0)
tmpdir = dir_tensor.cpu().numpy().tobytes().decode().rstrip()
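        # The fixed 512-byte buffer is pre-filled with spaces so that every
        # rank broadcasts a tensor of identical shape; rstrip() then drops
        # the padding after decoding the directory name.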
else:
mmcv.mkdir_or_exist(tmpdir)
# synchronizes all processes to make sure tmpdir exist
dist.barrier()
# dump the part result to the dir
mmcv.dump(result_part, osp.join(tmpdir, 'part_{}.pkl'.format(rank)))
# synchronizes all processes for loading pickle file
dist.barrier()
# collect all parts
if rank != 0:
return None
# load results of all parts from tmp dir
part_list = []
for i in range(world_size):
part_file = osp.join(tmpdir, 'part_{}.pkl'.format(i))
part_list.append(mmcv.load(part_file))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
# remove tmp dir
shutil.rmtree(tmpdir)
return ordered_results
def collect_results_gpu(result_part, size):
"""Collect results in gpu mode.
    It encodes results to gpu tensors and uses gpu communication for results
    collection.
Args:
result_part (list): Results to be collected
size (int): Result size.
Returns:
list: Ordered results.
"""
rank, world_size = get_dist_info()
# dump result part to tensor with pickle
part_tensor = torch.tensor(
bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device='cuda')
# gather all result part tensor shape
shape_tensor = torch.tensor(part_tensor.shape, device='cuda')
shape_list = [shape_tensor.clone() for _ in range(world_size)]
dist.all_gather(shape_list, shape_tensor)
# padding result part tensor to max length
shape_max = torch.tensor(shape_list).max()
part_send = torch.zeros(shape_max, dtype=torch.uint8, device='cuda')
part_send[:shape_tensor[0]] = part_tensor
part_recv_list = [
part_tensor.new_zeros(shape_max) for _ in range(world_size)
]
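    # all_gather requires identical tensor shapes across ranks, so each part
    # is zero-padded to the maximum length above and truncated back with the
    # gathered shape_list after communication.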
# gather all result part
dist.all_gather(part_recv_list, part_send)
if rank != 0:
return None
part_list = []
for recv, shape in zip(part_recv_list, shape_list):
part_list.append(pickle.loads(recv[:shape[0]].cpu().numpy().tobytes()))
# sort the results
ordered_results = []
for res in zip(*part_list):
ordered_results.extend(list(res))
# the dataloader may pad some samples
ordered_results = ordered_results[:size]
return ordered_results
| 7,852 | 32.417021 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/restoration_face_inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmedit.datasets.pipelines import Compose
try:
from facexlib.utils.face_restoration_helper import FaceRestoreHelper
has_facexlib = True
except ImportError:
has_facexlib = False
def restoration_face_inference(model, img, upscale_factor=1, face_size=1024):
"""Inference image with the model.
Args:
model (nn.Module): The loaded model.
img (str): File path of input image.
upscale_factor (int, optional): The number of times the input image
is upsampled. Default: 1.
face_size (int, optional): The size of the cropped and aligned faces.
Default: 1024.
Returns:
Tensor: The predicted restoration result.
"""
device = next(model.parameters()).device # model device
    # select the data pipeline from the config
if model.cfg.get('demo_pipeline', None):
test_pipeline = model.cfg.demo_pipeline
elif model.cfg.get('test_pipeline', None):
test_pipeline = model.cfg.test_pipeline
else:
test_pipeline = model.cfg.val_pipeline
# remove gt from test_pipeline
keys_to_remove = ['gt', 'gt_path']
for key in keys_to_remove:
for pipeline in list(test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = Compose(test_pipeline)
# face helper for detecting and aligning faces
assert has_facexlib, 'Please install FaceXLib to use the demo.'
face_helper = FaceRestoreHelper(
upscale_factor,
face_size=face_size,
crop_ratio=(1, 1),
det_model='retinaface_resnet50',
template_3points=True,
save_ext='png',
device=device)
face_helper.read_image(img)
# get face landmarks for each face
face_helper.get_face_landmarks_5(
only_center_face=False, eye_dist_threshold=None)
# align and warp each face
face_helper.align_warp_face()
    for img in face_helper.cropped_faces:
# prepare data
data = dict(lq=img.astype(np.float32))
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
with torch.no_grad():
output = model(test_mode=True, **data)['output'].clip_(0, 1)
output = output.squeeze(0).permute(1, 2, 0)[:, :, [2, 1, 0]]
output = output.cpu().numpy() * 255 # (0, 255)
face_helper.add_restored_face(output)
face_helper.get_inverse_affine(None)
restored_img = face_helper.paste_faces_to_input_image(upsample_img=None)
return restored_img
| 3,069 | 33.494382 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/generation_inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmcv.parallel import collate, scatter
from mmedit.core import tensor2img
from mmedit.datasets.pipelines import Compose
def generation_inference(model, img, img_unpaired=None):
"""Inference image with the model.
Args:
model (nn.Module): The loaded model.
img (str): File path of input image.
img_unpaired (str, optional): File path of the unpaired image.
If not None, perform unpaired image generation. Default: None.
Returns:
np.ndarray: The predicted generation result.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# build the data pipeline
test_pipeline = Compose(cfg.test_pipeline)
# prepare data
if img_unpaired is None:
data = dict(pair_path=img)
else:
data = dict(img_a_path=img, img_b_path=img_unpaired)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
results = model(test_mode=True, **data)
    # arrange the outputs according to the model's show_input mode
if img_unpaired is None:
if model.show_input:
output = np.concatenate([
tensor2img(results['real_a'], min_max=(-1, 1)),
tensor2img(results['fake_b'], min_max=(-1, 1)),
tensor2img(results['real_b'], min_max=(-1, 1))
],
axis=1)
else:
output = tensor2img(results['fake_b'], min_max=(-1, 1))
else:
if model.show_input:
output = np.concatenate([
tensor2img(results['real_a'], min_max=(-1, 1)),
tensor2img(results['fake_b'], min_max=(-1, 1)),
tensor2img(results['real_b'], min_max=(-1, 1)),
tensor2img(results['fake_a'], min_max=(-1, 1))
],
axis=1)
else:
if model.test_direction == 'a2b':
output = tensor2img(results['fake_b'], min_max=(-1, 1))
else:
output = tensor2img(results['fake_a'], min_max=(-1, 1))
return output
| 2,229 | 34.967742 | 74 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/inpainting_inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.parallel import collate, scatter
from mmedit.datasets.pipelines import Compose
def inpainting_inference(model, masked_img, mask):
"""Inference image with the model.
Args:
model (nn.Module): The loaded model.
masked_img (str): File path of image with mask.
mask (str): Mask file path.
Returns:
Tensor: The predicted inpainting result.
"""
device = next(model.parameters()).device # model device
infer_pipeline = [
dict(type='LoadImageFromFile', key='masked_img'),
dict(type='LoadMask', mask_mode='file', mask_config=dict()),
dict(type='Pad', keys=['masked_img', 'mask'], mode='reflect'),
dict(
type='Normalize',
keys=['masked_img'],
mean=[127.5] * 3,
std=[127.5] * 3,
to_rgb=False),
dict(type='GetMaskedImage', img_name='masked_img'),
dict(
type='Collect',
keys=['masked_img', 'mask'],
meta_keys=['masked_img_path']),
dict(type='ImageToTensor', keys=['masked_img', 'mask'])
]
# build the data pipeline
test_pipeline = Compose(infer_pipeline)
# prepare data
data = dict(masked_img_path=masked_img, mask_path=mask)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(test_mode=True, **data)
return result['fake_img']
| 1,546 | 29.94 | 70 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/restoration_inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.parallel import collate, scatter
from mmedit.datasets.pipelines import Compose
def restoration_inference(model, img, ref=None):
"""Inference image with the model.
Args:
model (nn.Module): The loaded model.
img (str): File path of input image.
ref (str | None): File path of reference image. Default: None.
Returns:
Tensor: The predicted restoration result.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# remove gt from test_pipeline
keys_to_remove = ['gt', 'gt_path']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = Compose(cfg.test_pipeline)
# prepare data
if ref: # Ref-SR
data = dict(lq_path=img, ref_path=ref)
else: # SISR
data = dict(lq_path=img)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(test_mode=True, **data)
return result['output']
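# Example usage (hypothetical config/checkpoint paths), assuming init_model
# from mmedit.apis:
#   model = init_model('configs/restorers/some_sr_config.py', 'sr.pth')
#   output = restoration_inference(model, 'path/to/lq.png')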
| 1,606 | 33.191489 | 72 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/matting_inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import torch
from mmcv.parallel import collate, scatter
from mmcv.runner import load_checkpoint
from mmedit.datasets.pipelines import Compose
from mmedit.models import build_model
def init_model(config, checkpoint=None, device='cuda:0'):
"""Initialize a model from config file.
Args:
config (str or :obj:`mmcv.Config`): Config file path or the config
object.
checkpoint (str, optional): Checkpoint path. If left as None, the model
will not load any weights.
device (str): Which device the model will deploy. Default: 'cuda:0'.
Returns:
nn.Module: The constructed model.
"""
if isinstance(config, str):
config = mmcv.Config.fromfile(config)
elif not isinstance(config, mmcv.Config):
raise TypeError('config must be a filename or Config object, '
f'but got {type(config)}')
config.model.pretrained = None
config.test_cfg.metrics = None
model = build_model(config.model, test_cfg=config.test_cfg)
if checkpoint is not None:
checkpoint = load_checkpoint(model, checkpoint)
model.cfg = config # save the config in the model for convenience
model.to(device)
model.eval()
return model
def matting_inference(model, img, trimap):
"""Inference image(s) with the model.
Args:
model (nn.Module): The loaded model.
img (str): Image file path.
trimap (str): Trimap file path.
Returns:
np.ndarray: The predicted alpha matte.
"""
cfg = model.cfg
device = next(model.parameters()).device # model device
# remove alpha from test_pipeline
keys_to_remove = ['alpha', 'ori_alpha']
for key in keys_to_remove:
for pipeline in list(cfg.test_pipeline):
if 'key' in pipeline and key == pipeline['key']:
cfg.test_pipeline.remove(pipeline)
if 'keys' in pipeline and key in pipeline['keys']:
pipeline['keys'].remove(key)
if len(pipeline['keys']) == 0:
cfg.test_pipeline.remove(pipeline)
if 'meta_keys' in pipeline and key in pipeline['meta_keys']:
pipeline['meta_keys'].remove(key)
# build the data pipeline
test_pipeline = Compose(cfg.test_pipeline)
# prepare data
data = dict(merged_path=img, trimap_path=trimap)
data = test_pipeline(data)
data = scatter(collate([data], samples_per_gpu=1), [device])[0]
# forward the model
with torch.no_grad():
result = model(test_mode=True, **data)
return result['pred_alpha']
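# Example usage (hypothetical paths):
#   model = init_model('configs/mattors/some_config.py', 'mattor.pth')
#   pred_alpha = matting_inference(model, 'merged.png', 'trimap.png')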
| 2,659 | 33.545455 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/video_interpolation_inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import os
import os.path as osp
import cv2
import mmcv
import numpy as np
import torch
from mmcv.fileio import FileClient
from mmcv.parallel import collate
from mmedit.datasets.pipelines import Compose
VIDEO_EXTENSIONS = ('.mp4', '.mov', '.avi')
FILE_CLIENT = FileClient('disk')
def read_image(filepath):
"""Read image from file.
Args:
filepath (str): File path.
Returns:
        image (np.ndarray): Loaded image in RGB channel order.
"""
img_bytes = FILE_CLIENT.get(filepath)
image = mmcv.imfrombytes(
img_bytes, flag='color', channel_order='rgb', backend='pillow')
return image
def read_frames(source, start_index, num_frames, from_video, end_index):
"""Read frames from file or video.
Args:
source (list | mmcv.VideoReader): Source of frames.
start_index (int): Start index of frames.
        num_frames (int): Number of frames to be read.
        from_video (bool): Whether to read frames from a video.
        end_index (int): The end index of frames.
    Returns:
        images (list[np.ndarray]): Loaded frames.
"""
images = []
last_index = min(start_index + num_frames, end_index)
# read frames from video
if from_video:
for index in range(start_index, last_index):
if index >= source.frame_cnt:
break
images.append(np.flip(source.get_frame(index), axis=2))
else:
files = source[start_index:last_index]
images = [read_image(f) for f in files]
return images
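# e.g. read_frames(files, start_index=10, num_frames=5, from_video=False,
# end_index=12) loads only frames 10 and 11, since last_index is clipped to
# end_index.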
def video_interpolation_inference(model,
input_dir,
output_dir,
start_idx=0,
end_idx=None,
batch_size=4,
fps_multiplier=0,
fps=0,
filename_tmpl='{:08d}.png'):
"""Inference image with the model.
Args:
model (nn.Module): The loaded model.
input_dir (str): Directory of the input video.
output_dir (str): Directory of the output video.
start_idx (int): The index corresponding to the first frame in the
sequence. Default: 0
end_idx (int | None): The index corresponding to the last interpolated
frame in the sequence. If it is None, interpolate to the last
frame of video or sequence. Default: None
batch_size (int): Batch size. Default: 4
        fps_multiplier (float): If positive, the output fps is fps_multiplier
            times the fps of the input video. Default: 0.
fps (float): frame rate of the output video. Default: 0.
filename_tmpl (str): template of the file names. Default: '{:08d}.png'
    Returns:
        None: The interpolated frames are written to 'output_dir', either as
            a video or as an image sequence; nothing is returned.
"""
device = next(model.parameters()).device # model device
    # select the data pipeline from the config
if model.cfg.get('demo_pipeline', None):
test_pipeline = model.cfg.demo_pipeline
elif model.cfg.get('test_pipeline', None):
test_pipeline = model.cfg.test_pipeline
else:
test_pipeline = model.cfg.val_pipeline
# remove the data loading pipeline
tmp_pipeline = []
for pipeline in test_pipeline:
if pipeline['type'] not in [
'GenerateSegmentIndices', 'LoadImageFromFileList',
'LoadImageFromFile'
]:
tmp_pipeline.append(pipeline)
test_pipeline = tmp_pipeline
# compose the pipeline
test_pipeline = Compose(test_pipeline)
# check if the input is a video
input_file_extension = os.path.splitext(input_dir)[1]
if input_file_extension in VIDEO_EXTENSIONS:
source = mmcv.VideoReader(input_dir)
input_fps = source.fps
length = source.frame_cnt
from_video = True
h, w = source.height, source.width
if fps_multiplier:
            assert fps_multiplier > 0, '`fps_multiplier` must be positive'
output_fps = fps_multiplier * input_fps
else:
output_fps = fps if fps > 0 else input_fps * 2
else:
files = os.listdir(input_dir)
files = [osp.join(input_dir, f) for f in files]
files.sort()
source = files
        length = len(files)
from_video = False
example_frame = read_image(files[0])
h, w = example_frame.shape[:2]
output_fps = fps
# check if the output is a video
output_file_extension = os.path.splitext(output_dir)[1]
if output_file_extension in VIDEO_EXTENSIONS:
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
target = cv2.VideoWriter(output_dir, fourcc, output_fps, (w, h))
to_video = True
else:
to_video = False
end_idx = min(end_idx, length) if end_idx is not None else length
# calculate step args
step_size = model.step_frames * batch_size
    length_per_step = model.required_frames + model.step_frames * (
batch_size - 1)
repeat_frame = model.required_frames - model.step_frames
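    # A hedged worked example of the arithmetic above (numbers are
    # illustrative, not tied to a specific model): for a 2x interpolator
    # with required_frames=2, step_frames=1 and batch_size=4,
    #     step_size       = 1 * 4           = 4
    #     length_per_step = 2 + 1 * (4 - 1) = 5
    #     repeat_frame    = 2 - 1           = 1
    # so consecutive read windows overlap by one frame.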
prog_bar = mmcv.ProgressBar(
math.ceil(
            (end_idx + step_size - length_per_step - start_idx) / step_size))
output_index = start_idx
for start_index in range(start_idx, end_idx, step_size):
images = read_frames(
            source, start_index, length_per_step, from_video, end_index=end_idx)
# data prepare
data = dict(inputs=images, inputs_path=None, key=input_dir)
data = [test_pipeline(data)]
data = collate(data, samples_per_gpu=1)['inputs']
# data.shape: [1, t, c, h, w]
# forward the model
data = model.split_frames(data)
input_tensors = data.clone().detach()
with torch.no_grad():
output = model(data.to(device), test_mode=True)['output']
        output_tensors = output.cpu()
        if len(output_tensors.shape) == 4:
            output_tensors = output_tensors.unsqueeze(1)
result = model.merge_frames(input_tensors, output_tensors)
        if start_index != start_idx:
result = result[0 - repeat_frame:]
prog_bar.update()
# save frames
if to_video:
for frame in result:
target.write(frame)
else:
for frame in result:
save_path = osp.join(output_dir,
filename_tmpl.format(output_index))
mmcv.imwrite(frame, save_path)
output_index += 1
        if start_index + length_per_step >= end_idx:
break
print()
print(f'Output dir: {output_dir}')
if to_video:
target.release()
| 6,978 | 33.043902 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .generation_inference import generation_inference
from .inpainting_inference import inpainting_inference
from .matting_inference import init_model, matting_inference
from .restoration_face_inference import restoration_face_inference
from .restoration_inference import restoration_inference
from .restoration_video_inference import restoration_video_inference
from .test import multi_gpu_test, single_gpu_test
from .train import init_random_seed, set_random_seed, train_model
from .video_interpolation_inference import video_interpolation_inference
__all__ = [
'train_model', 'set_random_seed', 'init_model', 'matting_inference',
'inpainting_inference', 'restoration_inference', 'generation_inference',
'multi_gpu_test', 'single_gpu_test', 'restoration_video_inference',
'restoration_face_inference', 'video_interpolation_inference',
'init_random_seed'
]
| 928 | 47.894737 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/train.py | # Copyright (c) OpenMMLab. All rights reserved.
import os
import os.path as osp
import random
import warnings
import mmcv
import numpy as np
import torch
import torch.distributed as dist
from mmcv.parallel import MMDataParallel
from mmcv.runner import HOOKS, IterBasedRunner, get_dist_info
from mmcv.utils import build_from_cfg
from mmedit.core import DistEvalIterHook, EvalIterHook, build_optimizers
from mmedit.core.distributed_wrapper import DistributedDataParallelWrapper
from mmedit.datasets.builder import build_dataloader, build_dataset
from mmedit.utils import get_root_logger
def init_random_seed(seed=None, device='cuda'):
"""Initialize random seed.
If the seed is not set, the seed will be automatically randomized,
and then broadcast to all processes to prevent some potential bugs.
Args:
seed (int, Optional): The seed. Default to None.
device (str): The device where the seed will be put on.
Default to 'cuda'.
Returns:
int: Seed to be used.
"""
if seed is not None:
return seed
# Make sure all ranks share the same random seed to prevent
# some potential bugs. Please refer to
# https://github.com/open-mmlab/mmdetection/issues/6339
rank, world_size = get_dist_info()
seed = np.random.randint(2**31)
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()
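# A minimal usage sketch (hedged, illustrative only):
#
#     seed = init_random_seed(None)  # identical value on every process
#     set_random_seed(seed, deterministic=False)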
def set_random_seed(seed, deterministic=False):
"""Set random seed.
Args:
seed (int): Seed to be used.
deterministic (bool): Whether to set the deterministic option for
CUDNN backend, i.e., set `torch.backends.cudnn.deterministic`
to True and `torch.backends.cudnn.benchmark` to False.
Default: False.
"""
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
os.environ['PYTHONHASHSEED'] = str(seed)
if deterministic:
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def train_model(model,
dataset,
cfg,
distributed=False,
validate=False,
timestamp=None,
meta=None):
"""Train model entry function.
Args:
model (nn.Module): The model to be trained.
dataset (:obj:`Dataset`): Train dataset.
cfg (dict): The config dict for training.
distributed (bool): Whether to use distributed training.
Default: False.
validate (bool): Whether to do evaluation. Default: False.
timestamp (str | None): Local time for runner. Default: None.
meta (dict | None): Meta dict to record some important information.
Default: None
"""
logger = get_root_logger(log_level=cfg.log_level)
# start training
if distributed:
_dist_train(
model,
dataset,
cfg,
validate=validate,
logger=logger,
timestamp=timestamp,
meta=meta)
else:
_non_dist_train(
model,
dataset,
cfg,
validate=validate,
logger=logger,
timestamp=timestamp,
meta=meta)
def _dist_train(model,
dataset,
cfg,
validate=False,
logger=None,
timestamp=None,
meta=None):
"""Distributed training function.
Args:
model (nn.Module): The model to be trained.
dataset (:obj:`Dataset`): Train dataset.
cfg (dict): The config dict for training.
validate (bool): Whether to do evaluation. Default: False.
logger (logging.Logger | None): Logger for training. Default: None.
timestamp (str | None): Local time for runner. Default: None.
meta (dict | None): Meta dict to record some important information.
Default: None.
"""
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
# step 1: give default values and override (if exist) from cfg.data
loader_cfg = {
**dict(seed=cfg.get('seed'), drop_last=False, dist=True),
**({} if torch.__version__ != 'parrots' else dict(
prefetch_num=2,
pin_memory=False,
)),
**dict((k, cfg.data[k]) for k in [
'samples_per_gpu',
'workers_per_gpu',
'shuffle',
'seed',
'drop_last',
'prefetch_num',
'pin_memory',
] if k in cfg.data)
}
# step 2: cfg.data.train_dataloader has highest priority
train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
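    # Hedged illustration of the merge order above (config values are made
    # up): with
    #     cfg.data = dict(samples_per_gpu=4, workers_per_gpu=4,
    #                     train_dataloader=dict(samples_per_gpu=8))
    # the resulting train_loader_cfg uses samples_per_gpu=8, since keys in
    # cfg.data.train_dataloader override the shared loader_cfg defaults.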
data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
# put model on gpus
find_unused_parameters = cfg.get('find_unused_parameters', False)
model = DistributedDataParallelWrapper(
model,
device_ids=[torch.cuda.current_device()],
broadcast_buffers=False,
find_unused_parameters=find_unused_parameters)
# build runner
optimizer = build_optimizers(model, cfg.optimizers)
runner = IterBasedRunner(
model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta)
    # an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# register hooks
runner.register_training_hooks(
cfg.lr_config,
checkpoint_config=cfg.checkpoint_config,
log_config=cfg.log_config)
# visual hook
if cfg.get('visual_config', None) is not None:
cfg.visual_config['output_dir'] = os.path.join(
cfg.work_dir, cfg.visual_config['output_dir'])
runner.register_hook(mmcv.build_from_cfg(cfg.visual_config, HOOKS))
# evaluation hook
if validate and cfg.get('evaluation', None) is not None:
dataset = build_dataset(cfg.data.val)
if ('val_samples_per_gpu' in cfg.data
or 'val_workers_per_gpu' in cfg.data):
warnings.warn('"val_samples_per_gpu/val_workers_per_gpu" have '
'been deprecated. Please use '
'"val_dataloader=dict(samples_per_gpu=1)" instead. '
'Details see '
'https://github.com/open-mmlab/mmediting/pull/201')
val_loader_cfg = {
**loader_cfg,
**dict(shuffle=False, drop_last=False),
**dict((newk, cfg.data[oldk]) for oldk, newk in [
('val_samples_per_gpu', 'samples_per_gpu'),
('val_workers_per_gpu', 'workers_per_gpu'),
] if oldk in cfg.data),
**cfg.data.get('val_dataloader', {})
}
data_loader = build_dataloader(dataset, **val_loader_cfg)
save_path = osp.join(cfg.work_dir, 'val_visuals')
runner.register_hook(
DistEvalIterHook(
data_loader, save_path=save_path, **cfg.evaluation),
priority='LOW')
# user-defined hooks
if cfg.get('custom_hooks', None):
custom_hooks = cfg.custom_hooks
assert isinstance(custom_hooks, list), \
f'custom_hooks expect list type, but got {type(custom_hooks)}'
for hook_cfg in cfg.custom_hooks:
assert isinstance(hook_cfg, dict), \
'Each item in custom_hooks expects dict type, but got ' \
f'{type(hook_cfg)}'
hook_cfg = hook_cfg.copy()
priority = hook_cfg.pop('priority', 'NORMAL')
hook = build_from_cfg(hook_cfg, HOOKS)
runner.register_hook(hook, priority=priority)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_iters)
def _non_dist_train(model,
dataset,
cfg,
validate=False,
logger=None,
timestamp=None,
meta=None):
"""Non-Distributed training function.
Args:
model (nn.Module): The model to be trained.
dataset (:obj:`Dataset`): Train dataset.
cfg (dict): The config dict for training.
validate (bool): Whether to do evaluation. Default: False.
logger (logging.Logger | None): Logger for training. Default: None.
timestamp (str | None): Local time for runner. Default: None.
meta (dict | None): Meta dict to record some important information.
Default: None.
"""
dataset = dataset if isinstance(dataset, (list, tuple)) else [dataset]
# step 1: give default values and override (if exist) from cfg.data
loader_cfg = {
**dict(
seed=cfg.get('seed'),
drop_last=False,
dist=False,
num_gpus=cfg.gpus),
**({} if torch.__version__ != 'parrots' else dict(
prefetch_num=2,
pin_memory=False,
)),
**dict((k, cfg.data[k]) for k in [
'samples_per_gpu',
'workers_per_gpu',
'shuffle',
'seed',
'drop_last',
'prefetch_num',
'pin_memory',
] if k in cfg.data)
}
# step 2: cfg.data.train_dataloader has highest priority
train_loader_cfg = dict(loader_cfg, **cfg.data.get('train_dataloader', {}))
data_loaders = [build_dataloader(ds, **train_loader_cfg) for ds in dataset]
# put model on gpus/cpus
model = MMDataParallel(model, device_ids=range(cfg.gpus))
# build runner
optimizer = build_optimizers(model, cfg.optimizers)
runner = IterBasedRunner(
model,
optimizer=optimizer,
work_dir=cfg.work_dir,
logger=logger,
meta=meta)
    # an ugly workaround to make the .log and .log.json filenames the same
runner.timestamp = timestamp
# register hooks
runner.register_training_hooks(
cfg.lr_config,
checkpoint_config=cfg.checkpoint_config,
log_config=cfg.log_config)
# visual hook
if cfg.get('visual_config', None) is not None:
cfg.visual_config['output_dir'] = os.path.join(
cfg.work_dir, cfg.visual_config['output_dir'])
runner.register_hook(mmcv.build_from_cfg(cfg.visual_config, HOOKS))
# evaluation hook
if validate and cfg.get('evaluation', None) is not None:
dataset = build_dataset(cfg.data.val)
if ('val_samples_per_gpu' in cfg.data
or 'val_workers_per_gpu' in cfg.data):
warnings.warn('"val_samples_per_gpu/val_workers_per_gpu" have '
'been deprecated. Please use '
'"val_dataloader=dict(samples_per_gpu=1)" instead. '
'Details see '
'https://github.com/open-mmlab/mmediting/pull/201')
val_loader_cfg = {
**loader_cfg,
**dict(shuffle=False, drop_last=False),
**dict((newk, cfg.data[oldk]) for oldk, newk in [
('val_samples_per_gpu', 'samples_per_gpu'),
('val_workers_per_gpu', 'workers_per_gpu'),
] if oldk in cfg.data),
**cfg.data.get('val_dataloader', {})
}
data_loader = build_dataloader(dataset, **val_loader_cfg)
save_path = osp.join(cfg.work_dir, 'val_visuals')
runner.register_hook(
EvalIterHook(data_loader, save_path=save_path, **cfg.evaluation),
priority='LOW')
# user-defined hooks
if cfg.get('custom_hooks', None):
custom_hooks = cfg.custom_hooks
assert isinstance(custom_hooks, list), \
f'custom_hooks expect list type, but got {type(custom_hooks)}'
for hook_cfg in cfg.custom_hooks:
assert isinstance(hook_cfg, dict), \
'Each item in custom_hooks expects dict type, but got ' \
f'{type(hook_cfg)}'
hook_cfg = hook_cfg.copy()
priority = hook_cfg.pop('priority', 'NORMAL')
hook = build_from_cfg(hook_cfg, HOOKS)
runner.register_hook(hook, priority=priority)
if cfg.resume_from:
runner.resume(cfg.resume_from)
elif cfg.load_from:
runner.load_checkpoint(cfg.load_from)
runner.run(data_loaders, cfg.workflow, cfg.total_iters)
| 12,897 | 34.629834 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/apis/restoration_video_inference.py | # Copyright (c) OpenMMLab. All rights reserved.
import glob
import os.path as osp
import re
from functools import reduce
import mmcv
import numpy as np
import torch
from mmedit.datasets.pipelines import Compose
VIDEO_EXTENSIONS = ('.mp4', '.mov')
def pad_sequence(data, window_size):
    """Pad a frame sequence (n, t, c, h, w) along the temporal axis.

    Flipped copies of frames near each boundary are concatenated to both
    ends, so that every original frame can be centered in a sliding window
    of size `window_size`.
    """
    padding = window_size // 2
data = torch.cat([
data[:, 1 + padding:1 + 2 * padding].flip(1), data,
data[:, -1 - 2 * padding:-1 - padding].flip(1)
],
dim=1)
return data
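# Hedged shape sketch: with window_size=3, a clip of shape (1, 5, 3, 64, 64)
# becomes (1, 7, 3, 64, 64); window_size // 2 flipped boundary frames are
# added on each side so every original frame can be centered in a window.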
def restoration_video_inference(model,
img_dir,
window_size,
start_idx,
filename_tmpl,
max_seq_len=None):
"""Inference image with the model.
Args:
model (nn.Module): The loaded model.
img_dir (str): Directory of the input video.
window_size (int): The window size used in sliding-window framework.
This value should be set according to the settings of the network.
A value smaller than 0 means using recurrent framework.
start_idx (int): The index corresponds to the first frame in the
sequence.
filename_tmpl (str): Template for file name.
max_seq_len (int | None): The maximum sequence length that the model
processes. If the sequence length is larger than this number,
the sequence is split into multiple segments. If it is None,
the entire sequence is processed at once.
Returns:
Tensor: The predicted restoration result.
"""
device = next(model.parameters()).device # model device
# build the data pipeline
if model.cfg.get('demo_pipeline', None):
test_pipeline = model.cfg.demo_pipeline
elif model.cfg.get('test_pipeline', None):
test_pipeline = model.cfg.test_pipeline
else:
test_pipeline = model.cfg.val_pipeline
# check if the input is a video
file_extension = osp.splitext(img_dir)[1]
if file_extension in VIDEO_EXTENSIONS:
video_reader = mmcv.VideoReader(img_dir)
# load the images
data = dict(lq=[], lq_path=None, key=img_dir)
for frame in video_reader:
data['lq'].append(np.flip(frame, axis=2))
# remove the data loading pipeline
tmp_pipeline = []
for pipeline in test_pipeline:
if pipeline['type'] not in [
'GenerateSegmentIndices', 'LoadImageFromFileList'
]:
tmp_pipeline.append(pipeline)
test_pipeline = tmp_pipeline
else:
# the first element in the pipeline must be 'GenerateSegmentIndices'
if test_pipeline[0]['type'] != 'GenerateSegmentIndices':
raise TypeError('The first element in the pipeline must be '
f'"GenerateSegmentIndices", but got '
f'"{test_pipeline[0]["type"]}".')
# specify start_idx and filename_tmpl
test_pipeline[0]['start_idx'] = start_idx
test_pipeline[0]['filename_tmpl'] = filename_tmpl
# prepare data
sequence_length = len(glob.glob(osp.join(img_dir, '*')))
img_dir_split = re.split(r'[\\/]', img_dir)
key = img_dir_split[-1]
lq_folder = reduce(osp.join, img_dir_split[:-1])
data = dict(
lq_path=lq_folder,
gt_path='',
key=key,
sequence_length=sequence_length)
# compose the pipeline
test_pipeline = Compose(test_pipeline)
data = test_pipeline(data)
data = data['lq'].unsqueeze(0) # in cpu
# forward the model
with torch.no_grad():
if window_size > 0: # sliding window framework
data = pad_sequence(data, window_size)
result = []
for i in range(0, data.size(1) - 2 * (window_size // 2)):
data_i = data[:, i:i + window_size].to(device)
result.append(model(lq=data_i, test_mode=True)['output'].cpu())
result = torch.stack(result, dim=1)
else: # recurrent framework
if max_seq_len is None:
result = model(
lq=data.to(device), test_mode=True)['output'].cpu()
else:
result = []
for i in range(0, data.size(1), max_seq_len):
result.append(
model(
lq=data[:, i:i + max_seq_len].to(device),
test_mode=True)['output'].cpu())
result = torch.cat(result, dim=1)
return result
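# A minimal usage sketch (hedged; the config and checkpoint paths are
# illustrative, not shipped files):
#
#     from mmedit.apis import init_model
#     model = init_model('configs/basicvsr.py', 'checkpoint.pth')
#     output = restoration_video_inference(
#         model, './frames', window_size=-1, start_idx=0,
#         filename_tmpl='{:08d}.png')
#     # output: Tensor of shape (1, t, c, h, w)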
| 4,669 | 34.923077 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/distributed_wrapper.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn as nn
from mmcv.parallel import MODULE_WRAPPERS, MMDistributedDataParallel
from mmcv.parallel.scatter_gather import scatter_kwargs
from torch.cuda._utils import _get_device_index
@MODULE_WRAPPERS.register_module()
class DistributedDataParallelWrapper(nn.Module):
"""A DistributedDataParallel wrapper for models in MMediting.
    In MMEditing, there is a need to wrap different modules in the models
with separate DistributedDataParallel. Otherwise, it will cause
errors for GAN training.
    More specifically, a GAN model usually has two sub-modules:
    generator and discriminator. If we wrap both of them in one
    standard DistributedDataParallel, it will cause errors during training,
    because when we update the parameters of the generator (or discriminator),
    the parameters of the discriminator (or generator) are not updated, which
    is not allowed for DistributedDataParallel.
So we design this wrapper to separately wrap DistributedDataParallel
for generator and discriminator.
In this wrapper, we perform two operations:
1. Wrap the modules in the models with separate MMDistributedDataParallel.
Note that only modules with parameters will be wrapped.
2. Do scatter operation for 'forward', 'train_step' and 'val_step'.
    Note that the arguments of this wrapper are the same as those in
`torch.nn.parallel.distributed.DistributedDataParallel`.
Args:
module (nn.Module): Module that needs to be wrapped.
device_ids (list[int | `torch.device`]): Same as that in
`torch.nn.parallel.distributed.DistributedDataParallel`.
dim (int, optional): Same as that in the official scatter function in
pytorch. Defaults to 0.
broadcast_buffers (bool): Same as that in
`torch.nn.parallel.distributed.DistributedDataParallel`.
Defaults to False.
find_unused_parameters (bool, optional): Same as that in
`torch.nn.parallel.distributed.DistributedDataParallel`.
Traverse the autograd graph of all tensors contained in returned
value of the wrapped module’s forward function. Defaults to False.
kwargs (dict): Other arguments used in
`torch.nn.parallel.distributed.DistributedDataParallel`.
"""
def __init__(self,
module,
device_ids,
dim=0,
broadcast_buffers=False,
find_unused_parameters=False,
**kwargs):
super().__init__()
assert len(device_ids) == 1, (
'Currently, DistributedDataParallelWrapper only supports one'
'single CUDA device for each process.'
f'The length of device_ids must be 1, but got {len(device_ids)}.')
self.module = module
self.dim = dim
self.to_ddp(
device_ids=device_ids,
dim=dim,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=find_unused_parameters,
**kwargs)
self.output_device = _get_device_index(device_ids[0], True)
def to_ddp(self, device_ids, dim, broadcast_buffers,
find_unused_parameters, **kwargs):
"""Wrap models with separate MMDistributedDataParallel.
It only wraps the modules with parameters.
"""
for name, module in self.module._modules.items():
if next(module.parameters(), None) is None:
module = module.cuda()
elif all(not p.requires_grad for p in module.parameters()):
module = module.cuda()
else:
module = MMDistributedDataParallel(
module.cuda(),
device_ids=device_ids,
dim=dim,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=find_unused_parameters,
**kwargs)
self.module._modules[name] = module
def scatter(self, inputs, kwargs, device_ids):
"""Scatter function.
Args:
inputs (Tensor): Input Tensor.
kwargs (dict): Args for
``mmcv.parallel.scatter_gather.scatter_kwargs``.
device_ids (int): Device id.
"""
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def forward(self, *inputs, **kwargs):
"""Forward function.
Args:
inputs (tuple): Input data.
kwargs (dict): Args for
``mmcv.parallel.scatter_gather.scatter_kwargs``.
"""
inputs, kwargs = self.scatter(inputs, kwargs,
[torch.cuda.current_device()])
return self.module(*inputs[0], **kwargs[0])
def train_step(self, *inputs, **kwargs):
"""Train step function.
Args:
inputs (Tensor): Input Tensor.
kwargs (dict): Args for
``mmcv.parallel.scatter_gather.scatter_kwargs``.
"""
inputs, kwargs = self.scatter(inputs, kwargs,
[torch.cuda.current_device()])
output = self.module.train_step(*inputs[0], **kwargs[0])
return output
def val_step(self, *inputs, **kwargs):
"""Validation step function.
Args:
inputs (tuple): Input data.
kwargs (dict): Args for ``scatter_kwargs``.
"""
inputs, kwargs = self.scatter(inputs, kwargs,
[torch.cuda.current_device()])
output = self.module.val_step(*inputs[0], **kwargs[0])
return output
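# A minimal usage sketch (hedged; `gan_model`, `data_batch` and `optimizer`
# are illustrative, assuming a model with `generator` and `discriminator`
# sub-modules):
#
#     model = DistributedDataParallelWrapper(
#         gan_model,
#         device_ids=[torch.cuda.current_device()],
#         broadcast_buffers=False)
#     outputs = model.train_step(data_batch, optimizer)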
| 5,720 | 39.864286 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/misc.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import numpy as np
import torch
from torchvision.utils import make_grid
def tensor2img(tensor, out_type=np.uint8, min_max=(0, 1)):
"""Convert torch Tensors into image numpy arrays.
After clamping to (min, max), image values will be normalized to [0, 1].
For different tensor shapes, this function will have different behaviors:
1. 4D mini-batch Tensor of shape (N x 3/1 x H x W):
Use `make_grid` to stitch images in the batch dimension, and then
convert it to numpy array.
2. 3D Tensor of shape (3/1 x H x W) and 2D Tensor of shape (H x W):
Directly change to numpy array.
Note that the image channel in input tensors should be RGB order. This
function will convert it to cv2 convention, i.e., (H x W x C) with BGR
order.
Args:
tensor (Tensor | list[Tensor]): Input tensors.
out_type (numpy type): Output types. If ``np.uint8``, transform outputs
to uint8 type with range [0, 255]; otherwise, float type with
range [0, 1]. Default: ``np.uint8``.
min_max (tuple): min and max values for clamp.
Returns:
        (ndarray | list[ndarray]): 3D ndarray of shape (H x W x C) or 2D
            ndarray of shape (H x W).
"""
if not (torch.is_tensor(tensor) or
(isinstance(tensor, list)
and all(torch.is_tensor(t) for t in tensor))):
raise TypeError(
f'tensor or list of tensors expected, got {type(tensor)}')
if torch.is_tensor(tensor):
tensor = [tensor]
result = []
for _tensor in tensor:
        # Squeeze two times so that:
        # 1. (1, 1, h, w) -> (h, w) or
        # 2. (1, 3, h, w) -> (3, h, w) or
        # 3. (n>1, 3/1, h, w) -> (n>1, 3/1, h, w)
_tensor = _tensor.squeeze(0).squeeze(0)
_tensor = _tensor.float().detach().cpu().clamp_(*min_max)
_tensor = (_tensor - min_max[0]) / (min_max[1] - min_max[0])
n_dim = _tensor.dim()
if n_dim == 4:
img_np = make_grid(
_tensor, nrow=int(math.sqrt(_tensor.size(0))),
normalize=False).numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))
elif n_dim == 3:
img_np = _tensor.numpy()
img_np = np.transpose(img_np[[2, 1, 0], :, :], (1, 2, 0))
elif n_dim == 2:
img_np = _tensor.numpy()
else:
raise ValueError('Only support 4D, 3D or 2D tensor. '
f'But received with dimension: {n_dim}')
if out_type == np.uint8:
            # Unlike MATLAB, numpy.uint8() WILL NOT round by default.
img_np = (img_np * 255.0).round()
img_np = img_np.astype(out_type)
result.append(img_np)
result = result[0] if len(result) == 1 else result
return result
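# A minimal usage sketch (hedged):
#
#     t = torch.rand(1, 3, 8, 8)  # RGB tensor with values in [0, 1]
#     img = tensor2img(t)         # uint8 ndarray of shape (8, 8, 3), BGR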
| 2,898 | 37.653333 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .evaluation import (DistEvalIterHook, EvalIterHook, L1Evaluation, mse,
psnr, reorder_image, sad, ssim)
from .hooks import VisualizationHook
from .misc import tensor2img
from .optimizer import build_optimizers
from .scheduler import LinearLrUpdaterHook
__all__ = [
'build_optimizers', 'tensor2img', 'EvalIterHook', 'DistEvalIterHook',
'mse', 'psnr', 'reorder_image', 'sad', 'ssim', 'LinearLrUpdaterHook',
'VisualizationHook', 'L1Evaluation'
]
| 533 | 37.142857 | 75 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/mask.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import cv2
import mmcv
import numpy as np
from PIL import Image, ImageDraw
def random_bbox(img_shape, max_bbox_shape, max_bbox_delta=40, min_margin=20):
"""Generate a random bbox for the mask on a given image.
In our implementation, the max value cannot be obtained since we use
    `np.random.randint`. And this may be different from other standard scripts
in the community.
Args:
        img_shape (tuple[int]): The size of an image, in the form of (h, w).
max_bbox_shape (int | tuple[int]): Maximum shape of the mask box,
in the form of (h, w). If it is an integer, the mask box will be
square.
max_bbox_delta (int | tuple[int]): Maximum delta of the mask box,
in the form of (delta_h, delta_w). If it is an integer, delta_h
and delta_w will be the same. Mask shape will be randomly sampled
from the range of `max_bbox_shape - max_bbox_delta` and
`max_bbox_shape`. Default: (40, 40).
min_margin (int | tuple[int]): The minimum margin size from the
            edges of the mask box to the image border, in the form of
(margin_h, margin_w). If it is an integer, margin_h and margin_w
will be the same. Default: (20, 20).
Returns:
tuple[int]: The generated box, (top, left, h, w).
"""
if not isinstance(max_bbox_shape, tuple):
max_bbox_shape = (max_bbox_shape, max_bbox_shape)
if not isinstance(max_bbox_delta, tuple):
max_bbox_delta = (max_bbox_delta, max_bbox_delta)
if not isinstance(min_margin, tuple):
min_margin = (min_margin, min_margin)
assert mmcv.is_tuple_of(max_bbox_shape, int)
assert mmcv.is_tuple_of(max_bbox_delta, int)
assert mmcv.is_tuple_of(min_margin, int)
img_h, img_w = img_shape[:2]
max_mask_h, max_mask_w = max_bbox_shape
max_delta_h, max_delta_w = max_bbox_delta
margin_h, margin_w = min_margin
if max_mask_h > img_h or max_mask_w > img_w:
raise ValueError(f'mask shape {max_bbox_shape} should be smaller than '
f'image shape {img_shape}')
if (max_delta_h // 2 * 2 >= max_mask_h
or max_delta_w // 2 * 2 >= max_mask_w):
        raise ValueError(f'mask delta {max_bbox_delta} should be smaller than '
f'mask shape {max_bbox_shape}')
if img_h - max_mask_h < 2 * margin_h or img_w - max_mask_w < 2 * margin_w:
        raise ValueError(f'Margin {min_margin} cannot be satisfied for image '
                         f'shape {img_shape} and mask shape {max_bbox_shape}')
# get the max value of (top, left)
max_top = img_h - margin_h - max_mask_h
max_left = img_w - margin_w - max_mask_w
# randomly select a (top, left)
top = np.random.randint(margin_h, max_top)
left = np.random.randint(margin_w, max_left)
    # randomly shrink the shape of the mask box according to `max_bbox_delta`;
    # the bottom-right corner of the box is fixed
delta_top = np.random.randint(0, max_delta_h // 2 + 1)
delta_left = np.random.randint(0, max_delta_w // 2 + 1)
top = top + delta_top
left = left + delta_left
h = max_mask_h - delta_top
w = max_mask_w - delta_left
return (top, left, h, w)
def bbox2mask(img_shape, bbox, dtype='uint8'):
"""Generate mask in ndarray from bbox.
The returned mask has the shape of (h, w, 1). '1' indicates the
hole and '0' indicates the valid regions.
We prefer to use `uint8` as the data type of masks, which may be different
from other codes in the community.
Args:
img_shape (tuple[int]): The size of the image.
bbox (tuple[int]): Configuration tuple, (top, left, height, width)
dtype (str): Indicate the data type of returned masks. Default: 'uint8'
Return:
numpy.ndarray: Mask in the shape of (h, w, 1).
"""
height, width = img_shape[:2]
mask = np.zeros((height, width, 1), dtype=dtype)
mask[bbox[0]:bbox[0] + bbox[2], bbox[1]:bbox[1] + bbox[3], :] = 1
return mask
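# A minimal usage sketch (hedged):
#
#     bbox = random_bbox((256, 256), max_bbox_shape=(128, 128))
#     mask = bbox2mask((256, 256), bbox)  # (256, 256, 1), 1 marks the hole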
def brush_stroke_mask(img_shape,
num_vertices=(4, 12),
mean_angle=2 * math.pi / 5,
angle_range=2 * math.pi / 15,
brush_width=(12, 40),
max_loops=4,
dtype='uint8'):
"""Generate free-form mask.
The method of generating free-form mask is in the following paper:
Free-Form Image Inpainting with Gated Convolution.
    When you set the config of this type of mask, note that the range of
    `np.random.randint` is [left, right).
We prefer to use `uint8` as the data type of masks, which may be different
from other codes in the community.
TODO: Rewrite the implementation of this function.
Args:
img_shape (tuple[int]): Size of the image.
num_vertices (int | tuple[int]): Min and max number of vertices. If
only give an integer, we will fix the number of vertices.
Default: (4, 12).
mean_angle (float): Mean value of the angle in each vertex. The angle
is measured in radians. Default: 2 * math.pi / 5.
angle_range (float): Range of the random angle.
Default: 2 * math.pi / 15.
brush_width (int | tuple[int]): (min_width, max_width). If only give
an integer, we will fix the width of brush. Default: (12, 40).
max_loops (int): The max number of for loops of drawing strokes.
dtype (str): Indicate the data type of returned masks.
Default: 'uint8'.
Returns:
numpy.ndarray: Mask in the shape of (h, w, 1).
"""
img_h, img_w = img_shape[:2]
if isinstance(num_vertices, int):
min_num_vertices, max_num_vertices = num_vertices, num_vertices + 1
elif isinstance(num_vertices, tuple):
min_num_vertices, max_num_vertices = num_vertices
else:
        raise TypeError('The type of num_vertices should be int '
f'or tuple[int], but got type: {num_vertices}')
if isinstance(brush_width, tuple):
min_width, max_width = brush_width
elif isinstance(brush_width, int):
min_width, max_width = brush_width, brush_width + 1
else:
        raise TypeError('The type of brush_width should be int '
f'or tuple[int], but got type: {brush_width}')
average_radius = math.sqrt(img_h * img_h + img_w * img_w) / 8
mask = Image.new('L', (img_w, img_h), 0)
loop_num = np.random.randint(1, max_loops)
num_vertex_list = np.random.randint(
min_num_vertices, max_num_vertices, size=loop_num)
angle_min_list = np.random.uniform(0, angle_range, size=loop_num)
angle_max_list = np.random.uniform(0, angle_range, size=loop_num)
for loop_n in range(loop_num):
num_vertex = num_vertex_list[loop_n]
angle_min = mean_angle - angle_min_list[loop_n]
angle_max = mean_angle + angle_max_list[loop_n]
angles = []
vertex = []
# set random angle on each vertex
angles = np.random.uniform(angle_min, angle_max, size=num_vertex)
reverse_mask = (np.arange(num_vertex, dtype=np.float32) % 2) == 0
angles[reverse_mask] = 2 * math.pi - angles[reverse_mask]
        w, h = mask.size  # PIL's Image.size is (width, height)
# set random vertices
vertex.append((np.random.randint(0, w), np.random.randint(0, h)))
r_list = np.random.normal(
loc=average_radius, scale=average_radius // 2, size=num_vertex)
for i in range(num_vertex):
r = np.clip(r_list[i], 0, 2 * average_radius)
new_x = np.clip(vertex[-1][0] + r * math.cos(angles[i]), 0, w)
new_y = np.clip(vertex[-1][1] + r * math.sin(angles[i]), 0, h)
vertex.append((int(new_x), int(new_y)))
# draw brush strokes according to the vertex and angle list
draw = ImageDraw.Draw(mask)
width = np.random.randint(min_width, max_width)
draw.line(vertex, fill=1, width=width)
for v in vertex:
draw.ellipse((v[0] - width // 2, v[1] - width // 2,
v[0] + width // 2, v[1] + width // 2),
fill=1)
    # randomly flip the mask; `Image.transpose` returns a new image,
    # so the result must be assigned back
    if np.random.normal() > 0:
        mask = mask.transpose(Image.FLIP_LEFT_RIGHT)
    if np.random.normal() > 0:
        mask = mask.transpose(Image.FLIP_TOP_BOTTOM)
mask = np.array(mask).astype(dtype=getattr(np, dtype))
mask = mask[:, :, None]
return mask
def random_irregular_mask(img_shape,
num_vertices=(4, 8),
max_angle=4,
length_range=(10, 100),
brush_width=(10, 40),
dtype='uint8'):
"""Generate random irregular masks.
This is a modified version of free-form mask implemented in
'brush_stroke_mask'.
We prefer to use `uint8` as the data type of masks, which may be different
from other codes in the community.
TODO: Rewrite the implementation of this function.
Args:
img_shape (tuple[int]): Size of the image.
num_vertices (int | tuple[int]): Min and max number of vertices. If
only give an integer, we will fix the number of vertices.
Default: (4, 8).
max_angle (float): Max value of angle at each vertex. Default 4.0.
length_range (int | tuple[int]): (min_length, max_length). If only give
an integer, we will fix the length of brush. Default: (10, 100).
brush_width (int | tuple[int]): (min_width, max_width). If only give
an integer, we will fix the width of brush. Default: (10, 40).
dtype (str): Indicate the data type of returned masks. Default: 'uint8'
Returns:
numpy.ndarray: Mask in the shape of (h, w, 1).
"""
h, w = img_shape[:2]
mask = np.zeros((h, w), dtype=dtype)
if isinstance(length_range, int):
min_length, max_length = length_range, length_range + 1
elif isinstance(length_range, tuple):
min_length, max_length = length_range
else:
        raise TypeError('The type of length_range should be int '
f'or tuple[int], but got type: {length_range}')
if isinstance(num_vertices, int):
min_num_vertices, max_num_vertices = num_vertices, num_vertices + 1
elif isinstance(num_vertices, tuple):
min_num_vertices, max_num_vertices = num_vertices
else:
        raise TypeError('The type of num_vertices should be int '
f'or tuple[int], but got type: {num_vertices}')
if isinstance(brush_width, int):
min_brush_width, max_brush_width = brush_width, brush_width + 1
elif isinstance(brush_width, tuple):
min_brush_width, max_brush_width = brush_width
else:
        raise TypeError('The type of brush_width should be int '
f'or tuple[int], but got type: {brush_width}')
num_v = np.random.randint(min_num_vertices, max_num_vertices)
for i in range(num_v):
start_x = np.random.randint(w)
start_y = np.random.randint(h)
        # from the start point, randomly select n (1 <= n <= 5) directions.
direction_num = np.random.randint(1, 6)
angle_list = np.random.randint(0, max_angle, size=direction_num)
length_list = np.random.randint(
min_length, max_length, size=direction_num)
brush_width_list = np.random.randint(
min_brush_width, max_brush_width, size=direction_num)
for direct_n in range(direction_num):
angle = 0.01 + angle_list[direct_n]
if i % 2 == 0:
angle = 2 * math.pi - angle
length = length_list[direct_n]
brush_w = brush_width_list[direct_n]
# compute end point according to the random angle
end_x = (start_x + length * np.sin(angle)).astype(np.int32)
end_y = (start_y + length * np.cos(angle)).astype(np.int32)
cv2.line(mask, (start_y, start_x), (end_y, end_x), 1, brush_w)
start_x, start_y = end_x, end_y
mask = np.expand_dims(mask, axis=2)
return mask
def get_irregular_mask(img_shape, area_ratio_range=(0.15, 0.5), **kwargs):
"""Get irregular mask with the constraints in mask ratio
Args:
img_shape (tuple[int]): Size of the image.
area_ratio_range (tuple(float)): Contain the minimum and maximum area
ratio. Default: (0.15, 0.5).
Returns:
numpy.ndarray: Mask in the shape of (h, w, 1).
"""
mask = random_irregular_mask(img_shape, **kwargs)
min_ratio, max_ratio = area_ratio_range
while not min_ratio < (np.sum(mask) /
(img_shape[0] * img_shape[1])) < max_ratio:
mask = random_irregular_mask(img_shape, **kwargs)
return mask
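# A minimal usage sketch (hedged): the helper rejection-samples masks until
# the hole area falls inside `area_ratio_range`.
#
#     mask = get_irregular_mask((256, 256))  # (256, 256, 1) uint8, 1 = hole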
| 12,928 | 39.785489 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/scheduler/lr_updater.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner import HOOKS, LrUpdaterHook
@HOOKS.register_module()
class LinearLrUpdaterHook(LrUpdaterHook):
"""Linear learning rate scheduler for image generation.
In the beginning, the learning rate is 'base_lr' defined in mmcv.
We give a target learning rate 'target_lr' and a start point 'start'
(iteration / epoch). Before 'start', we fix learning rate as 'base_lr';
After 'start', we linearly update learning rate to 'target_lr'.
Args:
target_lr (float): The target learning rate. Default: 0.
start (int): The start point (iteration / epoch, specified by args
'by_epoch' in its parent class in mmcv) to update learning rate.
Default: 0.
interval (int): The interval to update the learning rate. Default: 1.
"""
def __init__(self, target_lr=0, start=0, interval=1, **kwargs):
super().__init__(**kwargs)
self.target_lr = target_lr
self.start = start
self.interval = interval
def get_lr(self, runner, base_lr):
"""Calculates the learning rate.
Args:
runner (object): The passed runner.
base_lr (float): Base learning rate.
Returns:
float: Current learning rate.
"""
if self.by_epoch:
progress = runner.epoch
max_progress = runner.max_epochs
else:
progress = runner.iter
max_progress = runner.max_iters
assert max_progress >= self.start
if max_progress == self.start:
return base_lr
# Before 'start', fix lr; After 'start', linearly update lr.
factor = (max(0, progress - self.start) // self.interval) / (
(max_progress - self.start) // self.interval)
return base_lr + (self.target_lr - base_lr) * factor
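        # Hedged worked example (values are illustrative): with start=5000,
        # interval=1, target_lr=0, base_lr=1e-4 and max_iters=10000, at
        # iteration 7500 the factor is (7500 - 5000) / (10000 - 5000) = 0.5,
        # giving lr = 1e-4 + (0 - 1e-4) * 0.5 = 5e-5.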
| 1,880 | 34.490566 | 77 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/scheduler/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .lr_updater import LinearLrUpdaterHook
__all__ = ['LinearLrUpdaterHook']
| 127 | 24.6 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/evaluation/metric_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import cv2
import numpy as np
def gaussian(x, sigma):
"""Gaussian function.
Args:
x (array_like): The independent variable.
sigma (float): Standard deviation of the gaussian function.
Return:
ndarray or scalar: Gaussian value of `x`.
"""
return np.exp(-x**2 / (2 * sigma**2)) / (sigma * np.sqrt(2 * np.pi))
def dgaussian(x, sigma):
"""Gradient of gaussian.
Args:
x (array_like): The independent variable.
sigma (float): Standard deviation of the gaussian function.
Return:
ndarray or scalar: Gradient of gaussian of `x`.
"""
return -x * gaussian(x, sigma) / sigma**2
def gauss_filter(sigma, epsilon=1e-2):
"""Gradient of gaussian.
Args:
sigma (float): Standard deviation of the gaussian kernel.
epsilon (float): Small value used when calculating kernel size.
Default: 1e-2.
Return:
tuple[ndarray]: Gaussian filter along x and y axis.
"""
half_size = np.ceil(
sigma * np.sqrt(-2 * np.log(np.sqrt(2 * np.pi) * sigma * epsilon)))
size = int(2 * half_size + 1)
# create filter in x axis
filter_x = np.zeros((size, size))
for i in range(size):
for j in range(size):
filter_x[i, j] = gaussian(i - half_size, sigma) * dgaussian(
j - half_size, sigma)
# normalize filter
norm = np.sqrt((filter_x**2).sum())
filter_x = filter_x / norm
filter_y = np.transpose(filter_x)
return filter_x, filter_y
def gauss_gradient(img, sigma):
"""Gaussian gradient.
From https://www.mathworks.com/matlabcentral/mlc-downloads/downloads/
submissions/8060/versions/2/previews/gaussgradient/gaussgradient.m/
index.html
Args:
img (ndarray): Input image.
sigma (float): Standard deviation of the gaussian kernel.
Return:
ndarray: Gaussian gradient of input `img`.
"""
filter_x, filter_y = gauss_filter(sigma)
img_filtered_x = cv2.filter2D(
img, -1, filter_x, borderType=cv2.BORDER_REPLICATE)
img_filtered_y = cv2.filter2D(
img, -1, filter_y, borderType=cv2.BORDER_REPLICATE)
return np.sqrt(img_filtered_x**2 + img_filtered_y**2)
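# A minimal usage sketch (hedged):
#
#     img = np.random.rand(64, 64).astype(np.float32)
#     grad = gauss_gradient(img, sigma=1.4)  # (64, 64) gradient magnitude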
| 2,273 | 26.731707 | 75 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/evaluation/eval_hooks.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
from mmcv.runner import Hook
from torch.utils.data import DataLoader
class EvalIterHook(Hook):
"""Non-Distributed evaluation hook for iteration-based runner.
This hook will regularly perform evaluation in a given interval when
performing in non-distributed environment.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval. Default: 1.
eval_kwargs (dict): Other eval kwargs. It contains:
save_image (bool): Whether to save image.
save_path (str): The path to save image.
"""
def __init__(self, dataloader, interval=1, **eval_kwargs):
if not isinstance(dataloader, DataLoader):
raise TypeError('dataloader must be a pytorch DataLoader, '
                            f'but got {type(dataloader)}')
self.dataloader = dataloader
self.interval = interval
self.eval_kwargs = eval_kwargs
self.save_image = self.eval_kwargs.pop('save_image', False)
self.save_path = self.eval_kwargs.pop('save_path', None)
def after_train_iter(self, runner):
"""The behavior after each train iteration.
Args:
runner (``mmcv.runner.BaseRunner``): The runner.
"""
if not self.every_n_iters(runner, self.interval):
return
runner.log_buffer.clear()
from mmedit.apis import single_gpu_test
results = single_gpu_test(
runner.model,
self.dataloader,
save_image=self.save_image,
save_path=self.save_path,
iteration=runner.iter)
self.evaluate(runner, results)
def evaluate(self, runner, results):
"""Evaluation function.
Args:
runner (``mmcv.runner.BaseRunner``): The runner.
results (dict): Model forward results.
"""
eval_res = self.dataloader.dataset.evaluate(
results, logger=runner.logger, **self.eval_kwargs)
for name, val in eval_res.items():
runner.log_buffer.output[name] = val
runner.log_buffer.ready = True
class DistEvalIterHook(EvalIterHook):
"""Distributed evaluation hook.
Args:
dataloader (DataLoader): A PyTorch dataloader.
interval (int): Evaluation interval. Default: 1.
tmpdir (str | None): Temporary directory to save the results of all
processes. Default: None.
gpu_collect (bool): Whether to use gpu or cpu to collect results.
Default: False.
eval_kwargs (dict): Other eval kwargs. It may contain:
            save_image (bool): Whether to save images.
save_path (str): The path to save image.
"""
def __init__(self,
dataloader,
interval=1,
gpu_collect=False,
**eval_kwargs):
super().__init__(dataloader, interval, **eval_kwargs)
self.gpu_collect = gpu_collect
def after_train_iter(self, runner):
"""The behavior after each train iteration.
Args:
runner (``mmcv.runner.BaseRunner``): The runner.
"""
if not self.every_n_iters(runner, self.interval):
return
runner.log_buffer.clear()
from mmedit.apis import multi_gpu_test
results = multi_gpu_test(
runner.model,
self.dataloader,
tmpdir=osp.join(runner.work_dir, '.eval_hook'),
gpu_collect=self.gpu_collect,
save_image=self.save_image,
save_path=self.save_path,
iteration=runner.iter)
if runner.rank == 0:
print('\n')
self.evaluate(runner, results)
| 3,766 | 33.87963 | 75 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/evaluation/metrics.py | # Copyright (c) OpenMMLab. All rights reserved.
import math
import cv2
import mmcv
import numpy as np
from scipy.ndimage import convolve
from scipy.special import gamma
from mmedit.datasets.pipelines.matlab_like_resize import MATLABLikeResize
from .metric_utils import gauss_gradient
def sad(alpha, trimap, pred_alpha):
    """SAD (Sum of Absolute Differences) for evaluating alpha mattes.

    Args:
        alpha (ndarray): Ground-truth alpha matte with range [0, 255].
        trimap (ndarray): Input trimap with its value in {0, 128, 255}.
        pred_alpha (ndarray): Predicted alpha matte with range [0, 255].
    """
    if alpha.ndim != 2 or trimap.ndim != 2 or pred_alpha.ndim != 2:
        raise ValueError(
            'input alpha, trimap and pred_alpha should have two dimensions, '
            'please check their shape: '
            f'alpha {alpha.shape}, trimap {trimap.shape}, '
            f'pred_alpha {pred_alpha.shape}')
assert (pred_alpha[trimap == 0] == 0).all()
assert (pred_alpha[trimap == 255] == 255).all()
alpha = alpha.astype(np.float64) / 255
pred_alpha = pred_alpha.astype(np.float64) / 255
sad_result = np.abs(pred_alpha - alpha).sum() / 1000
return sad_result
def mse(alpha, trimap, pred_alpha):
    """MSE (Mean Squared Error) for evaluating alpha mattes.

    The error is averaged over the unknown (128) region of the trimap.

    Args:
        alpha (ndarray): Ground-truth alpha matte with range [0, 255].
        trimap (ndarray): Input trimap with its value in {0, 128, 255}.
        pred_alpha (ndarray): Predicted alpha matte with range [0, 255].
    """
    if alpha.ndim != 2 or trimap.ndim != 2 or pred_alpha.ndim != 2:
        raise ValueError(
            'input alpha, trimap and pred_alpha should have two dimensions, '
            'please check their shape: '
            f'alpha {alpha.shape}, trimap {trimap.shape}, '
            f'pred_alpha {pred_alpha.shape}')
assert (pred_alpha[trimap == 0] == 0).all()
assert (pred_alpha[trimap == 255] == 255).all()
alpha = alpha.astype(np.float64) / 255
pred_alpha = pred_alpha.astype(np.float64) / 255
weight_sum = (trimap == 128).sum()
if weight_sum != 0:
mse_result = ((pred_alpha - alpha)**2).sum() / weight_sum
else:
mse_result = 0
return mse_result
def gradient_error(alpha, trimap, pred_alpha, sigma=1.4):
"""Gradient error for evaluating alpha matte prediction.
Args:
alpha (ndarray): Ground-truth alpha matte.
trimap (ndarray): Input trimap with its value in {0, 128, 255}.
pred_alpha (ndarray): Predicted alpha matte.
sigma (float): Standard deviation of the gaussian kernel. Default: 1.4.
"""
if alpha.ndim != 2 or trimap.ndim != 2 or pred_alpha.ndim != 2:
raise ValueError(
            'input alpha, trimap and pred_alpha should have two dimensions, '
            'please check their shape: '
            f'alpha {alpha.shape}, trimap {trimap.shape}, '
            f'pred_alpha {pred_alpha.shape}')
if not ((pred_alpha[trimap == 0] == 0).all() and
(pred_alpha[trimap == 255] == 255).all()):
raise ValueError(
'pred_alpha should be masked by trimap before evaluation')
alpha = alpha.astype(np.float64)
pred_alpha = pred_alpha.astype(np.float64)
alpha_normed = np.zeros_like(alpha)
pred_alpha_normed = np.zeros_like(pred_alpha)
cv2.normalize(alpha, alpha_normed, 1., 0., cv2.NORM_MINMAX)
cv2.normalize(pred_alpha, pred_alpha_normed, 1., 0., cv2.NORM_MINMAX)
alpha_grad = gauss_gradient(alpha_normed, sigma).astype(np.float32)
pred_alpha_grad = gauss_gradient(pred_alpha_normed,
sigma).astype(np.float32)
grad_loss = ((alpha_grad - pred_alpha_grad)**2 * (trimap == 128)).sum()
# same as SAD, divide by 1000 to reduce the magnitude of the result
return grad_loss / 1000
def connectivity(alpha, trimap, pred_alpha, step=0.1):
"""Connectivity error for evaluating alpha matte prediction.
Args:
alpha (ndarray): Ground-truth alpha matte with shape (height, width).
Value range of alpha is [0, 255].
trimap (ndarray): Input trimap with shape (height, width). Elements
in trimap are one of {0, 128, 255}.
pred_alpha (ndarray): Predicted alpha matte with shape (height, width).
Value range of pred_alpha is [0, 255].
step (float): Step of threshold when computing intersection between
`alpha` and `pred_alpha`.
"""
if alpha.ndim != 2 or trimap.ndim != 2 or pred_alpha.ndim != 2:
raise ValueError(
            'input alpha, trimap and pred_alpha should have two dimensions, '
            'please check their shape: '
            f'alpha {alpha.shape}, trimap {trimap.shape}, '
            f'pred_alpha {pred_alpha.shape}')
if not ((pred_alpha[trimap == 0] == 0).all() and
(pred_alpha[trimap == 255] == 255).all()):
raise ValueError(
'pred_alpha should be masked by trimap before evaluation')
alpha = alpha.astype(np.float32) / 255
pred_alpha = pred_alpha.astype(np.float32) / 255
thresh_steps = np.arange(0, 1 + step, step)
round_down_map = -np.ones_like(alpha)
for i in range(1, len(thresh_steps)):
alpha_thresh = alpha >= thresh_steps[i]
pred_alpha_thresh = pred_alpha >= thresh_steps[i]
intersection = (alpha_thresh & pred_alpha_thresh).astype(np.uint8)
# connected components
_, output, stats, _ = cv2.connectedComponentsWithStats(
intersection, connectivity=4)
# start from 1 in dim 0 to exclude background
size = stats[1:, -1]
# largest connected component of the intersection
omega = np.zeros_like(alpha)
if len(size) != 0:
max_id = np.argmax(size)
# plus one to include background
omega[output == max_id + 1] = 1
mask = (round_down_map == -1) & (omega == 0)
round_down_map[mask] = thresh_steps[i - 1]
round_down_map[round_down_map == -1] = 1
alpha_diff = alpha - round_down_map
pred_alpha_diff = pred_alpha - round_down_map
# only calculate difference larger than or equal to 0.15
alpha_phi = 1 - alpha_diff * (alpha_diff >= 0.15)
pred_alpha_phi = 1 - pred_alpha_diff * (pred_alpha_diff >= 0.15)
connectivity_error = np.sum(
np.abs(alpha_phi - pred_alpha_phi) * (trimap == 128))
# same as SAD, divide by 1000 to reduce the magnitude of the result
return connectivity_error / 1000
def reorder_image(img, input_order='HWC'):
"""Reorder images to 'HWC' order.
If the input_order is (h, w), return (h, w, 1);
If the input_order is (c, h, w), return (h, w, c);
If the input_order is (h, w, c), return as it is.
Args:
img (ndarray): Input image.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
If the input image shape is (h, w), input_order will not have
effects. Default: 'HWC'.
Returns:
ndarray: reordered image.
"""
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
'"HWC" and "CHW"')
if len(img.shape) == 2:
img = img[..., None]
return img
if input_order == 'CHW':
img = img.transpose(1, 2, 0)
return img
def psnr(img1, img2, crop_border=0, input_order='HWC', convert_to=None):
"""Calculate PSNR (Peak Signal-to-Noise Ratio).
Ref: https://en.wikipedia.org/wiki/Peak_signal-to-noise_ratio
Args:
img1 (ndarray): Images with range [0, 255].
img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the PSNR calculation. Default: 0.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
Default: 'HWC'.
convert_to (str): Whether to convert the images to other color models.
If None, the images are not altered. When computing for 'Y',
the images are assumed to be in BGR order. Options are 'Y' and
None. Default: None.
Returns:
float: psnr result.
"""
assert img1.shape == img2.shape, (
f'Image shapes are different: {img1.shape}, {img2.shape}.')
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
'"HWC" and "CHW"')
img1 = reorder_image(img1, input_order=input_order)
img2 = reorder_image(img2, input_order=input_order)
img1, img2 = img1.astype(np.float32), img2.astype(np.float32)
if isinstance(convert_to, str) and convert_to.lower() == 'y':
img1 = mmcv.bgr2ycbcr(img1 / 255., y_only=True) * 255.
img2 = mmcv.bgr2ycbcr(img2 / 255., y_only=True) * 255.
elif convert_to is not None:
raise ValueError('Wrong color model. Supported values are '
'"Y" and None.')
if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
mse_value = np.mean((img1 - img2)**2)
if mse_value == 0:
return float('inf')
return 20. * np.log10(255. / np.sqrt(mse_value))
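# A minimal usage sketch (hedged):
#
#     a = np.random.randint(0, 246, (64, 64, 3)).astype(np.float32)
#     psnr(a, a)       # inf for identical images
#     psnr(a, a + 10)  # 20 * log10(255 / 10) ~= 28.13 dB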
def _ssim(img1, img2):
"""Calculate SSIM (structural similarity) for one channel images.
It is called by func:`calculate_ssim`.
Args:
img1, img2 (ndarray): Images with range [0, 255] with order 'HWC'.
Returns:
float: ssim result.
"""
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5]
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) *
(2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
def ssim(img1, img2, crop_border=0, input_order='HWC', convert_to=None):
"""Calculate SSIM (structural similarity).
Ref:
Image quality assessment: From error visibility to structural similarity
    The results are the same as those of the officially released MATLAB code in
https://ece.uwaterloo.ca/~z70wang/research/ssim/.
For three-channel images, SSIM is calculated for each channel and then
averaged.
Args:
img1 (ndarray): Images with range [0, 255].
img2 (ndarray): Images with range [0, 255].
        crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the SSIM calculation. Default: 0.
input_order (str): Whether the input order is 'HWC' or 'CHW'.
Default: 'HWC'.
convert_to (str): Whether to convert the images to other color models.
If None, the images are not altered. When computing for 'Y',
the images are assumed to be in BGR order. Options are 'Y' and
None. Default: None.
Returns:
float: ssim result.
"""
assert img1.shape == img2.shape, (
f'Image shapes are different: {img1.shape}, {img2.shape}.')
if input_order not in ['HWC', 'CHW']:
raise ValueError(
f'Wrong input_order {input_order}. Supported input_orders are '
'"HWC" and "CHW"')
img1 = reorder_image(img1, input_order=input_order)
img2 = reorder_image(img2, input_order=input_order)
if isinstance(convert_to, str) and convert_to.lower() == 'y':
img1, img2 = img1.astype(np.float32), img2.astype(np.float32)
img1 = mmcv.bgr2ycbcr(img1 / 255., y_only=True) * 255.
img2 = mmcv.bgr2ycbcr(img2 / 255., y_only=True) * 255.
img1 = np.expand_dims(img1, axis=2)
img2 = np.expand_dims(img2, axis=2)
elif convert_to is not None:
raise ValueError('Wrong color model. Supported values are '
'"Y" and None')
if crop_border != 0:
        img1 = img1[crop_border:-crop_border, crop_border:-crop_border, ...]
        img2 = img2[crop_border:-crop_border, crop_border:-crop_border, ...]
ssims = []
for i in range(img1.shape[2]):
ssims.append(_ssim(img1[..., i], img2[..., i]))
return np.array(ssims).mean()
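# A minimal usage sketch (hedged):
#
#     a = np.random.randint(0, 256, (64, 64, 3)).astype(np.float32)
#     ssim(a, a)  # 1.0 for identical images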
class L1Evaluation:
"""L1 evaluation metric.
Args:
data_dict (dict): Must contain keys of 'gt_img' and 'fake_res'. If
'mask' is given, the results will be computed with mask as weight.
"""
def __call__(self, data_dict):
gt = data_dict['gt_img']
if 'fake_img' in data_dict:
pred = data_dict.get('fake_img')
else:
pred = data_dict.get('fake_res')
mask = data_dict.get('mask', None)
from mmedit.models.losses.pixelwise_loss import l1_loss
l1_error = l1_loss(pred, gt, weight=mask, reduction='mean')
return l1_error
def estimate_aggd_param(block):
"""Estimate AGGD (Asymmetric Generalized Gaussian Distribution) parameters.
Args:
block (ndarray): 2D Image block.
Returns:
tuple: alpha (float), beta_l (float) and beta_r (float) for the AGGD
        distribution (estimating the parameters in Equation 7 of the paper).
"""
block = block.flatten()
gam = np.arange(0.2, 10.001, 0.001) # len = 9801
gam_reciprocal = np.reciprocal(gam)
r_gam = np.square(gamma(gam_reciprocal * 2)) / (
gamma(gam_reciprocal) * gamma(gam_reciprocal * 3))
left_std = np.sqrt(np.mean(block[block < 0]**2))
right_std = np.sqrt(np.mean(block[block > 0]**2))
gammahat = left_std / right_std
rhat = (np.mean(np.abs(block)))**2 / np.mean(block**2)
rhatnorm = (rhat * (gammahat**3 + 1) *
(gammahat + 1)) / ((gammahat**2 + 1)**2)
array_position = np.argmin((r_gam - rhatnorm)**2)
alpha = gam[array_position]
beta_l = left_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha))
beta_r = right_std * np.sqrt(gamma(1 / alpha) / gamma(3 / alpha))
return (alpha, beta_l, beta_r)
def compute_feature(block):
"""Compute features.
Args:
block (ndarray): 2D Image block.
Returns:
list: Features with length of 18.
"""
feat = []
alpha, beta_l, beta_r = estimate_aggd_param(block)
feat.extend([alpha, (beta_l + beta_r) / 2])
# distortions disturb the fairly regular structure of natural images.
# This deviation can be captured by analyzing the sample distribution of
# the products of pairs of adjacent coefficients computed along
# horizontal, vertical and diagonal orientations.
shifts = [[0, 1], [1, 0], [1, 1], [1, -1]]
for shift in shifts:
shifted_block = np.roll(block, shift, axis=(0, 1))
alpha, beta_l, beta_r = estimate_aggd_param(block * shifted_block)
mean = (beta_r - beta_l) * (gamma(2 / alpha) / gamma(1 / alpha))
feat.extend([alpha, mean, beta_l, beta_r])
return feat
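# The 18 features come from 2 parameters of the block itself plus
# 4 parameters for each of the 4 pairwise-product orientations:
# 2 + 4 * 4 = 18.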
def niqe_core(img,
mu_pris_param,
cov_pris_param,
gaussian_window,
block_size_h=96,
block_size_w=96):
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Ref: Making a "Completely Blind" Image Quality Analyzer.
This implementation could produce almost the same results as the official
MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
Note that we do not include block overlap height and width, since they are
always 0 in the official implementation.
    For good performance, the official implementation advises dividing the
    distorted image into patches of the same size as those used for the
    construction of the multivariate Gaussian model.
Args:
img (ndarray): Input image whose quality needs to be computed. The
image must be a gray or Y (of YCbCr) image with shape (h, w).
Range [0, 255] with float type.
mu_pris_param (ndarray): Mean of a pre-defined multivariate Gaussian
model calculated on the pristine dataset.
cov_pris_param (ndarray): Covariance of a pre-defined multivariate
Gaussian model calculated on the pristine dataset.
gaussian_window (ndarray): A 7x7 Gaussian window used for smoothing the
image.
        block_size_h (int): Height of the blocks into which the image is
            divided. Default: 96 (the official recommended value).
        block_size_w (int): Width of the blocks into which the image is
            divided. Default: 96 (the official recommended value).
    Returns:
        np.ndarray: NIQE quality score (a 0-dim array; lower is better).
    """
# crop image
h, w = img.shape
num_block_h = math.floor(h / block_size_h)
num_block_w = math.floor(w / block_size_w)
img = img[0:num_block_h * block_size_h, 0:num_block_w * block_size_w]
distparam = [] # dist param is actually the multiscale features
for scale in (1, 2): # perform on two scales (1, 2)
mu = convolve(img, gaussian_window, mode='nearest')
sigma = np.sqrt(
np.abs(
convolve(np.square(img), gaussian_window, mode='nearest') -
np.square(mu)))
# normalize, as in Eq. 1 in the paper
        img_normalized = (img - mu) / (sigma + 1)
feat = []
for idx_w in range(num_block_w):
for idx_h in range(num_block_h):
# process each block
                block = img_normalized[idx_h * block_size_h //
                                       scale:(idx_h + 1) * block_size_h //
                                       scale, idx_w * block_size_w //
                                       scale:(idx_w + 1) * block_size_w //
                                       scale]
feat.append(compute_feature(block))
distparam.append(np.array(feat))
# matlab-like bicubic downsample with anti-aliasing
if scale == 1:
resize = MATLABLikeResize(keys=None, scale=0.5)
img = resize._resize(img[:, :, np.newaxis] / 255.)[:, :, 0] * 255.
distparam = np.concatenate(distparam, axis=1)
# fit a MVG (multivariate Gaussian) model to distorted patch features
mu_distparam = np.nanmean(distparam, axis=0)
distparam_no_nan = distparam[~np.isnan(distparam).any(axis=1)]
cov_distparam = np.cov(distparam_no_nan, rowvar=False)
# compute niqe quality, Eq. 10 in the paper
invcov_param = np.linalg.pinv((cov_pris_param + cov_distparam) / 2)
quality = np.matmul(
np.matmul((mu_pris_param - mu_distparam), invcov_param),
np.transpose((mu_pris_param - mu_distparam)))
return np.squeeze(np.sqrt(quality))
def niqe(img, crop_border, input_order='HWC', convert_to='y'):
"""Calculate NIQE (Natural Image Quality Evaluator) metric.
Ref: Making a "Completely Blind" Image Quality Analyzer.
This implementation could produce almost the same results as the official
MATLAB codes: http://live.ece.utexas.edu/research/quality/niqe_release.zip
We use the official params estimated from the pristine dataset.
We use the recommended block size (96, 96) without overlaps.
Args:
img (ndarray): Input image whose quality needs to be computed.
The input image must be in range [0, 255] with float/int type.
The input_order of image can be 'HW' or 'HWC' or 'CHW'. (BGR order)
If the input order is 'HWC' or 'CHW', it will be converted to gray
or Y (of YCbCr) image according to the ``convert_to`` argument.
crop_border (int): Cropped pixels in each edge of an image. These
pixels are not involved in the metric calculation.
input_order (str): Whether the input order is 'HW', 'HWC' or 'CHW'.
Default: 'HWC'.
convert_to (str): Whether converted to 'y' (of MATLAB YCbCr) or 'gray'.
Default: 'y'.
Returns:
float: NIQE result.
"""
# we use the official params estimated from the pristine dataset.
niqe_pris_params = np.load('mmedit/core/evaluation/niqe_pris_params.npz')
mu_pris_param = niqe_pris_params['mu_pris_param']
cov_pris_param = niqe_pris_params['cov_pris_param']
gaussian_window = niqe_pris_params['gaussian_window']
img = img.astype(np.float32)
if input_order != 'HW':
img = reorder_image(img, input_order=input_order)
if convert_to == 'y':
img = mmcv.bgr2ycbcr(img / 255., y_only=True) * 255.
elif convert_to == 'gray':
        # mmcv.bgr2gray takes only the image (plus an optional keepdim flag)
        img = mmcv.bgr2gray(img / 255.) * 255.
img = np.squeeze(img)
if crop_border != 0:
img = img[crop_border:-crop_border, crop_border:-crop_border]
# round to follow official implementation
img = img.round()
niqe_result = niqe_core(img, mu_pris_param, cov_pris_param,
gaussian_window)
return niqe_result
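# Usage sketch (illustrative; `_demo_niqe` is not part of the original file).
# It assumes `niqe_pris_params.npz` is reachable at the path hard-coded in
# `niqe`, i.e. that the snippet is run from the repository root.
def _demo_niqe():
    img = np.random.RandomState(0).uniform(0, 255, (128, 128, 3))
    score = niqe(img, crop_border=0, input_order='HWC', convert_to='y')
    print(float(score))  # lower scores indicate better perceived quality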
| 20,691 | 38.413333 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/evaluation/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .eval_hooks import DistEvalIterHook, EvalIterHook
from .metrics import (L1Evaluation, connectivity, gradient_error, mse, niqe,
psnr, reorder_image, sad, ssim)
__all__ = [
'mse', 'sad', 'psnr', 'reorder_image', 'ssim', 'EvalIterHook',
'DistEvalIterHook', 'L1Evaluation', 'gradient_error', 'connectivity',
'niqe'
]
| 401 | 35.545455 | 76 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/export/wrappers.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import warnings
import numpy as np
import onnxruntime as ort
import torch
from torch import nn
from mmedit.models import BaseMattor, BasicRestorer, build_model
def inference_with_session(sess, io_binding, output_names, input_tensor):
device_type = input_tensor.device.type
device_id = input_tensor.device.index
device_id = 0 if device_id is None else device_id
io_binding.bind_input(
name='input',
device_type=device_type,
device_id=device_id,
element_type=np.float32,
shape=input_tensor.shape,
buffer_ptr=input_tensor.data_ptr())
for name in output_names:
io_binding.bind_output(name)
sess.run_with_iobinding(io_binding)
pred = io_binding.copy_outputs_to_cpu()
return pred
class ONNXRuntimeMattor(nn.Module):
def __init__(self, sess, io_binding, output_names, base_model):
super(ONNXRuntimeMattor, self).__init__()
self.sess = sess
self.io_binding = io_binding
self.output_names = output_names
self.base_model = base_model
def forward(self,
merged,
trimap,
meta,
test_mode=False,
save_image=False,
save_path=None,
iteration=None):
input_tensor = torch.cat((merged, trimap), 1).contiguous()
pred_alpha = inference_with_session(self.sess, self.io_binding,
self.output_names, input_tensor)[0]
pred_alpha = pred_alpha.squeeze()
pred_alpha = self.base_model.restore_shape(pred_alpha, meta)
eval_result = self.base_model.evaluate(pred_alpha, meta)
if save_image:
self.base_model.save_image(pred_alpha, meta, save_path, iteration)
return {'pred_alpha': pred_alpha, 'eval_result': eval_result}
class RestorerGenerator(nn.Module):
def __init__(self, sess, io_binding, output_names):
super(RestorerGenerator, self).__init__()
self.sess = sess
self.io_binding = io_binding
self.output_names = output_names
def forward(self, x):
pred = inference_with_session(self.sess, self.io_binding,
self.output_names, x)[0]
pred = torch.from_numpy(pred)
return pred
class ONNXRuntimeRestorer(nn.Module):
def __init__(self, sess, io_binding, output_names, base_model):
super(ONNXRuntimeRestorer, self).__init__()
self.sess = sess
self.io_binding = io_binding
self.output_names = output_names
self.base_model = base_model
restorer_generator = RestorerGenerator(self.sess, self.io_binding,
self.output_names)
base_model.generator = restorer_generator
def forward(self, lq, gt=None, test_mode=False, **kwargs):
return self.base_model(lq, gt=gt, test_mode=test_mode, **kwargs)
class ONNXRuntimeEditing(nn.Module):
def __init__(self, onnx_file, cfg, device_id):
super(ONNXRuntimeEditing, self).__init__()
ort_custom_op_path = ''
try:
from mmcv.ops import get_onnxruntime_op_path
ort_custom_op_path = get_onnxruntime_op_path()
except (ImportError, ModuleNotFoundError):
            warnings.warn('If input model has custom op from mmcv, '
                          'you may have to build mmcv with ONNXRuntime '
                          'from source.')
session_options = ort.SessionOptions()
# register custom op for onnxruntime
if osp.exists(ort_custom_op_path):
session_options.register_custom_ops_library(ort_custom_op_path)
sess = ort.InferenceSession(onnx_file, session_options)
providers = ['CPUExecutionProvider']
options = [{}]
is_cuda_available = ort.get_device() == 'GPU'
if is_cuda_available:
providers.insert(0, 'CUDAExecutionProvider')
options.insert(0, {'device_id': device_id})
sess.set_providers(providers, options)
self.sess = sess
self.device_id = device_id
self.io_binding = sess.io_binding()
self.output_names = [_.name for _ in sess.get_outputs()]
base_model = build_model(
cfg.model, train_cfg=None, test_cfg=cfg.test_cfg)
if isinstance(base_model, BaseMattor):
WrapperClass = ONNXRuntimeMattor
elif isinstance(base_model, BasicRestorer):
WrapperClass = ONNXRuntimeRestorer
self.wrapper = WrapperClass(self.sess, self.io_binding,
self.output_names, base_model)
def forward(self, **kwargs):
return self.wrapper(**kwargs)
| 4,767 | 34.318519 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/export/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .wrappers import ONNXRuntimeEditing
__all__ = ['ONNXRuntimeEditing']
| 123 | 23.8 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/hooks/visualization.py | # Copyright (c) OpenMMLab. All rights reserved.
import os.path as osp
import mmcv
import torch
from mmcv.runner import HOOKS, Hook
from mmcv.runner.dist_utils import master_only
from torchvision.utils import save_image
@HOOKS.register_module()
class VisualizationHook(Hook):
"""Visualization hook.
In this hook, we use the official api `save_image` in torchvision to save
the visualization results.
Args:
output_dir (str): The file path to store visualizations.
res_name_list (str): The list contains the name of results in outputs
dict. The results in outputs dict must be a torch.Tensor with shape
(n, c, h, w).
interval (int): The interval of calling this hook. If set to -1,
the visualization hook will not be called. Default: -1.
filename_tmpl (str): Format string used to save images. The output file
name will be formatted as this args. Default: 'iter_{}.png'.
rerange (bool): Whether to rerange the output value from [-1, 1] to
            [0, 1]. We highly recommend that users preprocess the
            visualization results on their own. Here, we just provide a
            simple interface. Default: True.
        bgr2rgb (bool): Whether to reorder the channel dimension from BGR to
            RGB. The final saved image follows RGB order.
Default: True.
nrow (int): The number of samples in a row. Default: 1.
padding (int): The number of padding pixels between each samples.
Default: 4.
"""
def __init__(self,
output_dir,
res_name_list,
interval=-1,
filename_tmpl='iter_{}.png',
rerange=True,
bgr2rgb=True,
nrow=1,
padding=4):
assert mmcv.is_list_of(res_name_list, str)
self.output_dir = output_dir
self.res_name_list = res_name_list
self.interval = interval
self.filename_tmpl = filename_tmpl
self.bgr2rgb = bgr2rgb
self.rerange = rerange
self.nrow = nrow
self.padding = padding
mmcv.mkdir_or_exist(self.output_dir)
@master_only
def after_train_iter(self, runner):
"""The behavior after each train iteration.
Args:
runner (object): The runner.
"""
if not self.every_n_iters(runner, self.interval):
return
results = runner.outputs['results']
filename = self.filename_tmpl.format(runner.iter + 1)
img_list = [x for k, x in results.items() if k in self.res_name_list]
img_cat = torch.cat(img_list, dim=3).detach()
if self.rerange:
img_cat = ((img_cat + 1) / 2)
if self.bgr2rgb:
img_cat = img_cat[:, [2, 1, 0], ...]
img_cat = img_cat.clamp_(0, 1)
save_image(
img_cat,
osp.join(self.output_dir, filename),
nrow=self.nrow,
padding=self.padding)
| 3,050 | 34.894118 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/hooks/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .ema import ExponentialMovingAverageHook
from .visualization import VisualizationHook
__all__ = ['VisualizationHook', 'ExponentialMovingAverageHook']
| 204 | 33.166667 | 63 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/hooks/ema.py | # Copyright (c) OpenMMLab. All rights reserved.
import warnings
from copy import deepcopy
from functools import partial
import mmcv
import torch
from mmcv.parallel import is_module_wrapper
from mmcv.runner import HOOKS, Hook
@HOOKS.register_module()
class ExponentialMovingAverageHook(Hook):
"""Exponential Moving Average Hook.
    Exponential moving average is a trick widely used in the current GAN
    literature, e.g., PGGAN, StyleGAN, and BigGAN. The general idea is to
    maintain a model with the same architecture whose parameters are updated
    as a moving average of the trained weights in the original model. In
    general, the model with moving averaged weights achieves better
    performance.
Args:
module_keys (str | tuple[str]): The name of the ema model. Note that we
            require these keys to end with '_ema' so that we can easily
find the original model by discarding the last four characters.
interp_mode (str, optional): Mode of the interpolation method.
Defaults to 'lerp'.
interp_cfg (dict | None, optional): Set arguments of the interpolation
function. Defaults to None.
        interval (int, optional): Interval (in iterations) between two EMA
            updates. Default: -1.
start_iter (int, optional): Start iteration for ema. If the start
iteration is not reached, the weights of ema model will maintain
the same as the original one. Otherwise, its parameters are updated
as a moving average of the trained weights in the original model.
Default: 0.
"""
def __init__(self,
module_keys,
interp_mode='lerp',
interp_cfg=None,
interval=-1,
start_iter=0):
super().__init__()
assert isinstance(module_keys, str) or mmcv.is_tuple_of(
module_keys, str)
self.module_keys = (module_keys, ) if isinstance(module_keys,
str) else module_keys
# sanity check for the format of module keys
for k in self.module_keys:
assert k.endswith(
'_ema'), 'You should give keys that end with "_ema".'
self.interp_mode = interp_mode
self.interp_cfg = dict() if interp_cfg is None else deepcopy(
interp_cfg)
self.interval = interval
self.start_iter = start_iter
assert hasattr(
self, interp_mode
), f'Currently, we do not support {self.interp_mode} for EMA.'
self.interp_func = partial(
getattr(self, interp_mode), **self.interp_cfg)
@staticmethod
def lerp(a, b, momentum=0.999, momentum_nontrainable=0., trainable=True):
m = momentum if trainable else momentum_nontrainable
return a + (b - a) * m
def every_n_iters(self, runner, n):
if runner.iter < self.start_iter:
return True
return (runner.iter + 1 - self.start_iter) % n == 0 if n > 0 else False
@torch.no_grad()
def after_train_iter(self, runner):
if not self.every_n_iters(runner, self.interval):
return
model = runner.model.module if is_module_wrapper(
runner.model) else runner.model
for key in self.module_keys:
# get current ema states
ema_net = getattr(model, key)
states_ema = ema_net.state_dict(keep_vars=False)
            # get the current original states
net = getattr(model, key[:-4])
states_orig = net.state_dict(keep_vars=True)
for k, v in states_orig.items():
if runner.iter < self.start_iter:
states_ema[k].data.copy_(v.data)
else:
states_ema[k] = self.interp_func(
v, states_ema[k], trainable=v.requires_grad).detach()
ema_net.load_state_dict(states_ema, strict=True)
def before_run(self, runner):
model = runner.model.module if is_module_wrapper(
runner.model) else runner.model
# sanity check for ema model
for k in self.module_keys:
if not hasattr(model, k) and not hasattr(model, k[:-4]):
raise RuntimeError(
f'Cannot find both {k[:-4]} and {k} network for EMA hook.')
if not hasattr(model, k) and hasattr(model, k[:-4]):
setattr(model, k, deepcopy(getattr(model, k[:-4])))
warnings.warn(
f'We do not suggest construct and initialize EMA model {k}'
' in hook. You may explicitly define it by yourself.')
| 4,719 | 40.403509 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/utils/dist_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
import torch.distributed as dist
from mmcv.runner import get_dist_info
def sync_random_seed(seed=None, device='cuda'):
"""Make sure different ranks share the same seed.
All workers must call this function, otherwise it will deadlock.
This method is generally used in `DistributedSampler`,
because the seed should be identical across all processes
in the distributed group.
Args:
seed (int, Optional): The seed. Default to None.
device (str): The device where the seed will be put on.
Default to 'cuda'.
Returns:
int: Seed to be used.
"""
if seed is None:
seed = np.random.randint(2**31)
assert isinstance(seed, int)
rank, world_size = get_dist_info()
if world_size == 1:
return seed
if rank == 0:
random_num = torch.tensor(seed, dtype=torch.int32, device=device)
else:
random_num = torch.tensor(0, dtype=torch.int32, device=device)
dist.broadcast(random_num, src=0)
return random_num.item()
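# Usage sketch (illustrative; `_demo_sync_random_seed` is not part of the
# original file). In a non-distributed run (world_size == 1), the seed is
# returned unchanged and no broadcast is performed.
def _demo_sync_random_seed():
    assert sync_random_seed(seed=123) == 123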
| 1,108 | 29.805556 | 73 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/utils/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .dist_utils import sync_random_seed
__all__ = ['sync_random_seed']
| 121 | 23.4 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/optimizer/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .builder import build_optimizers
__all__ = ['build_optimizers']
| 118 | 22.8 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/core/optimizer/builder.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.runner import build_optimizer
def build_optimizers(model, cfgs):
"""Build multiple optimizers from configs.
If `cfgs` contains several dicts for optimizers, then a dict for each
constructed optimizers will be returned.
If `cfgs` only contains one optimizer config, the constructed optimizer
itself will be returned.
For example,
1) Multiple optimizer configs:
.. code-block:: python
optimizer_cfg = dict(
model1=dict(type='SGD', lr=lr),
model2=dict(type='SGD', lr=lr))
The return dict is
``dict('model1': torch.optim.Optimizer, 'model2': torch.optim.Optimizer)``
2) Single optimizer config:
.. code-block:: python
optimizer_cfg = dict(type='SGD', lr=lr)
The return is ``torch.optim.Optimizer``.
Args:
model (:obj:`nn.Module`): The model with parameters to be optimized.
cfgs (dict): The config dict of the optimizer.
Returns:
dict[:obj:`torch.optim.Optimizer`] | :obj:`torch.optim.Optimizer`:
The initialized optimizers.
"""
optimizers = {}
if hasattr(model, 'module'):
model = model.module
# determine whether 'cfgs' has several dicts for optimizers
is_dict_of_dict = True
for key, cfg in cfgs.items():
if not isinstance(cfg, dict):
is_dict_of_dict = False
if is_dict_of_dict:
for key, cfg in cfgs.items():
cfg_ = cfg.copy()
module = getattr(model, key)
optimizers[key] = build_optimizer(module, cfg_)
return optimizers
return build_optimizer(model, cfgs)
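# Usage sketch (illustrative; `_demo_build_optimizers` and `ToyModel` are not
# part of the original file). A dict of dicts yields one optimizer per
# sub-module, keyed by the attribute name.
def _demo_build_optimizers():
    import torch.nn as nn
    class ToyModel(nn.Module):
        def __init__(self):
            super().__init__()
            self.model1 = nn.Linear(2, 2)
            self.model2 = nn.Linear(2, 2)
    cfgs = dict(
        model1=dict(type='SGD', lr=0.01),
        model2=dict(type='Adam', lr=1e-4))
    optimizers = build_optimizers(ToyModel(), cfgs)
    print(sorted(optimizers))  # ['model1', 'model2']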
| 1,679 | 27.474576 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/base.py | # Copyright (c) OpenMMLab. All rights reserved.
from abc import ABCMeta, abstractmethod
from collections import OrderedDict
import torch
import torch.nn as nn
class BaseModel(nn.Module, metaclass=ABCMeta):
"""Base model.
All models should subclass it.
    All subclasses should overwrite:
    ``init_weights``, which initializes the model weights.
    ``forward_train``, which defines the forward pass for training.
    ``forward_test``, which defines the forward pass for testing.
    ``train_step``, which performs one training step.
"""
@abstractmethod
def init_weights(self):
"""Abstract method for initializing weight.
        All subclasses should overwrite it.
"""
@abstractmethod
def forward_train(self, imgs, labels):
"""Abstract method for training forward.
        All subclasses should overwrite it.
"""
@abstractmethod
def forward_test(self, imgs):
"""Abstract method for testing forward.
        All subclasses should overwrite it.
"""
def forward(self, imgs, labels, test_mode, **kwargs):
"""Forward function for base model.
Args:
imgs (Tensor): Input image(s).
labels (Tensor): Ground-truth label(s).
test_mode (bool): Whether in test mode.
kwargs (dict): Other arguments.
Returns:
Tensor: Forward results.
"""
if test_mode:
return self.forward_test(imgs, **kwargs)
return self.forward_train(imgs, labels, **kwargs)
@abstractmethod
def train_step(self, data_batch, optimizer):
"""Abstract method for one training step.
        All subclasses should overwrite it.
"""
def val_step(self, data_batch, **kwargs):
"""Abstract method for one validation step.
All subclass should overwrite it.
"""
output = self.forward_test(**data_batch, **kwargs)
return output
def parse_losses(self, losses):
"""Parse losses dict for different loss variants.
Args:
losses (dict): Loss dict.
Returns:
            loss (Tensor): Sum of all loss terms whose key contains 'loss'.
            log_vars (dict): Loss dict with scalar values for logging.
"""
log_vars = OrderedDict()
for loss_name, loss_value in losses.items():
if isinstance(loss_value, torch.Tensor):
log_vars[loss_name] = loss_value.mean()
elif isinstance(loss_value, list):
log_vars[loss_name] = sum(_loss.mean() for _loss in loss_value)
else:
raise TypeError(
f'{loss_name} is not a tensor or list of tensors')
loss = sum(_value for _key, _value in log_vars.items()
if 'loss' in _key)
log_vars['loss'] = loss
for name in log_vars:
log_vars[name] = log_vars[name].item()
return loss, log_vars
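# Usage sketch (illustrative; `_demo_parse_losses` is not part of the
# original file). `parse_losses` does not touch `self`, so it can be
# exercised unbound; every key containing 'loss' contributes to the total.
def _demo_parse_losses():
    losses = dict(loss_pix=torch.tensor(2.0), loss_gan=[torch.tensor(0.5)])
    loss, log_vars = BaseModel.parse_losses(None, losses)
    print(loss.item(), log_vars['loss'])  # 2.5 2.5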
| 2,948 | 26.820755 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/registry.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import MODELS as MMCV_MODELS
from mmcv.utils import Registry
MODELS = Registry('model', parent=MMCV_MODELS)
BACKBONES = MODELS
COMPONENTS = MODELS
LOSSES = MODELS
| 226 | 24.222222 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .backbones import * # noqa: F401, F403
from .base import BaseModel
from .builder import (build, build_backbone, build_component, build_loss,
build_model)
from .common import * # noqa: F401, F403
from .losses import * # noqa: F401, F403
from .registry import BACKBONES, COMPONENTS, LOSSES, MODELS
from .restorers import BasicRestorer
__all__ = [
'BaseModel', 'BasicRestorer', 'build', 'build_backbone', 'build_component',
'build_loss', 'build_model', 'BACKBONES', 'COMPONENTS', 'LOSSES', 'MODELS'
]
| 585 | 38.066667 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/builder.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv import build_from_cfg
from .registry import BACKBONES, COMPONENTS, LOSSES, MODELS
def build(cfg, registry, default_args=None):
"""Build module function.
Args:
cfg (dict): Configuration for building modules.
registry (obj): ``registry`` object.
default_args (dict, optional): Default arguments. Defaults to None.
"""
if isinstance(cfg, list):
modules = [
build_from_cfg(cfg_, registry, default_args) for cfg_ in cfg
]
return nn.Sequential(*modules)
return build_from_cfg(cfg, registry, default_args)
def build_backbone(cfg):
"""Build backbone.
Args:
cfg (dict): Configuration for building backbone.
"""
return build(cfg, BACKBONES)
def build_component(cfg):
"""Build component.
Args:
cfg (dict): Configuration for building component.
"""
return build(cfg, COMPONENTS)
def build_loss(cfg):
"""Build loss.
Args:
cfg (dict): Configuration for building loss.
"""
return build(cfg, LOSSES)
def build_model(cfg, train_cfg=None, test_cfg=None):
"""Build model.
Args:
cfg (dict): Configuration for building model.
train_cfg (dict): Training configuration. Default: None.
test_cfg (dict): Testing configuration. Default: None.
"""
return build(cfg, MODELS, dict(train_cfg=train_cfg, test_cfg=test_cfg))
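# Usage sketch (illustrative; `_demo_build_loss` is not part of the original
# file). It assumes mmedit's losses are registered, which happens once
# `mmedit.models` has been imported.
def _demo_build_loss():
    loss = build_loss(dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
    print(type(loss).__name__)  # L1Loss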
| 1,482 | 23.311475 | 75 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/restorers/basicvsr.py | # Copyright (c) OpenMMLab. All rights reserved.
import numbers
import os.path as osp
import mmcv
import numpy as np
import torch
from mmedit.core import tensor2img
from ..registry import MODELS
from .basic_restorer import BasicRestorer
@MODELS.register_module()
class BasicVSR(BasicRestorer):
"""BasicVSR model for video super-resolution.
Note that this model is used for IconVSR.
Paper:
BasicVSR: The Search for Essential Components in Video Super-Resolution
and Beyond, CVPR, 2021
Args:
generator (dict): Config for the generator structure.
pixel_loss (dict): Config for pixel-wise loss.
ensemble (dict): Config for ensemble. Default: None.
train_cfg (dict): Config for training. Default: None.
test_cfg (dict): Config for testing. Default: None.
pretrained (str): Path for pretrained model. Default: None.
"""
def __init__(self,
generator,
pixel_loss,
ensemble=None,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__(generator, pixel_loss, train_cfg, test_cfg,
pretrained)
# fix pre-trained networks
self.fix_iter = train_cfg.get('fix_iter', 0) if train_cfg else 0
self.is_weight_fixed = False
# count training steps
self.register_buffer('step_counter', torch.zeros(1))
# ensemble
self.forward_ensemble = None
if ensemble is not None:
if ensemble['type'] == 'SpatialTemporalEnsemble':
from mmedit.models.common.ensemble import \
SpatialTemporalEnsemble
is_temporal = ensemble.get('is_temporal_ensemble', False)
self.forward_ensemble = SpatialTemporalEnsemble(is_temporal)
else:
raise NotImplementedError(
'Currently support only '
'"SpatialTemporalEnsemble", but got type '
f'[{ensemble["type"]}]')
def check_if_mirror_extended(self, lrs):
"""Check whether the input is a mirror-extended sequence.
If mirror-extended, the i-th (i=0, ..., t-1) frame is equal to the
(t-1-i)-th frame.
Args:
lrs (tensor): Input LR images with shape (n, t, c, h, w)
"""
is_mirror_extended = False
if lrs.size(1) % 2 == 0:
lrs_1, lrs_2 = torch.chunk(lrs, 2, dim=1)
if torch.norm(lrs_1 - lrs_2.flip(1)) == 0:
is_mirror_extended = True
return is_mirror_extended
def train_step(self, data_batch, optimizer):
"""Train step.
Args:
data_batch (dict): A batch of data.
optimizer (obj): Optimizer.
Returns:
dict: Returned output.
"""
# fix SPyNet and EDVR at the beginning
if self.step_counter < self.fix_iter:
if not self.is_weight_fixed:
self.is_weight_fixed = True
for k, v in self.generator.named_parameters():
if 'spynet' in k or 'edvr' in k:
v.requires_grad_(False)
elif self.step_counter == self.fix_iter:
# train all the parameters
self.generator.requires_grad_(True)
outputs = self(**data_batch, test_mode=False)
loss, log_vars = self.parse_losses(outputs.pop('losses'))
# optimize
optimizer['generator'].zero_grad()
loss.backward()
optimizer['generator'].step()
self.step_counter += 1
outputs.update({'log_vars': log_vars})
return outputs
def evaluate(self, output, gt):
"""Evaluation function.
If the output contains multiple frames, we compute the metric
        frame by frame and take the average.
Args:
output (Tensor): Model output with shape (n, t, c, h, w).
gt (Tensor): GT Tensor with shape (n, t, c, h, w).
Returns:
dict: Evaluation results.
"""
crop_border = self.test_cfg.crop_border
convert_to = self.test_cfg.get('convert_to', None)
eval_result = dict()
for metric in self.test_cfg.metrics:
if output.ndim == 5: # a sequence: (n, t, c, h, w)
avg = []
for i in range(0, output.size(1)):
output_i = tensor2img(output[:, i, :, :, :])
gt_i = tensor2img(gt[:, i, :, :, :])
avg.append(self.allowed_metrics[metric](
output_i, gt_i, crop_border, convert_to=convert_to))
eval_result[metric] = np.mean(avg)
            elif output.ndim == 4:  # an image: (n, c, h, w), for Vimeo-90K-T
output_img = tensor2img(output)
gt_img = tensor2img(gt)
value = self.allowed_metrics[metric](
output_img, gt_img, crop_border, convert_to=convert_to)
eval_result[metric] = value
return eval_result
def forward_test(self,
lq,
gt=None,
meta=None,
save_image=False,
save_path=None,
iteration=None):
"""Testing forward function.
Args:
lq (Tensor): LQ Tensor with shape (n, t, c, h, w).
gt (Tensor): GT Tensor with shape (n, t, c, h, w). Default: None.
save_image (bool): Whether to save image. Default: False.
save_path (str): Path to save image. Default: None.
iteration (int): Iteration for the saving image name.
Default: None.
Returns:
dict: Output results.
"""
with torch.no_grad():
if self.forward_ensemble is not None:
output = self.forward_ensemble(lq, self.generator)
else:
output = self.generator(lq)
# If the GT is an image (i.e. the center frame), the output sequence is
# turned to an image.
if gt is not None and gt.ndim == 4:
t = output.size(1)
if self.check_if_mirror_extended(lq): # with mirror extension
output = 0.5 * (output[:, t // 4] + output[:, -1 - t // 4])
else: # without mirror extension
output = output[:, t // 2]
if self.test_cfg is not None and self.test_cfg.get('metrics', None):
assert gt is not None, (
'evaluation with metrics must have gt images.')
results = dict(eval_result=self.evaluate(output, gt))
else:
results = dict(lq=lq.cpu(), output=output.cpu())
if gt is not None:
results['gt'] = gt.cpu()
# save image
if save_image:
if output.ndim == 4: # an image, key = 000001/0000 (Vimeo-90K)
img_name = meta[0]['key'].replace('/', '_')
if isinstance(iteration, numbers.Number):
save_path = osp.join(
save_path, f'{img_name}-{iteration + 1:06d}.png')
elif iteration is None:
save_path = osp.join(save_path, f'{img_name}.png')
else:
raise ValueError('iteration should be number or None, '
f'but got {type(iteration)}')
mmcv.imwrite(tensor2img(output), save_path)
elif output.ndim == 5: # a sequence, key = 000
folder_name = meta[0]['key'].split('/')[0]
for i in range(0, output.size(1)):
if isinstance(iteration, numbers.Number):
save_path_i = osp.join(
save_path, folder_name,
f'{i:08d}-{iteration + 1:06d}.png')
elif iteration is None:
save_path_i = osp.join(save_path, folder_name,
f'{i:08d}.png')
else:
raise ValueError('iteration should be number or None, '
f'but got {type(iteration)}')
mmcv.imwrite(
tensor2img(output[:, i, :, :, :]), save_path_i)
return results
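# Usage sketch (illustrative; `_demo_check_if_mirror_extended` is not part of
# the original file). A sequence concatenated with its own temporal flip is
# mirror-extended; the check only inspects the tensor, so it is called
# unbound here.
def _demo_check_if_mirror_extended():
    lrs = torch.rand(1, 3, 3, 8, 8)
    lrs = torch.cat([lrs, lrs.flip(1)], dim=1)  # (n, 2t, c, h, w)
    assert BasicVSR.check_if_mirror_extended(None, lrs)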
| 8,430 | 36.471111 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/restorers/basic_restorer.py | # Copyright (c) OpenMMLab. All rights reserved.
import numbers
import os.path as osp
import mmcv
from mmcv.runner import auto_fp16
from mmedit.core import psnr, ssim, tensor2img
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
@MODELS.register_module()
class BasicRestorer(BaseModel):
"""Basic model for image restoration.
    It must contain a generator that takes an image as input and outputs a
restored image. It also has a pixel-wise loss for training.
    The subclasses should overwrite the functions `forward_train`,
`forward_test` and `train_step`.
Args:
generator (dict): Config for the generator structure.
pixel_loss (dict): Config for pixel-wise loss.
train_cfg (dict): Config for training. Default: None.
test_cfg (dict): Config for testing. Default: None.
pretrained (str): Path for pretrained model. Default: None.
"""
allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}
def __init__(self,
generator,
pixel_loss,
train_cfg=None,
test_cfg=None,
pretrained=None):
super().__init__()
self.train_cfg = train_cfg
self.test_cfg = test_cfg
# support fp16
self.fp16_enabled = False
# generator
self.generator = build_backbone(generator)
self.init_weights(pretrained)
# loss
self.pixel_loss = build_loss(pixel_loss)
def init_weights(self, pretrained=None):
"""Init weights for models.
Args:
pretrained (str, optional): Path for pretrained weights. If given
None, pretrained weights will not be loaded. Defaults to None.
"""
self.generator.init_weights(pretrained)
@auto_fp16(apply_to=('lq', ))
def forward(self, lq, gt=None, test_mode=False, **kwargs):
"""Forward function.
Args:
lq (Tensor): Input lq images.
gt (Tensor): Ground-truth image. Default: None.
test_mode (bool): Whether in test mode or not. Default: False.
kwargs (dict): Other arguments.
"""
if test_mode:
return self.forward_test(lq, gt, **kwargs)
return self.forward_train(lq, gt)
def forward_train(self, lq, gt):
"""Training forward function.
Args:
lq (Tensor): LQ Tensor with shape (n, c, h, w).
gt (Tensor): GT Tensor with shape (n, c, h, w).
Returns:
Tensor: Output tensor.
"""
losses = dict()
output = self.generator(lq)
loss_pix = self.pixel_loss(output, gt)
losses['loss_pix'] = loss_pix
outputs = dict(
losses=losses,
num_samples=len(gt.data),
results=dict(lq=lq.cpu(), gt=gt.cpu(), output=output.cpu()))
return outputs
def evaluate(self, output, gt):
"""Evaluation function.
Args:
output (Tensor): Model output with shape (n, c, h, w).
gt (Tensor): GT Tensor with shape (n, c, h, w).
Returns:
dict: Evaluation results.
"""
crop_border = self.test_cfg.crop_border
output = tensor2img(output)
gt = tensor2img(gt)
eval_result = dict()
for metric in self.test_cfg.metrics:
eval_result[metric] = self.allowed_metrics[metric](output, gt,
crop_border)
return eval_result
def forward_test(self,
lq,
gt=None,
meta=None,
save_image=False,
save_path=None,
iteration=None):
"""Testing forward function.
Args:
lq (Tensor): LQ Tensor with shape (n, c, h, w).
gt (Tensor): GT Tensor with shape (n, c, h, w). Default: None.
save_image (bool): Whether to save image. Default: False.
save_path (str): Path to save image. Default: None.
iteration (int): Iteration for the saving image name.
Default: None.
Returns:
dict: Output results.
"""
output = self.generator(lq)
if self.test_cfg is not None and self.test_cfg.get('metrics', None):
assert gt is not None, (
'evaluation with metrics must have gt images.')
results = dict(eval_result=self.evaluate(output, gt))
else:
results = dict(lq=lq.cpu(), output=output.cpu())
if gt is not None:
results['gt'] = gt.cpu()
# save image
if save_image:
lq_path = meta[0]['lq_path']
folder_name = osp.splitext(osp.basename(lq_path))[0]
if isinstance(iteration, numbers.Number):
save_path = osp.join(save_path, folder_name,
f'{folder_name}-{iteration + 1:06d}.png')
elif iteration is None:
save_path = osp.join(save_path, f'{folder_name}.png')
else:
raise ValueError('iteration should be number or None, '
f'but got {type(iteration)}')
mmcv.imwrite(tensor2img(output), save_path)
return results
def forward_dummy(self, img):
"""Used for computing network FLOPs.
Args:
img (Tensor): Input image.
Returns:
Tensor: Output image.
"""
out = self.generator(img)
return out
def train_step(self, data_batch, optimizer):
"""Train step.
Args:
data_batch (dict): A batch of data.
optimizer (obj): Optimizer.
Returns:
dict: Returned output.
"""
outputs = self(**data_batch, test_mode=False)
loss, log_vars = self.parse_losses(outputs.pop('losses'))
# optimize
optimizer['generator'].zero_grad()
loss.backward()
optimizer['generator'].step()
outputs.update({'log_vars': log_vars})
return outputs
def val_step(self, data_batch, **kwargs):
"""Validation step.
Args:
data_batch (dict): A batch of data.
kwargs (dict): Other arguments for ``val_step``.
Returns:
dict: Returned output.
"""
output = self.forward_test(**data_batch, **kwargs)
return output
| 6,558 | 30.085308 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/restorers/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .basic_restorer import BasicRestorer
from .basicvsr import BasicVSR
__all__ = ['BasicRestorer', 'BasicVSR']
| 162 | 26.166667 | 47 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/flow_warp.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
import torch.nn.functional as F
def flow_warp(x,
flow,
interpolation='bilinear',
padding_mode='zeros',
align_corners=True):
"""Warp an image or a feature map with optical flow.
Args:
x (Tensor): Tensor with size (n, c, h, w).
        flow (Tensor): Tensor with size (n, h, w, 2). The last dimension has
            two channels, denoting the relative offsets along width and
            height.
Note that the values are not normalized to [-1, 1].
interpolation (str): Interpolation mode: 'nearest' or 'bilinear'.
Default: 'bilinear'.
padding_mode (str): Padding mode: 'zeros' or 'border' or 'reflection'.
Default: 'zeros'.
align_corners (bool): Whether align corners. Default: True.
Returns:
Tensor: Warped image or feature map.
"""
if x.size()[-2:] != flow.size()[1:3]:
raise ValueError(f'The spatial sizes of input ({x.size()[-2:]}) and '
f'flow ({flow.size()[1:3]}) are not the same.')
_, _, h, w = x.size()
# create mesh grid
grid_y, grid_x = torch.meshgrid(torch.arange(0, h), torch.arange(0, w))
grid = torch.stack((grid_x, grid_y), 2).type_as(x) # (h, w, 2)
grid.requires_grad = False
grid_flow = grid + flow
# scale grid_flow to [-1,1]
grid_flow_x = 2.0 * grid_flow[:, :, :, 0] / max(w - 1, 1) - 1.0
grid_flow_y = 2.0 * grid_flow[:, :, :, 1] / max(h - 1, 1) - 1.0
grid_flow = torch.stack((grid_flow_x, grid_flow_y), dim=3)
output = F.grid_sample(
x,
grid_flow,
mode=interpolation,
padding_mode=padding_mode,
align_corners=align_corners)
return output
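# Usage sketch (illustrative; `_demo_flow_warp` is not part of the original
# file). Warping with an all-zero flow is an identity mapping, which makes a
# quick sanity check.
def _demo_flow_warp():
    x = torch.rand(1, 3, 8, 8)
    zero_flow = torch.zeros(1, 8, 8, 2)
    assert torch.allclose(flow_warp(x, zero_flow), x, atol=1e-5)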
| 1,781 | 36.125 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/aspp.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmcv.cnn import ConvModule
from torch import nn
from torch.nn import functional as F
from .separable_conv_module import DepthwiseSeparableConvModule
class ASPPPooling(nn.Sequential):
def __init__(self, in_channels, out_channels, conv_cfg, norm_cfg, act_cfg):
super().__init__(
nn.AdaptiveAvgPool2d(1),
ConvModule(
in_channels,
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
def forward(self, x):
size = x.shape[-2:]
for mod in self:
x = mod(x)
return F.interpolate(
x, size=size, mode='bilinear', align_corners=False)
class ASPP(nn.Module):
"""ASPP module from DeepLabV3.
The code is adopted from
https://github.com/pytorch/vision/blob/master/torchvision/models/
segmentation/deeplabv3.py
For more information about the module:
`"Rethinking Atrous Convolution for Semantic Image Segmentation"
<https://arxiv.org/abs/1706.05587>`_.
Args:
in_channels (int): Input channels of the module.
out_channels (int): Output channels of the module.
mid_channels (int): Output channels of the intermediate ASPP conv
modules.
        dilations (Sequence[int]): Dilation rates of the three ASPP conv
            modules. Default: (12, 24, 36).
conv_cfg (dict): Config dict for convolution layer. If "None",
nn.Conv2d will be applied. Default: None.
norm_cfg (dict): Config dict for normalization layer.
Default: dict(type='BN').
act_cfg (dict): Config dict for activation layer.
Default: dict(type='ReLU').
        separable_conv (bool): Whether to replace the normal conv with a
            depthwise separable conv, which is faster. Default: False.
"""
def __init__(self,
in_channels,
out_channels=256,
mid_channels=256,
dilations=(12, 24, 36),
conv_cfg=None,
norm_cfg=dict(type='BN'),
act_cfg=dict(type='ReLU'),
separable_conv=False):
super().__init__()
if separable_conv:
conv_module = DepthwiseSeparableConvModule
else:
conv_module = ConvModule
modules = []
modules.append(
ConvModule(
in_channels,
mid_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
for dilation in dilations:
modules.append(
conv_module(
in_channels,
mid_channels,
3,
padding=dilation,
dilation=dilation,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg))
modules.append(
ASPPPooling(in_channels, mid_channels, conv_cfg, norm_cfg,
act_cfg))
self.convs = nn.ModuleList(modules)
self.project = nn.Sequential(
ConvModule(
5 * mid_channels,
out_channels,
1,
conv_cfg=conv_cfg,
norm_cfg=norm_cfg,
act_cfg=act_cfg), nn.Dropout(0.5))
def forward(self, x):
"""Forward function for ASPP module.
Args:
x (Tensor): Input tensor with shape (N, C, H, W).
Returns:
Tensor: Output tensor.
"""
res = []
for conv in self.convs:
res.append(conv(x))
res = torch.cat(res, dim=1)
return self.project(res)
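# Usage sketch (illustrative; `_demo_aspp` is not part of the original file).
# Eval mode is used because the global-pooling branch produces 1x1 maps,
# which BatchNorm cannot normalize in training mode with a single sample.
def _demo_aspp():
    aspp = ASPP(in_channels=64, out_channels=32, mid_channels=16).eval()
    with torch.no_grad():
        out = aspp(torch.rand(1, 64, 32, 32))
    assert out.shape == (1, 32, 32, 32)  # 5 branches fused back to 32 chans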
| 3,861 | 29.650794 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/sr_backbone_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import constant_init, kaiming_init
from mmcv.utils.parrots_wrapper import _BatchNorm
def default_init_weights(module, scale=1):
"""Initialize network weights.
Args:
modules (nn.Module): Modules to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks.
"""
for m in module.modules():
if isinstance(m, nn.Conv2d):
kaiming_init(m, a=0, mode='fan_in', bias=0)
m.weight.data *= scale
elif isinstance(m, nn.Linear):
kaiming_init(m, a=0, mode='fan_in', bias=0)
m.weight.data *= scale
elif isinstance(m, _BatchNorm):
constant_init(m.weight, val=1, bias=0)
def make_layer(block, num_blocks, **kwarg):
"""Make layers by stacking the same blocks.
Args:
block (nn.module): nn.module class for basic block.
num_blocks (int): number of blocks.
Returns:
nn.Sequential: Stacked blocks in nn.Sequential.
"""
layers = []
for _ in range(num_blocks):
layers.append(block(**kwarg))
return nn.Sequential(*layers)
class ResidualBlockNoBN(nn.Module):
"""Residual block without BN.
It has a style of:
::
---Conv-ReLU-Conv-+-
|________________|
Args:
mid_channels (int): Channel number of intermediate features.
Default: 64.
res_scale (float): Used to scale the residual before addition.
Default: 1.0.
"""
def __init__(self, mid_channels=64, res_scale=1.0):
super().__init__()
self.res_scale = res_scale
self.conv1 = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)
self.conv2 = nn.Conv2d(mid_channels, mid_channels, 3, 1, 1, bias=True)
self.relu = nn.ReLU(inplace=True)
# if res_scale < 1.0, use the default initialization, as in EDSR.
# if res_scale = 1.0, use scaled kaiming_init, as in MSRResNet.
if res_scale == 1.0:
self.init_weights()
def init_weights(self):
"""Initialize weights for ResidualBlockNoBN.
Initialization methods like `kaiming_init` are for VGG-style
modules. For modules with residual paths, using smaller std is
better for stability and performance. We empirically use 0.1.
See more details in "ESRGAN: Enhanced Super-Resolution Generative
Adversarial Networks"
"""
for m in [self.conv1, self.conv2]:
default_init_weights(m, 0.1)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (n, c, h, w).
Returns:
Tensor: Forward results.
"""
identity = x
out = self.conv2(self.relu(self.conv1(x)))
return identity + out * self.res_scale
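# Usage sketch (illustrative; `_demo_make_layer` is not part of the original
# file). Stacking residual blocks with `make_layer` preserves the input
# shape.
def _demo_make_layer():
    import torch
    trunk = make_layer(ResidualBlockNoBN, 2, mid_channels=8)
    x = torch.rand(1, 8, 16, 16)
    assert trunk(x).shape == x.shape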
| 2,919 | 28.795918 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/model_utils.py | # Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
def set_requires_grad(nets, requires_grad=False):
"""Set requires_grad for all the networks.
Args:
nets (nn.Module | list[nn.Module]): A list of networks or a single
network.
requires_grad (bool): Whether the networks require gradients or not
"""
if not isinstance(nets, list):
nets = [nets]
for net in nets:
if net is not None:
for param in net.parameters():
param.requires_grad = requires_grad
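# Usage sketch (illustrative; `_demo_set_requires_grad` is not part of the
# original file). Typical use is freezing a discriminator while the
# generator is being updated.
def _demo_set_requires_grad():
    import torch.nn as nn
    net = nn.Conv2d(3, 3, 1)
    set_requires_grad(net, False)
    assert not any(p.requires_grad for p in net.parameters())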
def extract_bbox_patch(bbox, img, channel_first=True):
"""Extract patch from a given bbox
Args:
bbox (torch.Tensor | numpy.array): Bbox with (top, left, h, w). If
`img` has batch dimension, the `bbox` must be stacked at first
dimension. The shape should be (4,) or (n, 4).
img (torch.Tensor | numpy.array): Image data to be extracted. If
organized in batch dimension, the batch dimension must be the first
order like (n, h, w, c) or (n, c, h, w).
channel_first (bool): If True, the channel dimension of img is before
height and width, e.g. (c, h, w). Otherwise, the img shape (samples
in the batch) is like (h, w, c).
Returns:
(torch.Tensor | numpy.array): Extracted patches. The dimension of the \
output should be the same as `img`.
"""
def _extract(bbox, img):
assert len(bbox) == 4
t, l, h, w = bbox
if channel_first:
img_patch = img[..., t:t + h, l:l + w]
else:
img_patch = img[t:t + h, l:l + w, ...]
return img_patch
input_size = img.shape
assert len(input_size) == 3 or len(input_size) == 4
bbox_size = bbox.shape
assert bbox_size == (4, ) or (len(bbox_size) == 2
and bbox_size[0] == input_size[0])
# images with batch dimension
if len(input_size) == 4:
output_list = []
for i in range(input_size[0]):
img_patch_ = _extract(bbox[i], img[i:i + 1, ...])
output_list.append(img_patch_)
if isinstance(img, torch.Tensor):
img_patch = torch.cat(output_list, dim=0)
else:
img_patch = np.concatenate(output_list, axis=0)
# standardize image
else:
img_patch = _extract(bbox, img)
return img_patch
def scale_bbox(bbox, target_size):
"""Modify bbox to target size.
The original bbox will be enlarged to the target size with the original
bbox in the center of the new bbox.
Args:
bbox (np.ndarray | torch.Tensor): Bboxes to be modified. Bbox can
be in batch or not. The shape should be (4,) or (n, 4).
target_size (tuple[int]): Target size of final bbox.
Returns:
(np.ndarray | torch.Tensor): Modified bboxes.
"""
def _mod(bbox, target_size):
top_ori, left_ori, h_ori, w_ori = bbox
h, w = target_size
assert h >= h_ori and w >= w_ori
top = int(max(0, top_ori - (h - h_ori) // 2))
left = int(max(0, left_ori - (w - w_ori) // 2))
if isinstance(bbox, torch.Tensor):
bbox_new = torch.Tensor([top, left, h, w]).type_as(bbox)
else:
bbox_new = np.asarray([top, left, h, w])
return bbox_new
if isinstance(bbox, torch.Tensor):
bbox_new = torch.zeros_like(bbox)
elif isinstance(bbox, np.ndarray):
bbox_new = np.zeros_like(bbox)
else:
        raise TypeError('bbox must be torch.Tensor or numpy.ndarray, '
                        f'but got type {type(bbox)}')
bbox_shape = list(bbox.shape)
if len(bbox_shape) == 2:
for i in range(bbox_shape[0]):
bbox_new[i, :] = _mod(bbox[i], target_size)
else:
bbox_new = _mod(bbox, target_size)
return bbox_new
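# Usage sketch (illustrative; `_demo_scale_bbox` is not part of the original
# file). A 4x4 bbox at (10, 10) grows to 8x8 while staying centered on the
# original box (clipped at the image origin if needed).
def _demo_scale_bbox():
    bbox = np.asarray([10, 10, 4, 4])  # (top, left, h, w)
    print(scale_bbox(bbox, (8, 8)))  # [8 8 8 8]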
def extract_around_bbox(img, bbox, target_size, channel_first=True):
"""Extract patches around the given bbox.
    Args:
        img (torch.Tensor | numpy.array): Image data from which patches are
            extracted.
        bbox (np.ndarray | torch.Tensor): Bboxes to be modified. Bbox can
            be in batch or not.
        target_size (List(int)): Target size of final bbox.
        channel_first (bool): If True, the channel dimension of img is before
            height and width. Default: True.
Returns:
(torch.Tensor | numpy.array): Extracted patches. The dimension of the \
output should be the same as `img`.
"""
bbox_new = scale_bbox(bbox, target_size)
img_patch = extract_bbox_patch(bbox_new, img, channel_first=channel_first)
return img_patch, bbox_new
| 4,502 | 31.868613 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/separable_conv_module.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import ConvModule
class DepthwiseSeparableConvModule(nn.Module):
"""Depthwise separable convolution module.
See https://arxiv.org/pdf/1704.04861.pdf for details.
    This module can replace a ConvModule, with its conv block replaced by
    two conv blocks: a depthwise conv block and a pointwise conv block. The
    depthwise conv block contains depthwise-conv/norm/activation layers. The
    pointwise conv block contains pointwise-conv/norm/activation layers. It
    should be
noted that there will be norm/activation layer in the depthwise conv block
if ``norm_cfg`` and ``act_cfg`` are specified.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): Same as nn.Conv2d.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
stride (int or tuple[int]): Same as nn.Conv2d. Default: 1.
padding (int or tuple[int]): Same as nn.Conv2d. Default: 0.
dilation (int or tuple[int]): Same as nn.Conv2d. Default: 1.
norm_cfg (dict): Default norm config for both depthwise ConvModule and
pointwise ConvModule. Default: None.
act_cfg (dict): Default activation config for both depthwise ConvModule
and pointwise ConvModule. Default: dict(type='ReLU').
dw_norm_cfg (dict): Norm config of depthwise ConvModule. If it is
'default', it will be the same as ``norm_cfg``. Default: 'default'.
dw_act_cfg (dict): Activation config of depthwise ConvModule. If it is
'default', it will be the same as ``act_cfg``. Default: 'default'.
pw_norm_cfg (dict): Norm config of pointwise ConvModule. If it is
'default', it will be the same as `norm_cfg`. Default: 'default'.
pw_act_cfg (dict): Activation config of pointwise ConvModule. If it is
'default', it will be the same as ``act_cfg``. Default: 'default'.
kwargs (optional): Other shared arguments for depthwise and pointwise
ConvModule. See ConvModule for ref.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
norm_cfg=None,
act_cfg=dict(type='ReLU'),
dw_norm_cfg='default',
dw_act_cfg='default',
pw_norm_cfg='default',
pw_act_cfg='default',
**kwargs):
super().__init__()
assert 'groups' not in kwargs, 'groups should not be specified'
# if norm/activation config of depthwise/pointwise ConvModule is not
# specified, use default config.
dw_norm_cfg = dw_norm_cfg if dw_norm_cfg != 'default' else norm_cfg
dw_act_cfg = dw_act_cfg if dw_act_cfg != 'default' else act_cfg
pw_norm_cfg = pw_norm_cfg if pw_norm_cfg != 'default' else norm_cfg
pw_act_cfg = pw_act_cfg if pw_act_cfg != 'default' else act_cfg
# depthwise convolution
self.depthwise_conv = ConvModule(
in_channels,
in_channels,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
groups=in_channels,
norm_cfg=dw_norm_cfg,
act_cfg=dw_act_cfg,
**kwargs)
self.pointwise_conv = ConvModule(
in_channels,
out_channels,
1,
norm_cfg=pw_norm_cfg,
act_cfg=pw_act_cfg,
**kwargs)
def forward(self, x):
"""Forward function.
Args:
x (Tensor): Input tensor with shape (N, C, H, W).
Returns:
Tensor: Output tensor.
"""
x = self.depthwise_conv(x)
x = self.pointwise_conv(x)
return x
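# Usage sketch (illustrative; `_demo_depthwise_separable` is not part of the
# original file). The depthwise + pointwise factorization uses far fewer
# parameters than a dense 3x3 convolution with the same channel counts.
def _demo_depthwise_separable():
    import torch
    dsconv = DepthwiseSeparableConvModule(16, 32, 3, padding=1)
    dense = nn.Conv2d(16, 32, 3, padding=1)
    print(sum(p.numel() for p in dsconv.parameters()),
          sum(p.numel() for p in dense.parameters()))  # 704 4640
    assert dsconv(torch.rand(1, 16, 8, 8)).shape == (1, 32, 8, 8)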
| 3,907 | 38.877551 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/linear_module.py | # Copyright (c) OpenMMLab. All rights reserved.
import torch.nn as nn
from mmcv.cnn import build_activation_layer, kaiming_init
class LinearModule(nn.Module):
"""A linear block that contains linear/norm/activation layers.
    For low-level vision tasks, we additionally support spectral norm.
Args:
in_features (int): Same as nn.Linear.
out_features (int): Same as nn.Linear.
bias (bool): Same as nn.Linear.
act_cfg (dict): Config dict for activation layer, "relu" by default.
inplace (bool): Whether to use inplace mode for activation.
with_spectral_norm (bool): Whether use spectral norm in linear module.
order (tuple[str]): The order of linear/activation layers. It is a
sequence of "linear", "norm" and "act". Examples are
("linear", "act") and ("act", "linear").
"""
def __init__(self,
in_features,
out_features,
bias=True,
act_cfg=dict(type='ReLU'),
inplace=True,
with_spectral_norm=False,
order=('linear', 'act')):
super().__init__()
assert act_cfg is None or isinstance(act_cfg, dict)
self.act_cfg = act_cfg
self.inplace = inplace
self.with_spectral_norm = with_spectral_norm
self.order = order
assert isinstance(self.order, tuple) and len(self.order) == 2
assert set(order) == set(['linear', 'act'])
self.with_activation = act_cfg is not None
self.with_bias = bias
# build linear layer
self.linear = nn.Linear(in_features, out_features, bias=bias)
# export the attributes of self.linear to a higher level for
# convenience
self.in_features = self.linear.in_features
self.out_features = self.linear.out_features
if self.with_spectral_norm:
self.linear = nn.utils.spectral_norm(self.linear)
# build activation layer
if self.with_activation:
act_cfg_ = act_cfg.copy()
act_cfg_.setdefault('inplace', inplace)
self.activate = build_activation_layer(act_cfg_)
# Use msra init by default
self.init_weights()
def init_weights(self):
if self.with_activation and self.act_cfg['type'] == 'LeakyReLU':
nonlinearity = 'leaky_relu'
a = self.act_cfg.get('negative_slope', 0.01)
else:
nonlinearity = 'relu'
a = 0
kaiming_init(self.linear, a=a, nonlinearity=nonlinearity)
def forward(self, x, activate=True):
"""Forward Function.
Args:
x (torch.Tensor): Input tensor with shape of :math:`(n, *, c)`.
Same as ``torch.nn.Linear``.
activate (bool, optional): Whether to use activation layer.
Defaults to True.
Returns:
torch.Tensor: Same as ``torch.nn.Linear``.
"""
for layer in self.order:
if layer == 'linear':
x = self.linear(x)
elif layer == 'act' and activate and self.with_activation:
x = self.activate(x)
return x
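# Usage sketch (illustrative; `_demo_linear_module` is not part of the
# original file). A pre-activation block is obtained simply by swapping the
# layer order.
def _demo_linear_module():
    import torch
    pre_act = LinearModule(4, 2, order=('act', 'linear'))
    assert pre_act(torch.rand(3, 4)).shape == (3, 2)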
| 3,204 | 34.611111 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/contextual_attention.py | # Copyright (c) OpenMMLab. All rights reserved.
from functools import partial
import torch
import torch.nn as nn
import torch.nn.functional as F
class ContextualAttentionModule(nn.Module):
"""Contexture attention module.
The details of this module can be found in:
Generative Image Inpainting with Contextual Attention
Args:
unfold_raw_kernel_size (int): Kernel size used in unfolding raw
feature. Default: 4.
unfold_raw_stride (int): Stride used in unfolding raw feature. Default:
2.
unfold_raw_padding (int): Padding used in unfolding raw feature.
Default: 1.
unfold_corr_kernel_size (int): Kernel size used in unfolding
context for computing correlation maps. Default: 3.
unfold_corr_stride (int): Stride used in unfolding context for
computing correlation maps. Default: 1.
unfold_corr_dilation (int): Dilation used in unfolding context for
computing correlation maps. Default: 1.
unfold_corr_padding (int): Padding used in unfolding context for
computing correlation maps. Default: 1.
        scale (float): The rescale factor used to resize the input features.
Default: 0.5.
fuse_kernel_size (int): The kernel size used in fusion module.
Default: 3.
softmax_scale (float): The scale factor for softmax function.
Default: 10.
return_attention_score (bool): If True, the attention score will be
returned. Default: True.
"""
def __init__(self,
unfold_raw_kernel_size=4,
unfold_raw_stride=2,
unfold_raw_padding=1,
unfold_corr_kernel_size=3,
unfold_corr_stride=1,
unfold_corr_dilation=1,
unfold_corr_padding=1,
scale=0.5,
fuse_kernel_size=3,
softmax_scale=10,
return_attention_score=True):
super().__init__()
self.unfold_raw_kernel_size = unfold_raw_kernel_size
self.unfold_raw_stride = unfold_raw_stride
self.unfold_raw_padding = unfold_raw_padding
self.unfold_corr_kernel_size = unfold_corr_kernel_size
self.unfold_corr_stride = unfold_corr_stride
self.unfold_corr_dilation = unfold_corr_dilation
self.unfold_corr_padding = unfold_corr_padding
self.scale = scale
self.fuse_kernel_size = fuse_kernel_size
self.with_fuse_correlation = fuse_kernel_size > 1
self.softmax_scale = softmax_scale
self.return_attention_score = return_attention_score
if self.with_fuse_correlation:
assert fuse_kernel_size % 2 == 1
fuse_kernel = torch.eye(fuse_kernel_size).view(
1, 1, fuse_kernel_size, fuse_kernel_size)
self.register_buffer('fuse_kernel', fuse_kernel)
padding = int((fuse_kernel_size - 1) // 2)
self.fuse_conv = partial(F.conv2d, padding=padding, stride=1)
self.softmax = nn.Softmax(dim=1)
def forward(self, x, context, mask=None):
"""Forward Function.
Args:
x (torch.Tensor): Tensor with shape (n, c, h, w).
context (torch.Tensor): Tensor with shape (n, c, h, w).
mask (torch.Tensor): Tensor with shape (n, 1, h, w). Default: None.
Returns:
tuple(torch.Tensor): Features after contextural attention.
"""
# raw features to be used in copy (deconv)
raw_context = context
raw_context_cols = self.im2col(
raw_context,
kernel_size=self.unfold_raw_kernel_size,
stride=self.unfold_raw_stride,
padding=self.unfold_raw_padding,
normalize=False,
return_cols=True)
# resize the feature to reduce computational cost
x = F.interpolate(x, scale_factor=self.scale)
context = F.interpolate(context, scale_factor=self.scale)
context_cols = self.im2col(
context,
kernel_size=self.unfold_corr_kernel_size,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation,
normalize=True,
return_cols=True)
h_unfold, w_unfold = self.calculate_unfold_hw(
context.size()[-2:],
kernel_size=self.unfold_corr_kernel_size,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation,
)
# reshape context_cols to
# (n*h_unfold*w_unfold, c, unfold_mks, unfold_mks)
# 'mks' is short for 'mask_kernel_size'
context_cols = context_cols.reshape(-1, *context_cols.shape[2:])
# the shape of correlation map should be:
# (n, h_unfold*w_unfold, h', w')
correlation_map = self.patch_correlation(x, context_cols)
# fuse correlation map to enlarge consistent attention region.
if self.with_fuse_correlation:
correlation_map = self.fuse_correlation_map(
correlation_map, h_unfold, w_unfold)
correlation_map = self.mask_correlation_map(correlation_map, mask=mask)
attention_score = self.softmax(correlation_map * self.softmax_scale)
raw_context_filter = raw_context_cols.reshape(
-1, *raw_context_cols.shape[2:])
output = self.patch_copy_deconv(attention_score, raw_context_filter)
# deconv will cause overlap and we need to remove the effects of that
overlap_factor = self.calculate_overlap_factor(attention_score)
output /= overlap_factor
if self.return_attention_score:
n, _, h_s, w_s = attention_score.size()
attention_score = attention_score.view(n, h_unfold, w_unfold, h_s,
w_s)
return output, attention_score
return output
def patch_correlation(self, x, kernel):
"""Calculate patch correlation.
Args:
x (torch.Tensor): Input tensor.
kernel (torch.Tensor): Kernel tensor.
Returns:
torch.Tensor: Tensor with shape of (n, l, h, w).
"""
n, _, h_in, w_in = x.size()
patch_corr = F.conv2d(
x.view(1, -1, h_in, w_in),
kernel,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation,
groups=n)
h_out, w_out = patch_corr.size()[-2:]
return patch_corr.view(n, -1, h_out, w_out)
def patch_copy_deconv(self, attention_score, context_filter):
"""Copy patches using deconv.
Args:
attention_score (torch.Tensor): Tensor with shape of (n, l , h, w).
context_filter (torch.Tensor): Filter kernel.
Returns:
torch.Tensor: Tensor with shape of (n, c, h, w).
"""
n, _, h, w = attention_score.size()
attention_score = attention_score.view(1, -1, h, w)
output = F.conv_transpose2d(
attention_score,
context_filter,
stride=self.unfold_raw_stride,
padding=self.unfold_raw_padding,
groups=n)
h_out, w_out = output.size()[-2:]
return output.view(n, -1, h_out, w_out)
def fuse_correlation_map(self, correlation_map, h_unfold, w_unfold):
"""Fuse correlation map.
        This operation fuses the correlation map to enlarge consistent
        correlation regions.
        The mechanism is simple: a standard identity ('eye') matrix is
        applied as a convolutional filter on the correlation map, first in
        the horizontal and then in the vertical direction.
        The shape of the input correlation map is (n, h_unfold*w_unfold, h, w).
        For fusing, the convolutional filter is applied to the reshaped
        feature map with shape (n, 1, h_unfold*w_unfold, h*w).
A simple specification for horizontal direction is shown below:
.. code-block:: python
(h, (h, (h, (h,
0) 1) 2) 3) ...
(h, 0)
(h, 1) 1
(h, 2) 1
(h, 3) 1
...
"""
# horizontal direction
n, _, h_map, w_map = correlation_map.size()
map_ = correlation_map.permute(0, 2, 3, 1)
map_ = map_.reshape(n, h_map * w_map, h_unfold * w_unfold, 1)
map_ = map_.permute(0, 3, 1, 2).contiguous()
map_ = self.fuse_conv(map_, self.fuse_kernel)
correlation_map = map_.view(n, h_unfold, w_unfold, h_map, w_map)
# vertical direction
map_ = correlation_map.permute(0, 2, 1, 4,
3).reshape(n, 1, h_unfold * w_unfold,
h_map * w_map)
map_ = self.fuse_conv(map_, self.fuse_kernel)
        # Note that the dimensions must be transposed back, since the
        # convolution with the eye matrix puts the fused scores into the
        # last dimensions
correlation_map = map_.view(n, w_unfold, h_unfold, w_map,
h_map).permute(0, 4, 3, 2, 1)
correlation_map = correlation_map.reshape(n, -1, h_unfold, w_unfold)
return correlation_map
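
    # --- Illustrative sketch (not part of the original module), assuming the
    # default 3x3 identity ('eye') fuse kernel. Fusing accumulates scores
    # that agree along diagonals of the (patch-index, position) plane, so
    # consistent attention regions are enlarged while the shape is kept.
    @staticmethod
    def _demo_fuse_correlation_map():
        module = ContextualAttentionModule()
        corr = torch.rand(2, 6 * 6, 6, 6)  # h_unfold = w_unfold = h = w = 6
        fused = module.fuse_correlation_map(corr, 6, 6)
        assert fused.shape == corr.shape
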
def calculate_unfold_hw(self,
input_size,
kernel_size=3,
stride=1,
dilation=1,
padding=0):
"""Calculate (h, w) after unfolding
The official implementation of `unfold` in pytorch will put the
dimension (h, w) into `L`. Thus, this function is just to calculate the
(h, w) according to the equation in:
https://pytorch.org/docs/stable/nn.html#torch.nn.Unfold
"""
h_in, w_in = input_size
h_unfold = int((h_in + 2 * padding - dilation *
(kernel_size - 1) - 1) / stride + 1)
w_unfold = int((w_in + 2 * padding - dilation *
(kernel_size - 1) - 1) / stride + 1)
return h_unfold, w_unfold
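
    # --- Illustrative sketch (not part of the original module): the computed
    # (h, w) must satisfy h * w == L, the number of columns that F.unfold
    # actually produces.
    @staticmethod
    def _demo_calculate_unfold_hw():
        module = ContextualAttentionModule()
        img = torch.rand(1, 3, 10, 12)
        h, w = module.calculate_unfold_hw(
            img.shape[-2:], kernel_size=3, stride=2, padding=1)
        cols = F.unfold(img, 3, stride=2, padding=1)
        assert h * w == cols.size(-1)
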
def calculate_overlap_factor(self, attention_score):
"""Calculate the overlap factor after applying deconv.
Args:
attention_score (torch.Tensor): The attention score with shape of
(n, c, h, w).
        Returns:
            torch.Tensor: The overlap factor.
        """
h, w = attention_score.shape[-2:]
kernel_size = self.unfold_raw_kernel_size
ones_input = torch.ones(1, 1, h, w).to(attention_score)
ones_filter = torch.ones(1, 1, kernel_size,
kernel_size).to(attention_score)
overlap = F.conv_transpose2d(
ones_input,
ones_filter,
stride=self.unfold_raw_stride,
padding=self.unfold_raw_padding)
# avoid division by zero
overlap[overlap == 0] = 1.
return overlap
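
    # --- Illustrative sketch (not part of the original module), assuming the
    # default raw settings (kernel 4 / stride 2 / padding 1): every interior
    # pixel is covered by 4 overlapping patches, so the factor is 4 there
    # and smaller near the borders.
    @staticmethod
    def _demo_calculate_overlap_factor():
        module = ContextualAttentionModule()
        score = torch.rand(1, 9, 16, 16)
        overlap = module.calculate_overlap_factor(score)
        assert overlap.shape == (1, 1, 32, 32)
        assert float(overlap[0, 0, 16, 16]) == 4.
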
def mask_correlation_map(self, correlation_map, mask):
"""Add mask weight for correlation map.
        Negative infinity is added to the masked regions so that the
        softmax yields zero attention there.
Args:
correlation_map (torch.Tensor): Correlation map with shape of
(n, h_unfold*w_unfold, h_map, w_map).
mask (torch.Tensor): Mask tensor with shape of (n, c, h, w). '1'
in the mask indicates masked region while '0' indicates valid
region.
Returns:
torch.Tensor: Updated correlation map with mask.
"""
if mask is not None:
mask = F.interpolate(mask, scale_factor=self.scale)
            # if any pixel in a patch is masked, the whole patch is
            # considered masked
mask_cols = self.im2col(
mask,
kernel_size=self.unfold_corr_kernel_size,
stride=self.unfold_corr_stride,
padding=self.unfold_corr_padding,
dilation=self.unfold_corr_dilation)
mask_cols = (mask_cols.sum(dim=1, keepdim=True) > 0).float()
mask_cols = mask_cols.permute(0, 2,
1).reshape(mask.size(0), -1, 1, 1)
            # adding negative infinity yields zero after softmax
mask_cols[mask_cols == 1] = -float('inf')
correlation_map += mask_cols
return correlation_map
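
    # --- Illustrative sketch (not part of the original module), assuming the
    # defaults (scale 0.5, correlation unfold kernel 3 / stride 1 /
    # padding 1): patches touching the mask get -inf, so they receive zero
    # attention after softmax while the scores still form a distribution.
    @staticmethod
    def _demo_mask_correlation_map():
        module = ContextualAttentionModule()
        corr = torch.rand(1, 8 * 8, 8, 8)
        mask = torch.zeros(1, 1, 16, 16)  # resized internally to 8x8
        mask[..., :8, :] = 1.             # top half masked
        masked = module.mask_correlation_map(corr, mask)
        score = module.softmax(masked * module.softmax_scale)
        assert float(score[0, 0].max()) == 0.      # masked patch: no weight
        assert torch.all(score.sum(dim=1) > 0.99)  # still sums to one
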
def im2col(self,
img,
kernel_size,
stride=1,
padding=0,
dilation=1,
normalize=False,
return_cols=False):
"""Reshape image-style feature to columns.
        This function unfolds feature maps into columns. Details can be
        found in:
        https://pytorch.org/docs/1.1.0/nn.html?highlight=unfold#torch.nn.Unfold
Args:
img (torch.Tensor): Features to be unfolded. The shape of this
feature should be (n, c, h, w).
            kernel_size (int): Only square kernels (equal height and width)
                are supported.
stride (int): Stride number in unfolding. Default: 1.
padding (int): Padding number in unfolding. Default: 0.
dilation (int): Dilation number in unfolding. Default: 1.
normalize (bool): If True, the unfolded feature will be normalized.
Default: False.
            return_cols (bool): PyTorch's official unfold returns features
                with shape (n, c*kernel_size**2, L). If True, the features
                will be reshaped to (n, L, c, kernel_size, kernel_size).
                Otherwise, the shape of the official implementation is
                kept.
        Returns:
            torch.Tensor: Unfolded columns. If `return_cols` is True, the \
                shape of the output tensor is \
                `(n, L, c, kernel_size, kernel_size)`. Otherwise, the shape \
                will be `(n, c*kernel_size**2, L)`.
"""
# unfold img to columns with shape (n, c*kernel_size**2, num_cols)
img_unfold = F.unfold(
img,
kernel_size,
stride=stride,
padding=padding,
dilation=dilation)
# normalize the feature map
if normalize:
norm = torch.sqrt((img_unfold**2).sum(dim=1, keepdim=True))
eps = torch.tensor([1e-4]).to(img)
img_unfold = img_unfold / torch.max(norm, eps)
if return_cols:
img_unfold_ = img_unfold.permute(0, 2, 1)
n, num_cols = img_unfold_.size()[:2]
img_cols = img_unfold_.view(n, num_cols, img.size(1), kernel_size,
kernel_size)
return img_cols
return img_unfold
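
# --- Illustrative sketch (not part of the original module): `im2col` is a
# thin wrapper over F.unfold; with `return_cols=True` the columns are
# reshaped into per-patch kernels, ready for the grouped convolutions above.
def _demo_im2col():
    module = ContextualAttentionModule()
    img = torch.rand(2, 3, 8, 8)
    cols = module.im2col(img, kernel_size=3, padding=1, return_cols=True)
    # 8x8 positions with stride 1 / padding 1 -> 64 patches per sample
    assert cols.shape == (2, 64, 3, 3, 3)
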
| 15,214 | 39.039474 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/gated_conv_module.py | # Copyright (c) OpenMMLab. All rights reserved.
import copy
import torch
import torch.nn as nn
from mmcv.cnn import ConvModule, build_activation_layer
class SimpleGatedConvModule(nn.Module):
"""Simple Gated Convolutional Module.
    This module implements a simple gated convolution. The detailed formula
    is:
.. math::
y = \\phi(conv1(x)) * \\sigma(conv2(x)),
where `phi` is the feature activation function and `sigma` is the gate
    activation function. By default, the gate activation function is sigmoid.
Args:
in_channels (int): Same as nn.Conv2d.
out_channels (int): The number of channels of the output feature. Note
that `out_channels` in the conv module is doubled since this module
contains two convolutions for feature and gate separately.
kernel_size (int or tuple[int]): Same as nn.Conv2d.
feat_act_cfg (dict): Config dict for feature activation layer.
gate_act_cfg (dict): Config dict for gate activation layer.
kwargs (keyword arguments): Same as `ConvModule`.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
feat_act_cfg=dict(type='ELU'),
gate_act_cfg=dict(type='Sigmoid'),
**kwargs):
super().__init__()
        # the activation function should be specified outside the conv module
kwargs_ = copy.deepcopy(kwargs)
kwargs_['act_cfg'] = None
self.with_feat_act = feat_act_cfg is not None
self.with_gate_act = gate_act_cfg is not None
self.conv = ConvModule(in_channels, out_channels * 2, kernel_size,
**kwargs_)
if self.with_feat_act:
self.feat_act = build_activation_layer(feat_act_cfg)
if self.with_gate_act:
self.gate_act = build_activation_layer(gate_act_cfg)
def forward(self, x):
"""Forward Function.
Args:
x (torch.Tensor): Input tensor with shape of (n, c, h, w).
Returns:
torch.Tensor: Output tensor with shape of (n, c, h', w').
"""
x = self.conv(x)
x, gate = torch.split(x, x.size(1) // 2, dim=1)
if self.with_feat_act:
x = self.feat_act(x)
if self.with_gate_act:
gate = self.gate_act(gate)
x = x * gate
return x
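
# --- Illustrative sketch (not part of the original module): the gated conv
# doubles the channels internally and splits them into feature and gate
# halves, so the output keeps the requested channel count.
def _demo_simple_gated_conv():
    module = SimpleGatedConvModule(3, 16, 3, padding=1)
    out = module(torch.rand(2, 3, 64, 64))
    assert out.shape == (2, 16, 64, 64)
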
| 2,423 | 32.205479 | 79 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/__init__.py | # Copyright (c) OpenMMLab. All rights reserved.
from .aspp import ASPP
from .contextual_attention import ContextualAttentionModule
from .conv import * # noqa: F401, F403
from .downsample import pixel_unshuffle
from .ensemble import SpatialTemporalEnsemble
from .flow_warp import flow_warp
from .gated_conv_module import SimpleGatedConvModule
from .gca_module import GCAModule
from .generation_model_utils import (GANImageBuffer, ResidualBlockWithDropout,
UnetSkipConnectionBlock,
generation_init_weights)
from .img_normalize import ImgNormalize
from .linear_module import LinearModule
from .mask_conv_module import MaskConvModule
from .model_utils import (extract_around_bbox, extract_bbox_patch, scale_bbox,
set_requires_grad)
from .partial_conv import PartialConv2d
from .separable_conv_module import DepthwiseSeparableConvModule
from .sr_backbone_utils import (ResidualBlockNoBN, default_init_weights,
make_layer)
from .upsample import PixelShufflePack
__all__ = [
'ASPP', 'PartialConv2d', 'PixelShufflePack', 'default_init_weights',
'ResidualBlockNoBN', 'make_layer', 'MaskConvModule', 'extract_bbox_patch',
'extract_around_bbox', 'set_requires_grad', 'scale_bbox',
'DepthwiseSeparableConvModule', 'ContextualAttentionModule', 'GCAModule',
'SimpleGatedConvModule', 'LinearModule', 'flow_warp', 'ImgNormalize',
'generation_init_weights', 'GANImageBuffer', 'UnetSkipConnectionBlock',
'ResidualBlockWithDropout', 'pixel_unshuffle', 'SpatialTemporalEnsemble'
]
| 1,623 | 48.212121 | 78 | py |
BasicVSR_PlusPlus | BasicVSR_PlusPlus-master/mmedit/models/common/conv.py | # Copyright (c) OpenMMLab. All rights reserved.
from mmcv.cnn import CONV_LAYERS
from torch import nn
CONV_LAYERS.register_module('Deconv', module=nn.ConvTranspose2d)
# TODO: octave conv
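
# --- Illustrative sketch (not part of the original module): once registered
# under the name 'Deconv', the transposed conv can be built from a config
# dict via mmcv's `build_conv_layer`.
def _demo_build_deconv():
    import torch
    from mmcv.cnn import build_conv_layer
    deconv = build_conv_layer(
        dict(type='Deconv'), 16, 8, kernel_size=4, stride=2, padding=1)
    out = deconv(torch.rand(1, 16, 32, 32))
    assert out.shape == (1, 8, 64, 64)
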
| 188 | 26 | 64 | py |