prompt
stringlengths 94
42.6k
| completion
stringlengths 6
120
| api
stringlengths 14
68
|
---|---|---|
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.distributed.helper import get_device_count_by_fork
from megengine.quantization.observer import (
ExponentialMovingAverageObserver,
MinMaxObserver,
Observer,
PassiveObserver,
SyncExponentialMovingAverageObserver,
SyncMinMaxObserver,
)
def test_observer():
    """Instantiating the abstract ``Observer`` base class must fail."""
    pytest.raises(TypeError, Observer, "qint8")
def test_min_max_observer():
    """MinMaxObserver should record the exact min/max of the data it sees."""
    data = np.random.rand(3, 3, 3, 3).astype("float32")
    expected_min, expected_max = data.min(), data.max()
    observer = MinMaxObserver()
    observer(mge.tensor(data))
    np.testing.assert_allclose(observer.min_val.numpy(), expected_min)
    np.testing.assert_allclose(observer.max_val.numpy(), expected_max)
def test_exponential_moving_average_observer():
    """EMA observer must blend successive batch statistics by its momentum."""
    momentum = np.random.rand()
    first = np.random.rand(3, 3, 3, 3).astype("float32")
    second = np.random.rand(3, 3, 3, 3).astype("float32")
    want_min = first.min() * momentum + second.min() * (1 - momentum)
    want_max = first.max() * momentum + second.max() * (1 - momentum)
    observer = ExponentialMovingAverageObserver(momentum=momentum)
    for batch in (first, second):
        observer(mge.tensor(batch, dtype=np.float32))
    np.testing.assert_allclose(observer.min_val.numpy(), want_min)
    np.testing.assert_allclose(observer.max_val.numpy(), want_max)
def test_passive_observer():
    """PassiveObserver should expose its original scale and allow overriding it."""
    observer = PassiveObserver({"scale": mge.tensor(1.0)}, "qint8")
    assert observer.orig_scale == 1.0
    assert observer.scale == 1.0
    observer.scale = 2.0
    assert observer.scale == 2.0
    assert observer.get_qparams() == {"scale": mge.tensor(2.0)}
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif( | get_device_count_by_fork("gpu") | megengine.distributed.helper.get_device_count_by_fork |
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.distributed.helper import get_device_count_by_fork
from megengine.quantization.observer import (
ExponentialMovingAverageObserver,
MinMaxObserver,
Observer,
PassiveObserver,
SyncExponentialMovingAverageObserver,
SyncMinMaxObserver,
)
def test_observer():
    """Instantiating the abstract ``Observer`` base class must fail."""
    pytest.raises(TypeError, Observer, "qint8")
def test_min_max_observer():
    """MinMaxObserver should record the exact min/max of the data it sees."""
    data = np.random.rand(3, 3, 3, 3).astype("float32")
    expected_min, expected_max = data.min(), data.max()
    observer = MinMaxObserver()
    observer(mge.tensor(data))
    np.testing.assert_allclose(observer.min_val.numpy(), expected_min)
    np.testing.assert_allclose(observer.max_val.numpy(), expected_max)
def test_exponential_moving_average_observer():
    """EMA observer must blend successive batch statistics by its momentum."""
    momentum = np.random.rand()
    first = np.random.rand(3, 3, 3, 3).astype("float32")
    second = np.random.rand(3, 3, 3, 3).astype("float32")
    want_min = first.min() * momentum + second.min() * (1 - momentum)
    want_max = first.max() * momentum + second.max() * (1 - momentum)
    observer = ExponentialMovingAverageObserver(momentum=momentum)
    for batch in (first, second):
        observer(mge.tensor(batch, dtype=np.float32))
    np.testing.assert_allclose(observer.min_val.numpy(), want_min)
    np.testing.assert_allclose(observer.max_val.numpy(), want_max)
def test_passive_observer():
    """PassiveObserver should expose its original scale and allow overriding it."""
    observer = PassiveObserver({"scale": mge.tensor(1.0)}, "qint8")
    assert observer.orig_scale == 1.0
    assert observer.scale == 1.0
    observer.scale = 2.0
    assert observer.scale == 2.0
    assert observer.get_qparams() == {"scale": mge.tensor(2.0)}
@pytest.mark.skipif(
    platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
    platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_sync_min_max_observer():
    """Each rank observes a slice of ``x``; the synced observer must agree on
    the global min/max across all ranks."""
    world_size = get_device_count_by_fork("gpu")
    x = np.random.rand(3 * world_size, 3, 3, 3).astype("float32")
    expected_min, expected_max = x.min(), x.max()

    @dist.launcher
    def worker():
        observer = SyncMinMaxObserver()
        rank = dist.get_rank()
        observer(mge.tensor(x[rank * 3 : (rank + 1) * 3]))
        assert observer.min_val == expected_min and observer.max_val == expected_max

    worker()
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_sync_exponential_moving_average_observer():
word_size = get_device_count_by_fork("gpu")
t = np.random.rand()
x1 = np.random.rand(3 * word_size, 3, 3, 3).astype("float32")
x2 = np.random.rand(3 * word_size, 3, 3, 3).astype("float32")
expected_min = x1.min() * t + x2.min() * (1 - t)
expected_max = x1.max() * t + x2.max() * (1 - t)
@dist.launcher
def worker():
rank = | dist.get_rank() | megengine.distributed.get_rank |
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.distributed.helper import get_device_count_by_fork
from megengine.quantization.observer import (
ExponentialMovingAverageObserver,
MinMaxObserver,
Observer,
PassiveObserver,
SyncExponentialMovingAverageObserver,
SyncMinMaxObserver,
)
def test_observer():
    """Instantiating the abstract ``Observer`` base class must fail."""
    pytest.raises(TypeError, Observer, "qint8")
def test_min_max_observer():
    """MinMaxObserver should record the exact min/max of the data it sees."""
    data = np.random.rand(3, 3, 3, 3).astype("float32")
    expected_min, expected_max = data.min(), data.max()
    observer = MinMaxObserver()
    observer(mge.tensor(data))
    np.testing.assert_allclose(observer.min_val.numpy(), expected_min)
    np.testing.assert_allclose(observer.max_val.numpy(), expected_max)
def test_exponential_moving_average_observer():
    """EMA observer must blend successive batch statistics by its momentum."""
    momentum = np.random.rand()
    first = np.random.rand(3, 3, 3, 3).astype("float32")
    second = np.random.rand(3, 3, 3, 3).astype("float32")
    want_min = first.min() * momentum + second.min() * (1 - momentum)
    want_max = first.max() * momentum + second.max() * (1 - momentum)
    observer = ExponentialMovingAverageObserver(momentum=momentum)
    for batch in (first, second):
        observer(mge.tensor(batch, dtype=np.float32))
    np.testing.assert_allclose(observer.min_val.numpy(), want_min)
    np.testing.assert_allclose(observer.max_val.numpy(), want_max)
def test_passive_observer():
    """PassiveObserver should expose its original scale and allow overriding it."""
    observer = PassiveObserver({"scale": mge.tensor(1.0)}, "qint8")
    assert observer.orig_scale == 1.0
    assert observer.scale == 1.0
    observer.scale = 2.0
    assert observer.scale == 2.0
    assert observer.get_qparams() == {"scale": mge.tensor(2.0)}
@pytest.mark.skipif(
    platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
    platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_sync_min_max_observer():
    """Each rank observes a slice of ``x``; the synced observer must agree on
    the global min/max across all ranks."""
    world_size = get_device_count_by_fork("gpu")
    x = np.random.rand(3 * world_size, 3, 3, 3).astype("float32")
    expected_min, expected_max = x.min(), x.max()

    @dist.launcher
    def worker():
        observer = SyncMinMaxObserver()
        rank = dist.get_rank()
        observer(mge.tensor(x[rank * 3 : (rank + 1) * 3]))
        assert observer.min_val == expected_min and observer.max_val == expected_max

    worker()
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_sync_exponential_moving_average_observer():
word_size = get_device_count_by_fork("gpu")
t = np.random.rand()
x1 = np.random.rand(3 * word_size, 3, 3, 3).astype("float32")
x2 = np.random.rand(3 * word_size, 3, 3, 3).astype("float32")
expected_min = x1.min() * t + x2.min() * (1 - t)
expected_max = x1.max() * t + x2.max() * (1 - t)
@dist.launcher
def worker():
rank = dist.get_rank()
m = | SyncExponentialMovingAverageObserver(momentum=t) | megengine.quantization.observer.SyncExponentialMovingAverageObserver |
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.distributed.helper import get_device_count_by_fork
from megengine.quantization.observer import (
ExponentialMovingAverageObserver,
MinMaxObserver,
Observer,
PassiveObserver,
SyncExponentialMovingAverageObserver,
SyncMinMaxObserver,
)
def test_observer():
    """Instantiating the abstract ``Observer`` base class must fail."""
    pytest.raises(TypeError, Observer, "qint8")
def test_min_max_observer():
    """MinMaxObserver should record the exact min/max of the data it sees."""
    data = np.random.rand(3, 3, 3, 3).astype("float32")
    expected_min, expected_max = data.min(), data.max()
    observer = MinMaxObserver()
    observer(mge.tensor(data))
    np.testing.assert_allclose(observer.min_val.numpy(), expected_min)
    np.testing.assert_allclose(observer.max_val.numpy(), expected_max)
def test_exponential_moving_average_observer():
    """EMA observer must blend successive batch statistics by its momentum."""
    momentum = np.random.rand()
    first = np.random.rand(3, 3, 3, 3).astype("float32")
    second = np.random.rand(3, 3, 3, 3).astype("float32")
    want_min = first.min() * momentum + second.min() * (1 - momentum)
    want_max = first.max() * momentum + second.max() * (1 - momentum)
    observer = ExponentialMovingAverageObserver(momentum=momentum)
    for batch in (first, second):
        observer(mge.tensor(batch, dtype=np.float32))
    np.testing.assert_allclose(observer.min_val.numpy(), want_min)
    np.testing.assert_allclose(observer.max_val.numpy(), want_max)
def test_passive_observer():
    """PassiveObserver should expose its original scale and allow overriding it."""
    observer = PassiveObserver({"scale": mge.tensor(1.0)}, "qint8")
    assert observer.orig_scale == 1.0
    assert observer.scale == 1.0
    observer.scale = 2.0
    assert observer.scale == 2.0
    assert observer.get_qparams() == {"scale": mge.tensor(2.0)}
@pytest.mark.skipif(
    platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
    platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif(get_device_count_by_fork("gpu") < 2, reason="need more gpu device")
@pytest.mark.isolated_distributed
def test_sync_min_max_observer():
    """Each rank observes a slice of ``x``; the synced observer must agree on
    the global min/max across all ranks."""
    world_size = get_device_count_by_fork("gpu")
    x = np.random.rand(3 * world_size, 3, 3, 3).astype("float32")
    expected_min, expected_max = x.min(), x.max()

    @dist.launcher
    def worker():
        observer = SyncMinMaxObserver()
        rank = dist.get_rank()
        observer(mge.tensor(x[rank * 3 : (rank + 1) * 3]))
        assert observer.min_val == expected_min and observer.max_val == expected_max

    worker()
@pytest.mark.skipif(
platform.system() == "Darwin", reason="do not imp GPU mode at macos now"
)
@pytest.mark.skipif(
platform.system() == "Windows", reason="windows disable MGB_ENABLE_OPR_MM"
)
@pytest.mark.skipif( | get_device_count_by_fork("gpu") | megengine.distributed.helper.get_device_count_by_fork |
import platform
import numpy as np
import pytest
import megengine as mge
import megengine.distributed as dist
from megengine.distributed.helper import get_device_count_by_fork
from megengine.quantization.observer import (
ExponentialMovingAverageObserver,
MinMaxObserver,
Observer,
PassiveObserver,
SyncExponentialMovingAverageObserver,
SyncMinMaxObserver,
)
def test_observer():
    """Instantiating the abstract ``Observer`` base class must fail."""
    pytest.raises(TypeError, Observer, "qint8")
def test_min_max_observer():
    """MinMaxObserver should record the exact min/max of the data it sees."""
    data = np.random.rand(3, 3, 3, 3).astype("float32")
    expected_min, expected_max = data.min(), data.max()
    observer = MinMaxObserver()
    observer(mge.tensor(data))
    np.testing.assert_allclose(observer.min_val.numpy(), expected_min)
    np.testing.assert_allclose(observer.max_val.numpy(), expected_max)
def test_exponential_moving_average_observer():
    """EMA observer must blend successive batch statistics by its momentum."""
    momentum = np.random.rand()
    first = np.random.rand(3, 3, 3, 3).astype("float32")
    second = np.random.rand(3, 3, 3, 3).astype("float32")
    want_min = first.min() * momentum + second.min() * (1 - momentum)
    want_max = first.max() * momentum + second.max() * (1 - momentum)
    observer = ExponentialMovingAverageObserver(momentum=momentum)
    for batch in (first, second):
        observer(mge.tensor(batch, dtype=np.float32))
    np.testing.assert_allclose(observer.min_val.numpy(), want_min)
    np.testing.assert_allclose(observer.max_val.numpy(), want_max)
def test_passive_observer():
q_dict = {"scale": mge.tensor(1.0)}
m = PassiveObserver(q_dict, "qint8")
assert m.orig_scale == 1.0
assert m.scale == 1.0
m.scale = 2.0
assert m.scale == 2.0
assert m.get_qparams() == {"scale": | mge.tensor(2.0) | megengine.tensor |
import random
from megengine.data.transform import RandomResizedCrop as mge_RRC
from megengine.data.transform import Resize as mge_resize
from ..registry import PIPELINES
from edit.utils import interp_codes
@PIPELINES.register_module()
class Resize(object):
"""
Args:
size (int|list|tuple): Desired output size. If size is a sequence like
(h, w), output size will be matched to this. If size is an int,
smaller edge of the image will be matched to this number.
i.e, if height > width, then image will be rescaled to
(size * height / width, size)
interpolation (int): Interpolation mode of resize. Default: cv2.INTER_LINEAR.
"""
def __init__(self, keys, size, interpolation='bilinear'):
assert interpolation in interp_codes
self.keys = keys
self.size = size
self.interpolation_str = interpolation
self.resize = | mge_resize(output_size=self.size, interpolation=interp_codes[interpolation]) | megengine.data.transform.Resize |
import random
from megengine.data.transform import RandomResizedCrop as mge_RRC
from megengine.data.transform import Resize as mge_resize
from ..registry import PIPELINES
from edit.utils import interp_codes
@PIPELINES.register_module()
class Resize(object):
    """Resize the entries named in ``keys`` to a fixed size.

    Args:
        keys (list): result-dict keys whose values should be resized.
        size (int|list|tuple): desired output size. A sequence is taken as
            (h, w); an int matches the smaller edge of the image, i.e. if
            height > width the image is rescaled to (size * height / width, size).
        interpolation (str): name of the interpolation mode, looked up in
            ``interp_codes``. Default: 'bilinear'.
    """
    def __init__(self, keys, size, interpolation='bilinear'):
        assert interpolation in interp_codes
        self.keys = keys
        self.size = size
        self.interpolation_str = interpolation
        self.resize = mge_resize(output_size=size, interpolation=interp_codes[interpolation])
    def __call__(self, results):
        """Resize every configured key of ``results`` in place.

        Args:
            results (dict): A dict containing the necessary information and data for augmentation.
        Returns:
            dict: A dict containing the processed data and information.
        """
        for key in self.keys:
            value = results[key]
            if isinstance(value, list):
                results[key] = [self.resize.apply(item) for item in value]
            else:
                results[key] = self.resize.apply(value)
        return results
    def __repr__(self):
        return '{0}(size={1}, interpolation={2})'.format(
            self.__class__.__name__, self.size, self.interpolation_str
        )
@PIPELINES.register_module()
class RandomResizedCrop(object):
"""
Crop the input data to random size and aspect ratio.
A crop of random size (default: of 0.08 to 1.0) of the original size and a random
aspect ratio (default: of 3/4 to 1.33) of the original aspect ratio is made.
After applying crop transfrom, the input data will be resized to given size.
Args:
output_size (int|list|tuple): Target size of output image, with (height, width) shape.
scale (list|tuple): Range of size of the origin size cropped. Default: (0.08, 1.0)
ratio (list|tuple): Range of aspect ratio of the origin aspect ratio cropped. Default: (0.75, 1.33)
interpolation:
'nearest': cv2.INTER_NEAREST,
'bilinear': cv2.INTER_LINEAR,
'bicubic': cv2.INTER_CUBIC,
'area': cv2.INTER_AREA,
'lanczos': cv2.INTER_LANCZOS4
"""
def __init__(self, keys, output_size, scale=(0.08, 1.0), ratio=(3. / 4., 4. / 3.), interpolation='bilinear', do_prob = 0.5):
assert interpolation in interp_codes
self.keys = keys
self.size = output_size
self.interpolation_str = interpolation
self.scale = scale
self.ratio = ratio
self.rrc = | mge_RRC(output_size=output_size, scale_range=scale, ratio_range=ratio, interpolation=interp_codes[interpolation]) | megengine.data.transform.RandomResizedCrop |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import bisect
import datetime
import math
import os
import pickle
import time
from typing import Optional
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import (
Checkpoint,
MeterBuffer,
cached_property,
ensure_dir,
get_last_call_deltatime,
)
from loguru import logger
from tensorboardX import SummaryWriter
from basecls.layers import compute_precise_bn_stats
from basecls.models import sync_model
from basecls.utils import default_logging, registers
from .tester import ClsTester
__all__ = [
"CheckpointHook",
"EvalHook",
"LoggerHook",
"LRSchedulerHook",
"PreciseBNHook",
"ResumeHook",
"TensorboardHook",
]
def _create_checkpoint(trainer: BaseTrainer, save_dir: str) -> Checkpoint:
    """Create a checkpoint for save and resume"""
    # Only pass an ``ema`` entry when the trainer actually tracks one.
    extra_kws = {} if trainer.ema is None else {"ema": trainer.ema}
    return Checkpoint(
        save_dir,
        trainer.model,
        tag_file=None,
        optimizer=trainer.solver.optimizer,
        scaler=trainer.solver.grad_scaler,
        progress=trainer.progress,
        **extra_kws,
    )
class CheckpointHook(BaseHook):
    """Hook for managing checkpoints during training.
    Effect during ``after_epoch`` and ``after_train`` procedure.
    Args:
        save_dir: checkpoint directory.
        save_every_n_epoch: interval for saving checkpoint. Default: ``1``
    """
    def __init__(self, save_dir: str = None, save_every_n_epoch: int = 1):
        super().__init__()
        ensure_dir(save_dir)
        self.save_dir = save_dir
        self.save_every_n_epoch = save_every_n_epoch
    def after_epoch(self):
        """Refresh the rolling "latest" checkpoint and keep a periodic snapshot."""
        ckpt = _create_checkpoint(self.trainer, self.save_dir)
        ckpt.save("latest.pkl")
        progress = self.trainer.progress
        if progress.epoch % self.save_every_n_epoch == 0:
            snapshot_name = "_".join(progress.progress_str_list()[:-1]) + ".pkl"
            ckpt.save(snapshot_name)
            logger.info(f"Save checkpoint {snapshot_name} to {self.save_dir}")
    def after_train(self):
        """Dump the final raw model weights for deployment."""
        # NOTE: usually final ema is not the best so we dont save it
        mge.save(
            {"state_dict": self.trainer.model.state_dict()},
            os.path.join(self.save_dir, "dumped_model.pkl"),
            pickle_protocol=pickle.DEFAULT_PROTOCOL,
        )
class EvalHook(BaseHook):
    """Hook for evaluating during training.
    Effect during ``after_epoch`` and ``after_train`` procedure.
    Args:
        save_dir: checkpoint directory.
        eval_every_n_epoch: interval for evaluating. Default: ``1``
    """
    def __init__(self, save_dir: str = None, eval_every_n_epoch: int = 1):
        super().__init__()
        ensure_dir(save_dir)
        self.save_dir = save_dir
        self.eval_every_n_epoch = eval_every_n_epoch
        # Best top-1 accuracies observed so far (raw model / EMA copy); used to
        # decide when to overwrite the "best" checkpoints.
        self.best_acc1 = 0
        self.best_ema_acc1 = 0
    def after_epoch(self):
        """Evaluate every ``eval_every_n_epoch`` epochs, skipping the final
        epoch (``after_train`` handles that one)."""
        trainer = self.trainer
        cfg = trainer.cfg
        model = trainer.model
        ema = trainer.ema
        progress = trainer.progress
        if progress.epoch % self.eval_every_n_epoch == 0 and progress.epoch != progress.max_epoch:
            self.test(cfg, model, ema)
    def after_train(self):
        """Synchronize weights across ranks, then run the final evaluation."""
        trainer = self.trainer
        cfg = trainer.cfg
        model = trainer.model
        ema = trainer.ema
        # TODO: actually useless maybe when precise_bn is on
        sync_model(model)
        if ema is not None:
            sync_model(ema)
        self.test(cfg, model, ema)
    def test(self, cfg: ConfigDict, model: M.Module, ema: Optional[M.Module] = None):
        """Evaluate ``model`` (and ``ema`` if given), saving a checkpoint from
        rank 0 whenever a new best top-1 accuracy is reached."""
        dataloader = registers.dataloaders.get(cfg.data.name).build(cfg, False)
        # FIXME: need atomic user_pop, maybe in MegEngine 1.5?
        # tester = BaseTester(model, dataloader, AccEvaluator())
        tester = ClsTester(cfg, model, dataloader)
        acc1, _ = tester.test()
        if acc1 > self.best_acc1:
            self.best_acc1 = acc1
            # only rank 0 touches the filesystem
            if dist.get_rank() == 0:
                mge.save(
                    {"state_dict": model.state_dict(), "acc1": self.best_acc1},
                    os.path.join(self.save_dir, "best_model.pkl"),
                    pickle_protocol=pickle.DEFAULT_PROTOCOL,
                )
        logger.info(
            f"Epoch: {self.trainer.progress.epoch}, Test Acc@1: {acc1:.3f}, "
            f"Best Test Acc@1: {self.best_acc1:.3f}"
        )
        if ema is None:
            return
        # Repeat the same evaluate-and-maybe-save cycle for the EMA weights.
        tester_ema = ClsTester(cfg, ema, dataloader)
        ema_acc1, _ = tester_ema.test()
        if ema_acc1 > self.best_ema_acc1:
            self.best_ema_acc1 = ema_acc1
            if dist.get_rank() == 0:
                mge.save(
                    {"state_dict": ema.state_dict(), "acc1": self.best_ema_acc1},
                    os.path.join(self.save_dir, "best_ema_model.pkl"),
                    pickle_protocol=pickle.DEFAULT_PROTOCOL,
                )
        logger.info(
            f"Epoch: {self.trainer.progress.epoch}, EMA Acc@1: {ema_acc1:.3f}, "
            f"Best EMA Acc@1: {self.best_ema_acc1:.3f}"
        )
class LoggerHook(BaseHook):
    """Hook for logging during training.
    Effect during ``before_train``, ``after_train``, ``before_iter`` and ``after_iter`` procedure.
    Args:
        log_every_n_iter: interval for logging. Default: ``20``
    """
    def __init__(self, log_every_n_iter: int = 20):
        super().__init__()
        self.log_every_n_iter = log_every_n_iter
        # Hook-local meter for timing statistics (iter time, ETA, extra overhead).
        self.meter = MeterBuffer(self.log_every_n_iter)
    def before_train(self):
        """Log the starting point and remember the wall-clock start time."""
        trainer = self.trainer
        progress = trainer.progress
        default_logging(trainer.cfg, trainer.model)
        logger.info(f"Starting training from epoch {progress.epoch}, iteration {progress.iter}")
        self.start_training_time = time.perf_counter()
    def after_train(self):
        """Log total training time and the global average time per iteration."""
        total_training_time = time.perf_counter() - self.start_training_time
        total_time_str = str(datetime.timedelta(seconds=total_training_time))
        logger.info(
            "Total training time: {} ({:.4f} s / iter)".format(
                total_time_str, self.meter["iters_time"].global_avg
            )
        )
    def before_iter(self):
        """Mark the start of the current iteration for ``after_iter`` timing."""
        self.iter_start_time = time.perf_counter()
    def after_iter(self):
        """Accumulate timing stats and emit a combined log line every
        ``log_every_n_iter`` iterations (plus the very first iteration)."""
        single_iter_time = time.perf_counter() - self.iter_start_time
        delta_time = get_last_call_deltatime()
        if delta_time is None:
            delta_time = single_iter_time
        self.meter.update(
            {
                "iters_time": single_iter_time,  # to get global average iter time
                "eta_iter_time": delta_time,  # to get ETA time
                "extra_time": delta_time - single_iter_time,  # to get extra time
            }
        )
        trainer = self.trainer
        progress = trainer.progress
        epoch_id, iter_id = progress.epoch, progress.iter
        max_epoch, max_iter = progress.max_epoch, progress.max_iter
        if iter_id % self.log_every_n_iter == 0 or (iter_id == 1 and epoch_id == 1):
            log_str_list = []
            # step info string
            log_str_list.append(str(progress))
            # loss string
            log_str_list.append(self.get_loss_str(trainer.meter))
            # stat string
            log_str_list.append(self.get_stat_str(trainer.meter))
            # other training info like learning rate.
            log_str_list.append(self.get_train_info_str())
            # memory usage.
            log_str_list.append(self.get_memory_str(trainer.meter))
            # time string
            left_iters = max_iter - iter_id + (max_epoch - epoch_id) * max_iter
            time_str = self.get_time_str(left_iters)
            log_str_list.append(time_str)
            # filter empty strings
            log_str_list = [s for s in log_str_list if len(s) > 0]
            log_str = ", ".join(log_str_list)
            logger.info(log_str)
            # reset meters in trainer
            trainer.meter.reset()
    def get_loss_str(self, meter):
        """Get loss information during training process."""
        loss_dict = meter.get_filtered_meter(filter_key="loss")
        loss_str = ", ".join(
            [f"{name}:{value.latest:.3f}({value.avg:.3f})" for name, value in loss_dict.items()]
        )
        return loss_str
    def get_stat_str(self, meter):
        """Get stat information during training process."""
        stat_dict = meter.get_filtered_meter(filter_key="stat")
        stat_str = ", ".join(
            [f"{name}:{value.latest:.3f}({value.avg:.3f})" for name, value in stat_dict.items()]
        )
        return stat_str
    def get_memory_str(self, meter):
        """Get memory information during training process."""
        def mem_in_Mb(mem_value):
            # bytes -> Mb, rounded up
            return math.ceil(mem_value / 1024 / 1024)
        mem_dict = meter.get_filtered_meter(filter_key="memory")
        mem_str = ", ".join(
            [
                f"{name}:{mem_in_Mb(value.latest)}({mem_in_Mb(value.avg)})Mb"
                for name, value in mem_dict.items()
            ]
        )
        return mem_str
    def get_train_info_str(self):
        """Get training process related information such as learning rate."""
        # extra info to display, such as learning rate
        trainer = self.trainer
        lr = trainer.solver.optimizer.param_groups[0]["lr"]
        lr_str = f"lr:{lr:.3e}"
        loss_scale = trainer.solver.grad_scaler.scale_factor
        loss_scale_str = f", amp_loss_scale:{loss_scale:.1f}" if trainer.cfg.amp.enabled else ""
        return lr_str + loss_scale_str
    def get_time_str(self, left_iters: int) -> str:
        """Get time related information such as data_time, train_time, ETA and so on."""
        # time string
        trainer = self.trainer
        time_dict = trainer.meter.get_filtered_meter(filter_key="time")
        train_time_str = ", ".join(
            [f"{name}:{value.avg:.3f}s" for name, value in time_dict.items()]
        )
        train_time_str += ", extra_time:{:.3f}s, ".format(self.meter["extra_time"].avg)
        eta_seconds = self.meter["eta_iter_time"].global_avg * left_iters
        eta_string = "ETA:{}".format(datetime.timedelta(seconds=int(eta_seconds)))
        time_str = train_time_str + eta_string
        return time_str
class LRSchedulerHook(BaseHook):
"""Hook for learning rate scheduling during training.
Effect during ``before_epoch`` procedure.
"""
def before_epoch(self):
trainer = self.trainer
epoch_id = trainer.progress.epoch
cfg = trainer.cfg.solver
lr_factor = self.get_lr_factor(cfg, epoch_id)
if epoch_id <= cfg.warmup_epochs:
alpha = (epoch_id - 1) / cfg.warmup_epochs
lr_factor *= cfg.warmup_factor * (1 - alpha) + alpha
scaled_lr = self.total_lr * lr_factor
for param_group in trainer.solver.optimizer.param_groups:
param_group["lr"] = scaled_lr
def get_lr_factor(self, cfg: ConfigDict, epoch_id: int) -> float:
"""Calculate learning rate factor.
It supports ``"step"``, ``"linear"``, ``"cosine"``, ``"exp"``, and ``"rel_exp"`` schedule.
Args:
cfg: config for training.
epoch_id: current epoch.
Returns:
Learning rate factor.
"""
if cfg.lr_schedule == "step":
return cfg.lr_decay_factor ** bisect.bisect_left(cfg.lr_decay_steps, epoch_id)
elif cfg.lr_schedule == "linear":
alpha = 1 - (epoch_id - 1) / cfg.max_epoch
return (1 - cfg.lr_min_factor) * alpha + cfg.lr_min_factor
elif cfg.lr_schedule == "cosine":
alpha = 0.5 * (1 + math.cos(math.pi * (epoch_id - 1) / cfg.max_epoch))
return (1 - cfg.lr_min_factor) * alpha + cfg.lr_min_factor
elif cfg.lr_schedule == "exp":
return cfg.lr_decay_factor ** (epoch_id - 1)
elif cfg.lr_schedule == "rel_exp":
if cfg.lr_min_factor <= 0:
raise ValueError(
"Exponential lr schedule requires lr_min_factor to be greater than 0"
)
return cfg.lr_min_factor ** ((epoch_id - 1) / cfg.max_epoch)
else:
raise NotImplementedError(f"Learning rate schedule '{cfg.lr_schedule}' not supported")
@cached_property
def total_lr(self) -> float:
"""Total learning rate."""
cfg = self.trainer.cfg.solver
total_lr = cfg.basic_lr * | dist.get_world_size() | megengine.distributed.get_world_size |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import bisect
import datetime
import math
import os
import pickle
import time
from typing import Optional
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import (
Checkpoint,
MeterBuffer,
cached_property,
ensure_dir,
get_last_call_deltatime,
)
from loguru import logger
from tensorboardX import SummaryWriter
from basecls.layers import compute_precise_bn_stats
from basecls.models import sync_model
from basecls.utils import default_logging, registers
from .tester import ClsTester
__all__ = [
"CheckpointHook",
"EvalHook",
"LoggerHook",
"LRSchedulerHook",
"PreciseBNHook",
"ResumeHook",
"TensorboardHook",
]
def _create_checkpoint(trainer: BaseTrainer, save_dir: str) -> Checkpoint:
    """Create a checkpoint for save and resume"""
    # Only pass an ``ema`` entry when the trainer actually tracks one.
    extra_kws = {} if trainer.ema is None else {"ema": trainer.ema}
    return Checkpoint(
        save_dir,
        trainer.model,
        tag_file=None,
        optimizer=trainer.solver.optimizer,
        scaler=trainer.solver.grad_scaler,
        progress=trainer.progress,
        **extra_kws,
    )
class CheckpointHook(BaseHook):
    """Hook for managing checkpoints during training.
    Effect during ``after_epoch`` and ``after_train`` procedure.
    Args:
        save_dir: checkpoint directory.
        save_every_n_epoch: interval for saving checkpoint. Default: ``1``
    """
    def __init__(self, save_dir: str = None, save_every_n_epoch: int = 1):
        super().__init__()
        ensure_dir(save_dir)
        self.save_dir = save_dir
        self.save_every_n_epoch = save_every_n_epoch
    def after_epoch(self):
        """Refresh the rolling "latest" checkpoint and keep a periodic snapshot."""
        ckpt = _create_checkpoint(self.trainer, self.save_dir)
        ckpt.save("latest.pkl")
        progress = self.trainer.progress
        if progress.epoch % self.save_every_n_epoch == 0:
            snapshot_name = "_".join(progress.progress_str_list()[:-1]) + ".pkl"
            ckpt.save(snapshot_name)
            logger.info(f"Save checkpoint {snapshot_name} to {self.save_dir}")
    def after_train(self):
        """Dump the final raw model weights for deployment."""
        # NOTE: usually final ema is not the best so we dont save it
        mge.save(
            {"state_dict": self.trainer.model.state_dict()},
            os.path.join(self.save_dir, "dumped_model.pkl"),
            pickle_protocol=pickle.DEFAULT_PROTOCOL,
        )
class EvalHook(BaseHook):
"""Hook for evaluating during training.
Effect during ``after_epoch`` and ``after_train`` procedure.
Args:
save_dir: checkpoint directory.
eval_every_n_epoch: interval for evaluating. Default: ``1``
"""
    def __init__(self, save_dir: str = None, eval_every_n_epoch: int = 1):
        """
        Args:
            save_dir: directory where best checkpoints are written.
            eval_every_n_epoch: evaluate every N epochs. Default: ``1``
        """
        super().__init__()
        ensure_dir(save_dir)
        self.save_dir = save_dir
        self.eval_every_n_epoch = eval_every_n_epoch
        # Best top-1 accuracies observed so far (raw model / EMA copy).
        self.best_acc1 = 0
        self.best_ema_acc1 = 0
def after_epoch(self):
trainer = self.trainer
cfg = trainer.cfg
model = trainer.model
ema = trainer.ema
progress = trainer.progress
if progress.epoch % self.eval_every_n_epoch == 0 and progress.epoch != progress.max_epoch:
self.test(cfg, model, ema)
def after_train(self):
trainer = self.trainer
cfg = trainer.cfg
model = trainer.model
ema = trainer.ema
# TODO: actually useless maybe when precise_bn is on
sync_model(model)
if ema is not None:
sync_model(ema)
self.test(cfg, model, ema)
def test(self, cfg: ConfigDict, model: M.Module, ema: Optional[M.Module] = None):
dataloader = registers.dataloaders.get(cfg.data.name).build(cfg, False)
# FIXME: need atomic user_pop, maybe in MegEngine 1.5?
# tester = BaseTester(model, dataloader, AccEvaluator())
tester = ClsTester(cfg, model, dataloader)
acc1, _ = tester.test()
if acc1 > self.best_acc1:
self.best_acc1 = acc1
if | dist.get_rank() | megengine.distributed.get_rank |
#!/usr/bin/env python3
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
import bisect
import datetime
import math
import os
import pickle
import time
from typing import Optional
import megengine as mge
import megengine.distributed as dist
import megengine.module as M
from basecore.config import ConfigDict
from basecore.engine import BaseHook, BaseTrainer
from basecore.utils import (
Checkpoint,
MeterBuffer,
cached_property,
ensure_dir,
get_last_call_deltatime,
)
from loguru import logger
from tensorboardX import SummaryWriter
from basecls.layers import compute_precise_bn_stats
from basecls.models import sync_model
from basecls.utils import default_logging, registers
from .tester import ClsTester
__all__ = [
"CheckpointHook",
"EvalHook",
"LoggerHook",
"LRSchedulerHook",
"PreciseBNHook",
"ResumeHook",
"TensorboardHook",
]
def _create_checkpoint(trainer: BaseTrainer, save_dir: str) -> Checkpoint:
    """Create a checkpoint for save and resume"""
    # Only pass an ``ema`` entry when the trainer actually tracks one.
    extra_kws = {} if trainer.ema is None else {"ema": trainer.ema}
    return Checkpoint(
        save_dir,
        trainer.model,
        tag_file=None,
        optimizer=trainer.solver.optimizer,
        scaler=trainer.solver.grad_scaler,
        progress=trainer.progress,
        **extra_kws,
    )
class CheckpointHook(BaseHook):
    """Hook for managing checkpoints during training.
    Effect during ``after_epoch`` and ``after_train`` procedure.
    Args:
        save_dir: checkpoint directory.
        save_every_n_epoch: interval for saving checkpoint. Default: ``1``
    """
    def __init__(self, save_dir: str = None, save_every_n_epoch: int = 1):
        super().__init__()
        ensure_dir(save_dir)
        self.save_dir = save_dir
        self.save_every_n_epoch = save_every_n_epoch
    def after_epoch(self):
        """Refresh the rolling "latest" checkpoint and keep a periodic snapshot."""
        ckpt = _create_checkpoint(self.trainer, self.save_dir)
        ckpt.save("latest.pkl")
        progress = self.trainer.progress
        if progress.epoch % self.save_every_n_epoch == 0:
            snapshot_name = "_".join(progress.progress_str_list()[:-1]) + ".pkl"
            ckpt.save(snapshot_name)
            logger.info(f"Save checkpoint {snapshot_name} to {self.save_dir}")
    def after_train(self):
        """Dump the final raw model weights for deployment."""
        # NOTE: usually final ema is not the best so we dont save it
        mge.save(
            {"state_dict": self.trainer.model.state_dict()},
            os.path.join(self.save_dir, "dumped_model.pkl"),
            pickle_protocol=pickle.DEFAULT_PROTOCOL,
        )
class EvalHook(BaseHook):
"""Hook for evaluating during training.
Effect during ``after_epoch`` and ``after_train`` procedure.
Args:
save_dir: checkpoint directory.
eval_every_n_epoch: interval for evaluating. Default: ``1``
"""
def __init__(self, save_dir: str = None, eval_every_n_epoch: int = 1):
super().__init__()
ensure_dir(save_dir)
self.save_dir = save_dir
self.eval_every_n_epoch = eval_every_n_epoch
self.best_acc1 = 0
self.best_ema_acc1 = 0
def after_epoch(self):
trainer = self.trainer
cfg = trainer.cfg
model = trainer.model
ema = trainer.ema
progress = trainer.progress
if progress.epoch % self.eval_every_n_epoch == 0 and progress.epoch != progress.max_epoch:
self.test(cfg, model, ema)
def after_train(self):
trainer = self.trainer
cfg = trainer.cfg
model = trainer.model
ema = trainer.ema
# TODO: actually useless maybe when precise_bn is on
sync_model(model)
if ema is not None:
sync_model(ema)
self.test(cfg, model, ema)
def test(self, cfg: ConfigDict, model: M.Module, ema: Optional[M.Module] = None):
dataloader = registers.dataloaders.get(cfg.data.name).build(cfg, False)
# FIXME: need atomic user_pop, maybe in MegEngine 1.5?
# tester = BaseTester(model, dataloader, AccEvaluator())
tester = ClsTester(cfg, model, dataloader)
acc1, _ = tester.test()
if acc1 > self.best_acc1:
self.best_acc1 = acc1
if dist.get_rank() == 0:
mge.save(
{"state_dict": model.state_dict(), "acc1": self.best_acc1},
os.path.join(self.save_dir, "best_model.pkl"),
pickle_protocol=pickle.DEFAULT_PROTOCOL,
)
logger.info(
f"Epoch: {self.trainer.progress.epoch}, Test Acc@1: {acc1:.3f}, "
f"Best Test Acc@1: {self.best_acc1:.3f}"
)
if ema is None:
return
tester_ema = ClsTester(cfg, ema, dataloader)
ema_acc1, _ = tester_ema.test()
if ema_acc1 > self.best_ema_acc1:
self.best_ema_acc1 = ema_acc1
if | dist.get_rank() | megengine.distributed.get_rank |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    def __init__(self):
        # No parameters or buffers of its own; just initialize the M.Module base.
        super().__init__()
def generate_anchors_opr(self, fm_3x3, fm_stride,
anchor_scales=(8, 16, 32, 64, 128),
anchor_ratios=(1, 2, 3), base_size = 4):
np_anchors = generate_anchors(
base_size=base_size,
ratios=np.array(anchor_ratios),
scales=np.array(anchor_scales))
device = fm_3x3.device
anchors = mge.tensor(np_anchors).to(device)
height, width = fm_3x3.shape[2], fm_3x3.shape[3]
shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
shifts = | F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1) | megengine.functional.stack |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Anchor generator: tiles a set of base anchors over each FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Tile the base anchors over every spatial cell of ``fm_3x3``.

        Returns a detached ``(H * W * A, C)`` tensor of anchors.
        """
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # pixel coordinates of each feature-map cell in the input image
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # one (x, y, x, y) shift per cell, applied to both anchor corners
        offsets = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        num_coords = cell_anchors.shape[1]
        # broadcast add: (1, A, C) + (H*W, 1, 4) -> (H*W, A, C); assumes C == 4
        tiled = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(offsets, axis=1)
        return tiled.reshape(-1, num_coords).detach()

    def forward(self, fpn_fms):
        """Return one flattened anchor tensor per FPN level (strides are
        consumed highest-first, matching the feature-map order)."""
        strides = [128, 64, 32, 16, 8]
        anchors_per_level = []
        for i, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[i]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[i], scales,
                    config.anchor_aspect_ratios, base_size=4))
        return anchors_per_level
class Network(M.Module):
    def __init__(self):
        """Build all sub-modules: backbone, FPN, head, anchor generator and
        criteria, plus example input tensors documenting expected shapes."""
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): ``p = p.detach()`` only rebinds the loop variable; it
        # does not freeze the module's parameters. The commented-out
        # ``requires_grad`` lines suggest freezing was intended — confirm.
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Random example inputs: image NCHW, im_info (6 meta values per image),
        # gt_boxes (up to 500 boxes x 5 values per image).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
im_info = inputs['im_info']
# process the images
normed_images = self.pre_process(inputs['image'])
if self.training:
gt_boxes = inputs['gt_boxes']
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 128,64,32,16,8, p6->p2
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
loss_dict = self.criteria(
pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
rpn_iou_list, gt_boxes, im_info)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
pred_reg_list, rpn_iou_list)
return pred_boxes
def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
assert rpn_cls_list[0].shape[0] == 1
all_anchors = F.concat(anchors_list, axis = 0)
rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
n, c = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
pred_boxes = | F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1) | megengine.functional.concat |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Anchor generator: tiles a set of base anchors over each FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Tile base anchors over every spatial cell of one feature map.

        Args:
            fm_3x3: feature map whose H/W define the anchor grid.
            fm_stride: stride of the feature map w.r.t. the input image.
            anchor_scales: scales passed to ``generate_anchors``.
            anchor_ratios: aspect ratios passed to ``generate_anchors``.
            base_size: base anchor size passed to ``generate_anchors``.

        Returns:
            Detached tensor of anchors, shape ``(H * W * A, C)``.
        """
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # pixel coordinates of each feature-map cell in the input image
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        # one (x, y, x, y) shift per cell, applied to both anchor corners
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # broadcast add: (1, A, C) + (H*W, 1, 4) -> (H*W, A, C); assumes C == 4
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors

    def forward(self, fpn_fms):
        """Return one flattened anchor tensor per FPN level; strides are
        consumed highest-first (the [8..128] list is reversed below)."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
    """RetinaNet-style detector: ResNet50 + FPN backbone, shared head,
    anchor generator and training criteria."""

    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): ``p = p.detach()`` only rebinds the loop variable; it
        # does not freeze the module's parameters. The commented-out
        # ``requires_grad`` lines suggest freezing was intended — confirm.
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Random example inputs: image NCHW, im_info (6 meta values per image),
        # gt_boxes (up to 500 boxes x 5 values per image).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }

    def pre_process(self, images):
        """Channel-wise normalize the batch with the configured mean/std and
        pad spatial dims to a multiple of 64 via ``get_padded_tensor``."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images

    def forward(self, inputs):
        """Dispatch to the training or inference path based on ``self.training``."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)

    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone + head and return the training loss dictionary."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict

    def _forward_test(self, image, im_info):
        """Run backbone + head and decode raw predictions into boxes."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes

    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level predictions (batch size must be 1) into one tensor
        of boxes with their cls score and iou score concatenated per row."""
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # [0] drops the leading batch dim (asserted to be 1 above)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # expand anchors so each one lines up with its predicted offsets
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    def __init__(self):
        # No parameters or buffers of its own; just initialize the M.Module base.
        super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
ignore_mask = F.expand_dims(ignore_mask, axis=0)
overlaps = overlaps * ignore_mask
index = F.argmax(overlaps, axis = 1)
value = F.nn.indexing_one_hot(overlaps, index, 1)
value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
res.append(value)
result = | F.concat(res, 0) | megengine.functional.concat |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Anchor generator: tiles a set of base anchors over each FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Tile base anchors over every spatial cell of one feature map.

        Args:
            fm_3x3: feature map whose H/W define the anchor grid.
            fm_stride: stride of the feature map w.r.t. the input image.
            anchor_scales: scales passed to ``generate_anchors``.
            anchor_ratios: aspect ratios passed to ``generate_anchors``.
            base_size: base anchor size passed to ``generate_anchors``.

        Returns:
            Detached tensor of anchors, shape ``(H * W * A, C)``.
        """
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # pixel coordinates of each feature-map cell in the input image
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        # one (x, y, x, y) shift per cell, applied to both anchor corners
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # broadcast add: (1, A, C) + (H*W, 1, 4) -> (H*W, A, C); assumes C == 4
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors

    def forward(self, fpn_fms):
        """Return one flattened anchor tensor per FPN level; strides are
        consumed highest-first (the [8..128] list is reversed below)."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
    """RetinaNet-style detector: ResNet50 + FPN backbone, shared head,
    anchor generator and training criteria."""

    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): ``p = p.detach()`` only rebinds the loop variable; it
        # does not freeze the module's parameters. The commented-out
        # ``requires_grad`` lines suggest freezing was intended — confirm.
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Random example inputs: image NCHW, im_info (6 meta values per image),
        # gt_boxes (up to 500 boxes x 5 values per image).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }

    def pre_process(self, images):
        """Channel-wise normalize the batch with the configured mean/std and
        pad spatial dims to a multiple of 64 via ``get_padded_tensor``."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images

    def forward(self, inputs):
        """Dispatch to the training or inference path based on ``self.training``."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)

    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone + head and return the training loss dictionary."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict

    def _forward_test(self, image, im_info):
        """Run backbone + head and decode raw predictions into boxes."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes

    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level predictions (batch size must be 1) into one tensor
        of boxes with their cls score and iou score concatenated per row."""
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # [0] drops the leading batch dim (asserted to be 1 above)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # expand anchors so each one lines up with its predicted offsets
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    def __init__(self):
        # No parameters or buffers of its own; just initialize the M.Module base.
        super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
ignore_mask = F.expand_dims(ignore_mask, axis=0)
overlaps = overlaps * ignore_mask
index = F.argmax(overlaps, axis = 1)
value = F.nn.indexing_one_hot(overlaps, index, 1)
value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
res.append(value)
result = F.concat(res, 0)
return result
def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
anchors_list, rpn_iou_list, boxes, im_info):
all_anchors_list = [F.concat([a, i*F.ones([a.shape[0], 1]).to(a.device)], axis=1)
for i, a in enumerate(anchors_list)]
all_anchors_final = F.concat(all_anchors_list, axis = 0)
rpn_bbox_offset_final = F.concat(pred_reg_list, axis = 1)
rpn_cls_prob_final = F.concat(pred_cls_list, axis = 1)
rpn_iou_prob_final = F.concat(rpn_iou_list, axis = 1)
rpn_num_per_points_final = F.concat(rpn_num_prob_list, axis = 1)
rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
rpn_bbox_offset_final)
n = rpn_labels.shape[0]
target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)
rpn_cls_prob_final = rpn_cls_prob_final
offsets_final = rpn_bbox_offset_final
target_boxes = target_boxes[0]
rpn_labels = rpn_labels.transpose(2, 0, 1)
labels = rpn_labels[0]
cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
labels, alpha = config.focal_loss_alpha, gamma = config.focal_loss_gamma)
rpn_bbox_loss = smooth_l1_loss_retina(offsets_final, target_boxes, labels)
rpn_labels = | F.expand_dims(labels, axis=2) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Anchor generator: tiles a set of base anchors over each FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Tile base anchors over every spatial cell of one feature map.

        Args:
            fm_3x3: feature map whose H/W define the anchor grid.
            fm_stride: stride of the feature map w.r.t. the input image.
            anchor_scales: scales passed to ``generate_anchors``.
            anchor_ratios: aspect ratios passed to ``generate_anchors``.
            base_size: base anchor size passed to ``generate_anchors``.

        Returns:
            Detached tensor of anchors, shape ``(H * W * A, C)``.
        """
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # pixel coordinates of each feature-map cell in the input image
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        # one (x, y, x, y) shift per cell, applied to both anchor corners
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # broadcast add: (1, A, C) + (H*W, 1, 4) -> (H*W, A, C); assumes C == 4
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors

    def forward(self, fpn_fms):
        """Return one flattened anchor tensor per FPN level; strides are
        consumed highest-first (the [8..128] list is reversed below)."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
    """RetinaNet-style detector: ResNet50 + FPN backbone, shared head,
    anchor generator and training criteria."""

    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): ``p = p.detach()`` only rebinds the loop variable; it
        # does not freeze the module's parameters. The commented-out
        # ``requires_grad`` lines suggest freezing was intended — confirm.
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Random example inputs: image NCHW, im_info (6 meta values per image),
        # gt_boxes (up to 500 boxes x 5 values per image).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }

    def pre_process(self, images):
        """Channel-wise normalize the batch with the configured mean/std and
        pad spatial dims to a multiple of 64 via ``get_padded_tensor``."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images

    def forward(self, inputs):
        """Dispatch to the training or inference path based on ``self.training``."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)

    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone + head and return the training loss dictionary."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict

    def _forward_test(self, image, im_info):
        """Run backbone + head and decode raw predictions into boxes."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes

    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level predictions (batch size must be 1) into one tensor
        of boxes with their cls score and iou score concatenated per row."""
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # [0] drops the leading batch dim (asserted to be 1 above)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # expand anchors so each one lines up with its predicted offsets
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Loss criteria for the retina head: focal classification loss,
    smooth-L1 box regression loss and an IoU-prediction L1 loss."""

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """Per image, compute each predicted box's best IoU against the
        non-ignored ground-truth boxes.

        Args:
            boxes: padded per-image gt boxes; the valid count for image ``i``
                is stored in ``im_info[i, 5]``.
            im_info: per-image meta information.
            all_anchors: flattened anchors shared across the batch.
            rpn_bbox_offsets: predicted box deltas, one set per image.

        Returns:
            Tensor of best-IoU values, one per predicted box per image.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # slice away the gt padding using the per-image valid count
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out IoUs against gt boxes labelled "ignore"
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            overlaps = overlaps * F.expand_dims(ignore_mask, axis=0)
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            res.append(F.expand_dims(F.expand_dims(value, axis=1), axis=0))
        return F.concat(res, 0)

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Match anchors to ground truth and return the loss dictionary
        (``rpn_cls_loss``, ``rpn_bbox_loss``, ``rpn_iou_loss``).

        ``rpn_num_prob_list`` is accepted for interface compatibility but does
        not contribute to any loss here.
        """
        # tag each anchor with its FPN-level index in an extra column
        all_anchors_list = [F.concat([a, i * F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # keep only the first of the two target/label sets produced per anchor
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)[0]
        labels = rpn_labels.transpose(2, 0, 1)[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target,
            F.expand_dims(labels, axis=2))
        return {
            'rpn_cls_loss': cls_loss,
            'rpn_bbox_loss': 2 * rpn_bbox_loss,
            'rpn_iou_loss': 2 * rpn_iou_loss,
        }
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = | M.Sequential(*cls_subnet) | megengine.module.Sequential |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Anchor generator: tiles a set of base anchors over each FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Tile base anchors over every spatial cell of one feature map.

        Args:
            fm_3x3: feature map whose H/W define the anchor grid.
            fm_stride: stride of the feature map w.r.t. the input image.
            anchor_scales: scales passed to ``generate_anchors``.
            anchor_ratios: aspect ratios passed to ``generate_anchors``.
            base_size: base anchor size passed to ``generate_anchors``.

        Returns:
            Detached tensor of anchors, shape ``(H * W * A, C)``.
        """
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # pixel coordinates of each feature-map cell in the input image
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        # one (x, y, x, y) shift per cell, applied to both anchor corners
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # broadcast add: (1, A, C) + (H*W, 1, 4) -> (H*W, A, C); assumes C == 4
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors

    def forward(self, fpn_fms):
        """Return one flattened anchor tensor per FPN level; strides are
        consumed highest-first (the [8..128] list is reversed below)."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
    """Single-stage detector: ResNet50 + FPN backbone, a shared RetinaNet-style
    head, an anchor generator and the training criteria wired together."""
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): `p = p.detach()` only rebinds the local loop variable;
        # it does not modify the module's parameters, so this "freeze" looks
        # like a no-op -- confirm the intended MegEngine freezing mechanism.
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # ------------- example input tensors (random placeholders) --------- #
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize images with the configured mean/std, then pad so the
        spatial size is a multiple of 64 (so every FPN stride divides it)."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the train or test path depending on module mode.

        `inputs` is a dict with keys 'image', 'im_info' and (for training)
        'gt_boxes'. Returns a loss dict in training, decoded boxes otherwise.
        """
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone/head/anchors and return the dict of training losses."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
                pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
                rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Run the network and decode predictions into boxes (batch size 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level offsets against their anchors; returns a 2-D
        tensor of decoded box columns followed by cls_score and iou_prob."""
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # (n, 1, c) -> (n, c): effectively a copy of the anchors; presumably
        # written for >1 anchors per location -- verify.
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Training criteria: focal classification loss, smooth-l1 box loss and
    an l1 loss on the predicted IoU, computed from flattened head outputs."""

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """For each image, decode the predicted offsets into boxes and return
        each prediction's best IoU against the non-ignored gt boxes.

        Returns a tensor of shape (batch, num_predictions, 1); gradients are
        blocked through the offsets (detach).
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # im_info[i, 5] holds the number of valid gt boxes for image i
            # (presumably -- confirm against the dataloader).
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            # (an, 1, ac) -> (an, ac): effectively a copy; presumably written
            # for >1 anchors per location -- verify.
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out overlaps with gt boxes labelled "ignore"
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            # best IoU per prediction
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        return F.concat(res, 0)

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Compute the loss dict.

        Returns {'rpn_cls_loss', 'rpn_bbox_loss', 'rpn_iou_loss'}; the box
        and IoU losses are weighted by a factor of 2.
        """
        # Tag each anchor with its FPN-level index before flattening levels.
        all_anchors_list = [F.concat([a, i * F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        # NOTE: rpn_num_prob_list is accepted for interface compatibility but
        # is not used by this criteria (the original concatenated it and then
        # discarded the result).
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # keep only the first of the two target/label channels
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)[0]
        labels = rpn_labels.transpose(2, 0, 1)[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target,
            F.expand_dims(labels, axis=2))
        return {
            'rpn_cls_loss': cls_loss,
            'rpn_bbox_loss': 2 * rpn_bbox_loss,
            'rpn_iou_loss': 2 * rpn_iou_loss,
        }
class RetinaNetHead(M.Module):
    # NOTE(review): this copy of the class is cut off mid-__init__ (the
    # predictor convs and forward() are missing) -- it looks like a
    # file-concatenation artifact; a complete definition appears later
    # in the file.
    def __init__(self):
        super().__init__()
        num_convs = 4
        in_channels = 256
        # two parallel towers of num_convs x (3x3 conv + ReLU):
        # one for classification, one for box regression
        cls_subnet, bbox_subnet = [], []
        for _ in range(num_convs):
            cls_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            cls_subnet.append(M.ReLU())
            bbox_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            bbox_subnet.append(M.ReLU())
        self.cls_subnet = M.Sequential(*cls_subnet)
        self.bbox_subnet = M.Sequential(*bbox_subnet)
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the full anchor set for every FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Return all anchors of one feature map, shifted to image coords.

        The base anchors are tiled over every spatial position of `fm_3x3`
        with a step of `fm_stride` pixels; result is (H*W*A, 4), detached.
        """
        base_np = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        base_anchors = mge.tensor(base_np).to(dev)
        fm_height, fm_width = fm_3x3.shape[2], fm_3x3.shape[3]
        # pixel coordinates of every spatial position on this level
        xs = F.linspace(0, fm_width - 1, fm_width).to(dev) * fm_stride
        ys = F.linspace(0, fm_height - 1, fm_height).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (fm_height, fm_width)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (fm_height, fm_width)).flatten()
        shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        n_coords = base_anchors.shape[1]
        # (1, A, 4) + (H*W, 1, 4) -> (H*W, A, 4)
        shifted = F.expand_dims(base_anchors, axis=0) + F.expand_dims(shifts, axis=1)
        return shifted.reshape(-1, n_coords).detach()

    def forward(self, fpn_fms):
        """Build the per-level anchor list for feature maps ordered p6->p2."""
        strides = [128, 64, 32, 16, 8]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size = 4))
        return anchors_per_level
class Network(M.Module):
    """Single-stage detector: ResNet50 + FPN backbone, a shared RetinaNet-style
    head, an anchor generator and the training criteria wired together."""
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): `p = p.detach()` only rebinds the local loop variable;
        # it does not modify the module's parameters, so this "freeze" looks
        # like a no-op -- confirm the intended MegEngine freezing mechanism.
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # ------------- example input tensors (random placeholders) --------- #
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize images with the configured mean/std, then pad so the
        spatial size is a multiple of 64 (so every FPN stride divides it)."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the train or test path depending on module mode.

        `inputs` is a dict with keys 'image', 'im_info' and (for training)
        'gt_boxes'. Returns a loss dict in training, decoded boxes otherwise.
        """
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone/head/anchors and return the dict of training losses."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
                pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
                rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Run the network and decode predictions into boxes (batch size 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level offsets against their anchors; returns a 2-D
        tensor of decoded box columns followed by cls_score and iou_prob."""
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # (n, 1, c) -> (n, c): effectively a copy of the anchors; presumably
        # written for >1 anchors per location -- verify.
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Training criteria: focal classification loss, smooth-l1 box loss and
    an l1 loss on the predicted IoU, computed from flattened head outputs."""

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """For each image, decode the predicted offsets into boxes and return
        each prediction's best IoU against the non-ignored gt boxes.

        Returns a tensor of shape (batch, num_predictions, 1); gradients are
        blocked through the offsets (detach).
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # im_info[i, 5] holds the number of valid gt boxes for image i
            # (presumably -- confirm against the dataloader).
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            # (an, 1, ac) -> (an, ac): effectively a copy; presumably written
            # for >1 anchors per location -- verify.
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out overlaps with gt boxes labelled "ignore"
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            # best IoU per prediction
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        return F.concat(res, 0)

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Compute the loss dict.

        Returns {'rpn_cls_loss', 'rpn_bbox_loss', 'rpn_iou_loss'}; the box
        and IoU losses are weighted by a factor of 2.
        """
        # Tag each anchor with its FPN-level index before flattening levels.
        all_anchors_list = [F.concat([a, i * F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        # NOTE: rpn_num_prob_list is accepted for interface compatibility but
        # is not used by this criteria (the original concatenated it and then
        # discarded the result).
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # keep only the first of the two target/label channels
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)[0]
        labels = rpn_labels.transpose(2, 0, 1)[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target,
            F.expand_dims(labels, axis=2))
        return {
            'rpn_cls_loss': cls_loss,
            'rpn_bbox_loss': 2 * rpn_bbox_loss,
            'rpn_iou_loss': 2 * rpn_iou_loss,
        }
class RetinaNetHead(M.Module):
    """Shared per-level head: two conv towers (cls / bbox) feeding the
    cls_score, bbox_pred, iou_pred and num_pred predictor convs."""
    # NOTE(review): this copy lacks forward(); a complete definition with
    # forward() appears later in the file (concatenation artifact).
    def __init__(self):
        super().__init__()
        num_convs = 4
        in_channels = 256
        cls_subnet, bbox_subnet = [], []
        # two parallel towers of num_convs x (3x3 conv + ReLU)
        for _ in range(num_convs):
            cls_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            cls_subnet.append(M.ReLU())
            bbox_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            bbox_subnet.append(M.ReLU())
        self.cls_subnet = M.Sequential(*cls_subnet)
        self.bbox_subnet = M.Sequential(*bbox_subnet)
        # predictor
        self.cls_score = M.Conv2d(
            in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
            kernel_size=3, stride=1, padding=1)
        self.bbox_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 4 * 1,
            kernel_size=3, stride=1, padding=1)
        self.iou_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self.num_pred = M.Conv2d(in_channels,
            config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self._init_weights()
    def _init_weights(self):
        # Initialization: normal(std=0.01) weights, zero bias for every conv.
        for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
                self.cls_score, self.bbox_pred, self.iou_pred]:
            for layer in modules.modules():
                if isinstance(layer, M.Conv2d):
                    M.init.normal_(layer.weight, std=0.01)
                    M.init.fill_(layer.bias, 0)
        prior_prob = 0.01
        # Use prior in model initialization to improve stability: bias chosen
        # so the initial sigmoid(cls_score) is ~prior_prob (RetinaNet init).
        bias_value = -(math.log((1 - prior_prob) / prior_prob))
        M.init.fill_(self.cls_score.bias, bias_value)
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the full anchor set for every FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Return all anchors of one feature map, shifted to image coords.

        The base anchors are tiled over every spatial position of `fm_3x3`
        with a step of `fm_stride` pixels; result is (H*W*A, 4), detached.
        """
        base_np = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        base_anchors = mge.tensor(base_np).to(dev)
        fm_height, fm_width = fm_3x3.shape[2], fm_3x3.shape[3]
        # pixel coordinates of every spatial position on this level
        xs = F.linspace(0, fm_width - 1, fm_width).to(dev) * fm_stride
        ys = F.linspace(0, fm_height - 1, fm_height).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (fm_height, fm_width)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (fm_height, fm_width)).flatten()
        shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        n_coords = base_anchors.shape[1]
        # (1, A, 4) + (H*W, 1, 4) -> (H*W, A, 4)
        shifted = F.expand_dims(base_anchors, axis=0) + F.expand_dims(shifts, axis=1)
        return shifted.reshape(-1, n_coords).detach()

    def forward(self, fpn_fms):
        """Build the per-level anchor list for feature maps ordered p6->p2."""
        strides = [128, 64, 32, 16, 8]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size = 4))
        return anchors_per_level
class Network(M.Module):
    """Single-stage detector: ResNet50 + FPN backbone, a shared RetinaNet-style
    head, an anchor generator and the training criteria wired together."""
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): `p = p.detach()` only rebinds the local loop variable;
        # it does not modify the module's parameters, so this "freeze" looks
        # like a no-op -- confirm the intended MegEngine freezing mechanism.
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # ------------- example input tensors (random placeholders) --------- #
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize images with the configured mean/std, then pad so the
        spatial size is a multiple of 64 (so every FPN stride divides it)."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the train or test path depending on module mode.

        `inputs` is a dict with keys 'image', 'im_info' and (for training)
        'gt_boxes'. Returns a loss dict in training, decoded boxes otherwise.
        """
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone/head/anchors and return the dict of training losses."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
                pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
                rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Run the network and decode predictions into boxes (batch size 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level offsets against their anchors; returns a 2-D
        tensor of decoded box columns followed by cls_score and iou_prob."""
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # (n, 1, c) -> (n, c): effectively a copy of the anchors; presumably
        # written for >1 anchors per location -- verify.
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Training criteria: focal classification loss, smooth-l1 box loss and
    an l1 loss on the predicted IoU, computed from flattened head outputs."""

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """For each image, decode the predicted offsets into boxes and return
        each prediction's best IoU against the non-ignored gt boxes.

        Returns a tensor of shape (batch, num_predictions, 1); gradients are
        blocked through the offsets (detach).
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # im_info[i, 5] holds the number of valid gt boxes for image i
            # (presumably -- confirm against the dataloader).
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            # (an, 1, ac) -> (an, ac): effectively a copy; presumably written
            # for >1 anchors per location -- verify.
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out overlaps with gt boxes labelled "ignore"
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            # best IoU per prediction
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        return F.concat(res, 0)

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Compute the loss dict.

        Returns {'rpn_cls_loss', 'rpn_bbox_loss', 'rpn_iou_loss'}; the box
        and IoU losses are weighted by a factor of 2.
        """
        # Tag each anchor with its FPN-level index before flattening levels.
        all_anchors_list = [F.concat([a, i * F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        # NOTE: rpn_num_prob_list is accepted for interface compatibility but
        # is not used by this criteria (the original concatenated it and then
        # discarded the result).
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # keep only the first of the two target/label channels
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)[0]
        labels = rpn_labels.transpose(2, 0, 1)[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target,
            F.expand_dims(labels, axis=2))
        return {
            'rpn_cls_loss': cls_loss,
            'rpn_bbox_loss': 2 * rpn_bbox_loss,
            'rpn_iou_loss': 2 * rpn_iou_loss,
        }
class RetinaNetHead(M.Module):
    """Shared per-level head: two conv towers (cls / bbox) feeding four
    predictors: class scores, box offsets, IoU and "num" probabilities."""
    def __init__(self):
        super().__init__()
        num_convs = 4
        in_channels = 256
        cls_subnet, bbox_subnet = [], []
        # two parallel towers of num_convs x (3x3 conv + ReLU)
        for _ in range(num_convs):
            cls_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            cls_subnet.append(M.ReLU())
            bbox_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            bbox_subnet.append(M.ReLU())
        self.cls_subnet = M.Sequential(*cls_subnet)
        self.bbox_subnet = M.Sequential(*bbox_subnet)
        # predictor
        self.cls_score = M.Conv2d(
            in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
            kernel_size=3, stride=1, padding=1)
        self.bbox_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 4 * 1,
            kernel_size=3, stride=1, padding=1)
        self.iou_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self.num_pred = M.Conv2d(in_channels,
            config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self._init_weights()
    def _init_weights(self):
        # Initialization: normal(std=0.01) weights, zero bias for every conv.
        for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
                self.cls_score, self.bbox_pred, self.iou_pred]:
            for layer in modules.modules():
                if isinstance(layer, M.Conv2d):
                    M.init.normal_(layer.weight, std=0.01)
                    M.init.fill_(layer.bias, 0)
        prior_prob = 0.01
        # Use prior in model initialization to improve stability: bias chosen
        # so the initial sigmoid(cls_score) is ~prior_prob (RetinaNet init).
        bias_value = -(math.log((1 - prior_prob) / prior_prob))
        M.init.fill_(self.cls_score.bias, bias_value)
    def forward(self, features):
        """Apply the head to every FPN level.

        Returns four lists (one entry per level), each entry reshaped from
        NCHW to (batch, H*W*num_cell_anchors, C).
        """
        cls_prob_list, rpn_num_prob_list, pred_bbox_list, rpn_iou_prob_list = [], [], [], []
        for feature in features:
            rpn_cls_conv = self.cls_subnet(feature)
            cls_score = self.cls_score(rpn_cls_conv)
            rpn_num_prob = self.num_pred(rpn_cls_conv)
            cls_prob = F.sigmoid(cls_score)
            rpn_box_conv = self.bbox_subnet(feature)
            offsets = self.bbox_pred(rpn_box_conv)
            rpn_iou_prob = self.iou_pred(rpn_box_conv)
            cls_prob_list.append(cls_prob)
            pred_bbox_list.append(offsets)
            rpn_iou_prob_list.append(rpn_iou_prob)
            rpn_num_prob_list.append(rpn_num_prob)
        assert cls_prob_list[0].ndim == 4
        # NCHW -> (N, H*W*A, C). iou/num reshape with (num_classes-1) channels
        # per anchor even though their convs emit 1 per anchor -- presumably
        # num_classes == 2 here; verify against config.
        pred_cls_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in cls_prob_list]
        pred_reg_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, 4)
            for _ in pred_bbox_list]
        rpn_iou_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in rpn_iou_prob_list]
        rpn_num_prob_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in rpn_num_prob_list]
        return pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list
class FPN(M.Module):
    """
    This module implements Feature Pyramid Network.
    It creates pyramid features built on top of some input feature maps.
    """
    # NOTE(review): this copy is truncated after the p6 conv -- the
    # lateral/output convs are built but never registered on self and
    # forward() is missing; looks like a file-concatenation artifact.
    def __init__(self, bottom_up):
        super(FPN, self).__init__()
        in_channels = [512, 1024, 2048]
        fpn_dim = 256
        use_bias =True
        # one 1x1 lateral conv and one 3x3 output conv per backbone stage,
        # all MSRA-initialized with zero bias
        lateral_convs, output_convs = [], []
        for idx, in_channels in enumerate(in_channels):
            lateral_conv = M.Conv2d(
                in_channels, fpn_dim, kernel_size=1, bias=use_bias)
            output_conv = M.Conv2d(
                fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
            M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
            M.init.msra_normal_(output_conv.weight, mode="fan_in")
            if use_bias:
                M.init.fill_(lateral_conv.bias, 0)
                M.init.fill_(output_conv.bias, 0)
            lateral_convs.append(lateral_conv)
            output_convs.append(output_conv)
        # extra stride-2 conv producing the p6 level
        self.p6 = M.Conv2d(fpn_dim, fpn_dim, kernel_size=3, stride=2, padding=1, bias=use_bias)
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the full anchor set for every FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Return all anchors of one feature map, shifted to image coords.

        The base anchors are tiled over every spatial position of `fm_3x3`
        with a step of `fm_stride` pixels; result is (H*W*A, 4), detached.
        """
        base_np = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        base_anchors = mge.tensor(base_np).to(dev)
        fm_height, fm_width = fm_3x3.shape[2], fm_3x3.shape[3]
        # pixel coordinates of every spatial position on this level
        xs = F.linspace(0, fm_width - 1, fm_width).to(dev) * fm_stride
        ys = F.linspace(0, fm_height - 1, fm_height).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (fm_height, fm_width)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (fm_height, fm_width)).flatten()
        shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        n_coords = base_anchors.shape[1]
        # (1, A, 4) + (H*W, 1, 4) -> (H*W, A, 4)
        shifted = F.expand_dims(base_anchors, axis=0) + F.expand_dims(shifts, axis=1)
        return shifted.reshape(-1, n_coords).detach()

    def forward(self, fpn_fms):
        """Build the per-level anchor list for feature maps ordered p6->p2."""
        strides = [128, 64, 32, 16, 8]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size = 4))
        return anchors_per_level
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
# self.RPN = RPN(config.rpn_channel)
self.head = RetinaNetHead()
# -------------------------- buid the anchor generator -------------- #
self.anchor_generator = RetinaNetAnchorV2()
# -------------------------- buid the criteria ---------------------- #
self.criteria = RetinaNetCriteriaV2()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 6]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
im_info = inputs['im_info']
# process the images
normed_images = self.pre_process(inputs['image'])
if self.training:
gt_boxes = inputs['gt_boxes']
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 128,64,32,16,8, p6->p2
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
loss_dict = self.criteria(
pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
rpn_iou_list, gt_boxes, im_info)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
pred_reg_list, rpn_iou_list)
return pred_boxes
def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
    """Decode per-level predictions into one tensor of
    [x1, y1, x2, y2, cls_score, iou_prob] rows; batch size must be 1."""
    assert rpn_cls_list[0].shape[0] == 1
    all_anchors = F.concat(anchors_list, axis = 0)
    # Concatenate levels along the anchor axis, then drop the batch dim.
    rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
    rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
    rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
    rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
    rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
    rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
    n, c = all_anchors.shape[0], all_anchors.shape[1]
    # NOTE(review): expand_dims already yields shape (n, 1, c); the
    # broadcast_to is a no-op and the reshape recovers (n, c) — confirm
    # whether a per-class tiling was intended here.
    anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
    rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
    pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
    return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Training criteria: focal classification loss, smooth-L1 box
    regression loss and an L1 loss on the predicted IoU."""

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """Per-anchor IoU targets from the decoded predicted boxes.

        :param boxes: ground-truth boxes; column 4 holds the class label
            (``config.anchor_ignore_label`` marks ignored entries).
        :param im_info: per-image metadata; ``im_info[i, 5]`` is the number
            of valid ground-truth boxes for image i.
        :param all_anchors: anchors shared across the batch.
        :param rpn_bbox_offsets: (batch, A, 4) predicted box deltas.
        :return: per-image best-match IoU values, concatenated over batch.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            # detach(): IoU targets must not backprop into the offsets.
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            m = offsets.shape[0]  # NOTE(review): unused local
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            # NOTE(review): expand_dims already gives (an, 1, ac); the
            # broadcast_to is a no-op before the reshape back to (an, ac).
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # Zero the overlap columns belonging to ignored gt boxes.
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            # Best-matching gt per decoded box and its IoU value.
            index = F.argmax(overlaps, axis = 1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        result = F.concat(res, 0)
        return result

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Concatenate per-level predictions, build targets, return losses."""
        # Append the pyramid-level index as an extra anchor column.
        all_anchors_list = [F.concat([a, i*F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis = 0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis = 1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis = 1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis = 1)
        rpn_num_per_points_final = F.concat(rpn_num_prob_list, axis = 1)  # NOTE(review): unused
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # (n, A, 2, 4) -> (2, n, A, 4): two target sets per anchor.
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)
        rpn_cls_prob_final = rpn_cls_prob_final  # NOTE(review): no-op assignment
        offsets_final = rpn_bbox_offset_final
        # Keep only the first target/label set.
        target_boxes = target_boxes[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha = config.focal_loss_alpha, gamma = config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(offsets_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {}
        loss_dict['rpn_cls_loss'] = cls_loss
        # Box and IoU losses are up-weighted by a factor of 2.
        loss_dict['rpn_bbox_loss'] = 2 * rpn_bbox_loss
        loss_dict['rpn_iou_loss'] = 2 * rpn_iou_loss
        return loss_dict
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = M.Sequential(*bbox_subnet)
# predictor
self.cls_score = M.Conv2d(
in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
kernel_size=3, stride=1, padding=1)
self.bbox_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 4 * 1,
kernel_size=3, stride=1, padding=1)
self.iou_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self.num_pred = M.Conv2d(in_channels,
config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self._init_weights()
def _init_weights(self):
# Initialization
for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
self.cls_score, self.bbox_pred, self.iou_pred]:
for layer in modules.modules():
if isinstance(layer, M.Conv2d):
M.init.normal_(layer.weight, std=0.01)
M.init.fill_(layer.bias, 0)
prior_prob = 0.01
# Use prior in model initialization to improve stability
bias_value = -(math.log((1 - prior_prob) / prior_prob))
M.init.fill_(self.cls_score.bias, bias_value)
def forward(self, features):
cls_prob_list, rpn_num_prob_list, pred_bbox_list, rpn_iou_prob_list = [], [], [], []
for feature in features:
rpn_cls_conv = self.cls_subnet(feature)
cls_score = self.cls_score(rpn_cls_conv)
rpn_num_prob = self.num_pred(rpn_cls_conv)
cls_prob = F.sigmoid(cls_score)
rpn_box_conv = self.bbox_subnet(feature)
offsets = self.bbox_pred(rpn_box_conv)
rpn_iou_prob = self.iou_pred(rpn_box_conv)
cls_prob_list.append(cls_prob)
pred_bbox_list.append(offsets)
rpn_iou_prob_list.append(rpn_iou_prob)
rpn_num_prob_list.append(rpn_num_prob)
assert cls_prob_list[0].ndim == 4
pred_cls_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in cls_prob_list]
pred_reg_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, 4)
for _ in pred_bbox_list]
rpn_iou_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in rpn_iou_prob_list]
rpn_num_prob_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in rpn_num_prob_list]
return pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [512, 1024, 2048]
fpn_dim = 256
use_bias =True
lateral_convs, output_convs = [], []
for idx, in_channels in enumerate(in_channels):
lateral_conv = M.Conv2d(
in_channels, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
M.init.msra_normal_(output_conv.weight, mode="fan_in")
if use_bias:
M.init.fill_(lateral_conv.bias, 0)
M.init.fill_(output_conv.bias, 0)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
self.p6 = M.Conv2d(fpn_dim, fpn_dim, kernel_size=3, stride=2, padding=1, bias=use_bias)
self.p7 = | M.Conv2d(fpn_dim, fpn_dim, kernel_size=3, stride=2, padding=1, bias=use_bias) | megengine.module.Conv2d |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates flattened per-level anchor boxes for FPN feature maps."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Return every anchor for one feature map as a detached (H*W*A, 4)
        tensor: the base cell anchors shifted to each spatial location."""
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel coordinates of every feature-map cell along each axis.
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        cell_shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        c = cell_anchors.shape[1]
        # (1, A, 4) + (H*W, 1, 4) -> (H*W, A, 4), then flatten to rows.
        merged = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(cell_shifts, axis=1)
        return merged.reshape(-1, c).detach()

    def forward(self, fpn_fms):
        """One flattened anchor tensor per FPN level, highest stride first
        (strides 128, 64, 32, 16, 8 to match the fpn_fms ordering)."""
        strides = [8, 16, 32, 64, 128]
        strides.reverse()
        anchors_per_level = []
        for lvl, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[lvl]
            level_anchors = self.generate_anchors_opr(
                fm_3x3, strides[lvl], scales,
                config.anchor_aspect_ratios, base_size = 4)
            anchors_per_level.append(level_anchors)
        return anchors_per_level
class Network(M.Module):
    """End-to-end RetinaNet-style detector: ResNet50+FPN backbone, shared
    prediction head, anchor generation, and (in training) loss criteria."""

    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable has no effect on
                # the module's parameters — nothing is actually frozen here.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same no-op as above.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # ------------------ dummy input tensors (random data) -------------- #
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }

    def pre_process(self, images):
        """Channel-wise normalize NCHW images, then pad via get_padded_tensor
        with alignment 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images

    def forward(self, inputs):
        """Training path returns a loss dict; eval path returns decoded boxes."""
        im_info = inputs['im_info']
        # Normalize and pad the raw images before either path.
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)

    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone/head/anchors and compute the loss dict."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict

    def _forward_test(self, image, im_info):
        """Run backbone/head/anchors and decode raw boxes (no NMS here)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes

    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode predictions into rows of [x1, y1, x2, y2, score, iou];
        batch size must be 1."""
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # Concatenate levels along the anchor axis, drop the batch dim.
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # NOTE(review): expand_dims already yields (n, 1, c); broadcast_to is
        # a no-op and the reshape recovers (n, c).
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Training criteria: focal classification loss, smooth-L1 box
    regression loss and an L1 loss on the predicted IoU.

    Cleanups vs. the previous revision: removed a no-op self-assignment
    (``rpn_cls_prob_final = rpn_cls_prob_final``), a redundant alias, and
    the unused locals ``m`` and ``rpn_num_per_points_final``.
    """

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """Per-anchor IoU targets from the decoded predicted boxes.

        :param boxes: ground-truth boxes; column 4 holds the class label
            (``config.anchor_ignore_label`` marks ignored entries).
        :param im_info: per-image metadata; ``im_info[i, 5]`` is the number
            of valid ground-truth boxes for image i.
        :param all_anchors: anchors shared across the batch.
        :param rpn_bbox_offsets: (batch, A, 4) predicted box deltas.
        :return: per-image best-match IoU values, concatenated over batch.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            # detach(): IoU targets must not backprop into the offsets.
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # Zero the overlap columns belonging to ignored gt boxes.
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            # Best-matching gt per decoded box and its IoU value.
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        return F.concat(res, 0)

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Concatenate per-level predictions, build targets, return the
        dict of weighted losses.

        ``rpn_num_prob_list`` is accepted for interface compatibility but
        is not used by any loss term.
        """
        # Append the pyramid-level index as an extra anchor column.
        all_anchors_list = [F.concat([a, i * F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # (n, A, 2, 4) -> (2, n, A, 4); keep only the first target set.
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)[0]
        labels = rpn_labels.transpose(2, 0, 1)[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {}
        loss_dict['rpn_cls_loss'] = cls_loss
        # Box and IoU losses are up-weighted by a factor of 2.
        loss_dict['rpn_bbox_loss'] = 2 * rpn_bbox_loss
        loss_dict['rpn_iou_loss'] = 2 * rpn_iou_loss
        return loss_dict
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = M.Sequential(*bbox_subnet)
# predictor
self.cls_score = M.Conv2d(
in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
kernel_size=3, stride=1, padding=1)
self.bbox_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 4 * 1,
kernel_size=3, stride=1, padding=1)
self.iou_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self.num_pred = M.Conv2d(in_channels,
config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self._init_weights()
def _init_weights(self):
# Initialization
for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
self.cls_score, self.bbox_pred, self.iou_pred]:
for layer in modules.modules():
if isinstance(layer, M.Conv2d):
M.init.normal_(layer.weight, std=0.01)
M.init.fill_(layer.bias, 0)
prior_prob = 0.01
# Use prior in model initialization to improve stability
bias_value = -(math.log((1 - prior_prob) / prior_prob))
M.init.fill_(self.cls_score.bias, bias_value)
def forward(self, features):
cls_prob_list, rpn_num_prob_list, pred_bbox_list, rpn_iou_prob_list = [], [], [], []
for feature in features:
rpn_cls_conv = self.cls_subnet(feature)
cls_score = self.cls_score(rpn_cls_conv)
rpn_num_prob = self.num_pred(rpn_cls_conv)
cls_prob = F.sigmoid(cls_score)
rpn_box_conv = self.bbox_subnet(feature)
offsets = self.bbox_pred(rpn_box_conv)
rpn_iou_prob = self.iou_pred(rpn_box_conv)
cls_prob_list.append(cls_prob)
pred_bbox_list.append(offsets)
rpn_iou_prob_list.append(rpn_iou_prob)
rpn_num_prob_list.append(rpn_num_prob)
assert cls_prob_list[0].ndim == 4
pred_cls_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in cls_prob_list]
pred_reg_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, 4)
for _ in pred_bbox_list]
rpn_iou_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in rpn_iou_prob_list]
rpn_num_prob_list = [
_.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
for _ in rpn_num_prob_list]
return pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [512, 1024, 2048]
fpn_dim = 256
use_bias =True
lateral_convs, output_convs = [], []
for idx, in_channels in enumerate(in_channels):
lateral_conv = M.Conv2d(
in_channels, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
M.init.msra_normal_(output_conv.weight, mode="fan_in")
if use_bias:
M.init.fill_(lateral_conv.bias, 0)
M.init.fill_(output_conv.bias, 0)
lateral_convs.append(lateral_conv)
output_convs.append(output_conv)
self.p6 = M.Conv2d(fpn_dim, fpn_dim, kernel_size=3, stride=2, padding=1, bias=use_bias)
self.p7 = M.Conv2d(fpn_dim, fpn_dim, kernel_size=3, stride=2, padding=1, bias=use_bias)
self.relu = | M.ReLU() | megengine.module.ReLU |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
def __init__(self):
super().__init__()
def generate_anchors_opr(self, fm_3x3, fm_stride,
anchor_scales=(8, 16, 32, 64, 128),
anchor_ratios=(1, 2, 3), base_size = 4):
np_anchors = generate_anchors(
base_size=base_size,
ratios=np.array(anchor_ratios),
scales=np.array(anchor_scales))
device = fm_3x3.device
anchors = mge.tensor(np_anchors).to(device)
height, width = fm_3x3.shape[2], fm_3x3.shape[3]
shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
c = anchors.shape[1]
all_anchors = | F.expand_dims(anchors, axis=0) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
def __init__(self):
super().__init__()
def generate_anchors_opr(self, fm_3x3, fm_stride,
anchor_scales=(8, 16, 32, 64, 128),
anchor_ratios=(1, 2, 3), base_size = 4):
np_anchors = generate_anchors(
base_size=base_size,
ratios=np.array(anchor_ratios),
scales=np.array(anchor_scales))
device = fm_3x3.device
anchors = mge.tensor(np_anchors).to(device)
height, width = fm_3x3.shape[2], fm_3x3.shape[3]
shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
c = anchors.shape[1]
all_anchors = F.expand_dims(anchors, axis=0) + | F.expand_dims(shifts, axis=1) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates flattened (H*W*A, 4) anchor boxes for each FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Anchors for one feature map: base cell anchors shifted to every
        spatial location (spacing fm_stride); result is detached."""
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel coordinates of every feature-map cell.
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # (1, A, 4) + (H*W, 1, 4) -> (H*W, A, 4), flattened to rows.
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors

    def forward(self, fpn_fms):
        """One anchor tensor per level; strides run 128 -> 8 to match the
        highest-level-first ordering of fpn_fms."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
def __init__(self):
    """Build backbone (ResNet50 + FPN), head, anchor generator, loss
    criteria, and random dummy input tensors."""
    super().__init__()
    # ----------------------- build the backbone ------------------------ #
    self.resnet50 = ResNet50()
    # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
    if config.backbone_freeze_at >= 1:
        for p in self.resnet50.conv1.parameters():
            # p.requires_grad = False
            # NOTE(review): rebinding the loop variable does not freeze
            # the parameter; this loop has no effect.
            p = p.detach()
    if config.backbone_freeze_at >= 2:
        for p in self.resnet50.layer1.parameters():
            # p.requires_grad = False
            p = p.detach()
    # -------------------------- build the FPN -------------------------- #
    self.backbone = FPN(self.resnet50)
    # -------------------------- build the RPN -------------------------- #
    # self.RPN = RPN(config.rpn_channel)
    self.head = RetinaNetHead()
    # -------------------------- build the anchor generator ------------- #
    self.anchor_generator = RetinaNetAnchorV2()
    # -------------------------- build the criteria --------------------- #
    self.criteria = RetinaNetCriteriaV2()
    # ------------------ dummy input tensors (random data) -------------- #
    self.inputs = {
        "image": mge.tensor(
            np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
        ),
        "im_info": mge.tensor(
            np.random.random([2, 6]).astype(np.float32), dtype="float32",
        ),
        "gt_boxes": mge.tensor(
            np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
        ),
    }
def pre_process(self, images):
    """Channel-wise normalize NCHW images, then pad via get_padded_tensor
    with alignment 64."""
    mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
    std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
    mean = mge.tensor(mean).to(images.device)
    std = mge.tensor(std).to(images.device)
    normed_images = (images - mean) / std
    normed_images = get_padded_tensor(normed_images, 64)
    return normed_images
def forward(self, inputs):
    """Training path returns a loss dict; eval path returns decoded boxes."""
    im_info = inputs['im_info']
    # Normalize and pad the raw images before either path.
    normed_images = self.pre_process(inputs['image'])
    if self.training:
        gt_boxes = inputs['gt_boxes']
        return self._forward_train(normed_images, im_info, gt_boxes)
    else:
        return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
    """Run backbone/head/anchors and compute the loss dict."""
    # NOTE(review): this empty dict is immediately overwritten below.
    loss_dict = {}
    # stride: 128,64,32,16,8, p6->p2
    fpn_fms = self.backbone(image)
    pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
    anchors_list = self.anchor_generator(fpn_fms)
    loss_dict = self.criteria(
        pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
        rpn_iou_list, gt_boxes, im_info)
    return loss_dict
def _forward_test(self, image, im_info):
    """Run backbone/head/anchors and decode raw boxes (no NMS here;
    batch size 1 asserted downstream)."""
    fpn_fms = self.backbone(image)
    pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
    anchors_list = self.anchor_generator(fpn_fms)
    pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
        pred_reg_list, rpn_iou_list)
    return pred_boxes
def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
assert rpn_cls_list[0].shape[0] == 1
all_anchors = F.concat(anchors_list, axis = 0)
rpn_cls_scores_final = | F.concat(rpn_cls_list, axis=1) | megengine.functional.concat |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates flattened (H*W*A, 4) anchor boxes for each FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Anchors for one feature map: base cell anchors shifted to every
        spatial location (spacing fm_stride); result is detached."""
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel coordinates of every feature-map cell.
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # (1, A, 4) + (H*W, 1, 4) -> (H*W, A, 4), flattened to rows.
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors

    def forward(self, fpn_fms):
        """One anchor tensor per level; strides run 128 -> 8 to match the
        highest-level-first ordering of fpn_fms."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
def __init__(self):
    """Build backbone (ResNet50 + FPN), head, anchor generator, loss
    criteria, and random dummy input tensors."""
    super().__init__()
    # ----------------------- build the backbone ------------------------ #
    self.resnet50 = ResNet50()
    # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
    if config.backbone_freeze_at >= 1:
        for p in self.resnet50.conv1.parameters():
            # p.requires_grad = False
            # NOTE(review): rebinding the loop variable does not freeze
            # the parameter; this loop has no effect.
            p = p.detach()
    if config.backbone_freeze_at >= 2:
        for p in self.resnet50.layer1.parameters():
            # p.requires_grad = False
            p = p.detach()
    # -------------------------- build the FPN -------------------------- #
    self.backbone = FPN(self.resnet50)
    # -------------------------- build the RPN -------------------------- #
    # self.RPN = RPN(config.rpn_channel)
    self.head = RetinaNetHead()
    # -------------------------- build the anchor generator ------------- #
    self.anchor_generator = RetinaNetAnchorV2()
    # -------------------------- build the criteria --------------------- #
    self.criteria = RetinaNetCriteriaV2()
    # ------------------ dummy input tensors (random data) -------------- #
    self.inputs = {
        "image": mge.tensor(
            np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
        ),
        "im_info": mge.tensor(
            np.random.random([2, 6]).astype(np.float32), dtype="float32",
        ),
        "gt_boxes": mge.tensor(
            np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
        ),
    }
def pre_process(self, images):
    """Channel-wise normalize NCHW images, then pad via get_padded_tensor
    with alignment 64."""
    mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
    std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
    mean = mge.tensor(mean).to(images.device)
    std = mge.tensor(std).to(images.device)
    normed_images = (images - mean) / std
    normed_images = get_padded_tensor(normed_images, 64)
    return normed_images
def forward(self, inputs):
    """Training path returns a loss dict; eval path returns decoded boxes."""
    im_info = inputs['im_info']
    # Normalize and pad the raw images before either path.
    normed_images = self.pre_process(inputs['image'])
    if self.training:
        gt_boxes = inputs['gt_boxes']
        return self._forward_train(normed_images, im_info, gt_boxes)
    else:
        return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
    """Run backbone/head/anchors and compute the loss dict."""
    # NOTE(review): this empty dict is immediately overwritten below.
    loss_dict = {}
    # stride: 128,64,32,16,8, p6->p2
    fpn_fms = self.backbone(image)
    pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
    anchors_list = self.anchor_generator(fpn_fms)
    loss_dict = self.criteria(
        pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
        rpn_iou_list, gt_boxes, im_info)
    return loss_dict
def _forward_test(self, image, im_info):
    """Run backbone/head/anchors and decode raw boxes (no NMS here;
    batch size 1 asserted downstream)."""
    fpn_fms = self.backbone(image)
    pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
    anchors_list = self.anchor_generator(fpn_fms)
    pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
        pred_reg_list, rpn_iou_list)
    return pred_boxes
def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
assert rpn_cls_list[0].shape[0] == 1
all_anchors = F.concat(anchors_list, axis = 0)
rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
rpn_bbox_offsets_final = | F.concat(rpn_bbox_list,axis=1) | megengine.functional.concat |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates flattened (H*W*A, 4) anchor boxes for each FPN level."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Anchors for one feature map: base cell anchors shifted to every
        spatial location (spacing fm_stride); result is detached."""
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel coordinates of every feature-map cell.
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # (1, A, 4) + (H*W, 1, 4) -> (H*W, A, 4), flattened to rows.
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors

    def forward(self, fpn_fms):
        """One anchor tensor per level; strides run 128 -> 8 to match the
        highest-level-first ordering of fpn_fms."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
    def __init__(self):
        """Assemble backbone, FPN, head, anchor generator and criteria."""
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable is a no-op and does
                # NOT freeze the parameter — confirm the intended freezing
                # mechanism (e.g. excluding these params from the optimizer).
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same no-op as above — see comment on conv1.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # ----------------- example input tensors (random data) ------------- #
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
im_info = inputs['im_info']
# process the images
normed_images = self.pre_process(inputs['image'])
if self.training:
gt_boxes = inputs['gt_boxes']
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 128,64,32,16,8, p6->p2
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
loss_dict = self.criteria(
pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
rpn_iou_list, gt_boxes, im_info)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
pred_reg_list, rpn_iou_list)
return pred_boxes
def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
assert rpn_cls_list[0].shape[0] == 1
all_anchors = F.concat(anchors_list, axis = 0)
rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
rpn_iou_prob_final = | F.concat(rpn_iou_list, axis=1) | megengine.functional.concat |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates per-level anchor boxes, in image coordinates, for FPN feature maps."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Return every anchor for one feature map, shifted to its grid cell.

        The result is a detached tensor of shape (H*W*A, 4) where A is the
        number of cell anchors produced by `generate_anchors`.
        """
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # One (x, y, x, y) offset per grid cell, applied to both box corners.
        cell_offsets = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        num_coords = cell_anchors.shape[1]
        shifted = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(cell_offsets, axis=1)
        return shifted.reshape(-1, num_coords).detach()
    def forward(self, fpn_fms):
        """Build the anchor list for each FPN level (strides 128 down to 8)."""
        strides = [128, 64, 32, 16, 8]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size=4))
        return anchors_per_level
class Network(M.Module):
    """End-to-end detector: ResNet50+FPN backbone, RetinaNet-style head,
    anchor generator and training criteria.

    Training mode returns a dict of losses; eval mode returns decoded boxes
    with classification and IoU scores attached (see `_recover_dtboxes`).
    """
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable is a no-op and does
                # NOT freeze the parameter — confirm the intended freezing
                # mechanism (e.g. excluding these params from the optimizer).
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same no-op as above — see comment on conv1.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # ----------------- example input tensors (random data) ------------- #
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize with dataset mean/std and pad H/W to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        # pad so every FPN stride divides the spatial dimensions
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the training path (loss dict) or inference path (boxes)."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Compute the training losses for one batch."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Decode raw head outputs into boxes (batch size 1 only, see assert below)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Apply predicted offsets to anchors and attach per-box scores.

        Returns the concatenation [decoded box | cls score | iou prob] along
        axis 1, one row per prediction. Assumes batch size 1.
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # concat over FPN levels, then drop the (size-1) batch dimension
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    def __init__(self):
        """Stateless loss module; only initializes the M.Module base."""
        super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
ignore_mask = | F.expand_dims(ignore_mask, axis=0) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates per-level anchor boxes, in image coordinates, for FPN feature maps."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Return every anchor for one feature map, shifted to its grid cell.

        The result is a detached tensor of shape (H*W*A, 4) where A is the
        number of cell anchors produced by `generate_anchors`.
        """
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # One (x, y, x, y) offset per grid cell, applied to both box corners.
        cell_offsets = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        num_coords = cell_anchors.shape[1]
        shifted = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(cell_offsets, axis=1)
        return shifted.reshape(-1, num_coords).detach()
    def forward(self, fpn_fms):
        """Build the anchor list for each FPN level (strides 128 down to 8)."""
        strides = [128, 64, 32, 16, 8]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size=4))
        return anchors_per_level
class Network(M.Module):
    """End-to-end detector: ResNet50+FPN backbone, RetinaNet-style head,
    anchor generator and training criteria.

    Training mode returns a dict of losses; eval mode returns decoded boxes
    with classification and IoU scores attached (see `_recover_dtboxes`).
    """
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable is a no-op and does
                # NOT freeze the parameter — confirm the intended freezing
                # mechanism (e.g. excluding these params from the optimizer).
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same no-op as above — see comment on conv1.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # ----------------- example input tensors (random data) ------------- #
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize with dataset mean/std and pad H/W to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        # pad so every FPN stride divides the spatial dimensions
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the training path (loss dict) or inference path (boxes)."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Compute the training losses for one batch."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Decode raw head outputs into boxes (batch size 1 only, see assert below)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Apply predicted offsets to anchors and attach per-box scores.

        Returns the concatenation [decoded box | cls score | iou prob] along
        axis 1, one row per prediction. Assumes batch size 1.
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # concat over FPN levels, then drop the (size-1) batch dimension
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    def __init__(self):
        """Stateless loss module; only initializes the M.Module base."""
        super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
ignore_mask = F.expand_dims(ignore_mask, axis=0)
overlaps = overlaps * ignore_mask
index = F.argmax(overlaps, axis = 1)
value = | F.nn.indexing_one_hot(overlaps, index, 1) | megengine.functional.nn.indexing_one_hot |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates per-level anchor boxes, in image coordinates, for FPN feature maps."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Return every anchor for one feature map, shifted to its grid cell.

        The result is a detached tensor of shape (H*W*A, 4) where A is the
        number of cell anchors produced by `generate_anchors`.
        """
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # One (x, y, x, y) offset per grid cell, applied to both box corners.
        cell_offsets = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        num_coords = cell_anchors.shape[1]
        shifted = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(cell_offsets, axis=1)
        return shifted.reshape(-1, num_coords).detach()
    def forward(self, fpn_fms):
        """Build the anchor list for each FPN level (strides 128 down to 8)."""
        strides = [128, 64, 32, 16, 8]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size=4))
        return anchors_per_level
class Network(M.Module):
    """End-to-end detector: ResNet50+FPN backbone, RetinaNet-style head,
    anchor generator and training criteria.

    Training mode returns a dict of losses; eval mode returns decoded boxes
    with classification and IoU scores attached (see `_recover_dtboxes`).
    """
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable is a no-op and does
                # NOT freeze the parameter — confirm the intended freezing
                # mechanism (e.g. excluding these params from the optimizer).
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same no-op as above — see comment on conv1.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # ----------------- example input tensors (random data) ------------- #
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize with dataset mean/std and pad H/W to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        # pad so every FPN stride divides the spatial dimensions
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the training path (loss dict) or inference path (boxes)."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Compute the training losses for one batch."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Decode raw head outputs into boxes (batch size 1 only, see assert below)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Apply predicted offsets to anchors and attach per-box scores.

        Returns the concatenation [decoded box | cls score | iou prob] along
        axis 1, one row per prediction. Assumes batch size 1.
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # concat over FPN levels, then drop the (size-1) batch dimension
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Training criteria for the RetinaNet-style head.

    Produces a focal-style classification loss, a smooth-L1 regression loss
    and an L1 loss between predicted IoU and the IoU of decoded boxes.
    """
    def __init__(self):
        super().__init__()
    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
        rpn_bbox_offsets):
        """Compute per-prediction IoU targets against gt boxes.

        For each image, decode the predicted offsets, compute overlaps with
        the image's valid gt boxes (ignored labels masked to 0), and keep the
        best IoU per prediction. Returns a tensor of shape (n, num_preds, 1).
        """
        n = rpn_bbox_offsets.shape[0]
        # Anchors do not depend on the batch index: hoist out of the loop.
        an, ac = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
        res = []
        for i in range(n):
            # im_info[:, 5] holds the number of valid gt boxes per image.
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # Zero out overlaps with gt boxes labelled "ignore" (column 4).
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            overlaps = overlaps * F.expand_dims(ignore_mask, axis=0)
            # Best IoU over gt boxes for each predicted box.
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            res.append(F.expand_dims(F.expand_dims(value, axis=1), axis=0))
        return F.concat(res, 0)
    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
        anchors_list, rpn_iou_list, boxes, im_info):
        """Return a dict with 'rpn_cls_loss', 'rpn_bbox_loss', 'rpn_iou_loss'.

        Note: `rpn_num_prob_list` is accepted for interface compatibility but
        is currently unused by the loss computation.
        """
        # Tag every anchor with its FPN level index in an extra column.
        all_anchors_list = [F.concat([a, i*F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        # Concatenate head outputs over FPN levels along the anchor axis.
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # Keep only the first of the two target-box groups and label channels.
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)
        target_boxes = target_boxes[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {}
        loss_dict['rpn_cls_loss'] = cls_loss
        # bbox and iou losses are weighted by 2 relative to the cls loss
        loss_dict['rpn_bbox_loss'] = 2 * rpn_bbox_loss
        loss_dict['rpn_iou_loss'] = 2 * rpn_iou_loss
        return loss_dict
class RetinaNetHead(M.Module):
    def __init__(self):
        """Build shared cls/bbox subnets and the four 3x3-conv predictor heads."""
        super().__init__()
        # four conv+ReLU layers in each subnet, 256 channels throughout
        num_convs = 4
        in_channels = 256
        cls_subnet, bbox_subnet = [], []
        for _ in range(num_convs):
            cls_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            cls_subnet.append(M.ReLU())
            bbox_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            bbox_subnet.append(M.ReLU())
        self.cls_subnet = M.Sequential(*cls_subnet)
        self.bbox_subnet = M.Sequential(*bbox_subnet)
        # predictor heads: one output group per cell anchor
        self.cls_score = M.Conv2d(
            in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
            kernel_size=3, stride=1, padding=1)
        self.bbox_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 4 * 1,
            kernel_size=3, stride=1, padding=1)
        # per-anchor IoU prediction head
        self.iou_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        # per-anchor auxiliary "num" prediction head (consumed by the head's forward)
        self.num_pred = M.Conv2d(in_channels,
            config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self._init_weights()
    def _init_weights(self):
        """Init conv weights Normal(0, 0.01), biases 0; cls_score bias encodes
        a 0.01 prior probability (focal-loss style initialization)."""
        # Initialization
        for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
                self.cls_score, self.bbox_pred, self.iou_pred]:
            for layer in modules.modules():
                if isinstance(layer, M.Conv2d):
                    M.init.normal_(layer.weight, std=0.01)
                    M.init.fill_(layer.bias, 0)
        prior_prob = 0.01
        # Use prior in model initialization to improve stability
        bias_value = -(math.log((1 - prior_prob) / prior_prob))
        M.init.fill_(self.cls_score.bias, bias_value)
def forward(self, features):
cls_prob_list, rpn_num_prob_list, pred_bbox_list, rpn_iou_prob_list = [], [], [], []
for feature in features:
rpn_cls_conv = self.cls_subnet(feature)
cls_score = self.cls_score(rpn_cls_conv)
rpn_num_prob = self.num_pred(rpn_cls_conv)
cls_prob = | F.sigmoid(cls_score) | megengine.functional.sigmoid |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates per-level anchor boxes, in image coordinates, for FPN feature maps."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Return every anchor for one feature map, shifted to its grid cell.

        The result is a detached tensor of shape (H*W*A, 4) where A is the
        number of cell anchors produced by `generate_anchors`.
        """
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # One (x, y, x, y) offset per grid cell, applied to both box corners.
        cell_offsets = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        num_coords = cell_anchors.shape[1]
        shifted = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(cell_offsets, axis=1)
        return shifted.reshape(-1, num_coords).detach()
    def forward(self, fpn_fms):
        """Build the anchor list for each FPN level (strides 128 down to 8)."""
        strides = [128, 64, 32, 16, 8]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size=4))
        return anchors_per_level
class Network(M.Module):
    """End-to-end detector: ResNet50+FPN backbone, RetinaNet-style head,
    anchor generator and training criteria.

    Training mode returns a dict of losses; eval mode returns decoded boxes
    with classification and IoU scores attached (see `_recover_dtboxes`).
    """
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable is a no-op and does
                # NOT freeze the parameter — confirm the intended freezing
                # mechanism (e.g. excluding these params from the optimizer).
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same no-op as above — see comment on conv1.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # ----------------- example input tensors (random data) ------------- #
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize with dataset mean/std and pad H/W to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        # pad so every FPN stride divides the spatial dimensions
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the training path (loss dict) or inference path (boxes)."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Compute the training losses for one batch."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Decode raw head outputs into boxes (batch size 1 only, see assert below)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Apply predicted offsets to anchors and attach per-box scores.

        Returns the concatenation [decoded box | cls score | iou prob] along
        axis 1, one row per prediction. Assumes batch size 1.
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # concat over FPN levels, then drop the (size-1) batch dimension
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Training criteria for the RetinaNet-style head.

    Produces a focal-style classification loss, a smooth-L1 regression loss
    and an L1 loss between predicted IoU and the IoU of decoded boxes.
    """
    def __init__(self):
        super().__init__()
    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
        rpn_bbox_offsets):
        """Compute per-prediction IoU targets against gt boxes.

        For each image, decode the predicted offsets, compute overlaps with
        the image's valid gt boxes (ignored labels masked to 0), and keep the
        best IoU per prediction. Returns a tensor of shape (n, num_preds, 1).
        """
        n = rpn_bbox_offsets.shape[0]
        # Anchors do not depend on the batch index: hoist out of the loop.
        an, ac = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
        res = []
        for i in range(n):
            # im_info[:, 5] holds the number of valid gt boxes per image.
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # Zero out overlaps with gt boxes labelled "ignore" (column 4).
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            overlaps = overlaps * F.expand_dims(ignore_mask, axis=0)
            # Best IoU over gt boxes for each predicted box.
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            res.append(F.expand_dims(F.expand_dims(value, axis=1), axis=0))
        return F.concat(res, 0)
    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
        anchors_list, rpn_iou_list, boxes, im_info):
        """Return a dict with 'rpn_cls_loss', 'rpn_bbox_loss', 'rpn_iou_loss'.

        Note: `rpn_num_prob_list` is accepted for interface compatibility but
        is currently unused by the loss computation.
        """
        # Tag every anchor with its FPN level index in an extra column.
        all_anchors_list = [F.concat([a, i*F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        # Concatenate head outputs over FPN levels along the anchor axis.
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # Keep only the first of the two target-box groups and label channels.
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)
        target_boxes = target_boxes[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {}
        loss_dict['rpn_cls_loss'] = cls_loss
        # bbox and iou losses are weighted by 2 relative to the cls loss
        loss_dict['rpn_bbox_loss'] = 2 * rpn_bbox_loss
        loss_dict['rpn_iou_loss'] = 2 * rpn_iou_loss
        return loss_dict
class RetinaNetHead(M.Module):
    """Shared detection head applied to every FPN level.

    Two 4-conv subnets (classification and box) feed four predictors:
    class scores, box deltas, an IoU-quality score and a per-point "num"
    score. Weights are shared across levels (the same module is called on
    each feature map).
    """
    def __init__(self):
        super().__init__()
        num_convs = 4
        in_channels = 256
        cls_subnet, bbox_subnet = [], []
        for _ in range(num_convs):
            cls_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            cls_subnet.append(M.ReLU())
            bbox_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            bbox_subnet.append(M.ReLU())
        self.cls_subnet = M.Sequential(*cls_subnet)
        self.bbox_subnet = M.Sequential(*bbox_subnet)
        # predictor heads (3x3 convs on top of the subnets)
        self.cls_score = M.Conv2d(
            in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
            kernel_size=3, stride=1, padding=1)
        self.bbox_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 4 * 1,
            kernel_size=3, stride=1, padding=1)
        self.iou_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self.num_pred = M.Conv2d(in_channels,
            config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self._init_weights()
    def _init_weights(self):
        """Normal(0, 0.01) conv init; focal-loss prior bias for cls_score."""
        # Initialization
        for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
                        self.cls_score, self.bbox_pred, self.iou_pred]:
            for layer in modules.modules():
                if isinstance(layer, M.Conv2d):
                    M.init.normal_(layer.weight, std=0.01)
                    M.init.fill_(layer.bias, 0)
        prior_prob = 0.01
        # Use prior in model initialization to improve stability:
        # bias so that initial sigmoid(cls_score) ~= prior_prob.
        bias_value = -(math.log((1 - prior_prob) / prior_prob))
        M.init.fill_(self.cls_score.bias, bias_value)
    def forward(self, features):
        """Run the head on each level and flatten the outputs.

        Returns four per-level lists; each entry is reshaped channels-last
        to (batch, H*W*num_anchors, C) so rows align across the lists.
        """
        cls_prob_list, rpn_num_prob_list, pred_bbox_list, rpn_iou_prob_list = [], [], [], []
        for feature in features:
            rpn_cls_conv = self.cls_subnet(feature)
            cls_score = self.cls_score(rpn_cls_conv)
            # "num" branch shares the classification subnet trunk.
            rpn_num_prob = self.num_pred(rpn_cls_conv)
            cls_prob = F.sigmoid(cls_score)
            rpn_box_conv = self.bbox_subnet(feature)
            offsets = self.bbox_pred(rpn_box_conv)
            # IoU branch shares the box subnet trunk.
            rpn_iou_prob = self.iou_pred(rpn_box_conv)
            cls_prob_list.append(cls_prob)
            pred_bbox_list.append(offsets)
            rpn_iou_prob_list.append(rpn_iou_prob)
            rpn_num_prob_list.append(rpn_num_prob)
        assert cls_prob_list[0].ndim == 4
        pred_cls_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in cls_prob_list]
        pred_reg_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, 4)
            for _ in pred_bbox_list]
        # NOTE(review): iou_pred/num_pred output num_cell_anchors*1
        # channels but are reshaped with (num_classes-1); this only lines
        # up when num_classes == 2 — confirm config.
        rpn_iou_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in rpn_iou_prob_list]
        rpn_num_prob_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in rpn_num_prob_list]
        return pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [512, 1024, 2048]
fpn_dim = 256
use_bias =True
lateral_convs, output_convs = [], []
for idx, in_channels in enumerate(in_channels):
lateral_conv = M.Conv2d(
in_channels, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
| M.init.msra_normal_(lateral_conv.weight, mode="fan_in") | megengine.module.init.msra_normal_ |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the anchor boxes for every FPN level."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Return all anchors of one level, shifted onto every cell.

        The base cell anchors are tiled over the H x W grid of ``fm_3x3``
        with a spacing of ``fm_stride`` pixels; the result is a flat
        (H*W*A, 4) tensor detached from the graph.
        """
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel coordinates of every grid column / row.
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # One (x, y, x, y) shift per cell, applied to both box corners.
        cell_shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        box_dim = cell_anchors.shape[1]
        shifted = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(cell_shifts, axis=1)
        return shifted.reshape(-1, box_dim).detach()
    def forward(self, fpn_fms):
        """Anchors for each pyramid level; levels arrive stride-descending."""
        strides = [8, 16, 32, 64, 128][::-1]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size = 4))
        return anchors_per_level
class Network(M.Module):
    """End-to-end RetinaNet-style detector: ResNet50 + FPN + shared head."""
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): rebinding the loop variable (`p = p.detach()`) does
        # not modify the module's parameters, so nothing is actually frozen
        # here; the commented-out lines show the original intent — fix
        # separately (e.g. exclude these params from the optimizer).
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Dummy inputs documenting expected shapes: image NCHW,
        # im_info (N, 6), gt_boxes (N, max_boxes, 5).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize with dataset mean/std and pad H, W to multiples of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to training (loss dict) or inference (decoded boxes)."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone/head/anchor generation; return the criteria losses."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Inference path: decode raw head outputs into candidate boxes."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode head outputs into a flat (num_anchors, 6) box tensor.

        Columns: 4 decoded coordinates (format from
        bbox_transform_inv_opr — presumably x1,y1,x2,y2; confirm there),
        class score, IoU score. Single-image batches only.
        """
        assert rpn_cls_list[0].shape[0] == 1
        # Flatten every pyramid level into one anchor/score/offset list.
        all_anchors = F.concat(anchors_list, axis = 0)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # NOTE(review): expand/broadcast/reshape is a no-op copy of
        # all_anchors; kept byte-identical.
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Training losses for the RetinaNet-style head.

    Computes a focal classification loss, a smooth-L1 box regression loss
    and an L1 loss on the predicted IoU quality from the concatenated
    per-level head outputs, the generated anchors and the ground truth.
    """
    def __init__(self):
        super().__init__()
    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """Per-anchor IoU target: best overlap of each decoded box with GT.

        For each image, decodes the (detached) predicted offsets against
        the anchors and records, per anchor, the maximum IoU with any
        non-ignored ground-truth box. Returns a (batch, num_anchors, 1)
        tensor used as target for the IoU-prediction branch.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # assumes im_info[i, 5] is the count of valid GT boxes for
            # image i — TODO confirm against the dataloader.
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            # Detach: IoU targets must not backprop through the offsets.
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            m = offsets.shape[0]  # NOTE(review): unused
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            # NOTE(review): expand/broadcast/reshape returns all_anchors
            # unchanged with shape (an, ac); kept byte-identical.
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # Zero the overlap columns of "ignore"-labelled GT boxes.
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            # Best-matching GT per decoded box and its IoU value.
            index = F.argmax(overlaps, axis = 1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        result = F.concat(res, 0)
        return result
    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Return {'rpn_cls_loss', 'rpn_bbox_loss', 'rpn_iou_loss'}."""
        # Tag each anchor with its pyramid-level index as a 5th column.
        all_anchors_list = [F.concat([a, i*F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis = 0)
        # Concatenate per-level predictions along the anchor axis.
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis = 1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis = 1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis = 1)
        # NOTE(review): concatenated but never used below.
        rpn_num_per_points_final = F.concat(rpn_num_prob_list, axis = 1)
        # Anchor-to-GT assignment: labels plus regression targets.
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
                rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # Targets are reshaped into 2 sets; only set [0] is trained on.
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)
        rpn_cls_prob_final = rpn_cls_prob_final  # NOTE(review): no-op
        offsets_final = rpn_bbox_offset_final
        target_boxes = target_boxes[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        # Focal loss on the classification branch.
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha = config.focal_loss_alpha, gamma = config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(offsets_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {}
        loss_dict['rpn_cls_loss'] = cls_loss
        # Box and IoU losses are up-weighted 2x relative to classification.
        loss_dict['rpn_bbox_loss'] = 2 * rpn_bbox_loss
        loss_dict['rpn_iou_loss'] = 2 * rpn_iou_loss
        return loss_dict
class RetinaNetHead(M.Module):
    """Shared detection head applied to every FPN level.

    Two 4-conv subnets (classification and box) feed four predictors:
    class scores, box deltas, an IoU-quality score and a per-point "num"
    score. Weights are shared across levels (the same module is called on
    each feature map).
    """
    def __init__(self):
        super().__init__()
        num_convs = 4
        in_channels = 256
        cls_subnet, bbox_subnet = [], []
        for _ in range(num_convs):
            cls_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            cls_subnet.append(M.ReLU())
            bbox_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            bbox_subnet.append(M.ReLU())
        self.cls_subnet = M.Sequential(*cls_subnet)
        self.bbox_subnet = M.Sequential(*bbox_subnet)
        # predictor heads (3x3 convs on top of the subnets)
        self.cls_score = M.Conv2d(
            in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
            kernel_size=3, stride=1, padding=1)
        self.bbox_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 4 * 1,
            kernel_size=3, stride=1, padding=1)
        self.iou_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self.num_pred = M.Conv2d(in_channels,
            config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self._init_weights()
    def _init_weights(self):
        """Normal(0, 0.01) conv init; focal-loss prior bias for cls_score."""
        # Initialization
        for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
                        self.cls_score, self.bbox_pred, self.iou_pred]:
            for layer in modules.modules():
                if isinstance(layer, M.Conv2d):
                    M.init.normal_(layer.weight, std=0.01)
                    M.init.fill_(layer.bias, 0)
        prior_prob = 0.01
        # Use prior in model initialization to improve stability:
        # bias so that initial sigmoid(cls_score) ~= prior_prob.
        bias_value = -(math.log((1 - prior_prob) / prior_prob))
        M.init.fill_(self.cls_score.bias, bias_value)
    def forward(self, features):
        """Run the head on each level and flatten the outputs.

        Returns four per-level lists; each entry is reshaped channels-last
        to (batch, H*W*num_anchors, C) so rows align across the lists.
        """
        cls_prob_list, rpn_num_prob_list, pred_bbox_list, rpn_iou_prob_list = [], [], [], []
        for feature in features:
            rpn_cls_conv = self.cls_subnet(feature)
            cls_score = self.cls_score(rpn_cls_conv)
            # "num" branch shares the classification subnet trunk.
            rpn_num_prob = self.num_pred(rpn_cls_conv)
            cls_prob = F.sigmoid(cls_score)
            rpn_box_conv = self.bbox_subnet(feature)
            offsets = self.bbox_pred(rpn_box_conv)
            # IoU branch shares the box subnet trunk.
            rpn_iou_prob = self.iou_pred(rpn_box_conv)
            cls_prob_list.append(cls_prob)
            pred_bbox_list.append(offsets)
            rpn_iou_prob_list.append(rpn_iou_prob)
            rpn_num_prob_list.append(rpn_num_prob)
        assert cls_prob_list[0].ndim == 4
        pred_cls_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in cls_prob_list]
        pred_reg_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, 4)
            for _ in pred_bbox_list]
        # NOTE(review): iou_pred/num_pred output num_cell_anchors*1
        # channels but are reshaped with (num_classes-1); this only lines
        # up when num_classes == 2 — confirm config.
        rpn_iou_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in rpn_iou_prob_list]
        rpn_num_prob_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in rpn_num_prob_list]
        return pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [512, 1024, 2048]
fpn_dim = 256
use_bias =True
lateral_convs, output_convs = [], []
for idx, in_channels in enumerate(in_channels):
lateral_conv = M.Conv2d(
in_channels, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
| M.init.msra_normal_(output_conv.weight, mode="fan_in") | megengine.module.init.msra_normal_ |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    def __init__(self):
        # Stateless module: anchors are derived from config at call time.
        super().__init__()
def generate_anchors_opr(self, fm_3x3, fm_stride,
anchor_scales=(8, 16, 32, 64, 128),
anchor_ratios=(1, 2, 3), base_size = 4):
np_anchors = generate_anchors(
base_size=base_size,
ratios=np.array(anchor_ratios),
scales=np.array(anchor_scales))
device = fm_3x3.device
anchors = | mge.tensor(np_anchors) | megengine.tensor |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the anchor boxes for every FPN level."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Return all anchors of one level, shifted onto every cell.

        The base cell anchors are tiled over the H x W grid of ``fm_3x3``
        with a spacing of ``fm_stride`` pixels; the result is a flat
        (H*W*A, 4) tensor detached from the graph.
        """
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel coordinates of every grid column / row.
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # One (x, y, x, y) shift per cell, applied to both box corners.
        cell_shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        box_dim = cell_anchors.shape[1]
        shifted = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(cell_shifts, axis=1)
        return shifted.reshape(-1, box_dim).detach()
    def forward(self, fpn_fms):
        """Anchors for each pyramid level; levels arrive stride-descending."""
        strides = [8, 16, 32, 64, 128][::-1]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size = 4))
        return anchors_per_level
class Network(M.Module):
    def __init__(self):
        """Assemble backbone, FPN, head, anchor generator and criteria."""
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): rebinding the loop variable (`p = p.detach()`) does
        # not modify the module's parameters, so nothing is actually frozen
        # here; the commented-out lines show the original intent — fix
        # separately (e.g. exclude these params from the optimizer).
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Dummy inputs documenting expected shapes: image NCHW,
        # im_info (N, 6), gt_boxes (N, max_boxes, 5).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = | mge.tensor(mean) | megengine.tensor |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the anchor boxes for every FPN level."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Return all anchors of one level, shifted onto every cell.

        The base cell anchors are tiled over the H x W grid of ``fm_3x3``
        with a spacing of ``fm_stride`` pixels; the result is a flat
        (H*W*A, 4) tensor detached from the graph.
        """
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel coordinates of every grid column / row.
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # One (x, y, x, y) shift per cell, applied to both box corners.
        cell_shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        box_dim = cell_anchors.shape[1]
        shifted = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(cell_shifts, axis=1)
        return shifted.reshape(-1, box_dim).detach()
    def forward(self, fpn_fms):
        """Anchors for each pyramid level; levels arrive stride-descending."""
        strides = [8, 16, 32, 64, 128][::-1]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size = 4))
        return anchors_per_level
class Network(M.Module):
    def __init__(self):
        """Assemble backbone, FPN, head, anchor generator and criteria."""
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): rebinding the loop variable (`p = p.detach()`) does
        # not modify the module's parameters, so nothing is actually frozen
        # here; the commented-out lines show the original intent — fix
        # separately (e.g. exclude these params from the optimizer).
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Dummy inputs documenting expected shapes: image NCHW,
        # im_info (N, 6), gt_boxes (N, max_boxes, 5).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = | mge.tensor(std) | megengine.tensor |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the anchor boxes for every FPN level."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Return all anchors of one level, shifted onto every cell.

        The base cell anchors are tiled over the H x W grid of ``fm_3x3``
        with a spacing of ``fm_stride`` pixels; the result is a flat
        (H*W*A, 4) tensor detached from the graph.
        """
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel coordinates of every grid column / row.
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # One (x, y, x, y) shift per cell, applied to both box corners.
        cell_shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        box_dim = cell_anchors.shape[1]
        shifted = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(cell_shifts, axis=1)
        return shifted.reshape(-1, box_dim).detach()
    def forward(self, fpn_fms):
        """Anchors for each pyramid level; levels arrive stride-descending."""
        strides = [8, 16, 32, 64, 128][::-1]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size = 4))
        return anchors_per_level
class Network(M.Module):
    """End-to-end RetinaNet-style detector: ResNet50 + FPN + shared head."""
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): rebinding the loop variable (`p = p.detach()`) does
        # not modify the module's parameters, so nothing is actually frozen
        # here; the commented-out lines show the original intent — fix
        # separately (e.g. exclude these params from the optimizer).
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Dummy inputs documenting expected shapes: image NCHW,
        # im_info (N, 6), gt_boxes (N, max_boxes, 5).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize with dataset mean/std and pad H, W to multiples of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to training (loss dict) or inference (decoded boxes)."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone/head/anchor generation; return the criteria losses."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Inference path: decode raw head outputs into candidate boxes."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode head outputs into a flat (num_anchors, 6) box tensor.

        Columns: 4 decoded coordinates (format from
        bbox_transform_inv_opr — presumably x1,y1,x2,y2; confirm there),
        class score, IoU score. Single-image batches only.
        """
        assert rpn_cls_list[0].shape[0] == 1
        # Flatten every pyramid level into one anchor/score/offset list.
        all_anchors = F.concat(anchors_list, axis = 0)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # NOTE(review): expand/broadcast/reshape is a no-op copy of
        # all_anchors; kept byte-identical.
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    def __init__(self):
        # Stateless criteria module; all inputs arrive via forward().
        super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
ignore_mask = F.expand_dims(ignore_mask, axis=0)
overlaps = overlaps * ignore_mask
index = F.argmax(overlaps, axis = 1)
value = F.nn.indexing_one_hot(overlaps, index, 1)
value = F.expand_dims( | F.expand_dims(value, axis=1) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the anchor boxes for every FPN level."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Return all anchors of one level, shifted onto every cell.

        The base cell anchors are tiled over the H x W grid of ``fm_3x3``
        with a spacing of ``fm_stride`` pixels; the result is a flat
        (H*W*A, 4) tensor detached from the graph.
        """
        base = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        dev = fm_3x3.device
        cell_anchors = mge.tensor(base).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel coordinates of every grid column / row.
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # One (x, y, x, y) shift per cell, applied to both box corners.
        cell_shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        box_dim = cell_anchors.shape[1]
        shifted = F.expand_dims(cell_anchors, axis=0) + F.expand_dims(cell_shifts, axis=1)
        return shifted.reshape(-1, box_dim).detach()
    def forward(self, fpn_fms):
        """Anchors for each pyramid level; levels arrive stride-descending."""
        strides = [8, 16, 32, 64, 128][::-1]
        anchors_per_level = []
        for level, fm_3x3 in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[level]
            anchors_per_level.append(
                self.generate_anchors_opr(fm_3x3, strides[level], scales,
                    config.anchor_aspect_ratios, base_size = 4))
        return anchors_per_level
class Network(M.Module):
    """RetinaNet-style detector: ResNet50+FPN backbone, shared prediction head,
    anchor generator and training criteria.

    `forward` dispatches on `self.training`: training returns a loss dict,
    inference returns decoded boxes.
    """

    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable `p` does not modify
                # the parameter stored in the module, so this probably freezes
                # nothing -- confirm against MegEngine's freezing idiom.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same concern as above -- likely a no-op.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Random dummy inputs, presumably for tracing/debugging -- TODO confirm.
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }

    def pre_process(self, images):
        """Normalize images channel-wise and pad H/W up to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images

    def forward(self, inputs):
        """Run the detector; returns losses when training, decoded boxes otherwise."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)

    def _forward_train(self, image, im_info, gt_boxes):
        """Backbone -> head -> anchors -> criteria; returns the loss dict."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
                pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
                rpn_iou_list, gt_boxes, im_info)
        return loss_dict

    def _forward_test(self, image, im_info):
        """Backbone -> head -> anchors -> decoded boxes (batch size must be 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes

    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-anchor offsets into boxes and append the score columns.

        Returns a (num_anchors, 6) tensor: 4 box coordinates, classification
        score, IoU probability. Only supports batch size 1 (asserted below).
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # drop the batch dimension (batch is asserted to be 1)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Computes the RetinaNet training losses (classification, bbox, IoU)."""

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """Per-image max IoU between decoded predictions and ground truth.

        For each image, decodes the (detached) predicted offsets against the
        anchors and takes each box's best IoU over the valid gt boxes; gt
        boxes labelled with the ignore label are masked out. Used as the
        regression target of the IoU branch.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # im_info[i, 5] holds the number of valid gt boxes for image i
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out overlaps against gt boxes carrying the ignore label
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        result = F.concat(res, 0)
        return result

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Return the loss dict: rpn_cls_loss, rpn_bbox_loss, rpn_iou_loss.

        `rpn_num_prob_list` is accepted for interface compatibility but is
        not used by any loss term below.
        """
        # tag each anchor with its FPN-level index in an extra column
        all_anchors_list = [F.concat([a, i * F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # keep only the first of the two target sets / label channels
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {
            'rpn_cls_loss': cls_loss,
            'rpn_bbox_loss': 2 * rpn_bbox_loss,
            'rpn_iou_loss': 2 * rpn_iou_loss,
        }
        return loss_dict
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
| M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) | megengine.module.Conv2d |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the per-level anchor boxes for a RetinaNet-style FPN head."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size=4):
        """Return every anchor for one feature map, shifted to each spatial cell.

        The base anchors are tiled over the (height, width) grid of `fm_3x3`,
        with grid positions spaced `fm_stride` pixels apart in image space.
        """
        dev = fm_3x3.device
        base = mge.tensor(
            generate_anchors(
                base_size=base_size,
                ratios=np.array(anchor_ratios),
                scales=np.array(anchor_scales))
        ).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # x / y pixel coordinates of every grid cell, in image space
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # one (x1, y1, x2, y2) shift per cell
        shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        num_coords = base.shape[1]
        # (1, A, c) + (S, 1, c) -> (S, A, c), then flatten to (S*A, c)
        shifted = F.expand_dims(base, axis=0) + F.expand_dims(shifts, axis=1)
        return shifted.reshape(-1, num_coords).detach()

    def forward(self, fpn_fms):
        """Build the anchor list for every FPN level (p6 -> p2 stride order)."""
        strides = [8, 16, 32, 64, 128]
        strides.reverse()
        anchors_per_level = []
        for lvl, fm in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[lvl]
            anchors_per_level.append(
                self.generate_anchors_opr(fm, strides[lvl], scales,
                    config.anchor_aspect_ratios, base_size=4))
        return anchors_per_level
class Network(M.Module):
    """RetinaNet-style detector: ResNet50+FPN backbone, shared prediction head,
    anchor generator and training criteria.

    `forward` dispatches on `self.training`: training returns a loss dict,
    inference returns decoded boxes.
    """

    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable `p` does not modify
                # the parameter stored in the module, so this probably freezes
                # nothing -- confirm against MegEngine's freezing idiom.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same concern as above -- likely a no-op.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Random dummy inputs, presumably for tracing/debugging -- TODO confirm.
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }

    def pre_process(self, images):
        """Normalize images channel-wise and pad H/W up to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images

    def forward(self, inputs):
        """Run the detector; returns losses when training, decoded boxes otherwise."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)

    def _forward_train(self, image, im_info, gt_boxes):
        """Backbone -> head -> anchors -> criteria; returns the loss dict."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
                pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
                rpn_iou_list, gt_boxes, im_info)
        return loss_dict

    def _forward_test(self, image, im_info):
        """Backbone -> head -> anchors -> decoded boxes (batch size must be 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes

    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-anchor offsets into boxes and append the score columns.

        Returns a (num_anchors, 6) tensor: 4 box coordinates, classification
        score, IoU probability. Only supports batch size 1 (asserted below).
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # drop the batch dimension (batch is asserted to be 1)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Computes the RetinaNet training losses (classification, bbox, IoU)."""

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """Per-image max IoU between decoded predictions and ground truth.

        For each image, decodes the (detached) predicted offsets against the
        anchors and takes each box's best IoU over the valid gt boxes; gt
        boxes labelled with the ignore label are masked out. Used as the
        regression target of the IoU branch.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # im_info[i, 5] holds the number of valid gt boxes for image i
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out overlaps against gt boxes carrying the ignore label
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        result = F.concat(res, 0)
        return result

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Return the loss dict: rpn_cls_loss, rpn_bbox_loss, rpn_iou_loss.

        `rpn_num_prob_list` is accepted for interface compatibility but is
        not used by any loss term below.
        """
        # tag each anchor with its FPN-level index in an extra column
        all_anchors_list = [F.concat([a, i * F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # keep only the first of the two target sets / label channels
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {
            'rpn_cls_loss': cls_loss,
            'rpn_bbox_loss': 2 * rpn_bbox_loss,
            'rpn_iou_loss': 2 * rpn_iou_loss,
        }
        return loss_dict
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
cls_subnet.append( | M.ReLU() | megengine.module.ReLU |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the per-level anchor boxes for a RetinaNet-style FPN head."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size=4):
        """Return every anchor for one feature map, shifted to each spatial cell.

        The base anchors are tiled over the (height, width) grid of `fm_3x3`,
        with grid positions spaced `fm_stride` pixels apart in image space.
        """
        dev = fm_3x3.device
        base = mge.tensor(
            generate_anchors(
                base_size=base_size,
                ratios=np.array(anchor_ratios),
                scales=np.array(anchor_scales))
        ).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # x / y pixel coordinates of every grid cell, in image space
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # one (x1, y1, x2, y2) shift per cell
        shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        num_coords = base.shape[1]
        # (1, A, c) + (S, 1, c) -> (S, A, c), then flatten to (S*A, c)
        shifted = F.expand_dims(base, axis=0) + F.expand_dims(shifts, axis=1)
        return shifted.reshape(-1, num_coords).detach()

    def forward(self, fpn_fms):
        """Build the anchor list for every FPN level (p6 -> p2 stride order)."""
        strides = [8, 16, 32, 64, 128]
        strides.reverse()
        anchors_per_level = []
        for lvl, fm in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[lvl]
            anchors_per_level.append(
                self.generate_anchors_opr(fm, strides[lvl], scales,
                    config.anchor_aspect_ratios, base_size=4))
        return anchors_per_level
class Network(M.Module):
    """RetinaNet-style detector: ResNet50+FPN backbone, shared prediction head,
    anchor generator and training criteria.

    `forward` dispatches on `self.training`: training returns a loss dict,
    inference returns decoded boxes.
    """

    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable `p` does not modify
                # the parameter stored in the module, so this probably freezes
                # nothing -- confirm against MegEngine's freezing idiom.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same concern as above -- likely a no-op.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Random dummy inputs, presumably for tracing/debugging -- TODO confirm.
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }

    def pre_process(self, images):
        """Normalize images channel-wise and pad H/W up to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images

    def forward(self, inputs):
        """Run the detector; returns losses when training, decoded boxes otherwise."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)

    def _forward_train(self, image, im_info, gt_boxes):
        """Backbone -> head -> anchors -> criteria; returns the loss dict."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
                pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
                rpn_iou_list, gt_boxes, im_info)
        return loss_dict

    def _forward_test(self, image, im_info):
        """Backbone -> head -> anchors -> decoded boxes (batch size must be 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes

    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-anchor offsets into boxes and append the score columns.

        Returns a (num_anchors, 6) tensor: 4 box coordinates, classification
        score, IoU probability. Only supports batch size 1 (asserted below).
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # drop the batch dimension (batch is asserted to be 1)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Computes the RetinaNet training losses (classification, bbox, IoU)."""

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """Per-image max IoU between decoded predictions and ground truth.

        For each image, decodes the (detached) predicted offsets against the
        anchors and takes each box's best IoU over the valid gt boxes; gt
        boxes labelled with the ignore label are masked out. Used as the
        regression target of the IoU branch.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # im_info[i, 5] holds the number of valid gt boxes for image i
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out overlaps against gt boxes carrying the ignore label
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        result = F.concat(res, 0)
        return result

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Return the loss dict: rpn_cls_loss, rpn_bbox_loss, rpn_iou_loss.

        `rpn_num_prob_list` is accepted for interface compatibility but is
        not used by any loss term below.
        """
        # tag each anchor with its FPN-level index in an extra column
        all_anchors_list = [F.concat([a, i * F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # keep only the first of the two target sets / label channels
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {
            'rpn_cls_loss': cls_loss,
            'rpn_bbox_loss': 2 * rpn_bbox_loss,
            'rpn_iou_loss': 2 * rpn_iou_loss,
        }
        return loss_dict
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
| M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1) | megengine.module.Conv2d |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the per-level anchor boxes for a RetinaNet-style FPN head."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size=4):
        """Return every anchor for one feature map, shifted to each spatial cell.

        The base anchors are tiled over the (height, width) grid of `fm_3x3`,
        with grid positions spaced `fm_stride` pixels apart in image space.
        """
        dev = fm_3x3.device
        base = mge.tensor(
            generate_anchors(
                base_size=base_size,
                ratios=np.array(anchor_ratios),
                scales=np.array(anchor_scales))
        ).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # x / y pixel coordinates of every grid cell, in image space
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # one (x1, y1, x2, y2) shift per cell
        shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        num_coords = base.shape[1]
        # (1, A, c) + (S, 1, c) -> (S, A, c), then flatten to (S*A, c)
        shifted = F.expand_dims(base, axis=0) + F.expand_dims(shifts, axis=1)
        return shifted.reshape(-1, num_coords).detach()

    def forward(self, fpn_fms):
        """Build the anchor list for every FPN level (p6 -> p2 stride order)."""
        strides = [8, 16, 32, 64, 128]
        strides.reverse()
        anchors_per_level = []
        for lvl, fm in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[lvl]
            anchors_per_level.append(
                self.generate_anchors_opr(fm, strides[lvl], scales,
                    config.anchor_aspect_ratios, base_size=4))
        return anchors_per_level
class Network(M.Module):
    """RetinaNet-style detector: ResNet50+FPN backbone, shared prediction head,
    anchor generator and training criteria.

    `forward` dispatches on `self.training`: training returns a loss dict,
    inference returns decoded boxes.
    """

    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable `p` does not modify
                # the parameter stored in the module, so this probably freezes
                # nothing -- confirm against MegEngine's freezing idiom.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same concern as above -- likely a no-op.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Random dummy inputs, presumably for tracing/debugging -- TODO confirm.
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }

    def pre_process(self, images):
        """Normalize images channel-wise and pad H/W up to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images

    def forward(self, inputs):
        """Run the detector; returns losses when training, decoded boxes otherwise."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)

    def _forward_train(self, image, im_info, gt_boxes):
        """Backbone -> head -> anchors -> criteria; returns the loss dict."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
                pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
                rpn_iou_list, gt_boxes, im_info)
        return loss_dict

    def _forward_test(self, image, im_info):
        """Backbone -> head -> anchors -> decoded boxes (batch size must be 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes

    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-anchor offsets into boxes and append the score columns.

        Returns a (num_anchors, 6) tensor: 4 box coordinates, classification
        score, IoU probability. Only supports batch size 1 (asserted below).
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # drop the batch dimension (batch is asserted to be 1)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Computes the RetinaNet training losses (classification, bbox, IoU)."""

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """Per-image max IoU between decoded predictions and ground truth.

        For each image, decodes the (detached) predicted offsets against the
        anchors and takes each box's best IoU over the valid gt boxes; gt
        boxes labelled with the ignore label are masked out. Used as the
        regression target of the IoU branch.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # im_info[i, 5] holds the number of valid gt boxes for image i
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out overlaps against gt boxes carrying the ignore label
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        result = F.concat(res, 0)
        return result

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Return the loss dict: rpn_cls_loss, rpn_bbox_loss, rpn_iou_loss.

        `rpn_num_prob_list` is accepted for interface compatibility but is
        not used by any loss term below.
        """
        # tag each anchor with its FPN-level index in an extra column
        all_anchors_list = [F.concat([a, i * F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # keep only the first of the two target sets / label channels
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {
            'rpn_cls_loss': cls_loss,
            'rpn_bbox_loss': 2 * rpn_bbox_loss,
            'rpn_iou_loss': 2 * rpn_iou_loss,
        }
        return loss_dict
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
bbox_subnet.append( | M.ReLU() | megengine.module.ReLU |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the per-level anchor boxes for a RetinaNet-style FPN head."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size=4):
        """Return every anchor for one feature map, shifted to each spatial cell.

        The base anchors are tiled over the (height, width) grid of `fm_3x3`,
        with grid positions spaced `fm_stride` pixels apart in image space.
        """
        dev = fm_3x3.device
        base = mge.tensor(
            generate_anchors(
                base_size=base_size,
                ratios=np.array(anchor_ratios),
                scales=np.array(anchor_scales))
        ).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # x / y pixel coordinates of every grid cell, in image space
        xs = F.linspace(0, w - 1, w).to(dev) * fm_stride
        ys = F.linspace(0, h - 1, h).to(dev) * fm_stride
        grid_x = F.broadcast_to(xs.reshape(1, -1), (h, w)).flatten()
        grid_y = F.broadcast_to(ys.reshape(-1, 1), (h, w)).flatten()
        # one (x1, y1, x2, y2) shift per cell
        shifts = F.stack([grid_x, grid_y, grid_x, grid_y], axis=1)
        num_coords = base.shape[1]
        # (1, A, c) + (S, 1, c) -> (S, A, c), then flatten to (S*A, c)
        shifted = F.expand_dims(base, axis=0) + F.expand_dims(shifts, axis=1)
        return shifted.reshape(-1, num_coords).detach()

    def forward(self, fpn_fms):
        """Build the anchor list for every FPN level (p6 -> p2 stride order)."""
        strides = [8, 16, 32, 64, 128]
        strides.reverse()
        anchors_per_level = []
        for lvl, fm in enumerate(fpn_fms):
            scales = np.array(config.anchor_base_scale) * strides[lvl]
            anchors_per_level.append(
                self.generate_anchors_opr(fm, strides[lvl], scales,
                    config.anchor_aspect_ratios, base_size=4))
        return anchors_per_level
class Network(M.Module):
    """End-to-end RetinaNet-style detector: ResNet50 + FPN backbone, a shared
    prediction head, an anchor generator and the training criteria."""
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        # NOTE(review): rebinding the loop variable (`p = p.detach()`) does not
        # modify the parameters stored inside the module, so this loop appears
        # to have no freezing effect — confirm whether freezing is actually
        # enforced elsewhere (e.g. by excluding these params from the optimizer).
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- build the anchor generator ------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- build the criteria --------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # ------------- placeholder input tensors (random content) ---------- #
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize images with dataset mean/std and pad so H and W are
        multiples of 64 (so every FPN stride divides evenly)."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the train or test path based on `self.training`.

        `inputs` is a dict with keys 'image', 'im_info' and — when training —
        'gt_boxes'.
        """
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone + head and return the dict of training losses."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Run backbone + head and return decoded prediction boxes."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level offsets against their anchors into rows of
        (x1, y1, x2, y2, cls_score, iou_prob); batch size must be 1."""
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Computes the RetinaNet training losses: focal classification loss,
    smooth-L1 box regression loss and an L1 loss on predicted IoU."""

    def __init__(self):
        super().__init__()

    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
        rpn_bbox_offsets):
        """Per-image IoU between decoded predictions and their best GT match.

        Returns an (n, num_pred, 1) tensor holding, for every decoded box, the
        overlap with the ground-truth box it matches best.  Offsets are
        detached so this produces a target with no gradient flow.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # im_info[:, 5] carries the number of valid GT boxes per image
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:, :4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out overlap columns that belong to ignored GT boxes
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            index = F.argmax(overlaps, axis=1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        return F.concat(res, 0)

    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
        anchors_list, rpn_iou_list, boxes, im_info):
        """Assemble per-level predictions and return the loss dictionary.

        `rpn_num_prob_list` is accepted for interface compatibility but does
        not contribute to any loss term (the original code concatenated it
        into an unused local).
        """
        # tag each anchor with its pyramid-level index as an extra 5th column
        all_anchors_list = [F.concat([a, i * F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis=0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis=1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis=1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # targets/labels come in pairs; only the first set is used
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)[0]
        labels = rpn_labels.transpose(2, 0, 1)[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha=config.focal_loss_alpha, gamma=config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(rpn_bbox_offset_final, target_boxes, labels)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target,
            F.expand_dims(labels, axis=2))
        # box and IoU terms are up-weighted by 2 relative to classification
        return {
            'rpn_cls_loss': cls_loss,
            'rpn_bbox_loss': 2 * rpn_bbox_loss,
            'rpn_iou_loss': 2 * rpn_iou_loss,
        }
class RetinaNetHead(M.Module):
    """Shared RetinaNet prediction head: two 4-layer conv towers feeding a
    classification score, a box-regression, an IoU and a 'num' predictor."""
    def __init__(self):
        super().__init__()
        num_convs = 4
        in_channels = 256
        cls_subnet, bbox_subnet = [], []
        for _ in range(num_convs):
            cls_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            cls_subnet.append(M.ReLU())
            bbox_subnet.append(
                M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
            )
            bbox_subnet.append(M.ReLU())
        self.cls_subnet = M.Sequential(*cls_subnet)
        self.bbox_subnet = M.Sequential(*bbox_subnet)
        # predictor
        self.cls_score = M.Conv2d(
            in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
            kernel_size=3, stride=1, padding=1)
        self.bbox_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 4 * 1,
            kernel_size=3, stride=1, padding=1)
        self.iou_pred = M.Conv2d(
            in_channels, config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self.num_pred = M.Conv2d(in_channels,
            config.num_cell_anchors * 1,
            kernel_size = 3, stride=1, padding = 1)
        self._init_weights()
    def _init_weights(self):
        """Gaussian(std=0.01) init for every conv; the classifier bias is set
        so initial sigmoid scores equal prior_prob (RetinaNet focal-loss
        initialization)."""
        # Initialization
        for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
                self.cls_score, self.bbox_pred, self.iou_pred]:
            for layer in modules.modules():
                if isinstance(layer, M.Conv2d):
                    M.init.normal_(layer.weight, std=0.01)
                    M.init.fill_(layer.bias, 0)
        prior_prob = 0.01
        # Use prior in model initialization to improve stability
        bias_value = -(math.log((1 - prior_prob) / prior_prob))
        M.init.fill_(self.cls_score.bias, bias_value)
    def forward(self, features):
        """Apply the head to each FPN level.

        Returns four lists (one entry per level), each entry reshaped from
        NCHW to (N, H*W*num_cell_anchors, C_last).
        """
        cls_prob_list, rpn_num_prob_list, pred_bbox_list, rpn_iou_prob_list = [], [], [], []
        for feature in features:
            rpn_cls_conv = self.cls_subnet(feature)
            cls_score = self.cls_score(rpn_cls_conv)
            rpn_num_prob = self.num_pred(rpn_cls_conv)
            cls_prob = F.sigmoid(cls_score)
            rpn_box_conv = self.bbox_subnet(feature)
            offsets = self.bbox_pred(rpn_box_conv)
            rpn_iou_prob = self.iou_pred(rpn_box_conv)
            cls_prob_list.append(cls_prob)
            pred_bbox_list.append(offsets)
            rpn_iou_prob_list.append(rpn_iou_prob)
            rpn_num_prob_list.append(rpn_num_prob)
        assert cls_prob_list[0].ndim == 4
        pred_cls_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in cls_prob_list]
        pred_reg_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, 4)
            for _ in pred_bbox_list]
        # NOTE(review): iou_pred/num_pred emit num_cell_anchors*1 channels, so
        # reshaping their outputs to a last dim of (num_classes-1) only works
        # when num_classes-1 == 1 — confirm config.num_classes == 2.
        rpn_iou_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in rpn_iou_prob_list]
        rpn_num_prob_list = [
            _.transpose(0, 2, 3, 1).reshape(_.shape[0], -1, (config.num_classes-1))
            for _ in rpn_num_prob_list]
        return pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [512, 1024, 2048]
fpn_dim = 256
use_bias =True
lateral_convs, output_convs = [], []
for idx, in_channels in enumerate(in_channels):
lateral_conv = M.Conv2d(
in_channels, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
M.init.msra_normal_(output_conv.weight, mode="fan_in")
if use_bias:
| M.init.fill_(lateral_conv.bias, 0) | megengine.module.init.fill_ |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the flat list of anchor boxes for every FPN level."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Return all anchors for one feature map as an (H*W*A, 4) tensor.

        The base cell anchors are shifted to every spatial position of
        `fm_3x3` (stride `fm_stride` in input pixels); the result is detached
        so no gradient flows into anchor generation.
        """
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # per-cell pixel offsets along each axis
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        # (H*W, 4): the same shift is applied to both corners of each anchor
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors
    def forward(self, fpn_fms):
        """Build anchors for each pyramid level; strides run 128,64,32,16,8
        to match the p6->p2 ordering of `fpn_fms`."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
    """RetinaNet-style detector wiring together a ResNet50 + FPN backbone,
    a shared head, an anchor generator and the training criteria."""

    def __init__(self):
        super().__init__()
        # backbone
        self.resnet50 = ResNet50()
        # freeze early resnet stages according to the config
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                p = p.detach()
        # feature pyramid over the backbone
        self.backbone = FPN(self.resnet50)
        # shared prediction head
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # anchor generation and loss criteria
        self.anchor_generator = RetinaNetAnchorV2()
        self.criteria = RetinaNetCriteriaV2()
        # placeholder input tensors (random content)
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }

    def pre_process(self, images):
        """Normalize by dataset mean/std, then pad H/W to a multiple of 64."""
        mean_np = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std_np = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean_t = mge.tensor(mean_np).to(images.device)
        std_t = mge.tensor(std_np).to(images.device)
        normalized = (images - mean_t) / std_t
        return get_padded_tensor(normalized, 64)

    def forward(self, inputs):
        """Dispatch on self.training; `inputs` holds 'image', 'im_info' and,
        when training, 'gt_boxes'."""
        info = inputs['im_info']
        normed = self.pre_process(inputs['image'])
        if not self.training:
            return self._forward_test(normed, info)
        return self._forward_train(normed, info, inputs['gt_boxes'])

    def _forward_train(self, image, im_info, gt_boxes):
        """Run backbone + head and return the training loss dict."""
        # feature strides: 128,64,32,16,8 (p6->p2)
        fpn_fms = self.backbone(image)
        cls_list, num_list, reg_list, iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        return self.criteria(
            cls_list, num_list, reg_list, anchors_list,
            iou_list, gt_boxes, im_info)

    def _forward_test(self, image, im_info):
        """Run backbone + head and decode raw boxes (batch size 1 only)."""
        fpn_fms = self.backbone(image)
        cls_list, num_list, reg_list, iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        return self._recover_dtboxes(anchors_list, cls_list, reg_list, iou_list)

    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level offsets against their anchors and return rows of
        (x1, y1, x2, y2, cls_score, iou_prob)."""
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis=0)
        scores = F.concat(rpn_cls_list, axis=1)[0].reshape(-1, 1)
        offsets = F.concat(rpn_bbox_list, axis=1)[0].reshape(-1, 4)
        ious = F.concat(rpn_iou_list, axis=1)[0].reshape(-1, 1)
        num_anchors, ncoord = all_anchors.shape[0], all_anchors.shape[1]
        anchors = F.broadcast_to(
            F.expand_dims(all_anchors, 1), (num_anchors, 1, ncoord)).reshape(-1, ncoord)
        decoded = bbox_transform_inv_opr(anchors, offsets)
        return F.concat([decoded, scores, ious], axis=1)
class RetinaNetCriteriaV2(M.Module):
    """Computes the RetinaNet training losses: focal classification loss,
    smooth-L1 box regression loss and an L1 loss on predicted IoU."""
    def __init__(self):
        super().__init__()
    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
        rpn_bbox_offsets):
        """Per-image IoU between decoded predictions and their best GT match.

        Returns an (n, num_pred, 1) tensor; offsets are detached so the
        result is a gradient-free target.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # im_info[:, 5] carries the number of valid GT boxes per image
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            m = offsets.shape[0]  # NOTE(review): unused local
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # zero out overlap columns that belong to ignored GT boxes
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            index = F.argmax(overlaps, axis = 1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        result = F.concat(res, 0)
        return result
    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
        anchors_list, rpn_iou_list, boxes, im_info):
        """Assemble per-level predictions and return the loss dictionary."""
        # tag each anchor with its pyramid-level index as an extra 5th column
        all_anchors_list = [F.concat([a, i*F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis = 0)
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis = 1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis = 1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis = 1)
        # NOTE(review): concatenated but never used below
        rpn_num_per_points_final = F.concat(rpn_num_prob_list, axis = 1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # targets/labels come in pairs; only set [0] is used
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)
        rpn_cls_prob_final = rpn_cls_prob_final  # NOTE(review): no-op self-assignment
        offsets_final = rpn_bbox_offset_final
        target_boxes = target_boxes[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha = config.focal_loss_alpha, gamma = config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(offsets_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        # box and IoU terms are up-weighted by 2 relative to classification
        loss_dict = {}
        loss_dict['rpn_cls_loss'] = cls_loss
        loss_dict['rpn_bbox_loss'] = 2 * rpn_bbox_loss
        loss_dict['rpn_iou_loss'] = 2 * rpn_iou_loss
        return loss_dict
class RetinaNetHead(M.Module):
    """Shared RetinaNet head: two 4-conv towers plus four 3x3 predictors
    (classification, box regression, IoU, 'num')."""

    def __init__(self):
        super().__init__()
        depth, channels = 4, 256
        cls_tower, box_tower = [], []
        # layers are created in the same interleaved order as before so the
        # default-initialization RNG sequence is unchanged
        for _ in range(depth):
            cls_tower.append(
                M.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1))
            cls_tower.append(M.ReLU())
            box_tower.append(
                M.Conv2d(channels, channels, kernel_size=3, stride=1, padding=1))
            box_tower.append(M.ReLU())
        self.cls_subnet = M.Sequential(*cls_tower)
        self.bbox_subnet = M.Sequential(*box_tower)
        # predictors
        self.cls_score = M.Conv2d(
            channels, config.num_cell_anchors * (config.num_classes - 1) * 1,
            kernel_size=3, stride=1, padding=1)
        self.bbox_pred = M.Conv2d(
            channels, config.num_cell_anchors * 4 * 1,
            kernel_size=3, stride=1, padding=1)
        self.iou_pred = M.Conv2d(
            channels, config.num_cell_anchors * 1,
            kernel_size=3, stride=1, padding=1)
        self.num_pred = M.Conv2d(
            channels, config.num_cell_anchors * 1,
            kernel_size=3, stride=1, padding=1)
        self._init_weights()

    def _init_weights(self):
        """Gaussian init for every conv; prior-prob bias on the classifier."""
        for holder in [self.cls_subnet, self.bbox_subnet, self.num_pred,
                self.cls_score, self.bbox_pred, self.iou_pred]:
            for layer in holder.modules():
                if isinstance(layer, M.Conv2d):
                    M.init.normal_(layer.weight, std=0.01)
                    M.init.fill_(layer.bias, 0)
        prior_prob = 0.01
        # bias chosen so initial sigmoid outputs equal prior_prob (focal-loss init)
        M.init.fill_(self.cls_score.bias,
                     -(math.log((1 - prior_prob) / prior_prob)))

    def forward(self, features):
        """Apply the head to each FPN level; each returned list entry is
        reshaped from NCHW to (N, H*W*num_cell_anchors, C_last)."""
        cls_probs, num_maps, box_maps, iou_maps = [], [], [], []
        for fm in features:
            cls_feat = self.cls_subnet(fm)
            cls_probs.append(F.sigmoid(self.cls_score(cls_feat)))
            num_maps.append(self.num_pred(cls_feat))
            box_feat = self.bbox_subnet(fm)
            box_maps.append(self.bbox_pred(box_feat))
            iou_maps.append(self.iou_pred(box_feat))
        assert cls_probs[0].ndim == 4

        def _flatten(maps, last_dim):
            # NCHW -> (N, H*W*A, last_dim)
            return [x.transpose(0, 2, 3, 1).reshape(x.shape[0], -1, last_dim)
                    for x in maps]

        pred_cls_list = _flatten(cls_probs, config.num_classes - 1)
        pred_reg_list = _flatten(box_maps, 4)
        rpn_iou_list = _flatten(iou_maps, config.num_classes - 1)
        rpn_num_prob_list = _flatten(num_maps, config.num_classes - 1)
        return pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list
class FPN(M.Module):
"""
This module implements Feature Pyramid Network.
It creates pyramid features built on top of some input feature maps.
"""
def __init__(self, bottom_up):
super(FPN, self).__init__()
in_channels = [512, 1024, 2048]
fpn_dim = 256
use_bias =True
lateral_convs, output_convs = [], []
for idx, in_channels in enumerate(in_channels):
lateral_conv = M.Conv2d(
in_channels, fpn_dim, kernel_size=1, bias=use_bias)
output_conv = M.Conv2d(
fpn_dim, fpn_dim, kernel_size=3, stride=1, padding=1, bias=use_bias)
M.init.msra_normal_(lateral_conv.weight, mode="fan_in")
M.init.msra_normal_(output_conv.weight, mode="fan_in")
if use_bias:
M.init.fill_(lateral_conv.bias, 0)
| M.init.fill_(output_conv.bias, 0) | megengine.module.init.fill_ |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
def __init__(self):
super().__init__()
def generate_anchors_opr(self, fm_3x3, fm_stride,
anchor_scales=(8, 16, 32, 64, 128),
anchor_ratios=(1, 2, 3), base_size = 4):
np_anchors = generate_anchors(
base_size=base_size,
ratios=np.array(anchor_ratios),
scales=np.array(anchor_scales))
device = fm_3x3.device
anchors = mge.tensor(np_anchors).to(device)
height, width = fm_3x3.shape[2], fm_3x3.shape[3]
shift_x = | F.linspace(0, width-1, width) | megengine.functional.linspace |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
def __init__(self):
super().__init__()
def generate_anchors_opr(self, fm_3x3, fm_stride,
anchor_scales=(8, 16, 32, 64, 128),
anchor_ratios=(1, 2, 3), base_size = 4):
np_anchors = generate_anchors(
base_size=base_size,
ratios=np.array(anchor_ratios),
scales=np.array(anchor_scales))
device = fm_3x3.device
anchors = mge.tensor(np_anchors).to(device)
height, width = fm_3x3.shape[2], fm_3x3.shape[3]
shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
shift_y = | F.linspace(0, height -1, height) | megengine.functional.linspace |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Produces per-level anchor boxes for an FPN feature pyramid."""

    def __init__(self):
        super().__init__()

    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """All anchors for a single level, flattened to (H*W*A, 4) and
        detached from the autodiff graph."""
        dev = fm_3x3.device
        template = mge.tensor(generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))).to(dev)
        h, w = fm_3x3.shape[2], fm_3x3.shape[3]
        # cell-origin coordinates in input-image pixels
        col = F.linspace(0, w - 1, w).to(dev) * fm_stride
        row = F.linspace(0, h - 1, h).to(dev) * fm_stride
        flat_x = F.broadcast_to(col.reshape(1, -1), (h, w)).flatten()
        flat_y = F.broadcast_to(row.reshape(-1, 1), (h, w)).flatten()
        # identical shift for both corners of every anchor box
        per_cell_shift = F.stack([flat_x, flat_y, flat_x, flat_y], axis=1)
        ncoord = template.shape[1]
        grid = F.expand_dims(template, axis=0) + F.expand_dims(per_cell_shift, axis=1)
        return grid.reshape(-1, ncoord).detach()

    def forward(self, fpn_fms):
        """One flattened anchor tensor per level, strides 128..8 (p6->p2)."""
        strides = list(reversed([8, 16, 32, 64, 128]))
        return [
            self.generate_anchors_opr(
                fm, strides[idx],
                np.array(config.anchor_base_scale) * strides[idx],
                config.anchor_aspect_ratios, base_size=4)
            for idx, fm in enumerate(fpn_fms)
        ]
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
# self.RPN = RPN(config.rpn_channel)
self.head = RetinaNetHead()
# -------------------------- buid the anchor generator -------------- #
self.anchor_generator = RetinaNetAnchorV2()
# -------------------------- buid the criteria ---------------------- #
self.criteria = RetinaNetCriteriaV2()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 6]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
im_info = inputs['im_info']
# process the images
normed_images = self.pre_process(inputs['image'])
if self.training:
gt_boxes = inputs['gt_boxes']
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 128,64,32,16,8, p6->p2
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
loss_dict = self.criteria(
pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
rpn_iou_list, gt_boxes, im_info)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
pred_reg_list, rpn_iou_list)
return pred_boxes
def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
assert rpn_cls_list[0].shape[0] == 1
all_anchors = F.concat(anchors_list, axis = 0)
rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
n, c = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to( | F.expand_dims(all_anchors, 1) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Generates the flat list of anchor boxes for every FPN level."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
        anchor_scales=(8, 16, 32, 64, 128),
        anchor_ratios=(1, 2, 3), base_size = 4):
        """Return all anchors for one feature map as an (H*W*A, 4) tensor.

        The base cell anchors are shifted to every spatial position of
        `fm_3x3` (stride `fm_stride` in input pixels); the result is detached
        so no gradient flows into anchor generation.
        """
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # per-cell pixel offsets along each axis
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        # (H*W, 4): the same shift is applied to both corners of each anchor
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors
    def forward(self, fpn_fms):
        """Build anchors for each pyramid level; strides run 128,64,32,16,8
        to match the p6->p2 ordering of `fpn_fms`."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
def __init__(self):
super().__init__()
# ----------------------- build the backbone ------------------------ #
self.resnet50 = ResNet50()
# ------------ freeze the weights of resnet stage1 and stage 2 ------ #
if config.backbone_freeze_at >= 1:
for p in self.resnet50.conv1.parameters():
# p.requires_grad = False
p = p.detach()
if config.backbone_freeze_at >= 2:
for p in self.resnet50.layer1.parameters():
# p.requires_grad = False
p = p.detach()
# -------------------------- build the FPN -------------------------- #
self.backbone = FPN(self.resnet50)
# -------------------------- build the RPN -------------------------- #
# self.RPN = RPN(config.rpn_channel)
self.head = RetinaNetHead()
# -------------------------- buid the anchor generator -------------- #
self.anchor_generator = RetinaNetAnchorV2()
# -------------------------- buid the criteria ---------------------- #
self.criteria = RetinaNetCriteriaV2()
# -------------------------- input Tensor --------------------------- #
self.inputs = {
"image": mge.tensor(
np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
),
"im_info": mge.tensor(
np.random.random([2, 6]).astype(np.float32), dtype="float32",
),
"gt_boxes": mge.tensor(
np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
),
}
def pre_process(self, images):
mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
mean = mge.tensor(mean).to(images.device)
std = mge.tensor(std).to(images.device)
normed_images = (images - mean) / std
normed_images = get_padded_tensor(normed_images, 64)
return normed_images
def forward(self, inputs):
im_info = inputs['im_info']
# process the images
normed_images = self.pre_process(inputs['image'])
if self.training:
gt_boxes = inputs['gt_boxes']
return self._forward_train(normed_images, im_info, gt_boxes)
else:
return self._forward_test(normed_images, im_info)
def _forward_train(self, image, im_info, gt_boxes):
loss_dict = {}
# stride: 128,64,32,16,8, p6->p2
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
loss_dict = self.criteria(
pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
rpn_iou_list, gt_boxes, im_info)
return loss_dict
def _forward_test(self, image, im_info):
fpn_fms = self.backbone(image)
pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
anchors_list = self.anchor_generator(fpn_fms)
pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
pred_reg_list, rpn_iou_list)
return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode raw per-level head outputs into a flat (N, 6) box tensor laid
        out as [x0, y0, x1, y1, cls_score, iou_prob].  Inference only; the
        assert below restricts this to batch size 1.
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # Concatenate levels along the anchor axis and drop the batch dim.
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        # Flatten per-anchor predictions to one row per anchor.
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # (n, c) -> (n, 1, c) -> (n, c): aligns anchor rows with flattened offsets.
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Training criteria: focal classification loss, smooth-L1 box regression
    loss, and an L1 loss on the predicted IoU quality scores.
    """
    def __init__(self):
        super().__init__()
    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """Per image, compute each decoded box's best IoU against the valid,
        non-ignored ground-truth boxes; used as the target for the IoU branch.

        boxes is padded per image; im_info[i, 5] holds the valid GT count.
        Offsets are detached, so no gradient flows through the targets.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # Slice off padding: only the first im_info[i, 5] GT rows are real.
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            m = offsets.shape[0]
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            # Decode the predicted offsets against the anchors, then IoU vs GTs.
            dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # Zero out overlaps with GT boxes carrying the "ignore" label.
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            # Best IoU per decoded box.
            index = F.argmax(overlaps, axis = 1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        result = F.concat(res, 0)
        return result
    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Concatenate per-level predictions, build anchor targets, and return
        a dict with 'rpn_cls_loss', 'rpn_bbox_loss' and 'rpn_iou_loss'.
        """
        # Append the FPN level index as an extra (5th) anchor column.
        all_anchors_list = [F.concat([a, i*F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis = 0)
        # Merge per-level predictions along the anchor axis.
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis = 1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis = 1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis = 1)
        rpn_num_per_points_final = F.concat(rpn_num_prob_list, axis = 1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # Targets come in pairs (2 per anchor); only slot 0 is consumed below.
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)
        rpn_cls_prob_final = rpn_cls_prob_final
        offsets_final = rpn_bbox_offset_final
        target_boxes = target_boxes[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha = config.focal_loss_alpha, gamma = config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(offsets_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {}
        loss_dict['rpn_cls_loss'] = cls_loss
        # Box and IoU losses are up-weighted by 2 (empirical weighting).
        loss_dict['rpn_bbox_loss'] = 2 * rpn_bbox_loss
        loss_dict['rpn_iou_loss'] = 2 * rpn_iou_loss
        return loss_dict
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = M.Sequential(*bbox_subnet)
# predictor
self.cls_score = M.Conv2d(
in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
kernel_size=3, stride=1, padding=1)
self.bbox_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 4 * 1,
kernel_size=3, stride=1, padding=1)
self.iou_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self.num_pred = M.Conv2d(in_channels,
config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self._init_weights()
def _init_weights(self):
# Initialization
for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
self.cls_score, self.bbox_pred, self.iou_pred]:
for layer in modules.modules():
if isinstance(layer, M.Conv2d):
| M.init.normal_(layer.weight, std=0.01) | megengine.module.init.normal_ |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Anchor generator: yields one detached (H*W*A, 4) anchor tensor per FPN
    feature map, in input-image coordinates."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Tile the base anchors over every spatial cell of one feature map.

        :param fm_3x3: feature map tensor; only its device and H/W are used.
        :param fm_stride: stride of this level relative to the input image.
        :return: detached (H*W*A, 4) anchor boxes.
        """
        # Base anchors at the origin for every scale/ratio combination.
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel offset of each feature-map cell in input-image coordinates.
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        # (H*W, 4) per-cell shift, duplicated for (x0, y0, x1, y1).
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # Broadcast-add: every cell shift applied to every base anchor.
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors
    def forward(self, fpn_fms):
        """Return anchors per level; strides iterate 128 -> 8 (p6 -> p2)."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            # Anchor scale grows with the level stride.
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
    """RetinaNet-style detector: ResNet50+FPN backbone, shared prediction head,
    anchor generator and loss criteria.  Also holds randomly-filled example
    input tensors in ``self.inputs``.
    """
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable does not modify the
                # stored parameter; this freeze loop looks like a no-op — verify.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same apparent no-op as the loop above.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Example inputs with random contents (e.g. for tracing / warm-up).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize with configured mean/std; pad H/W to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the training or inference pass based on self.training."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Compute and return the dict of training losses."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Inference pass: decode raw head outputs into boxes (batch size 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level predictions into (N, 6) rows:
        [x0, y0, x1, y1, cls_score, iou_prob].
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # Concatenate levels and drop the batch dimension (asserted == 1).
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # Align anchor rows with the flattened per-anchor offsets.
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
    """Training criteria: focal classification loss, smooth-L1 box regression
    loss, and an L1 loss on the predicted IoU quality scores.
    """
    def __init__(self):
        super().__init__()
    def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
            rpn_bbox_offsets):
        """Per image, compute each decoded box's best IoU against the valid,
        non-ignored ground-truth boxes; used as the target for the IoU branch.

        boxes is padded per image; im_info[i, 5] holds the valid GT count.
        Offsets are detached, so no gradient flows through the targets.
        """
        n = rpn_bbox_offsets.shape[0]
        res = []
        for i in range(n):
            # Slice off padding: only the first im_info[i, 5] GT rows are real.
            gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
            offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
            m = offsets.shape[0]
            an, ac = all_anchors.shape[0], all_anchors.shape[1]
            anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
            # Decode the predicted offsets against the anchors, then IoU vs GTs.
            dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
            overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
            # Zero out overlaps with GT boxes carrying the "ignore" label.
            ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
            ignore_mask = F.expand_dims(ignore_mask, axis=0)
            overlaps = overlaps * ignore_mask
            # Best IoU per decoded box.
            index = F.argmax(overlaps, axis = 1)
            value = F.nn.indexing_one_hot(overlaps, index, 1)
            value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
            res.append(value)
        result = F.concat(res, 0)
        return result
    def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
            anchors_list, rpn_iou_list, boxes, im_info):
        """Concatenate per-level predictions, build anchor targets, and return
        a dict with 'rpn_cls_loss', 'rpn_bbox_loss' and 'rpn_iou_loss'.
        """
        # Append the FPN level index as an extra (5th) anchor column.
        all_anchors_list = [F.concat([a, i*F.ones([a.shape[0], 1]).to(a.device)], axis=1)
            for i, a in enumerate(anchors_list)]
        all_anchors_final = F.concat(all_anchors_list, axis = 0)
        # Merge per-level predictions along the anchor axis.
        rpn_bbox_offset_final = F.concat(pred_reg_list, axis = 1)
        rpn_cls_prob_final = F.concat(pred_cls_list, axis = 1)
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis = 1)
        rpn_num_per_points_final = F.concat(rpn_num_prob_list, axis = 1)
        rpn_labels, rpn_target_boxes = rpn_anchor_target_opr(boxes, im_info, all_anchors_final)
        ious_target = self.anchor_iou_target_opr(boxes, im_info, all_anchors_final,
            rpn_bbox_offset_final)
        n = rpn_labels.shape[0]
        # Targets come in pairs (2 per anchor); only slot 0 is consumed below.
        target_boxes = rpn_target_boxes.reshape(n, -1, 2, 4).transpose(2, 0, 1, 3)
        rpn_cls_prob_final = rpn_cls_prob_final
        offsets_final = rpn_bbox_offset_final
        target_boxes = target_boxes[0]
        rpn_labels = rpn_labels.transpose(2, 0, 1)
        labels = rpn_labels[0]
        cls_loss = sigmoid_cross_entropy_retina(rpn_cls_prob_final,
            labels, alpha = config.focal_loss_alpha, gamma = config.focal_loss_gamma)
        rpn_bbox_loss = smooth_l1_loss_retina(offsets_final, target_boxes, labels)
        rpn_labels = F.expand_dims(labels, axis=2)
        rpn_iou_loss = iou_l1_loss(rpn_iou_prob_final, ious_target, rpn_labels)
        loss_dict = {}
        loss_dict['rpn_cls_loss'] = cls_loss
        # Box and IoU losses are up-weighted by 2 (empirical weighting).
        loss_dict['rpn_bbox_loss'] = 2 * rpn_bbox_loss
        loss_dict['rpn_iou_loss'] = 2 * rpn_iou_loss
        return loss_dict
class RetinaNetHead(M.Module):
def __init__(self):
super().__init__()
num_convs = 4
in_channels = 256
cls_subnet, bbox_subnet = [], []
for _ in range(num_convs):
cls_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
cls_subnet.append(M.ReLU())
bbox_subnet.append(
M.Conv2d(in_channels, in_channels, kernel_size=3, stride=1, padding=1)
)
bbox_subnet.append(M.ReLU())
self.cls_subnet = M.Sequential(*cls_subnet)
self.bbox_subnet = M.Sequential(*bbox_subnet)
# predictor
self.cls_score = M.Conv2d(
in_channels, config.num_cell_anchors * (config.num_classes-1) * 1,
kernel_size=3, stride=1, padding=1)
self.bbox_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 4 * 1,
kernel_size=3, stride=1, padding=1)
self.iou_pred = M.Conv2d(
in_channels, config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self.num_pred = M.Conv2d(in_channels,
config.num_cell_anchors * 1,
kernel_size = 3, stride=1, padding = 1)
self._init_weights()
def _init_weights(self):
# Initialization
for modules in [self.cls_subnet, self.bbox_subnet, self.num_pred,
self.cls_score, self.bbox_pred, self.iou_pred]:
for layer in modules.modules():
if isinstance(layer, M.Conv2d):
M.init.normal_(layer.weight, std=0.01)
| M.init.fill_(layer.bias, 0) | megengine.module.init.fill_ |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Anchor generator: yields one detached (H*W*A, 4) anchor tensor per FPN
    feature map, in input-image coordinates."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Tile the base anchors over every spatial cell of one feature map.

        :param fm_3x3: feature map tensor; only its device and H/W are used.
        :param fm_stride: stride of this level relative to the input image.
        :return: detached (H*W*A, 4) anchor boxes.
        """
        # Base anchors at the origin for every scale/ratio combination.
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel offset of each feature-map cell in input-image coordinates.
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        # (H*W, 4) per-cell shift, duplicated for (x0, y0, x1, y1).
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # Broadcast-add: every cell shift applied to every base anchor.
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors
    def forward(self, fpn_fms):
        """Return anchors per level; strides iterate 128 -> 8 (p6 -> p2)."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            # Anchor scale grows with the level stride.
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
    """RetinaNet-style detector: ResNet50+FPN backbone, shared prediction head,
    anchor generator and loss criteria.  Also holds randomly-filled example
    input tensors in ``self.inputs``.
    """
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable does not modify the
                # stored parameter; this freeze loop looks like a no-op — verify.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same apparent no-op as the loop above.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Example inputs with random contents (e.g. for tracing / warm-up).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize with configured mean/std; pad H/W to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the training or inference pass based on self.training."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Compute and return the dict of training losses."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Inference pass: decode raw head outputs into boxes (batch size 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level predictions into (N, 6) rows:
        [x0, y0, x1, y1, cls_score, iou_prob].
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # Concatenate levels and drop the batch dimension (asserted == 1).
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # Align anchor rows with the flattened per-anchor offsets.
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
def __init__(self):
super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to( | F.expand_dims(all_anchors, 1) | megengine.functional.expand_dims |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Anchor generator: yields one detached (H*W*A, 4) anchor tensor per FPN
    feature map, in input-image coordinates."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Tile the base anchors over every spatial cell of one feature map.

        :param fm_3x3: feature map tensor; only its device and H/W are used.
        :param fm_stride: stride of this level relative to the input image.
        :return: detached (H*W*A, 4) anchor boxes.
        """
        # Base anchors at the origin for every scale/ratio combination.
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel offset of each feature-map cell in input-image coordinates.
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        # (H*W, 4) per-cell shift, duplicated for (x0, y0, x1, y1).
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # Broadcast-add: every cell shift applied to every base anchor.
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors
    def forward(self, fpn_fms):
        """Return anchors per level; strides iterate 128 -> 8 (p6 -> p2)."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            # Anchor scale grows with the level stride.
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
    """RetinaNet-style detector: ResNet50+FPN backbone, shared prediction head,
    anchor generator and loss criteria.  Also holds randomly-filled example
    input tensors in ``self.inputs``.
    """
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable does not modify the
                # stored parameter; this freeze loop looks like a no-op — verify.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same apparent no-op as the loop above.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Example inputs with random contents (e.g. for tracing / warm-up).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize with configured mean/std; pad H/W to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the training or inference pass based on self.training."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Compute and return the dict of training losses."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Inference pass: decode raw head outputs into boxes (batch size 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level predictions into (N, 6) rows:
        [x0, y0, x1, y1, cls_score, iou_prob].
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # Concatenate levels and drop the batch dimension (asserted == 1).
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # Align anchor rows with the flattened per-anchor offsets.
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
def __init__(self):
super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - | F.equal(gtboxes[:, 4], config.anchor_ignore_label) | megengine.functional.equal |
import numpy as np
import megengine as mge
import megengine.functional as F
import megengine.module as M
import math
from config import config
from backbone.resnet50 import ResNet50
from module.generate_anchors import generate_anchors
from det_opr.bbox_opr import bbox_transform_inv_opr, box_overlap_opr
from det_opr.utils import get_padded_tensor
from rpn_anchor_target_opr import rpn_anchor_target_opr
from det_opr.loss_opr import sigmoid_cross_entropy_retina, smooth_l1_loss_retina, iou_l1_loss
import pdb
class RetinaNetAnchorV2(M.Module):
    """Anchor generator: yields one detached (H*W*A, 4) anchor tensor per FPN
    feature map, in input-image coordinates."""
    def __init__(self):
        super().__init__()
    def generate_anchors_opr(self, fm_3x3, fm_stride,
            anchor_scales=(8, 16, 32, 64, 128),
            anchor_ratios=(1, 2, 3), base_size = 4):
        """Tile the base anchors over every spatial cell of one feature map.

        :param fm_3x3: feature map tensor; only its device and H/W are used.
        :param fm_stride: stride of this level relative to the input image.
        :return: detached (H*W*A, 4) anchor boxes.
        """
        # Base anchors at the origin for every scale/ratio combination.
        np_anchors = generate_anchors(
            base_size=base_size,
            ratios=np.array(anchor_ratios),
            scales=np.array(anchor_scales))
        device = fm_3x3.device
        anchors = mge.tensor(np_anchors).to(device)
        height, width = fm_3x3.shape[2], fm_3x3.shape[3]
        # Pixel offset of each feature-map cell in input-image coordinates.
        shift_x = F.linspace(0, width-1, width).to(device) * fm_stride
        shift_y = F.linspace(0, height -1, height).to(device) * fm_stride
        broad_shift_x = F.broadcast_to(shift_x.reshape(1, -1), (height, width)).flatten()
        broad_shift_y = F.broadcast_to(shift_y.reshape(-1, 1), (height, width)).flatten()
        # (H*W, 4) per-cell shift, duplicated for (x0, y0, x1, y1).
        shifts = F.stack([broad_shift_x, broad_shift_y, broad_shift_x, broad_shift_y], axis=1)
        c = anchors.shape[1]
        # Broadcast-add: every cell shift applied to every base anchor.
        all_anchors = F.expand_dims(anchors, axis=0) + F.expand_dims(shifts, axis=1)
        all_anchors = all_anchors.reshape(-1, c).detach()
        return all_anchors
    def forward(self, fpn_fms):
        """Return anchors per level; strides iterate 128 -> 8 (p6 -> p2)."""
        all_anchors_list = []
        fm_stride = [8, 16, 32, 64, 128]
        fm_stride.reverse()
        for i, fm_3x3 in enumerate(fpn_fms):
            # Anchor scale grows with the level stride.
            anchor_scales = np.array(config.anchor_base_scale) * fm_stride[i]
            all_anchors = self.generate_anchors_opr(fm_3x3, fm_stride[i], anchor_scales,
                config.anchor_aspect_ratios, base_size = 4)
            all_anchors_list.append(all_anchors)
        return all_anchors_list
class Network(M.Module):
    """RetinaNet-style detector: ResNet50+FPN backbone, shared prediction head,
    anchor generator and loss criteria.  Also holds randomly-filled example
    input tensors in ``self.inputs``.
    """
    def __init__(self):
        super().__init__()
        # ----------------------- build the backbone ------------------------ #
        self.resnet50 = ResNet50()
        # ------------ freeze the weights of resnet stage1 and stage 2 ------ #
        if config.backbone_freeze_at >= 1:
            for p in self.resnet50.conv1.parameters():
                # p.requires_grad = False
                # NOTE(review): rebinding the loop variable does not modify the
                # stored parameter; this freeze loop looks like a no-op — verify.
                p = p.detach()
        if config.backbone_freeze_at >= 2:
            for p in self.resnet50.layer1.parameters():
                # p.requires_grad = False
                # NOTE(review): same apparent no-op as the loop above.
                p = p.detach()
        # -------------------------- build the FPN -------------------------- #
        self.backbone = FPN(self.resnet50)
        # -------------------------- build the RPN -------------------------- #
        # self.RPN = RPN(config.rpn_channel)
        self.head = RetinaNetHead()
        # -------------------------- buid the anchor generator -------------- #
        self.anchor_generator = RetinaNetAnchorV2()
        # -------------------------- buid the criteria ---------------------- #
        self.criteria = RetinaNetCriteriaV2()
        # -------------------------- input Tensor --------------------------- #
        # Example inputs with random contents (e.g. for tracing / warm-up).
        self.inputs = {
            "image": mge.tensor(
                np.random.random([2, 3, 756, 1400]).astype(np.float32), dtype="float32",
            ),
            "im_info": mge.tensor(
                np.random.random([2, 6]).astype(np.float32), dtype="float32",
            ),
            "gt_boxes": mge.tensor(
                np.random.random([2, 500, 5]).astype(np.float32), dtype="float32",
            ),
        }
    def pre_process(self, images):
        """Normalize with configured mean/std; pad H/W to a multiple of 64."""
        mean = config.image_mean.reshape(1, -1, 1, 1).astype(np.float32)
        std = config.image_std.reshape(1, -1, 1, 1).astype(np.float32)
        mean = mge.tensor(mean).to(images.device)
        std = mge.tensor(std).to(images.device)
        normed_images = (images - mean) / std
        normed_images = get_padded_tensor(normed_images, 64)
        return normed_images
    def forward(self, inputs):
        """Dispatch to the training or inference pass based on self.training."""
        im_info = inputs['im_info']
        # process the images
        normed_images = self.pre_process(inputs['image'])
        if self.training:
            gt_boxes = inputs['gt_boxes']
            return self._forward_train(normed_images, im_info, gt_boxes)
        else:
            return self._forward_test(normed_images, im_info)
    def _forward_train(self, image, im_info, gt_boxes):
        """Compute and return the dict of training losses."""
        loss_dict = {}
        # stride: 128,64,32,16,8, p6->p2
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        loss_dict = self.criteria(
            pred_cls_list, rpn_num_prob_list, pred_reg_list, anchors_list,
            rpn_iou_list, gt_boxes, im_info)
        return loss_dict
    def _forward_test(self, image, im_info):
        """Inference pass: decode raw head outputs into boxes (batch size 1)."""
        fpn_fms = self.backbone(image)
        pred_cls_list, rpn_num_prob_list, pred_reg_list, rpn_iou_list = self.head(fpn_fms)
        anchors_list = self.anchor_generator(fpn_fms)
        pred_boxes = self._recover_dtboxes(anchors_list, pred_cls_list,
            pred_reg_list, rpn_iou_list)
        return pred_boxes
    def _recover_dtboxes(self, anchors_list, rpn_cls_list, rpn_bbox_list, rpn_iou_list):
        """Decode per-level predictions into (N, 6) rows:
        [x0, y0, x1, y1, cls_score, iou_prob].
        """
        assert rpn_cls_list[0].shape[0] == 1
        all_anchors = F.concat(anchors_list, axis = 0)
        # Concatenate levels and drop the batch dimension (asserted == 1).
        rpn_cls_scores_final = F.concat(rpn_cls_list, axis=1)[0]
        rpn_bbox_offsets_final = F.concat(rpn_bbox_list,axis=1)[0]
        rpn_iou_prob_final = F.concat(rpn_iou_list, axis=1)[0]
        rpn_bbox_offsets = rpn_bbox_offsets_final.reshape(-1, 4)
        rpn_cls_scores = rpn_cls_scores_final.reshape(-1, 1)
        rpn_iou_prob = rpn_iou_prob_final.reshape(-1, 1)
        n, c = all_anchors.shape[0], all_anchors.shape[1]
        # Align anchor rows with the flattened per-anchor offsets.
        anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (n, 1, c)).reshape(-1, c)
        rpn_bbox = bbox_transform_inv_opr(anchors, rpn_bbox_offsets)
        pred_boxes = F.concat([rpn_bbox, rpn_cls_scores, rpn_iou_prob], axis=1)
        return pred_boxes
class RetinaNetCriteriaV2(M.Module):
def __init__(self):
super().__init__()
def anchor_iou_target_opr(self, boxes, im_info, all_anchors,
rpn_bbox_offsets):
n = rpn_bbox_offsets.shape[0]
res = []
for i in range(n):
gtboxes = boxes[i, :im_info[i, 5].astype(np.int32)]
offsets = rpn_bbox_offsets[i].reshape(-1, 4).detach()
m = offsets.shape[0]
an, ac = all_anchors.shape[0], all_anchors.shape[1]
anchors = F.broadcast_to(F.expand_dims(all_anchors, 1), (an, 1, ac)).reshape(-1, ac)
dtboxes = bbox_transform_inv_opr(anchors[:,:4], offsets[:, :4])
overlaps = box_overlap_opr(dtboxes, gtboxes[:, :4])
ignore_mask = 1 - F.equal(gtboxes[:, 4], config.anchor_ignore_label).astype(np.float32)
ignore_mask = F.expand_dims(ignore_mask, axis=0)
overlaps = overlaps * ignore_mask
index = F.argmax(overlaps, axis = 1)
value = F.nn.indexing_one_hot(overlaps, index, 1)
value = F.expand_dims(F.expand_dims(value, axis=1), axis=0)
res.append(value)
result = F.concat(res, 0)
return result
def forward(self, pred_cls_list, rpn_num_prob_list, pred_reg_list,
anchors_list, rpn_iou_list, boxes, im_info):
all_anchors_list = [F.concat([a, i* | F.ones([a.shape[0], 1]) | megengine.functional.ones |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Bilinearly upsample every frame of a (B, T, C, h, w) clip by 4x.

    Frames are flattened to (B*T, C, h, w) for interpolation, then the
    result is restored to the 5D layout (B, T, C, 4h, 4w).
    """
    batch, frames, channels, height, width = image.shape
    flat = image.reshape(batch * frames, channels, height, width)
    upsampled = F.nn.interpolate(flat, scale_factor=4)
    return upsampled.reshape(batch, frames, channels, 4 * height, 4 * width)
def train_generator_batch(image, label, *, gm, netG, netloss):
    """One training step of the bidirectional video-SR generator.

    ``image`` is the LR clip (B, T, C, h, w); ``label`` is the HR target.
    The forward- and backward-in-time recurrences are batched together:
    rows 0..B-1 of each tensor walk the clip front-to-back, rows B..2B-1
    walk it back-to-front.  Gradients are accumulated into ``gm``; the
    (optionally all-reduced) scalar loss is returned.
    """
    B,T,_,h,w = image.shape
    # Bilinear 4x upsampling used as the residual base for the prediction.
    biup = get_bilinear(image)
    netG.train()
    with gm:
        forward_hiddens = []
        backward_hiddens = []
        res = []
        # One recurrent hidden state per direction, stacked along batch.
        hidden = F.zeros((2*B, netG.hidden_channels, h, w))
        for i in range(T):
            # Frame i (forward stream) paired with frame T-1-i (backward stream).
            now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
            if i==0:
                # No previous frame yet: estimate flow against the frame itself.
                flow = netG.flownet(now_frame, now_frame)
            else:
                ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
                flow = netG.flownet(now_frame, ref)
            hidden = netG(hidden, flow, now_frame)
            forward_hiddens.append(hidden[0:B, ...])
            backward_hiddens.append(hidden[B:2*B, ...])
        for i in range(T):
            # backward_hiddens is stored in reverse time order, hence T-i-1.
            res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
        res = F.stack(res, axis = 1) # [B,T,3,H,W]
        # Residual learning: the network predicts a correction to the bilinear base.
        loss = netloss(res+biup, label)
        gm.backward(loss)
        if dist.is_distributed():
            # Average the reported loss across workers -- presumably for logging;
            # gradient sync is assumed to be handled elsewhere (TODO confirm).
            loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
    return loss
def test_generator_batch(image, *, netG):
# image: [1,100,3,180,320]
B,T,_,h,w = image.shape
biup = get_bilinear(image)
netG.eval()
forward_hiddens = []
backward_hiddens = []
res = []
hidden = | F.zeros((2*B, netG.hidden_channels, h, w)) | megengine.functional.zeros |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
B,T,_,h,w = image.shape
biup = get_bilinear(image)
netG.train()
with gm:
forward_hiddens = []
backward_hiddens = []
res = []
hidden = | F.zeros((2*B, netG.hidden_channels, h, w)) | megengine.functional.zeros |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
B,T,_,h,w = image.shape
biup = get_bilinear(image)
netG.train()
with gm:
forward_hiddens = []
backward_hiddens = []
res = []
hidden = F.zeros((2*B, netG.hidden_channels, h, w))
for i in range(T):
now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
if i==0:
flow = netG.flownet(now_frame, now_frame)
else:
ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
flow = netG.flownet(now_frame, ref)
hidden = netG(hidden, flow, now_frame)
forward_hiddens.append(hidden[0:B, ...])
backward_hiddens.append(hidden[B:2*B, ...])
for i in range(T):
res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
res = F.stack(res, axis = 1) # [B,T,3,H,W]
loss = netloss(res+biup, label)
gm.backward(loss)
if | dist.is_distributed() | megengine.distributed.is_distributed |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
    """One training step of the bidirectional video-SR generator.

    ``image`` is the LR clip (B, T, C, h, w); ``label`` is the HR target.
    The forward- and backward-in-time recurrences are batched together:
    rows 0..B-1 of each tensor walk the clip front-to-back, rows B..2B-1
    walk it back-to-front.  Gradients are accumulated into ``gm``; the
    (optionally all-reduced) scalar loss is returned.
    """
    B,T,_,h,w = image.shape
    # Bilinear 4x upsampling used as the residual base for the prediction.
    biup = get_bilinear(image)
    netG.train()
    with gm:
        forward_hiddens = []
        backward_hiddens = []
        res = []
        # One recurrent hidden state per direction, stacked along batch.
        hidden = F.zeros((2*B, netG.hidden_channels, h, w))
        for i in range(T):
            # Frame i (forward stream) paired with frame T-1-i (backward stream).
            now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
            if i==0:
                # No previous frame yet: estimate flow against the frame itself.
                flow = netG.flownet(now_frame, now_frame)
            else:
                ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
                flow = netG.flownet(now_frame, ref)
            hidden = netG(hidden, flow, now_frame)
            forward_hiddens.append(hidden[0:B, ...])
            backward_hiddens.append(hidden[B:2*B, ...])
        for i in range(T):
            # backward_hiddens is stored in reverse time order, hence T-i-1.
            res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
        res = F.stack(res, axis = 1) # [B,T,3,H,W]
        # Residual learning: the network predicts a correction to the bilinear base.
        loss = netloss(res+biup, label)
        gm.backward(loss)
        if dist.is_distributed():
            # Average the reported loss across workers -- presumably for logging;
            # gradient sync is assumed to be handled elsewhere (TODO confirm).
            loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
    return loss
def test_generator_batch(image, *, netG):
# image: [1,100,3,180,320]
B,T,_,h,w = image.shape
biup = get_bilinear(image)
netG.eval()
forward_hiddens = []
backward_hiddens = []
res = []
hidden = F.zeros((2*B, netG.hidden_channels, h, w))
for i in range(T):
now_frame = | F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0) | megengine.functional.concat |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
    """One training step of the bidirectional video-SR generator.

    ``image`` is the LR clip (B, T, C, h, w); ``label`` is the HR target.
    The forward- and backward-in-time recurrences are batched together:
    rows 0..B-1 of each tensor walk the clip front-to-back, rows B..2B-1
    walk it back-to-front.  Gradients are accumulated into ``gm``; the
    (optionally all-reduced) scalar loss is returned.
    """
    B,T,_,h,w = image.shape
    # Bilinear 4x upsampling used as the residual base for the prediction.
    biup = get_bilinear(image)
    netG.train()
    with gm:
        forward_hiddens = []
        backward_hiddens = []
        res = []
        # One recurrent hidden state per direction, stacked along batch.
        hidden = F.zeros((2*B, netG.hidden_channels, h, w))
        for i in range(T):
            # Frame i (forward stream) paired with frame T-1-i (backward stream).
            now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
            if i==0:
                # No previous frame yet: estimate flow against the frame itself.
                flow = netG.flownet(now_frame, now_frame)
            else:
                ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
                flow = netG.flownet(now_frame, ref)
            hidden = netG(hidden, flow, now_frame)
            forward_hiddens.append(hidden[0:B, ...])
            backward_hiddens.append(hidden[B:2*B, ...])
        for i in range(T):
            # backward_hiddens is stored in reverse time order, hence T-i-1.
            res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
        res = F.stack(res, axis = 1) # [B,T,3,H,W]
        # Residual learning: the network predicts a correction to the bilinear base.
        loss = netloss(res+biup, label)
        gm.backward(loss)
        if dist.is_distributed():
            # Average the reported loss across workers -- presumably for logging;
            # gradient sync is assumed to be handled elsewhere (TODO confirm).
            loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
    return loss
def test_generator_batch(image, *, netG):
    """Super-resolve a whole LR clip (B, T, C, h, w) -> (B, T, C, 4h, 4w).

    Mirrors ``train_generator_batch`` but without loss/gradients: hidden
    states are propagated forward and backward in one doubled batch, then
    fused per frame and added to the bilinear upsampled base.
    """
    # image: [1,100,3,180,320]
    B,T,_,h,w = image.shape
    biup = get_bilinear(image)
    netG.eval()
    forward_hiddens = []
    backward_hiddens = []
    res = []
    # Rows 0..B-1: forward-in-time stream; rows B..2B-1: backward stream.
    hidden = F.zeros((2*B, netG.hidden_channels, h, w))
    for i in range(T):
        now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
        if i==0:
            # First step has no reference frame; use the frame itself.
            flow = netG.flownet(now_frame, now_frame)
        else:
            ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
            flow = netG.flownet(now_frame, ref)
        hidden = netG(hidden, flow, now_frame)
        forward_hiddens.append(hidden[0:B, ...])
        backward_hiddens.append(hidden[B:2*B, ...])
    for i in range(T):
        # backward_hiddens is stored in reverse time order, hence T-i-1.
        res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
    res = F.stack(res, axis = 1) # [B,T,3,H,W]
    return res + biup
epoch_dict = {}  # epochs for which the 0.8 lr decay has already been applied
def adjust_learning_rate(optimizer, epoch):
    """Multiply every param group's lr by 0.8, once per epoch, from epoch 8 on."""
    # `epoch % 1 == 0` is always true; the epoch_dict guard ensures the decay
    # fires only on the first call within a given epoch.
    if epoch>=8 and epoch % 1 == 0 and epoch_dict.get(epoch, None) is None:
        epoch_dict[epoch] = True
        for param_group in optimizer.param_groups:
            param_group["lr"] = param_group["lr"] * 0.8
        # NOTE(review): `param_group` leaks out of the loop; this raises
        # NameError if param_groups is empty -- confirm optimizers always
        # have at least one group.
        print("adjust lr! , now lr: {}".format(param_group["lr"]))
# TODO: add a common parent class abstracting the shared methods; with more than one model the code is duplicated (e.g. get_img_id and test_step)
@MODELS.register_module()
class BidirectionalRestorer(BaseModel):
allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}
    def __init__(self, generator, pixel_loss, train_cfg=None, eval_cfg=None, pretrained=None, Fidelity_loss=None):
        """Build the restorer.

        ``generator``/``pixel_loss``/``Fidelity_loss`` are config objects
        consumed by the project's ``build_backbone``/``build_loss`` factories;
        ``pretrained`` is forwarded to ``init_weights``.
        """
        super(BidirectionalRestorer, self).__init__()
        self.train_cfg = train_cfg
        self.eval_cfg = eval_cfg
        # generator
        self.generator = build_backbone(generator)
        # loss
        self.pixel_loss = build_loss(pixel_loss)
        # Fidelity loss is optional; any falsy config disables it.
        if Fidelity_loss:
            self.Fidelity_loss = build_loss(Fidelity_loss)
        else:
            self.Fidelity_loss = None
        # load pretrained
        self.init_weights(pretrained)
    def init_weights(self, pretrained=None):
        """Delegate weight init (optionally from a checkpoint path) to the generator."""
        self.generator.init_weights(pretrained)
def train_step(self, batchdata, now_epoch, now_iter):
LR_tensor = | mge.tensor(batchdata['lq'], dtype="float32") | megengine.tensor |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
    """One training step of the bidirectional video-SR generator.

    ``image`` is the LR clip (B, T, C, h, w); ``label`` is the HR target.
    The forward- and backward-in-time recurrences are batched together:
    rows 0..B-1 of each tensor walk the clip front-to-back, rows B..2B-1
    walk it back-to-front.  Gradients are accumulated into ``gm``; the
    (optionally all-reduced) scalar loss is returned.
    """
    B,T,_,h,w = image.shape
    # Bilinear 4x upsampling used as the residual base for the prediction.
    biup = get_bilinear(image)
    netG.train()
    with gm:
        forward_hiddens = []
        backward_hiddens = []
        res = []
        # One recurrent hidden state per direction, stacked along batch.
        hidden = F.zeros((2*B, netG.hidden_channels, h, w))
        for i in range(T):
            # Frame i (forward stream) paired with frame T-1-i (backward stream).
            now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
            if i==0:
                # No previous frame yet: estimate flow against the frame itself.
                flow = netG.flownet(now_frame, now_frame)
            else:
                ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
                flow = netG.flownet(now_frame, ref)
            hidden = netG(hidden, flow, now_frame)
            forward_hiddens.append(hidden[0:B, ...])
            backward_hiddens.append(hidden[B:2*B, ...])
        for i in range(T):
            # backward_hiddens is stored in reverse time order, hence T-i-1.
            res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
        res = F.stack(res, axis = 1) # [B,T,3,H,W]
        # Residual learning: the network predicts a correction to the bilinear base.
        loss = netloss(res+biup, label)
        gm.backward(loss)
        if dist.is_distributed():
            # Average the reported loss across workers -- presumably for logging;
            # gradient sync is assumed to be handled elsewhere (TODO confirm).
            loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
    return loss
def test_generator_batch(image, *, netG):
    """Super-resolve a whole LR clip (B, T, C, h, w) -> (B, T, C, 4h, 4w).

    Mirrors ``train_generator_batch`` but without loss/gradients: hidden
    states are propagated forward and backward in one doubled batch, then
    fused per frame and added to the bilinear upsampled base.
    """
    # image: [1,100,3,180,320]
    B,T,_,h,w = image.shape
    biup = get_bilinear(image)
    netG.eval()
    forward_hiddens = []
    backward_hiddens = []
    res = []
    # Rows 0..B-1: forward-in-time stream; rows B..2B-1: backward stream.
    hidden = F.zeros((2*B, netG.hidden_channels, h, w))
    for i in range(T):
        now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
        if i==0:
            # First step has no reference frame; use the frame itself.
            flow = netG.flownet(now_frame, now_frame)
        else:
            ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
            flow = netG.flownet(now_frame, ref)
        hidden = netG(hidden, flow, now_frame)
        forward_hiddens.append(hidden[0:B, ...])
        backward_hiddens.append(hidden[B:2*B, ...])
    for i in range(T):
        # backward_hiddens is stored in reverse time order, hence T-i-1.
        res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
    res = F.stack(res, axis = 1) # [B,T,3,H,W]
    return res + biup
epoch_dict = {}  # epochs for which the 0.8 lr decay has already been applied
def adjust_learning_rate(optimizer, epoch):
    """Multiply every param group's lr by 0.8, once per epoch, from epoch 8 on."""
    # `epoch % 1 == 0` is always true; the epoch_dict guard ensures the decay
    # fires only on the first call within a given epoch.
    if epoch>=8 and epoch % 1 == 0 and epoch_dict.get(epoch, None) is None:
        epoch_dict[epoch] = True
        for param_group in optimizer.param_groups:
            param_group["lr"] = param_group["lr"] * 0.8
        # NOTE(review): `param_group` leaks out of the loop; this raises
        # NameError if param_groups is empty -- confirm optimizers always
        # have at least one group.
        print("adjust lr! , now lr: {}".format(param_group["lr"]))
# TODO: add a common parent class abstracting the shared methods; with more than one model the code is duplicated (e.g. get_img_id and test_step)
@MODELS.register_module()
class BidirectionalRestorer(BaseModel):
allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}
    def __init__(self, generator, pixel_loss, train_cfg=None, eval_cfg=None, pretrained=None, Fidelity_loss=None):
        """Build the restorer.

        ``generator``/``pixel_loss``/``Fidelity_loss`` are config objects
        consumed by the project's ``build_backbone``/``build_loss`` factories;
        ``pretrained`` is forwarded to ``init_weights``.
        """
        super(BidirectionalRestorer, self).__init__()
        self.train_cfg = train_cfg
        self.eval_cfg = eval_cfg
        # generator
        self.generator = build_backbone(generator)
        # loss
        self.pixel_loss = build_loss(pixel_loss)
        # Fidelity loss is optional; any falsy config disables it.
        if Fidelity_loss:
            self.Fidelity_loss = build_loss(Fidelity_loss)
        else:
            self.Fidelity_loss = None
        # load pretrained
        self.init_weights(pretrained)
    def init_weights(self, pretrained=None):
        """Delegate weight init (optionally from a checkpoint path) to the generator."""
        self.generator.init_weights(pretrained)
def train_step(self, batchdata, now_epoch, now_iter):
LR_tensor = mge.tensor(batchdata['lq'], dtype="float32")
HR_tensor = | mge.tensor(batchdata['gt'], dtype="float32") | megengine.tensor |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x via bilinear interpolation.

    Returns a tensor of shape (B, T, C, 4h, 4w).
    """
    B,T,C,h,w = image.shape
    image = image.reshape(-1, C,h,w)
    # FIX: restore the reshape back to 5D -- this copy returned the flat
    # (B*T, C, 4h, 4w) tensor, inconsistent with the identical helper
    # defined elsewhere in this file.
    return F.nn.interpolate(image, scale_factor=4).reshape(B, T, C, 4*h, 4*w)
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
B,T,_,h,w = image.shape
biup = get_bilinear(image)
netG.train()
with gm:
forward_hiddens = []
backward_hiddens = []
res = []
hidden = F.zeros((2*B, netG.hidden_channels, h, w))
for i in range(T):
now_frame = | F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0) | megengine.functional.concat |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
    """One training step of the bidirectional video-SR generator.

    ``image`` is the LR clip (B, T, C, h, w); ``label`` is the HR target.
    The forward- and backward-in-time recurrences are batched together:
    rows 0..B-1 of each tensor walk the clip front-to-back, rows B..2B-1
    walk it back-to-front.  Gradients are accumulated into ``gm``; the
    (optionally all-reduced) scalar loss is returned.
    """
    B,T,_,h,w = image.shape
    # Bilinear 4x upsampling used as the residual base for the prediction.
    biup = get_bilinear(image)
    netG.train()
    with gm:
        forward_hiddens = []
        backward_hiddens = []
        res = []
        # One recurrent hidden state per direction, stacked along batch.
        hidden = F.zeros((2*B, netG.hidden_channels, h, w))
        for i in range(T):
            # Frame i (forward stream) paired with frame T-1-i (backward stream).
            now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
            if i==0:
                # No previous frame yet: estimate flow against the frame itself.
                flow = netG.flownet(now_frame, now_frame)
            else:
                ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
                flow = netG.flownet(now_frame, ref)
            hidden = netG(hidden, flow, now_frame)
            forward_hiddens.append(hidden[0:B, ...])
            backward_hiddens.append(hidden[B:2*B, ...])
        for i in range(T):
            # backward_hiddens is stored in reverse time order, hence T-i-1.
            res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
        res = F.stack(res, axis = 1) # [B,T,3,H,W]
        # Residual learning: the network predicts a correction to the bilinear base.
        loss = netloss(res+biup, label)
        gm.backward(loss)
        if dist.is_distributed():
            # Average the reported loss across workers -- presumably for logging;
            # gradient sync is assumed to be handled elsewhere (TODO confirm).
            loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
    return loss
def test_generator_batch(image, *, netG):
# image: [1,100,3,180,320]
B,T,_,h,w = image.shape
biup = get_bilinear(image)
netG.eval()
forward_hiddens = []
backward_hiddens = []
res = []
hidden = F.zeros((2*B, netG.hidden_channels, h, w))
for i in range(T):
now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
if i==0:
flow = netG.flownet(now_frame, now_frame)
else:
ref = | F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0) | megengine.functional.concat |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
B,T,_,h,w = image.shape
biup = get_bilinear(image)
netG.train()
with gm:
forward_hiddens = []
backward_hiddens = []
res = []
hidden = F.zeros((2*B, netG.hidden_channels, h, w))
for i in range(T):
now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
if i==0:
flow = netG.flownet(now_frame, now_frame)
else:
ref = | F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0) | megengine.functional.concat |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
B,T,_,h,w = image.shape
biup = get_bilinear(image)
netG.train()
with gm:
forward_hiddens = []
backward_hiddens = []
res = []
hidden = F.zeros((2*B, netG.hidden_channels, h, w))
for i in range(T):
now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
if i==0:
flow = netG.flownet(now_frame, now_frame)
else:
ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
flow = netG.flownet(now_frame, ref)
hidden = netG(hidden, flow, now_frame)
forward_hiddens.append(hidden[0:B, ...])
backward_hiddens.append(hidden[B:2*B, ...])
for i in range(T):
res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
res = F.stack(res, axis = 1) # [B,T,3,H,W]
loss = netloss(res+biup, label)
gm.backward(loss)
if dist.is_distributed():
loss = | dist.functional.all_reduce_sum(loss) | megengine.distributed.functional.all_reduce_sum |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
B,T,_,h,w = image.shape
biup = get_bilinear(image)
netG.train()
with gm:
forward_hiddens = []
backward_hiddens = []
res = []
hidden = F.zeros((2*B, netG.hidden_channels, h, w))
for i in range(T):
now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
if i==0:
flow = netG.flownet(now_frame, now_frame)
else:
ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
flow = netG.flownet(now_frame, ref)
hidden = netG(hidden, flow, now_frame)
forward_hiddens.append(hidden[0:B, ...])
backward_hiddens.append(hidden[B:2*B, ...])
for i in range(T):
res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
res = F.stack(res, axis = 1) # [B,T,3,H,W]
loss = netloss(res+biup, label)
gm.backward(loss)
if dist.is_distributed():
loss = dist.functional.all_reduce_sum(loss) / | dist.get_world_size() | megengine.distributed.get_world_size |
import os
import time
import numpy as np
import megengine.distributed as dist
import megengine as mge
import megengine.functional as F
from megengine.autodiff import GradManager
from edit.core.hook.evaluation import psnr, ssim
from edit.utils import imwrite, tensor2img, bgr2ycbcr, img_multi_padding, img_de_multi_padding, ensemble_forward, ensemble_back
from edit.utils import img_multi_padding, img_de_multi_padding, flow_to_image
from ..base import BaseModel
from ..builder import build_backbone, build_loss
from ..registry import MODELS
from tqdm import tqdm
def get_bilinear(image):
    """Upsample a (B, T, C, h, w) clip 4x; returns shape (B, T, C, 4h, 4w)."""
    B,T,C,h,w = image.shape
    # Flatten frames into the batch dimension for interpolation.
    image = image.reshape(-1, C,h,w)
    return F.nn.interpolate(image, scale_factor=4).reshape(B,T,C,4*h, 4*w)
def train_generator_batch(image, label, *, gm, netG, netloss):
    """One training step of the bidirectional video-SR generator.

    ``image`` is the LR clip (B, T, C, h, w); ``label`` is the HR target.
    The forward- and backward-in-time recurrences are batched together:
    rows 0..B-1 of each tensor walk the clip front-to-back, rows B..2B-1
    walk it back-to-front.  Gradients are accumulated into ``gm``; the
    (optionally all-reduced) scalar loss is returned.
    """
    B,T,_,h,w = image.shape
    # Bilinear 4x upsampling used as the residual base for the prediction.
    biup = get_bilinear(image)
    netG.train()
    with gm:
        forward_hiddens = []
        backward_hiddens = []
        res = []
        # One recurrent hidden state per direction, stacked along batch.
        hidden = F.zeros((2*B, netG.hidden_channels, h, w))
        for i in range(T):
            # Frame i (forward stream) paired with frame T-1-i (backward stream).
            now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
            if i==0:
                # No previous frame yet: estimate flow against the frame itself.
                flow = netG.flownet(now_frame, now_frame)
            else:
                ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
                flow = netG.flownet(now_frame, ref)
            hidden = netG(hidden, flow, now_frame)
            forward_hiddens.append(hidden[0:B, ...])
            backward_hiddens.append(hidden[B:2*B, ...])
        for i in range(T):
            # backward_hiddens is stored in reverse time order, hence T-i-1.
            res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
        res = F.stack(res, axis = 1) # [B,T,3,H,W]
        # Residual learning: the network predicts a correction to the bilinear base.
        loss = netloss(res+biup, label)
        gm.backward(loss)
        if dist.is_distributed():
            # Average the reported loss across workers -- presumably for logging;
            # gradient sync is assumed to be handled elsewhere (TODO confirm).
            loss = dist.functional.all_reduce_sum(loss) / dist.get_world_size()
    return loss
def test_generator_batch(image, *, netG):
    """Super-resolve a whole LR clip (B, T, C, h, w) -> (B, T, C, 4h, 4w).

    Mirrors ``train_generator_batch`` but without loss/gradients: hidden
    states are propagated forward and backward in one doubled batch, then
    fused per frame and added to the bilinear upsampled base.
    """
    # image: [1,100,3,180,320]
    B,T,_,h,w = image.shape
    biup = get_bilinear(image)
    netG.eval()
    forward_hiddens = []
    backward_hiddens = []
    res = []
    # Rows 0..B-1: forward-in-time stream; rows B..2B-1: backward stream.
    hidden = F.zeros((2*B, netG.hidden_channels, h, w))
    for i in range(T):
        now_frame = F.concat([image[:, i, ...], image[:, T-i-1, ...]], axis=0)
        if i==0:
            # First step has no reference frame; use the frame itself.
            flow = netG.flownet(now_frame, now_frame)
        else:
            ref = F.concat([image[:, i-1, ...], image[:, T-i, ...]], axis=0)
            flow = netG.flownet(now_frame, ref)
        hidden = netG(hidden, flow, now_frame)
        forward_hiddens.append(hidden[0:B, ...])
        backward_hiddens.append(hidden[B:2*B, ...])
    for i in range(T):
        # backward_hiddens is stored in reverse time order, hence T-i-1.
        res.append(netG.do_upsample(forward_hiddens[i], backward_hiddens[T-i-1]))
    res = F.stack(res, axis = 1) # [B,T,3,H,W]
    return res + biup
epoch_dict = {}  # epochs whose lr decay has already been applied

def adjust_learning_rate(optimizer, epoch):
    """Decay every param group's lr by 0.8, once per epoch, from epoch 8 on.

    Called every iteration; the module-level ``epoch_dict`` guard makes the
    decay fire only on the first call within a given epoch.
    """
    # FIX: dropped the always-true `epoch % 1 == 0` condition, and moved the
    # print inside the loop so an empty `param_groups` no longer raises
    # NameError on the leaked loop variable.
    if epoch >= 8 and epoch not in epoch_dict:
        epoch_dict[epoch] = True
        for param_group in optimizer.param_groups:
            param_group["lr"] = param_group["lr"] * 0.8
            print("adjust lr! , now lr: {}".format(param_group["lr"]))
# TODO: add a common parent class abstracting the shared methods; with more than one model the code is duplicated (e.g. get_img_id and test_step)
@MODELS.register_module()
class BidirectionalRestorer(BaseModel):
allowed_metrics = {'PSNR': psnr, 'SSIM': ssim}
    def __init__(self, generator, pixel_loss, train_cfg=None, eval_cfg=None, pretrained=None, Fidelity_loss=None):
        """Build the restorer.

        ``generator``/``pixel_loss``/``Fidelity_loss`` are config objects
        consumed by the project's ``build_backbone``/``build_loss`` factories;
        ``pretrained`` is forwarded to ``init_weights``.
        """
        super(BidirectionalRestorer, self).__init__()
        self.train_cfg = train_cfg
        self.eval_cfg = eval_cfg
        # generator
        self.generator = build_backbone(generator)
        # loss
        self.pixel_loss = build_loss(pixel_loss)
        # Fidelity loss is optional; any falsy config disables it.
        if Fidelity_loss:
            self.Fidelity_loss = build_loss(Fidelity_loss)
        else:
            self.Fidelity_loss = None
        # load pretrained
        self.init_weights(pretrained)
    def init_weights(self, pretrained=None):
        """Delegate weight init (optionally from a checkpoint path) to the generator."""
        self.generator.init_weights(pretrained)
    def train_step(self, batchdata, now_epoch, now_iter):
        """One optimization step on a batch dict carrying 'lq' (LR) and 'gt' (HR) arrays."""
        LR_tensor = mge.tensor(batchdata['lq'], dtype="float32")
        HR_tensor = mge.tensor(batchdata['gt'], dtype="float32")
        loss = train_generator_batch(LR_tensor, HR_tensor, gm=self.gms['generator'], netG=self.generator, netloss=self.pixel_loss)
        # Lr schedule is applied lazily here, keyed on the epoch number.
        adjust_learning_rate(self.optimizers['generator'], now_epoch)
        self.optimizers['generator'].step()
        self.optimizers['generator'].clear_grad()
        return loss
def get_img_id(self, key):
shift = self.eval_cfg.get('save_shift', 0)
assert isinstance(key, str)
L = key.split("/")
return int(L[-1][:-4]), str(int(L[-2]) - shift).zfill(3) # id, clip
def test_step(self, batchdata, **kwargs):
"""
possible kwargs:
save_image
save_path
ensemble
"""
lq = batchdata['lq'] # [B,3,h,w]
gt = batchdata.get('gt', None) # if not None: [B,3,4*h,4*w]
assert len(batchdata['lq_path']) == 1 # 每个sample所带的lq_path列表长度仅为1, 即自己
lq_paths = batchdata['lq_path'][0] # length 为batch长度
now_start_id, clip = self.get_img_id(lq_paths[0])
now_end_id, _ = self.get_img_id(lq_paths[-1])
assert clip == _
if now_start_id==0:
print("first frame: {}".format(lq_paths[0]))
self.LR_list = []
self.HR_list = []
# pad lq
B ,_ ,origin_H, origin_W = lq.shape
lq = img_multi_padding(lq, padding_multi=self.eval_cfg.multi_pad, pad_method = "edge") # edge constant
self.LR_list.append(lq) # [1,3,h,w]
if gt is not None:
for i in range(B):
self.HR_list.append(gt[i:i+1, ...])
if now_end_id == 99:
print("start to forward all frames....")
if self.eval_cfg.gap == 1:
# do ensemble (8 times)
ensemble_res = []
self.LR_list = np.concatenate(self.LR_list, axis=0) # [100, 3,h,w]
for item in tqdm(range(8)): # do not have flip
inp = mge.tensor(ensemble_forward(self.LR_list, Type=item), dtype="float32")
oup = test_generator_batch( | F.expand_dims(inp, axis=0) | megengine.functional.expand_dims |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_gp = M.Sequential(
M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_out = M.Sequential(
M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
def forward(self, x):
conv1 = self.conv1(x)
conv31 = self.conv2(x)
conv32 = self.conv3(x)
conv33 = self.conv4(x)
gp = | F.mean(x, [2, 3], True) | megengine.functional.mean |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    def __init__(self, in_channels, out_channels, dr=1):
        """ASPP head: a 1x1 conv, three dilated 3x3 convs (rates 6/12/18 * dr)
        and a global-pooling branch, fused by a final 1x1 conv.

        ``dr`` scales every dilation rate (and the matching padding) so the
        branches keep the same output spatial size.
        """
        super().__init__()
        # 1x1 branch (dilation has no effect at kernel size 1).
        self.conv1 = M.Sequential(
            M.Conv2d(
                in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 branch, dilation rate 6*dr (padding matches to preserve size).
        self.conv2 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=6 * dr,
                dilation=6 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 branch, dilation rate 12*dr.
        self.conv3 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=12 * dr,
                dilation=12 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 branch, dilation rate 18*dr.
        self.conv4 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=18 * dr,
                dilation=18 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Image-level branch applied after global average pooling.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Fuses the five concatenated branch outputs back to `out_channels`.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
def forward(self, x):
conv1 = self.conv1(x)
conv31 = self.conv2(x)
conv32 = self.conv3(x)
conv33 = self.conv4(x)
gp = F.mean(x, [2, 3], True)
gp = self.conv_gp(gp)
gp = | F.nn.interpolate(gp, (x.shape[2], x.shape[3])) | megengine.functional.nn.interpolate |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    def __init__(self, in_channels, out_channels, dr=1):
        """ASPP head: a 1x1 conv, three dilated 3x3 convs (rates 6/12/18 * dr)
        and a global-pooling branch, fused by a final 1x1 conv.

        ``dr`` scales every dilation rate (and the matching padding) so the
        branches keep the same output spatial size.
        """
        super().__init__()
        # 1x1 branch (dilation has no effect at kernel size 1).
        self.conv1 = M.Sequential(
            M.Conv2d(
                in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 branch, dilation rate 6*dr (padding matches to preserve size).
        self.conv2 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=6 * dr,
                dilation=6 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 branch, dilation rate 12*dr.
        self.conv3 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=12 * dr,
                dilation=12 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 branch, dilation rate 18*dr.
        self.conv4 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=18 * dr,
                dilation=18 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Image-level branch applied after global average pooling.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Fuses the five concatenated branch outputs back to `out_channels`.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
def forward(self, x):
conv1 = self.conv1(x)
conv31 = self.conv2(x)
conv32 = self.conv3(x)
conv33 = self.conv4(x)
gp = F.mean(x, [2, 3], True)
gp = self.conv_gp(gp)
gp = F.nn.interpolate(gp, (x.shape[2], x.shape[3]))
out = | F.concat([conv1, conv31, conv32, conv33, gp], axis=1) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab family).

    Four parallel conv branches (a 1x1 conv and three 3x3 atrous convs with
    rates 6, 12 and 18, all scaled by ``dr``) plus a global-average-pooling
    branch are concatenated channel-wise and fused by a final 1x1 conv.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def conv_bn_relu(cin, ksize, pad, dilation):
            # Shared conv -> BN -> ReLU pattern; the conv bias is redundant
            # because BatchNorm follows.
            return M.Sequential(
                M.Conv2d(
                    cin,
                    out_channels,
                    ksize,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # 1x1 branch and three atrous 3x3 branches (padding == dilation keeps
        # the spatial size unchanged).
        self.conv1 = conv_bn_relu(in_channels, 1, 0, dr)
        self.conv2 = conv_bn_relu(in_channels, 3, 6 * dr, 6 * dr)
        self.conv3 = conv_bn_relu(in_channels, 3, 12 * dr, 12 * dr)
        self.conv4 = conv_bn_relu(in_channels, 3, 18 * dr, 18 * dr)
        # Image-level branch applied after global average pooling.
        self.conv_gp = conv_bn_relu(in_channels, 1, 0, 1)
        # Fuses the five concatenated branch outputs back to out_channels.
        self.conv_out = conv_bn_relu(out_channels * 5, 1, 0, 1)

    def forward(self, x):
        """Return the fused multi-scale feature map for ``x``."""
        branches = [self.conv1(x), self.conv2(x), self.conv3(x), self.conv4(x)]
        # Global-average-pool to 1x1, embed, then upsample back to x's size.
        pooled = self.conv_gp(F.mean(x, [2, 3], True))
        branches.append(F.nn.interpolate(pooled, (x.shape[2], x.shape[3])))
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = | M.Dropout(0.5) | megengine.module.Dropout |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab family).

    Runs four parallel conv branches with increasing dilation rates plus a
    global-average-pooling branch over the input feature map, concatenates
    the five results along the channel axis and fuses them with a 1x1 conv.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        # dr scales the dilation rates (the caller passes 16 // output_stride).
        super().__init__()
        # 1x1 branch (dilation has no spatial effect on a 1x1 kernel).
        self.conv1 = M.Sequential(
            M.Conv2d(
                in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 6*dr; padding == dilation keeps spatial size.
        self.conv2 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=6 * dr,
                dilation=6 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 12*dr.
        self.conv3 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=12 * dr,
                dilation=12 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 18*dr.
        self.conv4 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=18 * dr,
                dilation=18 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Image-level branch applied after global average pooling.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Fuses the five concatenated branch outputs back to out_channels.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Return the fused multi-scale feature map for input ``x``."""
        conv1 = self.conv1(x)
        conv31 = self.conv2(x)  # NOTE: conv31..conv33 hold conv2..conv4 outputs
        conv32 = self.conv3(x)
        conv33 = self.conv4(x)
        # Global-average-pool to 1x1, embed, then upsample back to x's size.
        gp = F.mean(x, [2, 3], True)
        gp = self.conv_gp(gp)
        gp = F.nn.interpolate(gp, (x.shape[2], x.shape[3]))
        out = F.concat([conv1, conv31, conv32, conv33, gp], axis=1)
        out = self.conv_out(out)
        return out
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.5),
M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.1),
)
self.conv_out = | M.Conv2d(256, self.num_classes, 1, 1, padding=0) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab family).

    Runs four parallel conv branches with increasing dilation rates plus a
    global-average-pooling branch over the input feature map, concatenates
    the five results along the channel axis and fuses them with a 1x1 conv.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        # dr scales the dilation rates (the caller passes 16 // output_stride).
        super().__init__()
        # 1x1 branch (dilation has no spatial effect on a 1x1 kernel).
        self.conv1 = M.Sequential(
            M.Conv2d(
                in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 6*dr; padding == dilation keeps spatial size.
        self.conv2 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=6 * dr,
                dilation=6 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 12*dr.
        self.conv3 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=12 * dr,
                dilation=12 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 18*dr.
        self.conv4 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=18 * dr,
                dilation=18 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Image-level branch applied after global average pooling.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Fuses the five concatenated branch outputs back to out_channels.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Return the fused multi-scale feature map for input ``x``."""
        conv1 = self.conv1(x)
        conv31 = self.conv2(x)  # NOTE: conv31..conv33 hold conv2..conv4 outputs
        conv32 = self.conv3(x)
        conv33 = self.conv4(x)
        # Global-average-pool to 1x1, embed, then upsample back to x's size.
        gp = F.mean(x, [2, 3], True)
        gp = self.conv_gp(gp)
        gp = F.nn.interpolate(gp, (x.shape[2], x.shape[3]))
        out = F.concat([conv1, conv31, conv32, conv33, gp], axis=1)
        out = self.conv_out(out)
        return out
class DeepLabV3Plus(M.Module):
    def __init__(self, cfg):
        """Build the DeepLabV3+ decoder head and its backbone from ``cfg``.

        ``cfg`` must provide ``num_classes``, ``backbone`` (name of a
        constructor in the ``resnet`` module) and ``backbone_pretrained``.
        """
        super().__init__()
        self.cfg = cfg
        self.output_stride = 16
        # Scale factor (= 4) used in forward to upsample the ASPP output
        # before merging it with the low-level features.
        self.sub_output_stride = self.output_stride // 4
        self.num_classes = cfg.num_classes
        # assumes the backbone's last stage has 2048 channels — TODO confirm
        # for backbones other than ResNet-50/101.
        self.aspp = ASPP(
            in_channels=2048, out_channels=256, dr=16 // self.output_stride
        )
        self.dropout = M.Dropout(0.5)
        # Projects the low-level 256-channel features down to 48 channels
        # (1 // 2 == 0, i.e. no padding for the 1x1 conv).
        self.upstage1 = M.Sequential(
            M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
            M.BatchNorm2d(48),
            M.ReLU(),
        )
        # Refines the concatenated (ASPP 256 + low-level 48) features.
        self.upstage2 = M.Sequential(
            M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.5),
            M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.1),
        )
        # Final per-pixel classifier.
        self.conv_out = M.Conv2d(256, self.num_classes, 1, 1, padding=0)
        # Initialize the decoder head only: the backbone is created after this
        # loop, so its (possibly pretrained) weights are left untouched.
        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
        # Dilate the last stage instead of striding so the backbone keeps
        # output stride 16.
        self.backbone = getattr(resnet, cfg.backbone)(
            replace_stride_with_dilation=[False, False, True],
            pretrained=cfg.backbone_pretrained,
        )
        del self.backbone.fc  # the backbone's classification head is unused
def forward(self, x):
layers = self.backbone.extract_features(x)
up0 = self.aspp(layers["res5"])
up0 = self.dropout(up0)
up0 = | F.nn.interpolate(up0, scale_factor=self.sub_output_stride) | megengine.functional.nn.interpolate |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab family).

    Four parallel conv branches (a 1x1 conv and three 3x3 atrous convs with
    rates 6, 12 and 18, all scaled by ``dr``) plus a global-average-pooling
    branch are concatenated channel-wise and fused by a final 1x1 conv.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def conv_bn_relu(cin, ksize, pad, dilation):
            # Shared conv -> BN -> ReLU pattern; the conv bias is redundant
            # because BatchNorm follows.
            return M.Sequential(
                M.Conv2d(
                    cin,
                    out_channels,
                    ksize,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # 1x1 branch and three atrous 3x3 branches (padding == dilation keeps
        # the spatial size unchanged).
        self.conv1 = conv_bn_relu(in_channels, 1, 0, dr)
        self.conv2 = conv_bn_relu(in_channels, 3, 6 * dr, 6 * dr)
        self.conv3 = conv_bn_relu(in_channels, 3, 12 * dr, 12 * dr)
        self.conv4 = conv_bn_relu(in_channels, 3, 18 * dr, 18 * dr)
        # Image-level branch applied after global average pooling.
        self.conv_gp = conv_bn_relu(in_channels, 1, 0, 1)
        # Fuses the five concatenated branch outputs back to out_channels.
        self.conv_out = conv_bn_relu(out_channels * 5, 1, 0, 1)

    def forward(self, x):
        """Return the fused multi-scale feature map for ``x``."""
        branches = [self.conv1(x), self.conv2(x), self.conv3(x), self.conv4(x)]
        # Global-average-pool to 1x1, embed, then upsample back to x's size.
        pooled = self.conv_gp(F.mean(x, [2, 3], True))
        branches.append(F.nn.interpolate(pooled, (x.shape[2], x.shape[3])))
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
    def __init__(self, cfg):
        """Build the DeepLabV3+ decoder head and its backbone from ``cfg``.

        ``cfg`` must provide ``num_classes``, ``backbone`` (name of a
        constructor in the ``resnet`` module) and ``backbone_pretrained``.
        """
        super().__init__()
        self.cfg = cfg
        self.output_stride = 16
        # Scale factor (= 4) used in forward to upsample the ASPP output
        # before merging it with the low-level features.
        self.sub_output_stride = self.output_stride // 4
        self.num_classes = cfg.num_classes
        # assumes the backbone's last stage has 2048 channels — TODO confirm
        # for backbones other than ResNet-50/101.
        self.aspp = ASPP(
            in_channels=2048, out_channels=256, dr=16 // self.output_stride
        )
        self.dropout = M.Dropout(0.5)
        # Projects the low-level 256-channel features down to 48 channels
        # (1 // 2 == 0, i.e. no padding for the 1x1 conv).
        self.upstage1 = M.Sequential(
            M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
            M.BatchNorm2d(48),
            M.ReLU(),
        )
        # Refines the concatenated (ASPP 256 + low-level 48) features.
        self.upstage2 = M.Sequential(
            M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.5),
            M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.1),
        )
        # Final per-pixel classifier.
        self.conv_out = M.Conv2d(256, self.num_classes, 1, 1, padding=0)
        # Initialize the decoder head only: the backbone is created after this
        # loop, so its (possibly pretrained) weights are left untouched.
        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
        # Dilate the last stage instead of striding so the backbone keeps
        # output stride 16.
        self.backbone = getattr(resnet, cfg.backbone)(
            replace_stride_with_dilation=[False, False, True],
            pretrained=cfg.backbone_pretrained,
        )
        del self.backbone.fc  # the backbone's classification head is unused
def forward(self, x):
layers = self.backbone.extract_features(x)
up0 = self.aspp(layers["res5"])
up0 = self.dropout(up0)
up0 = F.nn.interpolate(up0, scale_factor=self.sub_output_stride)
up1 = self.upstage1(layers["res2"])
up1 = | F.concat([up0, up1], 1) | megengine.functional.concat |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab family).

    Runs four parallel conv branches with increasing dilation rates plus a
    global-average-pooling branch over the input feature map, concatenates
    the five results along the channel axis and fuses them with a 1x1 conv.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        # dr scales the dilation rates (the caller passes 16 // output_stride).
        super().__init__()
        # 1x1 branch (dilation has no spatial effect on a 1x1 kernel).
        self.conv1 = M.Sequential(
            M.Conv2d(
                in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 6*dr; padding == dilation keeps spatial size.
        self.conv2 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=6 * dr,
                dilation=6 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 12*dr.
        self.conv3 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=12 * dr,
                dilation=12 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 18*dr.
        self.conv4 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=18 * dr,
                dilation=18 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Image-level branch applied after global average pooling.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Fuses the five concatenated branch outputs back to out_channels.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Return the fused multi-scale feature map for input ``x``."""
        conv1 = self.conv1(x)
        conv31 = self.conv2(x)  # NOTE: conv31..conv33 hold conv2..conv4 outputs
        conv32 = self.conv3(x)
        conv33 = self.conv4(x)
        # Global-average-pool to 1x1, embed, then upsample back to x's size.
        gp = F.mean(x, [2, 3], True)
        gp = self.conv_gp(gp)
        gp = F.nn.interpolate(gp, (x.shape[2], x.shape[3]))
        out = F.concat([conv1, conv31, conv32, conv33, gp], axis=1)
        out = self.conv_out(out)
        return out
class DeepLabV3Plus(M.Module):
    def __init__(self, cfg):
        """Build the DeepLabV3+ decoder head and its backbone from ``cfg``.

        ``cfg`` must provide ``num_classes``, ``backbone`` (name of a
        constructor in the ``resnet`` module) and ``backbone_pretrained``.
        """
        super().__init__()
        self.cfg = cfg
        self.output_stride = 16
        # Scale factor (= 4) used in forward to upsample the ASPP output
        # before merging it with the low-level features.
        self.sub_output_stride = self.output_stride // 4
        self.num_classes = cfg.num_classes
        # assumes the backbone's last stage has 2048 channels — TODO confirm
        # for backbones other than ResNet-50/101.
        self.aspp = ASPP(
            in_channels=2048, out_channels=256, dr=16 // self.output_stride
        )
        self.dropout = M.Dropout(0.5)
        # Projects the low-level 256-channel features down to 48 channels
        # (1 // 2 == 0, i.e. no padding for the 1x1 conv).
        self.upstage1 = M.Sequential(
            M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
            M.BatchNorm2d(48),
            M.ReLU(),
        )
        # Refines the concatenated (ASPP 256 + low-level 48) features.
        self.upstage2 = M.Sequential(
            M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.5),
            M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.1),
        )
        # Final per-pixel classifier.
        self.conv_out = M.Conv2d(256, self.num_classes, 1, 1, padding=0)
        # Initialize the decoder head only: the backbone is created after this
        # loop, so its (possibly pretrained) weights are left untouched.
        for m in self.modules():
            if isinstance(m, M.Conv2d):
                M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(m, M.BatchNorm2d):
                M.init.ones_(m.weight)
                M.init.zeros_(m.bias)
        # Dilate the last stage instead of striding so the backbone keeps
        # output stride 16.
        self.backbone = getattr(resnet, cfg.backbone)(
            replace_stride_with_dilation=[False, False, True],
            pretrained=cfg.backbone_pretrained,
        )
        del self.backbone.fc  # the backbone's classification head is unused
def forward(self, x):
layers = self.backbone.extract_features(x)
up0 = self.aspp(layers["res5"])
up0 = self.dropout(up0)
up0 = F.nn.interpolate(up0, scale_factor=self.sub_output_stride)
up1 = self.upstage1(layers["res2"])
up1 = F.concat([up0, up1], 1)
up2 = self.upstage2(up1)
out = self.conv_out(up2)
out = | F.nn.interpolate(out, scale_factor=4) | megengine.functional.nn.interpolate |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
| M.BatchNorm2d(out_channels) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
| M.BatchNorm2d(out_channels) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
| M.BatchNorm2d(out_channels) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
| M.BatchNorm2d(out_channels) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_gp = M.Sequential(
| M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_gp = M.Sequential(
M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
| M.BatchNorm2d(out_channels) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_gp = M.Sequential(
M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
M.BatchNorm2d(out_channels),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_gp = M.Sequential(
M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_out = M.Sequential(
| M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_gp = M.Sequential(
M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_out = M.Sequential(
M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
| M.BatchNorm2d(out_channels) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
def __init__(self, in_channels, out_channels, dr=1):
super().__init__()
self.conv1 = M.Sequential(
M.Conv2d(
in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv2 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=6 * dr,
dilation=6 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv3 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=12 * dr,
dilation=12 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv4 = M.Sequential(
M.Conv2d(
in_channels,
out_channels,
3,
1,
padding=18 * dr,
dilation=18 * dr,
bias=False,
),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_gp = M.Sequential(
M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
M.BatchNorm2d(out_channels),
M.ReLU(),
)
self.conv_out = M.Sequential(
M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
M.BatchNorm2d(out_channels),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab family).

    Runs four parallel conv branches with increasing dilation rates plus a
    global-average-pooling branch over the input feature map, concatenates
    the five results along the channel axis and fuses them with a 1x1 conv.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        # dr scales the dilation rates (the caller passes 16 // output_stride).
        super().__init__()
        # 1x1 branch (dilation has no spatial effect on a 1x1 kernel).
        self.conv1 = M.Sequential(
            M.Conv2d(
                in_channels, out_channels, 1, 1, padding=0, dilation=dr, bias=False
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 6*dr; padding == dilation keeps spatial size.
        self.conv2 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=6 * dr,
                dilation=6 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 12*dr.
        self.conv3 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=12 * dr,
                dilation=12 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 3x3 atrous branch, rate 18*dr.
        self.conv4 = M.Sequential(
            M.Conv2d(
                in_channels,
                out_channels,
                3,
                1,
                padding=18 * dr,
                dilation=18 * dr,
                bias=False,
            ),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Image-level branch applied after global average pooling.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Fuses the five concatenated branch outputs back to out_channels.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Return the fused multi-scale feature map for input ``x``."""
        conv1 = self.conv1(x)
        conv31 = self.conv2(x)  # NOTE: conv31..conv33 hold conv2..conv4 outputs
        conv32 = self.conv3(x)
        conv33 = self.conv4(x)
        # Global-average-pool to 1x1, embed, then upsample back to x's size.
        gp = F.mean(x, [2, 3], True)
        gp = self.conv_gp(gp)
        gp = F.nn.interpolate(gp, (x.shape[2], x.shape[3]))
        out = F.concat([conv1, conv31, conv32, conv33, gp], axis=1)
        out = self.conv_out(out)
        return out
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
| M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
| M.BatchNorm2d(48) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
| M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
| M.BatchNorm2d(256) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
| M.Dropout(0.5) | megengine.module.Dropout |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.5),
| M.Conv2d(256, 256, 3, 1, padding=1, bias=False) | megengine.module.Conv2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.5),
M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
| M.BatchNorm2d(256) | megengine.module.BatchNorm2d |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.5),
M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
| M.ReLU() | megengine.module.ReLU |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.5),
M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
| M.Dropout(0.1) | megengine.module.Dropout |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.5),
M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.1),
)
self.conv_out = M.Conv2d(256, self.num_classes, 1, 1, padding=0)
for m in self.modules():
if isinstance(m, M.Conv2d):
| M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu") | megengine.module.init.msra_normal_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling head (DeepLab).

    Runs four parallel atrous-convolution branches plus a global-pooling
    branch over the input feature map, concatenates the five results along
    the channel axis and fuses them with a 1x1 convolution.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def branch(kernel, dilation):
            # conv -> BN -> ReLU; "same" padding for the dilated 3x3 case.
            pad = 0 if kernel == 1 else dilation
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    kernel,
                    1,
                    padding=pad,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Four parallel atrous branches: 1x1, then 3x3 at rates 6/12/18
        # (each scaled by dr).
        self.conv1 = branch(1, dr)
        self.conv2 = branch(3, 6 * dr)
        self.conv3 = branch(3, 12 * dr)
        self.conv4 = branch(3, 18 * dr)
        # Image-level (global average pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # 1x1 projection that fuses the five concatenated branches.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        """Apply all five branches to ``x`` and return the fused feature map."""
        pooled = F.mean(x, [2, 3], True)  # global average pool, keep dims
        pooled = self.conv_gp(pooled)
        # Upsample the pooled features back to the input's spatial size.
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
def __init__(self, cfg):
super().__init__()
self.cfg = cfg
self.output_stride = 16
self.sub_output_stride = self.output_stride // 4
self.num_classes = cfg.num_classes
self.aspp = ASPP(
in_channels=2048, out_channels=256, dr=16 // self.output_stride
)
self.dropout = M.Dropout(0.5)
self.upstage1 = M.Sequential(
M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
M.BatchNorm2d(48),
M.ReLU(),
)
self.upstage2 = M.Sequential(
M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.5),
M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
M.BatchNorm2d(256),
M.ReLU(),
M.Dropout(0.1),
)
self.conv_out = M.Conv2d(256, self.num_classes, 1, 1, padding=0)
for m in self.modules():
if isinstance(m, M.Conv2d):
M.init.msra_normal_(m.weight, mode="fan_out", nonlinearity="relu")
elif isinstance(m, M.BatchNorm2d):
| M.init.ones_(m.weight) | megengine.module.init.ones_ |
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
import megengine.functional as F
import megengine.module as M
import official.vision.classification.resnet.model as resnet
class ASPP(M.Module):
    """Atrous Spatial Pyramid Pooling.

    Runs four parallel conv branches at increasing dilation rates plus a
    global-average-pooling branch, concatenates the five results, and fuses
    them back to ``out_channels`` with a 1x1 conv.
    """

    def __init__(self, in_channels, out_channels, dr=1):
        super().__init__()

        def conv_bn_relu(ksize, padding, dilation):
            # Shared Conv -> BN -> ReLU building block for the atrous branches.
            return M.Sequential(
                M.Conv2d(
                    in_channels,
                    out_channels,
                    ksize,
                    1,
                    padding=padding,
                    dilation=dilation,
                    bias=False,
                ),
                M.BatchNorm2d(out_channels),
                M.ReLU(),
            )

        # Branches at dilation rates dr, 6*dr, 12*dr, 18*dr (padding keeps
        # the spatial size unchanged for each 3x3 branch).
        self.conv1 = conv_bn_relu(1, 0, dr)
        self.conv2 = conv_bn_relu(3, 6 * dr, 6 * dr)
        self.conv3 = conv_bn_relu(3, 12 * dr, 12 * dr)
        self.conv4 = conv_bn_relu(3, 18 * dr, 18 * dr)
        # Image-level (global pooling) branch.
        self.conv_gp = M.Sequential(
            M.Conv2d(in_channels, out_channels, 1, 1, 0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )
        # Fuses the five concatenated branch outputs.
        self.conv_out = M.Sequential(
            M.Conv2d(out_channels * 5, out_channels, 1, 1, padding=0, bias=False),
            M.BatchNorm2d(out_channels),
            M.ReLU(),
        )

    def forward(self, x):
        # Pooling branch: global average -> 1x1 conv -> upsample back to x's size.
        pooled = self.conv_gp(F.mean(x, [2, 3], True))
        pooled = F.nn.interpolate(pooled, (x.shape[2], x.shape[3]))
        branches = [
            self.conv1(x),
            self.conv2(x),
            self.conv3(x),
            self.conv4(x),
            pooled,
        ]
        return self.conv_out(F.concat(branches, axis=1))
class DeepLabV3Plus(M.Module):
    """DeepLab v3+ decoder head: ASPP context module followed by fusion with a
    low-level (stride-4) backbone feature, producing per-class logits."""

    def __init__(self, cfg):
        super().__init__()
        self.cfg = cfg
        self.output_stride = 16
        self.sub_output_stride = self.output_stride // 4
        self.num_classes = cfg.num_classes
        # Context module on the backbone's 2048-channel output.
        self.aspp = ASPP(
            in_channels=2048, out_channels=256, dr=16 // self.output_stride
        )
        self.dropout = M.Dropout(0.5)
        # 1x1 projection of the low-level feature map down to 48 channels.
        self.upstage1 = M.Sequential(
            M.Conv2d(256, 48, 1, 1, padding=1 // 2, bias=False),
            M.BatchNorm2d(48),
            M.ReLU(),
        )
        # Two 3x3 refinement stages on the concatenated (256 + 48) features.
        self.upstage2 = M.Sequential(
            M.Conv2d(256 + 48, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.5),
            M.Conv2d(256, 256, 3, 1, padding=1, bias=False),
            M.BatchNorm2d(256),
            M.ReLU(),
            M.Dropout(0.1),
        )
        # Final per-pixel classifier.
        self.conv_out = M.Conv2d(256, self.num_classes, 1, 1, padding=0)
        self._init_weights()

    def _init_weights(self):
        """Kaiming-normal init for convs; BatchNorm starts as identity."""
        for mod in self.modules():
            if isinstance(mod, M.Conv2d):
                M.init.msra_normal_(mod.weight, mode="fan_out", nonlinearity="relu")
            elif isinstance(mod, M.BatchNorm2d):
                M.init.ones_(mod.weight)
                M.init.zeros_(mod.bias)
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
    """Decode per-class regression deltas into absolute boxes.

    ``deltas`` is (num_rois, num_classes, 4-ish); each ROI is broadcast across
    the class axis and decoded with :func:`bbox_transform_inv_opr`.
    NOTE(review): the default ``config=None`` will fail when ``unnormalize``
    is True — callers are expected to always pass a config in that case.
    """
    assert deltas.ndim == 3
    if unnormalize:
        # Undo target normalisation: delta = delta * std + mean.
        stds = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
        means = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
        deltas = deltas * stds + means
    num_rois, num_cls = deltas.shape[0], deltas.shape[1]
    # Repeat each ROI once per class so ROI/delta rows line up 1:1.
    tiled_rois = F.broadcast_to(
        F.expand_dims(rois, 1), (num_rois, num_cls, rois.shape[1])
    ).reshape(-1, rois.shape[1])
    flat_deltas = deltas.reshape(-1, deltas.shape[2])
    boxes = bbox_transform_inv_opr(tiled_rois, flat_deltas)
    return boxes.reshape(-1, num_cls, boxes.shape[1])
def filter_boxes_opr(boxes, min_size):
    """Return a float32 keep-mask (1 = keep, 0 = drop) for boxes whose width
    and height are both >= ``min_size``.

    Boxes are (x1, y1, x2, y2) in inclusive pixel coordinates, so side length
    is x2 - x1 + 1. If no box passes the size test, every entry is raised by 1
    so that all boxes are kept (avoids an empty selection downstream).
    """
    wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
    keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
    # F.equal(...) is a scalar 1 when the mask sums to zero; broadcasting it
    # onto every entry turns an all-zero mask into an all-one mask.
    keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    # BUG FIX: the original returned the undefined name `keep`, which raises
    # NameError at runtime; the computed mask is `keep_mask`.
    return keep_mask
def clip_boxes_opr(boxes, im_info):
    """Clamp boxes in place to the image rectangle; im_info is (height, width, ...)."""
    x_max = im_info[1] - 1
    y_max = im_info[0] - 1
    # Columns 0/2 are x-coordinates and 1/3 are y-coordinates; the ::4 stride
    # also handles layouts with 4 columns per class.
    for col, hi in ((0, x_max), (1, y_max), (2, x_max), (3, y_max)):
        boxes[:, col::4] = boxes[:, col::4].clamp(min=0, max=hi)
    return boxes
def bbox_transform_inv_opr(bbox, deltas):
    """ Transforms the learned deltas to the final bbox coordinates, the axis is 1

    NOTE(review): this copy appears truncated by the chunk boundary — it never
    applies the deltas to produce and return predicted boxes; a complete
    implementation of the same function appears later in the file.
    """
    # Cap on dw so exp(dw) cannot overflow: log(1000/16), as in Detectron.
    max_delta = math.log(1000.0 / 16)
    # Inclusive pixel coordinates: side length is x2 - x1 + 1.
    bbox_width = bbox[:, 2] - bbox[:, 0] + 1
    bbox_height = bbox[:, 3] - bbox[:, 1] + 1
    bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
    bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
    # Shift each box centre by the predicted fraction of its own size.
    pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
    pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
    dw = deltas[:, 2]
    dh = deltas[:, 3]
    dw = F.minimum(dw, max_delta)
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
    """Decode per-class regression deltas into absolute boxes.

    ``deltas`` is (num_rois, num_classes, 4-ish); each ROI is broadcast across
    the class axis and decoded with :func:`bbox_transform_inv_opr`.
    NOTE(review): the default ``config=None`` will fail when ``unnormalize``
    is True — callers are expected to always pass a config in that case.
    """
    assert deltas.ndim == 3
    if unnormalize:
        # Undo target normalisation: delta = delta * std + mean.
        stds = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
        means = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
        deltas = deltas * stds + means
    num_rois, num_cls = deltas.shape[0], deltas.shape[1]
    # Repeat each ROI once per class so ROI/delta rows line up 1:1.
    tiled_rois = F.broadcast_to(
        F.expand_dims(rois, 1), (num_rois, num_cls, rois.shape[1])
    ).reshape(-1, rois.shape[1])
    flat_deltas = deltas.reshape(-1, deltas.shape[2])
    boxes = bbox_transform_inv_opr(tiled_rois, flat_deltas)
    return boxes.reshape(-1, num_cls, boxes.shape[1])
def filter_boxes_opr(boxes, min_size):
    """Return a float32 keep-mask (1 = keep, 0 = drop) for boxes whose width
    and height are both >= ``min_size``.

    Boxes are (x1, y1, x2, y2) in inclusive pixel coordinates, so side length
    is x2 - x1 + 1. If no box passes the size test, every entry is raised by 1
    so that all boxes are kept (avoids an empty selection downstream).
    """
    wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
    keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
    # F.equal(...) is a scalar 1 when the mask sums to zero; broadcasting it
    # onto every entry turns an all-zero mask into an all-one mask.
    keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    # BUG FIX: the original returned the undefined name `keep`, which raises
    # NameError at runtime; the computed mask is `keep_mask`.
    return keep_mask
def clip_boxes_opr(boxes, im_info):
    """Clamp boxes in place to the image rectangle; im_info is (height, width, ...)."""
    x_max = im_info[1] - 1
    y_max = im_info[0] - 1
    # Columns 0/2 are x-coordinates and 1/3 are y-coordinates; the ::4 stride
    # also handles layouts with 4 columns per class.
    for col, hi in ((0, x_max), (1, y_max), (2, x_max), (3, y_max)):
        boxes[:, col::4] = boxes[:, col::4].clamp(min=0, max=hi)
    return boxes
def bbox_transform_inv_opr(bbox, deltas):
    """ Transforms the learned deltas to the final bbox coordinates, the axis is 1

    NOTE(review): this copy appears truncated by the chunk boundary — it never
    applies the clamped deltas to produce and return predicted boxes; a
    complete implementation of the same function appears later in the file.
    """
    # Cap on dw/dh so exp() cannot overflow: log(1000/16), as in Detectron.
    max_delta = math.log(1000.0 / 16)
    # Inclusive pixel coordinates: side length is x2 - x1 + 1.
    bbox_width = bbox[:, 2] - bbox[:, 0] + 1
    bbox_height = bbox[:, 3] - bbox[:, 1] + 1
    bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
    bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
    # Shift each box centre by the predicted fraction of its own size.
    pred_ctr_x = bbox_ctr_x + deltas[:, 0] * bbox_width
    pred_ctr_y = bbox_ctr_y + deltas[:, 1] * bbox_height
    dw = deltas[:, 2]
    dh = deltas[:, 3]
    dw = F.minimum(dw, max_delta)
    dh = F.minimum(dh, max_delta)
import math
import megengine as mge
import megengine.functional as F
import numpy as np
from megengine import Tensor
import pdb
def restore_bbox(rois, deltas, unnormalize=True, config = None):
    """Decode per-class regression deltas into absolute boxes.

    ``deltas`` is (num_rois, num_classes, 4-ish); each ROI is broadcast across
    the class axis and decoded with :func:`bbox_transform_inv_opr`.
    NOTE(review): the default ``config=None`` will fail when ``unnormalize``
    is True — callers are expected to always pass a config in that case.
    """
    assert deltas.ndim == 3
    if unnormalize:
        # Undo target normalisation: delta = delta * std + mean.
        stds = mge.tensor(config.bbox_normalize_stds.reshape(1, 1, -1))
        means = mge.tensor(config.bbox_normalize_means.reshape(1, 1, -1))
        deltas = deltas * stds + means
    num_rois, num_cls = deltas.shape[0], deltas.shape[1]
    # Repeat each ROI once per class so ROI/delta rows line up 1:1.
    tiled_rois = F.broadcast_to(
        F.expand_dims(rois, 1), (num_rois, num_cls, rois.shape[1])
    ).reshape(-1, rois.shape[1])
    flat_deltas = deltas.reshape(-1, deltas.shape[2])
    boxes = bbox_transform_inv_opr(tiled_rois, flat_deltas)
    return boxes.reshape(-1, num_cls, boxes.shape[1])
def filter_boxes_opr(boxes, min_size):
    """Return a float32 keep-mask (1 = keep, 0 = drop) for boxes whose width
    and height are both >= ``min_size``.

    Boxes are (x1, y1, x2, y2) in inclusive pixel coordinates, so side length
    is x2 - x1 + 1. If no box passes the size test, every entry is raised by 1
    so that all boxes are kept (avoids an empty selection downstream).
    """
    wh = boxes[:, 2:4] - boxes[:, 0:2] + 1
    keep_mask = F.prod(wh >= min_size, axis = 1).astype(np.float32)
    # F.equal(...) is a scalar 1 when the mask sums to zero; broadcasting it
    # onto every entry turns an all-zero mask into an all-one mask.
    keep_mask = keep_mask + F.equal(keep_mask.sum(), 0).astype(np.float32)
    # BUG FIX: the original returned the undefined name `keep`, which raises
    # NameError at runtime; the computed mask is `keep_mask`.
    return keep_mask
def clip_boxes_opr(boxes, im_info):
    """Clamp boxes in place to the image rectangle; im_info is (height, width, ...)."""
    x_max = im_info[1] - 1
    y_max = im_info[0] - 1
    # Columns 0/2 are x-coordinates and 1/3 are y-coordinates; the ::4 stride
    # also handles layouts with 4 columns per class.
    for col, hi in ((0, x_max), (1, y_max), (2, x_max), (3, y_max)):
        boxes[:, col::4] = boxes[:, col::4].clamp(min=0, max=hi)
    return boxes
def bbox_transform_inv_opr(bbox, deltas):
    """Decode (dx, dy, dw, dh) regression deltas into absolute boxes.

    Both inputs are (N, 4) along axis 1; coordinates are inclusive pixels,
    so side length is x2 - x1 + 1. Returns (N, 4) boxes as (x1, y1, x2, y2).
    """
    # Cap on dw/dh so exp() cannot overflow: log(1000/16), as in Detectron.
    max_delta = math.log(1000.0 / 16)

    widths = bbox[:, 2] - bbox[:, 0] + 1
    heights = bbox[:, 3] - bbox[:, 1] + 1
    ctr_x = bbox[:, 0] + 0.5 * widths
    ctr_y = bbox[:, 1] + 0.5 * heights

    # Shift each centre by the predicted fraction of the box size and scale
    # width/height by exp of the (clamped) log-scale deltas.
    pred_ctr_x = ctr_x + deltas[:, 0] * widths
    pred_ctr_y = ctr_y + deltas[:, 1] * heights
    pred_w = widths * F.exp(F.minimum(deltas[:, 2], max_delta))
    pred_h = heights * F.exp(F.minimum(deltas[:, 3], max_delta))

    half_w = 0.5 * pred_w
    half_h = 0.5 * pred_h
    return F.stack(
        [pred_ctr_x - half_w, pred_ctr_y - half_h,
         pred_ctr_x + half_w, pred_ctr_y + half_h],
        axis = 1,
    )
def bbox_transform_opr(bbox, gt):
    """ Transform the bounding box and ground truth to the loss targets.
    The 4 box coordinates are in axis 1

    NOTE(review): this copy appears truncated by the chunk boundary —
    target_dh and the stacked return value are missing.
    """
    # Inclusive pixel coordinates: side length is x2 - x1 + 1.
    bbox_width = bbox[:, 2] - bbox[:, 0] + 1
    bbox_height = bbox[:, 3] - bbox[:, 1] + 1
    bbox_ctr_x = bbox[:, 0] + 0.5 * bbox_width
    bbox_ctr_y = bbox[:, 1] + 0.5 * bbox_height
    gt_width = gt[:, 2] - gt[:, 0] + 1
    gt_height = gt[:, 3] - gt[:, 1] + 1
    gt_ctr_x = gt[:, 0] + 0.5 * gt_width
    gt_ctr_y = gt[:, 1] + 0.5 * gt_height
    # Targets: centre offsets normalised by box size, log scale ratios.
    target_dx = (gt_ctr_x - bbox_ctr_x) / bbox_width
    target_dy = (gt_ctr_y - bbox_ctr_y) / bbox_height
    target_dw = F.log(gt_width / bbox_width)
Subsets and Splits
No saved queries yet
Save your SQL queries to embed, download, and access them later. Queries will appear here once saved.