from torch import nn
import torch.nn.functional as F

# inplace_abn is an optional dependency; fall back to None when it is not
# installed so Conv3dReLU can raise a helpful error on demand.
try:
    from inplace_abn import InPlaceABN
except ImportError:
    InPlaceABN = None

class Conv3dReLU(nn.Sequential):
    """Conv3d followed by normalization and ReLU.

    ``use_batchnorm`` accepts True, False, or the string "inplace", which
    swaps in the fused InPlaceABN layer from the inplace_abn package.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        padding=0,
        stride=1,
        use_batchnorm=True,
    ):
        if use_batchnorm == "inplace" and InPlaceABN is None:
            raise RuntimeError(
                "In order to use `use_batchnorm='inplace'` the inplace_abn "
                "package must be installed. To install see: "
                "https://github.com/mapillary/inplace_abn"
            )

        conv = nn.Conv3d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            # The bias is redundant when a normalization layer follows.
            bias=not use_batchnorm,
        )
        relu = nn.ReLU(inplace=True)

        if use_batchnorm == "inplace":
            # InPlaceABN fuses normalization and activation; a leaky ReLU
            # with slope 0.0 is a plain ReLU, so the separate ReLU becomes
            # an identity.
            bn = InPlaceABN(out_channels, activation="leaky_relu", activation_param=0.0)
            relu = nn.Identity()
        elif use_batchnorm:
            bn = nn.BatchNorm3d(out_channels)
        else:
            bn = nn.Identity()

        super().__init__(conv, bn, relu)
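
def _conv3d_relu_example():
    # Minimal usage sketch (an illustrative addition, not part of the
    # original module): Conv3dReLU expects a 5D input of shape
    # (N, C, D, H, W); with kernel_size=3 and padding=1 the spatial
    # dimensions are preserved.
    import torch

    block = Conv3dReLU(16, 32, kernel_size=3, padding=1)
    out = block(torch.randn(2, 16, 8, 32, 32))
    assert out.shape == (2, 32, 8, 32, 32)
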
class DecoderBlock(nn.Module):
    """Two-convolution decoder stage.

    The input is expected to be the channel-wise concatenation of the
    upsampled deep features and the skip connection, hence conv1 takes
    ``in_channels + skip_channels``.
    """

    def __init__(
        self, in_channels, skip_channels, out_channels, use_batchnorm=True,
    ):
        super().__init__()
        self.conv1 = Conv3dReLU(
            in_channels + skip_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        self.conv2 = Conv3dReLU(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
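
def _decoder_block_example():
    # Illustrative sketch (an addition, not original code): the caller
    # upsamples the deeper feature map and concatenates it with the skip
    # tensor along the channel dimension before calling the block.
    import torch

    block = DecoderBlock(in_channels=64, skip_channels=32, out_channels=32)
    deep = F.interpolate(
        torch.randn(1, 64, 4, 16, 16),
        scale_factor=2,
        mode="trilinear",
        align_corners=False,
    )
    skip = torch.randn(1, 32, 8, 32, 32)
    out = block(torch.cat([deep, skip], dim=1))
    assert out.shape == (1, 32, 8, 32, 32)
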
class LightDecoderBlock(nn.Module):
    """Lighter variant of DecoderBlock with a single convolution."""

    def __init__(
        self, in_channels, skip_channels, out_channels, use_batchnorm=True,
    ):
        super().__init__()
        self.conv1 = Conv3dReLU(
            in_channels + skip_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )

    def forward(self, x):
        return self.conv1(x)
def freeze_net(model: nn.Module, freeze_prefixs):
    """Disable gradients for parameters whose top-level submodule name is
    in ``freeze_prefixs``; asserts that at least one parameter was frozen."""
    flag = False
    for name, param in model.named_parameters():
        items = name.split(".")
        # Skip the "module." prefix added by (Distributed)DataParallel.
        prefix = items[1] if items[0] == "module" else items[0]
        if prefix in freeze_prefixs and param.requires_grad:
            param.requires_grad = False
            flag = True
    assert flag, "no parameters matched freeze_prefixs"


def unfreeze_net(model: nn.Module):
    """Re-enable gradients for every parameter of ``model``."""
    for param in model.parameters():
        param.requires_grad = True
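
def _freeze_example():
    # Illustrative sketch (an addition): freeze every parameter under the
    # top-level name "encoder", then restore all gradients.
    model = nn.ModuleDict({"encoder": nn.Linear(4, 4), "decoder": nn.Linear(4, 4)})
    freeze_net(model, freeze_prefixs={"encoder"})
    assert not any(p.requires_grad for p in model["encoder"].parameters())
    assert all(p.requires_grad for p in model["decoder"].parameters())
    unfreeze_net(model)
    assert all(p.requires_grad for p in model.parameters())
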
from .resnet_helper import ResBlock, get_trans_func
class ResDecoderBlock(nn.Module):
    """Decoder stage built from two bottleneck residual blocks
    (see ``resnet_helper.ResBlock``) instead of plain Conv3dReLU stacks.

    ``use_batchnorm`` is accepted for interface parity with DecoderBlock,
    but the residual blocks always use nn.BatchNorm3d.
    """

    def __init__(
        self, in_channels, skip_channels, out_channels, use_batchnorm=True,
    ):
        super().__init__()
        trans_func = get_trans_func("bottleneck_transform")
        self.conv1 = ResBlock(
            in_channels + skip_channels,  # dim_in
            out_channels,                 # dim_out
            3,                            # temporal kernel size
            1,                            # stride
            trans_func,
            out_channels // 2,            # bottleneck inner width
            num_groups=1,
            stride_1x1=False,
            inplace_relu=True,
            eps=1e-5,
            bn_mmt=0.1,
            dilation=1,
            norm_module=nn.BatchNorm3d,
        )
        self.conv2 = ResBlock(
            out_channels,
            out_channels,
            3,
            1,
            trans_func,
            out_channels // 2,
            num_groups=1,
            stride_1x1=False,
            inplace_relu=True,
            eps=1e-5,
            bn_mmt=0.1,
            dilation=1,
            norm_module=nn.BatchNorm3d,
        )

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        return x
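
def _res_decoder_example():
    # Illustrative sketch (an addition): like DecoderBlock, the block
    # consumes concatenated deep + skip features. The output shape assumes
    # the stride-1 bottleneck in resnet_helper preserves spatial size.
    import torch

    block = ResDecoderBlock(in_channels=64, skip_channels=32, out_channels=32)
    out = block(torch.randn(1, 96, 8, 16, 16))
    assert out.shape == (1, 32, 8, 16, 16)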