import torch
import torch.nn as nn
import torch.nn.functional as F
from segmentation_models_pytorch.base import modules as md


class DecoderBlock(nn.Module):
    def __init__(
        self,
        in_channels,
        skip_channels,
        out_channels,
        use_batchnorm=True,
        attention_type=None,
    ):
        super().__init__()
        self.conv1 = md.Conv2dReLU(
            in_channels + skip_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        self.attention1 = md.Attention(
            attention_type, in_channels=in_channels + skip_channels
        )
        self.conv2 = md.Conv2dReLU(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        self.attention2 = md.Attention(attention_type, in_channels=out_channels)

    def forward(self, x, skip=None):
        # Upsample by 2x to match the spatial size of the skip connection.
        x = F.interpolate(x, scale_factor=2, mode="nearest")
        if skip is not None:
            x = torch.cat([x, skip], dim=1)
            x = self.attention1(x)
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.attention2(x)
        return x
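
# A quick shape sketch for DecoderBlock (illustrative only; the channel and
# spatial sizes below are assumptions, not fixed by this file):
#
#   block = DecoderBlock(in_channels=256, skip_channels=128, out_channels=128)
#   x = torch.rand(1, 256, 8, 8)        # coarse decoder input
#   skip = torch.rand(1, 128, 16, 16)   # encoder skip at 2x the resolution
#   block(x, skip).shape                # -> torch.Size([1, 128, 16, 16])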


class CenterBlock(nn.Sequential):
    def __init__(self, in_channels, out_channels, use_batchnorm=True):
        conv1 = md.Conv2dReLU(
            in_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        conv2 = md.Conv2dReLU(
            out_channels,
            out_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=use_batchnorm,
        )
        super().__init__(conv1, conv2)


class UnetPlusPlusDecoder(nn.Module):
    def __init__(
        self,
        encoder_channels,
        decoder_channels,
        n_blocks=5,
        use_batchnorm=True,
        attention_type=None,
        center=False,
    ):
        super().__init__()

        if n_blocks != len(decoder_channels):
            raise ValueError(
                "Model depth is {}, but you provided `decoder_channels` for {} blocks.".format(
                    n_blocks, len(decoder_channels)
                )
            )

        # remove first skip with same spatial resolution
        encoder_channels = encoder_channels[1:]
        # reverse channels to start from head of encoder
        encoder_channels = encoder_channels[::-1]

        # computing blocks input and output channels
        head_channels = encoder_channels[0]
        self.in_channels = [head_channels] + list(decoder_channels[:-1])
        self.skip_channels = list(encoder_channels[1:]) + [0]
        self.out_channels = decoder_channels

        # NOTE: the center block is instantiated here but not applied in
        # forward() below.
        if center:
            self.center = CenterBlock(
                head_channels, head_channels, use_batchnorm=use_batchnorm
            )
        else:
            self.center = nn.Identity()

        # combine decoder keyword arguments
        kwargs = dict(use_batchnorm=use_batchnorm, attention_type=attention_type)

        blocks = {}
        for layer_idx in range(len(self.in_channels) - 1):
            for depth_idx in range(layer_idx + 1):
                if depth_idx == 0:
                    in_ch = self.in_channels[layer_idx]
                    skip_ch = self.skip_channels[layer_idx] * (layer_idx + 1)
                    out_ch = self.out_channels[layer_idx]
                else:
                    out_ch = self.skip_channels[layer_idx]
                    skip_ch = self.skip_channels[layer_idx] * (
                        layer_idx + 1 - depth_idx
                    )
                    in_ch = self.skip_channels[layer_idx - 1]
                blocks[f"x_{depth_idx}_{layer_idx}"] = DecoderBlock(
                    in_ch, skip_ch, out_ch, **kwargs
                )
        blocks[f"x_{0}_{len(self.in_channels)-1}"] = DecoderBlock(
            self.in_channels[-1], 0, self.out_channels[-1], **kwargs
        )
        self.blocks = nn.ModuleDict(blocks)
        self.depth = len(self.in_channels) - 1
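
    # Worked example of the channel bookkeeping above (a sketch, assuming a
    # ResNet34-style encoder with encoder_channels=(3, 64, 64, 128, 256, 512)
    # and decoder_channels=(256, 128, 64, 32, 16)):
    #
    #   after dropping the first stage and reversing:
    #   encoder_channels   = [512, 256, 128, 64, 64]
    #   self.in_channels   = [512, 256, 128, 64, 32]
    #   self.skip_channels = [256, 128, 64, 64, 0]
    #
    #   x_0_0: in=512, skip=256,     out=256  (plain U-Net step)
    #   x_0_1: in=256, skip=2 * 128, out=128  (skip widened by a nested node)
    #   x_1_1: in=256, skip=128,     out=128  (intermediate dense node)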

    def forward(self, *features):
        features = features[1:]  # remove first skip with same spatial resolution
        features = features[::-1]  # reverse channels to start from head of encoder

        # start building dense connections
        dense_x = {}
        for layer_idx in range(len(self.in_channels) - 1):
            for depth_idx in range(self.depth - layer_idx):
                if layer_idx == 0:
                    output = self.blocks[f"x_{depth_idx}_{depth_idx}"](
                        features[depth_idx], features[depth_idx + 1]
                    )
                    dense_x[f"x_{depth_idx}_{depth_idx}"] = output
                else:
                    dense_l_i = depth_idx + layer_idx
                    cat_features = [
                        dense_x[f"x_{idx}_{dense_l_i}"]
                        for idx in range(depth_idx + 1, dense_l_i + 1)
                    ]
                    cat_features = torch.cat(
                        cat_features + [features[dense_l_i + 1]], dim=1
                    )
                    dense_x[f"x_{depth_idx}_{dense_l_i}"] = self.blocks[
                        f"x_{depth_idx}_{dense_l_i}"
                    ](dense_x[f"x_{depth_idx}_{dense_l_i-1}"], cat_features)
        dense_x[f"x_{0}_{self.depth}"] = self.blocks[f"x_{0}_{self.depth}"](
            dense_x[f"x_{0}_{self.depth-1}"]
        )
        return dense_x[f"x_{0}_{self.depth}"]
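

# A minimal smoke test (an illustrative sketch, assuming a 5-stage encoder
# layout such as ResNet34's; requires segmentation_models_pytorch installed):
if __name__ == "__main__":
    encoder_channels = (3, 64, 64, 128, 256, 512)
    decoder = UnetPlusPlusDecoder(
        encoder_channels=encoder_channels,
        decoder_channels=(256, 128, 64, 32, 16),
        n_blocks=5,
    )
    # Fake encoder features: stage i has 1/2**i of the input resolution.
    features = [
        torch.rand(1, ch, 224 // 2**i, 224 // 2**i)
        for i, ch in enumerate(encoder_channels)
    ]
    out = decoder(*features)
    print(out.shape)  # expected: torch.Size([1, 16, 224, 224])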