import torch
import torch.nn as nn
import torch.nn.functional as F


class EncodingBlock(nn.Module):
    """Two 3x3 convolutions followed by 2x2 max pooling; returns both the
    downsampled feature map and the pre-pooling skip connection."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.activation(self.conv1(x))
        x = self.activation(self.conv2(x))
        skip_connection = x  # kept at full resolution for the decoder
        x = self.pool(x)     # halve the spatial resolution
        return x, skip_connection
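
# A minimal shape sketch for a single encoder block (the 1x1x64x64 input below
# is an assumption for illustration, not taken from the original code):
#
#   block = EncodingBlock(1, 32)
#   out, skip = block(torch.randn(1, 1, 64, 64))
#   # out.shape  -> torch.Size([1, 32, 32, 32])   downsampled features
#   # skip.shape -> torch.Size([1, 32, 64, 64])   full-resolution skip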
class DecodingBlock(nn.Module):
    """Upsample with a strided transposed convolution, zero-pad to match the
    skip connection, concatenate, then apply two 3x3 convolutions."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv_transpose = nn.ConvTranspose2d(in_channels, out_channels, kernel_size=5, stride=2, padding=2)
        # After concatenation with the skip connection the channel count doubles.
        self.conv1 = nn.Conv2d(out_channels * 2, out_channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.activation = nn.ReLU(inplace=True)

    def forward(self, x, skip_connection):
        x = self.conv_transpose(x)
        # The transposed convolution produces an output slightly smaller than
        # the skip tensor (2*H - 1 for an H-sized input), so zero-pad on the
        # right/bottom until the shapes match before concatenating.
        pd = (0, skip_connection.size(-1) - x.size(-1), 0, skip_connection.size(-2) - x.size(-2))
        x = F.pad(x, pd, mode='constant', value=0)
        x = torch.cat((x, skip_connection), dim=1)
        x = self.activation(self.conv1(x))
        x = self.activation(self.conv2(x))
        return x
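
# A minimal shape sketch for the decoder block (the tensors below are
# assumptions for illustration): a 1x64x16x16 input is upsampled to 31x31,
# padded to 32x32 to align with the skip, and fused back to 32 channels.
#
#   block = DecodingBlock(64, 32)
#   out = block(torch.randn(1, 64, 16, 16), torch.randn(1, 32, 32, 32))
#   # out.shape -> torch.Size([1, 32, 32, 32])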
class UNet(nn.Module):
    """U-Net with four encoder/decoder levels and a two-convolution bottleneck,
    mapping a single-channel input to a single-channel output."""

    def __init__(self, init_features=32, bottleneck_size=512):
        super().__init__()
        self.encoding_block1 = EncodingBlock(1, init_features)
        self.encoding_block2 = EncodingBlock(init_features, init_features * 2)
        self.encoding_block3 = EncodingBlock(init_features * 2, init_features * 4)
        self.encoding_block4 = EncodingBlock(init_features * 4, init_features * 8)
        self.bottleneck_conv1 = nn.Conv2d(init_features * 8, bottleneck_size, kernel_size=3, padding=1)
        self.bottleneck_conv2 = nn.Conv2d(bottleneck_size, bottleneck_size, kernel_size=3, padding=1)
        self.bottleneck_activation = nn.ReLU(inplace=True)
        self.decoding_block4 = DecodingBlock(bottleneck_size, init_features * 8)
        self.decoding_block3 = DecodingBlock(init_features * 8, init_features * 4)
        self.decoding_block2 = DecodingBlock(init_features * 4, init_features * 2)
        self.decoding_block1 = DecodingBlock(init_features * 2, init_features)
        self.final_conv = nn.Conv2d(init_features, 1, kernel_size=1)

    def forward(self, x):
        # Contracting path: each block halves the resolution and stores a skip.
        x, skip1 = self.encoding_block1(x)
        x, skip2 = self.encoding_block2(x)
        x, skip3 = self.encoding_block3(x)
        x, skip4 = self.encoding_block4(x)
        # Bottleneck.
        x = self.bottleneck_activation(self.bottleneck_conv1(x))
        x = self.bottleneck_activation(self.bottleneck_conv2(x))
        # Expanding path: upsample and fuse with the matching skip connection.
        x = self.decoding_block4(x, skip4)
        x = self.decoding_block3(x, skip3)
        x = self.decoding_block2(x, skip2)
        x = self.decoding_block1(x, skip1)
        # 1x1 convolution down to the single output channel.
        x = self.final_conv(x)
        return x
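
# A minimal smoke test (an assumed usage example, not part of the original
# script): run a dummy 256x256 single-channel image through the network and
# confirm that the output resolution matches the input.
if __name__ == "__main__":
    model = UNet(init_features=32, bottleneck_size=512)
    dummy = torch.randn(1, 1, 256, 256)  # batch of one grayscale image
    with torch.no_grad():
        prediction = model(dummy)
    print(prediction.shape)  # expected: torch.Size([1, 1, 256, 256])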