import torch
import torch.nn as nn


class ResidualConvBlock(nn.Module):
    """Two 3x3 conv + instance-norm layers with a residual shortcut."""

    def __init__(self, in_channels, out_channels):
        super(ResidualConvBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)
        self.in1 = nn.InstanceNorm2d(out_channels)
        self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
        self.in2 = nn.InstanceNorm2d(out_channels)
        self.relu = nn.LeakyReLU(inplace=True)
        # 1x1 convolution matches channel counts on the shortcut path when needed.
        self.downsample = nn.Conv2d(in_channels, out_channels, kernel_size=1) if in_channels != out_channels else None

    def forward(self, x):
        residual = x
        out = self.relu(self.in1(self.conv1(x)))
        out = self.in2(self.conv2(out))
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        return self.relu(out)

class AttentionGate(nn.Module):
    """Additive attention gate (Attention U-Net style): computes a spatial
    mask from the gating signal g and uses it to weight the skip feature x."""

    def __init__(self, F_g, F_l, F_int):
        super(AttentionGate, self).__init__()
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.InstanceNorm2d(F_int)
        )
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.InstanceNorm2d(F_int)
        )
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.InstanceNorm2d(1),
            nn.Sigmoid()
        )
        self.relu = nn.LeakyReLU(inplace=True)

    def forward(self, g, x):
        g1 = self.W_g(g)
        x1 = self.W_x(x)
        psi = self.relu(g1 + x1)
        psi = self.psi(psi)  # single-channel attention mask in (0, 1)
        return x * psi

class EnhancedUNet(nn.Module):
    """U-Net with residual blocks, attention-gated skip connections, and a
    dilated-convolution bottleneck."""

    def __init__(self, n_channels, n_classes):
        super(EnhancedUNet, self).__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes

        # Encoder: each stage halves the spatial resolution and doubles channels.
        self.inc = ResidualConvBlock(n_channels, 64)
        self.down1 = nn.Sequential(nn.MaxPool2d(2), ResidualConvBlock(64, 128))
        self.down2 = nn.Sequential(nn.MaxPool2d(2), ResidualConvBlock(128, 256))
        self.down3 = nn.Sequential(nn.MaxPool2d(2), ResidualConvBlock(256, 512))
        self.down4 = nn.Sequential(nn.MaxPool2d(2), ResidualConvBlock(512, 1024))

        # Bottleneck: dilated convolutions enlarge the receptive field without
        # further downsampling (padding chosen to preserve spatial size).
        self.dilation = nn.Sequential(
            nn.Conv2d(1024, 1024, kernel_size=3, padding=2, dilation=2),
            nn.InstanceNorm2d(1024),
            nn.LeakyReLU(inplace=True),
            nn.Conv2d(1024, 1024, kernel_size=3, padding=4, dilation=4),
            nn.InstanceNorm2d(1024),
            nn.LeakyReLU(inplace=True)
        )

        # Decoder: transposed convolutions upsample; attention gates filter each
        # skip connection before it is concatenated with the upsampled features.
        self.up4 = nn.ConvTranspose2d(1024, 512, kernel_size=2, stride=2)
        self.att4 = AttentionGate(F_g=512, F_l=512, F_int=256)
        self.up_conv4 = ResidualConvBlock(1024, 512)
        self.up3 = nn.ConvTranspose2d(512, 256, kernel_size=2, stride=2)
        self.att3 = AttentionGate(F_g=256, F_l=256, F_int=128)
        self.up_conv3 = ResidualConvBlock(512, 256)
        self.up2 = nn.ConvTranspose2d(256, 128, kernel_size=2, stride=2)
        self.att2 = AttentionGate(F_g=128, F_l=128, F_int=64)
        self.up_conv2 = ResidualConvBlock(256, 128)
        self.up1 = nn.ConvTranspose2d(128, 64, kernel_size=2, stride=2)
        self.att1 = AttentionGate(F_g=64, F_l=64, F_int=32)
        self.up_conv1 = ResidualConvBlock(128, 64)

        self.outc = nn.Conv2d(64, n_classes, kernel_size=1)
        self.dropout = nn.Dropout(0.5)
    def forward(self, x):
        # Encoder path with dropout between stages.
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x2 = self.dropout(x2)
        x3 = self.down2(x2)
        x3 = self.dropout(x3)
        x4 = self.down3(x3)
        x4 = self.dropout(x4)
        x5 = self.down4(x4)
        x5 = self.dilation(x5)
        x5 = self.dropout(x5)

        # Decoder path: upsample, gate the skip connection, concatenate, refine.
        x = self.up4(x5)
        x4 = self.att4(g=x, x=x4)
        x = torch.cat([x4, x], dim=1)
        x = self.up_conv4(x)
        x = self.dropout(x)

        x = self.up3(x)
        x3 = self.att3(g=x, x=x3)
        x = torch.cat([x3, x], dim=1)
        x = self.up_conv3(x)
        x = self.dropout(x)

        x = self.up2(x)
        x2 = self.att2(g=x, x=x2)
        x = torch.cat([x2, x], dim=1)
        x = self.up_conv2(x)
        x = self.dropout(x)

        x = self.up1(x)
        x1 = self.att1(g=x, x=x1)
        x = torch.cat([x1, x], dim=1)
        x = self.up_conv1(x)

        # Per-pixel class logits; pair with a loss that expects raw logits.
        logits = self.outc(x)
        return logits
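

# A minimal shape smoke test (not part of the original Space): the input size,
# channel count, and class count below are illustrative assumptions. Input
# height and width must be divisible by 16, since the encoder downsamples
# four times.
if __name__ == "__main__":
    model = EnhancedUNet(n_channels=3, n_classes=2)
    model.eval()  # disable dropout for a deterministic check
    with torch.no_grad():
        dummy = torch.randn(1, 3, 128, 128)  # hypothetical RGB input
        out = model(dummy)
    print(out.shape)  # expected: torch.Size([1, 2, 128, 128])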