# BLADE_FRBNN/models/resnet_model.py
import torch
import torch.nn as nn
import torch.nn.functional as F


class ResidualBlock(nn.Module):
    """Basic 3x3 residual block with dropout applied after each convolution."""

    def __init__(self, in_channels, out_channels, stride=1, downsample=None):
        super(ResidualBlock, self).__init__()
        self.conv1 = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU())
        self.conv2 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(out_channels))
        self.downsample = downsample
        self.relu = nn.ReLU()
        self.out_channels = out_channels
        self.dropout_percentage = 0.5
        self.dropout1 = nn.Dropout(p=self.dropout_percentage)
        self.batchnorm_mod = nn.BatchNorm2d(out_channels)

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.dropout1(out)
        # out = self.batchnorm_mod(out)
        out = self.conv2(out)
        out = self.dropout1(out)
        # out = self.batchnorm_mod(out)
        if self.downsample:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
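

# Usage sketch (not part of the original file): with stride=1 and matching channel
# counts a ResidualBlock preserves the input shape, so no downsample branch is needed.
# The channel count and spatial size below are arbitrary illustrations.
def _residual_block_example():
    block = ResidualBlock(64, 64)
    block.eval()                               # disable dropout for a deterministic pass
    x = torch.randn(2, 64, 32, 32)             # (batch, channels, H, W)
    with torch.no_grad():
        y = block(x)
    assert y.shape == x.shape                  # the skip connection requires matching shapes
    return y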


class ResNet(nn.Module):
    """ResNet-style classifier built from ResidualBlock. The final nn.Linear is
    hard-coded to 39424 input features (512 * 77), so the model expects a fixed
    input resolution."""

    def __init__(self, inchan, block, layers, num_classes=10):
        super(ResNet, self).__init__()
        self.inplanes = 64
        self.eps = 1e-5
        self.relu = nn.ReLU()
        self.conv1 = nn.Sequential(
            nn.Conv2d(inchan, 64, kernel_size=7, stride=2, padding=3),
            nn.BatchNorm2d(64),
            nn.ReLU())
        self.maxpool = nn.MaxPool2d(kernel_size=(2, 2), stride=2, padding=1)
        self.layer0 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer1 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer2 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer3 = self._make_layer(block, 512, layers[3], stride=1)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        # 39424 = 512 * 77; e.g. a 192x256 input yields a 7x11 map after avgpool.
        self.fc = nn.Linear(39424, num_classes)
        self.dropout_percentage = 0.3
        self.dropout1 = nn.Dropout(p=self.dropout_percentage)
        # Encoder head (defined but not used in forward()).
        self.encoder = nn.Sequential(
            nn.Conv2d(24, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True), nn.Dropout(p=self.dropout_percentage),
            nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True), nn.Dropout(p=self.dropout_percentage),
            nn.Conv2d(64, 32, kernel_size=3, stride=1, padding=1),
            nn.ReLU(True), nn.Dropout(p=self.dropout_percentage),
            nn.Conv2d(32, 24, kernel_size=3, stride=1, padding=1),
            nn.Sigmoid()
        )
        params = sum(p.numel() for p in self.encoder.parameters())
        print("num params encoder", params)

    def norm(self, x):
        """Shift by the global minimum, then scale each (batch, channel) plane by
        its own maximum so all values fall in [0, 1]."""
        shifted = x - x.min()
        maxes = torch.amax(abs(shifted), dim=(-2, -1))
        repeated_maxes = maxes.unsqueeze(2).unsqueeze(3).repeat(1, 1, x.shape[-2], x.shape[-1])
        x = shifted / repeated_maxes
        return x

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes:
            # 1x1 projection so the skip connection matches the block's output shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes, kernel_size=1, stride=stride),
                nn.BatchNorm2d(planes),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x, return_mask=False):
        # x = self.norm(x)
        x = self.conv1(x)
        x = self.maxpool(x)
        x = self.layer0(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.dropout1(x)
        x = self.fc(x)
        # return x
        if return_mask:
            # self.mask and self.value are never assigned in this module; this branch
            # raises AttributeError unless they are set externally.
            return x, self.mask, self.value
        else:
            return x
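

# Usage sketch (not part of the original file): layers=[3, 4, 6, 3] and inchan=24 are
# assumptions; the hard-coded nn.Linear(39424, ...) does require an input resolution
# whose post-avgpool feature map flattens to 39424 values (192x256 works: 512 * 7 * 11).
def _resnet_example():
    model = ResNet(inchan=24, block=ResidualBlock, layers=[3, 4, 6, 3], num_classes=10)
    model.eval()                               # disable dropout for a deterministic pass
    x = torch.randn(1, 24, 192, 256)
    with torch.no_grad():
        logits = model(x)                      # shape: (1, 10)
    return logits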


class ConvAutoencoder(nn.Module):
    def __init__(self):
        super(ConvAutoencoder, self).__init__()
        # Encoder
        self.encoder = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=2, padding=1),    # (16, 96, 128)
            nn.ReLU(),
            nn.Conv2d(16, 32, kernel_size=3, stride=2, padding=1),   # (32, 48, 64)
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),   # (64, 24, 32)
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),  # (128, 12, 16)
            nn.ReLU()
        )
        # Fully connected latent space
        self.fc1 = nn.Linear(128 * 12 * 16, 8)
        self.fc2 = nn.Linear(8, 128 * 12 * 16)
        # Decoder
        self.decoder = nn.Sequential(
            nn.ConvTranspose2d(128, 64, kernel_size=3, stride=2, padding=1, output_padding=1),  # (64, 24, 32)
            nn.ReLU(),
            nn.ConvTranspose2d(64, 32, kernel_size=3, stride=2, padding=1, output_padding=1),   # (32, 48, 64)
            nn.ReLU(),
            nn.ConvTranspose2d(32, 16, kernel_size=3, stride=2, padding=1, output_padding=1),   # (16, 96, 128)
            nn.ReLU(),
            nn.ConvTranspose2d(16, 3, kernel_size=3, stride=2, padding=1, output_padding=1),    # (3, 192, 256)
            nn.Sigmoid()  # Using Sigmoid for the final activation to get output in range [0, 1]
        )

    def forward(self, x):
        # Encode
        x = self.encoder(x)
        # Flatten the encoded output
        x = x.view(x.size(0), -1)
        # Fully connected latent space
        x = self.fc1(x)
        x = self.fc2(x)
        # Reshape the output to the shape suitable for the decoder
        x = x.view(x.size(0), 128, 12, 16)
        # Decode
        x = self.decoder(x)
        return x
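

# Usage sketch (not part of the original file): the shape comments in ConvAutoencoder
# imply a 3x192x256 input, which is also what the 128 * 12 * 16 linear layers require.
def _autoencoder_example():
    model = ConvAutoencoder()
    x = torch.rand(1, 3, 192, 256)             # values in [0, 1], matching the Sigmoid output range
    with torch.no_grad():
        recon = model(x)                       # reconstruction shape: (1, 3, 192, 256)
    return recon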