import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision import models


class SimpleCNN(nn.Module):
    """Small VGG-style CNN: four blocks of two 3x3 convs followed by a 2x2 max-pool."""

    def __init__(self, k_size=3, pool_size=2, num_classes=1):
        super(SimpleCNN, self).__init__()
        self.relu = nn.ReLU()

        # Convolutional block 1
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=8, kernel_size=k_size, padding=1)
        self.conv2 = nn.Conv2d(in_channels=8, out_channels=16, kernel_size=k_size, stride=1, padding=1)
        self.pool1 = nn.MaxPool2d(kernel_size=pool_size)

        # Convolutional block 2
        self.conv3 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=k_size, stride=1, padding=1)
        self.conv4 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=k_size, stride=1, padding=1)
        self.pool2 = nn.MaxPool2d(kernel_size=pool_size)

        # Convolutional block 3
        self.conv5 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=k_size, stride=1, padding=1)
        self.conv6 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=k_size, stride=1, padding=1)
        self.pool3 = nn.MaxPool2d(kernel_size=pool_size)

        # Convolutional block 4
        self.conv7 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=k_size, stride=1, padding=1)
        self.conv8 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=k_size, stride=1, padding=1)
        self.pool4 = nn.MaxPool2d(kernel_size=pool_size)

        # Fully connected classifier head.
        # 64*14*14 assumes 224x224 inputs (four 2x2 pools: 224 -> 14);
        # adjust the input features for a different input image size.
        self.fc = nn.Linear(64 * 14 * 14, num_classes)

    def forward(self, x):
        # Each block: conv -> ReLU -> conv -> ReLU -> max-pool (halves the spatial size).
        x = self.pool1(self.relu(self.conv2(self.relu(self.conv1(x)))))
        x = self.pool2(self.relu(self.conv4(self.relu(self.conv3(x)))))
        x = self.pool3(self.relu(self.conv6(self.relu(self.conv5(x)))))
        x = self.pool4(self.relu(self.conv8(self.relu(self.conv7(x)))))
        # print(x.shape)  # uncomment to check the flattened feature size
        x = x.view(x.size(0), -1)  # flatten to (batch, 64*14*14)
        x = self.fc(x)
        return x


class CustomResNet18(nn.Module):
    """ResNet-18 backbone with its final fully connected layer replaced."""

    def __init__(self, num_classes=11):
        super(CustomResNet18, self).__init__()
        # Note: torchvision >= 0.13 prefers weights=models.ResNet18_Weights.DEFAULT;
        # pretrained=True still works but emits a deprecation warning.
        self.resnet = models.resnet18(pretrained=True)
        num_features = self.resnet.fc.in_features
        self.resnet.fc = nn.Linear(num_features, num_classes)

    def forward(self, x):
        return self.resnet(x)
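

# --- Illustrative sanity check (a minimal sketch, not part of the original models) ---
# Assumes 224x224 RGB inputs, which is what SimpleCNN's 64*14*14 linear layer expects
# (four 2x2 max-pools reduce 224 -> 14). The batch size and class counts below are
# arbitrary example values; CustomResNet18 downloads ImageNet weights on first use.
if __name__ == "__main__":
    dummy = torch.randn(2, 3, 224, 224)  # batch of 2 RGB images

    simple_cnn = SimpleCNN(num_classes=1)
    print("SimpleCNN output:", simple_cnn(dummy).shape)  # expected: torch.Size([2, 1])

    resnet_model = CustomResNet18(num_classes=11)
    resnet_model.eval()  # forward-only check; keep batch-norm statistics fixed
    with torch.no_grad():
        out = resnet_model(dummy)
    print("CustomResNet18 output:", out.shape)  # expected: torch.Size([2, 11])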