import torch
from torch import nn
from torchvision import models

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


class CustomResNet18(nn.Module):

    def get_out_channels(self, module):
        """Return the output width of a layer: out_channels for Conv2d, num_features
        for BatchNorm2d, out_features for Linear; None for anything else."""
        if isinstance(module, nn.Conv2d):
            return module.out_channels
        elif isinstance(module, nn.BatchNorm2d):
            return module.num_features
        elif isinstance(module, nn.Linear):
            return module.out_features
        return None

    def replace_relu_with_prelu_and_dropout(self, module, inplace=True):
        """Recursively swap every nn.ReLU for an nn.Sequential(PReLU, Dropout2d) block."""
        for name, child in module.named_children():
            # Recurse first so ReLUs nested inside blocks (e.g. BasicBlock) are replaced too.
            self.replace_relu_with_prelu_and_dropout(child, inplace)

            if isinstance(child, nn.ReLU):
                # Walk the siblings preceding this ReLU and remember the most recent
                # layer with a known output width (Conv2d / BatchNorm2d / Linear).
                out_channels = None
                for prev_name, prev_child in module.named_children():
                    if prev_name == name:
                        break
                    out_channels = self.get_out_channels(prev_child) or out_channels

                if out_channels is None:
                    raise ValueError(f"Cannot determine `out_channels` for {child}. Please check the model structure.")

                # One learnable PReLU parameter per channel, followed by channel-wise dropout.
                prelu = nn.PReLU(num_parameters=out_channels, device=device)
                dropout = nn.Dropout2d(p=0.2)

                setattr(module, name, nn.Sequential(prelu, dropout).to(device))

    def __init__(self):
        super(CustomResNet18, self).__init__()
        self.model = models.resnet18(weights=models.ResNet18_Weights.IMAGENET1K_V1).train(True).to(device)
        self.replace_relu_with_prelu_and_dropout(self.model)

        # Replace the ImageNet classification head with a 7-class head.
        number = self.model.fc.in_features
        self.model.fc = nn.Sequential(nn.Linear(number, 7)).to(device)

    def forward(self, x):
        return self.model(x)
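

# A minimal usage sketch (not part of the original file): it assumes 224x224 RGB
# inputs and the 7-class head defined above, and simply sanity-checks that the
# PReLU/Dropout replacement and the new classifier run end to end.
if __name__ == "__main__":
    net = CustomResNet18()
    dummy = torch.randn(2, 3, 224, 224, device=device)
    logits = net(dummy)
    print(logits.shape)  # expected: torch.Size([2, 7])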