import torch
import torchvision
from torch import nn
from torchvision.models import efficientnet_b0, EfficientNet_B0_Weights
from torchvision.models._api import WeightsEnum
from torch.hub import load_state_dict_from_url

def create_effnetb0_model(num_classes: int = 10,
                          seed: int = 42):
    """Creates an EfficientNetB0 feature extractor model and transforms.

    Args:
        num_classes (int, optional): number of classes in the classifier head. Defaults to 10.
        seed (int, optional): random seed value. Defaults to 42.

    Returns:
        model (torch.nn.Module): EffNetB0 feature extractor model.
        transforms (torchvision.transforms): EffNetB0 image transforms.
    """
    # Fix for wrong hash error from: https://github.com/pytorch/vision/issues/7744
    def get_state_dict(self, *args, **kwargs):
        kwargs.pop("check_hash", None)  # drop check_hash if present before delegating
        return load_state_dict_from_url(self.url, *args, **kwargs)
    WeightsEnum.get_state_dict = get_state_dict

    # Create EffNetB0 pretrained weights, transforms and model
    weights = EfficientNet_B0_Weights.DEFAULT
    transforms = weights.transforms()
    model = efficientnet_b0(weights=weights)

    # Freeze all layers in the base model
    for param in model.features.parameters():
        param.requires_grad = False

    # Change the classifier head, seeding for reproducibility
    torch.manual_seed(seed)
    model.classifier = nn.Sequential(
        nn.Dropout(p=0.3),
        nn.Linear(in_features=1280, out_features=num_classes)  # 1280 = EffNetB0 feature dimension
    )

    return model, transforms
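

# Example usage: a minimal, illustrative sketch of calling the factory above.
# The class count and the 224x224 dummy input below are assumptions for
# demonstration only, not values required by this file.
if __name__ == "__main__":
    model, transforms = create_effnetb0_model(num_classes=10, seed=42)
    model.eval()
    dummy_batch = torch.randn(1, 3, 224, 224)  # one RGB image at EffNetB0's crop size
    with torch.inference_mode():
        logits = model(dummy_batch)
    print(logits.shape)  # expected: torch.Size([1, 10])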