import os
import cv2
import torch
import torchvision
from torch import nn
from torchvision import transforms
from PIL import Image
class_names = ["Glioma Tumor", "Meningioma Tumor", "No Tumor", "Pituitary Tumor"]
class CFG:
    DEVICE = 'cpu'
    NUM_DEVICES = torch.cuda.device_count()
    NUM_WORKERS = os.cpu_count()
    NUM_CLASSES = 4
    EPOCHS = 16
    BATCH_SIZE = 32
    LR = 0.001
    APPLY_SHUFFLE = True
    SEED = 768
    HEIGHT = 224
    WIDTH = 224
    CHANNELS = 3
    IMAGE_SIZE = (224, 224, 3)
class EfficientNetV2Model(nn.Module):
    def __init__(self, backbone_model, name='efficientnet-v2-large',
                 num_classes=CFG.NUM_CLASSES, device=CFG.DEVICE):
        super(EfficientNetV2Model, self).__init__()
        self.backbone_model = backbone_model
        self.device = device
        self.num_classes = num_classes
        self.name = name

        # Custom classification head replacing the backbone's default classifier
        classifier = nn.Sequential(
            nn.Flatten(),
            nn.Dropout(p=0.2, inplace=True),
            nn.Linear(in_features=1280, out_features=256, bias=True),
            nn.GELU(),
            nn.Dropout(p=0.2, inplace=True),
            nn.Linear(in_features=256, out_features=num_classes, bias=False)
        ).to(device)

        self._set_classifier(classifier)

    def _set_classifier(self, classifier: nn.Module) -> None:
        self.backbone_model.classifier = classifier

    def forward(self, image):
        return self.backbone_model(image)
def get_efficientnetv2_model(
        device: torch.device = CFG.DEVICE) -> nn.Module:
    # Set the manual seeds
    torch.manual_seed(CFG.SEED)
    torch.cuda.manual_seed(CFG.SEED)

    # Get pretrained EfficientNetV2-L weights
    model_weights = (
        torchvision
        .models
        .EfficientNet_V2_L_Weights
        .DEFAULT
    )

    # Get model and push to device
    model = (
        torchvision.models.efficientnet_v2_l(
            weights=model_weights
        )
    ).to(device)

    # Freeze backbone parameters so only the classifier head is trainable
    for param in model.features.parameters():
        param.requires_grad = False

    return model
# Get EfficientNetV2 backbone
backbone_model = get_efficientnetv2_model(CFG.DEVICE)

efficientnetv2_params = {
    'backbone_model': backbone_model,
    'name': 'efficientnet-v2-large',
    'device': CFG.DEVICE
}

# Generate model and load the fine-tuned weights
efficientnet_model = EfficientNetV2Model(**efficientnetv2_params)
efficientnet_model.load_state_dict(
    torch.load('efficientnetV2.pth', map_location=torch.device('cpu'))
)
efficientnet_model.eval()  # disable dropout for inference
def predict_eff(image_path):
    # Define the image transformation
    transform = transforms.Compose([
        transforms.Resize((224, 224)),
        transforms.ToTensor()
    ])

    # Load and preprocess the image (convert to RGB in case of grayscale input)
    image = Image.open(image_path).convert('RGB')
    input_tensor = transform(image)
    input_batch = input_tensor.unsqueeze(0).to(CFG.DEVICE)  # Add batch dimension

    # Perform inference
    with torch.no_grad():
        output = efficientnet_model(input_batch)

    # Convert logits to class probabilities
    res = torch.softmax(output, dim=1)
    probs = {class_names[i]: float(res[0][i]) for i in range(len(class_names))}
    return probs
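

# Example usage: a minimal sketch of calling predict_eff, not part of the
# original script. "sample_mri.jpg" is a hypothetical path; replace it with
# a real MRI image file. The __main__ guard keeps this from running when the
# module is imported by an app.
if __name__ == "__main__":
    predictions = predict_eff("sample_mri.jpg")
    # Print class probabilities from most to least likely
    for label, prob in sorted(predictions.items(), key=lambda kv: kv[1], reverse=True):
        print(f"{label}: {prob:.4f}")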