Upload 21 files
- S12.ipynb.ipynb +0 -0
- datasets/__init__.py +2 -0
- datasets/__pycache__/__init__.cpython-311.pyc +0 -0
- datasets/__pycache__/cifar10.cpython-311.pyc +0 -0
- datasets/__pycache__/generic.cpython-311.pyc +0 -0
- datasets/cifar10.py +37 -0
- datasets/generic.py +111 -0
- lightning_logs/version_0/hparams.yaml +1 -0
- lightning_logs/version_1/hparams.yaml +1 -0
- models/CUSTOMRESNET.py +166 -0
- models/__init__.py +0 -0
- models/__pycache__/CUSTOMRESNET.cpython-311.pyc +0 -0
- models/__pycache__/__init__.cpython-311.pyc +0 -0
- requirements.txt +10 -8
- utils/__init__.py +1 -0
- utils/__pycache__/__init__.cpython-311.pyc +0 -0
- utils/__pycache__/incorrect_images.cpython-311.pyc +0 -0
- utils/__pycache__/visualize.cpython-311.pyc +0 -0
- utils/incorrect_images.py +65 -0
- utils/visualize.py +70 -0
S12.ipynb.ipynb
ADDED
The diff for this file is too large to render.
datasets/__init__.py
ADDED
@@ -0,0 +1,2 @@
+
+from .cifar10 import cifar10_dataset
datasets/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (234 Bytes).
datasets/__pycache__/cifar10.cpython-311.pyc
ADDED
Binary file (2.77 kB).
datasets/__pycache__/generic.cpython-311.pyc
ADDED
Binary file (7.32 kB).
datasets/cifar10.py
ADDED
@@ -0,0 +1,37 @@
+import numpy as np
+import cv2
+from torchvision import datasets
+import albumentations as A
+from albumentations.pytorch import ToTensorV2
+
+from .generic import MyDataSet
+
+
+class AlbumentationsCIFAR10(datasets.CIFAR10):
+    def __init__(self, root, alb_transform=None, **kwargs):
+        super(AlbumentationsCIFAR10, self).__init__(root, **kwargs)
+        self.alb_transform = alb_transform
+
+    def __getitem__(self, index):
+        image, label = super(AlbumentationsCIFAR10, self).__getitem__(index)
+        if self.alb_transform is not None:
+            image = self.alb_transform(image=np.array(image))['image']
+        return image, label
+
+
+class cifar10_dataset(MyDataSet):
+    DataSet = AlbumentationsCIFAR10
+    mean = (0.49139968, 0.48215827, 0.44653124)
+    std = (0.24703233, 0.24348505, 0.26158768)
+    default_alb_transforms = [
+        A.HorizontalFlip(p=1.0),
+        A.ShiftScaleRotate(shift_limit=(-0.2, 0.2), scale_limit=(-0.2, 0.2), rotate_limit=(-15, 15), p=0.5),
+        A.PadIfNeeded(min_height=36, min_width=36, p=1.0),
+        A.RandomCrop(32, 32, always_apply=False, p=1.0),
+        A.CenterCrop(32, 32, always_apply=False, p=1.0),
+        A.CoarseDropout(max_holes=1, max_height=8, max_width=8, min_holes=1,
+                        min_height=8, min_width=8,
+                        fill_value=(0.4914, 0.4822, 0.4465), always_apply=False, p=0.5),
+
+
+    ]
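
Note: a minimal usage sketch for the wrapper above, not part of the commit. It assumes albumentations and torchvision from requirements.txt are installed; the pipeline shown is illustrative (it mirrors the test-time transforms built in datasets/generic.py).

# Illustrative sketch: applying an Albumentations pipeline through AlbumentationsCIFAR10.
import albumentations as A
from albumentations.pytorch import ToTensorV2
from datasets.cifar10 import AlbumentationsCIFAR10

# Test-time pipeline; mean/std mirror the cifar10_dataset class attributes.
transform = A.Compose([
    A.Normalize((0.49139968, 0.48215827, 0.44653124), (0.24703233, 0.24348505, 0.26158768)),
    ToTensorV2(),
])

ds = AlbumentationsCIFAR10('../data', train=False, download=True, alb_transform=transform)
image, label = ds[0]       # image is a normalized 3x32x32 tensor, label an int
print(image.shape, label)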
datasets/generic.py
ADDED
@@ -0,0 +1,111 @@
+import os
+from abc import ABC
+from functools import cached_property
+
+import torch
+import albumentations as A
+from albumentations.pytorch import ToTensorV2
+
+try:
+    from epoch.utils import plot_examples
+except ModuleNotFoundError:
+    from utils import plot_examples
+
+
+class MyDataSet(ABC):
+    DataSet = None
+    mean = None
+    std = None
+    classes = None
+    default_alb_transforms = None
+
+    def __init__(self, batch_size=1, normalize=True, shuffle=True, augment=True, alb_transforms=None):
+        self.batch_size = batch_size
+        self.normalize = normalize
+        self.shuffle = shuffle
+        self.augment = augment
+        self.alb_transforms = alb_transforms or self.default_alb_transforms
+
+        self.loader_kwargs = {'batch_size': batch_size, 'num_workers': os.cpu_count(), 'pin_memory': True}
+
+    @classmethod
+    def set_classes(cls, data):
+        if cls.classes is None:
+            cls.classes = {i: c for i, c in enumerate(data.classes)}
+
+    @cached_property
+    def train_data(self):
+        res = self.DataSet('../data', train=True, download=True, alb_transform=self.get_train_transforms())
+        self.set_classes(res)
+        return res
+
+    @cached_property
+    def test_data(self):
+        res = self.DataSet('../data', train=False, download=True, alb_transform=self.get_test_transforms())
+        self.set_classes(res)
+        return res
+
+    @cached_property
+    def train_loader(self):
+        return torch.utils.data.DataLoader(self.train_data, shuffle=self.shuffle, **self.loader_kwargs)
+
+    @cached_property
+    def test_loader(self):
+        return torch.utils.data.DataLoader(self.test_data, shuffle=False, **self.loader_kwargs)
+
+    @cached_property
+    def example_iter(self):
+        return iter(self.train_loader)
+
+    def get_train_transforms(self):
+        all_transforms = list()
+        if self.normalize:
+            all_transforms.append(A.Normalize(self.mean, self.std))
+        if self.augment and self.alb_transforms is not None:
+            all_transforms.extend(self.alb_transforms)
+        all_transforms.append(ToTensorV2())
+        return A.Compose(all_transforms)
+
+    def get_test_transforms(self):
+        all_transforms = list()
+        if self.normalize:
+            all_transforms.append(A.Normalize(self.mean, self.std))
+        all_transforms.append(ToTensorV2())
+        return A.Compose(all_transforms)
+
+    def download(self):
+        self.DataSet('../data', train=True, download=True)
+        self.DataSet('../data', train=False, download=True)
+
+    def denormalise(self, tensor):
+        result = tensor.clone().detach().requires_grad_(False)
+        if self.normalize:
+            for t, m, s in zip(result, self.mean, self.std):
+                t.mul_(s).add_(m)
+        return result
+
+    def show_transform(self, img):
+        if self.normalize:
+            img = self.denormalise(img)
+        if len(self.mean) == 3:
+            return img.permute(1, 2, 0)
+        else:
+            return img.squeeze(0)
+
+    def show_examples(self, figsize=(8, 6)):
+        batch_data, batch_label = next(self.example_iter)
+        images = list()
+        labels = list()
+
+        for i in range(len(batch_data)):
+            image = batch_data[i]
+            image = self.show_transform(image)
+
+            label = batch_label[i].item()
+            if self.classes is not None:
+                label = f'{label}:{self.classes[label]}'
+
+            images.append(image)
+            labels.append(label)
+
+        plot_examples(images, labels, figsize=figsize)
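
Note: a minimal usage sketch for the MyDataSet/cifar10_dataset pair above, not part of the commit. It assumes the package is importable from the repository root and that ../data is writable for the CIFAR-10 download.

# Illustrative sketch: build the dataset wrapper and pull one augmented batch.
from datasets import cifar10_dataset

dataset = cifar10_dataset(batch_size=64, normalize=True, shuffle=True, augment=True)
images, labels = next(iter(dataset.train_loader))  # downloads to ../data on first use
print(images.shape, labels.shape)                  # torch.Size([64, 3, 32, 32]) torch.Size([64])

dataset.show_examples(figsize=(8, 6))              # grid of denormalised training examples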
lightning_logs/version_0/hparams.yaml
ADDED
@@ -0,0 +1 @@
+{}
lightning_logs/version_1/hparams.yaml
ADDED
@@ -0,0 +1 @@
+{}
models/CUSTOMRESNET.py
ADDED
@@ -0,0 +1,166 @@
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+from torch import optim
+from pytorch_lightning import LightningModule
+from torchmetrics import Accuracy
+from utils.visualize import find_lr
+
+
+
+class MyModel(nn.Module):
+    def __init__(self):
+        super().__init__()
+        self.conv1 = nn.Sequential(
+            nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False),
+            nn.BatchNorm2d(64),
+            nn.ReLU(inplace=True)
+        )  # Number of Parameters = 3*3*3*64 = 1728
+        # Layer 1
+        self.conv11 = nn.Sequential(
+            nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=False),
+            nn.MaxPool2d(kernel_size=2, stride=2),
+            nn.BatchNorm2d(128),
+            nn.ReLU(inplace=True)
+        )  # Number of Parameters = 3*3*64*128 = 73728
+        self.conv12 = nn.Sequential(
+            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),  # Number of Parameters = 3*3*128*128 = 147456
+            nn.BatchNorm2d(128),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),  # Number of Parameters = 3*3*128*128 = 147456
+            nn.BatchNorm2d(128),
+            nn.ReLU(inplace=True)
+        )
+
+        # Layer 2
+        self.conv2 = nn.Sequential(
+            nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=False),
+            nn.MaxPool2d(kernel_size=2, stride=2),
+            nn.BatchNorm2d(256),
+            nn.ReLU(inplace=True)
+        )
+
+        # Layer 3
+        self.conv31 = nn.Sequential(
+            nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=False),
+            nn.MaxPool2d(kernel_size=2, stride=2),
+            nn.BatchNorm2d(512),
+            nn.ReLU(inplace=True)
+        )
+        self.conv32 = nn.Sequential(
+            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
+            nn.BatchNorm2d(512),
+            nn.ReLU(inplace=True),
+            nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
+            nn.BatchNorm2d(512),
+            nn.ReLU(inplace=True)
+        )
+
+        self.maxpool = nn.MaxPool2d(kernel_size=4, stride=2)
+
+        # Fully connected
+        self.fc = nn.Linear(512, 10, bias=True)
+
+    def forward(self, x):
+        # x = x.unsqueeze(0)
+        x = self.conv1(x)
+
+        x = self.conv11(x)
+        R1 = x
+        x = self.conv12(x)
+        x = x + R1
+
+        x = self.conv2(x)
+
+        x = self.conv31(x)
+        R2 = x
+        x = self.conv32(x)
+        x = x + R2
+
+        x = self.maxpool(x)
+
+        x = x.squeeze(dim=2)
+        x = x.squeeze(dim=2)
+        x = self.fc(x)
+        x = x.view(-1, 10)
+
+        return x
+
+
+class Model(LightningModule):
+    def __init__(self, dataset, max_epochs=24):
+        super(Model, self).__init__()
+
+        self.dataset = dataset
+        self.network = MyModel()
+        self.criterion = nn.CrossEntropyLoss()
+        self.train_accuracy = Accuracy(task='multiclass', num_classes=10)
+        self.val_accuracy = Accuracy(task='multiclass', num_classes=10)
+
+        self.max_epochs = max_epochs
+
+    def forward(self, x):
+        return self.network(x)
+
+    def common_step(self, batch, mode):
+        x, y = batch
+        logits = self.forward(x)
+        loss = self.criterion(logits, y)
+
+        acc_metric = getattr(self, f'{mode}_accuracy')
+        acc_metric(logits, y)
+
+        return loss
+
+    def training_step(self, batch, batch_idx):
+        loss = self.common_step(batch, 'train')
+        self.log("train_loss", loss, on_epoch=True, prog_bar=True, logger=True)
+        self.log("train_acc", self.train_accuracy, on_epoch=True, prog_bar=True, logger=True)
+        return loss
+
+    def validation_step(self, batch, batch_idx):
+        loss = self.common_step(batch, 'val')
+        self.log("val_loss", loss, on_epoch=True, prog_bar=True, logger=True)
+        self.log("val_acc", self.val_accuracy, on_epoch=True, prog_bar=True, logger=True)
+        return loss
+
+    def predict_step(self, batch, batch_idx, dataloader_idx=0):
+        if isinstance(batch, list):
+            x, _ = batch
+        else:
+            x = batch
+        return self.forward(x)
+
+    def configure_optimizers(self):
+        optimizer = optim.Adam(self.parameters(), lr=1e-7, weight_decay=1e-2)
+        best_lr = find_lr(self, self.train_dataloader(), optimizer, self.criterion)
+        scheduler = optim.lr_scheduler.OneCycleLR(
+            optimizer,
+            max_lr=best_lr,
+            steps_per_epoch=len(self.dataset.train_loader),
+            epochs=self.max_epochs,
+            pct_start=5/self.max_epochs,
+            div_factor=100,
+            three_phase=False,
+            final_div_factor=100,
+            anneal_strategy='linear'
+        )
+        return {
+            'optimizer': optimizer,
+            'lr_scheduler': {
+                "scheduler": scheduler,
+                "interval": "step",
+            }
+        }
+
+    def prepare_data(self):
+        self.dataset.download()
+
+    def train_dataloader(self):
+        return self.dataset.train_loader
+
+    def val_dataloader(self):
+        return self.dataset.test_loader
+
+    def predict_dataloader(self):
+        return self.val_dataloader()
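
Note: a minimal training sketch for the Model class above, not part of the commit. The Trainer configuration here is illustrative; utils/incorrect_images.py drives the same fit call with ModelSummary and LearningRateMonitor callbacks added.

# Illustrative sketch: fit the LightningModule on the CIFAR-10 wrapper.
from pytorch_lightning import Trainer
from datasets import cifar10_dataset
from models.CUSTOMRESNET import Model

dataset = cifar10_dataset(batch_size=512)
model = Model(dataset, max_epochs=24)

# configure_optimizers() runs the LR finder once, then steps OneCycleLR per batch.
trainer = Trainer(max_epochs=model.max_epochs)
trainer.fit(model)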
models/__init__.py
ADDED
File without changes
models/__pycache__/CUSTOMRESNET.cpython-311.pyc
ADDED
Binary file (9.1 kB).
models/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (171 Bytes).
requirements.txt
CHANGED
@@ -1,13 +1,15 @@
 torch
 torchvision
+torchinfo
+tqdm
+matplotlib
+albumentations
+numpy
+opencv-python
 torch-lr-finder
 grad-cam
-
-numpy
-gradio
-albumentations
-matlab
-pytorch-lightning
-albumentations
+pytorch-lightning
 torchmetrics
-
+pandas
+gradio
+Pillow
utils/__init__.py
ADDED
@@ -0,0 +1 @@
+from .visualize import *
utils/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (204 Bytes).
utils/__pycache__/incorrect_images.cpython-311.pyc
ADDED
Binary file (4.63 kB).
utils/__pycache__/visualize.cpython-311.pyc
ADDED
Binary file (4.35 kB).
utils/incorrect_images.py
ADDED
@@ -0,0 +1,65 @@
+import pandas as pd
+from collections import defaultdict
+
+from pytorch_lightning import Trainer
+from pytorch_lightning.callbacks import ModelSummary, LearningRateMonitor
+
+from .visualize import plot_examples, get_cam_visualisation, get_incorrect_preds
+
+
+class incorrect(object):
+    def __init__(self, model, max_epochs=None, precision="32-true"):
+        self.model = model
+        self.dataset = model.dataset
+        self.incorrect_preds = None
+        self.grad_cam = None
+        self.trainer = Trainer(callbacks=[ModelSummary(max_depth=10), LearningRateMonitor(logging_interval='step')],
+                               max_epochs=max_epochs or model.max_epochs, precision=precision)
+        self.incorrect_preds = None
+        self.incorrect_preds_pd = None
+        self.grad_cam = None
+
+    def execute(self):
+        self.trainer.fit(self.model)
+
+    def get_incorrect_preds(self):
+        self.incorrect_preds = defaultdict(list)
+        incorrect_images = list()
+        processed = 0
+        results = self.trainer.predict(self.model, self.model.predict_dataloader())
+        for (data, target), pred in zip(self.model.predict_dataloader(), results):
+            ind, pred_, truth = get_incorrect_preds(pred, target)
+            self.incorrect_preds["indices"] += [x + processed for x in ind]
+            incorrect_images += data[ind]
+            self.incorrect_preds["ground_truths"] += truth
+            self.incorrect_preds["predicted_vals"] += pred_
+            processed += len(data)
+        self.incorrect_preds_pd = pd.DataFrame(self.incorrect_preds)
+        self.incorrect_preds["images"] = incorrect_images
+
+    def show_incorrect(self, cams=False, target_layer=None):
+        if self.incorrect_preds is None:
+            self.get_incorrect_preds()
+
+        images = list()
+        labels = list()
+
+        for i in range(20):
+            image = self.incorrect_preds["images"][i]
+            pred = self.incorrect_preds["predicted_vals"][i]
+            truth = self.incorrect_preds["ground_truths"][i]
+
+            if cams:
+                image = get_cam_visualisation(self.model, self.dataset, image, pred, target_layer)
+            else:
+                image = self.dataset.show_transform(image).cpu()
+
+            if self.dataset.classes is not None:
+                pred = f'{pred}:{self.dataset.classes[pred]}'
+                truth = f'{truth}:{self.dataset.classes[truth]}'
+            label = f'{pred}/{truth}'
+
+            images.append(image)
+            labels.append(label)
+
+        plot_examples(images, labels, figsize=(10, 8))
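
Note: a minimal sketch of driving the incorrect helper above, not part of the commit. The target_layer choice is illustrative; any convolutional block of MyModel should work for the Grad-CAM overlay.

# Illustrative sketch: train via the helper, then inspect misclassified images.
from utils.incorrect_images import incorrect
from models.CUSTOMRESNET import Model
from datasets import cifar10_dataset

model = Model(cifar10_dataset(batch_size=512), max_epochs=24)
runner = incorrect(model, max_epochs=24)
runner.execute()                          # trainer.fit(model)

runner.show_incorrect()                   # first 20 wrong predictions, labelled pred/truth
runner.show_incorrect(cams=True,
                      target_layer=model.network.conv32)  # Grad-CAM overlays on the same images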
utils/visualize.py
ADDED
@@ -0,0 +1,70 @@
+import torch
+import torchinfo
+from torch_lr_finder import LRFinder
+from matplotlib import pyplot as plt
+from pytorch_grad_cam import GradCAM
+from pytorch_grad_cam.utils.image import show_cam_on_image
+from pytorch_grad_cam.utils.model_targets import ClassifierOutputTarget
+
+SEED = 42
+DEVICE = None
+
+
+def get_device():
+    global DEVICE
+    if DEVICE is not None:
+        return DEVICE
+
+    if torch.cuda.is_available():
+        DEVICE = "cuda"
+    elif torch.backends.mps.is_available():
+        DEVICE = "mps"
+    else:
+        DEVICE = "cpu"
+    print("Device Selected:", DEVICE)
+    return DEVICE
+
+
+def seed(seed=SEED):
+    torch.manual_seed(seed)
+    if get_device() == 'cuda':
+        torch.cuda.manual_seed(seed)
+
+
+def plot_examples(images, labels, figsize=None, n=20):
+    _ = plt.figure(figsize=figsize)
+
+    for i in range(n):
+        plt.subplot(4, n//4, i + 1)
+        plt.tight_layout()
+        image = images[i]
+        plt.imshow(image, cmap='gray')
+        label = labels[i]
+        plt.title(str(label))
+        plt.xticks([])
+        plt.yticks([])
+
+
+def find_lr(model, data_loader, optimizer, criterion):
+    lr_finder = LRFinder(model, optimizer, criterion)
+    lr_finder.range_test(data_loader, end_lr=0.1, num_iter=200, step_mode='exp')
+    _, best_lr = lr_finder.plot()
+    lr_finder.reset()
+    return best_lr
+
+
+def get_incorrect_preds(prediction, labels):
+    prediction = prediction.argmax(dim=1)
+    indices = prediction.ne(labels).nonzero().reshape(-1).tolist()
+    return indices, prediction[indices].tolist(), labels[indices].tolist()
+
+
+def get_cam_visualisation(model, dataset, input_tensor, label, target_layer, use_cuda=False):
+    grad_cam = GradCAM(model=model, target_layers=[target_layer], use_cuda=use_cuda)
+    targets = [ClassifierOutputTarget(label)]
+    grayscale_cam = grad_cam(input_tensor=input_tensor.unsqueeze(0), targets=targets)
+    grayscale_cam = grayscale_cam[0, :]
+    output = show_cam_on_image(dataset.show_transform(input_tensor).cpu().numpy(), grayscale_cam, use_rgb=True)
+    return output
+
+
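
Note: a minimal setup sketch for the helpers above, not part of the commit; it only exercises the seeding and device-selection utilities.

# Illustrative sketch: reproducibility and device setup.
from utils.visualize import seed, get_device

seed(42)               # seeds torch (and CUDA when available)
device = get_device()  # "cuda", "mps", or "cpu"; result is cached in DEVICE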