upload 14 files

- bird.jpg +0 -0
- car.jpg +0 -0
- cat.jpg +0 -0
- deer.jpg +0 -0
- dog.jpg +0 -0
- frog.jpg +0 -0
- gradcam_helper.py +87 -0
- horse.jpg +0 -0
- lightningmodel.py +263 -0
- misclas_helper.py +142 -0
- plane.jpg +0 -0
- ship.jpg +0 -0
- truck.jpg +0 -0
- weights_92.ckpt +3 -0
bird.jpg
ADDED
car.jpg
ADDED
cat.jpg
ADDED
deer.jpg
ADDED
dog.jpg
ADDED
frog.jpg
ADDED
gradcam_helper.py
ADDED
@@ -0,0 +1,87 @@
import os
import math
import numpy as np
import pandas as pd
import seaborn as sn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
from IPython.core.display import display
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.loggers import CSVLogger
from torch.optim.lr_scheduler import OneCycleLR
from torch.optim.swa_utils import AveragedModel, update_bn
from torchmetrics.functional import accuracy
from torchvision import datasets, transforms, utils
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image


# Default GradCAM target (None means the highest-scoring class is used)
targets = None


# Predecessor 3 for the gradioMisClassGradCAM app
def display_gradcam_output(data: list,
                           classes: list[str],
                           inv_normalize: transforms.Normalize,
                           model: nn.Module,
                           target_layers: list,
                           targets=None,
                           number_of_samples: int = 10,
                           transparency: float = 0.60):
    """
    Visualize GradCAM output on the data.
    :param data: List[Tuple(image, label, prediction)]
    :param classes: Names of the classes in the dataset
    :param inv_normalize: Inverse-normalization transform (undoes the dataset mean/std)
    :param model: Model architecture
    :param target_layers: Layers on which GradCAM should be executed
    :param targets: Classes to be focused on for GradCAM (None = predicted class)
    :param number_of_samples: Number of images to plot
    :param transparency: Weight of the original image when mixed with the activations
    """
    # Plot configuration
    fig = plt.figure(figsize=(10, 10))
    x_count = 5
    y_count = 1 if number_of_samples <= 5 else math.floor(number_of_samples / x_count)

    # Create the GradCAM object
    # cam = GradCAM(model=model, target_layers=target_layers, use_cuda=True)
    cam = GradCAM(model=model, target_layers=target_layers)

    # Iterate over the specified number of images
    for i in range(number_of_samples):
        plt.subplot(y_count, x_count, i + 1)
        input_tensor = data[i][0]

        # Get the activations of the layer for the image
        grayscale_cam = cam(input_tensor=input_tensor, targets=targets)
        grayscale_cam = grayscale_cam[0, :]

        # Recover the original image (channel-first tensor -> channel-last float array)
        img = input_tensor.squeeze(0).to('cpu')
        img = inv_normalize(img)
        rgb_img = np.transpose(img.numpy(), (1, 2, 0))

        # Overlay the activations on the original image
        visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True, image_weight=transparency)

        # Display the image on the plot
        plt.imshow(visualization)
        plt.xticks([])
        plt.yticks([])
    plt.savefig('imshow_output_gradcam.png')
    return 'imshow_output_gradcam.png'
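For context, a minimal driver sketch (not part of this commit): it loads weights_92.ckpt via LitResnet from lightningmodel.py, pulls misclassified samples from misclas_helper.py, and targets the final residual block. The class-name tuple and the layer choice are assumptions, not something this commit pins down.

import torch
from gradcam_helper import display_gradcam_output
from misclas_helper import get_misclassified_data2, inv_normalize
from lightningmodel import LitResnet

# CIFAR10 class names matching the sample images in this commit (assumption)
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LitResnet.load_from_checkpoint("weights_92.ckpt", map_location=device)
model = model.to(device).eval()

# Collect misclassified test images and overlay GradCAM heatmaps
# computed on the last residual block (layer choice is an assumption)
data = get_misclassified_data2(model, device, count=20)
target_layers = [model.model.resNetLayer2Part2]
png = display_gradcam_output(data, classes, inv_normalize, model,
                             target_layers, targets=None,
                             number_of_samples=10, transparency=0.6)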
horse.jpg
ADDED
lightningmodel.py
ADDED
@@ -0,0 +1,263 @@
import os
import math
import numpy as np
import pandas as pd
import seaborn as sn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
from IPython.core.display import display
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.loggers import CSVLogger
from torch.optim.lr_scheduler import OneCycleLR
from torch.optim.swa_utils import AveragedModel, update_bn
from torchmetrics.functional import accuracy
from torchvision import datasets, transforms, utils
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image

seed_everything(7)

# Batch size used by the OneCycleLR schedule in configure_optimizers below;
# the original file referenced BATCH_SIZE without defining it (a NameError).
# This value mirrors misclas_helper.py.
BATCH_SIZE = 256 if torch.cuda.is_available() else 64


class Net_S13(nn.Module):
    def __init__(self):
        super(Net_S13, self).__init__()

        # Control variable: set True to print tensor shapes in forward()
        self.printShape = False

        # Common channel counts and hyperparameters
        set1 = 64   # prepLayer
        set2 = 128  # Layer1
        set3 = 256  # Layer2
        set4 = 512  # Layer3
        avg = 1024  # channels
        drop = 0.1  # dropout
        S = 1       # stride
        K = 3       # kernel_size

        # PrepLayer - Conv 3x3 (s1, p1) >> BN >> ReLU [64k]
        I = 3
        O = set1
        P = 1  # padding
        self.prepLayer = self.convBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        # Layer1 -
        # X = Conv 3x3 (s1, p1) >> MaxPool2D >> BN >> ReLU [128k]
        # R1 = ResBlock((Conv-BN-ReLU-Conv-BN-ReLU))(X) [128k]
        # Add(X, R1)
        I = O
        O = set2
        P = 1
        self.Layer1 = self.convMPBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        I = O
        O = I
        P = 1
        self.resNetLayer1Part1 = self.convBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        I = O
        O = I
        P = 1
        self.resNetLayer1Part2 = self.convBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        # Layer2 - Conv 3x3 [256k] >> MaxPool2D >> BN >> ReLU
        I = O
        O = set3
        P = 1
        self.Layer2 = self.convMPBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        # Layer3 -
        # X = Conv 3x3 (s1, p1) >> MaxPool2D >> BN >> ReLU [512k]
        # R2 = ResBlock((Conv-BN-ReLU-Conv-BN-ReLU))(X) [512k]
        # Add(X, R2)
        I = O
        O = set4
        P = 1
        self.Layer3 = self.convMPBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        I = O
        O = I
        P = 1
        self.resNetLayer2Part1 = self.convBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        I = O
        O = I
        P = 1
        self.resNetLayer2Part2 = self.convBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        # MaxPooling with kernel size 4
        self.pool = nn.MaxPool2d(kernel_size=4, stride=4)

        # FC layer: 512 -> 10 classes
        I = 512
        O = 10
        self.lastLayer = nn.Linear(I, O)

        self.aGAP = nn.AdaptiveAvgPool2d((1, 1))
        self.flat = nn.Flatten(1, -1)
        self.gap = nn.AvgPool2d(avg)
        self.drop = nn.Dropout(drop)

    # Convolution block: Conv >> BN >> ReLU (Conv only when last_layer=True)
    def convBlock(self, in_channels, out_channels, kernel_size, stride, padding, last_layer=False, bias=False):
        if not last_layer:
            return nn.Sequential(
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, kernel_size=kernel_size, bias=bias),
                nn.BatchNorm2d(out_channels),
                nn.ReLU())
        else:
            return nn.Sequential(
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, kernel_size=kernel_size, bias=bias))

    # Convolution-MaxPool block: Conv >> MaxPool >> BN >> ReLU
    def convMPBlock(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, stride=stride, padding=padding, kernel_size=kernel_size, bias=bias),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU())

    # Shape-debugging helpers, gated by self.printShape
    def printf(self, n, x, string1=""):
        if self.printShape:
            print(f"{n} " f"{x.shape = }" f" {string1}")

    def printEmpty(self):
        if self.printShape:
            print("")

    def forward(self, x):
        self.printf(0.0, x, "prepLayer input")
        x = self.prepLayer(x)
        x = self.drop(x)
        self.printf(0.1, x, "prepLayer output")
        self.printEmpty()

        self.printf(1.0, x, "Layer1 input")
        x = self.Layer1(x)
        self.printf(1.1, x, "Layer1 output --> sacrosanct")
        y = x  # sacrosanct path1
        self.printf(1.2, x, "Layer1 resnet input")
        x = self.resNetLayer1Part1(x)  # residual path1
        x = self.drop(x)
        x = self.resNetLayer1Part2(x)  # residual path1
        self.printf(1.3, x, "Layer1 resnet output")
        x = x + y  # add sacrosanct path1 and residual path1
        x = self.drop(x)
        self.printf(1.4, x, "res+sacrosanct output")
        self.printEmpty()

        self.printf(2.0, x, "Layer2 input")
        x = self.Layer2(x)
        x = self.drop(x)
        self.printf(2.1, x, "Layer2 output")
        self.printEmpty()

        self.printf(3.0, x, "Layer3 input")
        x = self.Layer3(x)
        self.printf(3.1, x, "Layer3 output --> sacrosanct")
        y = x  # sacrosanct path2
        self.printf(3.2, x, "Layer3 resnet input")
        x = self.resNetLayer2Part1(x)  # residual path2
        x = self.drop(x)
        x = self.resNetLayer2Part2(x)  # residual path2
        self.printf(3.3, x, "Layer3 resnet output")
        x = x + y  # add sacrosanct path2 and residual path2
        x = self.drop(x)
        self.printf(3.4, x, "res+sacrosanct output")
        self.printEmpty()

        self.printf(4.0, x, "pool input")
        x = self.pool(x)
        self.printf(4.1, x, "pool output")
        self.printEmpty()

        self.printf(4.2, x, "before the last layer")
        x = x.view(x.size(0), -1)
        self.printf(5.0, x, "last layer input")  # 512
        x = self.lastLayer(x)
        self.printf(5.1, x, "last layer output")  # 10
        self.printEmpty()

        # Note: LitResnet.forward applies log_softmax on top of this output;
        # since log_softmax is monotonic, the predicted class is unaffected.
        return F.log_softmax(x, dim=1)


def create_model():
    model = Net_S13()
    return model


class LitResnet(LightningModule):
    def __init__(self, lr=0.05):
        super().__init__()

        self.save_hyperparameters()
        self.model = create_model()

    def forward(self, x):
        out = self.model(x)
        return F.log_softmax(out, dim=1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        self.log("train_loss", loss)
        return loss

    def evaluate(self, batch, stage=None):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        preds = torch.argmax(logits, dim=1)
        acc = accuracy(preds, y, task="multiclass", num_classes=10)

        if stage:
            self.log(f"{stage}_loss", loss, prog_bar=True)
            self.log(f"{stage}_acc", acc, prog_bar=True)

    def validation_step(self, batch, batch_idx):
        self.evaluate(batch, "val")

    def test_step(self, batch, batch_idx):
        self.evaluate(batch, "test")

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=self.hparams.lr,
            momentum=0.9,
            weight_decay=5e-4,
        )
        # 45,000 training images remain after the 5,000-image validation split
        steps_per_epoch = 45000 // BATCH_SIZE
        scheduler_dict = {
            "scheduler": OneCycleLR(
                optimizer,
                0.1,
                epochs=self.trainer.max_epochs,
                steps_per_epoch=steps_per_epoch,
            ),
            "interval": "step",
        }
        return {"optimizer": optimizer, "lr_scheduler": scheduler_dict}
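For reference, a hedged training sketch (not part of this commit; the epoch count and datamodule settings are assumptions) showing how LitResnet pairs with the OneCycleLR schedule above:

from pl_bolts.datamodules import CIFAR10DataModule
from pytorch_lightning import Trainer
from pytorch_lightning.callbacks import LearningRateMonitor
from lightningmodel import LitResnet, BATCH_SIZE

# Datamodule with the same batch size the scheduler's step count assumes
cifar10_dm = CIFAR10DataModule(data_dir=".", batch_size=BATCH_SIZE, num_workers=4)

model = LitResnet(lr=0.05)
trainer = Trainer(
    max_epochs=24,  # assumption; OneCycleLR reads this via self.trainer.max_epochs
    accelerator="auto",
    devices=1,
    callbacks=[LearningRateMonitor(logging_interval="step")],
)
trainer.fit(model, cifar10_dm)
trainer.test(model, datamodule=cifar10_dm)
trainer.save_checkpoint("weights.ckpt")  # hypothetical output name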
misclas_helper.py
ADDED
@@ -0,0 +1,142 @@
import os
import math
import numpy as np
import pandas as pd
import seaborn as sn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
from IPython.core.display import display
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.loggers import CSVLogger
from torch.optim.lr_scheduler import OneCycleLR
from torch.optim.swa_utils import AveragedModel, update_bn
from torchmetrics.functional import accuracy
from torchvision import datasets, transforms, utils
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image

# Denormalize images using the approximate CIFAR10 test mean (~0.50) and std (~0.23)
inv_normalize = transforms.Normalize(
    mean=[-0.50/0.23, -0.50/0.23, -0.50/0.23],
    std=[1/0.23, 1/0.23, 1/0.23]
)


def get_misclassified_data2(model, device, count):
    """
    Run the model on the CIFAR10 test set and return misclassified images.
    :param model: Network architecture
    :param device: CPU/GPU
    :param count: Number of misclassified samples to collect
    """
    PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
    BATCH_SIZE = 256 if torch.cuda.is_available() else 64
    NUM_WORKERS = int(os.cpu_count() / 2)

    train_transforms = torchvision.transforms.Compose(
        [
            torchvision.transforms.RandomCrop(32, padding=4),
            torchvision.transforms.RandomHorizontalFlip(),
            torchvision.transforms.ToTensor(),
            cifar10_normalization(),
        ]
    )

    test_transforms = torchvision.transforms.Compose(
        [
            torchvision.transforms.ToTensor(),
            cifar10_normalization(),
        ]
    )

    cifar10_dm = CIFAR10DataModule(
        data_dir=PATH_DATASETS,
        batch_size=BATCH_SIZE,
        num_workers=NUM_WORKERS,
        train_transforms=train_transforms,
        test_transforms=test_transforms,
        val_transforms=test_transforms,
    )

    cifar10_dm.prepare_data()
    cifar10_dm.setup()
    test_loader = cifar10_dm.test_dataloader()

    # Prepare the model for evaluation, i.e. disable dropout
    model.eval()

    # List to store misclassified images
    misclassified_data = []

    # No gradients needed for inference
    with torch.no_grad():
        # Extract images and labels in a batch
        for data, target in test_loader:

            # Move the data to the device
            data, target = data.to(device), target.to(device)

            # Extract a single image and label from the batch
            for image, label in zip(data, target):

                # Add a batch dimension to the image
                image = image.unsqueeze(0)

                # Get the model prediction on the image
                output = model(image)

                # Convert the log-probabilities to a predicted class index
                pred = output.argmax(dim=1, keepdim=True)

                # If the prediction is incorrect, append the data
                if pred != label:
                    misclassified_data.append((image, label, pred))

                if len(misclassified_data) > count:
                    break
            # Stop scanning batches once enough samples have been collected
            if len(misclassified_data) > count:
                break
    return misclassified_data


# Predecessor 2 for the gradioMisClass app
def display_cifar_misclassified_data(data: list,
                                     classes: list[str],
                                     inv_normalize: transforms.Normalize,
                                     number_of_samples: int = 10):
    """
    Plot the misclassified images with labels.
    :param data: List[Tuple(image, label)]
    :param classes: Names of the classes in the dataset
    :param inv_normalize: Inverse-normalization transform (undoes the dataset mean/std)
    :param number_of_samples: Number of images to plot
    """
    fig = plt.figure(figsize=(10, 10))
    img = None
    x_count = 5
    y_count = 1 if number_of_samples <= 5 else math.floor(number_of_samples / x_count)

    for i in range(number_of_samples):
        plt.subplot(y_count, x_count, i + 1)
        img = data[i][0].squeeze().to('cpu')
        img = inv_normalize(img)
        # Channel-first tensor -> channel-last array for imshow
        plt.imshow(np.transpose(img.numpy(), (1, 2, 0)))
        plt.xticks([])
        plt.yticks([])
    plt.savefig('imshow_output_misclas.png')
    return 'imshow_output_misclas.png'
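A short usage sketch for the two helpers above (not part of this commit; the checkpoint loading mirrors the GradCAM sketch earlier, and the class names are again an assumption):

import torch
from misclas_helper import (get_misclassified_data2,
                            display_cifar_misclassified_data, inv_normalize)
from lightningmodel import LitResnet

classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')  # assumption

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = LitResnet.load_from_checkpoint("weights_92.ckpt", map_location=device)
model = model.to(device).eval()

# Collect 20 misclassified test images and save a 2x5 grid of the first 10
misclassified = get_misclassified_data2(model, device, count=20)
png_path = display_cifar_misclassified_data(misclassified, classes,
                                            inv_normalize, number_of_samples=10)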
plane.jpg
ADDED
ship.jpg
ADDED
truck.jpg
ADDED
weights_92.ckpt
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:5e2a9fff8b371c9438f23ff81373c5858e6d5339b00ea6845c61bc6c3ef4abc0
size 52633065