uploaded 15 files
- app.py +126 -0
- bird.jpg +0 -0
- car.jpg +0 -0
- cat.jpg +0 -0
- deer.jpg +0 -0
- dog.jpg +0 -0
- frog.jpg +0 -0
- gradcam_helper.py +86 -0
- horse.jpg +0 -0
- lightningmodel.py +262 -0
- misclas_helper.py +141 -0
- plane.jpg +0 -0
- ship.jpg +0 -0
- truck.jpg +0 -0
- weights_92.ckpt +3 -0
app.py
ADDED
@@ -0,0 +1,126 @@
```python
# gradioMisClassGradCAMimageInputter
import os
import math
import numpy as np
import pandas as pd
import seaborn as sn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.loggers import CSVLogger
from torch.optim.lr_scheduler import OneCycleLR
from torch.optim.swa_utils import AveragedModel, update_bn
from torchmetrics.functional import accuracy
from pytorch_lightning.callbacks import ModelCheckpoint
from torchvision import datasets, transforms, utils
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image
import gradio as gr
import misclas_helper
import gradcam_helper
import lightningmodel
from misclas_helper import display_cifar_misclassified_data
from gradcam_helper import display_gradcam_output
from misclas_helper import get_misclassified_data2
from lightningmodel import LitResnet

targets = None
device = torch.device("cpu")
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')

# load_from_checkpoint is a classmethod, so call it on the class itself
model = LitResnet.load_from_checkpoint("weights_92.ckpt")

# Denormalize the data using test mean and std deviation
inv_normalize = transforms.Normalize(
    mean=[-0.50 / 0.23, -0.50 / 0.23, -0.50 / 0.23],
    std=[1 / 0.23, 1 / 0.23, 1 / 0.23]
)

# Get the misclassified data from the test dataset
misclassified_data = get_misclassified_data2(model, device, 20)


def hello(DoYouWantToShowMisClassifiedImages, HowManyImages):
    if DoYouWantToShowMisClassifiedImages.lower() == "yes":
        fileName = misclas_helper.display_cifar_misclassified_data(
            misclassified_data, classes, inv_normalize,
            number_of_samples=HowManyImages)
        return Image.open(fileName)
    else:
        return None


misClass_demo = gr.Interface(
    fn=hello,
    inputs=['text', gr.Slider(0, 20, step=5)],
    outputs=['image'],
    title="Misclassified Images",
    description="Images are shown only if you answer yes to "
                "DoYouWantToShowMisClassifiedImages.",
)


############


def inference(DoYouWantToShowGradCAMMedImages, HowManyImages, WhichLayer, transparency):
    if DoYouWantToShowGradCAMMedImages.lower() == "yes":
        if WhichLayer == -1:
            target_layers = [model.model.resNetLayer2Part2[-1]]
        elif WhichLayer == -2:
            target_layers = [model.model.resNetLayer2Part1[-1]]
        elif WhichLayer == -3:
            target_layers = [model.model.Layer3[-1]]
        # Pass the user-chosen transparency through instead of a hard-coded 0.70
        fileName = gradcam_helper.display_gradcam_output(
            misclassified_data, classes, inv_normalize, model.model,
            target_layers, targets, number_of_samples=HowManyImages,
            transparency=transparency)
        return Image.open(fileName)
    return None


gradCAM_demo = gr.Interface(
    fn=inference,
    # Inputs: DoYouWantToShowGradCAMMedImages, HowManyImages, WhichLayer, transparency
    inputs=['text', gr.Slider(0, 20, step=5),
            gr.Slider(-3, -1, value=-1, step=1),
            gr.Slider(0, 1, value=0.7, label="Overall Opacity of the Overlay")],
    outputs=['image'],
    title="GradCAMMed Images",
    description="Images are shown only if you answer yes to "
                "DoYouWantToShowGradCAMMedImages.",
)


############


def ImageInputter(img1, img2, img3, img4, img5, img6, img7, img8, img9, img10):
    return img1, img2, img3, img4, img5, img6, img7, img8, img9, img10


imageInputter_demo = gr.Interface(
    ImageInputter,
    ["image"] * 10,
    ["image"] * 10,
    examples=[
        ["bird.jpg", "car.jpg", "cat.jpg"],
        ["deer.jpg", "dog.jpg", "frog.jpg"],
        ["horse.jpg", "plane.jpg", "ship.jpg"],
        [None, "truck.jpg", None],
    ],
    title="Max 10 images input",
    description="A sample image inputter: feed in up to 10 images and display them. "
                "You can drag and drop images from the examples below into the input slots.",
)


############


demo = gr.TabbedInterface(
    interface_list=[misClass_demo, gradCAM_demo, imageInputter_demo],
    tab_names=["MisClassified Images", "GradCAMMed Images", "10 images inputter"]
)

demo.launch(debug=True)
```
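The imports above pull in a sizable dependency set, but this commit ships no requirements.txt. The following is only a plausible sketch of what the Space would need; the package names are inferred from the imports and the version pins are deliberately left open:

```text
# Hypothetical requirements.txt (not part of this commit)
torch
torchvision
pytorch-lightning
lightning-bolts   # provides pl_bolts
torchmetrics
grad-cam          # provides pytorch_grad_cam
gradio
numpy
pandas
seaborn
matplotlib
Pillow
```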
bird.jpg
ADDED
car.jpg
ADDED
cat.jpg
ADDED
deer.jpg
ADDED
dog.jpg
ADDED
frog.jpg
ADDED
gradcam_helper.py
ADDED
@@ -0,0 +1,86 @@
```python
import os
import math
import numpy as np
import pandas as pd
import seaborn as sn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.loggers import CSVLogger
from torch.optim.lr_scheduler import OneCycleLR
from torch.optim.swa_utils import AveragedModel, update_bn
from torchmetrics.functional import accuracy
from pytorch_lightning.callbacks import ModelCheckpoint
from torchvision import datasets, transforms, utils
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image

targets = None


# Important predecessor for gradioMisClassGradCAM
def display_gradcam_output(data: list,
                           classes: list[str],
                           inv_normalize: transforms.Normalize,
                           model,
                           target_layers: list,
                           targets=None,
                           number_of_samples: int = 10,
                           transparency: float = 0.60):
    """
    Visualize Grad-CAM output on the data.
    :param data: List[Tuple(image, label, prediction)]
    :param classes: Names of the classes in the dataset
    :param inv_normalize: Inverse-normalization transform for the dataset
    :param model: Model architecture
    :param target_layers: Layers on which Grad-CAM should be executed
    :param targets: Classes to be focused on for Grad-CAM
    :param number_of_samples: Number of images to plot
    :param transparency: Weight of the original image when mixed with the activations
    """
    # Plot configuration
    fig = plt.figure(figsize=(10, 10))
    x_count = 5
    y_count = 1 if number_of_samples <= 5 else math.floor(number_of_samples / x_count)

    # Create a GradCAM object (use_cuda is omitted; the app runs on CPU)
    cam = GradCAM(model=model, target_layers=target_layers)

    # Iterate over the specified number of images
    for i in range(number_of_samples):
        plt.subplot(y_count, x_count, i + 1)
        input_tensor = data[i][0]

        # Get the activations of the layer for the image
        grayscale_cam = cam(input_tensor=input_tensor, targets=targets)
        grayscale_cam = grayscale_cam[0, :]

        # Get back the original image (convert to numpy before transposing)
        img = input_tensor.squeeze(0).to('cpu')
        img = inv_normalize(img)
        rgb_img = np.transpose(img.numpy(), (1, 2, 0))

        # Mix the activations with the original image
        visualization = show_cam_on_image(rgb_img, grayscale_cam, use_rgb=True,
                                          image_weight=transparency)

        # Display the image on the plot
        plt.imshow(visualization)
        # plt.title(r"Correct: " + classes[data[i][1].item()] + '\n' + 'Output: ' + classes[data[i][2].item()])
        plt.xticks([])
        plt.yticks([])
    plt.savefig('imshow_output_gradcam.png')
    return 'imshow_output_gradcam.png'
```
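For context, a minimal usage sketch mirroring how app.py calls this helper; `model` (the loaded LitResnet) and `misclassified_data` (from misclas_helper.get_misclassified_data2) are assumed to already be in scope:

```python
# Run Grad-CAM over the last conv block of the second residual layer and
# save the resulting grid. Assumes `model` and `misclassified_data` exist
# as they do in app.py.
from misclas_helper import inv_normalize
from gradcam_helper import display_gradcam_output

classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')
target_layers = [model.model.resNetLayer2Part2[-1]]
png_path = display_gradcam_output(misclassified_data, classes, inv_normalize,
                                  model.model, target_layers, targets=None,
                                  number_of_samples=10, transparency=0.7)
```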
horse.jpg
ADDED
lightningmodel.py
ADDED
@@ -0,0 +1,262 @@
```python
import os
import math
import numpy as np
import pandas as pd
import seaborn as sn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.loggers import CSVLogger
from torch.optim.lr_scheduler import OneCycleLR
from torch.optim.swa_utils import AveragedModel, update_bn
from torchmetrics.functional import accuracy
from pytorch_lightning.callbacks import ModelCheckpoint
from torchvision import datasets, transforms, utils
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image

seed_everything(7)

# Batch size used by the OneCycleLR schedule (same heuristic as misclas_helper)
BATCH_SIZE = 256 if torch.cuda.is_available() else 64


class Net_S13(nn.Module):
    def __init__(self):
        super(Net_S13, self).__init__()

        # Control variable: set True to print tensor shapes in forward()
        self.printShape = False

        # Common channel sizes
        set1 = 64    # prepLayer
        set2 = 128   # Layer1
        set3 = 256   # Layer2
        set4 = 512   # Layer3
        avg = 1024   # channels for the (unused) average-pool layer
        drop = 0.1   # dropout
        S = 1        # stride
        K = 3        # kernel_size

        # PrepLayer - Conv 3x3 (s1, p1) >> BN >> ReLU [64k]
        I = 3
        O = set1
        P = 1  # padding
        self.prepLayer = self.convBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        # Layer1 -
        # X = Conv 3x3 (s1, p1) >> MaxPool2D >> BN >> ReLU [128k]
        # R1 = ResBlock((Conv-BN-ReLU-Conv-BN-ReLU))(X) [128k]
        # Add(X, R1)
        I = O
        O = set2
        self.Layer1 = self.convMPBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        I = O
        self.resNetLayer1Part1 = self.convBlock(in_channels=I, out_channels=I, kernel_size=K, stride=S, padding=P)
        self.resNetLayer1Part2 = self.convBlock(in_channels=I, out_channels=I, kernel_size=K, stride=S, padding=P)

        # Layer2 - Conv 3x3 [256k] >> MaxPool2D >> BN >> ReLU
        I = O
        O = set3
        self.Layer2 = self.convMPBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        # Layer3 -
        # X = Conv 3x3 (s1, p1) >> MaxPool2D >> BN >> ReLU [512k]
        # R2 = ResBlock((Conv-BN-ReLU-Conv-BN-ReLU))(X) [512k]
        # Add(X, R2)
        I = O
        O = set4
        self.Layer3 = self.convMPBlock(in_channels=I, out_channels=O, kernel_size=K, stride=S, padding=P)

        I = O
        self.resNetLayer2Part1 = self.convBlock(in_channels=I, out_channels=I, kernel_size=K, stride=S, padding=P)
        self.resNetLayer2Part2 = self.convBlock(in_channels=I, out_channels=I, kernel_size=K, stride=S, padding=P)

        # MaxPooling with kernel size 4
        self.pool = nn.MaxPool2d(kernel_size=4, stride=4)

        # FC layer
        self.lastLayer = nn.Linear(512, 10)

        self.aGAP = nn.AdaptiveAvgPool2d((1, 1))
        self.flat = nn.Flatten(1, -1)
        self.gap = nn.AvgPool2d(avg)
        self.drop = nn.Dropout(drop)

    # Convolution block: Conv >> BN >> ReLU (bare Conv for the last layer)
    def convBlock(self, in_channels, out_channels, kernel_size, stride, padding, last_layer=False, bias=False):
        if not last_layer:
            return nn.Sequential(
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels, stride=stride,
                          padding=padding, kernel_size=kernel_size, bias=bias),
                nn.BatchNorm2d(out_channels),
                nn.ReLU())
        else:
            return nn.Sequential(
                nn.Conv2d(in_channels=in_channels, out_channels=out_channels, stride=stride,
                          padding=padding, kernel_size=kernel_size, bias=bias))

    # Convolution-MaxPool block: Conv >> MaxPool >> BN >> ReLU
    def convMPBlock(self, in_channels, out_channels, kernel_size, stride, padding, bias=False):
        return nn.Sequential(
            nn.Conv2d(in_channels=in_channels, out_channels=out_channels, stride=stride,
                      padding=padding, kernel_size=kernel_size, bias=bias),
            nn.MaxPool2d(kernel_size=2, stride=2),
            nn.BatchNorm2d(out_channels),
            nn.ReLU())

    # Debug prints, enabled via self.printShape
    def printf(self, n, x, string1=""):
        if self.printShape:
            print(f"{n} " f"{x.shape = }" f" {string1}")

    def printEmpty(self):
        if self.printShape:
            print("")

    def forward(self, x):
        self.printf(0.0, x, "prepLayer input")
        x = self.prepLayer(x)
        x = self.drop(x)
        self.printf(0.1, x, "prepLayer output")
        self.printEmpty()

        self.printf(1.0, x, "Layer1 input")
        x = self.Layer1(x)
        self.printf(1.1, x, "Layer1 output --> sacrosanct")
        y = x  # sacrosanct path 1
        self.printf(1.2, x, "Layer1 resnet input")
        x = self.resNetLayer1Part1(x)  # residual path 1
        x = self.drop(x)
        x = self.resNetLayer1Part2(x)  # residual path 1
        self.printf(1.3, x, "Layer1 resnet output")
        x = x + y  # add sacrosanct path 1 and residual path 1
        x = self.drop(x)
        self.printf(1.4, x, "res+sacrosanct output")
        self.printEmpty()

        self.printf(2.0, x, "Layer2 input")
        x = self.Layer2(x)
        x = self.drop(x)
        self.printf(2.1, x, "Layer2 output")
        self.printEmpty()

        self.printf(3.0, x, "Layer3 input")
        x = self.Layer3(x)
        self.printf(3.1, x, "Layer3 output --> sacrosanct")
        y = x  # sacrosanct path 2
        self.printf(3.2, x, "Layer3 resnet input")
        x = self.resNetLayer2Part1(x)  # residual path 2
        x = self.drop(x)
        x = self.resNetLayer2Part2(x)  # residual path 2
        self.printf(3.3, x, "Layer3 resnet output")
        x = x + y  # add sacrosanct path 2 and residual path 2
        x = self.drop(x)
        self.printf(3.4, x, "res+sacrosanct output")
        self.printEmpty()

        self.printf(4.0, x, "pool input")
        x = self.pool(x)
        self.printf(4.1, x, "pool output")
        self.printEmpty()

        self.printf(4.2, x, "before last layer")
        x = x.view(x.size(0), -1)
        self.printf(5.0, x, "last layer input")   # 512
        x = self.lastLayer(x)
        self.printf(5.1, x, "last layer output")  # 10
        self.printEmpty()

        # Note: LitResnet.forward applies log_softmax again; kept as trained
        return F.log_softmax(x, dim=1)


def create_model():
    return Net_S13()


class LitResnet(LightningModule):
    def __init__(self, lr=0.05):
        super().__init__()

        self.save_hyperparameters()
        self.model = create_model()

    def forward(self, x):
        out = self.model(x)
        return F.log_softmax(out, dim=1)

    def training_step(self, batch, batch_idx):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        self.log("train_loss", loss)
        return loss

    def evaluate(self, batch, stage=None):
        x, y = batch
        logits = self(x)
        loss = F.nll_loss(logits, y)
        preds = torch.argmax(logits, dim=1)
        acc = accuracy(preds, y, task='multiclass', num_classes=10)

        if stage:
            self.log(f"{stage}_loss", loss, prog_bar=True)
            self.log(f"{stage}_acc", acc, prog_bar=True)

    def validation_step(self, batch, batch_idx):
        self.evaluate(batch, "val")

    def test_step(self, batch, batch_idx):
        self.evaluate(batch, "test")

    def configure_optimizers(self):
        optimizer = torch.optim.SGD(
            self.parameters(),
            lr=self.hparams.lr,
            momentum=0.9,
            weight_decay=5e-4,
        )
        steps_per_epoch = 45000 // BATCH_SIZE
        scheduler_dict = {
            "scheduler": OneCycleLR(
                optimizer,
                0.1,
                epochs=self.trainer.max_epochs,
                steps_per_epoch=steps_per_epoch,
            ),
            "interval": "step",
        }
        return {"optimizer": optimizer, "lr_scheduler": scheduler_dict}
```
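As a usage note, here is a minimal sketch of loading the checkpoint from this commit and classifying one CIFAR-10-sized input, the same pattern app.py uses:

```python
# Minimal inference sketch: load the LFS checkpoint shipped in this commit
# and classify a single (1, 3, 32, 32) tensor.
import torch
from lightningmodel import LitResnet

model = LitResnet.load_from_checkpoint("weights_92.ckpt")  # classmethod
model.eval()
with torch.no_grad():
    logits = model(torch.randn(1, 3, 32, 32))  # log-probabilities, shape (1, 10)
    pred_class = logits.argmax(dim=1).item()
```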
misclas_helper.py
ADDED
@@ -0,0 +1,141 @@
```python
import os
import math
import numpy as np
import pandas as pd
import seaborn as sn
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import matplotlib.pyplot as plt
from pl_bolts.datamodules import CIFAR10DataModule
from pl_bolts.transforms.dataset_normalizations import cifar10_normalization
from pytorch_lightning import LightningModule, Trainer, seed_everything
from pytorch_lightning.callbacks import LearningRateMonitor
from pytorch_lightning.callbacks.progress import TQDMProgressBar
from pytorch_lightning.loggers import CSVLogger
from torch.optim.lr_scheduler import OneCycleLR
from torch.optim.swa_utils import AveragedModel, update_bn
from torchmetrics.functional import accuracy
from pytorch_lightning.callbacks import ModelCheckpoint
from torchvision import datasets, transforms, utils
from PIL import Image
from pytorch_grad_cam import GradCAM
from pytorch_grad_cam.utils.image import show_cam_on_image

# Denormalize the data using test mean and std deviation
inv_normalize = transforms.Normalize(
    mean=[-0.50 / 0.23, -0.50 / 0.23, -0.50 / 0.23],
    std=[1 / 0.23, 1 / 0.23, 1 / 0.23]
)


def get_misclassified_data2(model, device, count):
    """
    Run the model on the CIFAR-10 test set and collect misclassified images.
    :param model: Network architecture
    :param device: CPU/GPU
    :param count: Number of misclassified samples to collect
    """
    PATH_DATASETS = os.environ.get("PATH_DATASETS", ".")
    BATCH_SIZE = 256 if torch.cuda.is_available() else 64
    NUM_WORKERS = int(os.cpu_count() / 2)

    train_transforms = torchvision.transforms.Compose(
        [
            torchvision.transforms.RandomCrop(32, padding=4),
            torchvision.transforms.RandomHorizontalFlip(),
            torchvision.transforms.ToTensor(),
            cifar10_normalization(),
        ]
    )

    test_transforms = torchvision.transforms.Compose(
        [
            torchvision.transforms.ToTensor(),
            cifar10_normalization(),
        ]
    )

    cifar10_dm = CIFAR10DataModule(
        data_dir=PATH_DATASETS,
        batch_size=BATCH_SIZE,
        num_workers=NUM_WORKERS,
        train_transforms=train_transforms,
        test_transforms=test_transforms,
        val_transforms=test_transforms,
    )

    cifar10_dm.prepare_data()
    cifar10_dm.setup()
    test_loader = cifar10_dm.test_dataloader()

    # Prepare the model for evaluation, i.e. disable the dropout layers
    model.eval()

    # List to store misclassified images
    misclassified_data = []

    # No gradients are needed for evaluation
    with torch.no_grad():
        # Extract images and labels one batch at a time
        for data, target in test_loader:

            # Migrate the data to the device
            data, target = data.to(device), target.to(device)

            # Extract a single image and label from the batch
            for image, label in zip(data, target):

                # Add a batch dimension to the image
                image = image.unsqueeze(0)

                # Get the model prediction on the image
                output = model(image)

                # Convert the log-probabilities to a predicted class index
                pred = output.argmax(dim=1, keepdim=True)

                # If the prediction is incorrect, append the data
                if pred != label:
                    misclassified_data.append((image, label, pred))

                if len(misclassified_data) > count:
                    break
            # Stop the batch loop too once enough samples are collected
            if len(misclassified_data) > count:
                break
    return misclassified_data


# Important predecessor for gradioMisClass
def display_cifar_misclassified_data(data: list,
                                     classes: list[str],
                                     inv_normalize: transforms.Normalize,
                                     number_of_samples: int = 10):
    """
    Plot misclassified images.
    :param data: List[Tuple(image, label, prediction)]
    :param classes: Names of the classes in the dataset
    :param inv_normalize: Inverse-normalization transform for the dataset
    :param number_of_samples: Number of images to plot
    """
    fig = plt.figure(figsize=(10, 10))
    x_count = 5
    y_count = 1 if number_of_samples <= 5 else math.floor(number_of_samples / x_count)

    for i in range(number_of_samples):
        plt.subplot(y_count, x_count, i + 1)
        img = data[i][0].squeeze().to('cpu')
        img = inv_normalize(img)
        plt.imshow(np.transpose(img, (1, 2, 0)))
        plt.xticks([])
        plt.yticks([])
    plt.savefig('imshow_output_misclas.png')
    return 'imshow_output_misclas.png'
```
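A minimal usage sketch mirroring app.py; `model` is assumed to be the LitResnet loaded from weights_92.ckpt:

```python
# Collect misclassified CIFAR-10 test images and render them as a PNG grid.
# Assumes `model` is the loaded LitResnet, as in app.py.
import torch
from misclas_helper import (get_misclassified_data2,
                            display_cifar_misclassified_data, inv_normalize)

device = torch.device("cpu")
classes = ('plane', 'car', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck')
misclassified_data = get_misclassified_data2(model, device, 20)
png_path = display_cifar_misclassified_data(misclassified_data, classes,
                                            inv_normalize, number_of_samples=10)
```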
plane.jpg
ADDED
ship.jpg
ADDED
truck.jpg
ADDED
weights_92.ckpt
ADDED
@@ -0,0 +1,3 @@
```text
version https://git-lfs.github.com/spec/v1
oid sha256:5e2a9fff8b371c9438f23ff81373c5858e6d5339b00ea6845c61bc6c3ef4abc0
size 52633065
```