hydraadra112 committed
Commit 0c2ac0a · 1 Parent(s): 9922762

Created DR Classifier App

Files changed (5)
  1. app.py +205 -0
  2. architecture.py +70 -0
  3. model.ipynb +0 -0
  4. modelv2_output.png +0 -0
  5. requirements.txt +4 -0
app.py ADDED
@@ -0,0 +1,205 @@
+ import streamlit as st
+ import torch
+ from torch import nn
+ from pathlib import Path
+ from PIL import Image
+ from streamlit_image_zoom import image_zoom
+ from torchvision.transforms import v2
+ from architecture import DR_Classifierv2
+ import os
+ import random
+
+ # Labels for classification
+ idx_labels = {0: 'Mild', 1: 'Moderate', 2: 'No DR', 3: 'Proliferate DR', 4: 'Severe'}
+ current_device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+ # Model loading
+ @st.cache_resource
+ def load_model() -> nn.Module:
+     PATH = Path('./dataset/model_v2.pth')
+     model = DR_Classifierv2(input_shape=3, output_shape=5, hidden_units=64)
+     model.load_state_dict(torch.load(PATH, map_location=current_device))
+     # Move the model to the same device the input tensors are sent to
+     return model.to(current_device)
+
+ # Preprocess images for prediction
+ def preprocess_image(img: Image.Image) -> torch.Tensor:
+     transform = v2.Compose([
+         v2.PILToTensor(),
+         v2.ToDtype(torch.float32),
+         v2.Resize((224, 224)),
+         v2.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])])
+     return transform(img).unsqueeze(0).to(current_device)
+
+ def predict_class(model: nn.Module, img: torch.Tensor) -> str:
+     if img.shape != (img.shape[0], 3, 224, 224):
+         raise ValueError('Image is not the expected shape: [batch_size, 3, 224, 224]')
+
+     model.eval()
+     with torch.inference_mode():
+         pred = model(img)
+         predicted_class = torch.argmax(pred, dim=1).item()
+     return idx_labels[predicted_class]
+
+ # Main app
+ def main():
+     st.header('Diabetic Retinopathy Classifier')
+     st.caption('Prepared by: John Manuel Carado | BSCS 3-A')
+
+     # Tabs for navigation
+     pred_tab, model_tab, data_tab = st.tabs(['Prediction', 'About the Model', 'About the Dataset'])
+
+     with pred_tab:
+         pytorch_model = load_model()
+
+         # Upload an image
+         uploaded_image = st.file_uploader('Upload a fundus image for classification!', type=['png', 'jpg'])
+
+         selected_image = None
+
+         with st.expander('**OR choose from existing fundus images:**'):
+             existing_images = {
+                 "No DR": Path('./dataset/colored_images/No_DR/0ae2dd2e09ea.png'),
+                 "Moderate": Path('./dataset/colored_images/Moderate/fd48cf452e9d.png'),
+                 "Proliferate DR": Path('./dataset/colored_images/Proliferate_DR/0e82bcacc475.png')
+             }
+
+             cols = st.columns(len(existing_images))
+             for (label, img_path), col in zip(existing_images.items(), cols):
+                 with col:
+                     st.image(img_path, caption=label)
+                     if st.button(f'Select {label}'):
+                         selected_image = img_path
+
+         # Use the uploaded image if provided, otherwise fall back to the selected existing image
+         image = uploaded_image or selected_image
+
+         # Prediction and display
+         if image:
+             parsed_image = Image.open(image).convert("RGB")
+             preprocessed_image = preprocess_image(parsed_image)
+             classification = predict_class(pytorch_model, preprocessed_image)
+
+             # Show zoomable image and classification
+             image_zoom(parsed_image, mode="dragmove", size=(700, 500), keep_aspect_ratio=True, zoom_factor=2.0, increment=0.2)
+             st.success(f'**Prediction ->** {classification}')
+         else:
+             st.warning("Please upload an image or select an existing one for prediction.")
+
+     with model_tab:
+         st.header('Model Performance')
+         st.write('The DR images are used to train a basic CNN architecture on a multi-class classification task.')
+         with st.expander('Click to see PyTorch class architecture'):
+             arc = """
+ class DR_Classifierv2(nn.Module):
+     def __init__(self, output_shape: int, input_shape: int = 3, hidden_units: int = 64):
+         super().__init__()
+
+         self.block1 = nn.Sequential(
+             nn.Conv2d(input_shape, hidden_units, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units),
+             nn.Conv2d(hidden_units, hidden_units, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units),
+             nn.MaxPool2d(2),
+             nn.Dropout(0.3)
+         )
+
+         self.block2 = nn.Sequential(
+             nn.Conv2d(hidden_units, hidden_units * 2, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 2),
+             nn.Conv2d(hidden_units * 2, hidden_units * 2, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 2),
+             nn.MaxPool2d(2),
+             nn.Dropout(0.4)
+         )
+
+         self.block3 = nn.Sequential(
+             nn.Conv2d(hidden_units * 2, hidden_units * 4, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 4),
+             nn.Conv2d(hidden_units * 4, hidden_units * 4, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 4),
+             nn.MaxPool2d(2),
+             nn.Dropout(0.4)
+         )
+
+         self.block4 = nn.Sequential(
+             nn.Conv2d(hidden_units * 4, hidden_units * 8, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 8),
+             nn.Conv2d(hidden_units * 8, hidden_units * 8, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 8),
+             nn.MaxPool2d(2),
+             nn.Dropout(0.5)
+         )
+
+         self.adaptiveAvgPool = nn.AdaptiveAvgPool2d(1)
+
+         self.classifier = nn.Sequential(
+             nn.Flatten(),
+             nn.Linear(hidden_units * 8, 512),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm1d(512),
+             nn.Dropout(0.6),
+             nn.Linear(512, output_shape)
+         )
+
+     def forward(self, x: torch.Tensor):
+         x = self.block1(x)
+         x = self.block2(x)
+         x = self.block3(x)
+         x = self.block4(x)
+         x = self.adaptiveAvgPool(x)
+         x = self.classifier(x)
+         return x"""
+             st.code(arc)
+
+         st.image('modelv2_output.png', caption="Model's accuracy and loss curves")
+         st.write('Hyperparameters:')
+
+         col1, col2 = st.columns(2)
+
+         with col1:
+             st.caption('**Epochs**: `n_epochs=30`')
+             st.caption('**Learning Rate**: `lr=0.00001`')
+             st.caption('**Scheduler**: `ReduceLROnPlateau(optimizer, mode="min", patience=5)`')
+
+         with col2:
+             st.caption('**Data Loader Batches**: `DataLoader(train_dataset, batch_size=64, shuffle=True)`')
+             st.caption('**Loss Function**: `nn.CrossEntropyLoss()`')
+             st.caption('**Optimizer**: `optim.Adam(params=model_2.parameters(), lr=0.00001, weight_decay=1e-4)`')
+
+     with data_tab:
+         col1, col2 = st.columns(2)
+
+         with col1:
+             dataset_path = Path('./dataset/colored_images/')
+
+             # Pick a random class directory, then a random image from it
+             labels = next(os.walk(dataset_path))[1]
+             random_label = random.choice(labels)
+
+             label_dir = dataset_path / random_label
+
+             # Get random image
+             random_image = label_dir / random.choice(os.listdir(label_dir))
+
+             parsed_image = Image.open(random_image).convert("RGB")
+
+             st.image(parsed_image, caption='Sample image from the dataset', width=300)
+
+         with col2:
+             st.header('APTOS 2019 Blindness Detection')
+             st.caption('The dataset consists of retina scan (fundus) images used to detect diabetic retinopathy. The original data come from the APTOS 2019 Blindness Detection competition. The images are resized to 224x224 pixels so they can be readily used with many pre-trained deep learning models, and each image is saved into a folder named after its severity/stage of diabetic retinopathy according to the provided train.csv file, giving five directories of images.')
+             url = 'https://www.kaggle.com/datasets/sovitrath/diabetic-retinopathy-224x224-2019-data'
+             st.caption('For more details about the dataset, visit [Kaggle](%s).' % url)
+
+
+ if __name__ == '__main__':
+     main()
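
Note: the "About the Model" tab above only lists the training hyperparameters as captions. The snippet below is a minimal sketch of how those settings fit together in a standard PyTorch training loop; it is not part of the commit. The dummy TensorDataset and the loss value fed to the scheduler are placeholders, and the actual training code presumably lives in model.ipynb.

# Sketch only: wires up the hyperparameters listed in the "About the Model" tab.
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset
from architecture import DR_Classifierv2

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model_2 = DR_Classifierv2(input_shape=3, output_shape=5, hidden_units=64).to(device)

loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(params=model_2.parameters(), lr=0.00001, weight_decay=1e-4)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode="min", patience=5)

# Placeholder data standing in for the resized 224x224 APTOS images.
train_dataset = TensorDataset(torch.randn(128, 3, 224, 224), torch.randint(0, 5, (128,)))
train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True)

n_epochs = 30
for epoch in range(n_epochs):
    model_2.train()
    epoch_loss = 0.0
    for X, y in train_loader:
        X, y = X.to(device), y.to(device)
        optimizer.zero_grad()
        loss = loss_fn(model_2(X), y)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    # In the real setup a monitored (validation) loss would drive the scheduler,
    # which lowers the learning rate when that value plateaus for 5 epochs.
    scheduler.step(epoch_loss / len(train_loader))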
architecture.py ADDED
@@ -0,0 +1,70 @@
+ import torch
+ from torch import nn
+ import matplotlib.pyplot as plt
+
+ # CNN for DR severity classification: four convolutional blocks followed by
+ # global average pooling and a fully connected classifier head.
+ class DR_Classifierv2(nn.Module):
+     def __init__(self, output_shape: int, input_shape: int = 3, hidden_units: int = 64):
+         super().__init__()
+
+         self.block1 = nn.Sequential(
+             nn.Conv2d(input_shape, hidden_units, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units),
+             nn.Conv2d(hidden_units, hidden_units, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units),
+             nn.MaxPool2d(2),
+             nn.Dropout(0.3)
+         )
+
+         self.block2 = nn.Sequential(
+             nn.Conv2d(hidden_units, hidden_units * 2, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 2),
+             nn.Conv2d(hidden_units * 2, hidden_units * 2, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 2),
+             nn.MaxPool2d(2),
+             nn.Dropout(0.4)
+         )
+
+         self.block3 = nn.Sequential(
+             nn.Conv2d(hidden_units * 2, hidden_units * 4, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 4),
+             nn.Conv2d(hidden_units * 4, hidden_units * 4, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 4),
+             nn.MaxPool2d(2),
+             nn.Dropout(0.4)
+         )
+
+         self.block4 = nn.Sequential(
+             nn.Conv2d(hidden_units * 4, hidden_units * 8, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 8),
+             nn.Conv2d(hidden_units * 8, hidden_units * 8, kernel_size=3, padding='same'),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm2d(hidden_units * 8),
+             nn.MaxPool2d(2),
+             nn.Dropout(0.5)
+         )
+
+         self.adaptiveAvgPool = nn.AdaptiveAvgPool2d(1)
+
+         self.classifier = nn.Sequential(
+             nn.Flatten(),
+             nn.Linear(hidden_units * 8, 512),
+             nn.LeakyReLU(0.1),
+             nn.BatchNorm1d(512),
+             nn.Dropout(0.6),
+             nn.Linear(512, output_shape)
+         )
+
+     def forward(self, x: torch.Tensor):
+         x = self.block1(x)
+         x = self.block2(x)
+         x = self.block3(x)
+         x = self.block4(x)
+         x = self.adaptiveAvgPool(x)
+         x = self.classifier(x)
+         return x
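
A quick sanity check of the architecture above (not part of the commit): feed a dummy 224x224 RGB tensor through DR_Classifierv2 and confirm it returns one logit per severity class. eval() is needed because the final BatchNorm1d layer cannot compute batch statistics from a single sample in training mode.

import torch
from architecture import DR_Classifierv2

model = DR_Classifierv2(output_shape=5)   # defaults: input_shape=3, hidden_units=64
model.eval()                              # required for a batch of one (BatchNorm1d)
with torch.inference_mode():
    logits = model(torch.randn(1, 3, 224, 224))
print(logits.shape)  # torch.Size([1, 5]); argmax maps to one of the five DR severity labels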
model.ipynb CHANGED
The diff for this file is too large to render. See raw diff
 
modelv2_output.png ADDED
requirements.txt CHANGED
@@ -1,3 +1,6 @@
+ streamlit==1.42.0
+ streamlit-image-zoom
+ st-clickable-images
  torch==2.2.0
  torchvision==0.17.0
  torchmetrics==1.6.1
@@ -7,4 +10,5 @@ matplotlib==3.10.0
  jupyter==1.1.1
  ipykernel==6.29.5
  pathlib==1.0.1
+ protobuf==3.20.*
  kagglehub