TharunSiva committed · verified
Commit 1d0281d · Parent(s): df352b9

util files

Files changed (5)
  1. .gitattributes +1 -0
  2. ResUNet-segModel-weights.hdf5 +3 -0
  3. ResUNet.py +100 -0
  4. eff.py +145 -0
  5. vit.py +109 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+ResUNet-segModel-weights.hdf5 filter=lfs diff=lfs merge=lfs -text
ResUNet-segModel-weights.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:21efee8add11e7881172be0602d7e039271eb98bdaf7b085aa9546a2c710e424
+size 14961712
ResUNet.py ADDED
@@ -0,0 +1,100 @@
+import os
+import numpy as np
+import pandas as pd
+import cv2
+
+import tensorflow as tf
+from tensorflow.keras import Sequential
+from tensorflow.keras import layers, optimizers
+from tensorflow.keras.layers import *
+from tensorflow.keras.models import Model
+from tensorflow.keras.initializers import glorot_uniform
+from tensorflow.keras.utils import plot_model
+from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, LearningRateScheduler
+import tensorflow.keras.backend as K
+
+
+# Build the ResUNet segmentation model
+def resblock(X, f):
+    '''
+    Residual block: a two-convolution main path plus a 1x1 projection shortcut.
+    '''
+    X_copy = X  # keep the input for the shortcut path
+
+    # main path
+    X = Conv2D(f, kernel_size=(1,1), kernel_initializer='he_normal')(X)
+    X = BatchNormalization()(X)
+    X = Activation('relu')(X)
+
+    X = Conv2D(f, kernel_size=(3,3), padding='same', kernel_initializer='he_normal')(X)
+    X = BatchNormalization()(X)
+
+    # shortcut path
+    X_copy = Conv2D(f, kernel_size=(1,1), kernel_initializer='he_normal')(X_copy)
+    X_copy = BatchNormalization()(X_copy)
+
+    # add the outputs of the main path and the shortcut path
+    X = Add()([X, X_copy])
+    X = Activation('relu')(X)
+
+    return X
+
+def upsample_concat(x, skip):
+    '''
+    Upsample the feature map and concatenate it with the skip connection.
+    '''
+    X = UpSampling2D((2,2))(x)
+    merge = Concatenate()([X, skip])
+
+    return merge
+
+
+def load_model():
+
+    input_shape = (256,256,3)
+    X_input = Input(input_shape)  # input tensor
+
+    # Stage 1
+    conv_1 = Conv2D(16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(X_input)
+    conv_1 = BatchNormalization()(conv_1)
+    conv_1 = Conv2D(16, 3, activation='relu', padding='same', kernel_initializer='he_normal')(conv_1)
+    conv_1 = BatchNormalization()(conv_1)
+    pool_1 = MaxPool2D((2,2))(conv_1)
+
+    # Stage 2
+    conv_2 = resblock(pool_1, 32)
+    pool_2 = MaxPool2D((2,2))(conv_2)
+
+    # Stage 3
+    conv_3 = resblock(pool_2, 64)
+    pool_3 = MaxPool2D((2,2))(conv_3)
+
+    # Stage 4
+    conv_4 = resblock(pool_3, 128)
+    pool_4 = MaxPool2D((2,2))(conv_4)
+
+    # Stage 5 (bottleneck)
+    conv_5 = resblock(pool_4, 256)
+
+    # Upsample Stage 1
+    up_1 = upsample_concat(conv_5, conv_4)
+    up_1 = resblock(up_1, 128)
+
+    # Upsample Stage 2
+    up_2 = upsample_concat(up_1, conv_3)
+    up_2 = resblock(up_2, 64)
+
+    # Upsample Stage 3
+    up_3 = upsample_concat(up_2, conv_2)
+    up_3 = resblock(up_3, 32)
+
+    # Upsample Stage 4
+    up_4 = upsample_concat(up_3, conv_1)
+    up_4 = resblock(up_4, 16)
+
+    # final output: per-pixel sigmoid mask
+    out = Conv2D(1, (1,1), kernel_initializer='he_normal', padding='same', activation='sigmoid')(up_4)
+
+    seg_model = Model(X_input, out)
+
+    return seg_model
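
Usage sketch (not part of this commit): a minimal example of wiring load_model() to the LFS weights file added above, assuming that .hdf5 file holds weights for exactly this architecture; the zero batch is a stand-in for a real preprocessed MRI image.

import numpy as np
from ResUNet import load_model

seg_model = load_model()
seg_model.load_weights('ResUNet-segModel-weights.hdf5')  # LFS file from this commit; assumed to match this architecture

batch = np.zeros((1, 256, 256, 3), dtype=np.float32)  # placeholder input batch
mask = seg_model.predict(batch)  # (1, 256, 256, 1) per-pixel sigmoid mask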
eff.py ADDED
@@ -0,0 +1,123 @@
+import os
+import cv2
+import torch
+import torchvision
+from torch import nn
+from torchvision import transforms
+from PIL import Image
+
+class_names = ["Glioma Tumor", "Meningioma Tumor", "No Tumor", "Pituitary Tumor"]
+
+class CFG:
+    DEVICE = 'cpu'
+    NUM_DEVICES = torch.cuda.device_count()
+    NUM_WORKERS = os.cpu_count()
+    NUM_CLASSES = 4
+    EPOCHS = 16
+    BATCH_SIZE = 32
+    LR = 0.001
+    APPLY_SHUFFLE = True
+    SEED = 768
+    HEIGHT = 224
+    WIDTH = 224
+    CHANNELS = 3
+    IMAGE_SIZE = (224, 224, 3)
+
+
+class EfficientNetV2Model(nn.Module):
+    def __init__(self, backbone_model, name='efficientnet-v2-large',
+                 num_classes=CFG.NUM_CLASSES, device=CFG.DEVICE):
+        super(EfficientNetV2Model, self).__init__()
+
+        self.backbone_model = backbone_model
+        self.device = device
+        self.num_classes = num_classes
+        self.name = name
+
+        # custom classification head replacing the backbone's classifier
+        classifier = nn.Sequential(
+            nn.Flatten(),
+            nn.Dropout(p=0.2, inplace=True),
+            nn.Linear(in_features=1280, out_features=256, bias=True),
+            nn.GELU(),
+            nn.Dropout(p=0.2, inplace=True),
+            nn.Linear(in_features=256, out_features=num_classes, bias=False)
+        ).to(device)
+
+        self._set_classifier(classifier)
+
+    def _set_classifier(self, classifier: nn.Module) -> None:
+        self.backbone_model.classifier = classifier
+
+    def forward(self, image):
+        return self.backbone_model(image)
+
+
+def get_efficientnetv2_model(
+        device: torch.device = CFG.DEVICE) -> nn.Module:
+    # Set the manual seeds
+    torch.manual_seed(CFG.SEED)
+    torch.cuda.manual_seed(CFG.SEED)
+
+    # Get pretrained model weights
+    model_weights = (
+        torchvision
+        .models
+        .EfficientNet_V2_L_Weights
+        .DEFAULT
+    )
+
+    # Get model and push to device
+    model = (
+        torchvision.models.efficientnet_v2_l(
+            weights=model_weights
+        )
+    ).to(device)
+
+    # Freeze backbone feature parameters
+    for param in model.features.parameters():
+        param.requires_grad = False
+
+    return model
+
+
+# Get the EfficientNetV2 backbone
+backbone_model = get_efficientnetv2_model(CFG.DEVICE)
+
+efficientnetv2_params = {
+    'backbone_model': backbone_model,
+    'name': 'efficientnet-v2-large',
+    'device': CFG.DEVICE
+}
+
+# Build the model and load the fine-tuned weights
+efficientnet_model = EfficientNetV2Model(**efficientnetv2_params)
+
+efficientnet_model.load_state_dict(
+    torch.load('efficientnetV2.pth', map_location=torch.device('cpu'))
+)
+efficientnet_model.eval()  # disable dropout for inference
+
+
+def predict_eff(image_path):
+    # Define the image transformation
+    transform = transforms.Compose([
+        transforms.Resize((224, 224)),
+        transforms.ToTensor()
+    ])
+
+    # Load and preprocess the image
+    image = Image.open(image_path).convert('RGB')
+    input_tensor = transform(image)
+    input_batch = input_tensor.unsqueeze(0).to(CFG.DEVICE)  # add batch dimension
+
+    # Perform inference
+    with torch.no_grad():
+        output = efficientnet_model(input_batch)
+
+    # Map logits to per-class probabilities
+    res = torch.softmax(output, dim=1)
+
+    probs = {class_names[i]: float(res[0][i]) for i in range(len(class_names))}
+
+    return probs
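
Usage sketch (assumptions flagged): eff.py loads efficientnetV2.pth at import time, so that weights file must sit next to the script; 'sample_mri.jpg' below is a hypothetical path, not a file in this repo.

from eff import predict_eff

probs = predict_eff('sample_mri.jpg')  # hypothetical image path
print(max(probs, key=probs.get))  # label with the highest probability
print(probs)  # full {class name: probability} dictionary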
vit.py ADDED
@@ -0,0 +1,112 @@
+import os
+import torch
+import torchvision
+from torch import nn
+from torchvision import transforms
+from PIL import Image
+
+class CFG:
+    DEVICE = 'cpu'
+    NUM_DEVICES = torch.cuda.device_count()
+    NUM_WORKERS = os.cpu_count()
+    NUM_CLASSES = 4
+    EPOCHS = 16
+    BATCH_SIZE = 32
+    LR = 0.001
+    APPLY_SHUFFLE = True
+    SEED = 768
+    HEIGHT = 224
+    WIDTH = 224
+    CHANNELS = 3
+    IMAGE_SIZE = (224, 224, 3)
+
+
+class VisionTransformerModel(nn.Module):
+    def __init__(self, backbone_model, name='vision-transformer',
+                 num_classes=CFG.NUM_CLASSES, device=CFG.DEVICE):
+        super(VisionTransformerModel, self).__init__()
+
+        self.backbone_model = backbone_model
+        self.device = device
+        self.num_classes = num_classes
+        self.name = name
+
+        # classification head on top of the backbone's 1000-dim output
+        self.classifier = nn.Sequential(
+            nn.Flatten(),
+            nn.Dropout(p=0.2, inplace=True),
+            nn.Linear(in_features=1000, out_features=256, bias=True),
+            nn.GELU(),
+            nn.Dropout(p=0.2, inplace=True),
+            nn.Linear(in_features=256, out_features=num_classes, bias=False)
+        ).to(device)
+
+    def forward(self, image):
+        vit_output = self.backbone_model(image)
+        return self.classifier(vit_output)
+
+
+def get_vit_l32_model(
+        device: torch.device = CFG.DEVICE) -> nn.Module:
+    # Set the manual seeds
+    torch.manual_seed(CFG.SEED)
+    torch.cuda.manual_seed(CFG.SEED)
+
+    # Get pretrained model weights
+    model_weights = (
+        torchvision
+        .models
+        .ViT_L_32_Weights
+        .DEFAULT
+    )
+
+    # Get model and push to device
+    model = (
+        torchvision.models.vit_l_32(
+            weights=model_weights
+        )
+    ).to(device)
+
+    # Freeze model parameters
+    for param in model.parameters():
+        param.requires_grad = False
+
+    return model
+
+
+# Get the ViT-L/32 backbone
+vit_backbone = get_vit_l32_model(CFG.DEVICE)
+
+vit_params = {
+    'backbone_model': vit_backbone,
+    'name': 'ViT-L-32',
+    'device': CFG.DEVICE
+}
+
+# Build the model and load the fine-tuned weights
+vit_model = VisionTransformerModel(**vit_params)
+
+vit_model.load_state_dict(
+    torch.load('vit_model.pth', map_location=torch.device('cpu'))
+)
+vit_model.eval()  # disable dropout for inference
+
+
+# Define the image transformation
+transform = transforms.Compose([
+    transforms.Resize((224, 224)),
+    transforms.ToTensor()
+])
+
+
+def predict(image_path):
+    image = Image.open(image_path).convert('RGB')
+    input_tensor = transform(image)
+    input_batch = input_tensor.unsqueeze(0).to(CFG.DEVICE)  # add batch dimension
+
+    # Perform inference
+    with torch.no_grad():
+        output = vit_model(input_batch)
+
+    # raw logits of shape (1, NUM_CLASSES)
+    return output
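
Usage sketch (assumptions flagged): unlike predict_eff in eff.py, predict returns raw logits, so the caller applies softmax; 'sample_mri.jpg' is a hypothetical path, and the class order is assumed to match class_names in eff.py, since vit.py defines no labels of its own.

import torch
from vit import predict

logits = predict('sample_mri.jpg')  # hypothetical image path; shape (1, 4)
probs = torch.softmax(logits, dim=1)[0]
pred_idx = int(probs.argmax())  # index into the assumed class order from eff.py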