cycool29 committed on
Commit 01b64d6 · 1 Parent(s): 00bdf50
Files changed (8)
  1. app.py +1 -1
  2. augment.py +9 -1
  3. configs.py +83 -12
  4. convert.py +12 -2
  5. eval.py +2 -0
  6. models.py +0 -1
  7. train.py +5 -2
  8. tuning.py +1 -3
app.py CHANGED
@@ -30,7 +30,7 @@ def process_file(webcam_filepath, upload_filepath):
 
 
 demo = gr.Interface(
-    theme='gradio/soft',
+    theme="gradio/soft",
     fn=process_file,
     title="HANDETECT",
     description="An innovative AI-powered system that facilitates early detection and monitoring of movement disorders through handwriting assessment",
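The only code change in this hunk is normalizing the theme string to double quotes (Black-style formatting). For context, a minimal sketch of how such an interface is typically wired up and launched; the input/output components and the launch() call are assumptions, since the rest of app.py is not shown in this hunk:

    # Hypothetical sketch around the gr.Interface call above; only the keyword
    # arguments visible in the diff come from the commit itself.
    import gradio as gr

    def process_file(webcam_filepath, upload_filepath):
        # Placeholder: the real function runs the HANDETECT model on the drawing.
        return {"Healthy": 1.0}

    demo = gr.Interface(
        theme="gradio/soft",
        fn=process_file,
        title="HANDETECT",
        description="An innovative AI-powered system that facilitates early "
        "detection and monitoring of movement disorders through handwriting "
        "assessment",
        inputs=[gr.Image(type="filepath"), gr.Image(type="filepath")],  # assumed
        outputs=gr.Label(),  # assumed
    )

    if __name__ == "__main__":
        demo.launch()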
augment.py CHANGED
@@ -8,7 +8,15 @@ tasks = ["1", "2", "3", "4", "5", "6"]
 
 for task in ["1"]:
     # Loop through all folders in Task 1 and generate augmented images for each class
-    for class_label in ['Alzheimer Disease', 'Cerebral Palsy', 'Dystonia', 'Essential Tremor', 'Healthy', 'Huntington Disease', 'Parkinson Disease']:
+    for class_label in [
+        "Alzheimer Disease",
+        "Cerebral Palsy",
+        "Dystonia",
+        "Essential Tremor",
+        "Healthy",
+        "Huntington Disease",
+        "Parkinson Disease",
+    ]:
         if class_label != ".DS_Store":
             print("Augmenting images in class: ", class_label, " in Task ", task)
             # Create a temp folder to combine the raw data and the external data
configs.py CHANGED
@@ -4,15 +4,21 @@ from torchvision import transforms
 from torch.utils.data import Dataset
 from models import *
 import torch.nn as nn
-from torchvision.models import squeezenet1_0, SqueezeNet1_0_Weights
+from torchvision.models import (
+    squeezenet1_0,
+    SqueezeNet1_0_Weights,
+    mobilenet_v3_small,
+    MobileNet_V3_Small_Weights,
+)
 from torchvision.models import squeezenet1_0
+
 # Constants
 RANDOM_SEED = 123
-BATCH_SIZE = 16
+BATCH_SIZE = 32
 NUM_EPOCHS = 40
-LEARNING_RATE = 5.488903014780378e-05
+LEARNING_RATE = 0.005873024218838728
 STEP_SIZE = 10
-GAMMA = 0.3
+GAMMA = 0.9
 DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 NUM_PRINT = 100
 TASK = 1
@@ -22,21 +28,28 @@ EXTERNAL_DATA_DIR = r"data/train/external/Task "
 TEMP_DATA_DIR = "data/temp/"
 NUM_CLASSES = 7
 EARLY_STOPPING_PATIENCE = 20
-CLASSES = ['Alzheimer Disease', 'Cerebral Palsy', 'Dystonia', 'Essential Tremor', 'Healthy', 'Huntington Disease', 'Parkinson Disease']
+CLASSES = [
+    "Alzheimer Disease",
+    "Cerebral Palsy",
+    "Dystonia",
+    "Essential Tremor",
+    "Healthy",
+    "Huntington Disease",
+    "Parkinson Disease",
+]
 MODEL_SAVE_PATH = "output/checkpoints/model.pth"
 
 
-
 class SqueezeNet1_0WithDropout(nn.Module):
     def __init__(self, num_classes=1000):
         super(SqueezeNet1_0WithDropout, self).__init__()
-        squeezenet = squeezenet1_0(weights=SqueezeNet1_0_Weights)
+        squeezenet = squeezenet1_0(weights=SqueezeNet1_0_Weights.DEFAULT)
         self.features = squeezenet.features
         self.classifier = nn.Sequential(
             nn.Conv2d(512, num_classes, kernel_size=1),
             nn.BatchNorm2d(num_classes),  # add batch normalization
             nn.ReLU(inplace=True),
-            nn.AdaptiveAvgPool2d((1, 1))
+            nn.AdaptiveAvgPool2d((1, 1)),
         )
 
     def forward(self, x):
@@ -44,9 +57,67 @@ class SqueezeNet1_0WithDropout(nn.Module):
         x = self.classifier(x)
         x = torch.flatten(x, 1)
         return x
-
-
-
+
+
+# class ShuffleNetV2WithDropout(nn.Module):
+#     def __init__(self, num_classes=1000):
+#         super(ShuffleNetV2WithDropout, self).__init__()
+#         shufflenet = shufflenet_v2_x2_0(weights=ShuffleNet_V2_X2_0_Weights)
+#         self.features = shufflenet.features
+#         self.classifier = nn.Sequential(
+#             nn.Conv2d(1024, num_classes, kernel_size=1),
+#             nn.BatchNorm2d(num_classes),  # add batch normalization
+#             nn.ReLU(inplace=True),
+#             nn.AdaptiveAvgPool2d((1, 1))
+#         )
+
+#     def forward(self, x):
+#         x = self.features(x)
+#         x = self.classifier(x)
+#         x = torch.flatten(x, 1)
+#         return x
+
+
+class MobileNetV3SmallWithDropout(nn.Module):
+    def __init__(self, num_classes=1000):
+        super(MobileNetV3SmallWithDropout, self).__init__()
+        mobilenet = mobilenet_v3_small(weights=MobileNet_V3_Small_Weights)
+        self.features = mobilenet.features
+        self.classifier = nn.Sequential(
+            nn.Conv2d(576, num_classes, kernel_size=1),
+            nn.BatchNorm2d(num_classes),  # add batch normalization
+            nn.ReLU(inplace=True),
+            nn.AdaptiveAvgPool2d((1, 1)),
+        )
+
+    def forward(self, x):
+        x = self.features(x)
+        x = self.classifier(x)
+        x = torch.flatten(x, 1)
+        return x
+
+
+class ResNet18WithNorm(nn.Module):
+    def __init__(self, num_classes=1000):
+        super(ResNet18WithNorm, self).__init__()
+        resnet = resnet18(pretrained=False)
+        self.features = nn.Sequential(
+            *list(resnet.children())[:-2]
+        )  # Remove last 2 layers (avgpool and fc)
+        self.classifier = nn.Sequential(
+            nn.AdaptiveAvgPool2d((1, 1)),
+            nn.Flatten(),
+            nn.Linear(512, num_classes),
+            nn.BatchNorm2d(num_classes),  # Add batch normalization
+        )
+
+    def forward(self, x):
+        x = self.features(x)
+        x = self.classifier(x)
+        x = torch.flatten(x, 1)
+        return x
+
+
 MODEL = SqueezeNet1_0WithDropout(num_classes=7)
 print(CLASSES)
 
@@ -70,4 +141,4 @@ class CustomDataset(Dataset):
 
     def __getitem__(self, idx):
         img, label = self.data[idx]
-        return img, label
+        return img, label
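The classifier heads added above all follow the same pattern: drop the backbone's stock classifier, apply a 1x1 convolution down to the number of classes, batch-normalize, then global-average-pool and flatten to logits. A minimal smoke test of that pattern, assuming this configs.py is importable from the repo root (the 64x64 input size mirrors what convert.py exports with):

    # Shape check for the new classifier heads; assumes configs.py from this
    # commit is on the import path. Not part of the commit itself.
    import torch
    from configs import MODEL, NUM_CLASSES

    MODEL.eval()  # keep BatchNorm layers in inference mode for a single sample
    with torch.no_grad():
        logits = MODEL(torch.randn(1, 3, 64, 64))

    print(logits.shape)  # expected: torch.Size([1, 7]), i.e. (batch, NUM_CLASSES)
    assert logits.shape == (1, NUM_CLASSES)

One caveat on the new ResNet18WithNorm head: it applies nn.BatchNorm2d to the 2-D output of nn.Linear, which would raise a shape error if that class were ever selected as MODEL; nn.BatchNorm1d is the usual choice at that point.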
convert.py CHANGED
@@ -2,6 +2,16 @@ import torch
 import onnx2tf
 from configs import *
 
-torch.onnx.export(model=MODEL, args=torch.randn(1, 3, 64, 64), f='output/checkpoints/model.onnx', verbose=True, input_names=['input'], output_names=['output'])
+torch.onnx.export(
+    model=MODEL,
+    args=torch.randn(1, 3, 64, 64),
+    f="output/checkpoints/model.onnx",
+    verbose=True,
+    input_names=["input"],
+    output_names=["output"],
+)
 
-onnx2tf.convert(input_onnx_file_path='output/checkpoints/model.onnx', output_folder_path='output/checkpoints/converted/')
+onnx2tf.convert(
+    input_onnx_file_path="output/checkpoints/model.onnx",
+    output_folder_path="output/checkpoints/converted/",
+)
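The reformatted export keeps the same named graph inputs and outputs, so the resulting file can be sanity-checked independently of onnx2tf. A small sketch using onnxruntime, which is an assumed extra dependency and not used anywhere in this commit:

    # Optional check that the exported graph runs; onnxruntime is an assumption.
    import numpy as np
    import onnxruntime as ort

    sess = ort.InferenceSession("output/checkpoints/model.onnx")
    dummy = np.random.randn(1, 3, 64, 64).astype(np.float32)

    # "input" and "output" match the names passed to torch.onnx.export above.
    (logits,) = sess.run(["output"], {"input": dummy})
    print(logits.shape)  # expected: (1, 7)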
eval.py CHANGED
@@ -18,6 +18,7 @@ MODEL = MODEL.to(DEVICE)
 MODEL.load_state_dict(torch.load(MODEL_SAVE_PATH, map_location=DEVICE))
 MODEL.eval()
 
+
 def predict_image(image_path, model, transform):
     model.eval()
     correct_predictions = 0
@@ -74,5 +75,6 @@ def predict_image(image_path, model, transform):
     conf_matrix.plot()
     plt.show()
 
+
 # Call predict_image function
 predict_image(image_path, MODEL, preprocess)
models.py CHANGED
@@ -39,4 +39,3 @@ from torchvision.models import efficientnet_v2_m
 from torchvision.models import efficientnet_v2_l
 from torchvision.models import efficientnet_b0
 from torchvision.models import efficientnet_b1
-
 
train.py CHANGED
@@ -15,7 +15,10 @@ def setup_tensorboard():
 
 def load_and_preprocess_data():
     return data_loader.load_data(
-        RAW_DATA_DIR + str(TASK), AUG_DATA_DIR + str(TASK), EXTERNAL_DATA_DIR + str(TASK), preprocess
+        RAW_DATA_DIR + str(TASK),
+        AUG_DATA_DIR + str(TASK),
+        EXTERNAL_DATA_DIR + str(TASK),
+        preprocess,
     )
 
 
@@ -194,4 +197,4 @@ def main_training_loop():
 
 
 if __name__ == "__main__":
-    main_training_loop()
+    main_training_loop()
tuning.py CHANGED
@@ -115,9 +115,7 @@ if __name__ == "__main__":
     )
 
     # Optimize the hyperparameters
-    study.optimize(
-        objective, n_trials=N_TRIALS, timeout=TIMEOUT
-    )
+    study.optimize(objective, n_trials=N_TRIALS, timeout=TIMEOUT)
 
     # Print the best trial
     best_trial = study.best_trial
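The call being collapsed onto one line here is the standard Optuna entry point. A stripped-down sketch of the surrounding pattern; the objective below is a toy stand-in, since the real one in tuning.py trains the HANDETECT model, and the constant values are assumptions:

    # Minimal Optuna pattern mirroring the study.optimize() call above.
    import optuna

    N_TRIALS = 20    # assumed; tuning.py defines its own N_TRIALS / TIMEOUT
    TIMEOUT = 3600

    def objective(trial):
        lr = trial.suggest_float("lr", 1e-5, 1e-1, log=True)
        gamma = trial.suggest_float("gamma", 0.1, 0.99)
        # The real objective trains MODEL with the sampled hyperparameters and
        # returns a validation metric; here we just return a dummy score.
        return lr * gamma

    study = optuna.create_study(direction="maximize")
    study.optimize(objective, n_trials=N_TRIALS, timeout=TIMEOUT)

    best_trial = study.best_trial
    print(best_trial.params)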