cycool29 committed on
Commit c45c4e1 · 1 Parent(s): 7af2ec9
Files changed (12)
  1. __init.py +0 -0
  2. app.py +135 -48
  3. calculate.py +0 -18
  4. eda.py +41 -34
  5. eval_orig.py +30 -7
  6. extract.py +26 -20
  7. lime_eval.py +108 -0
  8. lrp-eval.py +16 -0
  9. plot-gradcam.py +57 -22
  10. shap_eval.py +31 -43
  11. test-speed.py +93 -0
  12. testing.py +0 -5
__init.py DELETED
File without changes
app.py CHANGED
@@ -1,63 +1,150 @@
 import gradio as gr
 import predict as predict
 import extract as extract
+import lime_eval as lime_eval
+

 def upload_file(files):
     file_paths = [file.name for file in files]
     return file_paths


-def process_file(webcam_filepath, upload_filepath, ):
+def process_file(
+    upload_filepath,
+    gradcam_toggle,
+    lime_toggle,
+):
+    print("Upload filepath:", upload_filepath)
+    print("GradCAM toggle:", gradcam_toggle)
+    print("LIME toggle:", lime_toggle)
     result = []
-    if webcam_filepath == None:
-        sorted_classes = predict.predict_image(upload_filepath)
-        for class_label, class_prob in sorted_classes:
-            class_prob = class_prob.item().__round__(2)
-            result.append(f"{class_label}: {class_prob}%")
+    sorted_classes = predict.predict_image(upload_filepath)
+    for class_label, class_prob in sorted_classes:
+        class_prob = class_prob.item().__round__(2)
+        result.append(f"{class_label}: {class_prob}%")
+    result = result[:4]
+    if gradcam_toggle == True:
         cam = extract.extract_gradcam(upload_filepath, save_path="gradcam.jpg")
-        result = result[:3]
-        result.append("gradcam.jpg")
-        return result
-    elif upload_filepath == None:
-        sorted_classes = predict.predict_image(webcam_filepath)
-        for class_label, class_prob in sorted_classes:
-            class_prob = class_prob.item().__round__(2)
-            result.append(f"{class_label}: {class_prob}%")
-        cam = extract.extract_gradcam(webcam_filepath, save_path="gradcam.jpg")
-        result = result[:3]
         result.append("gradcam.jpg")
-        return result
     else:
-        sorted_classes = predict.predict_image(upload_filepath)
-        for class_label, class_prob in sorted_classes:
-            class_prob = class_prob.item().__round__(2)
-            result.append(f"{class_label}: {class_prob}%")
-        cam = extract.extract_gradcam(upload_filepath, save_path="gradcam.jpg")
-        # Only keep the first 3 results
-        result = result[:3]
-        result.append("gradcam.jpg")
-        return result
-
-
-
-demo = gr.Interface(
-    theme="gradio/soft",
-    fn=process_file,
-    title="HANDETECT",
-
-    inputs=[
-        gr.components.Image(type="filepath", label="Choose Image", source="upload"),
-    ],
-    outputs=[
-        gr.outputs.Textbox(label="Probability 1"),
-        gr.outputs.Textbox(label="Probability 2"),
-        gr.outputs.Textbox(label="Probability 3"),
-        # GradCAM
-        gr.outputs.Image(label="GradCAM++", type="filepath"),
-
-
-    ],
-
-)
+        result.append(None)
+    if lime_toggle == True:
+        lime = lime_eval.generate_lime(upload_filepath, save_path="lime.jpg")
+        result.append("lime.jpg")
+    else:
+        result.append(None)
+    return result
+    # else:
+    #     sorted_classes = predict.predict_image(upload_filepath)
+    #     for class_label, class_prob in sorted_classes:
+    #         class_prob = class_prob.item().__round__(2)
+    #         result.append(f"{class_label}: {class_prob}%")
+    #     result = result[:4]
+    #     if gradcam_toggle == 1:
+    #         cam = extract.extract_gradcam(upload_filepath, save_path="gradcam.jpg")
+    #         result.append("gradcam.jpg")
+    #     if lime_toggle == 1:
+    #         lime = lime_eval.generate_lime(upload_filepath, save_path="lime.jpg")
+    #         result.append("lime.jpg")
+    #     return result
+
+
+# Prerun to initialize the model
+# process_file(None, r"data\test\Task 1\Dystonia\0c08d2ea-8e1c-4ac6-92db-a752388b30cf.png")
+
+css = """
+.block {
+    margin-left: auto;
+    margin-right: auto;
+    width: 100%;
+}
+#image_input {
+    width: 200% !important;
+}
+#image_input img {
+    width: 200% !important;
+}
+.output-image {
+    width: 70% !important;
+    text-align: -webkit-center !important;
+}
+.output-image img {
+    width: 300px !important;
+}
+.toggle {
+    width: 17% !important;
+}
+.show-api {
+    visibility: hidden !important;
+}
+
+.built-with {
+    visibility: hidden !important;
+}
+"""
+
+block = gr.Blocks(title="HANDETECT", css=css, theme="gradio/soft")
+
+block.queue()
+block.title = "HANDETECT"
+
+with block as demo:
+    with gr.Column():
+        with gr.Row():
+            image_input = gr.Image(
+                type="filepath",
+                label="Choose Image",
+                source="upload",
+                elem_id="image_input",
+            )
+            with gr.Column():
+                gradcam_toggle = gr.Checkbox(
+                    label="GradCAM", default=False
+                )
+                lime_toggle = gr.Checkbox(
+                    label="LIME", default=False
+                )
+        with gr.Row():
+            submit_button = gr.Button(value="Submit")
+            # cancel_button = gr.Button(value="Cancel")
+        # theme="gradio/soft",
+        # fn=process_file,
+        # title="HANDETECT",
+        # outputs=[
+        with gr.Row():
+            prob1_textbox = gr.outputs.Textbox(label="Probability 1")
+            prob2_textbox = gr.outputs.Textbox(label="Probability 2")
+            prob3_textbox = gr.outputs.Textbox(label="Probability 3")
+            prob4_textbox = gr.outputs.Textbox(label="Probability 4")
+        # GradCAM
+        with gr.Row():
+            gradcam_output = gr.Image(
+                label="Feature Explanation",
+                type="filepath",
+                elem_classes=["output-image"],
+            )
+            lime_output = gr.Image(
+                label="Feature Explanation",
+                type="filepath",
+                elem_classes=["output-image"],
+            )
+
+    submit_button.click(
+        process_file,
+        [image_input, gradcam_toggle, lime_toggle],
+        [
+            prob1_textbox,
+            prob2_textbox,
+            prob3_textbox,
+            prob4_textbox,
+            gradcam_output,
+            lime_output,
+        ],
+        show_progress="minimal",
+        preprocess=upload_file,
+        scroll_to_output=True,
+        # cancels=[cancel_button],
+    )
+

 demo.launch()
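Note: the core change in app.py is replacing the one-shot gr.Interface with the gr.Blocks API so the GradCAM++ and LIME outputs can be toggled per request. Below is a minimal sketch of that wiring, assuming Gradio 3.x (the names are illustrative, not from the repo). Two caveats against the committed code: in 3.x gr.Checkbox takes value= rather than default=, and the preprocess argument of .click() is documented as a boolean, so passing upload_file there may not behave as intended.

import gradio as gr

def process(filepath, use_gradcam):
    # stand-in for predict.predict_image() + extract.extract_gradcam()
    label = f"processed {filepath}"
    # returning None leaves the corresponding output component empty
    return label, ("gradcam.jpg" if use_gradcam else None)

with gr.Blocks(theme="gradio/soft") as demo:
    with gr.Row():
        image_in = gr.Image(type="filepath", label="Choose Image")
        gradcam_cb = gr.Checkbox(label="GradCAM", value=False)
    submit = gr.Button(value="Submit")
    label_out = gr.Textbox(label="Prediction")
    cam_out = gr.Image(type="filepath", label="GradCAM++")
    # click() maps input components to fn arguments and fn returns to outputs
    submit.click(process, [image_in, gradcam_cb], [label_out, cam_out])

demo.launch()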
calculate.py DELETED
@@ -1,18 +0,0 @@
-from scipy.optimize import linprog
-
-# Coefficients for the objective function (negative because linprog does minimization)
-c = [-0.88, -0.88, -0.85]
-
-# Coefficients for the inequality constraint (sum of weights = 1)
-A = [[1, 1, 1]]
-b = [1]
-
-# Bounds for each weight (between 0 and 1)
-bounds = [(0, 1), (0, 1), (0, 1)]
-
-# Solve the linear programming problem
-result = linprog(c, A_eq=A, b_eq=b, bounds=bounds)
-
-# The optimal weights
-optimal_weights = result.x
-print("Optimal weights:", optimal_weights)
eda.py CHANGED
@@ -5,10 +5,10 @@ import matplotlib.pyplot as plt
 import seaborn as sns
 from matplotlib import rcParams

-rcParams['font.family'] = 'Times New Roman'
+rcParams["font.family"] = "Times New Roman"

 # Define the directory where your dataset is located
-dataset_directory = 'data/train/combined/Task 1/'
+dataset_directory = "data/train/combined/Task 1/"

 # Create a list of class labels based on subdirectories in the dataset directory
 class_labels = os.listdir(dataset_directory)
@@ -18,7 +18,7 @@ num_samples_per_class = []
 class_labels_processed = []

 # Initialize an empty DataFrame to store image dimensions
-image_dimensions_df = pd.DataFrame(columns=['Height', 'Width'])
+image_dimensions_df = pd.DataFrame(columns=["Height", "Width"])

 # Initialize a dictionary to store a random sample of images from each class
 sampled_images = {label: [] for label in class_labels}
@@ -30,74 +30,81 @@ for label in class_labels:
     num_samples = len(os.listdir(class_directory))
     num_samples_per_class.append(num_samples)
     class_labels_processed.append(label)
-
+
     # Extract image dimensions and add them to the DataFrame
     for image_file in os.listdir(class_directory):
        image_path = os.path.join(class_directory, image_file)
        image = plt.imread(image_path)
        height, width, _ = image.shape
-        image_dimensions_df = image_dimensions_df._append({'Height': height, 'Width': width}, ignore_index=True)
-
+        image_dimensions_df = image_dimensions_df._append(
+            {"Height": height, "Width": width}, ignore_index=True
+        )
+
        # Randomly sample 5 images from each class for visualization
        if len(sampled_images[label]) < 5:
            sampled_images[label].append(image)

 # Create a Pandas DataFrame for EDA
-eda_data = pd.DataFrame({'Class Label': class_labels_processed, 'Number of Samples': num_samples_per_class})
+eda_data = pd.DataFrame(
+    {"Class Label": class_labels_processed, "Number of Samples": num_samples_per_class}
+)

 # Plot the number of samples per class
 plt.figure(figsize=(10, 6))
-sns.barplot(x='Class Label', y='Number of Samples', data=eda_data)
-plt.title('Number of Samples per Class')
+sns.barplot(x="Class Label", y="Number of Samples", data=eda_data)
+plt.title("Number of Samples per Class")
 plt.xticks(rotation=45)
-plt.xlabel('Class Label')
-plt.ylabel('Number of Samples')
-plt.savefig('docs/eda/Number of Samples per Class.png')
+plt.xlabel("Class Label")
+plt.ylabel("Number of Samples")
+plt.subplots_adjust(
+    top=0.88, bottom=0.21, left=0.125, right=0.9, hspace=0.2, wspace=0.2
+)
+plt.savefig("docs/eda/Number of Samples per Class.png")
 plt.show()

 # Calculate and plot the distribution of sample sizes (image dimensions)
 plt.figure(figsize=(10, 6))
-plt.scatter(image_dimensions_df['Width'], image_dimensions_df['Height'], alpha=0.5)
-plt.title('Distribution of Sample Sizes (Image Dimensions)')
-plt.xlabel('Width (Pixels)')
-plt.ylabel('Height (Pixels)')
-plt.savefig('docs/eda/Distribution of Sample Sizes (Image Dimensions).png')
+plt.scatter(image_dimensions_df["Width"], image_dimensions_df["Height"], alpha=0.5)
+plt.title("Distribution of Sample Sizes (Image Dimensions)")
+plt.xlabel("Width (Pixels)")
+plt.ylabel("Height (Pixels)")
+plt.savefig("docs/eda/Distribution of Sample Sizes (Image Dimensions).png")
 plt.show()

 # Plot a random sample of images from each class
 for label, images in sampled_images.items():
     plt.figure(figsize=(15, 5))
-    plt.suptitle(f'Random Sample of Images from Class: {label}')
+    plt.suptitle(f"Random Sample of Images from Class: {label}")
     for i, image in enumerate(images, start=1):
         plt.subplot(1, 5, i)
         plt.imshow(image)
-        plt.axis('off')
-        plt.title(f'Sample {i}')
-    plt.savefig(f'docs/eda/Random Sample of Images from Class {label}.png')
+        plt.axis("off")
+        plt.title(f"Sample {i}")
+    plt.savefig(f"docs/eda/Random Sample of Images from Class {label}.png")
     plt.show()

 # Calculate and plot the correlation matrix for image dimensions
 correlation_matrix = image_dimensions_df.corr()
 plt.figure(figsize=(8, 6))
-sns.heatmap(correlation_matrix, annot=True, cmap='coolwarm', linewidths=0.5)
-plt.title('Correlation Matrix of Image Dimensions')
-plt.savefig('docs/eda/Correlation Matrix of Image Dimensions.png')
+sns.heatmap(correlation_matrix, annot=True, cmap="coolwarm", linewidths=0.5)
+plt.title("Correlation Matrix of Image Dimensions")
+plt.savefig("docs/eda/Correlation Matrix of Image Dimensions.png")
 plt.show()

 # Plot the distribution of image widths
 plt.figure(figsize=(10, 6))
-sns.histplot(image_dimensions_df['Width'], bins=20, kde=True)
-plt.title('Distribution of Image Widths')
-plt.xlabel('Width (Pixels)')
-plt.ylabel('Frequency')
-plt.savefig('docs/eda/Distribution of Image Widths.png')
+sns.histplot(image_dimensions_df["Width"], bins=20, kde=True)
+plt.title("Distribution of Image Widths")
+plt.xlabel("Width (Pixels)")
+plt.ylabel("Frequency")
+plt.savefig("docs/eda/Distribution of Image Widths.png")
 plt.show()

 # Plot the distribution of image heights
 plt.figure(figsize=(10, 6))
-sns.histplot(image_dimensions_df['Height'], bins=20, kde=True)
-plt.title('Distribution of Image Heights')
-plt.xlabel('Height (Pixels)')
-plt.ylabel('Frequency')
-plt.savefig('docs/eda/Distribution of Image Heights.png')
+sns.histplot(image_dimensions_df["Height"], bins=20, kde=True)
+plt.title("Distribution of Image Heights")
+plt.xlabel("Height (Pixels)")
+plt.ylabel("Frequency")
+plt.savefig("docs/eda/Distribution of Image Heights.png")
 plt.show()
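Note: DataFrame._append (used above to grow image_dimensions_df row by row) is a private pandas method; the public append was deprecated in pandas 1.4 and removed in 2.0, and rebuilding the frame on every row is quadratic. A sketch of the usual alternative, collecting plain dicts and constructing the frame once (the loop here is a stand-in for the image loop above):

import pandas as pd

rows = []
for height, width in [(224, 224), (512, 384)]:  # stand-in for plt.imread() results
    rows.append({"Height": height, "Width": width})
image_dimensions_df = pd.DataFrame(rows, columns=["Height", "Width"])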
eval_orig.py CHANGED
@@ -13,10 +13,12 @@ from sklearn.metrics import (
     accuracy_score,
     f1_score,
     confusion_matrix,
+    matthews_corrcoef,
     ConfusionMatrixDisplay,
     roc_curve,
     auc,
     average_precision_score,
+    cohen_kappa_score,
 )
 from sklearn.preprocessing import label_binarize
 from configs import *
@@ -27,7 +29,7 @@ from data_loader import load_data  # Import the load_data function
 # SqueezeNet: 0.8865856365856365


-rcParams['font.family'] = 'Times New Roman'
+rcParams["font.family"] = "Times New Roman"

 # Load the model
 model = MODEL.to(DEVICE)
@@ -103,18 +105,32 @@ def predict_image(image_path, model, transform):
     true_classes_tensor = torch.tensor(true_classes)

     # Calculate the confusion matrix
-    conf_matrix = confusion_matrix(true_classes, predicted_labels)
+    conf_matrix = confusion_matrix(
+        true_classes,
+        predicted_labels,
+    )

     # Plot the confusion matrix
-    ConfusionMatrixDisplay(
-        confusion_matrix=conf_matrix, display_labels=range(NUM_CLASSES)
-    ).plot(cmap=plt.cm.Blues)
+    ConfusionMatrixDisplay(confusion_matrix=conf_matrix, display_labels=CLASSES).plot(
+        cmap=plt.cm.Blues, xticks_rotation=25
+    )
+    # Use the exported value of margin_left to adjust the space between the yticklabels and the yticks
+    plt.subplots_adjust(
+        top=0.935,
+        bottom=0.155,
+        left=0.125,
+        right=0.905,
+        hspace=0.2,
+        wspace=0.2,
+    )
     plt.title("Confusion Matrix")
+    manager = plt.get_current_fig_manager()
+    manager.full_screen_toggle()
     plt.savefig("docs/efficientnet/confusion_matrix.png")
     plt.show()

     # Classification report
-    class_names = [str(cls) for cls in range(NUM_CLASSES)]
+    class_names = CLASSES
     report = classification_report(
         true_classes, predicted_labels, target_names=class_names
     )
@@ -156,7 +172,7 @@ def predict_image(image_path, model, transform):
     )
     plt.savefig("docs/efficientnet/prc.png")
     plt.show()
-
+
     # Plot ROC curve
     plt.figure(figsize=(10, 6))
     plt.plot(fpr, tpr)
@@ -172,6 +188,13 @@ def predict_image(image_path, model, transform):
     )
     plt.savefig("docs/efficientnet/roc.png")
     plt.show()
+
+
+    # Matthew's correlation coefficient
+    print("Matthew's correlation coefficient:", matthews_corrcoef(true_classes, predicted_labels))
+
+    # Cohen's kappa
+    print("Cohen's kappa:", cohen_kappa_score(true_classes, predicted_labels))


 predict_image("data/test/Task 1/", model, preprocess)
extract.py CHANGED
@@ -21,37 +21,43 @@ for child in model.features[-1]:
 if target_layer is None:
     raise ValueError("Invalid layer name: {}".format(target_layer))

+
+
 def extract_gradcam(image_path=None, save_path=None):
     if image_path is None:
         for disease in CLASSES:
             print("Processing", disease)
-            image_path = random.choice(os.listdir("data/test/Task 1/" + disease))
-            image_path = "data/test/Task 1/" + disease + "/" + image_path
-            rgb_img = cv2.imread(image_path, 1)
-            rgb_img = np.float32(rgb_img) / 255
-            input_tensor = preprocess_image(rgb_img, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
-            input_tensor = input_tensor.to(DEVICE)
+            for image_path in os.listdir(r'data\test\Task 1\{}'.format(disease)):
+                print("Processing", image_path)
+                image_path = r'data\test\Task 1\{}\{}'.format(disease, image_path)
+                image_name = image_path.split('.')[0].split('\\')[-1]
+                print("Processing", image_name)
+                rgb_img = cv2.imread(image_path, 1)
+                rgb_img = np.float32(rgb_img) / 255
+                input_tensor = preprocess_image(rgb_img, mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5])
+                input_tensor = input_tensor.to(DEVICE)

-            # Create a GradCAMPlusPlus object
-            cam = GradCAMPlusPlus(model=model, target_layers=[target_layer], use_cuda=True)
+                # Create a GradCAMPlusPlus object
+                cam = GradCAMPlusPlus(model=model, target_layers=[target_layer], use_cuda=True)

-            # Generate the GradCAM heatmap
-            grayscale_cam = cam(input_tensor=input_tensor)[0]
+                # Generate the GradCAM heatmap
+                grayscale_cam = cam(input_tensor=input_tensor)[0]

-            # Apply a colormap to the grayscale heatmap
-            heatmap_colored = cv2.applyColorMap(np.uint8(255 * grayscale_cam), cv2.COLORMAP_JET)
+                # Apply a colormap to the grayscale heatmap
+                heatmap_colored = cv2.applyColorMap(np.uint8(255 * grayscale_cam), cv2.COLORMAP_JET)

-            # Ensure heatmap_colored has the same dtype as rgb_img
-            heatmap_colored = heatmap_colored.astype(np.float32) / 255
+                # Ensure heatmap_colored has the same dtype as rgb_img
+                heatmap_colored = heatmap_colored.astype(np.float32) / 255

-            # Adjust the alpha value to control transparency
-            alpha = 0.3  # You can change this value to make the original image more or less transparent
+                # Adjust the alpha value to control transparency
+                alpha = 0.3  # You can change this value to make the original image more or less transparent

-            # Overlay the colored heatmap on the original image
-            final_output = cv2.addWeighted(rgb_img, 0.3, heatmap_colored, 0.7, 0)
+                # Overlay the colored heatmap on the original image
+                final_output = cv2.addWeighted(rgb_img, 0.3, heatmap_colored, 0.7, 0)

-            # Save the final output
-            cv2.imwrite(f'docs/efficientnet/gradcam/{disease}.jpg', (final_output * 255).astype(np.uint8))
+                # Save the final output
+                os.makedirs(f'docs/efficientnet/gradcam/{disease}', exist_ok=True)
+                cv2.imwrite(f'docs/efficientnet/gradcam/{disease}/{image_name}.jpg', (final_output * 255).astype(np.uint8))
     else:
         rgb_img = cv2.imread(image_path, 1)
         rgb_img = np.float32(rgb_img) / 255
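Note: the new per-image loop hard-codes Windows path separators (r'data\test\Task 1\...') and recovers the file stem by splitting on backslashes, so it only runs on Windows. A portable sketch of the same path handling via os.path (the class name is illustrative):

import os

disease = "Dystonia"  # illustrative class folder
class_dir = os.path.join("data", "test", "Task 1", disease)
for filename in os.listdir(class_dir):
    image_path = os.path.join(class_dir, filename)  # OS-appropriate separator
    image_name = os.path.splitext(filename)[0]      # stem without extension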
lime_eval.py ADDED
@@ -0,0 +1,108 @@
+import numpy as np
+from lime.lime_image import LimeImageExplainer
+from PIL import Image
+import torch
+import torchvision.transforms as transforms
+import matplotlib.pyplot as plt
+from matplotlib.colors import Normalize
+from configs import *
+from sklearn.preprocessing import minmax_scale
+
+
+model = MODEL.to(DEVICE)
+model.load_state_dict(torch.load(MODEL_SAVE_PATH))
+model.eval()
+
+
+# Define a function to predict with the model
+def predict(input_image):
+    input_image = torch.tensor(input_image, dtype=torch.float32)
+    if input_image.dim() == 4:
+        input_image = input_image.permute(0, 3, 1, 2)  # Permute the dimensions
+    input_image = input_image.to(DEVICE)  # Move to the appropriate device
+    with torch.no_grad():
+        output = model(input_image)
+    return output
+
+
+def generate_lime(image_path=None, save_path=None):
+    if image_path is None:
+        for disease in CLASSES:
+            print("Processing", disease)
+            for image_path in os.listdir(r"data\test\Task 1\{}".format(disease)):
+                image = None
+                print("Processing", image_path)
+                image_path = r"data\test\Task 1\{}\{}".format(disease, image_path)
+                image_name = image_path.split(".")[0].split("\\")[-1]
+                print("Processing", image_name)
+                image = Image.open(image_path).convert("RGB")
+                image = preprocess(image)
+                image = image.unsqueeze(0)  # Add batch dimension
+                image = image.to(DEVICE)
+
+                # Create the LIME explainer
+                explainer = LimeImageExplainer()
+
+                # Explain the model's predictions for the image
+                explanation = explainer.explain_instance(
+                    image[0].permute(1, 2, 0).numpy(),
+                    predict,
+                    top_labels=5,
+                    num_samples=1000,
+                )
+
+                # Get the image and mask for the explanation
+                image, mask = explanation.get_image_and_mask(
+                    explanation.top_labels[0],
+                    positive_only=False,
+                    num_features=10,
+                    hide_rest=False,
+                )
+
+                # Save the image (don't use plt.imsave)
+                # Normalize the image to the [0, 1] range
+                # norm = Normalize(vmin=0, vmax=1)
+                # image = norm(image)
+
+                image = (image - np.min(image)) / (np.max(image) - np.min(image))
+
+                # image = Image.fromarray(image)
+                os.makedirs(f"docs/efficientnet/lime/{disease}", exist_ok=True)
+                # image.save(f'docs/efficientnet/lime/{disease}/{image_name}.jpg')
+                plt.imsave(f"docs/efficientnet/lime/{disease}/{image_name}.jpg", image)
+
+    else:
+        image = None
+        print("Processing", image_path)
+        image = Image.open(image_path).convert("RGB")
+        image = preprocess(image)
+        image = image.unsqueeze(0)  # Add batch dimension
+        image = image.to(DEVICE)
+
+        # Create the LIME explainer
+        explainer = LimeImageExplainer()
+
+        # Explain the model's predictions for the image
+        explanation = explainer.explain_instance(
+            image[0].permute(1, 2, 0).numpy(), predict, top_labels=5, num_samples=1000
+        )
+
+        # Get the image and mask for the explanation
+        image, mask = explanation.get_image_and_mask(
+            explanation.top_labels[0],
+            positive_only=False,
+            num_features=10,
+            hide_rest=False,
+        )
+
+        # Save the image (don't use plt.imsave)
+        # Normalize the image to the [0, 1] range
+        # norm = Normalize(vmin=0, vmax=1)
+        # image = norm(image)
+
+        image = (image - np.min(image)) / (np.max(image) - np.min(image))
+
+        # image = Image.fromarray(image)
+        # os.makedirs(f"docs/efficientnet/lime/{disease}", exist_ok=True)
+        # image.save(f'docs/efficientnet/lime/{disease}/{image_name}.jpg')
+        plt.imsave(save_path, image)
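Note: LimeImageExplainer.explain_instance expects its classifier_fn to return class probabilities as a NumPy array, one row per perturbed sample; the predict defined above returns a torch tensor of raw logits. A sketch of a conforming classifier function, assuming the model and DEVICE objects defined above:

import torch
import torch.nn.functional as F

def predict_proba(images):
    # LIME passes a batch of HWC numpy images; convert to NCHW tensors
    batch = torch.tensor(images, dtype=torch.float32).permute(0, 3, 1, 2)
    with torch.no_grad():
        logits = model(batch.to(DEVICE))
    # softmax + numpy conversion gives LIME the probability rows it expects
    return F.softmax(logits, dim=1).cpu().numpy()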
lrp-eval.py ADDED
@@ -0,0 +1,16 @@
+import torch
+from torchvision.models import vgg16, VGG16_Weights
+from src.lrp import LRPModel
+from configs import *
+from PIL import Image
+
+
+image = Image.open(r'data\test\Task 1\Alzheimer Disease\0d846ee1-c90d-4ed5-8467-3550dd653858.png').convert("RGB")
+image = preprocess(image).unsqueeze(0)
+image = image.to(DEVICE)
+model = MODEL.to(DEVICE)
+print(dict(model.named_modules()))
+model.load_state_dict(torch.load(MODEL_SAVE_PATH, map_location=DEVICE))
+model.eval()
+lrp_model = LRPModel(model)
+r = lrp_model.forward(image)
plot-gradcam.py CHANGED
@@ -1,30 +1,65 @@
-# Plot the gradcam pics of 7 classes from C:\Users\User\Documents\PISTEK\HANDETECT\docs\efficientnet\gradcam folder
-# Each picture is named as <class_name>.jpg
-# Usage: python plot-gradcam.py
+# Plot a table, each column is a test image, separate to 7 tables (one for each disease), each column have 4 rows, one is disease name, one is gradcam, one is lime, one is original image

 import os
 import cv2
 import numpy as np
+import torch
+import torchvision.transforms as transforms
 import matplotlib.pyplot as plt
-from matplotlib import rcParams
+from matplotlib.colors import Normalize
+from configs import *
+from sklearn.preprocessing import minmax_scale

-rcParams['font.family'] = 'Times New Roman'
+plt.rcParams["font.family"] = "Times New Roman"
+
+# Plot a table, each column is a test image, separate to 7 plot (one for each disease), each column have 4 rows, one is disease name, one is gradcam, one is lime, one is original image, the images are in 'docs/efficientnet/gradcam' and 'docs/efficientnet/lime' and 'data/test/Task 1'

-# Load the gradcam pics
-gradcam_dir = r'C:\Users\User\Documents\PISTEK\HANDETECT\docs\efficientnet\gradcam'
-gradcam_pics = []
-for pic in os.listdir(gradcam_dir):
-    gradcam_pics.append(cv2.imread(os.path.join(gradcam_dir, pic), 1))
-
-# Plot the gradcam pics
-plt.figure(figsize=(20, 20))
-# Very tight layout
-plt.tight_layout(pad=0.1)
-for i, pic in enumerate(gradcam_pics):
-    plt.subplot(3, 3, i + 1)
-    plt.imshow(pic)
-    plt.axis('off')
-    plt.title(os.listdir(gradcam_dir)[i].split('.')[0], fontsize=13)
-plt.savefig('docs/efficientnet/gradcam.jpg')
-plt.show()

+def plot_table():
+    diseases = CLASSES
+    diseases.sort()
+    # diseases = ["Atelectasis", "Cardiomegaly", "Consolidation", "Edema", "Effusion", "Emphysema", "Fibrosis", "Hernia", "Infiltration", "Mass", "Nodule", "Pleural_Thickening", "Pneumonia", "Pneumothorax"]
+    print(diseases)
+    fig, axs = plt.subplots(4, 14, figsize=(20, 10))
+    fig.tight_layout()
+    for i, disease in enumerate(diseases):
+        # Create a new plot
+        print("Processing", disease)
+        axs[0, i].axis("off")
+        axs[0, i].set_title(disease)
+        axs[1, i].axis("off")
+        axs[1, i].set_title("GradCAM")
+        axs[2, i].axis("off")
+        axs[2, i].set_title("LIME")
+        axs[3, i].axis("off")
+        axs[3, i].set_title("Original")
+        # For each image in test folder, there are corresponding ones in gradcam folder and lime folder, plot it accordingly
+        for j, image_path in enumerate(os.listdir(r"data\test\Task 1\{}".format(disease))):
+            print("Processing", image_path)
+            image_path = r"data\test\Task 1\{}\{}".format(disease, image_path)
+            image_name = image_path.split(".")[0].split("\\")[-1]
+            print("Processing", image_name)
+            # Plot the original image
+            image = cv2.imread(image_path, 1)
+            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+            axs[3, i].imshow(image)
+            # Plot the gradcam image
+            image = cv2.imread(
+                f"docs/efficientnet/gradcam/{disease}/{image_name}.jpg", 1
+            )
+            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+            axs[1, i].imshow(image)
+            # Plot the lime image
+            image = cv2.imread(
+                f"docs/efficientnet/lime/{disease}/{image_name}.jpg", 1
+            )
+            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
+            axs[2, i].imshow(image)
+        # # Plot the disease name
+        # axs[0, i].text(0.5, 0.5, disease, horizontalalignment="center")
+    plt.savefig("docs/efficientnet/table.png")
+    plt.show()
+
+if __name__ == "__main__":
+    plot_table()
+
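Note: inside the inner loop every image of a class is drawn into the same axes (axs[1, i], axs[2, i], axs[3, i]), so each imshow overwrites the previous one and only the last test image per class survives; the 14 columns of plt.subplots(4, 14) also leave empty columns when there are only 7 classes. A sketch of one way to give each image its own column (a hypothetical helper, not repo code):

def plot_column(axs, col, original, gradcam_img, lime_img, title):
    # one column per image: rows are original / GradCAM / LIME
    for row, img in enumerate((original, gradcam_img, lime_img)):
        axs[row, col].imshow(img)
        axs[row, col].axis("off")
    axs[0, col].set_title(title)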
shap_eval.py CHANGED
@@ -1,49 +1,37 @@
-import numpy as np
-from lime.lime_image import LimeImageExplainer
-from PIL import Image
+# Import necessary libraries
+import shap
 import torch
-import torchvision.transforms as transforms
-import matplotlib.pyplot as plt
+import numpy as np
+
+# Load your EfficientNetB3 model
+from torchvision import models
+
+# Load your test data
+from data_loader import load_test_data  # Replace with your actual data loader function
 from configs import *

-model = MODEL.to(DEVICE)
-model.load_state_dict(torch.load(MODEL_SAVE_PATH))
+# Define your EfficientNetB3 model and load its pre-trained weights
+model = MODEL
+
+# Set your model to evaluation mode
 model.eval()

-# Load the image
-image = Image.open(
-    r"data\test\Task 1\Healthy\0a7259b2-e650-43aa-93a0-e8b1063476fc.png"
-).convert("RGB")
-image = preprocess(image)
-image = image.unsqueeze(0)  # Add batch dimension
-image = image.to(DEVICE)
-
-
-# Define a function to predict with the model
-def predict(input_image):
-    input_image = torch.tensor(input_image, dtype=torch.float32)
-    if input_image.dim() == 4:
-        input_image = input_image.permute(0, 3, 1, 2)  # Permute the dimensions
-    input_image = input_image.to(DEVICE)  # Move to the appropriate device
-    with torch.no_grad():
-        output = model(input_image)
-    return output
-
-
-# Create the LIME explainer
-explainer = LimeImageExplainer()
-
-# Explain the model's predictions for the image
-explanation = explainer.explain_instance(
-    image[0].permute(1, 2, 0).numpy(), predict, top_labels=5, num_samples=2000
-)
-
-# Get the image and mask for the explanation
-image, mask = explanation.get_image_and_mask(
-    explanation.top_labels[0], positive_only=False, num_features=5, hide_rest=False
-)
-
-# Display the explanation
-plt.imshow(image)
-plt.show()
+# Load your test data using your data loader
+test_loader = load_test_data(TEST_DATA_DIR + "1", preprocess)  # Replace with your test data loader
+
+# Choose a specific image from the test dataset
+image, _ = next(iter(test_loader))
+
+# Make sure your model and input data are on the same device (CPU or GPU)
+device = DEVICE
+model = model.to(device)
+image = image.to(device)
+
+# Initialize an explainer for your model using SHAP's DeepExplainer
+explainer = shap.DeepExplainer(model, data=test_loader)
+
+# Calculate SHAP values for your chosen image
+shap_values = explainer(image)
+
+# Summarize the feature importance for the specific image
+shap.summary_plot(shap_values, image)
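Note: shap.DeepExplainer takes a background batch of tensors rather than a DataLoader, and SHAP values for PyTorch models come from its .shap_values() method, so the call above as committed is unlikely to run. A sketch under those documented signatures, reusing the test_loader, model, image, and device objects defined above:

import shap
import torch

background, _ = next(iter(test_loader))            # small reference batch
explainer = shap.DeepExplainer(model, background[:16].to(device))
shap_values = explainer.shap_values(image[:1])     # explain a single image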
test-speed.py ADDED
@@ -0,0 +1,93 @@
+from gradio_client import Client
+import time
+import csv
+import matplotlib.pyplot as plt
+from matplotlib import rcParams
+from configs import *
+from PIL import Image
+
+client = Client("https://cycool29-handetect.hf.space/")
+
+list_of_times = []
+
+
+rcParams["font.family"] = "Times New Roman"
+
+# Load the model
+model = MODEL.to(DEVICE)
+model.load_state_dict(torch.load(MODEL_SAVE_PATH, map_location=DEVICE))
+model.eval()
+
+for disease in CLASSES:
+    print("Processing", disease)
+    for image_path in os.listdir(r"data\test\Task 1\{}".format(disease)):
+        # print("Processing", image_path)
+        image_path = r"data\test\Task 1\{}\{}".format(disease, image_path)
+        start_time = time.time()
+        result = client.predict(
+            image_path,
+            api_name="/predict"
+        )
+        time_taken = time.time() - start_time
+        list_of_times.append(time_taken)
+        print("Time taken:", time_taken)
+
+        # Log to csv
+        with open('log.csv', 'a', newline='') as file:
+            writer = csv.writer(file)
+            writer.writerow([disease])
+            writer.writerow([image_path])
+            writer.writerow([time_taken])
+
+
+print("Average time taken:", sum(list_of_times)/len(list_of_times))
+print("Max time taken:", max(list_of_times))
+print("Min time taken:", min(list_of_times))
+print("Total time taken:", sum(list_of_times))
+print("Median time taken:", sorted(list_of_times)[len(list_of_times)//2])
+
+# Plot the histogram
+plt.hist(list_of_times, bins=10)
+plt.xlabel("Time taken (s)")
+plt.ylabel("Frequency")
+plt.title("Time taken to process each image")
+plt.savefig("docs/efficientnet/time_taken_for_web.png")
+
+
+# Now is local
+list_of_times = []
+
+for disease in CLASSES:
+    print("Processing", disease)
+    for image_path in os.listdir(r"data\test\Task 1\{}".format(disease)):
+        # print("Processing", image_path)
+        image_path = r"data\test\Task 1\{}\{}".format(disease, image_path)
+        start_time = time.time()
+        image = Image.open(image_path).convert("RGB")
+        image = preprocess(image).unsqueeze(0)
+        image = image.to(DEVICE)
+        output = model(image)
+        time_taken = time.time() - start_time
+        list_of_times.append(time_taken)
+        print("Time taken:", time_taken)
+
+        # Log to csv
+        with open('log.csv', 'a', newline='') as file:
+            writer = csv.writer(file)
+            writer.writerow([disease])
+            writer.writerow([image_path])
+            writer.writerow([time_taken])
+
+
+print("Average time taken local:", sum(list_of_times)/len(list_of_times))
+print("Max time taken local:", max(list_of_times))
+print("Min time taken local:", min(list_of_times))
+print("Total time taken local:", sum(list_of_times))
+print("Median time taken local:", sorted(list_of_times)[len(list_of_times)//2])
+
+# Plot the histogram
+plt.hist(list_of_times, bins=10)
+plt.xlabel("Time taken (s) local")
+plt.ylabel("Frequency local")
+plt.title("Time taken to process each image local")
+plt.savefig("docs/efficientnet/time_taken_for_local.png")
testing.py DELETED
@@ -1,5 +0,0 @@
-import torch
-
-print("Torch version:",torch.__version__)
-
-print("Is CUDA enabled?",torch.cuda.is_available())