# Data Preprocessing

import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from PIL import Image

# Set image size and batch size
IMAGE_SIZE = (224, 224)
BATCH_SIZE = 32

# Path to the dataset root (one subfolder per class)
TRAIN_PATH = 'Covid_19 Image Data'

# Data generator for loading and preprocessing images
datagen = ImageDataGenerator(rescale=1./255, validation_split=0.15)

train_data = datagen.flow_from_directory(
    TRAIN_PATH,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='binary',
    subset='training'  # Set as training data
)

val_data = datagen.flow_from_directory(
    TRAIN_PATH,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='binary',
    subset='validation'  # Set as validation data
)
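
# Sanity check (optional addition): flow_from_directory assigns class indices
# alphabetically from the subfolder names, so it is worth confirming which index
# maps to COVID-19 before interpreting the sigmoid output later on. The rest of
# this script assumes index 1 = COVID-19.
print(train_data.class_indices)
print(f"Training samples: {train_data.samples}, validation samples: {val_data.samples}")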

# CNN Model Setup (Transfer Learning)

import tensorflow as tf
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, Dropout
from tensorflow.keras.models import Model

# Define the input shape
input_shape = (224, 224, 3)

# Load ResNet50 with input shape and without the top layer
base_model = ResNet50(weights='imagenet', include_top=False, input_shape=input_shape)

# Freeze the layers in the base model
base_model.trainable = False

# Add custom layers on top
x = base_model.output
x = GlobalAveragePooling2D()(x)
x = Dense(128, activation='relu')(x)
x = Dropout(0.5)(x)
predictions = Dense(1, activation='sigmoid')(x)

# Define the model
model = Model(inputs=base_model.input, outputs=predictions)

# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])

# Model summary
model.summary()

# Training the Model

# Train the model
history = model.fit(
    train_data,
    validation_data=val_data,
    epochs=10,  # Adjust epochs as needed
    verbose=1
)
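
# Optional (not in the original script): persist the trained model so the Gradio
# app further below could reload it later without retraining. The filename is
# illustrative; adjust the path/format to your setup.
model.save('covid19_resnet50.h5')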

import matplotlib.pyplot as plt

# Plot the training and validation accuracy
plt.figure(figsize=(12, 6))

# Accuracy plot
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Model Accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')
plt.grid(True)

# Loss plot
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Model Loss')
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.legend(loc='upper right')
plt.grid(True)

# Show the plot
plt.tight_layout()
plt.show()
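
# Optional fine-tuning sketch (an addition, not part of the original pipeline):
# after the frozen-base training above, the last ResNet50 block can be unfrozen
# and trained for a few more epochs at a much lower learning rate, which often
# improves accuracy on small medical-imaging datasets.
base_model.trainable = True
for layer in base_model.layers:
    if not layer.name.startswith('conv5_'):
        layer.trainable = False  # keep everything before the last block frozen

model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-5),
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Uncomment to run the extra fine-tuning epochs:
# history_ft = model.fit(train_data, validation_data=val_data, epochs=3, verbose=1)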

# Explainable AI Integration (Grad-CAM)

import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from tensorflow.keras.models import Model
from PIL import Image

def make_gradcam_heatmap(img_array, model, last_conv_layer_name):
    grad_model = Model(
        inputs=[model.inputs],
        outputs=[model.get_layer(last_conv_layer_name).output, model.output]
    )
    
    # Record operations for automatic differentiation
    with tf.GradientTape() as tape:
        conv_outputs, predictions = grad_model(img_array)
        loss = predictions[:, 0]  # Assuming binary classification (0 = Healthy, 1 = COVID-19)
    
    # Compute gradients
    grads = tape.gradient(loss, conv_outputs)
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))

    conv_outputs = conv_outputs[0]
    heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_outputs), axis=-1)
    heatmap = np.maximum(heatmap, 0)              # ReLU: keep only the positive contributions
    heatmap = heatmap / (np.max(heatmap) + 1e-8)  # Normalize to [0, 1]; epsilon guards against division by zero
    return heatmap

def display_gradcam(img_path, heatmap, alpha=0.4):
    img = Image.open(img_path)
    img = img.resize((224, 224))  # Resize the image to match model input size

    heatmap = np.uint8(255 * heatmap)  # Convert heatmap to 0-255 scale
    heatmap = Image.fromarray(heatmap).resize(img.size, Image.LANCZOS)  # Upsample heatmap to the image size
    heatmap = np.array(heatmap)

    # Create figure to plot the image and heatmap
    fig, ax = plt.subplots(1, 2, figsize=(10, 5))
    ax[0].imshow(img)
    ax[1].imshow(img)
    ax[1].imshow(heatmap, cmap='jet', alpha=alpha)  # Overlay the heatmap
    plt.show()

# Load and preprocess the image
def preprocess_image(image_path):
    img = Image.open(image_path).convert('RGB')   # Ensure 3 channels; X-rays are often stored as grayscale
    img = img.resize((224, 224))                  # Resize to match the model's input shape
    img = np.array(img) / 255.0                   # Normalize pixel values to [0, 1]
    img = np.expand_dims(img, axis=0)             # Add batch dimension: (1, 224, 224, 3)
    return img

# Path to the image
img_path = 'Covid_19 Image Data/1/COVID-19 (10).jpg'

# Preprocess the image
img_array = preprocess_image(img_path)

# Get the heatmap
heatmap = make_gradcam_heatmap(img_array, model, 'conv5_block3_out')  # Replace with your last conv layer's name

# Display the original image with the Grad-CAM heatmap overlay
display_gradcam(img_path, heatmap)

# Evaluation

# Evaluate the model on the held-out validation split
val_loss, val_acc = model.evaluate(val_data, verbose=2)
print(f'Validation Accuracy: {val_acc:.2f}')
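
# A more detailed look at validation performance (optional sketch; assumes
# scikit-learn is installed). The validation generator is re-created with
# shuffle=False so the predictions line up with the labels in file order.
from sklearn.metrics import classification_report, confusion_matrix

eval_data = datagen.flow_from_directory(
    TRAIN_PATH,
    target_size=IMAGE_SIZE,
    batch_size=BATCH_SIZE,
    class_mode='binary',
    subset='validation',
    shuffle=False
)

pred_probs = model.predict(eval_data, verbose=0)
pred_labels = (pred_probs.ravel() > 0.5).astype(int)

print(confusion_matrix(eval_data.classes, pred_labels))
print(classification_report(eval_data.classes, pred_labels,
                            target_names=list(eval_data.class_indices.keys())))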

# Gradio User Interface

import gradio as gr
import numpy as np
from PIL import Image
import tensorflow as tf
from tensorflow.keras.models import Model
import matplotlib.pyplot as plt
import cv2  # For color mapping the heatmap

# Define the Grad-CAM function
def make_gradcam_heatmap(img_array, model, last_conv_layer_name):
    grad_model = Model([model.inputs], [model.get_layer(last_conv_layer_name).output, model.output])
    with tf.GradientTape() as tape:
        conv_outputs, predictions = grad_model(img_array)
        loss = predictions[:, 0]  # For binary classification
    grads = tape.gradient(loss, conv_outputs)
    pooled_grads = tf.reduce_mean(grads, axis=(0, 1, 2))
    conv_outputs = conv_outputs[0]
    heatmap = tf.reduce_mean(tf.multiply(pooled_grads, conv_outputs), axis=-1)
    heatmap = np.maximum(heatmap, 0)              # ReLU: keep only the positive contributions
    heatmap = heatmap / (np.max(heatmap) + 1e-8)  # Normalize to [0, 1]; epsilon guards against division by zero
    return heatmap

# Function to overlay the heatmap on the original image
def apply_heatmap_to_image(img, heatmap):
    # Resize heatmap to match image size
    heatmap = cv2.resize(heatmap, (img.size[0], img.size[1]))

    # Convert heatmap to RGB (apply 'jet' colormap)
    heatmap_colored = cv2.applyColorMap(np.uint8(255 * heatmap), cv2.COLORMAP_JET)
    
    # Convert to RGB mode (since OpenCV uses BGR)
    heatmap_colored = cv2.cvtColor(heatmap_colored, cv2.COLOR_BGR2RGB)
    
    # Overlay the heatmap on the original image
    overlay = np.array(img) * 0.6 + heatmap_colored * 0.4
    overlay = np.clip(overlay, 0, 255).astype('uint8')
    return Image.fromarray(overlay)

# Define the prediction and explainability function
def predict_and_explain(img):
    img = Image.fromarray(img).convert('RGB').resize((224, 224))  # Ensure 3 channels and resize for the model
    img_array = np.array(img) / 255.0              # Normalize pixel values
    img_array = np.expand_dims(img_array, axis=0)  # Add batch dimension

    # Get the prediction; the sigmoid output is the probability of class index 1 (assumed COVID-19)
    prediction = model.predict(img_array)
    confidence = float(prediction[0][0])
    result = "COVID-19 Positive" if confidence > 0.5 else "Healthy"

    # Generate the Grad-CAM heatmap
    last_conv_layer_name = 'conv5_block3_out'  # Update with the actual last convolution layer name
    heatmap = make_gradcam_heatmap(img_array, model, last_conv_layer_name)
    
    # Apply heatmap on the image
    heatmap_img = apply_heatmap_to_image(img, heatmap)
    
    # Display confidence and heatmap
    confidence_text = f"Confidence: {confidence:.2f}"
    return result, confidence_text, heatmap_img

# Gradio interface
def create_interface():
    gr_interface = gr.Interface(
        fn=predict_and_explain,
        inputs=gr.Image(type="numpy"),
        outputs=[gr.Textbox(label="Prediction"), gr.Textbox(label="Confidence"), gr.Image(label="Heatmap")],
        title="COVID-19 X-ray Classification with Explainability",
        description="Upload an X-ray image to predict if the patient has COVID-19, see the confidence score, and view the Grad-CAM heatmap."
    )
    return gr_interface

# Launch the interface
gr_interface = create_interface()
gr_interface.launch()