2lu committed on
Commit 7a66a84 · 1 Parent(s): ba6895d

breast cancer detection

app.py ADDED
@@ -0,0 +1,165 @@
+ import gradio as gr
+ import numpy as np
+ import tensorflow as tf
+ from tensorflow.keras.models import Model
+ from tensorflow.keras.applications import ResNet50
+ from tensorflow.keras.layers import Conv2D, BatchNormalization, LeakyReLU, Flatten, Dense, Dropout, Add
+
+ IMG_SIZE = 512
+
+ def residual_block(X, filters):
+     # Retrieve the filter counts for the two conv layers
+     F1, F2 = filters
+     # Save the input value; we need it later to add back to the output.
+     X_shortcut = X
+
+     # First component of the main path
+     X = Conv2D(filters=F1, kernel_size=(3, 3), strides=(1, 1), padding='same')(X)
+     X = BatchNormalization()(X)
+     X = LeakyReLU(alpha=0.1)(X)
+
+     # Second component of the main path
+     X = Conv2D(filters=F2, kernel_size=(3, 3), strides=(1, 1), padding='same')(X)
+     X = BatchNormalization()(X)
+
+     # Final step: add the shortcut back to the main path and apply a LeakyReLU activation
+     X = Add()([X, X_shortcut])
+     X = LeakyReLU(alpha=0.1)(X)
+     return X
+
+ def build_model():
+     # Pretrained ResNet50 backbone used as the feature extractor
+     base_model = ResNet50(
+         weights='imagenet',
+         input_shape=(512, 512, 3),  # Input shape of the images (height, width, channels)
+         include_top=False           # Exclude the top classification layers
+     )
+
+     # Freeze the base model's layers to prevent them from being trained
+     base_model.trainable = False
+     x = base_model.output
+
+     # First conv block
+     x = Conv2D(32, (3, 3), kernel_initializer="he_normal", padding="same")(x)
+     x = LeakyReLU(alpha=0.1)(x)
+
+     # Two residual blocks, then flatten for the dense head
+     x = residual_block(x, [64, 32])
+     x = residual_block(x, [64, 32])
+     x = Flatten()(x)
+
+     # Fully connected head with dropout regularization
+     x = Dense(128, kernel_initializer='he_normal')(x)
+     x = Dropout(0.2)(x)
+     x = Dense(64, kernel_initializer='he_normal')(x)
+     x = Dropout(0.2)(x)
+
+     # Single sigmoid output for binary classification
+     x = Dense(1, activation='sigmoid')(x)
+     model = Model(inputs=base_model.input, outputs=x)
+
+     return model
+
+
+ # One model per stage (stage 1: normal vs. abnormal, stage 2: benign vs. suspicious)
+ # and per view (CC and MLO), each with its own trained weights.
+ stage1_cc = build_model()
+ stage1_cc.load_weights("weights/stage1_cc_weights.weights.h5")
+
+ stage1_mlo = build_model()
+ stage1_mlo.load_weights("weights/stage1_mlo_weight.weights.h5")
+
+ stage2_cc = build_model()
+ stage2_cc.load_weights("weights/stage2_cc_weights.weights.h5")
+
+ stage2_mlo = build_model()
+ stage2_mlo.load_weights("weights/stage2_mlo_weight.weights.h5")
+
+
+ def get_diff(prior_image, current_image):
+     # Convert both views to float32 arrays
+     prior_image = np.array(prior_image, dtype=np.float32)
+     current_image = np.array(current_image, dtype=np.float32)
+
+     # Average the two image shapes so both arrays share a common size
+     avg_height = int((prior_image.shape[0] + current_image.shape[0]) / 2)
+     avg_width = int((prior_image.shape[1] + current_image.shape[1]) / 2)
+
+     # Note: np.resize repeats or truncates the flattened data rather than interpolating
+     prior_image = np.resize(prior_image, [avg_height, avg_width, 3])
+     current_image = np.resize(current_image, [avg_height, avg_width, 3])
+
+     # Keep only the regions that grew brighter in the recent image
+     subtract_image = current_image - prior_image
+     subtract_image[subtract_image < 0] = 0
+     return subtract_image
+
+ def stage1_run(cc_diff_img, mlo_diff_img):
+     # Resize the CC difference image to the model input size and add a batch dimension
+     cc_diff_img = np.resize(cc_diff_img, [IMG_SIZE, IMG_SIZE, 3])
+     cc_diff_img = np.expand_dims(cc_diff_img, axis=0)
+     cc_diff_img = tf.constant(cc_diff_img, dtype=tf.float32)
+     cc_res = stage1_cc.predict(cc_diff_img)
+
+     # Same preprocessing for the MLO difference image
+     mlo_diff_img = np.resize(mlo_diff_img, [IMG_SIZE, IMG_SIZE, 3])
+     mlo_diff_img = np.expand_dims(mlo_diff_img, axis=0)
+     mlo_diff_img = tf.constant(mlo_diff_img, dtype=tf.float32)
+     mlo_res = stage1_mlo.predict(mlo_diff_img)
+
+     # Average the predictions from the two views
+     return (cc_res + mlo_res) / 2
+
+ def stage2_run(cc_diff_img, mlo_diff_img):
+     # Resize the CC difference image to the model input size and add a batch dimension
+     cc_diff_img = np.resize(cc_diff_img, [IMG_SIZE, IMG_SIZE, 3])
+     cc_diff_img = np.expand_dims(cc_diff_img, axis=0)
+     cc_diff_img = tf.constant(cc_diff_img, dtype=tf.float32)
+     cc_res = stage2_cc.predict(cc_diff_img)
+
+     # Same preprocessing for the MLO difference image
+     mlo_diff_img = np.resize(mlo_diff_img, [IMG_SIZE, IMG_SIZE, 3])
+     mlo_diff_img = np.expand_dims(mlo_diff_img, axis=0)
+     mlo_diff_img = tf.constant(mlo_diff_img, dtype=tf.float32)
+     mlo_res = stage2_mlo.predict(mlo_diff_img)
+
+     # Average the predictions from the two views
+     return (cc_res + mlo_res) / 2
+
+ def give_result(cc_prior_image, mlo_prior_image, cc_recent_image, mlo_recent_image):
+     # Convert the uploaded PIL images to numpy arrays
+     cc_prior_img = np.array(cc_prior_image)
+     mlo_prior_img = np.array(mlo_prior_image)
+     cc_recent_img = np.array(cc_recent_image)
+     mlo_recent_img = np.array(mlo_recent_image)
+
+     # Difference images between the recent and prior scans for each view
+     cc_diff_img = get_diff(cc_prior_img, cc_recent_img)
+     mlo_diff_img = get_diff(mlo_prior_img, mlo_recent_img)
+
+     # Stage 1: normal vs. abnormal
+     stage1_res = stage1_run(cc_diff_img, mlo_diff_img)
+     if stage1_res < 0.4:
+         return "Normal"
+
+     # Stage 2: benign vs. suspicious
+     stage2_res = stage2_run(cc_diff_img, mlo_diff_img)
+     if stage2_res < 0.4:
+         return "Benign"
+     else:
+         return "Suspicious"
+
+ # Gradio interface: two rows of image inputs (prior and recent CC/MLO views),
+ # a text area for the result, and a button that triggers the two-stage prediction.
+ with gr.Blocks(title="Breast Cancer Detection", css=".gradio-container {background:lightyellow;}") as demo:
+     gr.HTML("<h1>Breast Cancer Detection</h1>")
+     with gr.Row():
+         cc_prior_image = gr.Image(label="CC Prior image", type="pil")
+         mlo_prior_image = gr.Image(label="MLO Prior image", type="pil")
+
+     with gr.Row():
+         cc_recent_image = gr.Image(label="CC Recent image", type="pil")
+         mlo_recent_image = gr.Image(label="MLO Recent image", type="pil")
+     gr.HTML("<br/>")
+     output_label = gr.TextArea(placeholder="Result")
+     send_btn = gr.Button("Detect")
+     send_btn.click(fn=give_result, inputs=[cc_prior_image, mlo_prior_image, cc_recent_image, mlo_recent_image], outputs=[output_label])
+
+ demo.launch(debug=True)
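Usage note (not part of the commit): assuming the four weight files above are present under weights/ and the imported packages are installed, the app should start with a plain python app.py; demo.launch(debug=True) then serves the Gradio UI on a local port.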
requirements.txt ADDED
File without changes
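Note (not part of the commit): the committed requirements.txt shows no content in this diff. A minimal dependency list inferred from app.py's imports, with no pinned versions (versions are an assumption), would be:
gradio
numpy
tensorflow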
weights/stage1_cc_weights.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:122b4897debc5fb288d543085388baabd23c60e40a179eeafa4dab4ce5d9f2e6
+ size 115528736
weights/stage1_mlo_weight.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5fb7aeea540e78f4ff1c57579c882d7b102a419439312ed80a1f4f3fb6e34ef5
+ size 115528736
weights/stage2_cc_weights.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:02ab22329d870e93ed001b977b6f7cf1bb91eca819f48fda41277ba2775411bb
+ size 115528736
weights/stage2_mlo_weight.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e77fd59d988dceddfdfae7d5020d445a1333b2b361c0f8387b924a7f20aeb8a1
+ size 115528736