Kalbe-x-Bangkit committed on
Commit fc0d2da · verified · 1 Parent(s): b745ea4

Delete app-streamlit.py

Files changed (1)
  1. app-streamlit.py +0 -503
app-streamlit.py DELETED
@@ -1,503 +0,0 @@
import streamlit as st
import cv2
import numpy as np
import pydicom
import tensorflow as tf
import keras
from pydicom.dataset import Dataset, FileDataset
from pydicom.uid import generate_uid
from google.cloud import storage
import os
import io
from PIL import Image
import uuid
import pandas as pd
from datetime import datetime
import SimpleITK as sitk
from pydicom.pixel_data_handlers.util import apply_voi_lut

# Environment Configuration
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = "./da-kalbe-63ee33c9cdbb.json"
bucket_name = "da-kalbe-ml-result-png"
storage_client = storage.Client()
bucket_result = storage_client.bucket(bucket_name)
bucket_name_load = "da-ml-models"
bucket_load = storage_client.bucket(bucket_name_load)

# Input size expected by the detection model.
H = 224
W = 224

@st.cache_resource
def load_model():
    model = tf.keras.models.load_model("model-detection.h5", compile=False)
    model.compile(
        loss={
            "bbox": "mse",
            "class": "sparse_categorical_crossentropy"
        },
        optimizer=tf.keras.optimizers.Adam(),
        metrics={
            "bbox": ['mse'],
            "class": ['accuracy']
        }
    )
    return model

def preprocess_image(image):
    """ Preprocess the image to the required size and normalization. """
    image = cv2.resize(image, (W, H))
    image = (image - 127.5) / 127.5  # Normalize to [-1, +1]
    image = np.expand_dims(image, axis=0).astype(np.float32)
    return image
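
# Illustrative check (hypothetical input): a dummy 512x512 BGR frame becomes a
# single normalized sample of shape (1, 224, 224, 3) with values in [-1, 1].
#   dummy = np.zeros((512, 512, 3), dtype=np.uint8)
#   preprocess_image(dummy).shape  # -> (1, 224, 224, 3)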

def predict(model, image):
    """ Predict bounding box and label for the input image. """
    pred_bbox, pred_class = model.predict(image)
    pred_label_confidence = np.max(pred_class, axis=1)[0]
    pred_label = np.argmax(pred_class, axis=1)[0]
    return pred_bbox[0], pred_label, pred_label_confidence

def draw_bbox(image, bbox):
    """ Draw bounding box on the image. """
    h, w, _ = image.shape
    x1, y1, x2, y2 = bbox
    x1, y1, x2, y2 = int(x1 * w), int(y1 * h), int(x2 * w), int(y2 * h)
    image = cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
    return image
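
# cal_iou() is called in the "Auto Detect" section at the bottom of this file
# but never defined in it. A minimal sketch of the presumed intersection-over-
# union helper, assuming boxes given as [x1, y1, x2, y2] in pixels:
def cal_iou(box_a, box_b):
    # Corners of the intersection rectangle.
    x1 = max(box_a[0], box_b[0])
    y1 = max(box_a[1], box_b[1])
    x2 = min(box_a[2], box_b[2])
    y2 = min(box_a[3], box_b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area_a = (box_a[2] - box_a[0]) * (box_a[3] - box_a[1])
    area_b = (box_b[2] - box_b[0]) * (box_b[3] - box_b[1])
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0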

st.title("Chest X-ray Disease Detection")

st.write("Upload a chest X-ray image and click on 'Detect' to find out if there's any disease.")

model = load_model()

uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])

if uploaded_file is not None:
    file_bytes = np.asarray(bytearray(uploaded_file.read()), dtype=np.uint8)
    image = cv2.imdecode(file_bytes, 1)

    st.image(image, caption='Uploaded Image.', use_column_width=True)

    if st.button('Detect'):
        st.write("Processing...")
        input_image = preprocess_image(image)
        pred_bbox, pred_label, pred_label_confidence = predict(model, input_image)

        label_mapping = {
            0: 'Atelectasis',
            1: 'Cardiomegaly',
            2: 'Effusion',
            3: 'Infiltrate',
            4: 'Mass',
            5: 'Nodule',
            6: 'Pneumonia',
            7: 'Pneumothorax'
        }

        if pred_label_confidence < 0.01:
            st.write("No disease detected with sufficient confidence.")
        else:
            pred_label_name = label_mapping[pred_label]
            st.write(f"Prediction Label: {pred_label_name}")
            st.write(f"Prediction Bounding Box: {pred_bbox}")
            st.write(f"Prediction Confidence: {pred_label_confidence:.2f}")

            output_image = draw_bbox(image.copy(), pred_bbox)
            st.image(output_image, caption='Detected Image.', use_column_width=True)

# Utility Functions
def upload_to_gcs(image_data: io.BytesIO, filename: str, content_type='application/dicom'):
    """Uploads an image to Google Cloud Storage."""
    try:
        blob = bucket_result.blob(filename)
        blob.upload_from_file(image_data, content_type=content_type)
        st.write("File ready to be seen in OHIF Viewer.")
    except Exception as e:
        st.error(f"An unexpected error occurred: {e}")

def load_dicom_from_gcs(file_name: str = "dicom_00000001_000.dcm"):
    # Get the blob object
    blob = bucket_load.blob(file_name)

    # Download the file as a bytes object
    dicom_bytes = blob.download_as_bytes()

    # Wrap the bytes object in BytesIO (a file-like object)
    dicom_stream = io.BytesIO(dicom_bytes)

    # Load the DICOM file
    ds = pydicom.dcmread(dicom_stream)

    return ds

def png_to_dicom(image_path: str, image_name: str, dicom: str = None):
    # Use the default template from GCS unless another DICOM file is given.
    if dicom is None:
        ds = load_dicom_from_gcs()
    else:
        ds = load_dicom_from_gcs(dicom)

    jpg_image = Image.open(image_path)  # Open the image using the path
    if jpg_image.mode == 'L':
        np_image = np.array(jpg_image.getdata(), dtype=np.uint8)
        ds.PhotometricInterpretation = "MONOCHROME1"
        ds.SamplesPerPixel = 1
    elif jpg_image.mode in ('RGBA', 'RGB'):
        np_image = np.array(jpg_image.getdata(), dtype=np.uint8)[:, :3]  # Drop the alpha channel if present
        ds.PhotometricInterpretation = "RGB"
        ds.SamplesPerPixel = 3
    else:
        raise ValueError(f"Unsupported image mode: {jpg_image.mode}")

    ds.Rows = jpg_image.height
    ds.Columns = jpg_image.width
    ds.BitsStored = 8
    ds.BitsAllocated = 8
    ds.HighBit = 7
    ds.PixelRepresentation = 0
    ds.PixelData = np_image.tobytes()
    ds.save_as(image_name)
    return ds
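
# Hypothetical usage: wrap a locally saved PNG in a DICOM dataset built from
# the GCS template, writing "enhanced_image.dcm" to the working directory.
#   ds = png_to_dicom("enhanced_image.png", "enhanced_image.dcm")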

def save_dicom_to_bytes(dicom):
    dicom_bytes = io.BytesIO()
    dicom.save_as(dicom_bytes)
    dicom_bytes.seek(0)
    return dicom_bytes

def upload_folder_images(original_image_path, enhanced_image_path):
    # Extract the base name of the uploaded image without the extension
    # (relies on the module-level `uploaded_file` and `enhancement_type`
    # set in the sidebar section below).
    folder_name = os.path.splitext(uploaded_file.name)[0]
    # Create the folder in Cloud Storage
    bucket_result.blob(folder_name + '/').upload_from_string('', content_type='application/x-www-form-urlencoded')
    enhancement_name = enhancement_type.split('_')[-1]
    # Convert images to DICOM
    original_dicom = png_to_dicom(original_image_path, "original_image.dcm")
    enhanced_dicom = png_to_dicom(enhanced_image_path, enhancement_name + ".dcm")

    # Convert DICOM to byte streams for uploading
    original_dicom_bytes = io.BytesIO()
    enhanced_dicom_bytes = io.BytesIO()
    original_dicom.save_as(original_dicom_bytes)
    enhanced_dicom.save_as(enhanced_dicom_bytes)
    original_dicom_bytes.seek(0)
    enhanced_dicom_bytes.seek(0)

    # Upload images to GCS
    upload_to_gcs(original_dicom_bytes, folder_name + '/' + 'original_image.dcm', content_type='application/dicom')
    upload_to_gcs(enhanced_dicom_bytes, folder_name + '/' + enhancement_name + '.dcm', content_type='application/dicom')

def get_mean_std_per_batch(image_path, df, H=320, W=320):
    # Normalization statistics are estimated from the uploaded image itself,
    # since the dataset images are not available locally. `df` is unused but
    # kept for call-site compatibility.
    # path = image_dir + img
    sample_data = np.array(keras.utils.load_img(image_path, target_size=(H, W)))
    mean = np.mean(sample_data)
    std = np.std(sample_data)
    return mean, std

def load_image(img_path, preprocess=True, height=320, width=320):
    """Load and optionally normalize an image for the classification model."""
    mean, std = get_mean_std_per_batch(img_path, df, height, width)
    x = keras.utils.load_img(img_path, target_size=(height, width))
    x = keras.utils.img_to_array(x)
    if preprocess:
        x -= mean
        x /= std
        x = np.expand_dims(x, axis=0)
    return x

def grad_cam(input_model, img_array, cls, layer_name):
    grad_model = tf.keras.models.Model(
        [input_model.inputs],
        [input_model.get_layer(layer_name).output, input_model.output]
    )

    with tf.GradientTape() as tape:
        conv_outputs, predictions = grad_model(img_array)
        loss = predictions[:, cls]

    output = conv_outputs[0]
    grads = tape.gradient(loss, conv_outputs)[0]
    # Guided backpropagation gates: keep only positive activations and gradients.
    gate_f = tf.cast(output > 0, 'float32')
    gate_r = tf.cast(grads > 0, 'float32')
    guided_grads = gate_f * gate_r * grads

    weights = tf.reduce_mean(guided_grads, axis=(0, 1))

    # Weighted sum of the feature maps.
    cam = np.zeros(output.shape[0:2], dtype=np.float32)
    for index, w in enumerate(weights):
        cam += w * output[:, :, index]

    cam = cv2.resize(cam, (320, 320), interpolation=cv2.INTER_LINEAR)
    cam = np.maximum(cam, 0)
    cam = cam / cam.max()

    return cam

# Compute Grad-CAM
def compute_gradcam(model, img_path, layer_name='bn'):
    preprocessed_input = load_image(img_path)
    predictions = model.predict(preprocessed_input)

    original_image = load_image(img_path, preprocess=False)

    # The 14 ChestX-ray14 classes the classification model was trained on.
    labels = ['Cardiomegaly', 'Emphysema', 'Effusion', 'Hernia', 'Infiltration', 'Mass',
              'Nodule', 'Atelectasis', 'Pneumothorax', 'Pleural_Thickening',
              'Pneumonia', 'Fibrosis', 'Edema', 'Consolidation']

    for i in range(len(labels)):
        st.write(f"Generating Grad-CAM for class {labels[i]}")
        gradcam = grad_cam(model, preprocessed_input, i, layer_name)
        gradcam = (gradcam * 255).astype(np.uint8)
        gradcam = cv2.applyColorMap(gradcam, cv2.COLORMAP_JET)
        gradcam = cv2.addWeighted(gradcam, 0.5, original_image.squeeze().astype(np.uint8), 0.5, 0)
        st.image(gradcam, caption=f"{labels[i]}: p={predictions[0][i]:.3f}", use_column_width=True)

def calculate_mse(original_image, enhanced_image):
    # Cast to float so uint8 subtraction cannot wrap around.
    diff = original_image.astype(np.float64) - enhanced_image.astype(np.float64)
    mse = np.mean(diff ** 2)
    return mse

def calculate_psnr(original_image, enhanced_image):
    mse = calculate_mse(original_image, enhanced_image)
    if mse == 0:
        return float('inf')
    max_pixel_value = 255.0
    psnr = 20 * np.log10(max_pixel_value / np.sqrt(mse))
    return psnr

def calculate_maxerr(original_image, enhanced_image):
    diff = original_image.astype(np.float64) - enhanced_image.astype(np.float64)
    maxerr = np.max(diff ** 2)
    return maxerr

def calculate_l2rat(original_image, enhanced_image):
    diff = original_image.astype(np.float64) - enhanced_image.astype(np.float64)
    l2norm_ratio = np.sum(original_image.astype(np.float64) ** 2) / np.sum(diff ** 2)
    return l2norm_ratio
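
# Sanity check of the metrics on toy data (values worked out by hand):
#   a = np.full((2, 2), 100, dtype=np.uint8)
#   b = np.full((2, 2), 110, dtype=np.uint8)
#   calculate_mse(a, b)     # 100.0
#   calculate_psnr(a, b)    # 20 * log10(255 / 10) ≈ 28.13
#   calculate_maxerr(a, b)  # 100.0
#   calculate_l2rat(a, b)   # (4 * 100**2) / (4 * 10**2) = 100.0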

def process_image(original_image, enhancement_type, fix_monochrome=True):
    if fix_monochrome and original_image.shape[-1] == 3:
        original_image = cv2.cvtColor(original_image, cv2.COLOR_BGR2GRAY)

    # Scale the pixel values to the full 0-255 range before enhancement.
    image = original_image - np.min(original_image)
    image = image / np.max(image)
    image = (image * 255).astype(np.uint8)

    enhanced_image = enhance_image(image, enhancement_type)

    mse = calculate_mse(original_image, enhanced_image)
    psnr = calculate_psnr(original_image, enhanced_image)
    maxerr = calculate_maxerr(original_image, enhanced_image)
    l2rat = calculate_l2rat(original_image, enhanced_image)

    return enhanced_image, mse, psnr, maxerr, l2rat

def apply_clahe(image):
    clahe = cv2.createCLAHE(clipLimit=40.0, tileGridSize=(8, 8))
    return clahe.apply(image)

def invert(image):
    return cv2.bitwise_not(image)

def hp_filter(image, kernel=None):
    if kernel is None:
        kernel = np.array([[-1, -1, -1], [-1, 9, -1], [-1, -1, -1]])
    return cv2.filter2D(image, -1, kernel)

def unsharp_mask(image, radius=5, amount=2):
    blurred = cv2.GaussianBlur(image, (0, 0), radius)
    return cv2.addWeighted(image, 1.0 + amount, blurred, -amount, 0)

def hist_eq(image):
    return cv2.equalizeHist(image)

def enhance_image(image, enhancement_type):
    if enhancement_type == "Invert":
        return invert(image)
    elif enhancement_type == "High Pass Filter":
        return hp_filter(image)
    elif enhancement_type == "Unsharp Masking":
        return unsharp_mask(image)
    elif enhancement_type == "Histogram Equalization":
        return hist_eq(image)
    elif enhancement_type == "CLAHE":
        return apply_clahe(image)
    else:
        raise ValueError(f"Unknown enhancement type: {enhancement_type}")
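
# Example (illustrative): apply one of the named enhancements to an image.
# Histogram Equalization and CLAHE require a single-channel uint8 image,
# matching what process_image() passes in.
#   enhanced = enhance_image(gray_image, "CLAHE")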

# Function to add a button that redirects to the given URL
def redirect_button(url):
    button = st.button('Go to OHIF Viewer')
    if button:
        st.markdown(f'<meta http-equiv="refresh" content="0;url={url}" />', unsafe_allow_html=True)

@st.cache_resource
def load_gradcam_model():
    # Classification model used for Grad-CAM (distinct from the detection
    # model returned by load_model() above).
    model = tf.keras.models.load_model('./model.h5')
    return model

###########################################################################################
########################### Streamlit Interface ###########################################
###########################################################################################

st.sidebar.title("Configuration")
uploaded_file = st.sidebar.file_uploader("Upload Original Image", type=["png", "jpg", "jpeg", "dcm"])
enhancement_type = st.sidebar.selectbox(
    "Enhancement Type",
    ["Invert", "High Pass Filter", "Unsharp Masking", "Histogram Equalization", "CLAHE"]
)

# Handle the uploaded file (DICOM or regular image)
if uploaded_file is not None:
    if hasattr(uploaded_file, 'name'):
        file_extension = uploaded_file.name.split(".")[-1]  # Get the file extension
        if file_extension.lower() == "dcm":
            # Process DICOM file
            dicom_data = pydicom.dcmread(uploaded_file)
            pixel_array = dicom_data.pixel_array

            # Extract all metadata
            metadata = {elem.keyword: elem.value for elem in dicom_data if elem.keyword}
            metadata_dict = {str(key): str(value) for key, value in metadata.items()}
            df = pd.DataFrame.from_dict(metadata_dict, orient='index', columns=['Value'])

            # Display metadata in the left-most column
            with st.expander("View Metadata"):
                st.write("Metadata:")
                st.dataframe(df)

            # Normalize the pixel data to 0-255 for display
            img_array = pixel_array.astype(float)
            img_array = (np.maximum(img_array, 0) / img_array.max()) * 255.0
            img_array = np.uint8(img_array)  # Convert to uint8

            col1, col2 = st.columns(2)
            # Check the number of dimensions of the image
            if img_array.ndim == 3:
                n_slices = img_array.shape[0]
                if n_slices > 1:
                    slice_ix = st.sidebar.slider('Slice', 0, n_slices - 1, int(n_slices / 2))
                    # Display the selected slice
                    st.image(img_array[slice_ix, :, :], caption=f"Slice {slice_ix}", use_column_width=True)
                else:
                    # If there's only one slice, just display it
                    st.image(img_array[0, :, :], caption="Single Slice Image", use_column_width=True)
            elif img_array.ndim == 2:
                # If the image is 2D, just display it
                with col1:
                    st.image(img_array, caption="Original Image", use_column_width=True)
            else:
                st.error("Unsupported image dimensions")

            original_image = img_array

            # Convert to grayscale if it's a color image
            if len(pixel_array.shape) > 2:
                pixel_array = pixel_array[:, :, 0]  # Take only the first channel
            # Perform image enhancement and evaluation on pixel_array
            enhanced_image, mse, psnr, maxerr, l2rat = process_image(pixel_array, enhancement_type)
        else:
            # Process regular image file
            original_image = np.array(keras.utils.load_img(uploaded_file, color_mode='rgb' if enhancement_type == "Invert" else 'grayscale'))
            # Perform image enhancement and evaluation on original_image
            enhanced_image, mse, psnr, maxerr, l2rat = process_image(original_image, enhancement_type)

        col1, col2 = st.columns(2)
        with col1:
            st.image(original_image, caption="Original Image", use_column_width=True)
        with col2:
            st.image(enhanced_image, caption='Enhanced Image', use_column_width=True)

        col1, col2 = st.columns(2)
        col3, col4 = st.columns(2)

        col1.metric("MSE", round(mse, 3))
        col2.metric("PSNR", round(psnr, 3))
        col3.metric("Maxerr", round(maxerr, 3))
        col4.metric("L2Rat", round(l2rat, 3))

        # Save enhanced image to a file
        enhanced_image_path = "enhanced_image.png"
        cv2.imwrite(enhanced_image_path, enhanced_image)

        # Save original image to a file
        original_image_path = "original_image.png"
        cv2.imwrite(original_image_path, original_image)

        # Add the redirect button
        col1, col2, col3 = st.columns(3)
        with col1:
            redirect_button("https://new-ohif-viewer-k7c3gdlxua-et.a.run.app/")

        with col2:
            if st.button('Auto Detect'):
                # Look up the ground-truth box for this image. This assumes `df`
                # holds a bounding-box table with 'Image Index', 'Bbox [x', 'y',
                # 'x_max', 'y_max' and 'Finding Label' columns.
                name = uploaded_file.name.split("/")[-1].split(".")[0]
                true_bbox_row = df[df['Image Index'] == uploaded_file.name]

                if not true_bbox_row.empty:
                    x1, y1 = int(true_bbox_row['Bbox [x'].values[0]), int(true_bbox_row['y'].values[0])
                    x2, y2 = int(true_bbox_row['x_max'].values[0]), int(true_bbox_row['y_max'].values[0])
                    true_bbox = [x1, y1, x2, y2]
                    label = true_bbox_row['Finding Label'].values[0]

                    # Run the detection model on the uploaded image; predict()
                    # returns a normalized box, so scale it back to pixels.
                    image = original_image
                    if image.ndim == 2:
                        image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR)
                    pred_bbox, _, _ = predict(model, preprocess_image(image))
                    h, w = image.shape[:2]
                    pred_bbox = [int(pred_bbox[0] * w), int(pred_bbox[1] * h),
                                 int(pred_bbox[2] * w), int(pred_bbox[3] * h)]
                    iou = cal_iou(true_bbox, pred_bbox)

                    image = cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 5)  # BLUE: ground truth
                    image = cv2.rectangle(image, (pred_bbox[0], pred_bbox[1]), (pred_bbox[2], pred_bbox[3]), (0, 0, 255), 5)  # RED: prediction

                    x_pos = int(image.shape[1] * 0.05)
                    y_pos = int(image.shape[0] * 0.05)
                    font_size = 0.7

                    cv2.putText(image, f"IoU: {iou:.4f}", (x_pos, y_pos), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255, 0, 0), 2)
                    cv2.putText(image, f"Label: {label}", (x_pos, y_pos + 30), cv2.FONT_HERSHEY_SIMPLEX, font_size, (255, 255, 255), 2)

                    st.image(image, channels="BGR")
                else:
                    st.write("No bounding box and label found for this image.")

        with col3:
            if st.button('Generate Grad-CAM'):
                gradcam_model = load_gradcam_model()
                # Compute and show Grad-CAM
                st.write("Generating Grad-CAM visualizations")
                compute_gradcam(gradcam_model, uploaded_file)