Margerie committed
Commit 59d4c6b
1 Parent(s): 077202c

First upload

Files changed (4)
  1. dicom_to_nii.py +533 -0
  2. nii_to_dicom.py +570 -0
  3. predict_new.py +209 -0
  4. predict_nnunet.py +32 -0
dicom_to_nii.py ADDED
@@ -0,0 +1,533 @@
1
+ import pydicom
2
+ import sys
3
+ import os
4
+ import numpy as np
5
+ import nibabel as nib
6
+ import scipy.ndimage
+ import scipy.interpolate
7
+
8
+
9
+ def convert_transform_mr_to_nii(dir_mr_dicom, tranform_mr, dir_nii, outputname, CT):
10
+ Patients = PatientList()
11
+ Patients.list_dicom_files(dir_mr_dicom, 1)
12
+ patient = Patients.list[0]
13
+ patient_name = patient.PatientInfo.PatientName
14
+ patient.import_patient_data(CT.PixelSpacing)
15
+ MR = patient.MRimages[0]
16
+ image_position_patient = CT.ImagePositionPatient
17
+ voxelsize = np.array(CT.PixelSpacing)
18
+ save_images(dst_dir=os.path.join(dir_nii), voxelsize=voxelsize,
19
+ image_position_patient=image_position_patient, image=tranform_mr.imageArray, outputname=outputname)
20
+ return MR
21
+
22
+ def convert_mr_dicom_to_nii(dir_dicom, dir_nii, outputname, newvoxelsize=None):
23
+ Patients = PatientList() # initialize list of patient data
24
+ # search dicom files in the patient data folder, stores all files in the attributes (all CT images, dose file, struct file)
25
+ Patients.list_dicom_files(dir_dicom, 1)
26
+ patient = Patients.list[0]
27
+ patient_name = patient.PatientInfo.PatientName
28
+ patient.import_patient_data(newvoxelsize)
29
+ MR = patient.MRimages[0]
30
+ image_position_patient = MR.ImagePositionPatient
31
+ voxelsize = np.array(MR.PixelSpacing)
32
+ save_images(dst_dir=os.path.join(dir_nii), voxelsize=voxelsize,
33
+ image_position_patient=image_position_patient, image=MR.Image, outputname=outputname)
34
+ return MR
35
+
36
+
37
+ def convert_ct_dicom_to_nii(dir_dicom, dir_nii, outputname, newvoxelsize=None):
38
+ Patients = PatientList() # initialize list of patient data
39
+ # search dicom files in the patient data folder, stores all files in the attributes (all CT images, dose file, struct file)
40
+ Patients.list_dicom_files(dir_dicom, 1)
41
+ patient = Patients.list[0]
42
+ patient_name = patient.PatientInfo.PatientName
43
+ patient.import_patient_data(newvoxelsize)
44
+ CT = patient.CTimages[0]
45
+ image_position_patient = CT.ImagePositionPatient
46
+ voxelsize = np.array(CT.PixelSpacing)
47
+ save_images(dst_dir=os.path.join(dir_nii), voxelsize=voxelsize,
48
+ image_position_patient=image_position_patient, image=CT.Image, outputname=outputname)
49
+ return CT
50
+
51
+
52
+ def save_images(dst_dir, voxelsize, image_position_patient, image, outputname):
53
+
54
+ # encode in nii and save at dst_dir
55
+ # IMPORTANT I NEED TO CONFIRM THE SIGNS OF THE ENTRIES IN THE AFFINE,
56
+ # ALTHOUGH MAYBE AT THE END THE IMPORTANCE IS HOW WE WILL USE THIS DATA ....
57
+ # also instead of changing field by field, the pixdim and affine can be encoded
58
+ # using the set_sform method --> info here: https://nipy.org/nibabel/nifti_images.html
59
+
60
+ # IMAGE (CT, MR ...)
61
+ image_shape = image.shape
62
+ # Separate Conversion from preprocessing
63
+ # image = overwrite_ct_threshold(image)
64
+ # for Nifti1 header, change for a Nifti2 type of header
65
+ image_nii = nib.Nifti1Image(image, affine=np.eye(4))
66
+ # Update header fields
67
+ image_nii = set_header_info(image_nii, voxelsize, image_position_patient)
68
+
69
+ # Save nii
70
+ nib.save(image_nii, os.path.join(dst_dir, outputname))
71
+
72
+ # nib.save(image_nii, os.path.join(dst_dir, 'ct.nii.gz'))
73
+
74
+
75
+ # def overwrite_ct_threshold(ct_image, body, artefact=None, contrast=None):
76
+ # # Change the HU out of the body to air: -1000
77
+ # ct_image[body == 0] = -1000
78
+ # if artefact is not None:
79
+ # # Change the HU to muscle: 14
80
+ # ct_image[artefact == 1] = 14
81
+ # if contrast is not None:
82
+ # # Change the HU to water: 0 Hounsfield Unit: CT unit
83
+ # ct_image[contrast == 1] = 0
84
+ # # Threshold above 1560HU
85
+ # ct_image[ct_image > 1560] = 1560
86
+ # return ct_image
87
+
88
+
89
+ def set_header_info(nii_file, voxelsize, image_position_patient, contours_exist=None):
90
+ nii_file.header['pixdim'][1] = voxelsize[0]
91
+ nii_file.header['pixdim'][2] = voxelsize[1]
92
+ nii_file.header['pixdim'][3] = voxelsize[2]
93
+
94
+ # affine - voxelsize
95
+ nii_file.affine[0][0] = voxelsize[0]
96
+ nii_file.affine[1][1] = voxelsize[1]
97
+ nii_file.affine[2][2] = voxelsize[2]
98
+ # affine - imagecorner
99
+ nii_file.affine[0][3] = image_position_patient[0]
100
+ nii_file.affine[1][3] = image_position_patient[1]
101
+ nii_file.affine[2][3] = image_position_patient[2]
102
+ if contours_exist:
103
+ nii_file.header.extensions.append(
104
+ nib.nifti1.Nifti1Extension(0, bytearray(contours_exist)))
105
+ return nii_file
106
+
107
+
108
+ class PatientList:
109
+
110
+ def __init__(self):
111
+ self.list = []
112
+
113
+ def find_CT_image(self, display_id):
114
+ count = -1
115
+ for patient_id in range(len(self.list)):
116
+ for ct_id in range(len(self.list[patient_id].CTimages)):
117
+ if (self.list[patient_id].CTimages[ct_id].isLoaded == 1):
118
+ count += 1
119
+ if (count == display_id):
120
+ break
121
+ if (count == display_id):
122
+ break
123
+
124
+ return patient_id, ct_id
125
+
126
+ def find_dose_image(self, display_id):
127
+ count = -1
128
+ for patient_id in range(len(self.list)):
129
+ for dose_id in range(len(self.list[patient_id].RTdoses)):
130
+ if (self.list[patient_id].RTdoses[dose_id].isLoaded == 1):
131
+ count += 1
132
+ if (count == display_id):
133
+ break
134
+ if (count == display_id):
135
+ break
136
+
137
+ return patient_id, dose_id
138
+
139
+ def find_contour(self, ROIName):
140
+ for patient_id in range(len(self.list)):
141
+ for struct_id in range(len(self.list[patient_id].RTstructs)):
142
+ if (self.list[patient_id].RTstructs[struct_id].isLoaded == 1):
143
+ for contour_id in range(len(self.list[patient_id].RTstructs[struct_id].Contours)):
144
+ if (self.list[patient_id].RTstructs[struct_id].Contours[contour_id].ROIName == ROIName):
145
+ return patient_id, struct_id, contour_id
146
+
147
+ def list_dicom_files(self, folder_path, recursive):
148
+ file_list = os.listdir(folder_path)
149
+ # print("len file_list", len(file_list), "folderpath",folder_path)
150
+ for file_name in file_list:
151
+ file_path = os.path.join(folder_path, file_name)
152
+
153
+ # folders
154
+ if os.path.isdir(file_path):
155
+ if recursive == True:
156
+ subfolder_list = self.list_dicom_files(file_path, True)
157
+ # join_patient_lists(Patients, subfolder_list)
158
+
159
+ # files
160
+ elif os.path.isfile(file_path):
161
+
162
+ try:
163
+ dcm = pydicom.dcmread(file_path)
164
+ except Exception:
165
+ print("Invalid Dicom file: " + file_path)
166
+ continue
167
+
168
+ patient_id = next((x for x, val in enumerate(
169
+ self.list) if val.PatientInfo.PatientID == dcm.PatientID), -1)
170
+
171
+ if patient_id == -1:
172
+ Patient = PatientData()
173
+ Patient.PatientInfo.PatientID = dcm.PatientID
174
+ Patient.PatientInfo.PatientName = str(dcm.PatientName)
175
+ Patient.PatientInfo.PatientBirthDate = dcm.PatientBirthDate
176
+ Patient.PatientInfo.PatientSex = dcm.PatientSex
177
+ self.list.append(Patient)
178
+ patient_id = len(self.list) - 1
179
+
180
+ # Dicom CT
181
+ if dcm.SOPClassUID == "1.2.840.10008.5.1.4.1.1.2":
182
+ ct_id = next((x for x, val in enumerate(
183
+ self.list[patient_id].CTimages) if val.SeriesInstanceUID == dcm.SeriesInstanceUID), -1)
184
+ if ct_id == -1:
185
+ CT = CTimage()
186
+ CT.SeriesInstanceUID = dcm.SeriesInstanceUID
187
+ CT.SOPClassUID = "1.2.840.10008.5.1.4.1.1.2"
188
+ CT.PatientInfo = self.list[patient_id].PatientInfo
189
+ CT.StudyInfo = StudyInfo()
190
+ CT.StudyInfo.StudyInstanceUID = dcm.StudyInstanceUID
191
+ CT.StudyInfo.StudyID = dcm.StudyID
192
+ CT.StudyInfo.StudyDate = dcm.StudyDate
193
+ CT.StudyInfo.StudyTime = dcm.StudyTime
194
+ if (hasattr(dcm, 'SeriesDescription') and dcm.SeriesDescription != ""):
195
+ CT.ImgName = dcm.SeriesDescription
196
+ else:
197
+ CT.ImgName = dcm.SeriesInstanceUID
198
+ self.list[patient_id].CTimages.append(CT)
199
+ ct_id = len(self.list[patient_id].CTimages) - 1
200
+
201
+ self.list[patient_id].CTimages[ct_id].DcmFiles.append(
202
+ file_path)
203
+ elif dcm.SOPClassUID == "1.2.840.10008.5.1.4.1.1.4":
204
+ mr_id = next((x for x, val in enumerate(self.list[patient_id].MRimages) if val.SeriesInstanceUID == dcm.SeriesInstanceUID), -1)
205
+ if mr_id == -1:
206
+ MR = MRimage()
207
+ MR.SeriesInstanceUID = dcm.SeriesInstanceUID
208
+ MR.SOPClassUID = "1.2.840.10008.5.1.4.1.1.4"
209
+ MR.PatientInfo = self.list[patient_id].PatientInfo
210
+ MR.StudyInfo = StudyInfo()
211
+ MR.StudyInfo.StudyInstanceUID = dcm.StudyInstanceUID
212
+ MR.StudyInfo.StudyID = dcm.StudyID
213
+ MR.StudyInfo.StudyDate = dcm.StudyDate
214
+ MR.StudyInfo.StudyTime = dcm.StudyTime
215
+ if(hasattr(dcm, 'SeriesDescription') and dcm.SeriesDescription != ""): MR.ImgName = dcm.SeriesDescription
216
+ else: MR.ImgName = dcm.SeriesInstanceUID
217
+ self.list[patient_id].MRimages.append(MR)
218
+ mr_id = len(self.list[patient_id].MRimages) - 1
219
+
220
+ self.list[patient_id].MRimages[mr_id].DcmFiles.append(file_path)
221
+ else:
222
+ print("Unknown SOPClassUID " +
223
+ dcm.SOPClassUID + " for file " + file_path)
224
+ # other
225
+ else:
226
+ print("Unknown file type " + file_path)
227
+
228
+ def print_patient_list(self):
229
+ print("")
230
+ for patient in self.list:
231
+ patient.print_patient_info()
232
+
233
+ print("")
234
+
235
+
236
+ class PatientData:
237
+
238
+ def __init__(self):
239
+ self.PatientInfo = PatientInfo()
240
+ self.CTimages = []
241
+ self.MRimages = []
242
+
243
+ def print_patient_info(self, prefix=""):
244
+ print("")
245
+ print(prefix + "PatientName: " + self.PatientInfo.PatientName)
246
+ print(prefix + "PatientID: " + self.PatientInfo.PatientID)
247
+
248
+ for ct in self.CTimages:
249
+ print("")
250
+ ct.print_CT_info(prefix + " ")
251
+
252
+ for mr in self.MRimages:
253
+ print("")
254
+ mr.print_MR_info(prefix + " ")
255
+
256
+ def import_patient_data(self, newvoxelsize=None):
257
+ # import CT images
258
+ for i, ct in enumerate(self.CTimages):
259
+ if (ct.isLoaded == 1):
260
+ continue
261
+ ct.import_Dicom_CT()
262
+ # Resample CT images
263
+ for i, ct in enumerate(self.CTimages):
264
+ ct.resample_CT(newvoxelsize)
265
+
266
+ # import MR images
267
+ for i, mr in enumerate(self.MRimages):
268
+ if (mr.isLoaded == 1):
269
+ continue
270
+ mr.import_Dicom_MR(newvoxelsize)
271
+ # Resample MR images
272
+ # for i,mr in enumerate(self.MRimages):
273
+ # mr.resample_MR(newvoxelsize)
274
+
275
+ class PatientInfo:
276
+
277
+ def __init__(self):
278
+ self.PatientID = ''
279
+ self.PatientName = ''
280
+ self.PatientBirthDate = ''
281
+ self.PatientSex = ''
282
+
283
+
284
+ class StudyInfo:
285
+
286
+ def __init__(self):
287
+ self.StudyInstanceUID = ''
288
+ self.StudyID = ''
289
+ self.StudyDate = ''
290
+ self.StudyTime = ''
291
+
292
+
293
+ class CTimage:
294
+
295
+ def __init__(self):
296
+ self.SeriesInstanceUID = ""
297
+ self.PatientInfo = {}
298
+ self.StudyInfo = {}
299
+ self.FrameOfReferenceUID = ""
300
+ self.ImgName = ""
301
+ self.SOPClassUID = ""
302
+ self.DcmFiles = []
303
+ self.isLoaded = 0
304
+
305
+ def print_CT_info(self, prefix=""):
306
+ print(prefix + "CT series: " + self.SeriesInstanceUID)
307
+ for ct_slice in self.DcmFiles:
308
+ print(prefix + " " + ct_slice)
309
+
310
+ def resample_CT(self, newvoxelsize):
311
+ ct = self.Image
312
+ # Rescaling to the newvoxelsize if given in parameter
313
+ if newvoxelsize is not None:
314
+ source_shape = self.GridSize
315
+ voxelsize = self.PixelSpacing
316
+ # print("self.ImagePositionPatient",self.ImagePositionPatient, "source_shape",source_shape,"voxelsize",voxelsize)
317
+ VoxelX_source = self.ImagePositionPatient[0] + \
318
+ np.arange(source_shape[0])*voxelsize[0]
319
+ VoxelY_source = self.ImagePositionPatient[1] + \
320
+ np.arange(source_shape[1])*voxelsize[1]
321
+ VoxelZ_source = self.ImagePositionPatient[2] + \
322
+ np.arange(source_shape[2])*voxelsize[2]
323
+
324
+ target_shape = np.ceil(np.array(source_shape).astype(
325
+ float)*np.array(voxelsize).astype(float)/newvoxelsize).astype(int)
326
+ VoxelX_target = self.ImagePositionPatient[0] + \
327
+ np.arange(target_shape[0])*newvoxelsize[0]
328
+ VoxelY_target = self.ImagePositionPatient[1] + \
329
+ np.arange(target_shape[1])*newvoxelsize[1]
330
+ VoxelZ_target = self.ImagePositionPatient[2] + \
331
+ np.arange(target_shape[2])*newvoxelsize[2]
332
+ # print("source_shape",source_shape,"target_shape",target_shape)
333
+ if (all(source_shape == target_shape) and np.linalg.norm(np.subtract(voxelsize, newvoxelsize)) < 0.001):
334
+ print("Image does not need filtering")
335
+ else:
336
+ # anti-aliasing filter
337
+ sigma = [0, 0, 0]
338
+ if (newvoxelsize[0] > voxelsize[0]):
339
+ sigma[0] = 0.4 * (newvoxelsize[0]/voxelsize[0])
340
+ if (newvoxelsize[1] > voxelsize[1]):
341
+ sigma[1] = 0.4 * (newvoxelsize[1]/voxelsize[1])
342
+ if (newvoxelsize[2] > voxelsize[2]):
343
+ sigma[2] = 0.4 * (newvoxelsize[2]/voxelsize[2])
344
+
345
+ if (sigma != [0, 0, 0]):
346
+ print("Image is filtered before downsampling")
347
+ ct = scipy.ndimage.gaussian_filter(ct, sigma)
348
+
349
+ xi = np.array(np.meshgrid(
350
+ VoxelX_target, VoxelY_target, VoxelZ_target))
351
+ xi = np.rollaxis(xi, 0, 4)
352
+ xi = xi.reshape((xi.size // 3, 3))
353
+
354
+ # get resized ct
355
+ ct = scipy.interpolate.interpn((VoxelX_source, VoxelY_source, VoxelZ_source), ct, xi, method='linear',
356
+ fill_value=-1000, bounds_error=False).reshape(target_shape).transpose(1, 0, 2)
357
+
358
+ self.PixelSpacing = newvoxelsize
359
+ self.GridSize = list(ct.shape)
360
+ self.NumVoxels = self.GridSize[0] * self.GridSize[1] * self.GridSize[2]
361
+ self.Image = ct
362
+ # print("self.ImagePositionPatient",self.ImagePositionPatient, "self.GridSize[0]",self.GridSize[0],"self.PixelSpacing",self.PixelSpacing)
363
+
364
+ self.VoxelX = self.ImagePositionPatient[0] + \
365
+ np.arange(self.GridSize[0])*self.PixelSpacing[0]
366
+ self.VoxelY = self.ImagePositionPatient[1] + \
367
+ np.arange(self.GridSize[1])*self.PixelSpacing[1]
368
+ self.VoxelZ = self.ImagePositionPatient[2] + \
369
+ np.arange(self.GridSize[2])*self.PixelSpacing[2]
370
+ self.isLoaded = 1
371
+
372
+ def import_Dicom_CT(self):
373
+
374
+ if (self.isLoaded == 1):
375
+ print("Warning: CT series " +
376
+ self.SeriesInstanceUID + " is already loaded")
377
+ return
378
+
379
+ images = []
380
+ SOPInstanceUIDs = []
381
+ SliceLocation = np.zeros(len(self.DcmFiles), dtype='float')
382
+
383
+ for i in range(len(self.DcmFiles)):
384
+ file_path = self.DcmFiles[i]
385
+ dcm = pydicom.dcmread(file_path)
386
+
387
+ if (hasattr(dcm, 'SliceLocation') and abs(dcm.SliceLocation - dcm.ImagePositionPatient[2]) > 0.001):
388
+ print("WARNING: SliceLocation (" + str(dcm.SliceLocation) +
389
+ ") is different than ImagePositionPatient[2] (" + str(dcm.ImagePositionPatient[2]) + ") for " + file_path)
390
+
391
+ SliceLocation[i] = float(dcm.ImagePositionPatient[2])
392
+ images.append(dcm.pixel_array * dcm.RescaleSlope +
393
+ dcm.RescaleIntercept)
394
+ SOPInstanceUIDs.append(dcm.SOPInstanceUID)
395
+
396
+ # sort slices according to their location in order to reconstruct the 3d image
397
+ sort_index = np.argsort(SliceLocation)
398
+ SliceLocation = SliceLocation[sort_index]
399
+ SOPInstanceUIDs = [SOPInstanceUIDs[n] for n in sort_index]
400
+ images = [images[n] for n in sort_index]
401
+ ct = np.dstack(images).astype("float32")
402
+
403
+ if ct.shape[0:2] != (dcm.Rows, dcm.Columns):
404
+ print("WARNING: GridSize " + str(ct.shape[0:2]) + " different from Dicom Rows (" + str(
405
+ dcm.Rows) + ") and Columns (" + str(dcm.Columns) + ")")
406
+
407
+ MeanSliceDistance = (
408
+ SliceLocation[-1] - SliceLocation[0]) / (len(images)-1)
409
+ if (abs(MeanSliceDistance - dcm.SliceThickness) > 0.001):
410
+ print("WARNING: MeanSliceDistance (" + str(MeanSliceDistance) +
411
+ ") is different from SliceThickness (" + str(dcm.SliceThickness) + ")")
412
+
413
+ self.FrameOfReferenceUID = dcm.FrameOfReferenceUID
414
+ self.ImagePositionPatient = [float(dcm.ImagePositionPatient[0]), float(
415
+ dcm.ImagePositionPatient[1]), SliceLocation[0]]
416
+ self.PixelSpacing = [float(dcm.PixelSpacing[0]), float(
417
+ dcm.PixelSpacing[1]), MeanSliceDistance]
418
+ self.GridSize = list(ct.shape)
419
+ self.NumVoxels = self.GridSize[0] * self.GridSize[1] * self.GridSize[2]
420
+ self.Image = ct
421
+ self.SOPInstanceUIDs = SOPInstanceUIDs
422
+ self.VoxelX = self.ImagePositionPatient[0] + \
423
+ np.arange(self.GridSize[0])*self.PixelSpacing[0]
424
+ self.VoxelY = self.ImagePositionPatient[1] + \
425
+ np.arange(self.GridSize[1])*self.PixelSpacing[1]
426
+ self.VoxelZ = self.ImagePositionPatient[2] + \
427
+ np.arange(self.GridSize[2])*self.PixelSpacing[2]
428
+ self.isLoaded = 1
429
+
430
+ class MRimage:
431
+
432
+ def __init__(self):
433
+ self.SeriesInstanceUID = ""
434
+ self.PatientInfo = {}
435
+ self.StudyInfo = {}
436
+ self.FrameOfReferenceUID = ""
437
+ self.ImgName = ""
438
+ self.SOPClassUID = ""
439
+
440
+ self.DcmFiles = []
441
+ self.isLoaded = 0
442
+
443
+
444
+
445
+ def print_MR_info(self, prefix=""):
446
+ print(prefix + "MR series: " + self.SeriesInstanceUID)
447
+ for mr_slice in self.DcmFiles:
448
+ print(prefix + " " + mr_slice)
449
+
450
+ def import_Dicom_MR(self, newvoxelsize):
451
+
452
+ if(self.isLoaded == 1):
453
+ print("Warning: MR series " + self.SeriesInstanceUID + " is already loaded")
454
+ return
455
+
456
+ images = []
457
+ SOPInstanceUIDs = []
458
+ SliceLocation = np.zeros(len(self.DcmFiles), dtype='float')
459
+
460
+ for i in range(len(self.DcmFiles)):
461
+ file_path = self.DcmFiles[i]
462
+ dcm = pydicom.dcmread(file_path)
463
+
464
+ if(hasattr(dcm, 'SliceLocation') and abs(dcm.SliceLocation - dcm.ImagePositionPatient[2]) > 0.001):
465
+ print("WARNING: SliceLocation (" + str(dcm.SliceLocation) + ") is different than ImagePositionPatient[2] (" + str(dcm.ImagePositionPatient[2]) + ") for " + file_path)
466
+
467
+ SliceLocation[i] = float(dcm.ImagePositionPatient[2])
468
+ images.append(dcm.pixel_array)# * dcm.RescaleSlope + dcm.RescaleIntercept)
469
+ SOPInstanceUIDs.append(dcm.SOPInstanceUID)
470
+
471
+ # sort slices according to their location in order to reconstruct the 3d image
472
+ sort_index = np.argsort(SliceLocation)
473
+ SliceLocation = SliceLocation[sort_index]
474
+ SOPInstanceUIDs = [SOPInstanceUIDs[n] for n in sort_index]
475
+ images = [images[n] for n in sort_index]
476
+ mr = np.dstack(images).astype("float32")
477
+
478
+ if mr.shape[0:2] != (dcm.Rows, dcm.Columns):
479
+ print("WARNING: GridSize " + str(mr.shape[0:2]) + " different from Dicom Rows (" + str(dcm.Rows) + ") and Columns (" + str(dcm.Columns) + ")")
480
+
481
+ MeanSliceDistance = (SliceLocation[-1] - SliceLocation[0]) / (len(images)-1)
482
+ if(abs(MeanSliceDistance - dcm.SliceThickness) > 0.001):
483
+ print("WARNING: MeanSliceDistance (" + str(MeanSliceDistance) + ") is different from SliceThickness (" + str(dcm.SliceThickness) + ")")
484
+
485
+ # Rescaling to the newvoxelsize if given in parameter
486
+ if newvoxelsize is not None:
487
+ source_shape = list(mr.shape)
488
+
489
+ voxelsize = [float(dcm.PixelSpacing[0]), float(dcm.PixelSpacing[1]), MeanSliceDistance]
490
+ VoxelX_source = dcm.ImagePositionPatient[0] + np.arange(source_shape[0])*voxelsize[0]
491
+ VoxelY_source = dcm.ImagePositionPatient[1] + np.arange(source_shape[1])*voxelsize[1]
492
+ VoxelZ_source = dcm.ImagePositionPatient[2] + np.arange(source_shape[2])*voxelsize[2]
493
+
494
+ target_shape = np.ceil(np.array(source_shape).astype(float)*np.array(voxelsize).astype(float)/newvoxelsize).astype(int)
495
+ VoxelX_target = dcm.ImagePositionPatient[0] + np.arange(target_shape[0])*newvoxelsize[0]
496
+ VoxelY_target = dcm.ImagePositionPatient[1] + np.arange(target_shape[1])*newvoxelsize[1]
497
+ VoxelZ_target = dcm.ImagePositionPatient[2] + np.arange(target_shape[2])*newvoxelsize[2]
498
+
499
+ if(all(source_shape == target_shape) and np.linalg.norm(np.subtract(voxelsize, newvoxelsize)) < 0.001):
500
+ print("Image does not need filtering")
501
+ else:
502
+ # anti-aliasing filter
503
+ sigma = [0, 0, 0]
504
+ if(newvoxelsize[0] > voxelsize[0]): sigma[0] = 0.4 * (newvoxelsize[0]/voxelsize[0])
505
+ if(newvoxelsize[1] > voxelsize[1]): sigma[1] = 0.4 * (newvoxelsize[1]/voxelsize[1])
506
+ if(newvoxelsize[2] > voxelsize[2]): sigma[2] = 0.4 * (newvoxelsize[2]/voxelsize[2])
507
+
508
+ if(sigma != [0, 0, 0]):
509
+ print("Image is filtered before downsampling")
510
+ mr = scipy.ndimage.gaussian_filter(mr, sigma)
511
+ else:
512
+ print("Image does not need filtering")
513
+
514
+
515
+ xi = np.array(np.meshgrid(VoxelX_target, VoxelY_target, VoxelZ_target))
516
+ xi = np.rollaxis(xi, 0, 4)
517
+ xi = xi.reshape((xi.size // 3, 3))
518
+
519
+ # get resized ct
520
+ mr = scipy.interpolate.interpn((VoxelX_source,VoxelY_source,VoxelZ_source), mr, xi, method='linear', fill_value=0, bounds_error=False).reshape(target_shape).transpose(1,0,2)
521
+
522
+
523
+ self.FrameOfReferenceUID = dcm.FrameOfReferenceUID
524
+ self.ImagePositionPatient = [float(dcm.ImagePositionPatient[0]), float(dcm.ImagePositionPatient[1]), SliceLocation[0]]
525
+ self.PixelSpacing = [float(dcm.PixelSpacing[0]), float(dcm.PixelSpacing[1]), MeanSliceDistance] if newvoxelsize is None else newvoxelsize
526
+ self.GridSize = list(mr.shape)
527
+ self.NumVoxels = self.GridSize[0] * self.GridSize[1] * self.GridSize[2]
528
+ self.Image = mr
529
+ self.SOPInstanceUIDs = SOPInstanceUIDs
530
+ self.VoxelX = self.ImagePositionPatient[0] + np.arange(self.GridSize[0])*self.PixelSpacing[0]
531
+ self.VoxelY = self.ImagePositionPatient[1] + np.arange(self.GridSize[1])*self.PixelSpacing[1]
532
+ self.VoxelZ = self.ImagePositionPatient[2] + np.arange(self.GridSize[2])*self.PixelSpacing[2]
533
+ self.isLoaded = 1
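A minimal usage sketch for dicom_to_nii.py (not part of the commit; the paths below are placeholders and the output folder is assumed to exist):

    from dicom_to_nii import convert_ct_dicom_to_nii

    # Convert a CT DICOM series to NIfTI, resampled to 1 mm isotropic voxels
    refCT = convert_ct_dicom_to_nii(
        dir_dicom='/data/patient01/ct_dicom',   # folder containing the CT slices (placeholder)
        dir_nii='/data/patient01/nii',          # output folder (placeholder)
        outputname='ct.nii.gz',
        newvoxelsize=[1.0, 1.0, 1.0])           # pass None to keep the native spacing
    print(refCT.GridSize, refCT.PixelSpacing)   # the returned CTimage keeps the resampled grid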
nii_to_dicom.py ADDED
@@ -0,0 +1,570 @@
1
+ import nibabel as nib
2
+ import pydicom
3
+ import os
4
+ import glob
5
+ import numpy as np
6
+ from copy import deepcopy
7
+ from matplotlib.patches import Polygon
8
+ import warnings
9
+ from scipy.ndimage import find_objects
10
+ from scipy.ndimage import binary_fill_holes
11
+ from skimage import measure
12
+ from PIL import Image, ImageDraw
13
+ import scipy.ndimage
+ import scipy.interpolate
14
+ import datetime
15
+ from dicom_to_nii import set_header_info
16
+
17
+ def convert_nii_to_dicom(dicomctdir, predictedNiiFile, predictedDicomFile, predicted_structures=[], rtstruct_colors=[], refCT = None):
18
+ # img = nib.load(os.path.join(predniidir, patient_id, 'RTStruct.nii.gz'))
19
+ # data = img.get_fdata()[:,:,:,1]
20
+ # patient_list = PatientList() # initialize list of patient data
21
+ # patient_list.list_dicom_files(os.path.join(ct_ref_path,patient,inner_ct_ref_path), 1) # search dicom files in the patient data folder, stores all files in the attributes (all CT images, dose file, struct file)
22
+ # refCT = patient_list.list[0].CTimages[0]
23
+ # refCT.import_Dicom_CT()
24
+
25
+ struct = RTstruct()
26
+ struct.load_from_nii(predictedNiiFile, predicted_structures, rtstruct_colors) #TODO add already the refCT info in here because there are fields to do that
27
+ if not struct.Contours[0].Mask_PixelSpacing == refCT.PixelSpacing:
28
+ struct.resample_struct(refCT.PixelSpacing)
29
+ struct.export_Dicom(refCT, predictedDicomFile)
30
+
31
+ # create_RT_struct(dicomctdir, data.transpose([1,0,2]).astype(int), dicomdir, predicted_structures)
32
+
33
+ def integer_to_onehot(niiFile):
34
+
35
+ # get contours in nnunet format
36
+ nnunet_integer_nib = nib.load(niiFile)
37
+ nnunet_integer_data = nnunet_integer_nib.get_fdata()
38
+
39
+ # convert to onehot encoding (2**i)
40
+ onehot_data = np.zeros(nnunet_integer_data.shape)
41
+ for i in np.unique(nnunet_integer_data):
42
+ onehot_data[nnunet_integer_data == i] = 2**i
43
+
44
+ # get contours_exist
45
+ contours_exist = np.ones(len(np.unique(onehot_data))).astype(bool).tolist()
46
+ #contours_exist = np.ones(len(np.unique(onehot_data))-1).astype(bool) # -1 to remove the background which we don't want
47
+ # save it back to nii format (will overwrite the predicted file - integer format - with this one - onehot format -)
48
+ image_nii = nib.Nifti1Image(onehot_data, affine=np.eye(4)) # for Nifti1 header, change for a Nifti2 type of header
49
+ # Update header fields
50
+ image_nii = set_header_info(image_nii, nnunet_integer_nib.header['pixdim'][1:4], [nnunet_integer_nib.header['qoffset_x'],nnunet_integer_nib.header['qoffset_y'],nnunet_integer_nib.header['qoffset_z']], contours_exist = contours_exist)
51
+ # Save nii
52
+ nib.save(image_nii,niiFile) #overwrites old file
53
+
54
+ return
55
+
56
+ def save_nii_image(nib_img, nib_header,dst_dir, dst_filename, contours_exist = None):
57
+
58
+ image_nii = nib.Nifti1Image(nib_img, affine=np.eye(4)) # for Nifti1 header, change for a Nifti2 type of header
59
+ # Update header fields
60
+ if contours_exist is not None:
61
+ image_nii = set_header_info(image_nii, nib_header['pixdim'][1:4], [nib_header['qoffset_x'],nib_header['qoffset_y'],nib_header['qoffset_z']], contours_exist = contours_exist)
62
+ else:
63
+ image_nii = set_header_info(image_nii, nib_header['pixdim'][1:4], [nib_header['qoffset_x'],nib_header['qoffset_y'],nib_header['qoffset_z']])
64
+ # Save nii
65
+ nib.save(image_nii, os.path.join(dst_dir,dst_filename))
66
+
67
+ def Taubin_smoothing(contour):
68
+ """ Here, we do smoothing in 2D contours!
69
+ Parameters:
70
+ a Nx2 numpy array containing the contour to smooth
71
+ Returns:
72
+ a Nx2 numpy array containing the smoothed contour """
73
+ smoothingloops = 5
74
+ smoothed = [np.empty_like(contour) for i in range(smoothingloops+1)]
75
+ smoothed[0] = contour
76
+ for i in range(smoothingloops):
77
+ # loop over all elements in the contour
78
+ for vertex_i in range(smoothed[0].shape[0]):
79
+ if vertex_i == 0:
80
+ vertex_prev = smoothed[i].shape[0]-1
81
+ vertex_next = vertex_i+1
82
+ elif vertex_i == smoothed[i].shape[0]-1:
83
+ vertex_prev = vertex_i-1
84
+ vertex_next = 0
85
+ else:
86
+ vertex_prev = vertex_i -1
87
+ vertex_next = vertex_i +1
88
+ neighbours_x = np.array([smoothed[i][vertex_prev,0], smoothed[i][vertex_next,0]])
89
+ neighbours_y = np.array([smoothed[i][vertex_prev,1], smoothed[i][vertex_next,1]])
90
+ smoothed[i+1][vertex_i,0] = smoothed[i][vertex_i,0] - 0.3*(smoothed[i][vertex_i,0] - np.mean(neighbours_x))
91
+ smoothed[i+1][vertex_i,1] = smoothed[i][vertex_i,1] - 0.3*(smoothed[i][vertex_i,1] - np.mean(neighbours_y))
92
+
93
+ return np.round(smoothed[smoothingloops],3)
94
+
95
+ class RTstruct:
96
+
97
+ def __init__(self):
98
+ self.SeriesInstanceUID = ""
99
+ self.PatientInfo = {}
100
+ self.StudyInfo = {}
101
+ self.CT_SeriesInstanceUID = ""
102
+ self.DcmFile = ""
103
+ self.isLoaded = 0
104
+ self.Contours = []
105
+ self.NumContours = 0
106
+
107
+
108
+ def print_struct_info(self, prefix=""):
109
+ print(prefix + "Struct: " + self.SeriesInstanceUID)
110
+ print(prefix + " " + self.DcmFile)
111
+
112
+
113
+ def print_ROINames(self):
114
+ print("RT Struct UID: " + self.SeriesInstanceUID)
115
+ count = -1
116
+ for contour in self.Contours:
117
+ count += 1
118
+ print(' [' + str(count) + '] ' + contour.ROIName)
119
+
120
+ def resample_struct(self, newvoxelsize):
121
+ # Rescaling to the newvoxelsize if given in parameter
122
+ if newvoxelsize is not None:
123
+ for i, Contour in enumerate(self.Contours):
124
+ source_shape = Contour.Mask_GridSize
125
+ voxelsize = Contour.Mask_PixelSpacing
126
+ VoxelX_source = Contour.Mask_Offset[0] + np.arange(source_shape[0])*voxelsize[0]
127
+ VoxelY_source = Contour.Mask_Offset[1] + np.arange(source_shape[1])*voxelsize[1]
128
+ VoxelZ_source = Contour.Mask_Offset[2] + np.arange(source_shape[2])*voxelsize[2]
129
+
130
+ target_shape = np.ceil(np.array(source_shape).astype(float)*np.array(voxelsize).astype(float)/newvoxelsize).astype(int)
131
+ VoxelX_target = Contour.Mask_Offset[0] + np.arange(target_shape[0])*newvoxelsize[0]
132
+ VoxelY_target = Contour.Mask_Offset[1] + np.arange(target_shape[1])*newvoxelsize[1]
133
+ VoxelZ_target = Contour.Mask_Offset[2] + np.arange(target_shape[2])*newvoxelsize[2]
134
+
135
+ contour = Contour.Mask
136
+
137
+ if(all(source_shape == target_shape) and np.linalg.norm(np.subtract(voxelsize, newvoxelsize)) < 0.001):
138
+ print("! Image does not need filtering")
139
+ else:
140
+ # anti-aliasing filter
141
+ sigma = [0, 0, 0]
142
+ if(newvoxelsize[0] > voxelsize[0]): sigma[0] = 0.4 * (newvoxelsize[0]/voxelsize[0])
143
+ if(newvoxelsize[1] > voxelsize[1]): sigma[1] = 0.4 * (newvoxelsize[1]/voxelsize[1])
144
+ if(newvoxelsize[2] > voxelsize[2]): sigma[2] = 0.4 * (newvoxelsize[2]/voxelsize[2])
145
+
146
+ if(sigma != [0, 0, 0]):
147
+ contour = scipy.ndimage.gaussian_filter(contour.astype(float), sigma)
148
+ #come back to binary
149
+ contour[np.where(contour>=0.5)] = 1
150
+ contour[np.where(contour<0.5)] = 0
151
+
152
+ xi = np.array(np.meshgrid(VoxelX_target, VoxelY_target, VoxelZ_target))
153
+ xi = np.rollaxis(xi, 0, 4)
154
+ xi = xi.reshape((xi.size // 3, 3))
155
+
156
+ # get resized ct
157
+ contour = scipy.interpolate.interpn((VoxelX_source,VoxelY_source,VoxelZ_source), contour, xi, method='nearest', fill_value=0, bounds_error=False).astype(bool).reshape(target_shape).transpose(1,0,2)
158
+ Contour.Mask_PixelSpacing = newvoxelsize
159
+ Contour.Mask_GridSize = list(contour.shape)
160
+ Contour.NumVoxels = Contour.Mask_GridSize[0] * Contour.Mask_GridSize[1] * Contour.Mask_GridSize[2]
161
+ Contour.Mask = contour
162
+ self.Contours[i]=Contour
163
+
164
+
165
+ def import_Dicom_struct(self, CT):
166
+ if(self.isLoaded == 1):
167
+ print("Warning: RTstruct " + self.SeriesInstanceUID + " is already loaded")
168
+ return
169
+ dcm = pydicom.dcmread(self.DcmFile)
170
+
171
+ self.CT_SeriesInstanceUID = CT.SeriesInstanceUID
172
+
173
+ for dcm_struct in dcm.StructureSetROISequence:
174
+ ReferencedROI_id = next((x for x, val in enumerate(dcm.ROIContourSequence) if val.ReferencedROINumber == dcm_struct.ROINumber), -1)
175
+ dcm_contour = dcm.ROIContourSequence[ReferencedROI_id]
176
+
177
+ Contour = ROIcontour()
178
+ Contour.SeriesInstanceUID = self.SeriesInstanceUID
179
+ Contour.ROIName = dcm_struct.ROIName
180
+ Contour.ROIDisplayColor = dcm_contour.ROIDisplayColor
181
+
182
+ #print("Import contour " + str(len(self.Contours)) + ": " + Contour.ROIName)
183
+
184
+ Contour.Mask = np.zeros((CT.GridSize[0], CT.GridSize[1], CT.GridSize[2]), dtype=bool)
185
+ Contour.Mask_GridSize = CT.GridSize
186
+ Contour.Mask_PixelSpacing = CT.PixelSpacing
187
+ Contour.Mask_Offset = CT.ImagePositionPatient
188
+ Contour.Mask_NumVoxels = CT.NumVoxels
189
+ Contour.ContourMask = np.zeros((CT.GridSize[0], CT.GridSize[1], CT.GridSize[2]), dtype=bool)
190
+
191
+ SOPInstanceUID_match = 1
192
+
193
+ if not hasattr(dcm_contour, 'ContourSequence'):
194
+ print("This structure has no attribute ContourSequence. Skipping ...")
195
+ continue
196
+
197
+ for dcm_slice in dcm_contour.ContourSequence:
198
+ Slice = {}
199
+
200
+ # list of Dicom coordinates
201
+ Slice["XY_dcm"] = list(zip( np.array(dcm_slice.ContourData[0::3]), np.array(dcm_slice.ContourData[1::3]) ))
202
+ Slice["Z_dcm"] = float(dcm_slice.ContourData[2])
203
+
204
+ # list of coordinates in the image frame
205
+ Slice["XY_img"] = list(zip( ((np.array(dcm_slice.ContourData[0::3]) - CT.ImagePositionPatient[0]) / CT.PixelSpacing[0]), ((np.array(dcm_slice.ContourData[1::3]) - CT.ImagePositionPatient[1]) / CT.PixelSpacing[1]) ))
206
+ Slice["Z_img"] = (Slice["Z_dcm"] - CT.ImagePositionPatient[2]) / CT.PixelSpacing[2]
207
+ Slice["Slice_id"] = int(round(Slice["Z_img"]))
208
+
209
+ # convert polygon to mask (based on matplotlib - slow)
210
+ #x, y = np.meshgrid(np.arange(CT.GridSize[0]), np.arange(CT.GridSize[1]))
211
+ #points = np.transpose((x.ravel(), y.ravel()))
212
+ #path = Path(Slice["XY_img"])
213
+ #mask = path.contains_points(points)
214
+ #mask = mask.reshape((CT.GridSize[0], CT.GridSize[1]))
215
+
216
+ # convert polygon to mask (based on PIL - fast)
217
+ img = Image.new('L', (CT.GridSize[0], CT.GridSize[1]), 0)
218
+ if(len(Slice["XY_img"]) > 1): ImageDraw.Draw(img).polygon(Slice["XY_img"], outline=1, fill=1)
219
+ mask = np.array(img)
220
+ Contour.Mask[:,:,Slice["Slice_id"]] = np.logical_or(Contour.Mask[:,:,Slice["Slice_id"]], mask)
221
+
222
+ # do the same, but only keep contour in the mask
223
+ img = Image.new('L', (CT.GridSize[0], CT.GridSize[1]), 0)
224
+ if(len(Slice["XY_img"]) > 1): ImageDraw.Draw(img).polygon(Slice["XY_img"], outline=1, fill=0)
225
+ mask = np.array(img)
226
+ Contour.ContourMask[:,:,Slice["Slice_id"]] = np.logical_or(Contour.ContourMask[:,:,Slice["Slice_id"]], mask)
227
+
228
+ Contour.ContourSequence.append(Slice)
229
+
230
+ # check if the contour sequence is imported on the correct CT slice:
231
+ if(hasattr(dcm_slice, 'ContourImageSequence') and CT.SOPInstanceUIDs[Slice["Slice_id"]] != dcm_slice.ContourImageSequence[0].ReferencedSOPInstanceUID):
232
+ SOPInstanceUID_match = 0
233
+
234
+ if SOPInstanceUID_match != 1:
235
+ print("WARNING: some SOPInstanceUIDs don't match during importation of " + Contour.ROIName + " contour on CT image")
236
+
237
+ self.Contours.append(Contour)
238
+ self.NumContours += 1
239
+ #print("self.NumContours",self.NumContours, len(self.Contours))
240
+ self.isLoaded = 1
241
+
242
+ def load_from_nii(self, struct_nii_path, rtstruct_labels, rtstruct_colors):
243
+
244
+ # load the nii image
245
+ struct_nib = nib.load(struct_nii_path)
246
+ struct_data = struct_nib.get_fdata()
247
+
248
+ # get contourexists from header
249
+ if len(struct_nib.header.extensions)==0:
250
+ contoursexist = []
251
+ else:
252
+ # TODO ENABLE IN CASE WE DONT HAVE contoursexist TAKE JUST THE LENGTH OF LABELS
253
+ contoursexist = list(struct_nib.header.extensions[0].get_content())
254
+
255
+ # get number of rois in struct_data
256
+ # for nii with consecutive integers
257
+ #roinumbers = np.unique(struct_data)
258
+ # for nii with power of 2 format
259
+ #roinumbers = list(np.arange(np.floor(np.log2(np.max(struct_data))).astype(int)+1)) # CAREFUL WITH THIS LINE, MIGHT NOT WORK ALWAYS IF WE HAVE OVERLAP OF
260
+ #nb_rois_in_struct = len(roinumbers)
261
+
262
+ # check that they match
263
+ if not len(rtstruct_labels) == len(contoursexist) :
264
+ #raise TypeError("The number or struct labels, contoursexist, and masks in struct.nii.gz is not the same")
265
+ # raise Warning("The number or struct labels and contoursexist in struct.nii.gz is not the same. Taking len(contoursexist) as number of rois")
266
+ self.NumContours = len(rtstruct_labels)#len(contoursexist)
267
+ else:
268
+ self.NumContours = len(rtstruct_labels)#len(contoursexist)
269
+ print("num contours", self.NumContours, len(rtstruct_labels) , len(contoursexist))
270
+ # fill in contours
271
+ #TODO fill in ContourSequence and ContourData to be faster later in writeDicomRTstruct
272
+ for c in range(self.NumContours):
273
+
274
+ Contour = ROIcontour()
275
+ Contour.SeriesInstanceUID = self.SeriesInstanceUID
276
+ Contour.ROIName = rtstruct_labels[c]
277
+ if rtstruct_colors[c] is None:
278
+ Contour.ROIDisplayColor = [0, 0, 255] # default color is blue
279
+ else:
280
+ Contour.ROIDisplayColor = rtstruct_colors[c]
281
+ if len(contoursexist)!=0 and contoursexist[c] == 0:
282
+ Contour.Mask = np.zeros((struct_nib.header['dim'][1], struct_nib.header['dim'][2], struct_nib.header['dim'][3]), dtype=np.bool_)
283
+ else:
284
+ Contour.Mask = np.bitwise_and(struct_data.astype(int), 2 ** c).astype(bool)
285
+ #TODO enable option for consecutive integers masks?
286
+ Contour.Mask_GridSize = [struct_nib.header['dim'][1], struct_nib.header['dim'][2], struct_nib.header['dim'][3]]
287
+ Contour.Mask_PixelSpacing = [struct_nib.header['pixdim'][1], struct_nib.header['pixdim'][2], struct_nib.header['pixdim'][3]]
288
+ Contour.Mask_Offset = [struct_nib.header['qoffset_x'], struct_nib.header['qoffset_y'], struct_nib.header['qoffset_z']]
289
+ Contour.Mask_NumVoxels = struct_nib.header['dim'][1].astype(int) * struct_nib.header['dim'][2].astype(int) * struct_nib.header['dim'][3].astype(int)
290
+ # Contour.ContourMask --> this should be only the contour, so far we don't need it so I'll skip it
291
+
292
+ # append to self
293
+ self.Contours.append(Contour)
294
+
295
+
296
+ def export_Dicom(self, refCT, outputFile):
297
+ print("EXPORT DICOM")
298
+ # meta data
299
+
300
+ # generate UID
301
+ #uid_base = '' #TODO define one for us if we want? Siri is using: uid_base='1.2.826.0.1.3680043.10.230.',
302
+ # personal UID, applied for via https://www.medicalconnections.co.uk/FreeUID/
303
+
304
+ SOPInstanceUID = pydicom.uid.generate_uid() #TODO verify this! Siri was using a uid_base, this line is taken from OpenTPS writeRTPlan
305
+ #SOPInstanceUID = pydicom.uid.generate_uid('1.2.840.10008.5.1.4.1.1.481.3.') # siri's version
306
+
307
+ meta = pydicom.dataset.FileMetaDataset()
308
+ meta.MediaStorageSOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3' # UID class for RTSTRUCT
309
+ meta.MediaStorageSOPInstanceUID = SOPInstanceUID
310
+ # meta.ImplementationClassUID = uid_base + '1.1.1' # Siri's
311
+ meta.ImplementationClassUID = '1.2.250.1.59.3.0.3.5.0' # from OpenREGGUI
312
+ meta.TransferSyntaxUID = '1.2.840.10008.1.2' # Siri's and OpenREGGUI
313
+ meta.FileMetaInformationGroupLength = 188 # from Siri
314
+ # meta.ImplementationVersionName = 'DCIE 2.2' # from Siri
315
+
316
+
317
+ # Main data elements - only required fields, optional fields like StudyDescription are not included for simplicity
318
+ ds = pydicom.dataset.FileDataset(outputFile, {}, file_meta=meta, preamble=b"\0" * 128) # preamble is taken from this example https://pydicom.github.io/pydicom/dev/auto_examples/input_output/plot_write_dicom.html#sphx-glr-auto-examples-input-output-plot-write-dicom-py
319
+
320
+ # Patient info - will take it from the referenced CT image
321
+ ds.PatientName = refCT.PatientInfo.PatientName
322
+ ds.PatientID = refCT.PatientInfo.PatientID
323
+ ds.PatientBirthDate = refCT.PatientInfo.PatientBirthDate
324
+ ds.PatientSex = refCT.PatientInfo.PatientSex
325
+
326
+ # General Study
327
+ dt = datetime.datetime.now()
328
+ ds.StudyDate = dt.strftime('%Y%m%d')
329
+ ds.StudyTime = dt.strftime('%H%M%S.%f')
330
+ ds.AccessionNumber = '1' # A RIS/PACS (Radiology Information System/picture archiving and communication system) generated number that identifies the order for the Study.
331
+ ds.ReferringPhysicianName = 'NA'
332
+ ds.StudyInstanceUID = refCT.StudyInfo.StudyInstanceUID # get from reference CT to indicate that they belong to the same study
333
+ ds.StudyID = refCT.StudyInfo.StudyID # get from reference CT to indicate that they belong to the same study
334
+
335
+ # RT Series
336
+ #ds.SeriesDate # optional
337
+ #ds.SeriesTime # optional
338
+ ds.Modality = 'RTSTRUCT'
339
+ ds.SeriesDescription = 'AI-predicted' + dt.strftime('%Y%m%d') + dt.strftime('%H%M%S.%f')
340
+ ds.OperatorsName = 'MIRO AI team'
341
+ ds.SeriesInstanceUID = pydicom.uid.generate_uid() # if we have a uid_base --> pydicom.uid.generate_uid(uid_base)
342
+ ds.SeriesNumber = '1'
343
+
344
+ # General Equipment
345
+ ds.Manufacturer = 'MIRO lab'
346
+ #ds.InstitutionName = 'MIRO lab' # optional
347
+ #ds.ManufacturerModelName = 'nnUNet' # optional, but can be a good tag to insert the model information or label
348
+ #ds.SoftwareVersions # optional, but can be used to insert the version of the code in PARROT or the version of the model
349
+
350
+ # Frame of Reference
351
+ ds.FrameOfReferenceUID = refCT.FrameOfReferenceUID
352
+ ds.PositionReferenceIndicator = '' # empty if unknown - info here https://dicom.innolitics.com/ciods/rt-structure-set/frame-of-reference/00201040
353
+
354
+ # Structure Set
355
+ ds.StructureSetLabel = 'AI predicted' # do not use '-' or special characters, or the Dicom Validation in Raystation will give a warning
356
+ #ds.StructureSetName # optional
357
+ #ds.StructureSetDescription # optional
358
+ ds.StructureSetDate = dt.strftime('%Y%m%d')
359
+ ds.StructureSetTime = dt.strftime('%H%M%S.%f')
360
+ ds.ReferencedFrameOfReferenceSequence = pydicom.Sequence()# optional
361
+ # we assume there is only one, the CT
362
+ dssr = pydicom.Dataset()
363
+ dssr.FrameOfReferenceUID = refCT.FrameOfReferenceUID
364
+ dssr.RTReferencedStudySequence = pydicom.Sequence()
365
+ # fill in sequence
366
+ dssr_refStudy = pydicom.Dataset()
367
+ dssr_refStudy.ReferencedSOPClassUID = '1.2.840.10008.3.1.2.3.1' # Study Management Detached
368
+ dssr_refStudy.ReferencedSOPInstanceUID = refCT.StudyInfo.StudyInstanceUID
369
+ dssr_refStudy.RTReferencedSeriesSequence = pydicom.Sequence()
370
+ #initialize
371
+ dssr_refStudy_series = pydicom.Dataset()
372
+ dssr_refStudy_series.SeriesInstanceUID = refCT.SeriesInstanceUID
373
+ dssr_refStudy_series.ContourImageSequence = pydicom.Sequence()
374
+ # loop over slices of CT
375
+ for slc in range(len(refCT.SOPInstanceUIDs)):
376
+ dssr_refStudy_series_slc = pydicom.Dataset()
377
+ dssr_refStudy_series_slc.ReferencedSOPClassUID = refCT.SOPClassUID
378
+ dssr_refStudy_series_slc.ReferencedSOPInstanceUID = refCT.SOPInstanceUIDs[slc]
379
+ # append
380
+ dssr_refStudy_series.ContourImageSequence.append(dssr_refStudy_series_slc)
381
+
382
+ # append
383
+ dssr_refStudy.RTReferencedSeriesSequence.append(dssr_refStudy_series)
384
+ # append
385
+ dssr.RTReferencedStudySequence.append(dssr_refStudy)
386
+ #append
387
+ ds.ReferencedFrameOfReferenceSequence.append(dssr)
388
+ #
389
+ ds.StructureSetROISequence = pydicom.Sequence()
390
+ # loop over the ROIs to fill in the fields
391
+ for iroi in range(self.NumContours):
392
+ # initialize the Dataset
393
+ dssr = pydicom.Dataset()
394
+ dssr.ROINumber = iroi + 1 # because iroi starts at zero and ROINumber cannot be zero
395
+ dssr.ReferencedFrameOfReferenceUID = ds.FrameOfReferenceUID # coming from refCT
396
+ dssr.ROIName = self.Contours[iroi].ROIName
397
+ #dssr.ROIDescription # optional
398
+ dssr.ROIGenerationAlgorithm = 'AUTOMATIC' # can also be 'SEMIAUTOMATIC' OR 'MANUAL', info here https://dicom.innolitics.com/ciods/rt-structure-set/structure-set/30060020/30060036
399
+ #TODO enable a function to tell us which type of GenerationAlgorithm we have
400
+ ds.StructureSetROISequence.append(dssr)
401
+
402
+ # delete to remove space
403
+ del dssr
404
+
405
+ #TODO merge all loops into one to be faster, although like this the code is easier to follow I find
406
+
407
+ # ROI Contour
408
+ ds.ROIContourSequence = pydicom.Sequence()
409
+ # loop over the ROIs to fill in the fields
410
+ for iroi in range(self.NumContours):
411
+ # initialize the Dataset
412
+ dssr = pydicom.Dataset()
413
+ dssr.ROIDisplayColor = self.Contours[iroi].ROIDisplayColor
414
+ dssr.ReferencedROINumber = iroi + 1 # because iroi starts at zero and ReferencedROINumber cannot be zero
415
+ dssr.ContourSequence = pydicom.Sequence()
416
+ # mask to polygon
417
+ polygonMeshList = self.Contours[iroi].getROIContour()
418
+ # get z vector
419
+ z_coords = list(np.arange(self.Contours[iroi].Mask_Offset[2],self.Contours[iroi].Mask_Offset[2]+self.Contours[iroi].Mask_GridSize[2]*self.Contours[iroi].Mask_PixelSpacing[2], self.Contours[iroi].Mask_PixelSpacing[2]))
420
+ # loop over the polygonMeshList to fill in ContourSequence
421
+ for polygon in polygonMeshList:
422
+
423
+ # initialize the Dataset
424
+ dssr_slc = pydicom.Dataset()
425
+ dssr_slc.ContourGeometricType = 'CLOSED_PLANAR' # can also be 'POINT', 'OPEN_PLANAR', 'OPEN_NONPLANAR', info here https://dicom.innolitics.com/ciods/rt-structure-set/roi-contour/30060039/30060040/30060042
426
+ #TODO enable the proper selection of the ContourGeometricType
427
+
428
+ # fill in contour points and data
429
+ dssr_slc.NumberOfContourPoints = len(polygon[0::3])
430
+ #dssr_slc.ContourNumber # optional
431
+ # Smooth contour
432
+ smoothed_array_2D = Taubin_smoothing(np.transpose(np.array([polygon[0::3],polygon[1::3]])))
433
+ # fill in smoothed contour
434
+ polygon[0::3] = smoothed_array_2D[:,0]
435
+ polygon[1::3] = smoothed_array_2D[:,1]
436
+ dssr_slc.ContourData = polygon
437
+
438
+ #get slice
439
+ polygon_z = polygon[2]
440
+ slc = z_coords.index(polygon_z)
441
+ # fill in ContourImageSequence
442
+ dssr_slc.ContourImageSequence = pydicom.Sequence() # Sequence of images containing the contour
443
+ # in our case, we assume we only have one, the reference CT (refCT)
444
+ dssr_slc_ref = pydicom.Dataset()
445
+ dssr_slc_ref.ReferencedSOPClassUID = refCT.SOPClassUID
446
+ dssr_slc_ref.ReferencedSOPInstanceUID = refCT.SOPInstanceUIDs[slc]
447
+ dssr_slc.ContourImageSequence.append(dssr_slc_ref)
448
+
449
+ # append Dataset to Sequence
450
+ dssr.ContourSequence.append(dssr_slc)
451
+
452
+ # append Dataset
453
+ ds.ROIContourSequence.append(dssr)
454
+
455
+ # RT ROI Observations
456
+ ds.RTROIObservationsSequence = pydicom.Sequence()
457
+ # loop over the ROIs to fill in the fields
458
+ for iroi in range(self.NumContours):
459
+ # initialize the Dataset
460
+ dssr = pydicom.Dataset()
461
+ dssr.ObservationNumber = iroi + 1 # because iroi starts at zero and ReferencedROINumber cannot be zero
462
+ dssr.ReferencedROINumber = iroi + 1 ## because iroi starts at zero and ReferencedROINumber cannot be zero
463
+ dssr.ROIObservationLabel = self.Contours[iroi].ROIName #optional
464
+ dssr.RTROIInterpretedType = 'ORGAN' # we can have many types, see here https://dicom.innolitics.com/ciods/rt-structure-set/rt-roi-observations/30060080/300600a4
465
+ # TODO enable a better fill in of the RTROIInterpretedType
466
+ dssr.ROIInterpreter = '' # empty if unknown
467
+ # append Dataset
468
+ ds.RTROIObservationsSequence.append(dssr)
469
+
470
+ # Approval
471
+ ds.ApprovalStatus = 'UNAPPROVED'#'APPROVED'
472
+ # if ds.ApprovalStatus = 'APPROVED', then we need to fill in the reviewer information
473
+ #ds.ReviewDate = dt.strftime('%Y%m%d')
474
+ #ds.ReviewTime = dt.strftime('%H%M%S.%f')
475
+ #ds.ReviewerName = 'MIRO AI team'
476
+
477
+ # SOP common
478
+ ds.SpecificCharacterSet = 'ISO_IR 100' # conditionally required - see info here https://dicom.innolitics.com/ciods/rt-structure-set/sop-common/00080005
479
+ #ds.InstanceCreationDate # optional
480
+ #ds.InstanceCreationTime # optional
481
+ ds.SOPClassUID = '1.2.840.10008.5.1.4.1.1.481.3' #RTSTRUCT file
482
+ ds.SOPInstanceUID = SOPInstanceUID# Siri's --> pydicom.uid.generate_uid(uid_base)
483
+ #ds.InstanceNumber # optional
484
+
485
+ # save dicom file
486
+ print("Export dicom RTSTRUCT: " + outputFile)
487
+ ds.save_as(outputFile)
488
+
489
+
490
+
491
+
492
+ class ROIcontour:
493
+
494
+ def __init__(self):
495
+ self.SeriesInstanceUID = ""
496
+ self.ROIName = ""
497
+ self.ContourSequence = []
498
+
499
+ def getROIContour(self): # this is from new version of OpenTPS, I(ana) have adapted it to work with old version of self.Contours[i].Mask
500
+
501
+ try:
502
+ from skimage.measure import label, find_contours
503
+ from skimage.segmentation import find_boundaries
504
+ except:
505
+ print('Module skimage (scikit-image) not installed, ROIMask cannot be converted to ROIContour')
506
+ return 0
507
+
508
+ polygonMeshList = []
509
+ for zSlice in range(self.Mask.shape[2]):
510
+
511
+ labeledImg, numberOfLabel = label(self.Mask[:, :, zSlice], return_num=True)
512
+
513
+ for i in range(1, numberOfLabel + 1):
514
+
515
+ singleLabelImg = labeledImg == i
516
+ contours = find_contours(singleLabelImg.astype(np.uint8), level=0.6)
517
+
518
+ if len(contours) > 0:
519
+
520
+ if len(contours) == 2:
521
+
522
+ ## use a different threshold in the case of an interior contour
523
+ contours2 = find_contours(singleLabelImg.astype(np.uint8), level=0.4)
524
+
525
+ interiorContour = contours2[1]
526
+ polygonMesh = []
527
+ for point in interiorContour:
528
+
529
+ xCoord = np.round(point[1]) * self.Mask_PixelSpacing[1] + self.Mask_Offset[1] # original Damien in OpenTPS
530
+ yCoord = np.round(point[0]) * self.Mask_PixelSpacing[0] + self.Mask_Offset[0] # original Damien in OpenTPS
531
+ # xCoord = np.round(point[1]) * self.Mask_PixelSpacing[0] + self.Mask_Offset[0] #AB
532
+ # yCoord = np.round(point[0]) * self.Mask_PixelSpacing[1] + self.Mask_Offset[1] #AB
533
+ zCoord = zSlice * self.Mask_PixelSpacing[2] + self.Mask_Offset[2]
534
+
535
+ polygonMesh.append(yCoord) # original Damien in OpenTPS
536
+ polygonMesh.append(xCoord) # original Damien in OpenTPS
537
+ # polygonMesh.append(xCoord) # AB
538
+ # polygonMesh.append(yCoord) # AB
539
+ polygonMesh.append(zCoord)
540
+
541
+ polygonMeshList.append(polygonMesh)
542
+
543
+ contour = contours[0]
544
+
545
+ polygonMesh = []
546
+ for point in contour:
547
+
548
+ #xCoord = np.round(point[1]) * self.Mask_PixelSpacing[1] + self.Mask_Offset[1] # original Damien in OpenTPS
549
+ #yCoord = np.round(point[0]) * self.Mask_PixelSpacing[0] + self.Mask_Offset[0] # original Damien in OpenTPS
550
+ xCoord = np.round(point[1]) * self.Mask_PixelSpacing[0] + self.Mask_Offset[0] #AB
551
+ yCoord = np.round(point[0]) * self.Mask_PixelSpacing[1] + self.Mask_Offset[1] #AB
552
+ zCoord = zSlice * self.Mask_PixelSpacing[2] + self.Mask_Offset[2]
553
+
554
+ polygonMesh.append(xCoord) # AB
555
+ polygonMesh.append(yCoord) # AB
556
+ #polygonMesh.append(yCoord) # original Damien in OpenTPS
557
+ #polygonMesh.append(xCoord) # original Damien in OpenTPS
558
+ polygonMesh.append(zCoord)
559
+
560
+ polygonMeshList.append(polygonMesh)
561
+
562
+ ## I (ana) will comment this part since I will not use the class ROIContour for simplicity ###
563
+ #from opentps.core.data._roiContour import ROIContour ## this is done here to avoid circular imports issue
564
+ #contour = ROIContour(name=self.ROIName, displayColor=self.ROIDisplayColor)
565
+ #contour.polygonMesh = polygonMeshList
566
+
567
+ #return contour
568
+
569
+ # instead returning the polygonMeshList directly
570
+ return polygonMeshList
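A minimal sketch of the round trip back to DICOM with nii_to_dicom.py (not part of the commit; label names and paths are placeholders, and the label order must match the model output):

    from dicom_to_nii import convert_ct_dicom_to_nii
    from nii_to_dicom import integer_to_onehot, convert_nii_to_dicom

    refCT = convert_ct_dicom_to_nii('/data/patient01/ct_dicom', '/data/patient01/nii', 'ct.nii.gz')
    labels = ['background', 'BRAINSTEM', 'CHIASM']                 # placeholder label list
    pred = '/data/patient01/predict_nii/1a_001.nii.gz'             # nnU-Net integer label map
    integer_to_onehot(pred)                                        # rewrites the file with 2**i one-hot masks
    convert_nii_to_dicom(dicomctdir='/data/patient01/ct_dicom',
                         predictedNiiFile=pred,
                         predictedDicomFile='/data/patient01/predicted_rtstruct.dcm',
                         predicted_structures=labels,
                         rtstruct_colors=[[255, 0, 0]] * len(labels),
                         refCT=refCT)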
predict_new.py ADDED
@@ -0,0 +1,209 @@
1
+ import os
2
+ import os.path
3
+ from os import environ
4
+ import sys
5
+ import json
6
+ import subprocess
7
+ import time
8
+ import nibabel as nib
9
+
10
+ # +++++++++++++ Conversion imports +++++++++++++++++++++++++
11
+ sys.path.append(os.path.dirname(os.path.abspath(__file__)))
12
+ sys.path.append(os.path.abspath(".."))
13
+ # +++++++++++++ Conversion imports +++++++++++++++++++++++++
14
+
15
+ from utils import *
16
+ from dicom_to_nii import convert_ct_dicom_to_nii, convert_transform_mr_to_nii, PatientList, save_images
17
+ from nii_to_dicom import convert_nii_to_dicom, integer_to_onehot
18
+ from predict_nnunet import predictNNUNet
19
+
20
+ def predict(tempPath, patient_id, regSeriesInstanceUID, runInterpreter):
21
+
22
+ # Important: Check the input parameters #################
23
+ if not patient_id or patient_id == "":
24
+ sys.exit("No Patient dataset loaded: Load the patient dataset in Study Management.")
25
+
26
+ if not regSeriesInstanceUID or regSeriesInstanceUID == "":
27
+ sys.exit("No series instance UID for Modality 'REG' file. Check for REG file in your study")
28
+
29
+ dir_base = os.path.join(tempPath, patient_id)
30
+ createdir(dir_base)
31
+
32
+ dir_ct_dicom = os.path.join(dir_base, 'ct_dicom')
33
+ createdir(dir_ct_dicom)
34
+
35
+ dir_mr_dicom = os.path.join(dir_base, 'mr_dicom')
36
+ createdir(dir_mr_dicom)
37
+
38
+ dir_reg_dicom = os.path.join(dir_base, 'reg_dicom')
39
+ createdir(dir_reg_dicom)
40
+
41
+ nnUNet_raw = os.path.join(os.getcwd(), 'nnUNet_raw')
42
+ nnUNet_preprocessed = os.path.join(os.getcwd(), 'nnUNet_preprocessed')
43
+ RESULTS_FOLDER = os.path.join(os.getcwd(), 'nnUNet_trained_models')
44
+ dataset = "Dataset103_EPTN_T1_CT_all_structures"
45
+ # IMPORTANT: data set modality: MR or CT ######################
46
+ predictType='MR'
47
+
48
+ # IMPORTANT: DO NOT remove ########################################
49
+ os.environ['nnUNet_raw'] = nnUNet_raw
50
+ os.environ['nnUNet_preprocessed'] = nnUNet_preprocessed
51
+ os.environ['nnUNet_results'] = RESULTS_FOLDER
52
+
53
+ # Important ++++++++++++++++++++++++++++++++++++++++++++++++
54
+ # Import the lib after setting environ parameters
55
+ # import nnunet.inference.predict_simple as nnunetpredict
56
+
57
+ print('** The python environment path: ', os.environ["PATH"])
58
+
59
+ # For nnunet version 2
60
+ import nnunetv2.inference.predict_from_raw_data as nnunetpredict
61
+ # ###########################################################
62
+
63
+ # predicted files
64
+ predictedNiiFile = os.path.join(tempPath, patient_id, 'predict_nii')
65
+ createdir(predictedNiiFile)
66
+
67
+ predictedDicom = os.path.join(tempPath, patient_id, 'predicted_dicom')
68
+ createdir(predictedDicom)
69
+
70
+ predictedDicomFile = os.path.join(predictedDicom, 'predicted_rtstruct.dcm')
71
+
72
+ print('** Use python interpreter: ', runInterpreter)
73
+ print('** Patient name: ', patient_id)
74
+ print('** REG series instance UID: ', regSeriesInstanceUID)
75
+
76
+ # Convert CT image to NII #############
77
+ startTime = time.time()
78
+
79
+ if predictType == 'CT': # NOTE: this branch references instanceID, modelPath, predicted_structures and rtstruct_colors, which are not defined in this script
80
+
81
+ dir_dicom_to_nii = os.path.join(nnUNet_raw, 'nnUNet_raw_data', 'Dataset098_HAN_nodes')
82
+ createdir(dir_dicom_to_nii)
83
+
84
+ downloadSeriesInstanceByModality(instanceID, dir_ct_dicom, "CT")
85
+ print("Loading CT from Orthanc done: ", time.time()-startTime)
86
+
87
+ # Convert CT image to NII #############
88
+ refCT= convert_ct_dicom_to_nii(dir_dicom=dir_ct_dicom, dir_nii=dir_dicom_to_nii, outputname='1a_001_0000.nii.gz', newvoxelsize = None)
89
+ print("Convert CT image to NII Done: ", time.time()-startTime)
90
+
91
+ # new version 2:
92
+ cmd = [modelPath, '-i', dir_dicom_to_nii, '-o', predictedNiiFile, '-d', dataset, '-tr', 'nnUNetTrainer_650epochs', '-c', '3d_fullres', '-f', '0']
93
+
94
+ out = subprocess.check_output(cmd)
95
+ # Important ########################
96
+ sys.argv = cmd
97
+
98
+ # #### nnunet version 2 #############
99
+ nnunetpredict.predict_entry_point()
100
+ print("Prediction CT done", time.time()-startTime)
101
+
102
+ niiFile = os.path.join(predictedNiiFile, '1a_001.nii.gz')
103
+
104
+ # POSTPROCESSING TO CONVERT FROM INTEGERS TO 2**i, ADD CONTOURS EXISTS, AND SMOOTH
105
+ integer_to_onehot(niiFile)
106
+ print("POST processing convert from integers done: ", time.time()-startTime)
107
+
108
+ startTime = time.time()
109
+ convert_nii_to_dicom(dicomctdir=dir_ct_dicom, predictedNiiFile=niiFile, predictedDicomFile=predictedDicomFile,
110
+ predicted_structures=predicted_structures, rtstruct_colors=rtstruct_colors, refCT=refCT)
111
+
112
+ print("Convert CT predicted NII to DICOM done: ", time.time()-startTime)
113
+
114
+ elif predictType == 'MR':
115
+
116
+ dir_dicom_to_nii = os.path.join(nnUNet_raw, 'nnUNet_raw_data',dataset)
117
+ createdir(dir_dicom_to_nii)
118
+
119
+ # Download the REG dicom ##############
120
+ downloadSeriesInstanceByModality(regSeriesInstanceUID, dir_reg_dicom, "REG")
121
+ print("Loading REG from Orthanc done: ", time.time()-startTime)
122
+
123
+ # Download the MR dicom ###############
124
+ # Read the MR series instance UID from the downloaded REG dicom
125
+ mrSeriesInstanceUID = getSeriesInstanceUIDFromRegDicom(dir_reg_dicom, regSeriesInstanceUID)
126
+
127
+ downloadSeriesInstanceByModality(mrSeriesInstanceUID, dir_mr_dicom, "MR")
128
+ print("Loading MR from Orthanc done: ", time.time()-startTime)
129
+
130
+ # Execute REG tranformation ###########
131
+ ctSeriesInstanceUIDFromRegDicom = getCTSeriesInstanceUIDFromRegDicom(dir_reg_dicom, regSeriesInstanceUID)
132
+ print("CT Series Instance UID referenced by Reg dicom: ", ctSeriesInstanceUIDFromRegDicom)
133
+
134
+ downloadSeriesInstanceByModality(ctSeriesInstanceUIDFromRegDicom, dir_ct_dicom, "CT")
135
+
136
+ Patients = PatientList()
137
+ Patients.list_dicom_files(dir_ct_dicom, 1)
138
+ patient = Patients.list[0]
139
+ patient_name = patient.PatientInfo.PatientName
140
+ patient.import_patient_data(newvoxelsize=None)
141
+ CT = patient.CTimages[0]
142
+
143
+ startTime = time.time()
144
+ mr_reg = regMatrixTransformation(dir_mr_dicom, reg_file_path=dir_reg_dicom, regSeriesInstanceUID=regSeriesInstanceUID, CT=CT)
145
+ print("Transforming MR data done (OpenTPS.Core)")
146
+
147
+ # Convert transform MR image to NII ##################
148
+ refMR = convert_transform_mr_to_nii(dir_mr_dicom=dir_mr_dicom, tranform_mr = mr_reg, dir_nii=dir_dicom_to_nii, outputname='1a_001_0000.nii.gz', CT=CT)
149
+ refCT= convert_ct_dicom_to_nii(dir_dicom=dir_ct_dicom, dir_nii=dir_dicom_to_nii, outputname='1a_001_0001.nii.gz', newvoxelsize = None)
150
+ print("Convert CT image to NII Done: ", time.time()-startTime)
151
+ print("Convert transform MR image to NII Done: ", time.time()-startTime)
152
+
153
+
154
+ print("## start MR running prediction ###############")
155
+ startTime = time.time()
156
+ # modelPath = '..\\..\\python_environments\\prediction-3.10.9\\Scripts\\nnUNetv2_predict.exe'
157
+ # cmd = [modelPath, '-i', dir_dicom_to_nii, '-o', predictedNiiFile, '-d', '99', '-c', '3d_fullres' , '--disable_tta', '-tr', 'nnUNetTrainer_650epochs', '-f', '1, 4']
158
+
159
+ predictNNUNet(os.path.join(RESULTS_FOLDER,dataset, 'nnUNetTrainer_650epochs__nnUNetPlans__3d_fullres'),
160
+ dir_dicom_to_nii,
161
+ predictedNiiFile,
162
+ [1])
163
+
164
+ print("Prediction MR done", time.time()-startTime)
165
+
166
+ startTime = time.time()
167
+
168
+ predicted_structures = ["background", "BRAIN", "AMYGDALAE", "BRAINSTEM", "CAUDATENUCLEI", "CEREBELLUM", "CHIASM", "COCHLEAS", "CORNEAS", "CORPUSCALLOSUM", "FORNICES", "GLANDPINEAL", "HIPPOCAMPI", "HYPOTHALAMI", "LACRIMALGLANDS", "LENSES", "OPTICNERVES", "ORBITOFRONTALS", "PITUITARY", "RETINAS", "THALAMI", "VSCCs"]
169
+ rtstruct_colors = [[255,0,0]]*len(predicted_structures)
170
+
171
+ niiFile = os.path.join(predictedNiiFile, '1a_001.nii.gz')
172
+
173
+ # POSTPROCESSING TO CONVERT FROM INTEGERS TO 2**i, ADD CONTOURS EXISTS, AND SMOOTH
174
+ integer_to_onehot(niiFile)
175
+ print("POST processing convert from integers done: ", time.time()-startTime)
176
+
177
+ # Convert CT image to NII #############
178
+
179
+
180
+
181
+ convert_nii_to_dicom(dicomctdir=dir_ct_dicom, predictedNiiFile=niiFile, predictedDicomFile=predictedDicomFile,
182
+ predicted_structures=predicted_structures, rtstruct_colors=rtstruct_colors, refCT=refCT)
183
+ else:
184
+ print("Not supported yet")
185
+
186
+
187
+ startTime = time.time()
188
+ uploadDicomToOrthanc(predictedDicomFile)
189
+ print("Upload predicted result to Orthanc done: ", time.time()-startTime)
190
+
191
+ # tempPath = 'C:\Temp\parrot_prediction'
192
+ # regSeriesInstanceUID = '1.2.246.352.205.5029381855449574337.1508502639685232062'
193
+ # runInterpreter = 'py3109'
194
+ # patientName = 'P0461C0006I7638639'
195
+
196
+ '''
197
+ Prediction parameters provided by the server. Select the parameters to be used for prediction:
198
+ [1] tempPath: The path where the predict.py is stored,
199
+ [2] patientname: the patient name/ID loaded in Study Management,
200
+ [3] ctSeriesInstanceUID: Series instance UID for data set with modality = CT. To predict 'MR' modality data, retrieve the CT UID by the code (see Precision Code)
201
+ [4] rtStructSeriesInstanceUID: Series instance UID for modality = RTSTRUCT
202
+ [5] regSeriesInstanceUID: Series instance UID for modality = REG,
203
+ [6] runInterpreter: The python version for the python environment
204
+ [7] oarList: only for dose predciton. For contour predicion oarList = []
205
+ [8] tvList: only for dose prediction. For contour prediction tvList = []
206
+ '''
207
+ if __name__ == '__main__':
208
+ predict(tempPath=sys.argv[1], patient_id=sys.argv[2], regSeriesInstanceUID=sys.argv[5], runInterpreter=sys.argv[6])
209
+ # predict(tempPath=tempPath, patient_id=patientName, regSeriesInstanceUID=regSeriesInstanceUID, runInterpreter=runInterpreter)
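For reference, the expected server-side invocation, reconstructed from the parameter list above and the __main__ block (all values are placeholders):

    python predict_new.py <tempPath> <patient_id> <ctSeriesInstanceUID> <rtStructSeriesInstanceUID> <regSeriesInstanceUID> <runInterpreter>

Only sys.argv[1], [2], [5] and [6] are read by predict(); in the MR workflow the CT and MR series UIDs are resolved from the REG file via the helpers imported from utils.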
predict_nnunet.py ADDED
@@ -0,0 +1,32 @@
1
+ import torch
2
+ from nnunetv2.inference.predict_from_raw_data import nnUNetPredictor
3
+
4
+ def predictNNUNet(model_dir, input_dir, output_dir, folds):
5
+
6
+ predictor = nnUNetPredictor(
7
+ tile_step_size=0.9, #0.5,
8
+ use_gaussian=True,
9
+ use_mirroring=False, # --disable_tta
10
+ # perform_everything_on_device=True,
11
+ device=torch.device('cpu', 0),
12
+ verbose=True,
13
+ verbose_preprocessing=False,
14
+ allow_tqdm=True,
15
+ )
16
+
17
+ predictor.initialize_from_trained_model_folder(
18
+ model_dir,
19
+ use_folds=folds, # None if autodetect folds
20
+ checkpoint_name='checkpoint_final.pth',
21
+ )
22
+ print("input_dir",input_dir)
23
+ predictor.predict_from_files(input_dir,
24
+ output_dir,
25
+ save_probabilities=False,
26
+ overwrite=True,
27
+ num_processes_preprocessing=2,
28
+ num_processes_segmentation_export=2,
29
+ folder_with_segs_from_prev_stage=None,
30
+ num_parts=1,
31
+ part_id=0
32
+ )
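A minimal usage sketch for predict_nnunet.py, mirroring the call made in predict_new.py (folder names are placeholders relative to the working directory):

    from predict_nnunet import predictNNUNet

    predictNNUNet(
        model_dir='nnUNet_trained_models/Dataset103_EPTN_T1_CT_all_structures/nnUNetTrainer_650epochs__nnUNetPlans__3d_fullres',
        input_dir='nnUNet_raw/nnUNet_raw_data/Dataset103_EPTN_T1_CT_all_structures',  # *_0000.nii.gz (and *_0001.nii.gz) inputs
        output_dir='predict_nii',
        folds=[1])                              # fold(s) that have a checkpoint_final.pth on disk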