File size: 11,390 Bytes
67e21eb |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 |
#!/usr/bin/env python
from pathlib import Path
import argparse
import nibabel as nib
import numpy as np
import os
import multiprocessing
from time import time
import logging
import traceback
from scipy.ndimage import label as ndi_label, sum as ndi_sum
from nibabel.orientations import io_orientation, axcodes2ornt, ornt_transform
from scipy.ndimage import label as ndi_label, sum as ndi_sum, gaussian_filter
# Configure the root logger: timestamped INFO-level messages.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
)
# mapping of input labels to output labels, per processing method:
#   {method: {label_directory: {input_value: output_value}}}
# values not listed in a mapping are replaced with 0 on load (see CaseLoader.fetch_label)
input_labels_map = {
    "spine_to_vb": {
        "labels-spine": {1:0,2:0,3:0,4:0,5:0,6:0,7:0, # removing cervical spine
            8:1,9:2,10:3,11:4,12:5,13:6,14:7,15:8,16:9,17:10,18:11,19:12,
            20:13,21:14,22:15,23:16,24:17,25:18,
            26:19,27:20,28:21,},
        "labels-bodyregions": {11:1}, # = nervous_system 11
        "labels-spinalcord":{1:1,79:1} # = spinal_cord, 1 for Tseg binary masks OR 79 for inference results; do not combine with task total (includes 1=spleen)
    },
}
# definition of classes
class ProcessLoader:
def __init__(self, input_root, method):
self.root = input_root
self.method = method
self.labels_map = input_labels_map[method]
logging.info(f"Initializing method `{method}`, loading relevant label map")
# reducing spine to vertebral bodies
def spine_to_vb(self, case):
output_np = case.fetch_label('labels-spine') # load original spine
myelon_np = case.fetch_label('labels-bodyregions') # load myelon from Body-and-Organ-Analysis, label nervous_system = 11
myelon2_np = case.fetch_label('labels-spinalcord') # load myelon from TotalSegmentator, label spinal_cord = 1 (for Tseg binary masks) OR 79 (for Inference)
# combine Body-and-Organ-Analysis and TotalSegmentator
myelon_np[myelon2_np > 0] = 1
# save sacrum temporarily, will not remove anything there
sacrum_np = output_np==19
# remove everything posterior from most anterior voxel of myelon
for slice in range(myelon_np.shape[2]):
slice_np = myelon_np[:,:,slice]
if np.sum(slice_np) > 0:
com_max = np.max(np.where(slice_np > 0), axis=1)[1] # most anterior
output_np[:,:com_max,slice] = 0
output_np[sacrum_np] = 19
# keep only the biggest connected component per vertebra (3D), removes noise
labels = np.unique(output_np)[1:]
for label in labels:
mask = output_np == label
if np.any(mask):
labeled, num_labels = ndi_label(mask)
sizes = ndi_sum(mask, labeled, index=range(1, num_labels+1))
largest_label = np.argmax(sizes) + 1
output_np[(labeled != largest_label) & mask] = 0
# fill holes in the vertebra mask
for label in labels:
mask = output_np == label
if np.any(mask):
np.invert(mask, out=mask)
labeled, num_labels = ndi_label(mask)
if num_labels <= 1:
continue
sizes = ndi_sum(mask, labeled, index=range(1, num_labels+1))
largest_label = np.argmax(sizes) + 1
mask[labeled != largest_label] = 0
np.invert(mask, out=mask)
output_np[mask] = label
# smooth mask
output_np_smoothed = np.zeros_like(output_np)
for label in labels:
mask = output_np == label
if np.any(mask):
smoothed_mask = gaussian_filter(mask.astype(float), sigma=1) > 0.5
output_np_smoothed[smoothed_mask] = label
return (output_np_smoothed, 'labels-vb')
def __call__(self, input_images_file):
time_start = time()
worker_name = multiprocessing.current_process().name
logging.debug(f"Processing `{input_images_file}` @{worker_name}")
try:
# load case
case = CaseLoader(self.root, input_images_file, self.labels_map)
# run pipeline according to method
output_np, output_dir = getattr(self, self.method)(case)
# skip patient if no output available from some reasons
if output_np is False or output_np.size == 0:
raise ValueError(f"no output available, skipping")
if not output_dir:
raise RuntimeError(f"directory for output was undefined")
# save output, logging
os.makedirs(self.root / output_dir, exist_ok=True)
output_path = self.root / output_dir / input_images_file
output_nib = nib.Nifti1Image(output_np, case.image_reoriented_affine)
# transform back to original orientation
affine_transformer = ornt_transform(axcodes2ornt("RAS"), # on purpose using RAS directly and not _reoriented_affine = additional check
io_orientation(case.image_original_affine))
output_nib = output_nib.as_reoriented(affine_transformer)
if not np.allclose(case.image_original_affine, output_nib.affine):
raise ValueError(f'Affine transformation failed: \n {case.image_original_affine} != \n {output_nib.affine}')
nib.save(output_nib, output_path)
logging.debug(f" saved `{output_path}` ({time()-time_start:.2f}s)")
logging.info(f"{input_images_file} finished @{worker_name} ({time()-time_start:.2f}s)")
except Exception as e:
logging.warning(f"{input_images_file} failed:\n {e}\n {traceback.format_exc()}\n")
class CaseLoader:
    """Loads one case: the source image plus all labels required by a method.

    All volumes are reoriented to closest-canonical (RAS) so that the
    processing pipelines can rely on a fixed anatomical axis order; the
    original affine is kept to transform results back on save.
    """
    def __init__(self, input_root, input_images_file, input_labels_map):
        # input_root: Path of the root directory; image is read from images/
        # input_images_file: file name (e.g. case.nii.gz) shared by image and labels
        # input_labels_map: {label_dir: {input_value: output_value}} remapping spec
        input_path = input_root / 'images' / input_images_file
        if not os.path.exists(input_path):
            # was `ValueError(...)` without `raise`, which silently did nothing
            raise ValueError(f"{input_path} not available")
        input_original_nib = nib.load(input_path)
        input_reoriented_nib = nib.as_closest_canonical(input_original_nib)
        input_reoriented_np = input_reoriented_nib.get_fdata().astype(np.float32)
        logging.debug(f" loaded input `{input_path}`")
        # cache data and geometry of both orientations for checks and save
        self.image_reoriented_np = input_reoriented_np
        self.image_reoriented_shape = input_reoriented_np.shape
        self.image_reoriented_affine = input_reoriented_nib.affine
        self.image_reoriented_zooms = input_reoriented_nib.header.get_zooms()
        self.image_original_affine = input_original_nib.affine
        self.input_images_file = input_images_file
        self.root = input_root
        self.labels_map = input_labels_map

    def fetch_label(self, label):
        """Load `<root>/<label>/<file>`, verify its geometry against the source
        image, and remap its values according to self.labels_map[label].

        label: name of the label subdirectory (key of the labels map).
        Returns the remapped uint8 volume; values without a mapping entry
        become 0. Raises ValueError on affine or shape mismatch.
        """
        label_path = self.root / label / self.input_images_file
        label_original_nib = nib.load(label_path)
        label_reoriented_nib = nib.as_closest_canonical(label_original_nib)
        label_reoriented_np = label_reoriented_nib.get_fdata().astype(np.uint8)
        # geometry checks: tolerant affine comparison, exact shape comparison
        if not np.allclose(self.image_reoriented_affine, label_reoriented_nib.affine, rtol=1e-03, atol=1e-04):
            raise ValueError(f"affine matrices of input and label {label} do not match:\n{self.image_reoriented_affine}\n{label_reoriented_nib.affine}\n{self.image_reoriented_affine-label_reoriented_nib.affine}")
        if not np.array_equal(self.image_reoriented_shape, label_reoriented_np.shape):
            raise ValueError(f"shapes of input and label {label} do not match: {self.image_reoriented_shape} vs {label_reoriented_np.shape}")
        # build a lookup table for relabeling; unmapped labels fall back to 0
        labels = self.labels_map[label]
        labels_max = max(max(labels.keys()), np.max(label_reoriented_np))
        relabel_array = np.zeros(labels_max+1, dtype=np.uint8)
        for key, value in labels.items():
            relabel_array[key] = value
        # remap all voxels in one vectorized pass via fancy indexing
        label_reoriented_np = relabel_array[label_reoriented_np]
        logging.debug(f" loaded label `{label_path}` using {len(labels)} labels")
        return label_reoriented_np
def main(input_root, input_prefix, method):
    """Discover complete cases under input_root and process them in parallel.

    input_root: Path containing images/ plus one directory per label required
        by `method` (see input_labels_map).
    input_prefix: only files images/<prefix>*.nii.gz are considered.
    method: name of the processing pipeline (key of input_labels_map).
    Raises ValueError for a missing root directory or unknown method.
    """
    time_start = time()
    # print run parameters
    logging.info('PIPELINE: MASK PROCESSING')
    logging.info(f'input root directory: `{input_root}`')
    logging.info(f'method: `{method}`')
    logging.info(f'input prefix: `{input_prefix}`')
    # checks: input directory? method?
    if not os.path.exists(input_root):
        raise ValueError(f'Input root directory `{input_root}` does not exist.')
    if method not in input_labels_map.keys():
        raise ValueError(f"Method `{method}` not available.")
    # find cases (images/*, prefix*nii.gz)
    input_images_dir = input_root / "images"
    input_images_files = [file.name for file in input_images_dir.glob(input_prefix + '*.nii.gz')]
    logging.info(f"{len(input_images_files)} input images identified")
    # identifying all required label subdirectories
    input_labels_required = input_labels_map[method]
    logging.info(f"{len(input_labels_required)} labels required: {', '.join(input_labels_required.keys())}")
    # keep only cases for which every required label file exists
    input_images_files = [file for file in input_images_files
                          if all((input_root / label / file).exists() for label in input_labels_required.keys())]
    logging.info(f"{len(input_images_files)} complete cases (all required labels available) identified")
    if not input_images_files:
        # Pool(processes=0) would raise ValueError; nothing to do anyway
        logging.info("no complete cases found, nothing to process")
        return
    # initialize current method as process
    process = ProcessLoader(input_root, method)
    n_processes = min(multiprocessing.cpu_count(), len(input_images_files))
    # fan out over the worker pool; maxtasksperchild bounds per-worker memory growth
    logging.info(f"spawn processes at {n_processes}/{multiprocessing.cpu_count()} CPUs\n")
    with multiprocessing.Pool(processes=n_processes, maxtasksperchild=20) as p:
        p.map(process, input_images_files, chunksize=min(5, n_processes))
    # finalized
    logging.info(f"FINISHED PIPELINE ({len(input_images_files)} cases in {time()-time_start:.2f}s)")
if __name__ == "__main__":
    # Toolkit for label manipulation, combination and aggregation.
    # Requires an images/ directory plus one directory per label named in
    # input_labels_map, all inside the input root directory.
    #
    # Usage:
    #     suppl/4_MaskEdits.py -i /Volumes/path/to/main/ -m spine_to_vb
    cli = argparse.ArgumentParser(description="toolkit to combine and manipulate masks.")
    cli.add_argument("-i", "--input",
                     dest="input_root", metavar="Input root directory",
                     type=lambda p: Path(p).absolute(), required=True,
                     help="Root Directory")
    cli.add_argument("-b", "--batch",
                     dest="input_prefix", metavar="Prefix of inputs",
                     type=str, default="", required=False,
                     help="Prefix of input files to be processed")
    cli.add_argument("-m", "--method",
                     dest="method", metavar="Method",
                     type=str, choices=["spine_to_vb"], required=True,
                     help="The method / pipeline used for mask processing")
    cli_args = cli.parse_args()
    main(input_root=cli_args.input_root, input_prefix=cli_args.input_prefix, method=cli_args.method)