"""
Copyright 2021, Dana-Farber Cancer Institute and Weill Cornell Medicine
License: GNU GPL 2.0
"""
from io import BytesIO
import numpy as np
import openslide
from javabridge.jutil import JavaException
from loguru import logger
from PIL import Image
from pydicom.dataset import Dataset
from pydicom.encaps import get_frame_offsets
from pydicom.filebase import DicomFile
from pydicom.filereader import data_element_offset_to_value, dcmread
from pydicom.tag import SequenceDelimiterTag, TupleTag
from pydicom.uid import UID
from scipy.ndimage import zoom
import pathml.core
import pathml.core.tile
from pathml.utils import pil_to_rgb
try:
import bioformats
import javabridge
from bioformats.metadatatools import createOMEXMLMetadata
except ImportError: # pragma: no cover
logger.exception("Unable to import bioformats, javabridge")
raise Exception(
"Installation of PathML not complete. Please install openjdk8, bioformats, and javabridge:\nconda install openjdk==8.0.152\npip install javabridge==1.0.19 python-bioformats==4.0.0\nFor detailed installation instructions, please see https://github.com/Dana-Farber-AIOS/pathml/"
)
class SlideBackend:
"""base class for backends that interface with slides on disk"""
def extract_region(self, location, size, level, **kwargs):
raise NotImplementedError
def get_thumbnail(self, size, **kwargs):
raise NotImplementedError
def get_image_shape(self, **kwargs):
raise NotImplementedError
def generate_tiles(self, shape, stride, pad, **kwargs):
raise NotImplementedError
class OpenSlideBackend(SlideBackend):
"""
Use OpenSlide to interface with image files.
Depends on `openslide-python <https://openslide.org/api/python/>`_ which wraps the `openslide <https://openslide.org/>`_ C library.
Args:
filename (str): path to image file on disk
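    Example:
        Open a slide and check its dimensions (illustrative sketch; assumes a file named
        "slide.svs" exists on disk).
        wsi = OpenSlideBackend("slide.svs")
        print(wsi.get_image_shape())   # (i, j) dimensions of level 0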
"""
def __init__(self, filename):
logger.info(f"OpenSlideBackend loading file at: {filename}")
self.filename = filename
self.slide = openslide.open_slide(filename=filename)
self.level_count = self.slide.level_count
def __repr__(self):
return f"OpenSlideBackend('{self.filename}')"
def extract_region(self, location, size, level=None):
"""
Extract a region of the image
Args:
location (Tuple[int, int]): Location of top-left corner of tile (i, j)
            size (Union[int, Tuple[int, int]]): Size of the region to extract. May be a tuple of (height, width)
                or a single integer, in which case a square region of that size is extracted.
            level (int): level from which to extract the region. Level 0 is highest resolution.
Returns:
np.ndarray: image at the specified region
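        Example:
            Read a 256x256 region at level 1 (illustrative; ``wsi`` is an OpenSlideBackend
            instance and coordinates are (i, j) within that level).
            region = wsi.extract_region(location=(1000, 2000), size=256, level=1)
            # region is an RGB np.ndarray of shape (256, 256, 3)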
"""
# verify args
if isinstance(size, int):
size = (size, size)
else:
assert (
isinstance(size, tuple)
and all([isinstance(a, int) for a in size])
and len(size) == 2
), f"Input size {size} not valid. Must be an integer or a tuple of two integers."
if level is None:
level = 0
else:
assert isinstance(level, int), f"level {level} must be an integer"
assert (
level < self.slide.level_count
), f"input level {level} invalid for a slide with {self.slide.level_count} levels"
# openslide read_region expects (x, y) coords, so need to switch order for compatibility with pathml (i, j)
i, j = location
# openslide read_region() uses coords in the level 0 reference frame
# if we are reading tiles from a higher level, need to convert to level 0 frame by multiplying by scale factor
# see: https://github.com/Dana-Farber-AIOS/pathml/issues/240
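        # e.g. if level 2 has a downsample factor of 4, then tile coords (100, 200) at level 2
        # map to (400, 800) in the level 0 reference frame expected by read_region()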
coord_scale_factor = int(self.slide.level_downsamples[level])
i *= coord_scale_factor
j *= coord_scale_factor
h, w = size
region = self.slide.read_region(location=(j, i), level=level, size=(w, h))
region_rgb = pil_to_rgb(region)
return region_rgb
def get_image_shape(self, level=0):
"""
Get the shape of the image at specified level.
Args:
level (int): Which level to get shape from. Level 0 is highest resolution. Defaults to 0.
Returns:
Tuple[int, int]: Shape of image at target level, in (i, j) coordinates.
"""
assert isinstance(level, int), f"level {level} invalid. Must be an int."
assert (
level < self.slide.level_count
), f"input level {level} invalid for slide with {self.slide.level_count} levels total"
j, i = self.slide.level_dimensions[level]
return i, j
def get_thumbnail(self, size):
"""
Get a thumbnail of the slide.
Args:
size (Tuple[int, int]): the maximum size of the thumbnail
Returns:
np.ndarray: RGB thumbnail image
"""
thumbnail = self.slide.get_thumbnail(size)
thumbnail = pil_to_rgb(thumbnail)
return thumbnail
def generate_tiles(self, shape=3000, stride=None, pad=False, level=0):
"""
Generator over tiles.
Padding works as follows:
If ``pad is False``, then the first tile will start flush with the edge of the image, and the tile locations
will increment according to specified stride, stopping with the last tile that is fully contained in the image.
If ``pad is True``, then the first tile will start flush with the edge of the image, and the tile locations
will increment according to specified stride, stopping with the last tile which starts in the image. Regions
outside the image will be padded with 0.
For example, for a 5x5 image with a tile size of 3 and a stride of 2, tile generation with ``pad=False`` will
        create 4 tiles total (2 per dimension), compared to 9 tiles (3 per dimension) if ``pad=True``.
Args:
shape (int or tuple(int)): Size of each tile. May be a tuple of (height, width) or a single integer,
in which case square tiles of that size are generated.
            stride (int): stride between chunks. If ``None``, uses ``stride = shape`` for non-overlapping chunks.
Defaults to ``None``.
pad (bool): How to handle tiles on the edges. If ``True``, these edge tiles will be zero-padded
and yielded with the other chunks. If ``False``, incomplete edge chunks will be ignored.
Defaults to ``False``.
level (int, optional): For slides with multiple levels, which level to extract tiles from.
Defaults to 0 (highest resolution).
Yields:
pathml.core.tile.Tile: Extracted Tile object
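        Example:
            Iterate over non-overlapping 1000x1000 tiles at level 0 (illustrative; ``wsi`` is an
            OpenSlideBackend instance).
            for tile in wsi.generate_tiles(shape=1000, pad=False):
                print(tile.coords)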
"""
assert isinstance(shape, int) or (
isinstance(shape, tuple) and len(shape) == 2
), f"input shape {shape} invalid. Must be a tuple of (H, W), or a single integer for square tiles"
if isinstance(shape, int):
shape = (shape, shape)
assert (
stride is None
or isinstance(stride, int)
or (isinstance(stride, tuple) and len(stride) == 2)
), f"input stride {stride} invalid. Must be a tuple of (stride_H, stride_W), or a single int"
if level is None:
level = 0
assert isinstance(level, int), f"level {level} invalid. Must be an int."
assert (
level < self.slide.level_count
), f"input level {level} invalid for slide with {self.slide.level_count} levels total"
if stride is None:
stride = shape
elif isinstance(stride, int):
stride = (stride, stride)
i, j = self.get_image_shape(level=level)
stride_i, stride_j = stride
# calculate number of expected tiles
# check for tile shape evenly dividing slide shape to fix https://github.com/Dana-Farber-AIOS/pathml/issues/305
if pad and i % stride_i != 0:
n_tiles_i = i // stride_i + 1
else:
n_tiles_i = (i - shape[0]) // stride_i + 1
if pad and j % stride_j != 0:
n_tiles_j = j // stride_j + 1
else:
n_tiles_j = (j - shape[1]) // stride_j + 1
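        # worked example: if i = 5000 and shape[0] = stride_i = 3000, then
        #   pad=False -> n_tiles_i = (5000 - 3000) // 3000 + 1 = 1 (only the fully contained tile)
        #   pad=True  -> n_tiles_i = 5000 // 3000 + 1 = 2 (includes the zero-padded edge tile)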
for ix_i in range(n_tiles_i):
for ix_j in range(n_tiles_j):
coords = (int(ix_i * stride_i), int(ix_j * stride_j))
# get image for tile
tile_im = self.extract_region(location=coords, size=shape, level=level)
yield pathml.core.tile.Tile(image=tile_im, coords=coords)
def _init_logger():
"""
This is so that Javabridge doesn't spill out a lot of DEBUG messages
during runtime.
From CellProfiler/python-bioformats.
Credits to: https://github.com/pskeshu/microscoper/blob/master/microscoper/io.py#L141-L162
"""
rootLoggerName = javabridge.get_static_field(
"org/slf4j/Logger", "ROOT_LOGGER_NAME", "Ljava/lang/String;"
)
rootLogger = javabridge.static_call(
"org/slf4j/LoggerFactory",
"getLogger",
"(Ljava/lang/String;)Lorg/slf4j/Logger;",
rootLoggerName,
)
logLevel = javabridge.get_static_field(
"ch/qos/logback/classic/Level", "WARN", "Lch/qos/logback/classic/Level;"
)
javabridge.call(
rootLogger, "setLevel", "(Lch/qos/logback/classic/Level;)V", logLevel
)
logger.info("silenced javabridge logging")
class BioFormatsBackend(SlideBackend):
"""
Use BioFormats to interface with image files.
    Supports multi-level images.
    Depends on `python-bioformats <https://github.com/CellProfiler/python-bioformats>`_ which wraps the OME Bio-Formats
    Java library, parsing pixel data and metadata of proprietary formats and
    converting all formats to OME-TIFF. Please cite: https://pubmed.ncbi.nlm.nih.gov/20513764/
Args:
filename (str): path to image file on disk
dtype (numpy.dtype): data type of image. If ``None``, will use BioFormats to infer the data type from the
image's OME metadata. Defaults to ``None``.
Note:
While the Bio-Formats convention uses XYZCT channel order, we use YXZCT for compatibility with the rest of
        PathML, which is based on the (i, j) coordinate system.
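    Example:
        Open an image and check its shape (illustrative sketch; the filename is a placeholder).
        slide = BioFormatsBackend("image.ome.tiff")
        print(slide.get_image_shape())   # (i, j) shape of the largest series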
"""
def __init__(self, filename, dtype=None):
self.filename = filename
logger.info(f"BioFormatsBackend loading file at: {filename}")
# init java virtual machine
javabridge.start_vm(class_path=bioformats.JARS, max_heap_size="50G")
# disable verbose JVM logging if possible
try:
_init_logger()
except JavaException: # pragma: no cover
pass
# java maximum array size of 2GB constrains image size
ImageReader = bioformats.formatreader.make_image_reader_class()
reader = ImageReader()
omeMeta = createOMEXMLMetadata()
reader.setMetadataStore(omeMeta)
reader.setId(str(self.filename))
seriesCount = reader.getSeriesCount()
logger.info(f"Found n={seriesCount} series in image")
sizeSeries = []
for s in range(seriesCount):
reader.setSeries(s)
sizex, sizey, sizez, sizec, sizet = (
reader.getSizeX(),
reader.getSizeY(),
reader.getSizeZ(),
reader.getSizeC(),
reader.getSizeT(),
)
# use yxzct for compatibility with the rest of PathML which uses i,j coords (not x, y)
sizeSeries.append((sizey, sizex, sizez, sizec, sizet))
s = [s[0] * s[1] for s in sizeSeries]
self.level_count = seriesCount # count of levels
self.shape = sizeSeries[s.index(max(s))] # largest shape
self.shape_list = sizeSeries # shape on all levels
self.metadata = bioformats.get_omexml_metadata(self.filename)
logger.info(f"Bioformats OMEXML metadata: {self.metadata}")
if dtype:
assert isinstance(
dtype, np.dtype
), f"dtype is of type {type(dtype)}. Must be a np.dtype"
self.pixel_dtype = dtype
logger.info(f"Using specified dtype: {dtype}")
else:
# infer pixel data type from metadata
# map from ome pixel datatypes to numpy types. Based on:
# https://github.com/CellProfiler/python-bioformats/blob/c03fb0988caf686251707adc4332d0aff9f02941/bioformats/omexml.py#L77-L87
# but specifying that float = float32 (np.float defaults to float64)
pixel_dtype_map = {
bioformats.omexml.PT_INT8: np.dtype("int8"),
bioformats.omexml.PT_INT16: np.dtype("int16"),
bioformats.omexml.PT_INT32: np.dtype("int32"),
bioformats.omexml.PT_UINT8: np.dtype("uint8"),
bioformats.omexml.PT_UINT16: np.dtype("uint16"),
bioformats.omexml.PT_UINT32: np.dtype("uint32"),
bioformats.omexml.PT_FLOAT: np.dtype("float32"),
bioformats.omexml.PT_BIT: np.dtype("bool"),
bioformats.omexml.PT_DOUBLE: np.dtype("float64"),
}
ome_pixeltype = (
bioformats.OMEXML(self.metadata).image().Pixels.get_PixelType()
)
logger.info(f"Using pixel dtype found in OME metadata: {ome_pixeltype}")
try:
self.pixel_dtype = pixel_dtype_map[ome_pixeltype]
logger.info(f"Found corresponding dtype: {self.pixel_dtype}")
# TODO: change to specific exception
except Exception:
logger.exception("datatype from metadata not found in pixel_dtype_map")
raise Exception(
f"pixel type '{ome_pixeltype}' detected from OME metadata not recognized."
)
def __repr__(self):
return f"BioFormatsBackend('{self.filename}')"
def get_image_shape(self, level=None):
"""
        Get the shape of the image at the specified level.
Args:
level (int): Which level to get shape from. If ``level is None``, returns the shape of the biggest level.
Defaults to ``None``.
Returns:
Tuple[int, int]: Shape of image (i, j) at target level
"""
if level is None:
return self.shape[:2]
else:
assert isinstance(level, int), f"level {level} invalid. Must be an int."
assert (
level < self.level_count
), f"input level {level} invalid for slide with {self.level_count} levels total"
return self.shape_list[level][:2]
def extract_region(
self, location, size, level=0, series_as_channels=False, normalize=True
):
"""
Extract a region of the image. All bioformats images have 5 dimensions representing
(i, j, z, channel, time). Even if an image does not have multiple z-series or time-series,
those dimensions will still be kept. For example, a standard RGB image will be of shape (i, j, 1, 3, 1).
If a tuple with len < 5 is passed, missing dimensions will be
retrieved in full.
Args:
location (Tuple[int, int]): (i, j) location of corner of extracted region closest to the origin.
size (Tuple[int, int, ...]): (i, j) size of each region. If an integer is passed, will convert to a
tuple of (i, j) and extract a square region. If a tuple with len < 5 is passed, missing
dimensions will be retrieved in full.
level (int): level from which to extract chunks. Level 0 is highest resolution. Defaults to 0.
series_as_channels (bool): Whether to treat image series as channels. If ``True``, multi-level images
are not supported. Defaults to ``False``.
            normalize (bool, optional): Whether to normalize the image to uint8 before returning. Defaults to True.
If False, image will be returned as-is immediately after reading, typically in float64.
Returns:
np.ndarray: image at the specified region. 5-D array of (i, j, z, c, t)
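        Example:
            Extract a 500x500 region from the highest-resolution level (illustrative; ``slide`` is a
            BioFormatsBackend instance).
            region = slide.extract_region(location=(0, 0), size=500)
            # region is a 5-D array of shape (500, 500, z, c, t), normalized to uint8 by default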
"""
if level is None:
level = 0
else:
assert isinstance(level, int), f"level {level} invalid. Must be an int."
assert (
level < self.level_count
), f"input level {level} invalid for slide with {self.level_count} levels total"
# if a single int is passed for size, convert to a tuple to get a square region
if type(size) is int:
size = (size, size)
if not (
isinstance(location, tuple)
and len(location) < 3
and all([isinstance(x, int) for x in location])
):
raise ValueError(
f"input location {location} invalid. Must be a tuple of integer coordinates of len<2"
)
if not (
isinstance(size, tuple)
and len(size) < 3
and all([isinstance(x, int) for x in size])
):
raise ValueError(
f"input size {size} invalid. Must be a tuple of integer coordinates of len<2"
)
if series_as_channels:
logger.info("using series_as_channels=True")
if level != 0:
logger.exception(
f"When series_as_channels=True, must use level=0. Input 'level={level}' invalid."
)
raise ValueError(
f"Multi-level images not supported with series_as_channels=True. Input 'level={level}' invalid. Use 'level=0'."
)
javabridge.start_vm(class_path=bioformats.JARS, max_heap_size="100G")
with bioformats.ImageReader(str(self.filename), perform_init=True) as reader:
# expand size
logger.info(f"extracting region with input size = {size}")
size = list(size)
arrayshape = list(size)
for i in range(len(self.shape_list[level])):
if i > len(size) - 1:
arrayshape.append(self.shape_list[level][i])
arrayshape = tuple(arrayshape)
logger.info(f"input size converted to {arrayshape}")
array = np.empty(arrayshape)
# read a very small region to check whether the image has channels incorrectly stored as series
sample = reader.read(
z=0,
t=0,
series=level,
rescale=False,
XYWH=(location[1], location[0], 2, 2),
)
# need this part because some facilities output images where the channels are incorrectly stored as series
# in this case we pull the image for each series, then stack them together as channels
if series_as_channels:
logger.info("reading series as channels")
for z in range(self.shape_list[level][2]):
for c in range(self.shape_list[level][3]):
for t in range(self.shape_list[level][4]):
slicearray = reader.read(
z=z,
t=t,
series=c,
rescale=False,
XYWH=(location[1], location[0], size[1], size[0]),
)
slicearray = np.asarray(slicearray)
# some file formats read x, y out of order, transpose
array[:, :, z, c, t] = slicearray
# in this case, channels are correctly stored as channels, and we can support multi-level images as series
else:
logger.info("reading image")
for z in range(self.shape_list[level][2]):
for t in range(self.shape_list[level][4]):
slicearray = reader.read(
z=z,
t=t,
series=level,
rescale=False,
XYWH=(location[1], location[0], size[1], size[0]),
)
slicearray = np.asarray(slicearray)
if len(sample.shape) == 3:
array[:, :, z, :, t] = slicearray
else:
array[:, :, z, level, t] = slicearray
if not normalize:
logger.info("returning extracted region without normalizing dtype")
return array
else:
logger.info("normalizing extracted region to uint8")
# scale array before converting: https://github.com/Dana-Farber-AIOS/pathml/issues/271
# first scale to [0-1]
array_scaled = array / (2 ** (8 * self.pixel_dtype.itemsize))
logger.info(
f"Scaling image to [0, 1] by dividing by {(2 ** (8 * self.pixel_dtype.itemsize))}"
)
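            # e.g. for a uint16 image (itemsize = 2): dividing by 2**16 and then multiplying by 2**8
            # maps [0, 65535] to [0, 255], keeping only the 8 most significant bits of each pixel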
# then scale to [0-255] and convert to 8 bit
array_scaled = array_scaled * 2**8
return array_scaled.astype(np.uint8)
def get_thumbnail(self, size=None):
"""
Get a thumbnail of the image. Since there is no default thumbnail for multiparametric, volumetric
images, this function supports downsampling of all image dimensions.
Args:
size (Tuple[int, int]): thumbnail size
Returns:
np.ndarray: RGB thumbnail image
Example:
Get 1000x1000 thumbnail of 7 channel fluorescent image.
shape = data.slide.get_image_shape()
thumb = data.slide.get_thumbnail(size=(1000,1000, shape[2], shape[3], shape[4]))
"""
assert isinstance(size, (tuple, type(None))), "Size must be a tuple of ints."
if size is not None:
if len(size) != len(self.shape):
size = size + self.shape[len(size) :]
if self.shape[0] * self.shape[1] * self.shape[2] * self.shape[3] > 2147483647:
raise Exception(
f"Java arrays allocate maximum 32 bits (~2GB). Image size is {self.imsize}"
)
array = self.extract_region(location=(0, 0), size=self.shape[:2])
if size is not None:
ratio = tuple([x / y for x, y in zip(size, self.shape)])
assert (
ratio[3] == 1
), "cannot interpolate between fluor channels, resampling doesn't apply, fix size[3]"
image_array = zoom(array, ratio)
return image_array
def generate_tiles(self, shape=3000, stride=None, pad=False, level=0, **kwargs):
"""
Generator over tiles.
Padding works as follows:
If ``pad is False``, then the first tile will start flush with the edge of the image, and the tile locations
will increment according to specified stride, stopping with the last tile that is fully contained in the image.
If ``pad is True``, then the first tile will start flush with the edge of the image, and the tile locations
will increment according to specified stride, stopping with the last tile which starts in the image. Regions
outside the image will be padded with 0.
For example, for a 5x5 image with a tile size of 3 and a stride of 2, tile generation with ``pad=False`` will
        create 4 tiles total (2 per dimension), compared to 9 tiles (3 per dimension) if ``pad=True``.
Args:
shape (int or tuple(int)): Size of each tile. May be a tuple of (height, width) or a single integer,
in which case square tiles of that size are generated.
            stride (int): stride between chunks. If ``None``, uses ``stride = shape`` for non-overlapping chunks.
Defaults to ``None``.
pad (bool): How to handle tiles on the edges. If ``True``, these edge tiles will be zero-padded
and yielded with the other chunks. If ``False``, incomplete edge chunks will be ignored.
Defaults to ``False``.
            level (int, optional): For slides with multiple levels, which level to extract tiles from.
                Defaults to 0 (highest resolution).
            **kwargs: Other arguments passed through to ``extract_region()`` method.
Yields:
pathml.core.tile.Tile: Extracted Tile object
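        Example:
            Generate zero-padded 500x500 tiles, skipping dtype normalization (illustrative;
            ``normalize`` is forwarded to ``extract_region()`` via kwargs).
            for tile in slide.generate_tiles(shape=500, pad=True, normalize=False):
                print(tile.coords)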
"""
assert isinstance(level, int), f"level {level} invalid. Must be an int."
assert (
level < self.level_count
), f"input level {level} invalid for slide with {self.level_count} levels total"
assert isinstance(shape, int) or (
isinstance(shape, tuple) and len(shape) == 2
), f"input shape {shape} invalid. Must be a tuple of (H, W), or a single integer for square tiles"
if isinstance(shape, int):
shape = (shape, shape)
assert (
stride is None
or isinstance(stride, int)
or (isinstance(stride, tuple) and len(stride) == 2)
), f"input stride {stride} invalid. Must be a tuple of (stride_H, stride_W), or a single int"
if stride is None:
logger.info(f"stride not specified, using stride=shape ({shape})")
stride = shape
elif isinstance(stride, int):
stride = (stride, stride)
i, j = self.get_image_shape(level=level)
stride_i, stride_j = stride
# calculate number of expected tiles
# check for tile shape evenly dividing slide shape to fix https://github.com/Dana-Farber-AIOS/pathml/issues/305
if pad and i % stride_i != 0:
n_tiles_i = i // stride_i + 1
else:
n_tiles_i = (i - shape[0]) // stride_i + 1
if pad and j % stride_j != 0:
n_tiles_j = j // stride_j + 1
else:
n_tiles_j = (j - shape[1]) // stride_j + 1
logger.info(f"expected number of tiles: {n_tiles_i} x {n_tiles_j}")
for ix_i in range(n_tiles_i):
for ix_j in range(n_tiles_j):
coords = (int(ix_i * stride_i), int(ix_j * stride_j))
if coords[0] + shape[0] < i and coords[1] + shape[1] < j:
# get image for tile
tile_im = self.extract_region(
location=coords, size=shape, level=level, **kwargs
)
yield pathml.core.tile.Tile(image=tile_im, coords=coords)
else:
unpaddedshape = (
i - coords[0] if coords[0] + shape[0] > i else shape[0],
j - coords[1] if coords[1] + shape[1] > j else shape[1],
)
tile_im = self.extract_region(
location=coords, size=unpaddedshape, level=level, **kwargs
)
zeroarrayshape = list(tile_im.shape)
zeroarrayshape[0], zeroarrayshape[1] = (
list(shape)[0],
list(shape)[1],
)
padded_im = np.zeros(zeroarrayshape)
padded_im[: tile_im.shape[0], : tile_im.shape[1], ...] = tile_im
yield pathml.core.tile.Tile(image=padded_im, coords=coords)
class DICOMBackend(SlideBackend):
"""
Interface with DICOM files on disk.
Provides efficient access to individual Frame items contained in the
Pixel Data element without loading the entire element into memory.
    Assumes that frames are non-overlapping. Multi-level images are not supported by this backend.
Args:
filename (str): Path to the DICOM Part10 file on disk
"""
def __init__(self, filename):
self.filename = str(filename)
logger.info(f"DICOMBackend loading file at: {filename}")
# read metadata fields of interest from DICOM, without reading the entire PixelArray
tags = [
"NumberOfFrames",
"Rows",
"Columns",
"TotalPixelMatrixRows",
"TotalPixelMatrixColumns",
]
metadata = dcmread(filename, specific_tags=tags)
# can use frame shape, total shape to map between frame index and coords
self.frame_shape = (metadata.Rows, metadata.Columns)
self.shape = (metadata.TotalPixelMatrixRows, metadata.TotalPixelMatrixColumns)
self.n_frames = int(metadata.NumberOfFrames)
# use ceiling division to account for padding (i.e. still count incomplete frames on edge)
# ceiling division from: https://stackoverflow.com/a/17511341
self.n_rows = -(-self.shape[0] // self.frame_shape[0])
self.n_cols = -(-self.shape[1] // self.frame_shape[1])
self.transfer_syntax_uid = UID(metadata.file_meta.TransferSyntaxUID)
logger.info(
f"DICOM metadata: frame_shape={self.frame_shape}, nrows = {self.n_rows}, ncols = {self.n_cols}"
)
# actual file
self.fp = DicomFile(self.filename, mode="rb")
self.fp.is_little_endian = self.transfer_syntax_uid.is_little_endian
self.fp.is_implicit_VR = self.transfer_syntax_uid.is_implicit_VR
# need to do this to advance the file to the correct point, at the beginning of the pixels
self.metadata = dcmread(self.fp, stop_before_pixels=True)
self.pixel_data_offset = self.fp.tell()
self.fp.seek(self.pixel_data_offset, 0)
# note that reading this tag is necessary to advance the file to correct position
_ = TupleTag(self.fp.read_tag())
# get basic offset table, to enable reading individual frames without loading entire image
self.bot = self.get_bot(self.fp)
self.first_frame = self.fp.tell()
def __repr__(self):
out = f"DICOMBackend('{self.filename}')\n"
out += f"image shape: {self.shape}; frame shape: {self.frame_shape}; frame grid: {(self.n_rows, self.n_cols)}"
return out
@staticmethod
def get_bot(fp):
"""
Reads the value of the Basic Offset Table. This table is used to access individual frames
without loading the entire file into memory
Args:
fp (pydicom.filebase.DicomFile): pydicom DicomFile object
Returns:
list: Offset of each Frame of the Pixel Data element following the Basic Offset Table
"""
# Skip Pixel Data element header
pixel_data_offset = data_element_offset_to_value(fp.is_implicit_VR, "OB")
fp.seek(pixel_data_offset - 4, 1)
_, basic_offset_table = get_frame_offsets(fp)
first_frame_offset = fp.tell()
fp.seek(first_frame_offset, 0)
return basic_offset_table
def get_image_shape(self):
"""
Get the shape of the image.
Returns:
Tuple[int, int]: Shape of image (H, W)
"""
return self.shape
def get_thumbnail(self, size, **kwargs):
raise NotImplementedError("DICOMBackend does not support thumbnail")
def _index_to_coords(self, index):
"""
convert from frame index to frame coordinates using image shape, frame_shape.
Frames start at 0, go across the rows sequentially, use padding at edges.
Coordinates are for top-left corner of each Frame.
e.g. if the image is 100x100, and each frame is 10x10, then the top row will contain frames 0 through 9,
second row will contain frames 10 through 19, etc.
Args:
index (int): index of frame
Returns:
tuple: corresponding coordinates
"""
frame_i, frame_j = self.frame_shape
# get which row and column we are in
row_ix = index // self.n_cols
col_ix = index % self.n_cols
return row_ix * frame_i, col_ix * frame_j
def _coords_to_index(self, coords):
"""
convert from frame coordinates to frame index using image shape, frame_shape.
Frames start at 0, go across the rows sequentially, use zero padding at edges.
Coordinates are for top-left corner of each Frame.
e.g. if the image is 100x100, and each frame is 10x10, then coordinate (10, 10) corresponds to frame index 11
(second row, second column).
Args:
            coords (tuple): (i, j) coordinates
Returns:
int: frame index
"""
i, j = coords
frame_i, frame_j = self.frame_shape
# frame size must evenly divide coords, otherwise we aren't on a frame corner
if i % frame_i or j % frame_j:
logger.exception(f"i={i}, j={j}, frame shape = {self.frame_shape}")
raise ValueError(
f"coords {coords} are not evenly divided by frame size {(frame_i, frame_j)}. Must provide coords at upper left corner of Frame."
)
row_ix = i / frame_i
col_ix = j / frame_j
index = (row_ix * self.n_cols) + col_ix
return int(index)
def extract_region(self, location, size=None, level=None):
"""
Extract a single frame from the DICOM image.
Args:
location (Union[int, Tuple[int, int]]): coordinate location of top-left corner of frame, or integer index
of frame.
size (Union[int, Tuple[int, int]]): Size of each tile. May be a tuple of (height, width) or a
single integer, in which case square tiles of that size are generated.
Must be the same as the frame size.
Returns:
np.ndarray: image at the specified region
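        Example:
            Read a single frame, either by the (i, j) coords of its top-left corner or by frame
            index (illustrative; ``dcm`` is a DICOMBackend instance).
            frame = dcm.extract_region(location=(0, 0))
            same_frame = dcm.extract_region(location=0)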
"""
assert level == 0 or level is None, "dicom does not support levels"
# check inputs first
# check location
if isinstance(location, tuple):
frame_ix = self._coords_to_index(location)
elif isinstance(location, int):
frame_ix = location
else:
raise ValueError(
f"Invalid location: {location}. Must be an int frame index or tuple of (i, j) coordinates"
)
        if frame_ix >= self.n_frames:
raise ValueError(
f"location {location} invalid. Exceeds total number of frames ({self.n_frames})"
)
# check size
if size:
if type(size) is int:
size = (size, size)
if size != self.frame_shape:
raise ValueError(
f"Input size {size} must equal frame shape in DICOM image {self.frame_shape}"
)
return self._read_frame(frame_ix)
def _read_frame(self, frame_ix):
"""
Reads and decodes the pixel data of one frame.
Based on implementation from highDICOM: https://github.com/MGHComputationalPathology/highdicom
Args:
frame_ix (int): zero-based index of the frame
Returns:
np.ndarray: pixel data of that frame
"""
frame_offset = self.bot[int(frame_ix)]
self.fp.seek(self.first_frame + frame_offset, 0)
try:
stop_at = self.bot[frame_ix + 1] - frame_offset
except IndexError:
stop_at = None
n = 0
        # A frame may comprise multiple chunks
chunks = []
while True:
tag = TupleTag(self.fp.read_tag())
if n == stop_at or int(tag) == SequenceDelimiterTag:
break
length = self.fp.read_UL()
chunks.append(self.fp.read(length))
n += 8 + length
frame_bytes = b"".join(chunks)
decoded_frame_array = self._decode_frame(
value=frame_bytes,
rows=self.metadata.Rows,
columns=self.metadata.Columns,
samples_per_pixel=self.metadata.SamplesPerPixel,
transfer_syntax_uid=self.metadata.file_meta.TransferSyntaxUID,
)
return decoded_frame_array
@staticmethod
def _decode_frame(
value,
transfer_syntax_uid,
rows,
columns,
samples_per_pixel,
photometric_interpretation="RGB",
):
"""
Decodes the data of an individual frame.
        The pydicom library does not currently support reading individual frames.
        This solution, inspired by highdicom, creates a small dataset for the individual frame,
        which can then be decoded using the pydicom API.
Args:
value (bytes): Pixel data of a frame
transfer_syntax_uid (str): Transfer Syntax UID
rows (int): Number of pixel rows in the frame
columns (int): Number of pixel columns in the frame
samples_per_pixel (int): Number of samples per pixel
            photometric_interpretation (str): Photometric interpretation. Currently only RGB is supported.
Returns:
np.ndarray: decoded pixel data
"""
filemetadata = Dataset()
filemetadata.TransferSyntaxUID = transfer_syntax_uid
dataset = Dataset()
dataset.file_meta = filemetadata
dataset.Rows = rows
dataset.Columns = columns
dataset.SamplesPerPixel = samples_per_pixel
dataset.PhotometricInterpretation = photometric_interpretation
image = Image.open(BytesIO(value))
return np.asarray(image)
def generate_tiles(self, shape, stride, pad, level=0, **kwargs):
"""
Generator over tiles.
For DICOMBackend, each tile corresponds to a frame.
Args:
shape (int or tuple(int)): Size of each tile. May be a tuple of (height, width) or a single integer,
in which case square tiles of that size are generated. Must match frame size.
stride (int): Ignored for DICOMBackend. Frames are yielded individually.
pad (bool): How to handle tiles on the edges. If ``True``, these edge tiles will be zero-padded
and yielded with the other chunks. If ``False``, incomplete edge chunks will be ignored.
Defaults to ``False``.
Yields:
pathml.core.tile.Tile: Extracted Tile object
"""
assert level == 0 or level is None, "dicom does not support levels"
for i in range(self.n_frames):
if not pad:
# skip frame if it's in the last column
if i % self.n_cols == (self.n_cols - 1):
continue
# skip frame if it's in the last row
if i >= (self.n_frames - self.n_cols):
continue
frame_im = self.extract_region(location=i)
coords = self._index_to_coords(i)
frame_tile = pathml.core.tile.Tile(image=frame_im, coords=coords)
yield frame_tile