# File: rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/registration/_optical_flow_utils.py
"""Common tools to optical flow algorithms.
"""
import cupy as cp
import numpy as np
from cupyx.scipy import ndimage as ndi
from cucim.skimage.transform import pyramid_reduce
from cucim.skimage.util.dtype import _convert
def get_warp_points(grid, flow):
"""Compute warp point coordinates.
Parameters
----------
grid : iterable
The sparse grid to be warped (obtained using
        ``np.meshgrid(..., sparse=True)``).
flow : ndarray
The warping motion field.
Returns
-------
out : ndarray
The warp point coordinates.
"""
out = flow.copy()
for idx, g in enumerate(grid):
out[idx, ...] += g
return out
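
# Hedged usage sketch (illustration only, not executed on import): the warp
# coordinates are simply the identity grid plus the flow, so a zero flow
# reproduces the grid itself.
#
#   >>> import cupy as cp
#   >>> flow = cp.zeros((2, 4, 5))                        # zero motion field
#   >>> grid = cp.meshgrid(cp.arange(4.0), cp.arange(5.0),
#   ...                    indexing="ij", sparse=True)
#   >>> coords = get_warp_points(grid, flow)
#   >>> coords.shape
#   (2, 4, 5)
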
def resize_flow(flow, shape):
"""Rescale the values of the vector field (u, v) to the desired shape.
The values of the output vector field are scaled to the new
resolution.
Parameters
----------
flow : ndarray
The motion field to be processed.
shape : iterable
        Iterable of integers representing the output shape.
Returns
-------
rflow : ndarray
The resized and rescaled motion field.
"""
scale = [n / o for n, o in zip(shape, flow.shape[1:])]
scale_factor = cp.asarray(scale, dtype=flow.dtype)
for _ in shape:
scale_factor = scale_factor[..., cp.newaxis]
rflow = scale_factor * ndi.zoom(
flow, [1] + scale, order=0, mode="nearest", prefilter=False
)
return rflow
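
# Hedged usage sketch (illustration only): the flow values are rescaled along
# with the grid, so doubling the spatial shape also doubles the displacement
# magnitudes.
#
#   >>> flow = cp.ones((2, 8, 8), dtype=cp.float32)
#   >>> rflow = resize_flow(flow, (16, 16))
#   >>> rflow.shape
#   (2, 16, 16)
#   >>> float(rflow.max())                    # values scaled by 16 / 8 == 2
#   2.0
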
def get_pyramid(I, downscale=2.0, nlevel=10, min_size=16): # noqa
"""Construct image pyramid.
Parameters
----------
I : ndarray
The image to be preprocessed (Gray scale or RGB).
downscale : float
The pyramid downscale factor.
nlevel : int
The maximum number of pyramid levels.
min_size : int
The minimum size for any dimension of the pyramid levels.
Returns
-------
pyramid : list[ndarray]
        The coarse-to-fine image pyramid.
"""
pyramid = [I]
size = min(I.shape)
count = 1
while (count < nlevel) and (size > downscale * min_size):
J = pyramid_reduce(pyramid[-1], downscale, channel_axis=None)
pyramid.append(J)
size = min(J.shape)
count += 1
return pyramid[::-1]
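
# Hedged usage sketch (illustration only): the pyramid is returned coarse to
# fine, with the original image as the last (finest) level.
#
#   >>> image = cp.zeros((128, 128), dtype=cp.float32)
#   >>> pyr = get_pyramid(image, downscale=2.0, nlevel=10, min_size=16)
#   >>> [p.shape for p in pyr]
#   [(32, 32), (64, 64), (128, 128)]
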
def coarse_to_fine(
I0, I1, solver, downscale=2, nlevel=10, min_size=16, dtype=np.float32
):
"""Generic coarse to fine solver.
Parameters
----------
I0 : ndarray
The first gray scale image of the sequence.
I1 : ndarray
The second gray scale image of the sequence.
solver : callable
The solver applied at each pyramid level.
downscale : float
The pyramid downscale factor.
nlevel : int
The maximum number of pyramid levels.
min_size : int
The minimum size for any dimension of the pyramid levels.
dtype : dtype
Output data type.
Returns
-------
flow : ndarray
The estimated optical flow components for each axis.
"""
if I0.shape != I1.shape:
raise ValueError("Input images should have the same shape")
if np.dtype(dtype).char not in "efdg":
raise ValueError(
"Only floating point data type are valid" " for optical flow"
)
pyramid = list(
zip(
get_pyramid(_convert(I0, dtype), downscale, nlevel, min_size),
get_pyramid(_convert(I1, dtype), downscale, nlevel, min_size),
)
)
# Initialization to 0 at coarsest level.
flow = cp.zeros((pyramid[0][0].ndim,) + pyramid[0][0].shape, dtype=dtype)
flow = solver(pyramid[0][0], pyramid[0][1], flow)
for J0, J1 in pyramid[1:]:
flow = solver(J0, J1, resize_flow(flow, J0.shape))
return flow
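
# Hedged usage sketch (illustration only): ``coarse_to_fine`` only requires a
# callable ``solver(I0, I1, flow) -> flow``. The identity solver below is a
# hypothetical stand-in for the real TV-L1 / iLK solvers defined in
# ``_optical_flow.py``; it just propagates the upsampled flow.
#
#   >>> def identity_solver(I0, I1, flow):
#   ...     return flow
#   >>> img = cp.zeros((64, 64), dtype=cp.float32)
#   >>> flow = coarse_to_fine(img, img, identity_solver)
#   >>> flow.shape
#   (2, 64, 64)
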

# File: rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/registration/_optical_flow.py
# coding: utf-8
"""TV-L1 optical flow algorithm implementation.
"""
from functools import partial
from itertools import combinations_with_replacement
import cupy as cp
from cupyx.scipy import ndimage as ndi
from .._shared._gradient import gradient
from .._shared.utils import _supported_float_type
from ..transform import warp
from ._optical_flow_utils import coarse_to_fine, get_warp_points
def _tvl1(
reference_image,
moving_image,
flow0,
attachment,
tightness,
num_warp,
num_iter,
tol,
prefilter,
):
"""TV-L1 solver for optical flow estimation.
Parameters
----------
reference_image : ndarray, shape (M, N[, P[, ...]])
The first gray scale image of the sequence.
moving_image : ndarray, shape (M, N[, P[, ...]])
The second gray scale image of the sequence.
flow0 : ndarray, shape (image0.ndim, M, N[, P[, ...]])
Initialization for the vector field.
attachment : float
Attachment parameter. The smaller this parameter is,
        the smoother the solution is.
tightness : float
Tightness parameter. It should have a small value in order to
maintain attachment and regularization parts in
correspondence.
num_warp : int
Number of times moving_image is warped.
num_iter : int
        Number of fixed point iterations.
tol : float
Tolerance used as stopping criterion based on the L² distance
between two consecutive values of (u, v).
prefilter : bool
Whether to prefilter the estimated optical flow before each
image warp.
Returns
-------
    flow : ndarray, shape (image0.ndim, M, N[, P[, ...]])
The estimated optical flow components for each axis.
"""
dtype = reference_image.dtype
grid = cp.meshgrid(
*[cp.arange(n, dtype=dtype) for n in reference_image.shape],
indexing="ij",
sparse=True,
)
dt = 0.5 / reference_image.ndim
reg_num_iter = 2
f0 = attachment * tightness
f1 = dt / tightness
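    # ``tol`` is scaled by the number of pixels so that the stopping test
    # below, which sums the squared flow update over the whole image,
    # effectively acts as a mean (per-pixel) threshold.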
tol *= reference_image.size
flow_current = flow_previous = flow0
g = cp.zeros((reference_image.ndim,) + reference_image.shape, dtype=dtype)
proj = cp.zeros(
(
reference_image.ndim,
reference_image.ndim,
)
+ reference_image.shape,
dtype=dtype,
)
s_g = [slice(None)] * g.ndim
s_p = [slice(None)] * proj.ndim
s_d = [slice(None)] * (proj.ndim - 2)
for _ in range(num_warp):
if prefilter:
flow_current = ndi.median_filter(
flow_current, [1] + reference_image.ndim * [3]
)
image1_warp = warp(
moving_image, get_warp_points(grid, flow_current), mode="edge"
)
# output_as_array=True stacks the gradients along the first axis
grad = gradient(image1_warp, output_as_array=True)
NI = (grad * grad).sum(0)
NI[NI == 0] = 1
rho_0 = image1_warp - reference_image - (grad * flow_current).sum(0)
for _ in range(num_iter):
# Data term
rho = rho_0 + (grad * flow_current).sum(0)
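            # Thresholded update from the TV-L1 data term: where the
            # linearized residual ``rho`` is small compared with
            # ``f0 * |grad|**2`` the exact minimizer is used, otherwise the
            # step is clipped to ``f0 * sign(rho) * grad``.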
idx = abs(rho) <= f0 * NI
flow_auxiliary = flow_current
flow_auxiliary[:, idx] -= rho[idx] * grad[:, idx] / NI[idx]
idx = ~idx
srho = f0 * cp.sign(rho[idx])
flow_auxiliary[:, idx] -= srho * grad[:, idx]
# Regularization term
flow_current = flow_auxiliary.copy()
for idx in range(reference_image.ndim):
s_p[0] = idx
for _ in range(reg_num_iter):
for ax in range(reference_image.ndim):
s_g[0] = ax
s_g[ax + 1] = slice(0, -1)
g[tuple(s_g)] = cp.diff(flow_current[idx], axis=ax)
s_g[ax + 1] = slice(None)
norm = cp.sqrt((g * g).sum(0, keepdims=True))
norm *= f1
norm += 1.0
proj[idx] -= dt * g
proj[idx] /= norm
# d will be the (negative) divergence of proj[idx]
d = -proj[idx].sum(0)
for ax in range(reference_image.ndim):
s_p[1] = ax
s_p[ax + 2] = slice(0, -1)
s_d[ax] = slice(1, None)
d[tuple(s_d)] += proj[tuple(s_p)]
s_p[ax + 2] = slice(None)
s_d[ax] = slice(None)
flow_current[idx] = flow_auxiliary[idx] + d
        flow_previous -= flow_current  # the update difference, used as stopping criterion
if (flow_previous * flow_previous).sum() < tol:
break
flow_previous = flow_current
return flow_current
def optical_flow_tvl1(
reference_image,
moving_image,
*,
attachment=15,
tightness=0.3,
num_warp=5,
num_iter=10,
tol=1e-4,
prefilter=False,
dtype=cp.float32,
):
r"""Coarse to fine optical flow estimator.
The TV-L1 solver is applied at each level of the image
pyramid. TV-L1 is a popular algorithm for optical flow estimation
    introduced by Zach et al. [1]_, improved in [2]_ and detailed in [3]_.
Parameters
----------
reference_image : ndarray, shape (M, N[, P[, ...]])
The first gray scale image of the sequence.
moving_image : ndarray, shape (M, N[, P[, ...]])
The second gray scale image of the sequence.
attachment : float, optional
Attachment parameter (:math:`\lambda` in [1]_). The smaller
this parameter is, the smoother the returned result will be.
tightness : float, optional
Tightness parameter (:math:`\tau` in [1]_). It should have
a small value in order to maintain attachment and
regularization parts in correspondence.
num_warp : int, optional
Number of times moving_image is warped.
num_iter : int, optional
        Number of fixed point iterations.
tol : float, optional
Tolerance used as stopping criterion based on the L² distance
between two consecutive values of (u, v).
prefilter : bool, optional
Whether to prefilter the estimated optical flow before each
image warp. When True, a median filter with window size 3
along each axis is applied. This helps to remove potential
outliers.
dtype : dtype, optional
Output data type: must be floating point. Single precision
provides good results and saves memory usage and computation
time compared to double precision.
Returns
-------
    flow : ndarray, shape (image0.ndim, M, N[, P[, ...]])
The estimated optical flow components for each axis.
Notes
-----
Color images are not supported.
References
----------
.. [1] Zach, C., Pock, T., & Bischof, H. (2007, September). A
duality based approach for realtime TV-L 1 optical flow. In Joint
pattern recognition symposium (pp. 214-223). Springer, Berlin,
Heidelberg. :DOI:`10.1007/978-3-540-74936-3_22`
.. [2] Wedel, A., Pock, T., Zach, C., Bischof, H., & Cremers,
D. (2009). An improved algorithm for TV-L 1 optical flow. In
Statistical and geometrical approaches to visual motion analysis
(pp. 23-45). Springer, Berlin, Heidelberg.
:DOI:`10.1007/978-3-642-03061-1_2`
.. [3] Pérez, J. S., Meinhardt-Llopis, E., & Facciolo,
G. (2013). TV-L1 optical flow estimation. Image Processing On
Line, 2013, 137-150. :DOI:`10.5201/ipol.2013.26`
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.color import rgb2gray
>>> from skimage.data import stereo_motorcycle
>>> from cucim.skimage.registration import optical_flow_tvl1
>>> image0, image1, disp = [cp.array(a) for a in stereo_motorcycle()]
>>> # --- Convert the images to gray level: color is not supported.
>>> image0 = rgb2gray(image0)
>>> image1 = rgb2gray(image1)
>>> flow = optical_flow_tvl1(image1, image0)
"""
solver = partial(
_tvl1,
attachment=attachment,
tightness=tightness,
num_warp=num_warp,
num_iter=num_iter,
tol=tol,
prefilter=prefilter,
)
if cp.dtype(dtype) != _supported_float_type(dtype):
msg = f"dtype={dtype} is not supported. Try 'float32' or 'float64.'"
raise ValueError(msg)
return coarse_to_fine(reference_image, moving_image, solver, dtype=dtype)
def _ilk(
reference_image, moving_image, flow0, radius, num_warp, gaussian, prefilter
):
"""Iterative Lucas-Kanade (iLK) solver for optical flow estimation.
Parameters
----------
reference_image : ndarray, shape (M, N[, P[, ...]])
The first gray scale image of the sequence.
moving_image : ndarray, shape (M, N[, P[, ...]])
The second gray scale image of the sequence.
flow0 : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
Initialization for the vector field.
radius : int
Radius of the window considered around each pixel.
num_warp : int
Number of times moving_image is warped.
gaussian : bool
if True, a gaussian kernel is used for the local
integration. Otherwise, a uniform kernel is used.
prefilter : bool
Whether to prefilter the estimated optical flow before each
image warp. This helps to remove potential outliers.
Returns
-------
    flow : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
The estimated optical flow components for each axis.
"""
from .._shared.filters import gaussian as gaussian_filter
dtype = reference_image.dtype
ndim = reference_image.ndim
size = 2 * radius + 1
if gaussian:
sigma = ndim * (size / 4,)
filter_func = partial(gaussian_filter, sigma=sigma, mode="mirror")
else:
filter_func = partial(
ndi.uniform_filter, size=ndim * (size,), mode="mirror"
)
flow = flow0
# For each pixel location (i, j), the optical flow X = flow[:, i, j]
# is the solution of the ndim x ndim linear system
# A[i, j] * X = b[i, j]
A = cp.zeros(reference_image.shape + (ndim, ndim), dtype=dtype)
b = cp.zeros(reference_image.shape + (ndim,), dtype=dtype)
grid = cp.meshgrid(
*[cp.arange(n, dtype=dtype) for n in reference_image.shape],
indexing="ij",
sparse=True,
)
for _ in range(num_warp):
if prefilter:
flow = ndi.median_filter(flow, (1,) + ndim * (3,))
moving_image_warp = warp(
moving_image, get_warp_points(grid, flow), mode="edge"
)
# output_as_array=True stacks the gradients along the first axis
grad = gradient(moving_image_warp, output_as_array=True)
error_image = (
(grad * flow).sum(axis=0) + reference_image - moving_image_warp
)
# Local linear systems creation
for i, j in combinations_with_replacement(range(ndim), 2):
A[..., i, j] = A[..., j, i] = filter_func(grad[i] * grad[j])
for i in range(ndim):
b[..., i] = filter_func(grad[i] * error_image)
# Don't consider badly conditioned linear systems
idx = abs(cp.linalg.det(A)) < 1e-14
A[idx] = cp.eye(ndim, dtype=dtype)
b[idx] = 0
# Solve the local linear systems
flow = cp.moveaxis(cp.linalg.solve(A, b), ndim, 0)
return flow
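
# Hedged sketch (illustration only) of the batched per-pixel solve used above:
# ``cp.linalg.solve`` treats the leading axes as a batch, so a single call
# solves an independent ``ndim x ndim`` system at every pixel.
#
#   >>> A = cp.tile(cp.eye(2, dtype=cp.float32), (4, 4, 1, 1))   # (4, 4, 2, 2)
#   >>> b = cp.ones((4, 4, 2), dtype=cp.float32)
#   >>> x = cp.linalg.solve(A, b)                                # (4, 4, 2)
#   >>> flow = cp.moveaxis(x, 2, 0)                              # (2, 4, 4)
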
def optical_flow_ilk(
reference_image,
moving_image,
*,
radius=7,
num_warp=10,
gaussian=False,
prefilter=False,
dtype=cp.float32,
):
"""Coarse to fine optical flow estimator.
The iterative Lucas-Kanade (iLK) solver is applied at each level
of the image pyramid. iLK [1]_ is a fast and robust alternative to
    the TV-L1 algorithm, although less accurate for rendering flat surfaces
and object boundaries (see [2]_).
Parameters
----------
reference_image : ndarray, shape (M, N[, P[, ...]])
The first gray scale image of the sequence.
moving_image : ndarray, shape (M, N[, P[, ...]])
The second gray scale image of the sequence.
radius : int, optional
Radius of the window considered around each pixel.
num_warp : int, optional
Number of times moving_image is warped.
gaussian : bool, optional
If True, a Gaussian kernel is used for the local
integration. Otherwise, a uniform kernel is used.
prefilter : bool, optional
Whether to prefilter the estimated optical flow before each
image warp. When True, a median filter with window size 3
along each axis is applied. This helps to remove potential
outliers.
dtype : dtype, optional
Output data type: must be floating point. Single precision
provides good results and saves memory usage and computation
time compared to double precision.
Returns
-------
    flow : ndarray, shape (reference_image.ndim, M, N[, P[, ...]])
The estimated optical flow components for each axis.
Notes
-----
    - The implemented algorithm is described in **Table 2** of [1]_.
- Color images are not supported.
References
----------
.. [1] Le Besnerais, G., & Champagnat, F. (2005, September). Dense
optical flow by iterative local window registration. In IEEE
International Conference on Image Processing 2005 (Vol. 1,
pp. I-137). IEEE. :DOI:`10.1109/ICIP.2005.1529706`
.. [2] Plyer, A., Le Besnerais, G., & Champagnat,
F. (2016). Massively parallel Lucas Kanade optical flow for
real-time video processing applications. Journal of Real-Time
Image Processing, 11(4), 713-730. :DOI:`10.1007/s11554-014-0423-0`
Examples
--------
>>> import cupy as cp
>>> from skimage.data import stereo_motorcycle
>>> from cucim.skimage.color import rgb2gray
>>> from cucim.skimage.registration import optical_flow_ilk
>>> reference_image, moving_image, disp = map(cp.array, stereo_motorcycle())
>>> # --- Convert the images to gray level: color is not supported.
>>> reference_image = rgb2gray(reference_image)
>>> moving_image = rgb2gray(moving_image)
>>> flow = optical_flow_ilk(moving_image, reference_image)
"""
solver = partial(
_ilk,
radius=radius,
num_warp=num_warp,
gaussian=gaussian,
prefilter=prefilter,
)
if cp.dtype(dtype) != _supported_float_type(dtype):
msg = f"dtype={dtype} is not supported. Try 'float32' or 'float64.'"
raise ValueError(msg)
return coarse_to_fine(reference_image, moving_image, solver, dtype=dtype)

# File: rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/registration/tests/test_tvl1.py
import cupy as cp
import numpy as np
import pytest
from cucim.skimage._shared.utils import _supported_float_type
from cucim.skimage.registration import optical_flow_tvl1
from cucim.skimage.transform import warp
def _sin_flow_gen(image0, max_motion=4.5, npics=5):
"""Generate a synthetic ground truth optical flow with a sinusoid as
first component.
    Parameters
    ----------
image0: ndarray
The base image to be warped.
max_motion: float
Maximum flow magnitude.
npics: int
        Number of sinusoid peaks.
Returns
-------
flow, image1 : ndarray
The synthetic ground truth optical flow with a sinusoid as
first component and the corresponding warped image.
"""
grid = cp.meshgrid(*[cp.arange(n) for n in image0.shape], indexing="ij")
grid = cp.stack(grid)
# TODO: make upstream scikit-image PR changing gt_flow dtype to float
gt_flow = cp.zeros_like(grid, dtype=float)
gt_flow[0, ...] = max_motion * cp.sin(
grid[0] / grid[0].max() * npics * np.pi
)
image1 = warp(image0, grid - gt_flow, mode="edge")
return gt_flow, image1
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_2d_motion(dtype):
# Generate synthetic data
rnd = cp.random.RandomState(0)
image0 = cp.array(rnd.normal(size=(256, 256)).astype(dtype))
gt_flow, image1 = _sin_flow_gen(image0)
image1 = image1.astype(dtype, copy=False)
float_dtype = _supported_float_type(dtype)
# Estimate the flow
flow = optical_flow_tvl1(image0, image1, attachment=5, dtype=float_dtype)
assert flow.dtype == float_dtype
    # Assert that the average absolute error is less than half a pixel
assert abs(flow - gt_flow).mean() < 0.5
if dtype != float_dtype:
with pytest.raises(ValueError):
optical_flow_tvl1(image0, image1, attachment=5, dtype=dtype)
@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
def test_3d_motion(dtype):
# Generate synthetic data
rnd = np.random.RandomState(0)
image0 = cp.array(rnd.normal(size=(100, 100, 100))).astype(dtype)
gt_flow, image1 = _sin_flow_gen(image0)
image1 = image1.astype(dtype, copy=False)
# Estimate the flow
# TODO: note: when changing _sin_flow_gen to use a float deformation field
# had to increase attachment here from 5 to pass the tolerance.
flow = optical_flow_tvl1(image0, image1, attachment=10, dtype=dtype)
assert flow.dtype == dtype
    # Assert that the average absolute error is less than half a pixel
assert abs(flow - gt_flow).mean() < 0.5
def test_no_motion_2d():
rnd = np.random.default_rng(0)
img = cp.array(rnd.normal(size=(256, 256)))
flow = optical_flow_tvl1(img, img)
assert cp.all(flow == 0)
def test_no_motion_3d():
rnd = np.random.default_rng(0)
img = cp.array(rnd.normal(size=(64, 64, 64)))
flow = optical_flow_tvl1(img, img)
assert cp.all(flow == 0)
def test_optical_flow_dtype():
# Generate synthetic data
rnd = np.random.default_rng(0)
image0 = cp.array(rnd.normal(size=(256, 256)))
gt_flow, image1 = _sin_flow_gen(image0)
# Estimate the flow at double precision
flow_f64 = optical_flow_tvl1(image0, image1, attachment=5, dtype=np.float64)
assert flow_f64.dtype == np.float64
# Estimate the flow at single precision
flow_f32 = optical_flow_tvl1(image0, image1, attachment=5, dtype=np.float32)
assert flow_f32.dtype == np.float32
# Assert that floating point precision does not affect the quality
# of the estimated flow
assert cp.abs(flow_f64 - flow_f32).mean() < 1e-3
def test_incompatible_shapes():
rnd = np.random.default_rng(0)
I0 = cp.array(rnd.normal(size=(256, 256)))
I1 = cp.array(rnd.normal(size=(128, 256)))
with pytest.raises(ValueError):
u, v = optical_flow_tvl1(I0, I1)
def test_wrong_dtype():
rnd = np.random.default_rng(0)
img = cp.array(rnd.normal(size=(256, 256)))
with pytest.raises(ValueError):
u, v = optical_flow_tvl1(img, img, dtype=np.int64)

# File: rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/registration/tests/test_masked_phase_cross_correlation.py
import cupy as cp
import numpy as np
import pytest
from cupyx.scipy.ndimage import fourier_shift, shift as real_shift
from numpy.testing import assert_almost_equal
from skimage.data import camera
from skimage.io import imread
from cucim.skimage._shared.fft import fftmodule as fft
from cucim.skimage._shared.testing import fetch
from cucim.skimage._shared.utils import _supported_float_type
from cucim.skimage.registration._masked_phase_cross_correlation import (
_masked_phase_cross_correlation as masked_register_translation,
cross_correlate_masked,
)
from cucim.skimage.registration._phase_cross_correlation import (
phase_cross_correlation,
)
def test_masked_registration_vs_phase_cross_correlation():
"""masked_register_translation should give the same results as
phase_cross_correlation in the case of trivial masks."""
reference_image = cp.array(camera())
shift = (-7, 12)
shifted = cp.real(
fft.ifft2(fourier_shift(fft.fft2(reference_image), shift))
)
trivial_mask = cp.ones_like(reference_image)
nonmasked_result, *_ = phase_cross_correlation(reference_image, shifted)
masked_result = masked_register_translation(
reference_image,
shifted,
reference_mask=trivial_mask,
overlap_ratio=1 / 10,
)
cp.testing.assert_array_equal(nonmasked_result, masked_result)
def test_masked_registration_random_masks():
"""masked_register_translation should be able to register translations
between images even with random masks."""
    # Seed random number generator for reproducible results
np.random.seed(23)
reference_image = cp.array(camera())
shift = (-7, 12)
shifted = cp.real(
fft.ifft2(fourier_shift(fft.fft2(reference_image), shift))
)
# Random masks with 75% of pixels being valid
ref_mask = np.random.choice(
[True, False], reference_image.shape, p=[3 / 4, 1 / 4]
)
shifted_mask = np.random.choice(
[True, False], shifted.shape, p=[3 / 4, 1 / 4]
)
ref_mask = cp.asarray(ref_mask)
shifted_mask = cp.asarray(shifted_mask)
measured_shift = masked_register_translation(
reference_image,
shifted,
reference_mask=ref_mask,
moving_mask=shifted_mask,
)
cp.testing.assert_array_equal(measured_shift, -cp.asarray(shift))
def test_masked_registration_3d_contiguous_mask():
"""masked_register_translation should be able to register translations
between volumes with contiguous masks."""
data = pytest.importorskip("skimage.data")
if not hasattr(data, "brain"):
pytest.skip("brain data not available in this version of scikit-image")
ref_vol = cp.array(data.brain()[:, ::2, ::2])
offset = (1, -5, 10)
# create square mask
ref_mask = cp.zeros_like(ref_vol, dtype=bool)
ref_mask[:-2, 75:100, 75:100] = True
ref_shifted = real_shift(ref_vol, offset)
measured_offset = masked_register_translation(
ref_vol, ref_shifted, reference_mask=ref_mask, moving_mask=ref_mask
)
cp.testing.assert_array_equal(offset, -cp.array(measured_offset))
def test_masked_registration_random_masks_non_equal_sizes():
"""masked_register_translation should be able to register
translations between images that are not the same size even
with random masks."""
    # Seed random number generator for reproducible results
np.random.seed(23)
reference_image = cp.array(camera())
shift = (-7, 12)
shifted = cp.real(
fft.ifft2(fourier_shift(fft.fft2(reference_image), shift))
)
# Crop the shifted image
shifted = shifted[64:-64, 64:-64]
# Random masks with 75% of pixels being valid
ref_mask = np.random.choice(
[True, False], reference_image.shape, p=[3 / 4, 1 / 4]
)
shifted_mask = np.random.choice(
[True, False], shifted.shape, p=[3 / 4, 1 / 4]
)
reference_image = cp.asarray(reference_image)
shifted = cp.asarray(shifted)
measured_shift = masked_register_translation(
reference_image,
shifted,
reference_mask=cp.ones_like(ref_mask),
moving_mask=cp.ones_like(shifted_mask),
)
cp.testing.assert_array_equal(measured_shift, -cp.asarray(shift))
def test_masked_registration_padfield_data():
"""Masked translation registration should behave like in the original
publication"""
    # Test translated from the MATLAB implementation `MaskedFFTRegistrationTest`
# file. You can find the source code here:
# http://www.dirkpadfield.com/Home/MaskedFFTRegistrationCode.zip
shifts = [(75, 75), (-130, 130), (130, 130)]
for xi, yi in shifts:
fixed_image = cp.array(
imread(
fetch(
"registration/tests/data/OriginalX{:d}Y{:d}.png"
"".format(xi, yi)
)
)
)
moving_image = cp.array(
imread(
fetch(
"registration/tests/data/TransformedX{:d}Y{:d}.png"
"".format(xi, yi)
)
)
)
# Valid pixels are 1
fixed_mask = fixed_image != 0
moving_mask = moving_image != 0
        # Note that the registration returns (row, col) shifts while xi, yi
        # are expressed as (x, y) shifts
shift_y, shift_x = cp.asnumpy(
masked_register_translation(
fixed_image,
moving_image,
reference_mask=fixed_mask,
moving_mask=moving_mask,
overlap_ratio=0.1,
)
)
# Note: by looking at the test code from Padfield's
# MaskedFFTRegistrationCode repository, the
# shifts were not xi and yi, but xi and -yi
np.testing.assert_array_equal((shift_x, shift_y), (-xi, yi))
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_cross_correlate_masked_output_shape(dtype):
"""Masked normalized cross-correlation should return a shape
of N + M + 1 for each transform axis."""
shape1 = (15, 4, 5)
shape2 = (6, 12, 7)
expected_full_shape = tuple(np.array(shape1) + np.array(shape2) - 1)
expected_same_shape = shape1
arr1 = cp.zeros(shape1, dtype=dtype)
arr2 = cp.zeros(shape2, dtype=dtype)
# Trivial masks
m1 = cp.ones_like(arr1)
m2 = cp.ones_like(arr2)
float_dtype = _supported_float_type(dtype)
full_xcorr = cross_correlate_masked(
arr1, arr2, m1, m2, axes=(0, 1, 2), mode="full"
)
assert full_xcorr.dtype.kind != "c" # grlee77: output should be real
assert full_xcorr.shape == expected_full_shape
assert full_xcorr.dtype == float_dtype
same_xcorr = cross_correlate_masked(
arr1, arr2, m1, m2, axes=(0, 1, 2), mode="same"
)
assert same_xcorr.shape == expected_same_shape
assert same_xcorr.dtype == float_dtype
def test_cross_correlate_masked_test_against_mismatched_dimensions():
"""Masked normalized cross-correlation should raise an error if array
dimensions along non-transformation axes are mismatched."""
shape1 = (23, 1, 1)
shape2 = (6, 2, 2)
arr1 = cp.zeros(shape1)
arr2 = cp.zeros(shape2)
# Trivial masks
m1 = cp.ones_like(arr1)
m2 = cp.ones_like(arr2)
with pytest.raises(ValueError):
cross_correlate_masked(arr1, arr2, m1, m2, axes=(1, 2))
def test_cross_correlate_masked_output_range():
"""Masked normalized cross-correlation should return between 1 and -1."""
    # Seed random number generator for reproducible results
np.random.seed(23)
# Array dimensions must match along non-transformation axes, in
# this case
# axis 0
shape1 = (15, 4, 5)
shape2 = (15, 12, 7)
# Initial array ranges between -5 and 5
arr1 = 10 * np.random.random(shape1) - 5
arr2 = 10 * np.random.random(shape2) - 5
# random masks
m1 = np.random.choice([True, False], arr1.shape)
m2 = np.random.choice([True, False], arr2.shape)
arr1 = cp.asarray(arr1)
arr2 = cp.asarray(arr2)
m1 = cp.asarray(m1)
m2 = cp.asarray(m2)
xcorr = cross_correlate_masked(arr1, arr2, m1, m2, axes=(1, 2))
# No assert array less or equal, so we add an eps
# Also could not find an `assert_array_greater`, Use (-xcorr) instead
eps = np.finfo(float).eps
cp.testing.assert_array_less(xcorr, 1 + eps)
cp.testing.assert_array_less(-xcorr, 1 + eps)
def test_cross_correlate_masked_side_effects():
"""Masked normalized cross-correlation should not modify the inputs."""
shape1 = (2, 2, 2)
shape2 = (2, 2, 2)
arr1 = cp.zeros(shape1)
arr2 = cp.zeros(shape2)
# Trivial masks
m1 = cp.ones_like(arr1)
m2 = cp.ones_like(arr2)
# CuPy Backed: had to refactor (cannot set write=False)
# for arr in (arr1, arr2, m1, m2):
# arr.setflags(write=False)
arr1c, arr2c, m1c, m2c = [a.copy() for a in (arr1, arr2, m1, m2)]
cross_correlate_masked(arr1, arr2, m1, m2)
cp.testing.assert_array_equal(arr1, arr1c)
cp.testing.assert_array_equal(arr2, arr2c)
cp.testing.assert_array_equal(m1, m1c)
cp.testing.assert_array_equal(m2, m2c)
def test_cross_correlate_masked_over_axes():
"""Masked normalized cross-correlation over axes should be
equivalent to a loop over non-transform axes."""
    # Seed random number generator for reproducible results
np.random.seed(23)
arr1 = np.random.random((8, 8, 5))
arr2 = np.random.random((8, 8, 5))
m1 = np.random.choice([True, False], arr1.shape)
m2 = np.random.choice([True, False], arr2.shape)
arr1 = cp.asarray(arr1)
arr2 = cp.asarray(arr2)
m1 = cp.asarray(m1)
m2 = cp.asarray(m2)
# Loop over last axis
with_loop = cp.empty_like(arr1, dtype=np.complex128)
for index in range(arr1.shape[-1]):
with_loop[:, :, index] = cross_correlate_masked(
arr1[:, :, index],
arr2[:, :, index],
m1[:, :, index],
m2[:, :, index],
axes=(0, 1),
mode="same",
)
over_axes = cross_correlate_masked(
arr1, arr2, m1, m2, axes=(0, 1), mode="same"
)
cp.testing.assert_array_almost_equal(with_loop, over_axes)
def test_cross_correlate_masked_autocorrelation_trivial_masks():
"""Masked normalized cross-correlation between identical arrays
should reduce to an autocorrelation even with random masks."""
    # Seed random number generator for reproducible results
np.random.seed(23)
arr1 = cp.asarray(camera())
# Random masks with 75% of pixels being valid
m1 = np.random.choice([True, False], arr1.shape, p=[3 / 4, 1 / 4])
m2 = np.random.choice([True, False], arr1.shape, p=[3 / 4, 1 / 4])
m1 = cp.asarray(m1)
m2 = cp.asarray(m2)
xcorr = cross_correlate_masked(
arr1, arr1, m1, m2, axes=(0, 1), mode="same", overlap_ratio=0
).real
max_index = cp.unravel_index(cp.argmax(xcorr), xcorr.shape)
max_index = tuple(map(int, max_index))
# Autocorrelation should have maximum in center of array
# CuPy Backend: uint8 inputs will be processed in float32, so reduce
# decimal to 5
assert_almost_equal(float(xcorr.max()), 1, decimal=5)
np.testing.assert_array_equal(max_index, np.array(arr1.shape) / 2)

# File: rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/registration/tests/test_phase_cross_correlation.py
import itertools
import cupy as cp
import numpy as np
import pytest
from cupy.testing import assert_allclose
from cupyx.scipy.ndimage import fourier_shift
from skimage.data import camera, eagle
from cucim.skimage import img_as_float
from cucim.skimage._shared._warnings import expected_warnings
from cucim.skimage._shared.fft import fftmodule as fft
from cucim.skimage.data import binary_blobs
from cucim.skimage.registration._phase_cross_correlation import (
_upsampled_dft,
phase_cross_correlation,
)
@pytest.mark.parametrize("normalization", [None, "phase"])
def test_correlation(normalization):
reference_image = fft.fftn(cp.array(camera()))
shift = (-7, 12)
shifted_image = fourier_shift(reference_image, shift)
# pixel precision
result, _, _ = phase_cross_correlation(
reference_image,
shifted_image,
space="fourier",
normalization=normalization,
)
assert_allclose(result[:2], -cp.array(shift))
@pytest.mark.parametrize("normalization", ["nonexisting"])
def test_correlation_invalid_normalization(normalization):
reference_image = fft.fftn(cp.array(camera()))
shift = (-7, 12)
shifted_image = fourier_shift(reference_image, shift)
# pixel precision
with pytest.raises(ValueError):
phase_cross_correlation(
reference_image,
shifted_image,
space="fourier",
normalization=normalization,
)
@pytest.mark.parametrize("normalization", [None, "phase"])
def test_subpixel_precision(normalization):
reference_image = fft.fftn(cp.array(camera()))
subpixel_shift = (-2.4, 1.32)
shifted_image = fourier_shift(reference_image, subpixel_shift)
# subpixel precision
result, _, _ = phase_cross_correlation(
reference_image,
shifted_image,
upsample_factor=100,
space="fourier",
normalization=normalization,
)
assert_allclose(result[:2], -cp.array(subpixel_shift), atol=0.05)
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_real_input(dtype):
reference_image = cp.array(camera()).astype(dtype, copy=False)
subpixel_shift = (-2.4, 1.32)
shifted_image = fourier_shift(fft.fftn(reference_image), subpixel_shift)
shifted_image = fft.ifftn(shifted_image).real.astype(dtype, copy=False)
# subpixel precision
result, error, diffphase = phase_cross_correlation(
reference_image, shifted_image, upsample_factor=100
)
assert isinstance(result, tuple)
assert all(isinstance(s, float) for s in result)
assert_allclose(result[:2], -cp.array(subpixel_shift), atol=0.05)
def test_size_one_dimension_input():
# take a strip of the input image
reference_image = fft.fftn(cp.array(camera())[:, 15]).reshape((-1, 1))
subpixel_shift = (-2.4, 4)
shifted_image = fourier_shift(reference_image, subpixel_shift)
# subpixel precision
result, error, diffphase = phase_cross_correlation(
reference_image, shifted_image, upsample_factor=20, space="fourier"
)
assert_allclose(result[:2], -cp.array((-2.4, 0)), atol=0.05)
def test_3d_input():
phantom = img_as_float(binary_blobs(length=32, n_dim=3))
reference_image = fft.fftn(phantom)
shift = (-2.0, 1.0, 5.0)
shifted_image = fourier_shift(reference_image, shift)
result, error, diffphase = phase_cross_correlation(
reference_image, shifted_image, space="fourier"
)
assert_allclose(result, -cp.array(shift), atol=0.05)
# subpixel precision now available for 3-D data
subpixel_shift = (-2.3, 1.7, 5.4)
shifted_image = fourier_shift(reference_image, subpixel_shift)
result, error, diffphase = phase_cross_correlation(
reference_image, shifted_image, upsample_factor=100, space="fourier"
)
assert_allclose(result, -cp.array(subpixel_shift), atol=0.05)
def test_unknown_space_input():
image = cp.ones((5, 5))
with pytest.raises(ValueError):
phase_cross_correlation(image, image, space="frank")
def test_wrong_input():
# Dimensionality mismatch
image = cp.ones((5, 5, 1))
template = cp.ones((5, 5))
with pytest.raises(ValueError):
phase_cross_correlation(template, image)
# Size mismatch
image = cp.ones((5, 5))
template = cp.ones((4, 4))
with pytest.raises(ValueError):
phase_cross_correlation(template, image)
# NaN values in data
image = cp.ones((5, 5))
image[0][0] = cp.nan
template = cp.ones((5, 5))
with expected_warnings([r"invalid value encountered in true_divide|\A\Z"]):
with pytest.raises(ValueError):
phase_cross_correlation(template, image, return_error=True)
def test_4d_input_pixel():
phantom = img_as_float(binary_blobs(length=32, n_dim=4))
reference_image = fft.fftn(phantom)
shift = (-2.0, 1.0, 5.0, -3)
shifted_image = fourier_shift(reference_image, shift)
result, error, diffphase = phase_cross_correlation(
reference_image, shifted_image, space="fourier"
)
assert_allclose(result, -cp.array(shift), atol=0.05)
def test_4d_input_subpixel():
phantom = img_as_float(binary_blobs(length=32, n_dim=4))
reference_image = fft.fftn(phantom)
subpixel_shift = (-2.3, 1.7, 5.4, -3.2)
shifted_image = fourier_shift(reference_image, subpixel_shift)
result, error, diffphase = phase_cross_correlation(
reference_image, shifted_image, upsample_factor=10, space="fourier"
)
assert_allclose(result, -cp.array(subpixel_shift), atol=0.05)
def test_mismatch_upsampled_region_size():
with pytest.raises(ValueError):
_upsampled_dft(cp.ones((4, 4)), upsampled_region_size=[3, 2, 1, 4])
def test_mismatch_offsets_size():
with pytest.raises(ValueError):
_upsampled_dft(cp.ones((4, 4)), 3, axis_offsets=[3, 2, 1, 4])
@pytest.mark.parametrize(
("shift0", "shift1"),
itertools.product((100, -100, 350, -350), (100, -100, 350, -350)),
)
@cp.testing.with_requires("scikit-image>=0.20")
def test_disambiguate_2d(shift0, shift1):
image = cp.array(eagle()[500:, 900:]) # use a highly textured image region
    # Protect against some versions of scikit-image + imageio loading as
# RGB instead of grayscale.
if image.ndim == 3:
image = image[..., 0]
shift = (shift0, shift1)
origin0 = []
for s in shift:
if s > 0:
origin0.append(0)
else:
origin0.append(-s)
origin1 = np.array(origin0) + shift
slice0 = tuple(slice(o, o + 450) for o in origin0)
slice1 = tuple(slice(o, o + 450) for o in origin1)
reference = image[slice0]
moving = image[slice1]
computed_shift, _, _ = phase_cross_correlation(
reference, moving, disambiguate=True, return_error="always"
)
np.testing.assert_equal(shift, computed_shift)
def test_disambiguate_zero_shift():
"""When the shift is 0, disambiguation becomes degenerate.
Some quadrants become size 0, which prevents computation of
cross-correlation. This test ensures that nothing bad happens in that
scenario.
"""
image = cp.array(camera())
computed_shift, _, _ = phase_cross_correlation(
image, image, disambiguate=True, return_error="always"
)
assert computed_shift == (0, 0)

# File: rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/registration/tests/test_ilk.py
import cupy as cp
import numpy as np
import pytest
from test_tvl1 import _sin_flow_gen
from cucim.skimage._shared.utils import _supported_float_type
from cucim.skimage.registration import optical_flow_ilk
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
@pytest.mark.parametrize("gaussian", [True, False])
@pytest.mark.parametrize("prefilter", [True, False])
def test_2d_motion(dtype, gaussian, prefilter):
# Generate synthetic data
rnd = np.random.default_rng(0)
image0 = rnd.normal(size=(256, 256))
image0 = cp.asarray(image0, dtype=dtype)
gt_flow, image1 = _sin_flow_gen(image0)
image1 = image1.astype(dtype, copy=False)
float_dtype = _supported_float_type(dtype)
# Estimate the flow
flow = optical_flow_ilk(
image0,
image1,
gaussian=gaussian,
prefilter=prefilter,
dtype=float_dtype,
)
assert flow.dtype == float_dtype
    # Assert that the average absolute error is less than half a pixel
assert abs(flow - gt_flow).mean() < 0.5
if dtype != float_dtype:
with pytest.raises(ValueError):
optical_flow_ilk(
image0,
image1,
gaussian=gaussian,
prefilter=prefilter,
dtype=dtype,
)
@pytest.mark.parametrize("gaussian", [True, False])
@pytest.mark.parametrize("prefilter", [True, False])
def test_3d_motion(gaussian, prefilter):
# Generate synthetic data
rnd = np.random.default_rng(123)
image0 = rnd.normal(size=(50, 55, 60))
image0 = cp.asarray(image0)
gt_flow, image1 = _sin_flow_gen(image0, npics=3)
# Estimate the flow
flow = optical_flow_ilk(
image0, image1, radius=5, gaussian=gaussian, prefilter=prefilter
)
    # Assert that the average absolute error is less than half a pixel
assert abs(flow - gt_flow).mean() < 0.5
def test_no_motion_2d():
rnd = np.random.default_rng(0)
img = rnd.normal(size=(256, 256))
img = cp.asarray(img)
flow = optical_flow_ilk(img, img)
assert cp.all(flow == 0)
def test_no_motion_3d():
rnd = np.random.default_rng(0)
img = rnd.normal(size=(64, 64, 64))
img = cp.asarray(img)
flow = optical_flow_ilk(img, img)
assert cp.all(flow == 0)
def test_optical_flow_dtype():
# Generate synthetic data
rnd = np.random.default_rng(0)
image0 = rnd.normal(size=(256, 256))
image0 = cp.asarray(image0)
gt_flow, image1 = _sin_flow_gen(image0)
# Estimate the flow at double precision
flow_f64 = optical_flow_ilk(image0, image1, dtype="float64")
assert flow_f64.dtype == "float64"
# Estimate the flow at single precision
flow_f32 = optical_flow_ilk(image0, image1, dtype="float32")
assert flow_f32.dtype == "float32"
# Assert that floating point precision does not affect the quality
# of the estimated flow
assert cp.abs(flow_f64 - flow_f32).mean() < 1e-3
def test_incompatible_shapes():
rnd = np.random.default_rng(0)
I0 = rnd.normal(size=(256, 256))
I1 = rnd.normal(size=(255, 256))
with pytest.raises(ValueError):
u, v = optical_flow_ilk(I0, I1)
def test_wrong_dtype():
rnd = np.random.default_rng(0)
img = rnd.normal(size=(256, 256))
with pytest.raises(ValueError):
u, v = optical_flow_ilk(img, img, dtype="int")

# File: rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/rgb_colors.py
aliceblue = (0.941, 0.973, 1)
antiquewhite = (0.98, 0.922, 0.843)
aqua = (0, 1, 1)
aquamarine = (0.498, 1, 0.831)
azure = (0.941, 1, 1)
beige = (0.961, 0.961, 0.863)
bisque = (1, 0.894, 0.769)
black = (0, 0, 0)
blanchedalmond = (1, 0.922, 0.804)
blue = (0, 0, 1)
blueviolet = (0.541, 0.169, 0.886)
brown = (0.647, 0.165, 0.165)
burlywood = (0.871, 0.722, 0.529)
cadetblue = (0.373, 0.62, 0.627)
chartreuse = (0.498, 1, 0)
chocolate = (0.824, 0.412, 0.118)
coral = (1, 0.498, 0.314)
cornflowerblue = (0.392, 0.584, 0.929)
cornsilk = (1, 0.973, 0.863)
crimson = (0.863, 0.0784, 0.235)
cyan = (0, 1, 1)
darkblue = (0, 0, 0.545)
darkcyan = (0, 0.545, 0.545)
darkgoldenrod = (0.722, 0.525, 0.0431)
darkgray = (0.663, 0.663, 0.663)
darkgreen = (0, 0.392, 0)
darkgrey = (0.663, 0.663, 0.663)
darkkhaki = (0.741, 0.718, 0.42)
darkmagenta = (0.545, 0, 0.545)
darkolivegreen = (0.333, 0.42, 0.184)
darkorange = (1, 0.549, 0)
darkorchid = (0.6, 0.196, 0.8)
darkred = (0.545, 0, 0)
darksalmon = (0.914, 0.588, 0.478)
darkseagreen = (0.561, 0.737, 0.561)
darkslateblue = (0.282, 0.239, 0.545)
darkslategray = (0.184, 0.31, 0.31)
darkslategrey = (0.184, 0.31, 0.31)
darkturquoise = (0, 0.808, 0.82)
darkviolet = (0.58, 0, 0.827)
deeppink = (1, 0.0784, 0.576)
deepskyblue = (0, 0.749, 1)
dimgray = (0.412, 0.412, 0.412)
dimgrey = (0.412, 0.412, 0.412)
dodgerblue = (0.118, 0.565, 1)
firebrick = (0.698, 0.133, 0.133)
floralwhite = (1, 0.98, 0.941)
forestgreen = (0.133, 0.545, 0.133)
fuchsia = (1, 0, 1)
gainsboro = (0.863, 0.863, 0.863)
ghostwhite = (0.973, 0.973, 1)
gold = (1, 0.843, 0)
goldenrod = (0.855, 0.647, 0.125)
gray = (0.502, 0.502, 0.502)
green = (0, 0.502, 0)
greenyellow = (0.678, 1, 0.184)
grey = (0.502, 0.502, 0.502)
honeydew = (0.941, 1, 0.941)
hotpink = (1, 0.412, 0.706)
indianred = (0.804, 0.361, 0.361)
indigo = (0.294, 0, 0.51)
ivory = (1, 1, 0.941)
khaki = (0.941, 0.902, 0.549)
lavender = (0.902, 0.902, 0.98)
lavenderblush = (1, 0.941, 0.961)
lawngreen = (0.486, 0.988, 0)
lemonchiffon = (1, 0.98, 0.804)
lightblue = (0.678, 0.847, 0.902)
lightcoral = (0.941, 0.502, 0.502)
lightcyan = (0.878, 1, 1)
lightgoldenrodyellow = (0.98, 0.98, 0.824)
lightgray = (0.827, 0.827, 0.827)
lightgreen = (0.565, 0.933, 0.565)
lightgrey = (0.827, 0.827, 0.827)
lightpink = (1, 0.714, 0.757)
lightsalmon = (1, 0.627, 0.478)
lightseagreen = (0.125, 0.698, 0.667)
lightskyblue = (0.529, 0.808, 0.98)
lightslategray = (0.467, 0.533, 0.6)
lightslategrey = (0.467, 0.533, 0.6)
lightsteelblue = (0.69, 0.769, 0.871)
lightyellow = (1, 1, 0.878)
lime = (0, 1, 0)
limegreen = (0.196, 0.804, 0.196)
linen = (0.98, 0.941, 0.902)
magenta = (1, 0, 1)
maroon = (0.502, 0, 0)
mediumaquamarine = (0.4, 0.804, 0.667)
mediumblue = (0, 0, 0.804)
mediumorchid = (0.729, 0.333, 0.827)
mediumpurple = (0.576, 0.439, 0.859)
mediumseagreen = (0.235, 0.702, 0.443)
mediumslateblue = (0.482, 0.408, 0.933)
mediumspringgreen = (0, 0.98, 0.604)
mediumturquoise = (0.282, 0.82, 0.8)
mediumvioletred = (0.78, 0.0824, 0.522)
midnightblue = (0.098, 0.098, 0.439)
mintcream = (0.961, 1, 0.98)
mistyrose = (1, 0.894, 0.882)
moccasin = (1, 0.894, 0.71)
navajowhite = (1, 0.871, 0.678)
navy = (0, 0, 0.502)
oldlace = (0.992, 0.961, 0.902)
olive = (0.502, 0.502, 0)
olivedrab = (0.42, 0.557, 0.137)
orange = (1, 0.647, 0)
orangered = (1, 0.271, 0)
orchid = (0.855, 0.439, 0.839)
palegoldenrod = (0.933, 0.91, 0.667)
palegreen = (0.596, 0.984, 0.596)
paleturquoise = (0.686, 0.933, 0.933)
palevioletred = (0.859, 0.439, 0.576)
papayawhip = (1, 0.937, 0.835)
peachpuff = (1, 0.855, 0.725)
peru = (0.804, 0.522, 0.247)
pink = (1, 0.753, 0.796)
plum = (0.867, 0.627, 0.867)
powderblue = (0.69, 0.878, 0.902)
purple = (0.502, 0, 0.502)
red = (1, 0, 0)
rosybrown = (0.737, 0.561, 0.561)
royalblue = (0.255, 0.412, 0.882)
saddlebrown = (0.545, 0.271, 0.0745)
salmon = (0.98, 0.502, 0.447)
sandybrown = (0.98, 0.643, 0.376)
seagreen = (0.18, 0.545, 0.341)
seashell = (1, 0.961, 0.933)
sienna = (0.627, 0.322, 0.176)
silver = (0.753, 0.753, 0.753)
skyblue = (0.529, 0.808, 0.922)
slateblue = (0.416, 0.353, 0.804)
slategray = (0.439, 0.502, 0.565)
slategrey = (0.439, 0.502, 0.565)
snow = (1, 0.98, 0.98)
springgreen = (0, 1, 0.498)
steelblue = (0.275, 0.51, 0.706)
tan = (0.824, 0.706, 0.549)
teal = (0, 0.502, 0.502)
thistle = (0.847, 0.749, 0.847)
tomato = (1, 0.388, 0.278)
turquoise = (0.251, 0.878, 0.816)
violet = (0.933, 0.51, 0.933)
wheat = (0.961, 0.871, 0.702)
white = (1, 1, 1)
whitesmoke = (0.961, 0.961, 0.961)
yellow = (1, 1, 0)
yellowgreen = (0.604, 0.804, 0.196)

# File: rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/colorlabel.py
import itertools
import cupy as cp
import numpy as np
from .._shared.utils import _supported_float_type, warn
from ..util import img_as_float
from . import rgb_colors
from .colorconv import gray2rgb, hsv2rgb, rgb2hsv
__all__ = ["color_dict", "label2rgb", "DEFAULT_COLORS"]
DEFAULT_COLORS = (
"red",
"blue",
"yellow",
"magenta",
"green",
"indigo",
"darkorange",
"cyan",
"pink",
"yellowgreen",
)
color_dict = {
k: v for k, v in rgb_colors.__dict__.items() if isinstance(v, tuple)
}
def _rgb_vector(color):
"""Return RGB color as (1, 3) array.
This RGB array gets multiplied by masked regions of an RGB image, which are
partially flattened by masking (i.e. dimensions 2D + RGB -> 1D + RGB).
Parameters
----------
color : str or array
Color name in `color_dict` or RGB float values between [0, 1].
"""
if isinstance(color, str):
color = color_dict[color]
# Slice to handle RGBA colors.
return np.asarray(color[:3]) # CuPy Backend: leave this array on the host
def _match_label_with_color(label, colors, bg_label, bg_color):
"""Return `unique_labels` and `color_cycle` for label array and color list.
Colors are cycled for normal labels, but the background color should only
be used for the background.
"""
# Temporarily set background color; it will be removed later.
if bg_color is None:
bg_color = (0, 0, 0)
bg_color = _rgb_vector(bg_color)
# map labels to their ranks among all labels from small to large
unique_labels, mapped_labels = cp.unique(label, return_inverse=True)
# get rank of bg_label
bg_label_rank_list = mapped_labels[label.ravel() == bg_label]
# The rank of each label is the index of the color it is matched to in
# color cycle. bg_label should always be mapped to the first color, so
# its rank must be 0. Other labels should be ranked from small to large
# from 1.
if len(bg_label_rank_list) > 0:
bg_label_rank = bg_label_rank_list[0]
mapped_labels[mapped_labels < bg_label_rank] += 1
mapped_labels[label.ravel() == bg_label] = 0
else:
mapped_labels += 1
# Modify labels and color cycle so background color is used only once.
color_cycle = itertools.cycle(colors)
color_cycle = itertools.chain([bg_color], color_cycle)
return mapped_labels, color_cycle
def label2rgb(
label,
image=None,
colors=None,
alpha=0.3,
bg_label=0,
bg_color=(0, 0, 0),
image_alpha=1,
kind="overlay",
*,
saturation=0,
channel_axis=-1,
):
"""Return an RGB image where color-coded labels are painted over the image.
Parameters
----------
label : ndarray
Integer array of labels with the same shape as `image`.
image : ndarray, optional
Image used as underlay for labels. It should have the same shape as
`labels`, optionally with an additional RGB (channels) axis. If `image`
is an RGB image, it is converted to grayscale before coloring.
colors : list, optional
List of colors. If the number of labels exceeds the number of colors,
then the colors are cycled.
alpha : float [0, 1], optional
Opacity of colorized labels. Ignored if image is `None`.
bg_label : int, optional
Label that's treated as the background. If `bg_label` is specified,
`bg_color` is `None`, and `kind` is `overlay`,
background is not painted by any colors.
bg_color : str or array, optional
Background color. Must be a name in `color_dict` or RGB float values
between [0, 1].
image_alpha : float [0, 1], optional
Opacity of the image.
kind : string, one of {'overlay', 'avg'}
The kind of color image desired. 'overlay' cycles over defined colors
and overlays the colored labels over the original image. 'avg' replaces
        each labeled segment with its average color, for a stained-glass or
pastel painting appearance.
saturation : float [0, 1], optional
Parameter to control the saturation applied to the original image
between fully saturated (original RGB, `saturation=1`) and fully
unsaturated (grayscale, `saturation=0`). Only applies when
`kind='overlay'`.
channel_axis : int, optional
This parameter indicates which axis of the output array will correspond
to channels. If `image` is provided, this must also match the axis of
`image` that corresponds to channels.
Returns
-------
result : array of float, shape (M, N, 3)
The result of blending a cycling colormap (`colors`) for each distinct
value in `label` with the image, at a certain alpha value.
"""
if image is not None:
image = np.moveaxis(image, source=channel_axis, destination=-1)
if kind == "overlay":
rgb = _label2rgb_overlay(
label,
image,
colors,
alpha,
bg_label,
bg_color,
image_alpha,
saturation,
)
elif kind == "avg":
rgb = _label2rgb_avg(label, image, bg_label, bg_color)
else:
raise ValueError("`kind` must be either 'overlay' or 'avg'.")
return np.moveaxis(rgb, source=-1, destination=channel_axis)
def _label2rgb_overlay(
label,
image=None,
colors=None,
alpha=0.3,
bg_label=-1,
bg_color=None,
image_alpha=1,
saturation=0,
):
"""Return an RGB image where color-coded labels are painted over the image.
Parameters
----------
label : ndarray
Integer array of labels with the same shape as `image`.
image : ndarray, optional
Image used as underlay for labels. It should have the same shape as
`labels`, optionally with an additional RGB (channels) axis. If `image`
is an RGB image, it is converted to grayscale before coloring.
colors : list, optional
List of colors. If the number of labels exceeds the number of colors,
then the colors are cycled.
alpha : float [0, 1], optional
Opacity of colorized labels. Ignored if image is `None`.
bg_label : int, optional
Label that's treated as the background. If `bg_label` is specified and
`bg_color` is `None`, background is not painted by any colors.
bg_color : str or array, optional
Background color. Must be a name in `color_dict` or RGB float values
between [0, 1].
image_alpha : float [0, 1], optional
Opacity of the image.
saturation : float [0, 1], optional
Parameter to control the saturation applied to the original image
between fully saturated (original RGB, `saturation=1`) and fully
unsaturated (grayscale, `saturation=0`).
Returns
-------
result : array of float, shape (M, N, 3)
The result of blending a cycling colormap (`colors`) for each distinct
value in `label` with the image, at a certain alpha value.
"""
if not 0 <= saturation <= 1:
warn(f"saturation must be in range [0, 1], got {saturation}")
if colors is None:
colors = DEFAULT_COLORS
colors = [_rgb_vector(c) for c in colors]
if image is None:
image = cp.zeros(label.shape + (3,), dtype=np.float64)
# Opacity doesn't make sense if no image exists.
alpha = 1
else:
if (
image.shape[: label.ndim] != label.shape
or image.ndim > label.ndim + 1
):
raise ValueError("`image` and `label` must be the same shape")
if image.ndim == label.ndim + 1 and image.shape[-1] != 3:
raise ValueError("`image` must be RGB (image.shape[-1] must be 3).")
if image.min() < 0:
warn("Negative intensities in `image` are not supported")
float_dtype = _supported_float_type(image.dtype)
image = img_as_float(image).astype(float_dtype, copy=False)
if image.ndim > label.ndim:
hsv = rgb2hsv(image)
hsv[..., 1] *= saturation
image = hsv2rgb(hsv)
elif image.ndim == label.ndim:
image = gray2rgb(image)
image = image * image_alpha + (1 - image_alpha)
# Ensure that all labels are non-negative so we can index into
# `label_to_color` correctly.
offset = min(int(label.min()), bg_label)
if offset != 0:
label = label - offset # Make sure you don't modify the input array.
bg_label -= offset
new_type = np.min_scalar_type(int(label.max()))
if new_type == bool:
new_type = np.uint8
label = label.astype(new_type)
mapped_labels_flat, color_cycle = _match_label_with_color(
label, colors, bg_label, bg_color
)
if len(mapped_labels_flat) == 0:
return image
dense_labels = range(int(mapped_labels_flat.max()) + 1)
# CuPy Backend: small color_cycle arrays are left on the CPU
label_to_color = np.stack([c for i, c in zip(dense_labels, color_cycle)])
# CuPy Backend: transfer to GPU after concatenation of small host arrays
label_to_color = cp.asarray(label_to_color)
mapped_labels = mapped_labels_flat.reshape(label.shape)
label = mapped_labels
result = label_to_color[mapped_labels] * alpha + image * (1 - alpha)
# Remove background label if its color was not specified.
remove_background = 0 in mapped_labels_flat and bg_color is None
if remove_background:
result[label == bg_label] = image[label == bg_label]
return result
def _label2rgb_avg(label_field, image, bg_label=0, bg_color=(0, 0, 0)):
"""Visualise each segment in `label_field` with its mean color in `image`.
Parameters
----------
label_field : ndarray of int
A segmentation of an image.
image : array, shape ``label_field.shape + (3,)``
A color image of the same spatial shape as `label_field`.
bg_label : int, optional
A value in `label_field` to be treated as background.
bg_color : 3-tuple of int, optional
The color for the background label
Returns
-------
out : ndarray, same shape and type as `image`
The output visualization.
"""
out = cp.zeros(label_field.shape + (3,), dtype=image.dtype)
labels = cp.unique(label_field)
bg = labels == bg_label
if bg.any():
labels = labels[labels != bg_label]
mask = (label_field == bg_label).nonzero()
out[mask] = bg_color
for label in labels:
mask = (label_field == label).nonzero()
color = image[mask].mean(axis=0)
out[mask] = color
return out
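
# Hedged usage sketch (illustration only; shapes and values are made up):
# overlaying a two-region label image on a grayscale image.
#
#   >>> import cupy as cp
#   >>> image = cp.linspace(0, 1, 16 * 16).reshape(16, 16)
#   >>> labels = cp.zeros((16, 16), dtype=cp.int32)
#   >>> labels[4:8, 4:8] = 1
#   >>> labels[8:12, 8:12] = 2
#   >>> overlay = label2rgb(labels, image, bg_label=0)
#   >>> overlay.shape
#   (16, 16, 3)
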

# File: rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/colorconv.py
"""Functions for converting between color spaces.
The "central" color space in this module is RGB, more specifically the linear
sRGB color space using D65 as a white-point [1]_. This represents a
standard monitor (w/o gamma correction). For a good FAQ on color spaces see
[2]_.
The API consists of functions to convert to and from RGB as defined above, as
well as a generic function to convert to and from any supported color space
(which is done through RGB in most cases).
Supported color spaces
----------------------
* RGB : Red Green Blue.
Here the sRGB standard [1]_.
* HSV : Hue, Saturation, Value.
Uniquely defined when related to sRGB [3]_.
* RGB CIE : Red Green Blue.
The original RGB CIE standard from 1931 [4]_. Primary colors are 700 nm
    (red), 546.1 nm (green) and 435.8 nm (blue).
* XYZ CIE : XYZ
Derived from the RGB CIE color space. Chosen such that
``x == y == z == 1/3`` at the whitepoint, and all color matching
functions are greater than zero everywhere.
* LAB CIE : Lightness, a, b
Colorspace derived from XYZ CIE that is intended to be more
perceptually uniform
* LUV CIE : Lightness, u, v
Colorspace derived from XYZ CIE that is intended to be more
perceptually uniform
* LCH CIE : Lightness, Chroma, Hue
Defined in terms of LAB CIE. C and H are the polar representation of
    a and b. The polar angle (H) is defined to be on ``(0, 2*pi)``.
:author: Nicolas Pinto (rgb2hsv)
:author: Ralf Gommers (hsv2rgb)
:author: Travis Oliphant (XYZ and RGB CIE functions)
:author: Matt Terry (lab2lch)
:author: Alex Izvorski (yuv2rgb, rgb2yuv and related)
:license: modified BSD
References
----------
.. [1] Official specification of sRGB, IEC 61966-2-1:1999.
.. [2] http://www.poynton.com/ColorFAQ.html
.. [3] https://en.wikipedia.org/wiki/HSL_and_HSV
.. [4] https://en.wikipedia.org/wiki/CIE_1931_color_space
"""
from warnings import warn
import cupy as cp
import numpy as np
from scipy import linalg
from .._shared.utils import (
_supported_float_type,
channel_as_last_axis,
deprecate_func,
identity,
)
from ..util import dtype, dtype_limits
def convert_colorspace(arr, fromspace, tospace, *, channel_axis=-1):
"""Convert an image array to a new color space.
Valid color spaces are:
'RGB', 'HSV', 'RGB CIE', 'XYZ', 'YUV', 'YIQ', 'YPbPr', 'YCbCr', 'YDbDr'
Parameters
----------
arr : (..., 3, ...) array_like
The image to convert. By default, the final dimension denotes
channels.
fromspace : str
The color space to convert from. Can be specified in lower case.
tospace : str
The color space to convert to. Can be specified in lower case.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The converted image. Same dimensions as input.
Raises
------
ValueError
If fromspace is not a valid color space
ValueError
If tospace is not a valid color space
Notes
-----
Conversion is performed through the "central" RGB color space,
i.e. conversion from XYZ to HSV is implemented as ``XYZ -> RGB -> HSV``
instead of directly.
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> img = cp.array(data.astronaut())
>>> img_hsv = convert_colorspace(img, 'RGB', 'HSV')
"""
fromdict = {
"rgb": identity,
"hsv": hsv2rgb,
"rgb cie": rgbcie2rgb,
"xyz": xyz2rgb,
"yuv": yuv2rgb,
"yiq": yiq2rgb,
"ypbpr": ypbpr2rgb,
"ycbcr": ycbcr2rgb,
"ydbdr": ydbdr2rgb,
}
todict = {
"rgb": identity,
"hsv": rgb2hsv,
"rgb cie": rgb2rgbcie,
"xyz": rgb2xyz,
"yuv": rgb2yuv,
"yiq": rgb2yiq,
"ypbpr": rgb2ypbpr,
"ycbcr": rgb2ycbcr,
"ydbdr": rgb2ydbdr,
}
fromspace = fromspace.lower()
tospace = tospace.lower()
if fromspace not in fromdict:
msg = f"`fromspace` has to be one of {fromdict.keys()}"
raise ValueError(msg)
if tospace not in todict:
msg = f"`tospace` has to be one of {todict.keys()}"
raise ValueError(msg)
return todict[tospace](
fromdict[fromspace](arr, channel_axis=channel_axis),
channel_axis=channel_axis,
)
def _prepare_colorarray(
arr, force_copy=False, force_c_contiguous=True, channel_axis=-1
):
"""Check the shape of the array and convert it to
floating point representation.
"""
if arr.shape[channel_axis] != 3:
msg = (
f"the input array must have size 3 along `channel_axis`, "
f"got {arr.shape}"
)
raise ValueError(msg)
float_dtype = _supported_float_type(arr.dtype)
if float_dtype == cp.float32:
_func = dtype.img_as_float32
else:
_func = dtype.img_as_float64
out = _func(arr, force_copy=force_copy)
if force_c_contiguous and not out.flags.c_contiguous:
out = cp.ascontiguousarray(out)
return out
def _validate_channel_axis(channel_axis, ndim):
if not isinstance(channel_axis, int):
raise TypeError("channel_axis must be an integer")
if channel_axis < -ndim or channel_axis >= ndim:
raise np.AxisError("channel_axis exceeds array dimensions")
@cp.memoize(for_each_device=True)
def _rgba2rgb_kernel(background, name="rgba2rgb"):
code = """
X alpha = rgba[4*i + 3];
X val;
"""
for ch in range(3):
code += f"""
val = (1 - alpha) * {background[ch]} + alpha * rgba[4*i + {ch}];
rgb[3*i + {ch}] = min(max(val, (X)0.0), (X)1.0);
"""
return cp.ElementwiseKernel(
"raw X rgba", "raw X rgb", code, name="cucim_skimage_color_" + name
)
@channel_as_last_axis() # current CUDA kernel assumes channel_axis is last
def rgba2rgb(rgba, background=(1, 1, 1), *, channel_axis=-1):
"""RGBA to RGB conversion using alpha blending [1]_.
Parameters
----------
rgba : (..., 4, ...) array_like
The image in RGBA format. By default, the final dimension denotes
channels.
background : array_like
The color of the background to blend the image with (3 floats
between 0 to 1 - the RGB value of the background).
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `rgba` is not at least 2D with shape (..., 4, ...).
References
----------
.. [1] https://en.wikipedia.org/wiki/Alpha_compositing#Alpha_blending
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage import color
>>> from skimage import data
>>> img_rgba = cp.array(data.logo())
>>> img_rgb = color.rgba2rgb(img_rgba)
"""
_validate_channel_axis(channel_axis, rgba.ndim)
channel_axis = channel_axis % rgba.ndim
if rgba.shape[channel_axis] != 4:
msg = (
f"the input array must have size 4 along `channel_axis`, "
f"got {rgba.shape}"
)
raise ValueError(msg)
float_dtype = _supported_float_type(rgba.dtype)
if float_dtype == cp.float32:
rgba = dtype.img_as_float32(rgba)
else:
rgba = dtype.img_as_float64(rgba)
if not rgba.flags.c_contiguous:
rgba = cp.ascontiguousarray(rgba)
if isinstance(background, cp.ndarray):
background = cp.asnumpy(background) # synchronize
background = tuple(float(b) for b in background)
if len(background) != 3:
raise ValueError(
"background must be an array-like containing 3 RGB "
f"values. Got {len(background)} items"
)
if any((b < 0 or b > 1) for b in background):
raise ValueError(
"background RGB values must be floats between " "0 and 1."
)
name = f"rgba2rgb_{rgba.dtype.char}"
kern = _rgba2rgb_kernel(background, name)
rgb = cp.empty(rgba.shape[:-1] + (3,), dtype=rgba.dtype)
kern(rgba, rgb, size=rgb.size // 3)
return rgb
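# Editor note: illustrative sketch only, not part of the cuCIM API. The fused
# kernel used by ``rgba2rgb`` implements standard alpha blending,
# out = (1 - alpha) * background + alpha * rgb, per channel, followed by a clip
# to [0, 1]. The hypothetical helper below is the array-based equivalent and is
# meant purely as a reference for what the kernel computes.
def _rgba2rgb_reference(rgba, background=(1, 1, 1)):
    import cupy as cp
    rgba = cp.asarray(rgba, dtype=cp.float64)  # assumes values already in [0, 1]
    alpha = rgba[..., 3:4]
    bg = cp.asarray(background, dtype=rgba.dtype)
    out = (1 - alpha) * bg + alpha * rgba[..., :3]
    return cp.clip(out, 0.0, 1.0)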
@cp.memoize(for_each_device=True)
def _rgb_to_hsv_kernel(name="rgb2hsv"):
code = """
X minv = rgb[3*i];
X maxv = rgb[3*i];
X tmp;
for (int ch=1; ch < 3; ch++)
{
tmp = rgb[3*i + ch];
if (tmp > maxv)
{
maxv = tmp;
} else if (tmp < minv)
{
minv = tmp;
}
}
X delta = maxv - minv;
if (delta == 0.0)
{
hsv[3*i] = 0.0;
hsv[3*i + 1] = 0.0;
} else {
hsv[3*i + 1] = delta / maxv;
if (rgb[3*i] == maxv)
{
hsv[3*i] = (rgb[3*i + 1] - rgb[3*i + 2]) / delta;
} else if (rgb[3*i + 1] == maxv)
{
hsv[3*i] = 2.0 + (rgb[3*i + 2] - rgb[3*i]) / delta;
} else if (rgb[3*i + 2] == maxv)
{
hsv[3*i] = 4.0 + (rgb[3*i] - rgb[3*i + 1]) / delta;
}
hsv[3*i] /= 6.0;
hsv[3*i] = hsv[3*i] - floor(hsv[3*i] / (X)1.0);
}
hsv[3*i + 2] = maxv;
"""
return cp.ElementwiseKernel(
"raw X rgb", "raw X hsv", code, name="cucim_skimage_color_" + name
)
@channel_as_last_axis()
def rgb2hsv(rgb, *, channel_axis=-1):
"""RGB to HSV color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in HSV format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
Notes
-----
Conversion between RGB and HSV color spaces results in some loss of
precision, due to integer arithmetic and rounding [1]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/HSL_and_HSV
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage import color
>>> from skimage import data
>>> img = cp.array(data.astronaut())
>>> img_hsv = color.rgb2hsv(img)
"""
input_is_one_pixel = rgb.ndim == 1
if input_is_one_pixel:
rgb = rgb[np.newaxis, ...]
rgb = _prepare_colorarray(
rgb, force_c_contiguous=True, channel_axis=channel_axis
)
hsv = cp.empty_like(rgb)
name = f"rgb2hsv_{rgb.dtype.char}"
kern = _rgb_to_hsv_kernel(name=name)
kern(rgb, hsv, size=rgb.size // 3)
if input_is_one_pixel:
hsv = cp.squeeze(hsv, axis=0)
return hsv
@cp.memoize(for_each_device=True)
def _hsv_to_rgb_kernel(name="hsv2rgb"):
code = """
int hi = (int)floor(hsv[3*i] * 6.0);
X f = hsv[3*i] * 6 - hi;
X v = hsv[3*i + 2];
X p = v * (1 - hsv[3*i + 1]);
int rem = (int)hi % 6;
switch(rem)
{
case 0:
rgb[3*i] = v;
rgb[3*i + 1] = v * (1 - (1 - f) * hsv[3*i + 1]);
rgb[3*i + 2] = p;
break;
case 1:
rgb[3*i] = v * (1 - f * hsv[3*i + 1]);
rgb[3*i + 1] = v;
rgb[3*i + 2] = p;
break;
case 2:
rgb[3*i] = p;
rgb[3*i + 1] = v;
rgb[3*i + 2] = v * (1 - (1 - f) * hsv[3*i + 1]);
break;
case 3:
rgb[3*i] = p;
rgb[3*i + 1] = v * (1 - f * hsv[3*i + 1]);
rgb[3*i + 2] = v;
break;
case 4:
rgb[3*i] = v * (1 - (1 - f) * hsv[3*i + 1]);
rgb[3*i + 1] = p;
rgb[3*i + 2] = v;
break;
case 5:
rgb[3*i] = v;
rgb[3*i + 1] = p;
rgb[3*i + 2] = v * (1 - f * hsv[3*i + 1]);
break;
}
"""
return cp.ElementwiseKernel(
"raw X hsv", "raw X rgb", code, name="cucim_skimage_color_" + name
)
@channel_as_last_axis()
def hsv2rgb(hsv, *, channel_axis=-1):
"""HSV to RGB color space conversion.
Parameters
----------
hsv : (..., 3, ...) array_like
The image in HSV format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `hsv` is not at least 2-D with shape (..., 3, ...).
Notes
-----
Conversion between RGB and HSV color spaces results in some loss of
precision, due to integer arithmetic and rounding [1]_.
References
----------
.. [1] https://en.wikipedia.org/wiki/HSL_and_HSV
Examples
--------
>>> import cupy as cp
    >>> from skimage import data
    >>> from cucim.skimage.color import rgb2hsv, hsv2rgb
>>> img = cp.array(data.astronaut())
>>> img_hsv = rgb2hsv(img)
>>> img_rgb = hsv2rgb(img_hsv)
"""
hsv = _prepare_colorarray(
hsv, force_c_contiguous=True, channel_axis=channel_axis
)
rgb = cp.empty_like(hsv)
name = f"hsv2rgb_{hsv.dtype.char}"
kern = _hsv_to_rgb_kernel(name=name)
kern(hsv, rgb, size=hsv.size // 3)
return rgb
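# Editor note: illustrative sketch only, not part of the cuCIM API. For float
# inputs in [0, 1], RGB -> HSV -> RGB should round-trip to within floating-point
# tolerance; the hypothetical helper below expresses that invariant.
def _check_hsv_roundtrip(rgb):
    import cupy as cp
    rgb = cp.asarray(rgb, dtype=cp.float64)
    restored = hsv2rgb(rgb2hsv(rgb))
    return bool(cp.allclose(rgb, restored, atol=1e-8))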
# ---------------------------------------------------------------
# Primaries for the coordinate systems
# ---------------------------------------------------------------
cie_primaries = np.array([700, 546.1, 435.8])
sb_primaries = np.array([1.0 / 155, 1.0 / 190, 1.0 / 225]) * 1e5
# ---------------------------------------------------------------
# Matrices that define conversion between different color spaces
# ---------------------------------------------------------------
# From sRGB specification
# fmt: off
xyz_from_rgb = np.array([[0.412453, 0.357580, 0.180423],
[0.212671, 0.715160, 0.072169],
[0.019334, 0.119193, 0.950227]])
rgb_from_xyz = linalg.inv(xyz_from_rgb)
# From https://en.wikipedia.org/wiki/CIE_1931_color_space
# Note: Travis's code did not have the divide by 0.17697
xyz_from_rgbcie = np.array([[0.49, 0.31, 0.20],
[0.17697, 0.81240, 0.01063],
[0.00, 0.01, 0.99]]) / 0.17697
rgbcie_from_xyz = linalg.inv(xyz_from_rgbcie)
# construct matrices to and from rgb:
rgbcie_from_rgb = rgbcie_from_xyz @ xyz_from_rgb
rgb_from_rgbcie = rgb_from_xyz @ xyz_from_rgbcie
gray_from_rgb = np.array([[0.2125, 0.7154, 0.0721],
[0, 0, 0],
[0, 0, 0]])
yuv_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ], # noqa
[-0.14714119, -0.28886916, 0.43601035], # noqa
[ 0.61497538, -0.51496512, -0.10001026]]) # noqa
rgb_from_yuv = linalg.inv(yuv_from_rgb)
yiq_from_rgb = np.array([[0.299 , 0.587 , 0.114 ], # noqa
[0.59590059, -0.27455667, -0.32134392], # noqa
[0.21153661, -0.52273617, 0.31119955]]) # noqa
rgb_from_yiq = linalg.inv(yiq_from_rgb)
ypbpr_from_rgb = np.array([[ 0.299 , 0.587 , 0.114 ], # noqa
[-0.168736, -0.331264, 0.5 ], # noqa
[ 0.5 , -0.418688, -0.081312]]) # noqa
# fmt: on
rgb_from_ypbpr = linalg.inv(ypbpr_from_rgb)
ycbcr_from_rgb = np.array(
[
[65.481, 128.553, 24.966], # noqa
[-37.797, -74.203, 112.0], # noqa
[112.0, -93.786, -18.214],
]
) # noqa
rgb_from_ycbcr = linalg.inv(ycbcr_from_rgb)
ydbdr_from_rgb = np.array(
[
[0.299, 0.587, 0.114], # noqa
[-0.45, -0.883, 1.333], # noqa
[-1.333, 1.116, 0.217],
]
) # noqa
rgb_from_ydbdr = linalg.inv(ydbdr_from_rgb)
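# Editor note: illustrative sketch only, not part of the cuCIM API. Each
# forward/inverse pair above is related by matrix inversion, so their product is
# the 3x3 identity up to round-off. Hypothetical consistency check (uses the
# module-level ``np`` import):
def _check_matrix_pairs():
    pairs = [
        (xyz_from_rgb, rgb_from_xyz),
        (yuv_from_rgb, rgb_from_yuv),
        (yiq_from_rgb, rgb_from_yiq),
        (ypbpr_from_rgb, rgb_from_ypbpr),
        (ycbcr_from_rgb, rgb_from_ycbcr),
        (ydbdr_from_rgb, rgb_from_ydbdr),
    ]
    return all(np.allclose(a @ b, np.eye(3)) for a, b in pairs)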
# CIE LAB constants for Observer=2A, Illuminant=D65
# NOTE: this is actually the XYZ values for the illuminant above.
lab_ref_white = np.array([0.95047, 1.0, 1.08883])
# XYZ coordinates of the illuminants, scaled to [0, 1]. For each illuminant I
# we have:
#
# illuminant[I]['2'] corresponds to the XYZ coordinates for the 2 degree
# field of view.
#
# illuminant[I]['10'] corresponds to the XYZ coordinates for the 10 degree
# field of view.
#
# illuminant[I]['R'] corresponds to the XYZ coordinates for R illuminants
# in grDevices::convertColor
#
# The XYZ coordinates are calculated from [1], using the formula:
#
# X = x * ( Y / y )
# Y = Y
# Z = ( 1 - x - y ) * ( Y / y )
#
# where Y = 1. The only exception is the illuminant "D65" with aperture angle
# 2, whose coordinates are copied from 'lab_ref_white' for
# backward-compatibility reasons.
#
# References
# ----------
# .. [1] https://en.wikipedia.org/wiki/Standard_illuminant
_illuminants = {
"A": {
"2": (1.098466069456375, 1, 0.3558228003436005),
"10": (1.111420406956693, 1, 0.3519978321919493),
"R": (1.098466069456375, 1, 0.3558228003436005),
},
"B": {
"2": (0.9909274480248003, 1, 0.8531327322886154),
"10": (0.9917777147717607, 1, 0.8434930535866175),
"R": (0.9909274480248003, 1, 0.8531327322886154),
},
"C": {
"2": (0.980705971659919, 1, 1.1822494939271255),
"10": (0.9728569189782166, 1, 1.1614480488951577),
"R": (0.980705971659919, 1, 1.1822494939271255),
},
"D50": {
"2": (0.9642119944211994, 1, 0.8251882845188288),
"10": (0.9672062750333777, 1, 0.8142801513128616),
"R": (0.9639501491621826, 1, 0.8241280285499208),
},
"D55": {
"2": (0.956797052643698, 1, 0.9214805860173273),
"10": (0.9579665682254781, 1, 0.9092525159847462),
"R": (0.9565317453467969, 1, 0.9202554587037198),
},
"D65": {
"2": (0.95047, 1.0, 1.08883), # This was: `lab_ref_white`
"10": (0.94809667673716, 1, 1.0730513595166162),
"R": (0.9532057125493769, 1, 1.0853843816469158),
},
"D75": {
"2": (0.9497220898840717, 1, 1.226393520724154),
"10": (0.9441713925645873, 1, 1.2064272211720228),
"R": (0.9497220898840717, 1, 1.226393520724154),
},
"E": {"2": (1.0, 1.0, 1.0), "10": (1.0, 1.0, 1.0), "R": (1.0, 1.0, 1.0)},
}
def xyz_tristimulus_values(*, illuminant, observer, dtype=None):
"""Get the CIE XYZ tristimulus values.
Given an illuminant and observer, this function returns the CIE XYZ
tristimulus values [2]_ scaled such that :math:`Y = 1`.
Parameters
----------
illuminant : {"A", "B", "C", "D50", "D55", "D65", "D75", "E"}
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10", "R"}
One of: 2-degree observer, 10-degree observer, or 'R' observer as in
R function ``grDevices::convertColor`` [3]_.
dtype : np.dtype, optional
This argument is ignored in the cuCIM implementation of
`xyz_tristimulus_values` since an array is not returned. The output is
always a 3-tuple of float.
Returns
-------
values : 3-tuple of float
Three elements :math:`X, Y, Z` containing the CIE XYZ tristimulus values
of the given illuminant.
Raises
------
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant#White_points_of_standard_illuminants
.. [2] https://en.wikipedia.org/wiki/CIE_1931_color_space#Meaning_of_X,_Y_and_Z
.. [3] https://www.rdocumentation.org/packages/grDevices/versions/3.6.2/topics/convertColor
Notes
-----
The return type of this function differs from the one in scikit-image as it
always returns a 3-tuple of float rather than an array with a
user-specified dtype.
The CIE XYZ tristimulus values are calculated from :math:`x, y` [1]_, using the
formula
.. math:: X = x / y
.. math:: Y = 1
.. math:: Z = (1 - x - y) / y
The only exception is the illuminant "D65" with aperture angle 2° for
backward-compatibility reasons.
Examples
--------
Get the CIE XYZ tristimulus values for a "D65" illuminant for a 10 degree
field of view
>>> xyz_tristimulus_values(illuminant="D65", observer="10")
array([0.94809668, 1. , 1.07305136])
""" # noqa
illuminant = illuminant.upper()
observer = observer.upper()
try:
return _illuminants[illuminant][observer]
except KeyError:
raise ValueError(
f"Unknown illuminant/observer combination "
f"(`{illuminant}`, `{observer}`)"
)
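# Editor note: illustrative sketch only, not part of the cuCIM API. Because the
# tabulated tristimulus values are scaled so that Y == 1, the chromaticity
# coordinates can be recovered as x = X / (X + Y + Z) and y = Y / (X + Y + Z).
# Hypothetical helper demonstrating that relationship:
def _chromaticity_from_tristimulus(illuminant="D65", observer="2"):
    X, Y, Z = xyz_tristimulus_values(illuminant=illuminant, observer=observer)
    s = X + Y + Z
    return X / s, Y / s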
@deprecate_func(
hint="Use `skimage.color.xyz_tristimulus_values` instead.",
deprecated_version="23.08",
removed_version="24.06",
)
def get_xyz_coords(illuminant, observer, dtype=float):
"""Get the XYZ coordinates of the given illuminant and observer [1]_.
Parameters
----------
illuminant : {"A", "B", "C", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10", "R"}, optional
One of: 2-degree observer, 10-degree observer, or 'R' observer as in
R function grDevices::convertColor.
dtype: dtype, optional
Output data type.
Returns
-------
out : array
Array with 3 elements containing the XYZ coordinates of the given
illuminant.
Raises
------
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
"""
return xyz_tristimulus_values(illuminant=illuminant, observer=observer)
# Haematoxylin-Eosin-DAB colorspace
# From original Ruifrok's paper: A. C. Ruifrok and D. A. Johnston,
# "Quantification of histochemical staining by color deconvolution,"
# Analytical and quantitative cytology and histology / the International
# Academy of Cytology [and] American Society of Cytology, vol. 23, no. 4,
# pp. 291-9, Aug. 2001.
# fmt: off
rgb_from_hed = np.array([[0.65, 0.70, 0.29],
[0.07, 0.99, 0.11],
[0.27, 0.57, 0.78]])
hed_from_rgb = linalg.inv(rgb_from_hed)
# Following matrices are adapted from the Java code written by G. Landini.
# The original code is available at:
# https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
# Hematoxylin + DAB
rgb_from_hdx = np.array([[0.650, 0.704, 0.286],
[0.268, 0.570, 0.776],
[0.0, 0.0, 0.0]])
rgb_from_hdx[2, :] = np.cross(rgb_from_hdx[0, :], rgb_from_hdx[1, :])
hdx_from_rgb = linalg.inv(rgb_from_hdx)
# Feulgen + Light Green
rgb_from_fgx = np.array([[0.46420921, 0.83008335, 0.30827187],
[0.94705542, 0.25373821, 0.19650764],
[0.0, 0.0, 0.0]])
rgb_from_fgx[2, :] = np.cross(rgb_from_fgx[0, :], rgb_from_fgx[1, :])
fgx_from_rgb = linalg.inv(rgb_from_fgx)
# Giemsa: Methyl Blue + Eosin
rgb_from_bex = np.array([[0.834750233, 0.513556283, 0.196330403],
[0.092789, 0.954111, 0.283111],
[0.0, 0.0, 0.0]])
rgb_from_bex[2, :] = np.cross(rgb_from_bex[0, :], rgb_from_bex[1, :])
bex_from_rgb = linalg.inv(rgb_from_bex)
# FastRed + FastBlue + DAB
rgb_from_rbd = np.array([[0.21393921, 0.85112669, 0.47794022],
[0.74890292, 0.60624161, 0.26731082],
[0.268, 0.570, 0.776]])
rbd_from_rgb = linalg.inv(rgb_from_rbd)
# Methyl Green + DAB
rgb_from_gdx = np.array([[0.98003, 0.144316, 0.133146],
[0.268, 0.570, 0.776],
[0.0, 0.0, 0.0]])
rgb_from_gdx[2, :] = np.cross(rgb_from_gdx[0, :], rgb_from_gdx[1, :])
gdx_from_rgb = linalg.inv(rgb_from_gdx)
# Hematoxylin + AEC
rgb_from_hax = np.array([[0.650, 0.704, 0.286],
[0.2743, 0.6796, 0.6803],
[0.0, 0.0, 0.0]])
rgb_from_hax[2, :] = np.cross(rgb_from_hax[0, :], rgb_from_hax[1, :])
hax_from_rgb = linalg.inv(rgb_from_hax)
# Blue matrix Anilline Blue + Red matrix Azocarmine + Orange matrix Orange-G
rgb_from_bro = np.array([[0.853033, 0.508733, 0.112656],
[0.09289875, 0.8662008, 0.49098468],
[0.10732849, 0.36765403, 0.9237484]])
bro_from_rgb = linalg.inv(rgb_from_bro)
# Methyl Blue + Ponceau Fuchsin
rgb_from_bpx = np.array([[0.7995107, 0.5913521, 0.10528667],
[0.09997159, 0.73738605, 0.6680326],
[0.0, 0.0, 0.0]])
rgb_from_bpx[2, :] = np.cross(rgb_from_bpx[0, :], rgb_from_bpx[1, :])
bpx_from_rgb = linalg.inv(rgb_from_bpx)
# Alcian Blue + Hematoxylin
rgb_from_ahx = np.array([[0.874622, 0.457711, 0.158256],
[0.552556, 0.7544, 0.353744],
[0.0, 0.0, 0.0]])
rgb_from_ahx[2, :] = np.cross(rgb_from_ahx[0, :], rgb_from_ahx[1, :])
ahx_from_rgb = linalg.inv(rgb_from_ahx)
# Hematoxylin + PAS
rgb_from_hpx = np.array([[0.644211, 0.716556, 0.266844],
[0.175411, 0.972178, 0.154589],
[0.0, 0.0, 0.0]])
rgb_from_hpx[2, :] = np.cross(rgb_from_hpx[0, :], rgb_from_hpx[1, :])
hpx_from_rgb = linalg.inv(rgb_from_hpx)
# fmt: on
# -------------------------------------------------------------
# The conversion functions that make use of the matrices above
# -------------------------------------------------------------
@cp.memoize(for_each_device=True)
def _get_convert_kernel(matrix_tuple, pre, post, name):
# pre code may modify x so set both x and y as outputs
return cp.ElementwiseKernel(
'',
'raw X x, raw X y',
pre + _get_core_colorconv_operation(matrix_tuple) + post,
name='cucim_skimage_color_' + name)
def _convert(matrix, arr, pre='', post='', name='_convert'):
"""Do the color space conversion.
Parameters
----------
matrix : array_like
The 3x3 matrix to use.
arr : (..., 3) array_like
The input array. Final dimension denotes channels.
Returns
-------
out : (..., 3) ndarray
The converted array. Same dimensions as input.
"""
arr = _prepare_colorarray(arr)
name = name + f'_{arr.dtype.char}'
kern = _get_convert_kernel(tuple(matrix.ravel()), pre, post, name)
out = cp.empty_like(arr)
kern(arr, out, size=arr.size // 3)
return out
def _get_core_colorconv_operation(m):
"""Generate inline CUDA kernel code for color conversions.
x is the input image with 3 channels on the last axis
y is the output image with 3 channels on the last axis
m is a 3x3 color conversion matrix
"""
return f"""
y[3*i] = x[3*i] * {m[0]} + x[3*i + 1] * {m[1]} + x[3*i + 2] * {m[2]};
y[3*i + 1] = x[3*i] * {m[3]} + x[3*i + 1] * {m[4]} + x[3*i + 2] * {m[5]};
y[3*i + 2] = x[3*i] * {m[6]} + x[3*i + 1] * {m[7]} + x[3*i + 2] * {m[8]};
""" # noqa
@channel_as_last_axis()
def xyz2rgb(xyz, *, channel_axis=-1):
"""XYZ to RGB color space conversion.
Parameters
----------
xyz : (..., 3, ...) array_like
The image in XYZ format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `xyz` is not at least 2-D with shape (..., 3, ...).
Notes
-----
The CIE XYZ color space is derived from the CIE RGB color space. Note
however that this function converts to sRGB.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
    >>> import cupy as cp
    >>> from skimage import data
>>> from cucim.skimage.color import rgb2xyz, xyz2rgb
>>> img = cp.array(data.astronaut())
>>> img_xyz = rgb2xyz(img)
>>> img_rgb = xyz2rgb(img_xyz)
"""
# Follow the algorithm from http://www.easyrgb.com/index.php
# except we don't multiply/divide by 100 in the conversion
arr = _prepare_colorarray(xyz, force_c_contiguous=True,
channel_axis=channel_axis)
# scaling applied after the 3x3 conversion matrix multiplication
# (c indexes over color channels here)
_post_colorconv = """
for (int c=0; c < 3; c++) {
if (y[3*i + c] > 0.0031308) {
y[3*i + c] = 1.055 * pow(y[3*i + c], (X)(1 / 2.4)) - 0.055;
} else {
y[3*i + c] *= 12.92;
}
y[3*i + c] = min(max(y[3*i + c], (X)0.0), (X)1.0);
}
"""
return _convert(rgb_from_xyz, arr, post=_post_colorconv, name='xyz2rgb')
@channel_as_last_axis()
def rgb2xyz(rgb, *, channel_axis=-1):
"""RGB to XYZ color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in XYZ format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
Notes
-----
The CIE XYZ color space is derived from the CIE RGB color space. Note
however that this function converts from sRGB.
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> img = cp.array(data.astronaut())
>>> img_xyz = rgb2xyz(img)
"""
# Follow the algorithm from http://www.easyrgb.com/index.php
# except we don't multiply/divide by 100 in the conversion
rgb = _prepare_colorarray(rgb, force_copy=True, force_c_contiguous=True,
channel_axis=channel_axis)
# scaling applied to the input before 3x3 conversion matrix multiplication
# (c indexes over color channels here)
_pre_colorconv = """
for (int c=0; c < 3; c++) {
if (x[3*i + c] > 0.04045) {
x[3*i + c] = pow((x[3*i + c] + (X)0.055) / (X)1.055, (X)2.4);
} else {
x[3*i + c] /= 12.92;
}
}
"""
return _convert(xyz_from_rgb, rgb, pre=_pre_colorconv, name='rgb2xyz')
@channel_as_last_axis()
def rgb2rgbcie(rgb, *, channel_axis=-1):
"""RGB to RGB CIE color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB CIE format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
    >>> import cupy as cp
    >>> from skimage import data
>>> from cucim.skimage.color import rgb2rgbcie
>>> img = cp.array(data.astronaut())
>>> img_rgbcie = rgb2rgbcie(img)
"""
return _convert(rgbcie_from_rgb, rgb, name='rgb2rgbcie')
@channel_as_last_axis()
def rgbcie2rgb(rgbcie, *, channel_axis=-1):
"""RGB CIE to RGB color space conversion.
Parameters
----------
rgbcie : (..., 3, ...) array_like
The image in RGB CIE format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `rgbcie` is not at least 2-D with shape (..., 3, ...).
References
----------
.. [1] https://en.wikipedia.org/wiki/CIE_1931_color_space
Examples
--------
    >>> import cupy as cp
    >>> from skimage import data
>>> from cucim.skimage.color import rgb2rgbcie, rgbcie2rgb
>>> img = cp.array(data.astronaut())
>>> img_rgbcie = rgb2rgbcie(img)
>>> img_rgb = rgbcie2rgb(img_rgbcie)
"""
return _convert(rgb_from_rgbcie, rgbcie, name='rgbcie2rgb')
@cp.memoize(for_each_device=True)
def _rgb_to_gray_kernel(dtype):
return cp.ElementwiseKernel(
'raw X rgb',
'raw X gray',
"""
gray[i] = 0.2125 * rgb[3*i] + 0.7154 * rgb[3*i + 1] + 0.0721 * rgb[3*i + 2];
""", # noqa
name=f'cucim_skimage_color_rgb2gray_{np.dtype(dtype).char}')
@channel_as_last_axis(multichannel_output=False)
def rgb2gray(rgb, *, channel_axis=-1):
"""Compute luminance of an RGB image.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
        channels.
    channel_axis : int, optional
        This parameter indicates which axis of the array corresponds to
        channels.
Returns
-------
out : ndarray
The luminance image - an array which is the same size as the input
array, but with the channel dimension removed.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
Notes
-----
The weights used in this conversion are calibrated for contemporary
CRT phosphors::
Y = 0.2125 R + 0.7154 G + 0.0721 B
If there is an alpha channel present, it is ignored.
References
----------
.. [1] http://poynton.ca/PDFs/ColorFAQ.pdf
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.color import rgb2gray
>>> from skimage import data
>>> img = cp.array(data.astronaut())
>>> img_gray = rgb2gray(img)
"""
rgb = _prepare_colorarray(rgb, force_c_contiguous=True,
channel_axis=channel_axis)
kern = _rgb_to_gray_kernel(rgb.dtype)
gray = cp.empty(rgb.shape[:-1], dtype=rgb.dtype)
kern(rgb, gray, size=gray.size)
return gray
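# Editor note: illustrative sketch only, not part of the cuCIM API. The rgb2gray
# kernel is a per-pixel dot product with the luminance weights
# (0.2125, 0.7154, 0.0721); the hypothetical array equivalent is:
def _rgb2gray_reference(rgb):
    import cupy as cp
    weights = cp.asarray([0.2125, 0.7154, 0.0721])
    return cp.asarray(rgb, dtype=cp.float64) @ weights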
def gray2rgba(image, alpha=None, *, channel_axis=-1):
"""Create a RGBA representation of a gray-level image.
Parameters
----------
image : array_like
Input image.
alpha : array_like, optional
Alpha channel of the output image. It may be a scalar or an
array that can be broadcast to ``image``. If not specified it is
set to the maximum limit corresponding to the ``image`` dtype.
channel_axis : int, optional
This parameter indicates which axis of the output array will correspond
to channels.
Returns
-------
rgba : ndarray
RGBA image. A new dimension of length 4 is added to input
image shape.
"""
alpha_min, alpha_max = dtype_limits(image, clip_negative=False)
if alpha is None:
alpha = alpha_max
if not cp.can_cast(alpha, image.dtype):
warn("alpha can't be safely cast to image dtype {}"
.format(image.dtype.name), stacklevel=2)
if np.isscalar(alpha):
alpha = cp.full(image.shape, alpha, dtype=image.dtype)
elif alpha.shape != image.shape:
raise ValueError("alpha.shape must match image.shape")
rgba = np.stack((image,) * 3 + (alpha,), axis=channel_axis)
return rgba
def gray2rgb(image, *, channel_axis=-1):
"""Create an RGB representation of a gray-level image.
Parameters
----------
image : array_like
Input image.
channel_axis : int, optional
This parameter indicates which axis of the output array will correspond
to channels.
Returns
-------
rgb : (..., 3, ...) ndarray
RGB image. A new dimension of length 3 is added to input image.
Notes
-----
If the input is a 1-dimensional image of shape ``(M, )``, the output
will be shape ``(M, 3)``.
"""
return cp.stack(3 * (image,), axis=channel_axis)
@cp.memoize(for_each_device=True)
def _get_xyz_to_lab_kernel(xyz_ref_white, name='xyz2lab'):
_xyz_to_lab = f"""
// scale by CIE XYZ tristimulus values of the reference white point
arr[3*i] /= {xyz_ref_white[0]};
arr[3*i + 1] /= {xyz_ref_white[1]};
arr[3*i + 2] /= {xyz_ref_white[2]};
// Nonlinear distortion and linear transformation
for (int ch=0; ch < 3; ch++)
{{
if (arr[3*i + ch] > 0.008856)
{{
arr[3*i + ch] = cbrt(arr[3*i + ch]);
}} else {{
arr[3*i + ch] = 7.787 * arr[3*i + ch] + 16.0 / 116.0;
}}
}}
// Vector scaling
lab[3*i] = (116. * arr[3*i + 1]) - 16.0;
lab[3*i + 1] = 500.0 * (arr[3*i] - arr[3*i + 1]);
lab[3*i + 2] = 200.0 * (arr[3*i + 1] - arr[3*i + 2]);
"""
# array will be modified in-place
return cp.ElementwiseKernel(
'',
'raw X arr, raw X lab',
_xyz_to_lab,
name='cucim_skimage_color_' + name)
@channel_as_last_axis()
def xyz2lab(xyz, illuminant="D65", observer="2", *, channel_axis=-1):
"""XYZ to CIE-LAB color space conversion.
Parameters
----------
xyz : (..., 3, ...) array_like
The image in XYZ format. By default, the final dimension denotes
channels.
illuminant : {"A", "B", "C", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10", "R"}, optional
One of: 2-degree observer, 10-degree observer, or 'R' observer as in
R function grDevices::convertColor.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in CIE-LAB format. Same dimensions as input.
Raises
------
ValueError
If `xyz` is not at least 2-D with shape (..., 3, ...).
ValueError
If either the illuminant or the observer angle is unsupported or
unknown.
Notes
-----
By default Observer="2", Illuminant="D65". CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function
:func:`~.xyz_tristimulus_values` for a list of supported illuminants.
References
----------
.. [1] http://www.easyrgb.com/en/math.php
.. [2] https://en.wikipedia.org/wiki/CIELAB_color_space
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage.color import rgb2xyz, xyz2lab
>>> img = cp.array(data.astronaut())
>>> img_xyz = rgb2xyz(img)
>>> img_lab = xyz2lab(img_xyz)
"""
xyz = _prepare_colorarray(xyz, force_copy=True, force_c_contiguous=True,
channel_axis=channel_axis)
xyz_ref_white = xyz_tristimulus_values(
illuminant=illuminant, observer=observer
)
name = f'xyz2lab_{xyz.dtype.char}'
kern = _get_xyz_to_lab_kernel(xyz_ref_white, name=name)
lab = cp.empty_like(xyz)
kern(xyz, lab, size=lab.size // 3)
return lab
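# Editor note: illustrative sketch only, not part of the cuCIM API. The
# per-channel nonlinearity applied by the xyz2lab kernel is the standard CIE
# f(t) function, after which L* = 116 * f(Y/Yn) - 16,
# a* = 500 * (f(X/Xn) - f(Y/Yn)) and b* = 200 * (f(Y/Yn) - f(Z/Zn)).
# Hypothetical scalar reference:
def _cie_f(t):
    # t is X/Xn, Y/Yn or Z/Zn for the chosen reference white
    if t > 0.008856:
        return t ** (1.0 / 3.0)
    return 7.787 * t + 16.0 / 116.0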
@cp.memoize(for_each_device=True)
def _get_lab_to_xyz_kernel(xyz_ref_white, name='lab2xyz'):
_lab_to_xyz = f"""
xyz[3*i + 1] = (lab[3*i] + 16.) / 116.;
xyz[3*i] = (lab[3*i + 1] / 500.0) + xyz[3*i + 1];
xyz[3*i + 2] = xyz[3*i + 1] - (lab[3*i + 2] /200.0);
if (xyz[3*i + 2] < 0.0)
{{
xyz[3*i + 2] = 0.0;
warn[i] = 1;
}}
for (int ch=0; ch < 3; ch++)
{{
if (xyz[3*i + ch] > 0.2068966)
{{
xyz[3*i + ch] *= xyz[3*i + ch] * xyz[3*i + ch];
}} else {{
xyz[3*i + ch] = (xyz[3*i + ch] - 16.0 / 116.0) / 7.787;
}}
}}
xyz[3*i] *= {xyz_ref_white[0]};
xyz[3*i + 1] *= {xyz_ref_white[1]};
xyz[3*i + 2] *= {xyz_ref_white[2]};
// xyz[3*i] = min(max(xyz[3*i], 0.0), 1.0);
// xyz[3*i + 1] = min(max(xyz[3*i + 1], 0.0), 1.0);
// xyz[3*i + 2] = min(max(xyz[3*i + 2], 0.0), 1.0);
"""
# array will be modified in-place
return cp.ElementwiseKernel(
'',
'raw X lab, raw X xyz, raw int32 warn',
_lab_to_xyz,
name='cucim_skimage_color_' + name)
@channel_as_last_axis()
def lab2xyz(lab, illuminant="D65", observer="2", *, channel_axis=-1):
"""Convert image in CIE-LAB to XYZ color space.
Parameters
----------
lab : (..., 3, ...) array_like
The input image in CIE-LAB color space.
Unless `channel_axis` is set, the final dimension denotes the CIE-LAB
channels.
The L* values range from 0 to 100;
the a* and b* values range from -128 to 127.
illuminant : {"A", "B", "C", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10", "R"}, optional
The aperture angle of the observer.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in XYZ color space, of same shape as input.
Raises
------
ValueError
If `lab` is not at least 2-D with shape (..., 3, ...).
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
UserWarning
If any of the pixels are invalid (Z < 0).
Notes
-----
The CIE XYZ tristimulus values are x_ref = 95.047, y_ref = 100., and
z_ref = 108.883. See function :func:`~.xyz_tristimulus_values` for a list of
supported illuminants.
See Also
--------
xyz2lab
References
----------
.. [1] http://www.easyrgb.com/en/math.php
.. [2] https://en.wikipedia.org/wiki/CIELAB_color_space
"""
xyz, n_invalid = _lab2xyz(lab, illuminant, observer, channel_axis)
if n_invalid > 0:
warn(
"Conversion from CIE-LAB to XYZ color space resulted in "
f"{n_invalid} negative Z values that have been clipped to zero",
stacklevel=3,
)
return xyz
def _lab2xyz(lab, illuminant, observer, channel_axis):
"""Convert CIE-LAB to XYZ color space.
Internal function for :func:`~.lab2xyz` and others. In addition to the
converted image, return the number of invalid pixels in the Z channel for
correct warning propagation.
Returns
-------
out : (..., 3, ...) ndarray
The image in XYZ format. Same dimensions as input.
n_invalid : int
Number of invalid pixels in the Z channel after conversion.
"""
lab = _prepare_colorarray(lab, force_c_contiguous=True,
channel_axis=channel_axis)
xyz_ref_white = xyz_tristimulus_values(
illuminant=illuminant, observer=observer
)
name = f'lab2xyz_{lab.dtype.char}'
kern = _get_lab_to_xyz_kernel(xyz_ref_white, name=name)
xyz = cp.empty_like(lab)
# TODO: better to use array for warn or a single element with atomic
# operations?
warnings = cp.zeros(lab.shape[:-1], dtype=np.int32)
kern(lab, xyz, warnings, size=lab.size // 3)
n_invalid = int(cp.count_nonzero(warnings)) # synchronize!
return xyz, n_invalid
@channel_as_last_axis()
def rgb2lab(rgb, illuminant="D65", observer="2", *, channel_axis=-1):
"""Conversion from the sRGB color space (IEC 61966-2-1:1999)
to the CIE Lab colorspace under the given illuminant and observer.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
illuminant : {"A", "B", "C", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10", "R"}, optional
The aperture angle of the observer.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in Lab format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
Notes
-----
RGB is a device-dependent color space so, if you use this function, be
sure that the image you are analyzing has been mapped to the sRGB color
space.
This function uses rgb2xyz and xyz2lab.
By default Observer="2", Illuminant="D65". CIE XYZ tristimulus values
x_ref=95.047, y_ref=100., z_ref=108.883. See function
:func:`~.xyz_tristimulus_values` for a list of supported illuminants.
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
"""
return xyz2lab(rgb2xyz(rgb), illuminant, observer)
@channel_as_last_axis()
def lab2rgb(lab, illuminant="D65", observer="2", *, channel_axis=-1):
"""Convert image in CIE-LAB to sRGB color space.
Parameters
----------
lab : (..., 3, ...) array_like
The input image in CIE-LAB color space.
Unless `channel_axis` is set, the final dimension denotes the CIE-LAB
channels.
The L* values range from 0 to 100;
the a* and b* values range from -128 to 127.
illuminant : {"A", "B", "C", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10", "R"}, optional
The aperture angle of the observer.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in sRGB color space, of same shape as input.
Raises
------
ValueError
If `lab` is not at least 2-D with shape (..., 3, ...).
Notes
-----
This function uses :func:`~.lab2xyz` and :func:`~.xyz2rgb`.
The CIE XYZ tristimulus values are x_ref = 95.047, y_ref = 100., and
z_ref = 108.883. See function :func:`~.xyz_tristimulus_values` for a list of
supported illuminants.
See Also
--------
rgb2lab
References
----------
.. [1] https://en.wikipedia.org/wiki/Standard_illuminant
.. [2] https://en.wikipedia.org/wiki/CIELAB_color_space
"""
xyz, n_invalid = _lab2xyz(lab, illuminant, observer, channel_axis)
if n_invalid != 0:
warn(
"Conversion from CIE-LAB, via XYZ to sRGB color space resulted in "
f"{n_invalid} negative Z values that have been clipped to zero",
stacklevel=3,
)
return xyz2rgb(xyz, channel_axis=channel_axis)
@cp.memoize(for_each_device=True)
def _get_xyz_to_luv_kernel(xyz_ref_white, dtype):
eps = np.finfo(dtype).eps
preamble = f"""
// u' and v' helper functions
static __device__ __inline__ X fu(X v0, X v1, X v2)
{{
return (4.0 * v0) / (v0 + 15.0 * v1 + 3.0 * v2 + {eps});
}}
static __device__ __inline__ X fv(X v0, X v1, X v2)
{{
return (9.0 * v1) / (v0 + 15.0 * v1 + 3.0 * v2 + {eps});
}}
"""
denom = np.asarray([1, 15, 3]) @ np.asarray(xyz_ref_white, dtype=float)
denom = float(denom)
u0 = 4 * xyz_ref_white[0] / denom
v0 = 9 * xyz_ref_white[1] / denom
_xyz_to_luv = f"""
luv[3*i] = xyz[3*i + 1] / {xyz_ref_white[1]};
if (luv[3*i] > 0.008856)
{{
luv[3*i] = 116.0 * cbrt(luv[3*i]) - 16.0;
}} else {{
luv[3*i] *= 903.3;
}}
luv[3*i + 1] = (
13.0 * luv[3*i] * (fu(xyz[3*i], xyz[3*i + 1], xyz[3*i + 2]) - {u0})
);
luv[3*i + 2] = (
13.0 * luv[3*i] * (fv(xyz[3*i], xyz[3*i + 1], xyz[3*i + 2]) - {v0})
);
"""
# array will be modified in-place
return cp.ElementwiseKernel(
'',
'raw X xyz, raw X luv',
_xyz_to_luv,
preamble=preamble,
name=f'cucim_skimage_color_xyz2luv_{np.dtype(dtype).char}')
@channel_as_last_axis()
def xyz2luv(xyz, illuminant="D65", observer="2", *, channel_axis=-1):
"""XYZ to CIE-Luv color space conversion.
Parameters
----------
xyz : (..., 3, ...) array_like
The image in XYZ format. By default, the final dimension denotes
channels.
illuminant : {"A", "B", "C", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10", "R"}, optional
The aperture angle of the observer.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in CIE-Luv format. Same dimensions as input.
Raises
------
ValueError
If `xyz` is not at least 2-D with shape (..., 3, ...).
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
Notes
-----
By default XYZ conversion weights use observer=2A. Reference whitepoint
for D65 Illuminant, with XYZ tristimulus values of ``(95.047, 100.,
108.883)``. See function :func:`~.xyz_tristimulus_values` for a list of
supported illuminants.
References
----------
.. [1] http://www.easyrgb.com/en/math.php
.. [2] https://en.wikipedia.org/wiki/CIELUV
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage.color import rgb2xyz, xyz2luv
>>> img = cp.array(data.astronaut())
>>> img_xyz = rgb2xyz(img)
>>> img_luv = xyz2luv(img_xyz)
"""
input_is_one_pixel = xyz.ndim == 1
if input_is_one_pixel:
xyz = xyz[np.newaxis, ...]
xyz = _prepare_colorarray(xyz, force_c_contiguous=True,
channel_axis=channel_axis)
xyz_ref_white = xyz_tristimulus_values(
illuminant=illuminant, observer=observer
)
kern = _get_xyz_to_luv_kernel(xyz_ref_white, xyz.dtype)
luv = cp.empty_like(xyz)
kern(xyz, luv, size=xyz.size // 3)
if input_is_one_pixel:
luv = cp.squeeze(luv, axis=0)
return luv
@cp.memoize(for_each_device=True)
def _get_luv_to_xyz_kernel(xyz_ref_white, dtype):
eps = np.finfo(dtype).eps
denom = np.asarray([1, 15, 3]) @ np.asarray(xyz_ref_white, dtype=float)
denom = float(denom)
u0 = 4 * xyz_ref_white[0] / denom
v0 = 9 * xyz_ref_white[1] / denom
_luv_to_xyz = f"""
if (luv[3*i] > 7.999625)
{{
xyz[3*i + 1] = (luv[3 * i] + 16.0) / 116.0;
xyz[3*i + 1] *= xyz[3*i + 1] * xyz[3*i + 1];
}} else {{
xyz[3*i + 1] = luv[3*i] / 903.3;
}}
xyz[3*i + 1] *= {xyz_ref_white[1]};
X a = {u0} + luv[3*i + 1] / (13.0 * luv[3*i] + {eps});
X b = {v0} + luv[3*i + 2] / (13.0 * luv[3*i] + {eps});
X c = 3.0 * xyz[3*i + 1] * (5.0 * b - 3.0);
xyz[3*i + 2] = ((a - 4.0) * c - 15.0 * a * b * xyz[3*i + 1]) / (12.0 * b);
xyz[3*i] = -(c / b + 3.0 * xyz[3*i + 2]);
""" # noqa
return cp.ElementwiseKernel(
'',
'raw X luv, raw X xyz',
_luv_to_xyz,
name=f'cucim_skimage_color_luv2xyz_{np.dtype(dtype).char}')
@channel_as_last_axis()
def luv2xyz(luv, illuminant="D65", observer="2", *, channel_axis=-1):
"""CIE-Luv to XYZ color space conversion.
Parameters
----------
luv : (..., 3, ...) array_like
The image in CIE-Luv format. By default, the final dimension denotes
channels.
illuminant : {"A", "B", "C", "D50", "D55", "D65", "D75", "E"}, optional
The name of the illuminant (the function is NOT case sensitive).
observer : {"2", "10", "R"}, optional
The aperture angle of the observer.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in XYZ format. Same dimensions as input.
Raises
------
ValueError
If `luv` is not at least 2-D with shape (..., 3, ...).
ValueError
If either the illuminant or the observer angle are not supported or
unknown.
Notes
-----
XYZ conversion weights use observer=2A. Reference whitepoint for D65
Illuminant, with XYZ tristimulus values of ``(95.047, 100., 108.883)``. See
function :func:`~.xyz_tristimulus_values` for a list of supported
illuminants.
References
----------
.. [1] http://www.easyrgb.com/en/math.php
.. [2] https://en.wikipedia.org/wiki/CIELUV
"""
luv = _prepare_colorarray(luv, force_c_contiguous=True,
channel_axis=channel_axis)
xyz_ref_white = xyz_tristimulus_values(
illuminant=illuminant, observer=observer
)
kern = _get_luv_to_xyz_kernel(xyz_ref_white, luv.dtype)
xyz = cp.empty_like(luv)
kern(luv, xyz, size=luv.size // 3)
return xyz
@channel_as_last_axis()
def rgb2luv(rgb, *, channel_axis=-1):
"""RGB to CIE-Luv color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in CIE Luv format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
Notes
-----
This function uses rgb2xyz and xyz2luv.
References
----------
.. [1] http://www.easyrgb.com/en/math.php
.. [2] https://en.wikipedia.org/wiki/CIELUV
"""
return xyz2luv(rgb2xyz(rgb))
@channel_as_last_axis()
def luv2rgb(luv, *, channel_axis=-1):
"""Luv to RGB color space conversion.
Parameters
----------
luv : (..., 3, ...) array_like
The image in CIE Luv format. By default, the final dimension denotes
        channels.
    channel_axis : int, optional
        This parameter indicates which axis of the array corresponds to
        channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `luv` is not at least 2-D with shape (..., 3, ...).
Notes
-----
This function uses luv2xyz and xyz2rgb.
"""
return xyz2rgb(luv2xyz(luv))
@channel_as_last_axis()
def rgb2hed(rgb, *, channel_axis=-1):
"""RGB to Haematoxylin-Eosin-DAB (HED) color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in HED format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
References
----------
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage.color import rgb2hed
>>> ihc = cp.array(data.immunohistochemistry())
>>> ihc_hed = rgb2hed(ihc)
"""
return separate_stains(rgb, hed_from_rgb)
@channel_as_last_axis()
def hed2rgb(hed, *, channel_axis=-1):
"""Haematoxylin-Eosin-DAB (HED) to RGB color space conversion.
Parameters
----------
hed : (..., 3, ...) array_like
The image in the HED color space. By default, the final dimension
denotes channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB. Same dimensions as input.
Raises
------
ValueError
If `hed` is not at least 2-D with shape (..., 3, ...).
References
----------
.. [1] A. C. Ruifrok and D. A. Johnston, "Quantification of histochemical
staining by color deconvolution.," Analytical and quantitative
cytology and histology / the International Academy of Cytology [and]
American Society of Cytology, vol. 23, no. 4, pp. 291-9, Aug. 2001.
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage.color import rgb2hed, hed2rgb
>>> ihc = cp.array(data.immunohistochemistry())
>>> ihc_hed = rgb2hed(ihc)
>>> ihc_rgb = hed2rgb(ihc_hed)
"""
return combine_stains(hed, rgb_from_hed)
@cp.memoize(for_each_device=True)
def _separate_stains_kernel(m):
log_adjust = 1 / np.log(1e-6)
code = f"""
X tmp[3];
for (int ch=0; ch<3; ch++)
{{
tmp[ch] = log(max(rgb[3*i + ch], 1e-6)) * {log_adjust};
}}
stains[3*i] = tmp[0] * {m[0]} + tmp[1] * {m[3]} + tmp[2] * {m[6]};
stains[3*i + 1] = tmp[0] * {m[1]} + tmp[1] * {m[4]} + tmp[2] * {m[7]};
stains[3*i + 2] = tmp[0] * {m[2]} + tmp[1] * {m[5]} + tmp[2] * {m[8]};
""" # noqa
return cp.ElementwiseKernel(
'raw X rgb',
'raw X stains',
code,
        name='cucim_skimage_color_separate_stains')
@channel_as_last_axis()
def separate_stains(rgb, conv_matrix, *, channel_axis=-1):
"""RGB to stain color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
conv_matrix: ndarray
The stain separation matrix as described by G. Landini [1]_.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in stain color space. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
Notes
-----
Stain separation matrices available in the ``color`` module and their
respective colorspace:
* ``hed_from_rgb``: Hematoxylin + Eosin + DAB
* ``hdx_from_rgb``: Hematoxylin + DAB
* ``fgx_from_rgb``: Feulgen + Light Green
* ``bex_from_rgb``: Giemsa stain : Methyl Blue + Eosin
* ``rbd_from_rgb``: FastRed + FastBlue + DAB
* ``gdx_from_rgb``: Methyl Green + DAB
* ``hax_from_rgb``: Hematoxylin + AEC
* ``bro_from_rgb``: Blue matrix Anilline Blue + Red matrix Azocarmine\
+ Orange matrix Orange-G
* ``bpx_from_rgb``: Methyl Blue + Ponceau Fuchsin
* ``ahx_from_rgb``: Alcian Blue + Hematoxylin
* ``hpx_from_rgb``: Hematoxylin + PAS
This implementation borrows some ideas from DIPlib [2]_, e.g. the
compensation using a small value to avoid log artifacts when
calculating the Beer-Lambert law.
References
----------
.. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
.. [2] https://github.com/DIPlib/diplib/
.. [3] A. C. Ruifrok and D. A. Johnston, “Quantification of histochemical
staining by color deconvolution,” Anal. Quant. Cytol. Histol., vol.
23, no. 4, pp. 291–299, Aug. 2001.
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage.color import separate_stains, hdx_from_rgb
>>> ihc = cp.array(data.immunohistochemistry())
>>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)
""" # noqa
rgb = _prepare_colorarray(rgb, force_c_contiguous=True,
channel_axis=channel_axis)
if conv_matrix.shape != (3, 3):
raise ValueError("conv_matrix must have shape (3, 3)")
conv_matrix = tuple(cp.asnumpy(conv_matrix).ravel())
    # The kernel below is equivalent to the array-based computation:
    #   cp.maximum(rgb, 1e-6, out=rgb)  # avoid log artifacts
    #   log_adjust = np.log(1e-6)  # used to compensate the clamp above
    #   conv_matrix = cp.asarray(conv_matrix, dtype=rgb.dtype)
    #   stains = (cp.log(rgb) / log_adjust) @ conv_matrix
kern = _separate_stains_kernel(conv_matrix)
stains = cp.empty_like(rgb)
kern(rgb, stains, size=rgb.size // 3)
cp.maximum(stains, 0, out=stains)
return stains
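# Editor note: illustrative sketch only, not part of the cuCIM API. The fused
# kernel in ``separate_stains`` is equivalent to converting to optical density
# via the Beer-Lambert law and projecting onto the stain basis; a hypothetical
# array-based reference follows.
def _separate_stains_reference(rgb, conv_matrix):
    import cupy as cp
    import numpy as np
    rgb = cp.maximum(cp.asarray(rgb, dtype=cp.float64), 1e-6)  # avoid log(0)
    log_adjust = np.log(1e-6)  # compensates the clamp above
    stains = (cp.log(rgb) / log_adjust) @ cp.asarray(conv_matrix, dtype=rgb.dtype)
    return cp.maximum(stains, 0)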
@cp.memoize(for_each_device=True)
def _combine_stains_kernel(m):
# log_adjust here is used to compensate the sum within separate_stains()
log_adjust = np.log(1e-6)
code = f"""
X tmp[3];
for (int ch=0; ch<3; ch++)
{{
tmp[ch] = stains[3*i + ch] * {log_adjust};
}}
rgb[3*i] = tmp[0] * {m[0]} + tmp[1] * {m[3]} + tmp[2] * {m[6]};
rgb[3*i + 1] = tmp[0] * {m[1]} + tmp[1] * {m[4]} + tmp[2] * {m[7]};
rgb[3*i + 2] = tmp[0] * {m[2]} + tmp[1] * {m[5]} + tmp[2] * {m[8]};
for (int ch=0; ch<3; ch++)
{{
rgb[3*i + ch] = min(max(exp(rgb[3*i + ch]), (X)0.0), (X)1.0);
}}
""" # noqa
return cp.ElementwiseKernel(
'raw X stains',
'raw X rgb',
code,
name='cucim_skimage_color_combine_stains')
@channel_as_last_axis()
def combine_stains(stains, conv_matrix, *, channel_axis=-1):
"""Stain to RGB color space conversion.
Parameters
----------
stains : (..., 3, ...) array_like
The image in stain color space. By default, the final dimension denotes
channels.
conv_matrix: ndarray
The stain separation matrix as described by G. Landini [1]_.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `stains` is not at least 2-D with shape (..., 3, ...).
Notes
-----
Stain combination matrices available in the ``color`` module and their
respective colorspace:
* ``rgb_from_hed``: Hematoxylin + Eosin + DAB
* ``rgb_from_hdx``: Hematoxylin + DAB
* ``rgb_from_fgx``: Feulgen + Light Green
* ``rgb_from_bex``: Giemsa stain : Methyl Blue + Eosin
* ``rgb_from_rbd``: FastRed + FastBlue + DAB
* ``rgb_from_gdx``: Methyl Green + DAB
* ``rgb_from_hax``: Hematoxylin + AEC
* ``rgb_from_bro``: Blue matrix Anilline Blue + Red matrix Azocarmine\
+ Orange matrix Orange-G
* ``rgb_from_bpx``: Methyl Blue + Ponceau Fuchsin
* ``rgb_from_ahx``: Alcian Blue + Hematoxylin
* ``rgb_from_hpx``: Hematoxylin + PAS
References
----------
.. [1] https://web.archive.org/web/20160624145052/http://www.mecourse.com/landinig/software/cdeconv/cdeconv.html
.. [2] A. C. Ruifrok and D. A. Johnston, “Quantification of histochemical
staining by color deconvolution,” Anal. Quant. Cytol. Histol., vol.
23, no. 4, pp. 291–299, Aug. 2001.
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage.color import (separate_stains, combine_stains,
... hdx_from_rgb, rgb_from_hdx)
>>> ihc = cp.array(data.immunohistochemistry())
>>> ihc_hdx = separate_stains(ihc, hdx_from_rgb)
>>> ihc_rgb = combine_stains(ihc_hdx, rgb_from_hdx)
""" # noqa
stains = _prepare_colorarray(stains, force_c_contiguous=True,
channel_axis=channel_axis)
if conv_matrix.shape != (3, 3):
raise ValueError("conv_matrix must have shape (3, 3)")
conv_matrix = tuple(cp.asnumpy(conv_matrix).ravel())
kern = _combine_stains_kernel(conv_matrix)
rgb = cp.empty_like(stains)
kern(stains, rgb, size=stains.size // 3)
return rgb
@cp.memoize(for_each_device=True)
def _lab2lch_kernel(nchannels=3, name='lab2lch'):
code = f"""
X a = lab[{nchannels}*i + 1];
X b = lab[{nchannels}*i + 2];
// update lab array in-place with the lch values
lab[{nchannels}*i + 1] = hypot(a, b);
lab[{nchannels}*i + 2] = atan2(b, a);
// NON-STANDARD RANGE! Maps to ``(0, 2*pi)`` rather than ``(-pi, +pi)``
if (lab[{nchannels}*i + 2] < 0)
{{
lab[{nchannels}*i + 2] += 2 * M_PI;
}}
""" # noqa
return cp.ElementwiseKernel(
'',
'raw X lab',
code,
name='cucim_skimage_color_' + name)
@channel_as_last_axis()
def lab2lch(lab, *, channel_axis=-1):
"""CIE-LAB to CIE-LCH color space conversion.
LCH is the cylindrical representation of the LAB (Cartesian) colorspace
Parameters
----------
lab : (..., 3, ...) array_like
The N-D image in CIE-LAB format. The last (``N+1``-th) dimension must
have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``
color channels. Subsequent elements are copied.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in LCH format, in a N-D array with same shape as input `lab`.
Raises
------
ValueError
        If `lab` does not have at least 3 color channels (i.e. l, a, b).
Notes
-----
The Hue is expressed as an angle between ``(0, 2*pi)``
Examples
--------
    >>> import cupy as cp
    >>> from skimage import data
>>> from cucim.skimage.color import rgb2lab, lab2lch
>>> img = cp.array(data.astronaut())
>>> img_lab = rgb2lab(img)
>>> img_lch = lab2lch(img_lab)
"""
lab = _prepare_lab_array(lab, force_copy=True)
nchannels = lab.shape[-1]
name = f'lab2lch_{nchannels}channel_{lab.dtype}'
kern = _lab2lch_kernel(nchannels, name=name)
kern(lab, size=lab.size // nchannels)
return lab
def _cart2polar_2pi(x, y):
"""convert cartesian coordinates to polar (uses non-standard theta range!)
NON-STANDARD RANGE! Maps to ``(0, 2*pi)`` rather than usual ``(-pi, +pi)``
"""
r, t = cp.hypot(x, y), cp.arctan2(y, x)
t += cp.where(t < 0., 2 * np.pi, 0)
return r, t
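# Editor note: illustrative sketch only, not part of the cuCIM API. ``lab2lch``
# applies exactly this Cartesian-to-polar mapping to the (a, b) plane, giving
# C = hypot(a, b) and H = atan2(b, a) shifted into (0, 2*pi). Hypothetical
# single-pixel reference:
def _lab_pixel_to_lch(L, a, b):
    import math
    C = math.hypot(a, b)
    H = math.atan2(b, a)
    if H < 0:
        H += 2 * math.pi
    return L, C, H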
@cp.memoize(for_each_device=True)
def _lch2lab_kernel(nchannels=3, name='lch2lab'):
code = f"""
X sin_h = sin(lch[{nchannels}*i + 2]);
X cos_h = cos(lch[{nchannels}*i + 2]);
// update lch array in-place with the lab values
lch[{nchannels}*i + 2] = lch[{nchannels}*i + 1] * sin_h;
lch[{nchannels}*i + 1] = lch[{nchannels}*i + 1] * cos_h;
""" # noqa
return cp.ElementwiseKernel(
'',
'raw X lch',
code,
name='cucim_skimage_color_' + name)
@channel_as_last_axis()
def lch2lab(lch, *, channel_axis=-1):
"""CIE-LCH to CIE-LAB color space conversion.
LCH is the cylindrical representation of the LAB (Cartesian) colorspace
Parameters
----------
lch : (..., 3, ...) array_like
The N-D image in CIE-LCH format. The last (``N+1``-th) dimension must
have at least 3 elements, corresponding to the ``L``, ``a``, and ``b``
color channels. Subsequent elements are copied.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in LAB format, with same shape as input `lch`.
Raises
------
ValueError
If `lch` does not have at least 3 color channels (i.e. l, c, h).
Examples
--------
    >>> import cupy as cp
    >>> from skimage import data
>>> from cucim.skimage.color import rgb2lab, lch2lab
>>> img = cp.array(data.astronaut())
>>> img_lab = rgb2lab(img)
>>> img_lch = lab2lch(img_lab)
>>> img_lab2 = lch2lab(img_lch)
"""
# make a copy because lch will be modified in-place by the kernel below
lch = _prepare_lab_array(lch, force_copy=True)
nchannels = lch.shape[-1]
name = f'lch2lab_{nchannels}channel_{lch.dtype}'
kern = _lch2lab_kernel(nchannels, name=name)
kern(lch, size=lch.size // nchannels)
return lch
def _prepare_lab_array(arr, force_copy=True):
"""Ensure input for lab2lch, lch2lab are well-posed.
Arrays must be in floating point and have at least 3 elements in
last dimension. Return a new array.
"""
shape = arr.shape
if shape[-1] < 3:
raise ValueError('Input array has less than 3 color channels')
float_dtype = _supported_float_type(arr.dtype)
if float_dtype == np.float32:
_func = dtype.img_as_float32
else:
_func = dtype.img_as_float64
return _func(arr, force_copy=force_copy)
@channel_as_last_axis()
def rgb2yuv(rgb, *, channel_axis=-1):
"""RGB to YUV color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in YUV format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
Notes
-----
Y is between 0 and 1. Use YCbCr instead of YUV for the color space
commonly used by video codecs, where Y ranges from 16 to 235.
References
----------
.. [1] https://en.wikipedia.org/wiki/YUV
"""
return _convert(yuv_from_rgb, rgb, name='rgb2yuv')
@channel_as_last_axis()
def rgb2yiq(rgb, *, channel_axis=-1):
"""RGB to YIQ color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in YIQ format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
"""
return _convert(yiq_from_rgb, rgb, name='rgb2yiq')
@channel_as_last_axis()
def rgb2ypbpr(rgb, *, channel_axis=-1):
"""RGB to YPbPr color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in YPbPr format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
References
----------
.. [1] https://en.wikipedia.org/wiki/YPbPr
"""
return _convert(ypbpr_from_rgb, rgb, name='rgb2ypbpr')
@channel_as_last_axis()
def rgb2ycbcr(rgb, *, channel_axis=-1):
"""RGB to YCbCr color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in YCbCr format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
Notes
-----
Y is between 16 and 235. This is the color space commonly used by video
codecs; it is sometimes incorrectly called "YUV".
References
----------
.. [1] https://en.wikipedia.org/wiki/YCbCr
"""
_post_colorconv = """
y[3*i] += 16;
y[3*i + 1] += 128;
y[3*i + 2] += 128;
"""
arr = _convert(ycbcr_from_rgb, rgb, post=_post_colorconv, name='rgb2ycbcr')
return arr
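# Illustrative sketch (not part of the original module): the post-conversion
# offsets above shift the result into the video range, so pure white maps to
# approximately (Y, Cb, Cr) = (235, 128, 128). Assuming a CuPy input:
#
#     import cupy as cp
#     from cucim.skimage.color import rgb2ycbcr, ycbcr2rgb
#     white = cp.ones((1, 1, 3))
#     rgb2ycbcr(white)                          # ~ [[[235., 128., 128.]]]
#     bool(cp.allclose(ycbcr2rgb(rgb2ycbcr(white)), white, atol=1e-5))  # True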
@channel_as_last_axis()
def rgb2ydbdr(rgb, *, channel_axis=-1):
"""RGB to YDbDr color space conversion.
Parameters
----------
rgb : (..., 3, ...) array_like
The image in RGB format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in YDbDr format. Same dimensions as input.
Raises
------
ValueError
If `rgb` is not at least 2-D with shape (..., 3, ...).
Notes
-----
This is the color space commonly used by video codecs. It is also the
reversible color transform in JPEG2000.
References
----------
.. [1] https://en.wikipedia.org/wiki/YDbDr
"""
return _convert(ydbdr_from_rgb, rgb, name='rgb2ydbdr')
@channel_as_last_axis()
def yuv2rgb(yuv, *, channel_axis=-1):
"""YUV to RGB color space conversion.
Parameters
----------
yuv : (..., 3, ...) array_like
The image in YUV format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `yuv` is not at least 2-D with shape (..., 3, ...).
References
----------
.. [1] https://en.wikipedia.org/wiki/YUV
"""
return _convert(rgb_from_yuv, yuv, name='yuv2rgb')
@channel_as_last_axis()
def yiq2rgb(yiq, *, channel_axis=-1):
"""YIQ to RGB color space conversion.
Parameters
----------
yiq : (..., 3, ...) array_like
The image in YIQ format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `yiq` is not at least 2-D with shape (..., 3, ...).
"""
return _convert(rgb_from_yiq, yiq, name='yiq2rgb')
@channel_as_last_axis()
def ypbpr2rgb(ypbpr, *, channel_axis=-1):
"""YPbPr to RGB color space conversion.
Parameters
----------
ypbpr : (..., 3, ...) array_like
The image in YPbPr format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `ypbpr` is not at least 2-D with shape (..., 3, ...).
References
----------
.. [1] https://en.wikipedia.org/wiki/YPbPr
"""
return _convert(rgb_from_ypbpr, ypbpr, name='ypbpr2rgb')
@channel_as_last_axis()
def ycbcr2rgb(ycbcr, *, channel_axis=-1):
"""YCbCr to RGB color space conversion.
Parameters
----------
ycbcr : (..., 3, ...) array_like
The image in YCbCr format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `ycbcr` is not at least 2-D with shape (..., 3, ...).
Notes
-----
Y is between 16 and 235. This is the color space commonly used by video
codecs; it is sometimes incorrectly called "YUV".
References
----------
.. [1] https://en.wikipedia.org/wiki/YCbCr
"""
arr = ycbcr.copy()
_pre_colorconv = """
x[3*i] -= 16;
x[3*i + 1] -= 128;
x[3*i + 2] -= 128;
"""
return _convert(rgb_from_ycbcr, arr, pre=_pre_colorconv,
name='ycbcr2rgb')
@channel_as_last_axis()
def ydbdr2rgb(ydbdr, *, channel_axis=-1):
"""YDbDr to RGB color space conversion.
Parameters
----------
ydbdr : (..., 3, ...) array_like
The image in YDbDr format. By default, the final dimension denotes
channels.
channel_axis : int, optional
This parameter indicates which axis of the array corresponds to
channels.
Returns
-------
out : (..., 3, ...) ndarray
The image in RGB format. Same dimensions as input.
Raises
------
ValueError
If `ydbdr` is not at least 2-D with shape (..., 3, ...).
Notes
-----
This is the color space commonly used by video codecs, also called the
reversible color transform in JPEG2000.
References
----------
.. [1] https://en.wikipedia.org/wiki/YDbDr
"""
return _convert(rgb_from_ydbdr, ydbdr, name='ydbdr2rgb')
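# Illustrative sketch (not part of the original module): the pairwise
# conversions defined above are also reachable through ``convert_colorspace``.
# A round trip assuming a hypothetical CuPy RGB image:
#
#     import cupy as cp
#     from cucim.skimage.color import convert_colorspace
#     img = cp.random.rand(4, 4, 3)
#     ycbcr = convert_colorspace(img, "RGB", "YCbCr")
#     back = convert_colorspace(ycbcr, "YCbCr", "RGB")
#     bool(cp.allclose(back, img, atol=1e-5))   # True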
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/__init__.py
|
from .colorconv import (
ahx_from_rgb,
bex_from_rgb,
bpx_from_rgb,
bro_from_rgb,
combine_stains,
convert_colorspace,
fgx_from_rgb,
gdx_from_rgb,
gray2rgb,
gray2rgba,
hax_from_rgb,
hdx_from_rgb,
hed2rgb,
hed_from_rgb,
hpx_from_rgb,
hsv2rgb,
lab2lch,
lab2rgb,
lab2xyz,
lch2lab,
luv2rgb,
luv2xyz,
rbd_from_rgb,
rgb2gray,
rgb2hed,
rgb2hsv,
rgb2lab,
rgb2luv,
rgb2rgbcie,
rgb2xyz,
rgb2ycbcr,
rgb2ydbdr,
rgb2yiq,
rgb2ypbpr,
rgb2yuv,
rgb_from_ahx,
rgb_from_bex,
rgb_from_bpx,
rgb_from_bro,
rgb_from_fgx,
rgb_from_gdx,
rgb_from_hax,
rgb_from_hdx,
rgb_from_hed,
rgb_from_hpx,
rgb_from_rbd,
rgba2rgb,
rgbcie2rgb,
separate_stains,
xyz2lab,
xyz2luv,
xyz2rgb,
xyz_tristimulus_values,
ycbcr2rgb,
ydbdr2rgb,
yiq2rgb,
ypbpr2rgb,
yuv2rgb,
)
from .colorlabel import color_dict, label2rgb
from .delta_e import deltaE_cie76, deltaE_ciede94, deltaE_ciede2000, deltaE_cmc
__all__ = [
"convert_colorspace",
"xyz_tristimulus_values",
"rgba2rgb",
"rgb2hsv",
"hsv2rgb",
"rgb2xyz",
"xyz2rgb",
"rgb2rgbcie",
"rgbcie2rgb",
"rgb2gray",
"gray2rgb",
"gray2rgba",
"xyz2lab",
"lab2xyz",
"lab2rgb",
"rgb2lab",
"xyz2luv",
"luv2xyz",
"luv2rgb",
"rgb2luv",
"rgb2hed",
"hed2rgb",
"lab2lch",
"lch2lab",
"rgb2yuv",
"yuv2rgb",
"rgb2yiq",
"yiq2rgb",
"rgb2ypbpr",
"ypbpr2rgb",
"rgb2ycbcr",
"ycbcr2rgb",
"rgb2ydbdr",
"ydbdr2rgb",
"separate_stains",
"combine_stains",
"rgb_from_hed",
"hed_from_rgb",
"rgb_from_hdx",
"hdx_from_rgb",
"rgb_from_fgx",
"fgx_from_rgb",
"rgb_from_bex",
"bex_from_rgb",
"rgb_from_rbd",
"rbd_from_rgb",
"rgb_from_gdx",
"gdx_from_rgb",
"rgb_from_hax",
"hax_from_rgb",
"rgb_from_bro",
"bro_from_rgb",
"rgb_from_bpx",
"bpx_from_rgb",
"rgb_from_ahx",
"ahx_from_rgb",
"rgb_from_hpx",
"hpx_from_rgb",
"color_dict",
"label2rgb",
"deltaE_cie76",
"deltaE_ciede94",
"deltaE_ciede2000", # TODO: fix accuracy
"deltaE_cmc",
]
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/delta_e.py
|
"""
Functions for calculating the "distance" between colors.
Implicit in these definitions of "distance" is the notion of "Just Noticeable
Distance" (JND). This represents the distance between colors where a human can
perceive different colors. Humans are more sensitive to certain colors than
others, which different deltaE metrics correct for with varying degrees of
sophistication.
The literature often mentions 1 as the minimum distance for visual
differentiation, but more recent studies (Mahy 1994) peg JND at 2.3.
The delta-E notation comes from the German word for "Sensation" (Empfindung).
Reference
---------
https://en.wikipedia.org/wiki/Color_difference
"""
import warnings
import cupy as cp
import numpy as np
from .._shared.utils import _supported_float_type
from .colorconv import _cart2polar_2pi, lab2lch
def _float_inputs(lab1, lab2, allow_float32=True):
if allow_float32:
float_dtype = _supported_float_type((lab1.dtype, lab2.dtype))
else:
float_dtype = cp.float64
lab1 = lab1.astype(float_dtype, copy=False)
lab2 = lab2.astype(float_dtype, copy=False)
return lab1, lab2
def deltaE_cie76(lab1, lab2, channel_axis=-1):
"""Euclidean distance between two points in Lab color space
Parameters
----------
lab1 : array_like
reference color (Lab colorspace)
lab2 : array_like
comparison color (Lab colorspace)
channel_axis : int, optional
This parameter indicates which axis of the arrays corresponds to
channels.
Returns
-------
dE : array_like
distance between colors `lab1` and `lab2`
References
----------
.. [1] https://en.wikipedia.org/wiki/Color_difference
.. [2] A. R. Robertson, "The CIE 1976 color-difference formulae,"
Color Res. Appl. 2, 7-11 (1977).
"""
lab1, lab2 = _float_inputs(lab1, lab2, allow_float32=True)
L1, a1, b1 = cp.moveaxis(lab1, source=channel_axis, destination=0)[:3]
L2, a2, b2 = cp.moveaxis(lab2, source=channel_axis, destination=0)[:3]
out = (L2 - L1) * (L2 - L1)
out += (a2 - a1) * (a2 - a1)
out += (b2 - b1) * (b2 - b1)
return cp.sqrt(out, out=out)
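# Illustrative sketch (not part of the original module): deltaE_cie76 is the
# plain Euclidean distance in Lab, so two greys differing only in L by 2.3
# (the JND figure cited in the module docstring) give dE = 2.3:
#
#     import cupy as cp
#     lab1 = cp.asarray([50.0, 0.0, 0.0])
#     lab2 = cp.asarray([52.3, 0.0, 0.0])
#     float(deltaE_cie76(lab1, lab2))           # ~2.3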
def deltaE_ciede94(
lab1, lab2, kH=1, kC=1, kL=1, k1=0.045, k2=0.015, *, channel_axis=-1
):
"""Color difference according to CIEDE 94 standard
Accommodates perceptual non-uniformities through the use of application
specific scale factors (`kH`, `kC`, `kL`, `k1`, and `k2`).
Parameters
----------
lab1 : array_like
reference color (Lab colorspace)
lab2 : array_like
comparison color (Lab colorspace)
kH : float, optional
Hue scale
kC : float, optional
Chroma scale
kL : float, optional
Lightness scale
k1 : float, optional
first scale parameter
k2 : float, optional
second scale parameter
channel_axis : int, optional
This parameter indicates which axis of the arrays corresponds to
channels.
Returns
-------
dE : array_like
color difference between `lab1` and `lab2`
Notes
-----
deltaE_ciede94 is not symmetric with respect to lab1 and lab2. CIEDE94
defines the scales for the lightness, hue, and chroma in terms of the first
color. Consequently, the first color should be regarded as the "reference"
color.
`kL`, `k1`, `k2` depend on the application and default to the values
suggested for graphic arts:
========== ============== ==========
Parameter Graphic Arts Textiles
========== ============== ==========
`kL` 1.000 2.000
`k1` 0.045 0.048
`k2` 0.015 0.014
========== ============== ==========
References
----------
.. [1] https://en.wikipedia.org/wiki/Color_difference
.. [2] http://www.brucelindbloom.com/index.html?Eqn_DeltaE_CIE94.html
"""
lab1, lab2 = _float_inputs(lab1, lab2, allow_float32=True)
lab1 = cp.moveaxis(lab1, source=channel_axis, destination=0)
lab2 = cp.moveaxis(lab2, source=channel_axis, destination=0)
L1, C1 = lab2lch(lab1, channel_axis=0)[:2]
L2, C2 = lab2lch(lab2, channel_axis=0)[:2]
dL = L1 - L2
dC = C1 - C2
dH2 = get_dH2(lab1, lab2, channel_axis=0)
SL = 1
SC = 1 + k1 * C1
SH = 1 + k2 * C1
dE2 = dL / (kL * SL)
dE2 *= dE2
tmp = dC / (kC * SC)
tmp *= tmp
dE2 += tmp
tmp = kH * SH
tmp *= tmp
dE2 += dH2 / tmp
return cp.sqrt(cp.maximum(dE2, 0, out=dE2), out=dE2)
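# Illustrative sketch (not part of the original module): the table above lists
# the textile parameters; assuming Lab inputs ``lab1`` (the reference color)
# and ``lab2``, they would be passed as keyword arguments:
#
#     dE_textiles = deltaE_ciede94(lab1, lab2, kL=2, k1=0.048, k2=0.014)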
def deltaE_ciede2000(lab1, lab2, kL=1, kC=1, kH=1, *, channel_axis=-1):
"""Color difference as given by the CIEDE 2000 standard.
CIEDE 2000 is a major revision of CIEDE94. The perceptual calibration is
largely based on experience with automotive paint on smooth surfaces.
Parameters
----------
lab1 : array_like
reference color (Lab colorspace)
lab2 : array_like
comparison color (Lab colorspace)
kL : float (range), optional
lightness scale factor, 1 for "acceptably close"; 2 for "imperceptible"
see deltaE_cmc
kC : float (range), optional
chroma scale factor, usually 1
kH : float (range), optional
hue scale factor, usually 1
channel_axis : int, optional
This parameter indicates which axis of the arrays corresponds to
channels.
Returns
-------
deltaE : array_like
The distance between `lab1` and `lab2`
Notes
-----
CIEDE 2000 assumes parametric weighting factors for the lightness, chroma,
and hue (`kL`, `kC`, `kH` respectively). These default to 1.
References
----------
.. [1] https://en.wikipedia.org/wiki/Color_difference
.. [2] http://www.ece.rochester.edu/~gsharma/ciede2000/ciede2000noteCRNA.pdf
:DOI:`10.1364/AO.33.008069`
.. [3] M. Melgosa, J. Quesada, and E. Hita, "Uniformity of some recent
color metrics tested with an accurate color-difference tolerance
dataset," Appl. Opt. 33, 8069-8077 (1994).
"""
lab1, lab2 = _float_inputs(lab1, lab2, allow_float32=True)
warnings.warn(
"The numerical accuracy of this function on the GPU is reduced "
"relative to the CPU version"
)
channel_axis = channel_axis % lab1.ndim
unroll = False
if lab1.ndim == 1 and lab2.ndim == 1:
unroll = True
if lab1.ndim == 1:
lab1 = lab1[None, :]
if lab2.ndim == 1:
lab2 = lab2[None, :]
channel_axis += 1
L1, a1, b1 = cp.moveaxis(lab1, source=channel_axis, destination=0)[:3]
L2, a2, b2 = cp.moveaxis(lab2, source=channel_axis, destination=0)[:3]
# distort `a` based on average chroma
# then convert to lch coordinates from distorted `a`
# all subsequent calculations are in the new coordinates
# (often denoted "prime" in the literature)
Cbar = 0.5 * (cp.hypot(a1, b1) + cp.hypot(a2, b2))
c7 = Cbar**7
G = 0.5 * (1 - cp.sqrt(c7 / (c7 + 25**7)))
scale = 1 + G
C1, h1 = _cart2polar_2pi(a1 * scale, b1)
C2, h2 = _cart2polar_2pi(a2 * scale, b2)
# recall that c, h are polar coordinates. c==r, h==theta
# ciede2000 has four terms in delta_e:
# 1) Luminance term
# 2) Hue term
# 3) Chroma term
# 4) hue Rotation term
# lightness term
Lbar = 0.5 * (L1 + L2)
tmp = Lbar - 50
tmp *= tmp
SL = 1 + 0.015 * tmp / cp.sqrt(20 + tmp)
L_term = (L2 - L1) / (kL * SL)
# chroma term
Cbar = 0.5 * (C1 + C2) # new coordinates
SC = 1 + 0.045 * Cbar
C_term = (C2 - C1) / (kC * SC)
# hue term
h_diff = h2 - h1
h_sum = h1 + h2
CC = C1 * C2
dH = h_diff.copy()
dH[h_diff > np.pi] -= 2 * np.pi
dH[h_diff < -np.pi] += 2 * np.pi
dH[CC == 0.0] = 0.0 # if r == 0, dtheta == 0
dH_term = 2 * cp.sqrt(CC) * cp.sin(dH / 2)
Hbar = h_sum.copy()
mask = cp.logical_and(CC != 0.0, cp.abs(h_diff) > np.pi)
Hbar[mask * (h_sum < 2 * np.pi)] += 2 * np.pi
Hbar[mask * (h_sum >= 2 * np.pi)] -= 2 * np.pi
Hbar[CC == 0.0] *= 2
Hbar *= 0.5
T = (
1
- 0.17 * cp.cos(Hbar - np.deg2rad(30))
+ 0.24 * cp.cos(2 * Hbar)
+ 0.32 * cp.cos(3 * Hbar + np.deg2rad(6))
- 0.20 * cp.cos(4 * Hbar - np.deg2rad(63))
)
SH = 1 + 0.015 * Cbar * T
H_term = dH_term / (kH * SH)
# hue rotation
c7 = Cbar**7
Rc = 2 * cp.sqrt(c7 / (c7 + 25**7))
tmp = (cp.rad2deg(Hbar) - 275) / 25
tmp *= tmp
dtheta = np.deg2rad(30) * cp.exp(-tmp)
R_term = -cp.sin(2 * dtheta) * Rc * C_term * H_term
# put it all together
dE2 = L_term * L_term
dE2 += C_term * C_term
dE2 += H_term * H_term
dE2 += R_term
cp.sqrt(cp.maximum(dE2, 0, out=dE2), out=dE2)
if unroll:
dE2 = dE2[0]
return dE2
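# Illustrative sketch (not part of the original module): the reduced-accuracy
# message above is issued as a standard ``UserWarning``, so it can be silenced
# with the ``warnings`` module when the GPU precision is acceptable:
#
#     import warnings
#     with warnings.catch_warnings():
#         warnings.simplefilter("ignore", UserWarning)
#         dE = deltaE_ciede2000(lab1, lab2)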
def deltaE_cmc(lab1, lab2, kL=1, kC=1, *, channel_axis=-1):
"""Color difference from the CMC l:c standard.
This color difference was developed by the Colour Measurement Committee
(CMC) of the Society of Dyers and Colourists (United Kingdom). It is
intended for use in the textile industry.
The scale factors `kL`, `kC` set the weight given to differences in
lightness and chroma relative to differences in hue. The usual values are
``kL=2``, ``kC=1`` for "acceptability" and ``kL=1``, ``kC=1`` for
"imperceptibility". Colors with ``dE > 1`` are "different" for the given
scale factors.
Parameters
----------
lab1 : array_like
reference color (Lab colorspace)
lab2 : array_like
comparison color (Lab colorspace)
channel_axis : int, optional
This parameter indicates which axis of the arrays corresponds to
channels.
Returns
-------
dE : array_like
distance between colors `lab1` and `lab2`
Notes
-----
deltaE_cmc defines the scales for the lightness, hue, and chroma
in terms of the first color. Consequently
``deltaE_cmc(lab1, lab2) != deltaE_cmc(lab2, lab1)``
References
----------
.. [1] https://en.wikipedia.org/wiki/Color_difference
.. [2] http://www.brucelindbloom.com/index.html?Eqn_DeltaE_CIE94.html
.. [3] F. J. J. Clarke, R. McDonald, and B. Rigg, "Modification to the
JPC79 colour-difference formula," J. Soc. Dyers Colour. 100, 128-132
(1984).
"""
lab1, lab2 = _float_inputs(lab1, lab2, allow_float32=True)
lab1 = cp.moveaxis(lab1, source=channel_axis, destination=0)
lab2 = cp.moveaxis(lab2, source=channel_axis, destination=0)
L1, C1, h1 = lab2lch(lab1, channel_axis=0)[:3]
L2, C2, h2 = lab2lch(lab2, channel_axis=0)[:3]
dC = C1 - C2
dL = L1 - L2
dH2 = get_dH2(lab1, lab2, channel_axis=0)
T = cp.where(
cp.logical_and(cp.rad2deg(h1) >= 164, cp.rad2deg(h1) <= 345),
0.56 + 0.2 * cp.abs(cp.cos(h1 + cp.deg2rad(168))),
0.36 + 0.4 * cp.abs(cp.cos(h1 + cp.deg2rad(35))),
)
c1_4 = C1**4
F = cp.sqrt(c1_4 / (c1_4 + 1900))
SL = cp.where(L1 < 16, 0.511, 0.040975 * L1 / (1.0 + 0.01765 * L1))
SC = 0.638 + 0.0638 * C1 / (1.0 + 0.0131 * C1)
SH = SC * (F * T + 1 - F)
dE2 = (dL / (kL * SL)) ** 2
dE2 += (dC / (kC * SC)) ** 2
dE2 += dH2 / (SH**2)
return cp.sqrt(cp.maximum(dE2, 0, out=dE2), out=dE2)
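# Illustrative sketch (not part of the original module): the two standard
# settings described in the docstring would be requested as follows, with
# ``lab1`` acting as the reference color:
#
#     dE_acceptability    = deltaE_cmc(lab1, lab2, kL=2, kC=1)
#     dE_imperceptibility = deltaE_cmc(lab1, lab2, kL=1, kC=1)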
def get_dH2(lab1, lab2, *, channel_axis=-1):
"""squared hue difference term occurring in deltaE_cmc and deltaE_ciede94
Despite its name, "dH" is not a simple difference of hue values. We avoid
working directly with the hue value, since differencing angles is
troublesome. The hue term is usually written as:
c1 = sqrt(a1**2 + b1**2)
c2 = sqrt(a2**2 + b2**2)
term = (a1-a2)**2 + (b1-b2)**2 - (c1-c2)**2
dH = sqrt(term)
However, this has poor roundoff properties when a or b is dominant.
Instead, ab is a vector with elements a and b. The same dH term can be
re-written as:
|ab1-ab2|**2 - (|ab1| - |ab2|)**2
and then simplified to:
2*|ab1|*|ab2| - 2*dot(ab1, ab2)
"""
# This function needs double precision internally for accuracy
input_is_float_32 = (
_supported_float_type((lab1.dtype, lab2.dtype)) == cp.float32
)
lab1, lab2 = _float_inputs(lab1, lab2, allow_float32=False)
a1, b1 = cp.moveaxis(lab1, source=channel_axis, destination=0)[1:3]
a2, b2 = cp.moveaxis(lab2, source=channel_axis, destination=0)[1:3]
# magnitude of (a, b) is the chroma
C1 = cp.hypot(a1, b1)
C2 = cp.hypot(a2, b2)
term = (C1 * C2) - (a1 * a2 + b1 * b2)
out = 2 * term
if input_is_float_32:
out = out.astype(np.float32)
return out
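# Illustrative numeric check (not part of the original module): both forms of
# the hue term in the docstring agree. For ab1 = (3, 4) and ab2 = (6, 8),
# so that C1 = 5 and C2 = 10, the naive form gives
# (3-6)**2 + (4-8)**2 - (5-10)**2 = 9 + 16 - 25 = 0, and the rewritten form
# gives 2*5*10 - 2*(3*6 + 4*8) = 100 - 100 = 0.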
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/adapt_rgb.py
|
import functools
import cupy as cp
from .. import color
from ..util.dtype import _convert
__all__ = ["adapt_rgb", "hsv_value", "each_channel"]
def is_rgb_like(image, channel_axis=-1):
"""Return True if the image *looks* like it's RGB.
This function should not be public because it is only intended to be used
for functions that don't accept volumes as input, since checking an image's
shape is fragile.
"""
return (image.ndim == 3) and (image.shape[channel_axis] in (3, 4))
def adapt_rgb(apply_to_rgb):
"""Return decorator that adapts to RGB images to a gray-scale filter.
This function is only intended to be used for functions that don't accept
volumes as input, since checking an image's shape is fragile.
Parameters
----------
apply_to_rgb : function
Function that returns a filtered image from an image-filter and RGB
image. This will only be called if the image is RGB-like.
"""
def decorator(image_filter):
@functools.wraps(image_filter)
def image_filter_adapted(image, *args, **kwargs):
if is_rgb_like(image):
return apply_to_rgb(image_filter, image, *args, **kwargs)
else:
return image_filter(image, *args, **kwargs)
return image_filter_adapted
return decorator
def hsv_value(image_filter, image, *args, **kwargs):
"""Return color image by applying `image_filter` on HSV-value of `image`.
Note that this function is intended for use with `adapt_rgb`.
Parameters
----------
image_filter : function
Function that filters a gray-scale image.
image : array
Input image. Note that RGBA images are treated as RGB.
"""
# Slice the first three channels so that we remove any alpha channels.
hsv = color.rgb2hsv(image[:, :, :3])
value = hsv[:, :, 2].copy()
value = image_filter(value, *args, **kwargs)
hsv[:, :, 2] = _convert(value, hsv.dtype)
return color.hsv2rgb(hsv)
def each_channel(image_filter, image, *args, **kwargs):
"""Return color image by applying `image_filter` on channels of `image`.
Note that this function is intended for use with `adapt_rgb`.
Parameters
----------
image_filter : function
Function that filters a gray-scale image.
image : array
Input image.
"""
c_new = [
image_filter(c, *args, **kwargs) for c in cp.moveaxis(image, -1, 0)
]
return cp.stack(c_new, axis=-1)
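# Illustrative sketch (not part of the original module): a gray-scale filter
# can be adapted to RGB input either channel-by-channel or on the HSV value
# channel. ``sobel_each`` and ``sobel_hsv`` are hypothetical names, and
# ``filters.sobel`` is assumed to be available from cucim.skimage:
#
#     from cucim.skimage import filters
#     from cucim.skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
#
#     @adapt_rgb(each_channel)
#     def sobel_each(image):
#         return filters.sobel(image)
#
#     @adapt_rgb(hsv_value)
#     def sobel_hsv(image):
#         return filters.sobel(image)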
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/tests/test_delta_e.py
|
"""Test for correctness of color distance functions"""
import cupy as cp
import numpy as np
import pytest
from cupy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from cucim.skimage._shared.testing import expected_warnings, fetch
from cucim.skimage._shared.utils import _supported_float_type
from cucim.skimage.color.delta_e import (
deltaE_cie76,
deltaE_ciede94,
deltaE_ciede2000,
deltaE_cmc,
)
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
def test_ciede2000_dE(dtype, channel_axis):
data = load_ciede2000_data()
N = len(data)
lab1 = np.zeros((N, 3), dtype=dtype)
lab1[:, 0] = data["L1"]
lab1[:, 1] = data["a1"]
lab1[:, 2] = data["b1"]
lab2 = np.zeros((N, 3), dtype=dtype)
lab2[:, 0] = data["L2"]
lab2[:, 1] = data["a2"]
lab2[:, 2] = data["b2"]
lab1 = cp.moveaxis(cp.asarray(lab1), source=-1, destination=channel_axis)
lab2 = cp.moveaxis(cp.asarray(lab2), source=-1, destination=channel_axis)
dE2 = deltaE_ciede2000(lab1, lab2, channel_axis=channel_axis)
assert dE2.dtype == _supported_float_type(dtype)
# Note: lower float64 accuracy than scikit-image
# rtol = 1e-2 if dtype == cp.float32 else 1e-4
rtol = 1e-2
assert_allclose(dE2, data["dE"], rtol=rtol)
def load_ciede2000_data():
dtype = [
("pair", int),
("1", int),
("L1", float),
("a1", float),
("b1", float),
("a1_prime", float),
("C1_prime", float),
("h1_prime", float),
("hbar_prime", float),
("G", float),
("T", float),
("SL", float),
("SC", float),
("SH", float),
("RT", float),
("dE", float),
("2", int),
("L2", float),
("a2", float),
("b2", float),
("a2_prime", float),
("C2_prime", float),
("h2_prime", float),
]
# note: ciede_test_data.txt contains several intermediate quantities
path = fetch("color/tests/ciede2000_test_data.txt")
return np.loadtxt(path, dtype=dtype)
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
def test_cie76(dtype, channel_axis):
data = load_ciede2000_data()
N = len(data)
lab1 = np.zeros((N, 3), dtype=dtype)
lab1[:, 0] = data["L1"]
lab1[:, 1] = data["a1"]
lab1[:, 2] = data["b1"]
lab2 = np.zeros((N, 3), dtype=dtype)
lab2[:, 0] = data["L2"]
lab2[:, 1] = data["a2"]
lab2[:, 2] = data["b2"]
lab1 = cp.moveaxis(cp.asarray(lab1), source=-1, destination=channel_axis)
lab2 = cp.moveaxis(cp.asarray(lab2), source=-1, destination=channel_axis)
dE2 = deltaE_cie76(lab1, lab2, channel_axis=channel_axis)
assert dE2.dtype == _supported_float_type(dtype)
# fmt: off
oracle = cp.asarray([
4.00106328, 6.31415011, 9.1776999, 2.06270077, 2.36957073,
2.91529271, 2.23606798, 2.23606798, 4.98000036, 4.9800004,
4.98000044, 4.98000049, 4.98000036, 4.9800004, 4.98000044,
3.53553391, 36.86800781, 31.91002977, 30.25309901, 27.40894015,
0.89242934, 0.7972, 0.8583065, 0.82982507, 3.1819238,
2.21334297, 1.53890382, 4.60630929, 6.58467989, 3.88641412,
1.50514845, 2.3237848, 0.94413208, 1.31910843
])
# fmt: on
rtol = 1e-5 if dtype == cp.float32 else 1e-8
assert_allclose(dE2, oracle, rtol=rtol)
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
def test_ciede94(dtype, channel_axis):
data = load_ciede2000_data()
N = len(data)
lab1 = np.zeros((N, 3), dtype=dtype)
lab1[:, 0] = data["L1"]
lab1[:, 1] = data["a1"]
lab1[:, 2] = data["b1"]
lab2 = np.zeros((N, 3), dtype=dtype)
lab2[:, 0] = data["L2"]
lab2[:, 1] = data["a2"]
lab2[:, 2] = data["b2"]
lab1 = cp.moveaxis(cp.asarray(lab1), source=-1, destination=channel_axis)
lab2 = cp.moveaxis(cp.asarray(lab2), source=-1, destination=channel_axis)
dE2 = deltaE_ciede94(lab1, lab2, channel_axis=channel_axis)
assert dE2.dtype == _supported_float_type(dtype)
# fmt: off
oracle = cp.asarray([
1.39503887, 1.93410055, 2.45433566, 0.68449187, 0.6695627,
0.69194527, 2.23606798, 2.03163832, 4.80069441, 4.80069445,
4.80069449, 4.80069453, 4.80069441, 4.80069445, 4.80069449,
3.40774352, 34.6891632, 29.44137328, 27.91408781, 24.93766082,
0.82213163, 0.71658427, 0.8048753, 0.75284394, 1.39099471,
1.24808929, 1.29795787, 1.82045088, 2.55613309, 1.42491303,
1.41945261, 2.3225685, 0.93853308, 1.30654464
])
# fmt: on
rtol = 1e-5 if dtype == cp.float32 else 1e-8
assert_allclose(dE2, oracle, rtol=rtol)
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
@pytest.mark.parametrize("dtype", [cp.float32, cp.float64])
def test_cmc(dtype, channel_axis):
data = load_ciede2000_data()
N = len(data)
lab1 = np.zeros((N, 3), dtype=dtype)
lab1[:, 0] = data["L1"]
lab1[:, 1] = data["a1"]
lab1[:, 2] = data["b1"]
lab2 = np.zeros((N, 3), dtype=dtype)
lab2[:, 0] = data["L2"]
lab2[:, 1] = data["a2"]
lab2[:, 2] = data["b2"]
lab1 = cp.moveaxis(cp.asarray(lab1), source=-1, destination=channel_axis)
lab2 = cp.moveaxis(cp.asarray(lab2), source=-1, destination=channel_axis)
dE2 = deltaE_cmc(lab1, lab2, channel_axis=channel_axis)
assert dE2.dtype == _supported_float_type(dtype)
# fmt: off
oracle = cp.asarray([
1.73873611, 2.49660844, 3.30494501, 0.85735576, 0.88332927,
0.97822692, 3.50480874, 2.87930032, 6.5783807, 6.57838075,
6.5783808, 6.57838086, 6.67492321, 6.67492326, 6.67492331,
4.66852997, 42.10875485, 39.45889064, 38.36005919, 33.93663807,
1.14400168, 1.00600419, 1.11302547, 1.05335328, 1.42822951,
1.2548143, 1.76838061, 2.02583367, 3.08695508, 1.74893533,
1.90095165, 1.70258148, 1.80317207, 2.44934417
])
# fmt: on
rtol = 1e-5 if dtype == cp.float32 else 1e-8
assert_allclose(dE2, oracle, rtol=rtol)
# Equal or nearly equal colors can make `delta_e.get_dH2` return
# negative values, resulting in NaNs when passed to sqrt (see issue
# #1908 on GitHub):
lab1 = lab2
expected = cp.zeros_like(oracle)
assert_array_almost_equal(
deltaE_cmc(lab1, lab2, channel_axis=channel_axis), expected, decimal=6
)
lab2[0, 0] += cp.finfo(float).eps
assert_array_almost_equal(
deltaE_cmc(lab1, lab2, channel_axis=channel_axis), expected, decimal=6
)
def test_cmc_single_item():
# Single item case:
lab1 = lab2 = cp.array([0.0, 1.59607713, 0.87755709])
assert_array_equal(deltaE_cmc(lab1, lab2), 0)
lab2[0] += cp.finfo(float).eps
assert_array_equal(deltaE_cmc(lab1, lab2), 0)
def test_single_color_cie76():
lab1 = cp.array((0.5, 0.5, 0.5))
lab2 = cp.array((0.4, 0.4, 0.4))
deltaE_cie76(lab1, lab2)
def test_single_color_ciede94():
lab1 = cp.array((0.5, 0.5, 0.5))
lab2 = cp.array((0.4, 0.4, 0.4))
deltaE_ciede94(lab1, lab2)
def test_single_color_ciede2000():
lab1 = cp.array((0.5, 0.5, 0.5))
lab2 = cp.array((0.4, 0.4, 0.4))
with expected_warnings(["The numerical accuracy of this function"]):
deltaE_ciede2000(lab1, lab2)
def test_single_color_cmc():
lab1 = cp.array((0.5, 0.5, 0.5))
lab2 = cp.array((0.4, 0.4, 0.4))
deltaE_cmc(lab1, lab2)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/tests/test_colorlabel.py
|
import itertools
import cupy as cp
import numpy as np
import pytest
from cupy.testing import assert_array_almost_equal, assert_array_equal
from numpy.testing import assert_no_warnings
from cucim.skimage._shared.testing import expected_warnings
from cucim.skimage.color.colorlabel import hsv2rgb, label2rgb, rgb2hsv
def test_shape_mismatch():
image = cp.ones((3, 3))
label = cp.ones((2, 2))
with pytest.raises(ValueError):
label2rgb(image, label, bg_label=-1)
def test_wrong_kind():
label = cp.ones((3, 3))
# Must not raise an error.
label2rgb(label, bg_label=-1)
# kind='foo' is wrong.
with pytest.raises(ValueError):
label2rgb(label, kind="foo", bg_label=-1)
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
def test_uint_image(channel_axis):
img = cp.random.randint(0, 255, (10, 10), dtype=cp.uint8)
labels = cp.zeros((10, 10), dtype=cp.int64)
labels[1:3, 1:3] = 1
labels[6:9, 6:9] = 2
output = label2rgb(labels, image=img, bg_label=0, channel_axis=channel_axis)
# Make sure that the output is made of floats and in the correct range
assert cp.issubdtype(output.dtype, cp.floating)
assert output.max() <= 1
# size 3 (RGB) along the specified channel_axis
new_axis = channel_axis % output.ndim
assert output.shape[new_axis] == 3
def test_rgb():
image = cp.ones((1, 3))
label = cp.arange(3).reshape(1, -1)
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
# Set alphas just in case the defaults change
rgb = label2rgb(
label, image=image, colors=colors, alpha=1, image_alpha=1, bg_label=-1
)
assert_array_almost_equal(rgb, [colors])
def test_alpha():
image = cp.random.uniform(size=(3, 3))
label = cp.random.randint(0, 9, size=(3, 3))
# If we set `alpha = 0`, then rgb should match image exactly.
rgb = label2rgb(label, image=image, alpha=0, image_alpha=1, bg_label=-1)
assert_array_almost_equal(rgb[..., 0], image)
assert_array_almost_equal(rgb[..., 1], image)
assert_array_almost_equal(rgb[..., 2], image)
def test_no_input_image():
label = cp.arange(3).reshape(1, -1)
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
rgb = label2rgb(label, colors=colors, bg_label=-1)
assert_array_almost_equal(rgb, [colors])
def test_image_alpha():
image = cp.random.uniform(size=(1, 3))
label = cp.arange(3).reshape(1, -1)
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
# If we set `image_alpha = 0`, then rgb should match label colors exactly.
rgb = label2rgb(
label, image=image, colors=colors, alpha=1, image_alpha=0, bg_label=-1
)
assert_array_almost_equal(rgb, [colors])
def test_color_names():
image = cp.ones((1, 3))
label = cp.arange(3).reshape(1, -1)
cnames = ["red", "lime", "blue"]
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1)]
# Set alphas just in case the defaults change
rgb = label2rgb(
label, image=image, colors=cnames, alpha=1, image_alpha=1, bg_label=-1
)
assert_array_almost_equal(rgb, [colors])
def test_bg_and_color_cycle():
image = cp.zeros((1, 10)) # dummy image
label = cp.arange(10).reshape(1, -1)
colors = [(1, 0, 0), (0, 0, 1)]
bg_color = (0, 0, 0)
rgb = label2rgb(
label,
image=image,
bg_label=0,
bg_color=bg_color,
colors=colors,
alpha=1,
)
assert_array_almost_equal(rgb[0, 0], bg_color)
for pixel, color in zip(rgb[0, 1:], itertools.cycle(colors)):
assert_array_almost_equal(pixel, color)
def test_negative_labels():
labels = cp.array([0, -1, -2, 0])
rout = cp.array(
[(0.0, 0.0, 0.0), (0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (0.0, 0.0, 0.0)]
)
assert_array_almost_equal(
rout, label2rgb(labels, bg_label=0, alpha=1, image_alpha=1)
)
def test_nonconsecutive():
labels = cp.array([0, 2, 4, 0])
colors = [(1, 0, 0), (0, 0, 1)]
rout = cp.array(
[(1.0, 0.0, 0.0), (0.0, 0.0, 1.0), (1.0, 0.0, 0.0), (1.0, 0.0, 0.0)]
)
assert_array_almost_equal(
rout,
label2rgb(labels, colors=colors, alpha=1, image_alpha=1, bg_label=-1),
)
def test_label_consistency():
"""Assert that the same labels map to the same colors."""
label_1 = cp.arange(5).reshape(1, -1)
label_2 = cp.array([0, 1])
colors = [(1, 0, 0), (0, 1, 0), (0, 0, 1), (1, 1, 0), (1, 0, 1)]
# Set alphas just in case the defaults change
rgb_1 = label2rgb(label_1, colors=colors, bg_label=-1)
rgb_2 = label2rgb(label_2, colors=colors, bg_label=-1)
for label_id in label_2.ravel():
assert_array_almost_equal(
rgb_1[label_1 == label_id], rgb_2[label_2 == label_id]
)
def test_leave_labels_alone():
labels = cp.array([-1, 0, 1])
labels_saved = labels.copy()
label2rgb(labels, bg_label=-1)
label2rgb(labels, bg_label=1)
assert_array_equal(labels, labels_saved)
# TODO: diagnose test error that occurs only with CUB enabled: CuPy bug?
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
def test_avg(channel_axis):
# label image
# fmt: off
label_field = cp.asarray([[1, 1, 1, 2],
[1, 2, 2, 2],
[3, 3, 4, 4]], dtype=np.uint8)
# color image
r = cp.asarray([[1., 1., 0., 0.],
[0., 0., 1., 1.],
[0., 0., 0., 0.]])
g = cp.asarray([[0., 0., 0., 1.],
[1., 1., 1., 0.],
[0., 0., 0., 0.]])
b = cp.asarray([[0., 0., 0., 1.],
[0., 1., 1., 1.],
[0., 0., 1., 1.]])
image = cp.dstack((r, g, b))
# reference label-colored image
rout = cp.asarray([[0.5, 0.5, 0.5, 0.5],
[0.5, 0.5, 0.5, 0.5],
[0. , 0. , 0. , 0. ]]) # noqa
gout = cp.asarray([[0.25, 0.25, 0.25, 0.75],
[0.25, 0.75, 0.75, 0.75],
[0. , 0. , 0. , 0. ]]) # noqa
bout = cp.asarray([[0. , 0. , 0. , 1. ], # noqa
[0. , 1. , 1. , 1. ], # noqa
[0.0, 0.0, 1.0, 1.0]]) # noqa
expected_out = cp.dstack((rout, gout, bout))
# test standard averaging
_image = cp.moveaxis(image, source=-1, destination=channel_axis)
out = label2rgb(label_field, _image, kind='avg', bg_label=-1,
channel_axis=channel_axis)
out = cp.moveaxis(out, source=channel_axis, destination=-1)
assert_array_equal(out, expected_out)
# test averaging with custom background value
out_bg = label2rgb(label_field, _image, bg_label=2, bg_color=(0, 0, 0),
kind='avg', channel_axis=channel_axis)
out_bg = cp.moveaxis(out_bg, source=channel_axis, destination=-1)
expected_out_bg = expected_out.copy()
expected_out_bg[label_field == 2] = 0
assert_array_equal(out_bg, expected_out_bg)
# test default background color
out_bg = label2rgb(label_field, _image, bg_label=2, kind='avg',
channel_axis=channel_axis)
out_bg = cp.moveaxis(out_bg, source=channel_axis, destination=-1)
assert_array_equal(out_bg, expected_out_bg)
def test_negative_intensity():
labels = cp.arange(100).reshape(10, 10)
image = cp.full((10, 10), -1, dtype="float64")
with pytest.warns(UserWarning):
label2rgb(labels, image, bg_label=-1)
def test_bg_color_rgb_string():
img = np.random.randint(0, 255, (10, 10), dtype=np.uint8)
labels = np.zeros((10, 10), dtype=np.int64)
labels[1:3, 1:3] = 1
labels[6:9, 6:9] = 2
img = cp.asarray(img)
labels = cp.asarray(labels)
output = label2rgb(labels, image=img, alpha=0.9, bg_label=0, bg_color="red")
assert output[0, 0, 0] > 0.9 # red channel
def test_avg_with_2d_image():
img = np.random.randint(0, 255, (10, 10), dtype=np.uint8)
labels = np.zeros((10, 10), dtype=np.int64)
labels[1:3, 1:3] = 1
labels[6:9, 6:9] = 2
img = cp.asarray(img)
labels = cp.asarray(labels)
assert_no_warnings(label2rgb, labels, image=img, bg_label=0, kind="avg")
@pytest.mark.parametrize("image_type", ["rgb", "gray", None])
def test_label2rgb_nd(image_type):
# validate 1D and 3D cases by testing their output relative to the 2D case
shape = (10, 10)
if image_type == "rgb":
img = cp.random.randint(0, 255, shape + (3,), dtype=np.uint8)
elif image_type == "gray":
img = cp.random.randint(0, 255, shape, dtype=np.uint8)
else:
img = None
# add a couple of rectangular labels
labels = cp.zeros(shape, dtype=np.int64)
# Note: Have to choose labels here so that the 1D slice below also contains
# both label values. Otherwise the labeled colors will not match.
labels[2:-2, 1:3] = 1
labels[3:-3, 6:9] = 2
# label in the 2D case (correct 2D output is tested in other functions)
labeled_2d = label2rgb(labels, image=img, bg_label=0)
# labeling a single line gives an equivalent result
image_1d = img[5] if image_type is not None else None
labeled_1d = label2rgb(labels[5], image=image_1d, bg_label=0)
expected = labeled_2d[5]
assert_array_equal(labeled_1d, expected)
# Labeling a 3D stack of duplicates gives the same result in each plane
image_3d = cp.stack((img,) * 4) if image_type is not None else None
labels_3d = cp.stack((labels,) * 4)
labeled_3d = label2rgb(labels_3d, image=image_3d, bg_label=0)
for labeled_plane in labeled_3d:
assert_array_equal(labeled_plane, labeled_2d)
def test_label2rgb_shape_errors():
img = cp.random.randint(0, 255, (10, 10, 3), dtype=np.uint8)
labels = cp.zeros((10, 10), dtype=np.int64)
labels[2:5, 2:5] = 1
# mismatched 2D shape
with pytest.raises(ValueError):
label2rgb(labels, img[1:])
# too many axes in img
with pytest.raises(ValueError):
label2rgb(labels, img[..., np.newaxis])
# too many channels along the last axis
with pytest.raises(ValueError):
label2rgb(labels, np.concatenate((img, img), axis=-1))
def test_overlay_full_saturation():
rgb_img = cp.random.uniform(size=(10, 10, 3))
labels = cp.ones((10, 10), dtype=np.int64)
labels[5:, 5:] = 2
labels[:3, :3] = 0
alpha = 0.3
rgb = label2rgb(
labels, image=rgb_img, alpha=alpha, bg_label=0, saturation=1
)
# check that rgb part of input image is preserved, where labels=0
assert_array_almost_equal(rgb_img[:3, :3] * (1 - alpha), rgb[:3, :3])
def test_overlay_custom_saturation():
rgb_img = cp.random.uniform(size=(10, 10, 3))
labels = cp.ones((10, 10), dtype=np.int64)
labels[5:, 5:] = 2
labels[:3, :3] = 0
alpha = 0.3
saturation = 0.3
rgb = label2rgb(
labels, image=rgb_img, alpha=alpha, bg_label=0, saturation=saturation
)
hsv = rgb2hsv(rgb_img)
hsv[..., 1] *= saturation
saturated_img = hsv2rgb(hsv)
# check that rgb part of input image is saturated, where labels=0
assert_array_almost_equal(saturated_img[:3, :3] * (1 - alpha), rgb[:3, :3])
def test_saturation_warning():
rgb_img = cp.random.uniform(size=(10, 10, 3))
labels = cp.ones((10, 10), dtype=np.int64)
with expected_warnings(["saturation must be in range"]):
label2rgb(labels, image=rgb_img, bg_label=0, saturation=2)
with expected_warnings(["saturation must be in range"]):
label2rgb(labels, image=rgb_img, bg_label=0, saturation=-1)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/tests/test_colorconv.py
|
"""Tests for color conversion functions.
Authors
-------
- the rgb2hsv test was written by Nicolas Pinto, 2009
- other tests written by Ralf Gommers, 2009
:license: modified BSD
"""
import colorsys
import os
import cupy as cp
import numpy as np
import pytest
from cupy.testing import (
assert_allclose,
assert_array_almost_equal,
assert_array_equal,
)
from numpy.testing import assert_equal
from skimage import data
from cucim.skimage._shared._warnings import expected_warnings
from cucim.skimage._shared.utils import _supported_float_type, slice_at_axis
from cucim.skimage.color import (
combine_stains,
convert_colorspace,
gray2rgb,
gray2rgba,
hed2rgb,
hsv2rgb,
lab2lch,
lab2rgb,
lab2xyz,
lch2lab,
luv2rgb,
luv2xyz,
rgb2gray,
rgb2hed,
rgb2hsv,
rgb2lab,
rgb2luv,
rgb2rgbcie,
rgb2xyz,
rgb2ycbcr,
rgb2ydbdr,
rgb2yiq,
rgb2ypbpr,
rgb2yuv,
rgba2rgb,
rgbcie2rgb,
separate_stains,
xyz2lab,
xyz2luv,
xyz2rgb,
ycbcr2rgb,
ydbdr2rgb,
yiq2rgb,
ypbpr2rgb,
yuv2rgb,
)
from cucim.skimage.util import img_as_float, img_as_float32, img_as_ubyte
data_dir = os.path.join(os.path.dirname(__file__), "data")
class TestColorconv:
img_rgb = cp.asarray(data.colorwheel())
img_grayscale = cp.asarray(data.camera())
# fmt: off
img_rgba = cp.array([[[0, 0.5, 1, 0],
[0, 0.5, 1, 1],
[0, 0.5, 1, 0.5]]]).astype(float)
img_stains = img_as_float(img_rgb) * 0.3
colbars = cp.array([[1, 1, 0, 0, 1, 1, 0, 0],
[1, 1, 1, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 1, 0]]).astype(float)
colbars_array = cp.swapaxes(colbars.reshape(3, 4, 2), 0, 2)
colbars_point75 = colbars * 0.75
colbars_point75_array = cp.swapaxes(colbars_point75.reshape(3, 4, 2), 0, 2)
xyz_array = cp.asarray([[[0.4124, 0.21260, 0.01930]], # red
[[0, 0, 0]], # black
[[.9505, 1., 1.089]], # white
[[.1805, .0722, .9505]], # blue
[[.07719, .15438, .02573]], # green
])
lab_array = cp.asarray([[[53.233, 80.109, 67.220]], # red
[[0., 0., 0.]], # black
[[100.0, 0.005, -0.010]], # white
[[32.303, 79.197, -107.864]], # blue
[[46.229, -51.7, 49.898]], # green
])
luv_array = cp.asarray([[[53.233, 175.053, 37.751]], # red
[[0., 0., 0.]], # black
[[100., 0.001, -0.017]], # white
[[32.303, -9.400, -130.358]], # blue
[[46.228, -43.774, 56.589]], # green
])
# fmt: on
# RGBA to RGB
@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1, -2, -3])
def test_rgba2rgb_conversion(self, channel_axis):
rgba = self.img_rgba
rgba = cp.moveaxis(rgba, source=-1, destination=channel_axis)
rgb = rgba2rgb(rgba, channel_axis=channel_axis)
rgb = cp.moveaxis(rgb, source=channel_axis, destination=-1)
# fmt: off
expected = cp.asarray([[[1, 1, 1],
[0, 0.5, 1],
[0.5, 0.75, 1]]]).astype(float)
# fmt: on
assert_equal(rgb.shape, expected.shape)
assert_array_almost_equal(rgb, expected)
def test_rgba2rgb_error_grayscale(self):
with pytest.raises(ValueError):
rgba2rgb(self.img_grayscale)
@pytest.mark.parametrize("channel_axis", [None, 1.5])
def test_rgba2rgb_error_channel_axis_invalid(self, channel_axis):
with pytest.raises(TypeError):
rgba2rgb(self.img_rgba, channel_axis=channel_axis)
@pytest.mark.parametrize("channel_axis", [-4, 3])
def test_rgba2rgb_error_channel_axis_out_of_range(self, channel_axis):
with pytest.raises(np.AxisError):
rgba2rgb(self.img_rgba, channel_axis=channel_axis)
def test_rgba2rgb_error_rgb(self):
with pytest.raises(ValueError):
rgba2rgb(self.img_rgb)
def test_rgba2rgb_dtype(self):
rgba = self.img_rgba.astype("float64")
rgba32 = img_as_float32(rgba)
assert rgba2rgb(rgba).dtype == rgba.dtype
assert rgba2rgb(rgba32).dtype == rgba32.dtype
# RGB to HSV
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_rgb2hsv_conversion(self, channel_axis):
rgb = img_as_float(self.img_rgb)[::16, ::16]
_rgb = cp.moveaxis(rgb, source=-1, destination=channel_axis)
hsv = rgb2hsv(_rgb, channel_axis=channel_axis)
hsv = cp.moveaxis(hsv, source=channel_axis, destination=-1)
hsv = hsv.reshape(-1, 3)
# ground truth from colorsys
gt = np.asarray(
[
colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
for pt in cp.asnumpy(rgb).reshape(-1, 3)
]
)
assert_array_almost_equal(hsv, gt)
def test_rgb2hsv_error_grayscale(self):
with pytest.raises(ValueError):
rgb2hsv(self.img_grayscale)
def test_rgb2hsv_dtype(self):
rgb = img_as_float(self.img_rgb)
rgb32 = img_as_float32(self.img_rgb)
assert rgb2hsv(rgb).dtype == rgb.dtype
assert rgb2hsv(rgb32).dtype == rgb32.dtype
# HSV to RGB
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_hsv2rgb_conversion(self, channel_axis):
rgb = self.img_rgb.astype("float32")[::16, ::16]
# create HSV image with colorsys
hsv = cp.asarray(
[
colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
for pt in rgb.reshape(-1, 3).get()
]
).reshape(rgb.shape)
hsv = np.moveaxis(hsv, source=-1, destination=channel_axis)
_rgb = hsv2rgb(hsv, channel_axis=channel_axis)
_rgb = np.moveaxis(_rgb, source=channel_axis, destination=-1)
# convert back to RGB and compare with original.
# relative precision for RGB -> HSV roundtrip is about 1e-6
assert_array_almost_equal(rgb, _rgb, decimal=4)
def test_hsv2rgb_error_grayscale(self):
with pytest.raises(ValueError):
hsv2rgb(self.img_grayscale)
def test_hsv2rgb_dtype(self):
rgb = self.img_rgb.astype("float32")[::16, ::16]
# create HSV image with colorsys
hsv = cp.asarray(
[
colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
for pt in rgb.reshape(-1, 3).get()
],
dtype="float64",
).reshape(rgb.shape)
hsv32 = hsv.astype("float32")
assert hsv2rgb(hsv).dtype == hsv.dtype
assert hsv2rgb(hsv32).dtype == hsv32.dtype
# RGB to XYZ
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_rgb2xyz_conversion(self, channel_axis):
# fmt: off
gt = cp.asarray([[[0.950456, 1. , 1.088754], # noqa
[0.538003, 0.787329, 1.06942 ], # noqa
[0.592876, 0.28484 , 0.969561], # noqa
[0.180423, 0.072169, 0.950227]], # noqa
[[0.770033, 0.927831, 0.138527], # noqa
[0.35758 , 0.71516 , 0.119193], # noqa
[0.412453, 0.212671, 0.019334], # noqa
[0. , 0. , 0. ]]]) # noqa
# fmt: on
img = cp.moveaxis(
self.colbars_array, source=-1, destination=channel_axis
)
out = rgb2xyz(img, channel_axis=channel_axis)
out = cp.moveaxis(out, source=channel_axis, destination=-1)
assert_array_almost_equal(out, gt)
# stop repeating the "raises" checks for all other functions that are
# implemented with color._convert()
def test_rgb2xyz_error_grayscale(self):
with pytest.raises(ValueError):
rgb2xyz(self.img_grayscale)
def test_rgb2xyz_dtype(self):
img = self.colbars_array
img32 = img.astype("float32")
assert rgb2xyz(img).dtype == img.dtype
assert rgb2xyz(img32).dtype == img32.dtype
# XYZ to RGB
def test_xyz2rgb_conversion(self):
assert_array_almost_equal(
xyz2rgb(rgb2xyz(self.colbars_array)), self.colbars_array
)
def test_xyz2rgb_dtype(self):
img = rgb2xyz(self.colbars_array)
img32 = img.astype("float32")
assert xyz2rgb(img).dtype == img.dtype
assert xyz2rgb(img32).dtype == img32.dtype
# RGB<->XYZ roundtrip on another image
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_xyz_rgb_roundtrip(self, channel_axis):
img_rgb = img_as_float(self.img_rgb)
img_rgb = cp.moveaxis(img_rgb, source=-1, destination=channel_axis)
round_trip = xyz2rgb(
rgb2xyz(img_rgb, channel_axis=channel_axis),
channel_axis=channel_axis,
)
assert_allclose(round_trip, img_rgb, rtol=1e-5, atol=1e-5)
# RGB<->HED roundtrip with ubyte image
def test_hed_rgb_roundtrip(self):
img_in = img_as_ubyte(self.img_stains)
img_out = rgb2hed(hed2rgb(img_in))
assert_array_equal(img_as_ubyte(img_out), img_in)
# HED<->RGB roundtrip with float image
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_hed_rgb_float_roundtrip(self, channel_axis):
img_in = self.img_stains
img_in = cp.moveaxis(img_in, source=-1, destination=channel_axis)
img_out = rgb2hed(
hed2rgb(img_in, channel_axis=channel_axis),
channel_axis=channel_axis,
)
assert_array_almost_equal(img_out, img_in)
# RGB<->BRO roundtrip with ubyte image
def test_bro_rgb_roundtrip(self):
from cucim.skimage.color.colorconv import bro_from_rgb, rgb_from_bro
img_in = img_as_ubyte(self.img_stains)
img_out = combine_stains(img_in, rgb_from_bro)
img_out = separate_stains(img_out, bro_from_rgb)
assert_array_equal(img_as_ubyte(img_out), img_in)
# BRO<->RGB roundtrip with float image
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
def test_bro_rgb_roundtrip_float(self, channel_axis):
from skimage.color.colorconv import bro_from_rgb, rgb_from_bro
img_in = self.img_stains
img_in = cp.moveaxis(img_in, source=-1, destination=channel_axis)
img_out = combine_stains(
img_in, rgb_from_bro, channel_axis=channel_axis
)
img_out = separate_stains(
img_out, bro_from_rgb, channel_axis=channel_axis
)
assert_array_almost_equal(img_out, img_in)
# RGB to RGB CIE
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_rgb2rgbcie_conversion(self, channel_axis):
# fmt: off
gt = cp.asarray([[[ 0.1488856 , 0.18288098, 0.19277574], # noqa
[ 0.01163224, 0.16649536, 0.18948516], # noqa
[ 0.12259182, 0.03308008, 0.17298223], # noqa
[-0.01466154, 0.01669446, 0.16969164]], # noqa
[[ 0.16354714, 0.16618652, 0.0230841 ], # noqa
[ 0.02629378, 0.1498009 , 0.01979351], # noqa
[ 0.13725336, 0.01638562, 0.00329059], # noqa
[ 0. , 0. , 0. ]]]) # noqa
# fmt: on
img = np.moveaxis(
self.colbars_array, source=-1, destination=channel_axis
)
out = rgb2rgbcie(img, channel_axis=channel_axis)
out = np.moveaxis(out, source=channel_axis, destination=-1)
assert_array_almost_equal(out, gt)
def test_rgb2rgbcie_dtype(self):
img = self.colbars_array.astype("float64")
img32 = img.astype("float32")
assert rgb2rgbcie(img).dtype == img.dtype
assert rgb2rgbcie(img32).dtype == img32.dtype
# RGB CIE to RGB
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_rgbcie2rgb_conversion(self, channel_axis):
rgb = cp.moveaxis(
self.colbars_array, source=-1, destination=channel_axis
)
round_trip = rgbcie2rgb(
rgb2rgbcie(rgb, channel_axis=channel_axis),
channel_axis=channel_axis,
)
# only roundtrip test, we checked rgb2rgbcie above already
assert_array_almost_equal(round_trip, rgb)
def test_rgbcie2rgb_dtype(self):
img = rgb2rgbcie(self.colbars_array).astype("float64")
img32 = img.astype("float32")
assert rgbcie2rgb(img).dtype == img.dtype
assert rgbcie2rgb(img32).dtype == img32.dtype
@pytest.mark.parametrize("channel_axis", [0, -1])
def test_convert_colorspace(self, channel_axis):
colspaces = ["HSV", "RGB CIE", "XYZ", "YCbCr", "YPbPr", "YDbDr"]
colfuncs_from = [
hsv2rgb,
rgbcie2rgb,
xyz2rgb,
ycbcr2rgb,
ypbpr2rgb,
ydbdr2rgb,
]
colfuncs_to = [
rgb2hsv,
rgb2rgbcie,
rgb2xyz,
rgb2ycbcr,
rgb2ypbpr,
rgb2ydbdr,
]
colbars_array = cp.moveaxis(
self.colbars_array, source=-1, destination=channel_axis
)
kw = dict(channel_axis=channel_axis)
assert_array_almost_equal(
convert_colorspace(colbars_array, "RGB", "RGB", **kw), colbars_array
)
for i, space in enumerate(colspaces):
# print(f"space={space}")
gt = colfuncs_from[i](colbars_array, **kw)
assert_array_almost_equal(
convert_colorspace(colbars_array, space, "RGB", **kw), gt
)
gt = colfuncs_to[i](colbars_array, **kw)
assert_array_almost_equal(
convert_colorspace(colbars_array, "RGB", space, **kw), gt
)
with pytest.raises(ValueError):
convert_colorspace(colbars_array, "nokey", "XYZ", **kw)
with pytest.raises(ValueError):
convert_colorspace(colbars_array, "RGB", "nokey", **kw)
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_rgb2gray(self, channel_axis):
x = cp.array([1, 1, 1]).reshape((1, 1, 3)).astype(float)
x = cp.moveaxis(x, source=-1, destination=channel_axis)
g = rgb2gray(x, channel_axis=channel_axis)
assert_array_almost_equal(g, 1)
assert_array_equal(g.shape, (1, 1))
def test_rgb2gray_contiguous(self):
x = cp.random.rand(10, 10, 3)
assert rgb2gray(x).flags["C_CONTIGUOUS"]
assert rgb2gray(x[:5, :5]).flags["C_CONTIGUOUS"]
def test_rgb2gray_alpha(self):
x = cp.empty((10, 10, 4))
with pytest.raises(ValueError):
rgb2gray(x)
def test_rgb2gray_on_gray(self):
with pytest.raises(ValueError):
rgb2gray(np.empty((5, 5)))
def test_rgb2gray_dtype(self):
img = cp.random.rand(10, 10, 3).astype("float64")
img32 = img.astype("float32")
assert rgb2gray(img).dtype == img.dtype
assert rgb2gray(img32).dtype == img32.dtype
# test matrices for xyz2lab and lab2xyz generated using
# http://www.easyrgb.com/index.php?X=CALC
# Note: easyrgb website displays xyz*100
def test_xyz2lab(self):
assert_array_almost_equal(
xyz2lab(self.xyz_array), self.lab_array, decimal=3
)
# Test the conversion with the rest of the illuminants.
for i in ["A", "B", "C", "d50", "d55", "d65"]:
i = i.lower()
for obs in ["2", "10", "R"]:
obs = obs.lower()
fname = os.path.join(data_dir, f"lab_array_{i}_{obs}.npy")
lab_array_i_obs = np.load(fname)
assert_array_almost_equal(
lab_array_i_obs, xyz2lab(self.xyz_array, i, obs), decimal=2
)
for i in ["d75", "e"]:
fname = os.path.join(data_dir, f"lab_array_{i}_2.npy")
lab_array_i_obs = np.load(fname)
assert_array_almost_equal(
lab_array_i_obs, xyz2lab(self.xyz_array, i, "2"), decimal=2
)
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_xyz2lab_channel_axis(self, channel_axis):
# test conversion with channels along a specified axis
xyz = cp.moveaxis(self.xyz_array, source=-1, destination=channel_axis)
lab = xyz2lab(xyz, channel_axis=channel_axis)
lab = cp.moveaxis(lab, source=channel_axis, destination=-1)
assert_array_almost_equal(lab, self.lab_array, decimal=3)
def test_xyz2lab_dtype(self):
img = self.xyz_array.astype("float64")
img32 = img.astype("float32")
assert xyz2lab(img).dtype == img.dtype
assert xyz2lab(img32).dtype == img32.dtype
def test_lab2xyz(self):
assert_array_almost_equal(
lab2xyz(self.lab_array), self.xyz_array, decimal=3
)
# Test the conversion with the rest of the illuminants.
for i in ["A", "B", "C", "d50", "d55", "d65"]:
i = i.lower()
for obs in ["2", "10", "R"]:
obs = obs.lower()
fname = os.path.join(data_dir, f"lab_array_{i}_{obs}.npy")
lab_array_i_obs = cp.array(np.load(fname))
assert_array_almost_equal(
lab2xyz(lab_array_i_obs, i, obs), self.xyz_array, decimal=3
)
for i in ["d75", "e"]:
fname = os.path.join(data_dir, f"lab_array_{i}_2.npy")
lab_array_i_obs = cp.array(np.load(fname))
assert_array_almost_equal(
lab2xyz(lab_array_i_obs, i, "2"), self.xyz_array, decimal=3
)
# And we include a call to test the exception handling in the code.
with pytest.raises(ValueError):
lab2xyz(lab_array_i_obs, "NaI", "2") # Not an illuminant
with pytest.raises(ValueError):
lab2xyz(lab_array_i_obs, "d50", "42") # Not a degree
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_lab2xyz_channel_axis(self, channel_axis):
# test conversion with channels along a specified axis
lab = cp.moveaxis(self.lab_array, source=-1, destination=channel_axis)
xyz = lab2xyz(lab, channel_axis=channel_axis)
xyz = cp.moveaxis(xyz, source=channel_axis, destination=-1)
assert_array_almost_equal(xyz, self.xyz_array, decimal=3)
def test_lab2xyz_dtype(self):
img = self.lab_array.astype("float64")
img32 = img.astype("float32")
assert lab2xyz(img).dtype == img.dtype
assert lab2xyz(img32).dtype == img32.dtype
def test_rgb2lab_brucelindbloom(self):
"""
Test the RGB->Lab conversion by comparing to the calculator on the
authoritative Bruce Lindbloom
[website](http://brucelindbloom.com/index.html?ColorCalculator.html).
"""
# Obtained with D65 white point, sRGB model and gamma
# fmt: off
gt_for_colbars = cp.asarray([
[100, 0, 0],
[97.1393, -21.5537, 94.4780],
[91.1132, -48.0875, -14.1312],
[87.7347, -86.1827, 83.1793],
[60.3242, 98.2343, -60.8249],
[53.2408, 80.0925, 67.2032],
[32.2970, 79.1875, -107.8602],
[0, 0, 0]]).T
# fmt: on
gt_array = cp.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
assert_array_almost_equal(
rgb2lab(self.colbars_array), gt_array, decimal=2
)
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_lab_rgb_roundtrip(self, channel_axis):
img_rgb = img_as_float(self.img_rgb)
img_rgb = cp.moveaxis(img_rgb, source=-1, destination=channel_axis)
assert_allclose(
lab2rgb(
rgb2lab(img_rgb, channel_axis=channel_axis),
channel_axis=channel_axis,
),
img_rgb,
rtol=1e-5,
atol=1e-5,
)
def test_rgb2lab_dtype(self):
img = self.colbars_array.astype("float64")
img32 = img.astype("float32")
assert rgb2lab(img).dtype == img.dtype
assert rgb2lab(img32).dtype == img32.dtype
def test_lab2rgb_dtype(self):
img = self.lab_array.astype("float64")
img32 = img.astype("float32")
assert lab2rgb(img).dtype == img.dtype
assert lab2rgb(img32).dtype == img32.dtype
# test matrices for xyz2luv and luv2xyz generated using
# http://www.easyrgb.com/index.php?X=CALC
# Note: easyrgb website displays xyz*100
def test_xyz2luv(self):
assert_array_almost_equal(
xyz2luv(self.xyz_array), self.luv_array, decimal=3
)
# Test the conversion with the rest of the illuminants.
for i in ["A", "B", "C", "d50", "d55", "d65"]:
i = i.lower()
for obs in ["2", "10", "R"]:
obs = obs.lower()
fname = os.path.join(data_dir, f"luv_array_{i}_{obs}.npy")
luv_array_i_obs = np.load(fname)
assert_array_almost_equal(
luv_array_i_obs, xyz2luv(self.xyz_array, i, obs), decimal=2
)
for i in ["d75", "e"]:
fname = os.path.join(data_dir, f"luv_array_{i}_2.npy")
luv_array_i_obs = np.load(fname)
assert_array_almost_equal(
luv_array_i_obs, xyz2luv(self.xyz_array, i, "2"), decimal=2
)
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_xyz2luv_channel_axis(self, channel_axis):
# test conversion with channels along a specified axis
xyz = cp.moveaxis(self.xyz_array, source=-1, destination=channel_axis)
luv = xyz2luv(xyz, channel_axis=channel_axis)
luv = cp.moveaxis(luv, source=channel_axis, destination=-1)
assert_array_almost_equal(luv, self.luv_array, decimal=3)
def test_xyz2luv_dtype(self):
img = self.xyz_array.astype("float64")
img32 = img.astype("float32")
assert xyz2luv(img).dtype == img.dtype
assert xyz2luv(img32).dtype == img32.dtype
def test_luv2xyz(self):
assert_array_almost_equal(
luv2xyz(self.luv_array), self.xyz_array, decimal=3
)
# Test the conversion with the rest of the illuminants.
for i in ["A", "B", "C", "d50", "d55", "d65"]:
i = i.lower()
for obs in ["2", "10", "R"]:
obs = obs.lower()
fname = os.path.join(data_dir, f"luv_array_{i}_{obs}.npy")
luv_array_i_obs = cp.array(np.load(fname))
assert_array_almost_equal(
luv2xyz(luv_array_i_obs, i, obs), self.xyz_array, decimal=3
)
for i in ["d75", "e"]:
fname = os.path.join(data_dir, f"luv_array_{i}_2.npy")
luv_array_i_obs = cp.array(np.load(fname))
assert_array_almost_equal(
luv2xyz(luv_array_i_obs, i, "2"), self.xyz_array, decimal=3
)
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_luv2xyz_channel_axis(self, channel_axis):
# test conversion with channels along a specified axis
luv = cp.moveaxis(self.luv_array, source=-1, destination=channel_axis)
xyz = luv2xyz(luv, channel_axis=channel_axis)
xyz = cp.moveaxis(xyz, source=channel_axis, destination=-1)
assert_array_almost_equal(xyz, self.xyz_array, decimal=3)
def test_luv2xyz_dtype(self):
img = self.luv_array.astype("float64")
img32 = img.astype("float32")
assert luv2xyz(img).dtype == img.dtype
assert luv2xyz(img32).dtype == img32.dtype
def test_rgb2luv_brucelindbloom(self):
"""
        Test the RGB->Luv conversion by comparing to the calculator on the
authoritative Bruce Lindbloom
[website](http://brucelindbloom.com/index.html?ColorCalculator.html).
"""
# Obtained with D65 white point, sRGB model and gamma
# fmt: off
gt_for_colbars = cp.asarray([
[100, 0, 0],
[97.1393, 7.7056, 106.7866],
[91.1132, -70.4773, -15.2042],
[87.7347, -83.0776, 107.3985],
[60.3242, 84.0714, -108.6834],
[53.2408, 175.0151, 37.7564],
[32.2970, -9.4054, -130.3423],
[0, 0, 0]]).T
# fmt: on
gt_array = cp.swapaxes(gt_for_colbars.reshape(3, 4, 2), 0, 2)
assert_array_almost_equal(
rgb2luv(self.colbars_array), gt_array, decimal=2
)
def test_rgb2luv_dtype(self):
img = self.colbars_array.astype("float64")
img32 = img.astype("float32")
assert rgb2luv(img).dtype == img.dtype
assert rgb2luv(img32).dtype == img32.dtype
def test_luv2rgb_dtype(self):
img = self.luv_array.astype("float64")
img32 = img.astype("float32")
assert luv2rgb(img).dtype == img.dtype
assert luv2rgb(img32).dtype == img32.dtype
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_luv_rgb_roundtrip(self, channel_axis):
img_rgb = img_as_float(self.img_rgb)
img_rgb = cp.moveaxis(img_rgb, source=-1, destination=channel_axis)
assert_allclose(
luv2rgb(
rgb2luv(img_rgb, channel_axis=channel_axis),
channel_axis=channel_axis,
),
img_rgb,
rtol=1e-4,
atol=1e-4,
)
def test_lab_rgb_outlier(self):
lab_array = np.ones((3, 1, 3))
lab_array[0] = [50, -12, 85]
lab_array[1] = [50, 12, -85]
lab_array[2] = [90, -4, -47]
lab_array = cp.asarray(lab_array)
# fmt: off
rgb_array = cp.asarray([[[0.501, 0.481, 0]],
[[0, 0.482, 1.]],
[[0.578, 0.914, 1.]],
])
# fmt: on
assert_array_almost_equal(lab2rgb(lab_array), rgb_array, decimal=3)
def test_lab_full_gamut(self):
a, b = cp.meshgrid(cp.arange(-100, 100), cp.arange(-100, 100))
L = cp.ones(a.shape)
lab = cp.dstack((L, a, b))
regex = (
"Conversion from CIE-LAB to XYZ color space resulted in "
"\\d+ negative Z values that have been clipped to zero"
)
for value in [0, 10, 20]:
lab[:, :, 0] = value
with pytest.warns(UserWarning, match=regex):
lab2xyz(lab)
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_lab_lch_roundtrip(self, channel_axis):
rgb = img_as_float(self.img_rgb)
rgb = cp.moveaxis(rgb, source=-1, destination=channel_axis)
lab = rgb2lab(rgb, channel_axis=channel_axis)
lab2 = lch2lab(
lab2lch(lab, channel_axis=channel_axis),
channel_axis=channel_axis,
)
assert_allclose(lab2, lab, rtol=1e-4, atol=1e-4)
def test_rgb_lch_roundtrip(self):
rgb = img_as_float(self.img_rgb)
lab = rgb2lab(rgb)
lch = lab2lch(lab)
lab2 = lch2lab(lch)
rgb2 = lab2rgb(lab2)
assert_allclose(rgb, rgb2, rtol=1e-4, atol=1e-4)
def test_lab_lch_0d(self):
lab0 = self._get_lab0()
lch0 = lab2lch(lab0)
lch2 = lab2lch(lab0[None, None, :])
assert_array_almost_equal(lch0, lch2[0, 0, :])
def test_lab_lch_1d(self):
lab0 = self._get_lab0()
lch0 = lab2lch(lab0)
lch1 = lab2lch(lab0[None, :])
assert_array_almost_equal(lch0, lch1[0, :])
def test_lab_lch_3d(self):
lab0 = self._get_lab0()
lch0 = lab2lch(lab0)
lch3 = lab2lch(lab0[None, None, None, :])
assert_array_almost_equal(lch0, lch3[0, 0, 0, :])
def _get_lab0(self):
rgb = img_as_float(self.img_rgb[:1, :1, :])
return rgb2lab(rgb)[0, 0, :]
def test_yuv(self):
rgb = cp.asarray([[[1.0, 1.0, 1.0]]])
assert_array_almost_equal(rgb2yuv(rgb), cp.asarray([[[1, 0, 0]]]))
assert_array_almost_equal(rgb2yiq(rgb), cp.asarray([[[1, 0, 0]]]))
assert_array_almost_equal(rgb2ypbpr(rgb), cp.asarray([[[1, 0, 0]]]))
assert_array_almost_equal(
rgb2ycbcr(rgb), cp.asarray([[[235, 128, 128]]])
)
assert_array_almost_equal(rgb2ydbdr(rgb), cp.asarray([[[1, 0, 0]]]))
rgb = cp.asarray([[[0.0, 1.0, 0.0]]])
assert_array_almost_equal(
rgb2yuv(rgb), cp.asarray([[[0.587, -0.28886916, -0.51496512]]])
)
assert_array_almost_equal(
rgb2yiq(rgb), cp.asarray([[[0.587, -0.27455667, -0.52273617]]])
)
assert_array_almost_equal(
rgb2ypbpr(rgb), cp.asarray([[[0.587, -0.331264, -0.418688]]])
)
assert_array_almost_equal(
rgb2ycbcr(rgb), cp.asarray([[[144.553, 53.797, 34.214]]])
)
assert_array_almost_equal(
rgb2ydbdr(rgb), cp.asarray([[[0.587, -0.883, 1.116]]])
)
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_yuv_roundtrip(self, channel_axis):
img_rgb = img_as_float(self.img_rgb)[::16, ::16]
img_rgb = cp.moveaxis(img_rgb, source=-1, destination=channel_axis)
assert_allclose(
yuv2rgb(
rgb2yuv(img_rgb, channel_axis=channel_axis),
channel_axis=channel_axis,
),
img_rgb,
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
yiq2rgb(
rgb2yiq(img_rgb, channel_axis=channel_axis),
channel_axis=channel_axis,
),
img_rgb,
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
ypbpr2rgb(
rgb2ypbpr(img_rgb, channel_axis=channel_axis),
channel_axis=channel_axis,
),
img_rgb,
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
ycbcr2rgb(
rgb2ycbcr(img_rgb, channel_axis=channel_axis),
channel_axis=channel_axis,
),
img_rgb,
rtol=1e-5,
atol=1e-5,
)
assert_allclose(
ydbdr2rgb(
rgb2ydbdr(img_rgb, channel_axis=channel_axis),
channel_axis=channel_axis,
),
img_rgb,
rtol=1e-5,
atol=1e-5,
)
def test_rgb2yuv_dtype(self):
img = self.colbars_array.astype("float64")
img32 = img.astype("float32")
assert rgb2yuv(img).dtype == img.dtype
assert rgb2yuv(img32).dtype == img32.dtype
def test_yuv2rgb_dtype(self):
img = rgb2yuv(self.colbars_array).astype("float64")
img32 = img.astype("float32")
assert yuv2rgb(img).dtype == img.dtype
assert yuv2rgb(img32).dtype == img32.dtype
def test_rgb2yiq_conversion(self):
rgb = img_as_float(self.img_rgb)[::16, ::16]
yiq = rgb2yiq(rgb).reshape(-1, 3)
gt = np.asarray(
[
colorsys.rgb_to_yiq(pt[0], pt[1], pt[2])
for pt in cp.asnumpy(rgb).reshape(-1, 3)
]
)
assert_array_almost_equal(yiq, gt, decimal=2)
@pytest.mark.parametrize("func", [lab2rgb, lab2xyz])
def test_warning_stacklevel(self, func):
regex = (
"Conversion from CIE-LAB.* XYZ.*color space resulted in "
"1 negative Z values that have been clipped to zero"
)
with pytest.warns(UserWarning, match=regex) as messages:
func(lab=cp.array([[[0, 0, 300.0]]]))
assert len(messages) == 1
assert messages[0].filename == __file__, "warning points at wrong file"
def test_gray2rgb():
x = cp.asarray([0, 0.5, 1])
w = gray2rgb(x)
    # fmt: off
    expected_output = cp.asarray([[0, 0, 0], [0.5, 0.5, 0.5], [1, 1, 1]])
    # fmt: on
assert_array_equal(w, expected_output)
x = x.reshape((3, 1))
y = gray2rgb(x)
assert_array_equal(y.shape, (3, 1, 3))
assert_array_equal(y.dtype, x.dtype)
assert_array_equal(y[..., 0], x)
assert_array_equal(y[0, 0, :], [0, 0, 0])
x = cp.asarray([[0, 128, 255]], dtype=np.uint8)
z = gray2rgb(x)
assert_array_equal(z.shape, (1, 3, 3))
assert_array_equal(z[..., 0], x)
assert_array_equal(z[0, 1, :], [128, 128, 128])
def test_gray2rgb_rgb():
x = cp.random.rand(5, 5, 4)
y = gray2rgb(x)
assert y.shape == (x.shape + (3,))
for i in range(3):
assert_array_equal(x, y[..., i])
@pytest.mark.parametrize("shape", [(5, 5), (5, 5, 4), (5, 4, 5, 4)])
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_gray2rgba(shape, channel_axis):
# nD case
img = cp.random.random(shape)
rgba = gray2rgba(img, channel_axis=channel_axis)
assert rgba.ndim == img.ndim + 1
# Shape check
new_axis_loc = channel_axis % rgba.ndim
assert_equal(rgba.shape, shape[:new_axis_loc] + (4,) + shape[new_axis_loc:])
# dtype check
assert rgba.dtype == img.dtype
# RGB channels check
for channel in range(3):
assert_array_equal(rgba[slice_at_axis(channel, axis=new_axis_loc)], img)
# Alpha channel check
assert_array_equal(rgba[slice_at_axis(3, axis=new_axis_loc)], 1.0)
@pytest.mark.parametrize("shape", [(5, 5), (5, 5, 4), (5, 4, 5, 4)])
@pytest.mark.parametrize("channel_axis", [0, 1, -1, -2])
def test_gray2rgb_channel_axis(shape, channel_axis):
# nD case
img = cp.random.random(shape)
rgb = gray2rgb(img, channel_axis=channel_axis)
assert rgb.ndim == img.ndim + 1
# Shape check
new_axis_loc = channel_axis % rgb.ndim
assert_equal(rgb.shape, shape[:new_axis_loc] + (3,) + shape[new_axis_loc:])
# dtype check
assert rgb.dtype == img.dtype
def test_gray2rgba_dtype():
img_f64 = cp.random.random((5, 5))
img_f32 = img_f64.astype("float32")
img_u8 = img_as_ubyte(img_f64)
img_int = img_u8.astype(int)
for img in [img_f64, img_f32, img_u8, img_int]:
assert gray2rgba(img).dtype == img.dtype
def test_gray2rgba_alpha():
img = cp.random.random((5, 5))
img_u8 = img_as_ubyte(img)
# Default
alpha = None
rgba = gray2rgba(img, alpha)
assert_array_equal(rgba[..., :3], gray2rgb(img))
assert_array_equal(rgba[..., 3], 1.0)
# Scalar
alpha = 0.5
rgba = gray2rgba(img, alpha)
assert_array_equal(rgba[..., :3], gray2rgb(img))
assert_array_equal(rgba[..., 3], alpha)
# Array
alpha = cp.random.random((5, 5))
rgba = gray2rgba(img, alpha)
assert_array_equal(rgba[..., :3], gray2rgb(img))
assert_array_equal(rgba[..., 3], alpha)
# Warning about alpha cast
alpha = 0.5
with expected_warnings(["alpha can't be safely cast to image dtype"]):
rgba = gray2rgba(img_u8, alpha)
assert_array_equal(rgba[..., :3], gray2rgb(img_u8))
# Invalid shape
alpha = cp.random.random((5, 5, 1))
expected_err_msg = "alpha.shape must match image.shape"
with pytest.raises(ValueError) as err:
rgba = gray2rgba(img, alpha)
assert expected_err_msg == str(err.value)
@pytest.mark.parametrize("func", [rgb2gray, gray2rgb, gray2rgba])
@pytest.mark.parametrize(
"shape", ([(3,), (2, 3), (4, 5, 3), (5, 4, 5, 3), (4, 5, 4, 5, 3)])
)
def test_nD_gray_conversion(func, shape):
img = cp.random.rand(*shape)
out = func(img)
common_ndim = min(out.ndim, len(shape))
assert out.shape[:common_ndim] == shape[:common_ndim]
@pytest.mark.parametrize(
"func",
[
rgb2hsv,
hsv2rgb,
rgb2xyz,
xyz2rgb,
rgb2hed,
hed2rgb,
rgb2rgbcie,
rgbcie2rgb,
xyz2lab,
lab2xyz,
lab2rgb,
rgb2lab,
xyz2luv,
luv2xyz,
luv2rgb,
rgb2luv,
lab2lch,
lch2lab,
rgb2yuv,
yuv2rgb,
rgb2yiq,
yiq2rgb,
rgb2ypbpr,
ypbpr2rgb,
rgb2ycbcr,
ycbcr2rgb,
rgb2ydbdr,
ydbdr2rgb,
],
)
@pytest.mark.parametrize(
"shape", ([(3,), (2, 3), (4, 5, 3), (5, 4, 5, 3), (4, 5, 4, 5, 3)])
)
def test_nD_color_conversion(func, shape):
img = cp.random.rand(*shape)
out = func(img)
assert out.shape == img.shape
@pytest.mark.parametrize(
"shape", ([(4,), (2, 4), (4, 5, 4), (5, 4, 5, 4), (4, 5, 4, 5, 4)])
)
def test_rgba2rgb_nD(shape):
img = cp.random.rand(*shape)
out = rgba2rgb(img)
expected_shape = shape[:-1] + (3,)
assert out.shape == expected_shape
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_rgba2rgb_dtypes(dtype):
rgba = cp.array(
[[[0, 0.5, 1, 0], [0, 0.5, 1, 1], [0, 0.5, 1, 0.5]]]
).astype(dtype=dtype)
rgb = rgba2rgb(rgba)
float_dtype = _supported_float_type(rgba.dtype)
assert rgb.dtype == float_dtype
expected = cp.array([[[1, 1, 1], [0, 0.5, 1], [0.5, 0.75, 1]]]).astype(
float
)
assert rgb.shape == expected.shape
assert_array_almost_equal(rgb, expected)
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_lab_lch_roundtrip_dtypes(dtype):
rgb = cp.asarray(data.colorwheel())
rgb = img_as_float(rgb).astype(dtype=dtype, copy=False)
lab = rgb2lab(rgb)
float_dtype = _supported_float_type(dtype)
assert lab.dtype == float_dtype
lab2 = lch2lab(lab2lch(lab))
decimal = 4 if float_dtype == cp.float32 else 7
assert_array_almost_equal(lab2, lab, decimal=decimal)
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_rgb2hsv_dtypes(dtype):
rgb = cp.asarray(data.colorwheel())
rgb = img_as_float(rgb)[::16, ::16]
rgb = rgb.astype(dtype=dtype, copy=False)
hsv = rgb2hsv(rgb).reshape(-1, 3)
float_dtype = _supported_float_type(dtype)
assert hsv.dtype == float_dtype
# ground truth from colorsys
gt = cp.asarray(
[
colorsys.rgb_to_hsv(pt[0], pt[1], pt[2])
for pt in cp.asnumpy(rgb).reshape(-1, 3)
]
)
decimal = 3 if float_dtype == cp.float32 else 7
assert_array_almost_equal(hsv, gt, decimal=decimal)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/tests/test_adapt_rgb.py
|
from functools import partial
import cupy as cp
import numpy as np
from skimage import data
from cucim.skimage import color, filters, img_as_float, img_as_uint
from cucim.skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
# Down-sample image for quicker testing.
COLOR_IMAGE = cp.asarray(data.astronaut()[::5, ::6])
GRAY_IMAGE = cp.asarray(data.camera()[::5, ::5])
SIGMA = 3
smooth = partial(filters.gaussian, sigma=SIGMA)
assert_allclose = partial(cp.testing.assert_allclose, atol=1e-8)
@adapt_rgb(each_channel)
def edges_each(image):
return filters.sobel(image)
@adapt_rgb(each_channel)
def smooth_each(image, sigma):
return filters.gaussian(image, sigma)
@adapt_rgb(each_channel)
def mask_each(image, mask):
result = image.copy()
result[mask] = 0
return result
@adapt_rgb(hsv_value)
def edges_hsv(image):
return filters.sobel(image)
@adapt_rgb(hsv_value)
def smooth_hsv(image, sigma):
return filters.gaussian(image, sigma)
@adapt_rgb(hsv_value)
def edges_hsv_uint(image):
return img_as_uint(filters.sobel(image))
def test_gray_scale_image():
# We don't need to test both `hsv_value` and `each_channel` since
# `adapt_rgb` is handling gray-scale inputs.
assert_allclose(edges_each(GRAY_IMAGE), filters.sobel(GRAY_IMAGE))
def test_each_channel():
filtered = edges_each(COLOR_IMAGE)
for i, channel in enumerate(cp.rollaxis(filtered, axis=-1)):
expected = img_as_float(filters.sobel(COLOR_IMAGE[:, :, i]))
assert_allclose(channel, expected)
def test_each_channel_with_filter_argument():
filtered = smooth_each(COLOR_IMAGE, SIGMA)
for i, channel in enumerate(cp.rollaxis(filtered, axis=-1)):
assert_allclose(channel, smooth(COLOR_IMAGE[:, :, i]))
def test_each_channel_with_asymmetric_kernel():
mask = cp.triu(cp.ones(COLOR_IMAGE.shape[:2], dtype=np.bool_))
mask_each(COLOR_IMAGE, mask)
def test_hsv_value():
filtered = edges_hsv(COLOR_IMAGE)
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(color.rgb2hsv(filtered)[:, :, 2], filters.sobel(value))
def test_hsv_value_with_filter_argument():
filtered = smooth_hsv(COLOR_IMAGE, SIGMA)
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
assert_allclose(color.rgb2hsv(filtered)[:, :, 2], smooth(value))
def test_hsv_value_with_non_float_output():
    # Since `rgb2hsv` returns a float image and the filtered result is
    # inserted back into the HSV image, we want to make sure there isn't
    # a dtype mismatch.
filtered = edges_hsv_uint(COLOR_IMAGE)
filtered_value = color.rgb2hsv(filtered)[:, :, 2]
value = color.rgb2hsv(COLOR_IMAGE)[:, :, 2]
    # Reduce tolerance because of the dtype conversion.
assert_allclose(filtered_value, filters.sobel(value), rtol=1e-5, atol=1e-5)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/color/tests/ciede2000_test_data.txt
|
# input, intermediate, and output values for CIEDE2000 dE function
# data taken from "The CIEDE2000 Color-Difference Formula: Implementation Notes, ..." http://www.ece.rochester.edu/~gsharma/ciede2000/ciede2000noteCRNA.pdf
# tab delimited data
# pair 1 L1 a1 b1 ap1 cp1 hp1 hbar1 G T SL SC SH RT dE 2 L2 a2 b2 ap2 cp2 hp2
1 1 50.0000 2.6772 -79.7751 2.6774 79.8200 271.9222 270.9611 0.0001 0.6907 1.0000 4.6578 1.8421 -1.7042 2.0425 2 50.0000 0.0000 -82.7485 0.0000 82.7485 270.0000
2 1 50.0000 3.1571 -77.2803 3.1573 77.3448 272.3395 271.1698 0.0001 0.6843 1.0000 4.6021 1.8216 -1.7070 2.8615 2 50.0000 0.0000 -82.7485 0.0000 82.7485 270.0000
3 1 50.0000 2.8361 -74.0200 2.8363 74.0743 272.1944 271.0972 0.0001 0.6865 1.0000 4.5285 1.8074 -1.7060 3.4412 2 50.0000 0.0000 -82.7485 0.0000 82.7485 270.0000
4 1 50.0000 -1.3802 -84.2814 -1.3803 84.2927 269.0618 269.5309 0.0001 0.7357 1.0000 4.7584 1.9217 -1.6809 1.0000 2 50.0000 0.0000 -82.7485 0.0000 82.7485 270.0000
5 1 50.0000 -1.1848 -84.8006 -1.1849 84.8089 269.1995 269.5997 0.0001 0.7335 1.0000 4.7700 1.9218 -1.6822 1.0000 2 50.0000 0.0000 -82.7485 0.0000 82.7485 270.0000
6 1 50.0000 -0.9009 -85.5211 -0.9009 85.5258 269.3964 269.6982 0.0001 0.7303 1.0000 4.7862 1.9217 -1.6840 1.0000 2 50.0000 0.0000 -82.7485 0.0000 82.7485 270.0000
7 1 50.0000 0.0000 0.0000 0.0000 0.0000 0.0000 126.8697 0.5000 1.2200 1.0000 1.0562 1.0229 0.0000 2.3669 2 50.0000 -1.0000 2.0000 -1.5000 2.5000 126.8697
8 1 50.0000 -1.0000 2.0000 -1.5000 2.5000 126.8697 126.8697 0.5000 1.2200 1.0000 1.0562 1.0229 0.0000 2.3669 2 50.0000 0.0000 0.0000 0.0000 0.0000 0.0000
9 1 50.0000 2.4900 -0.0010 3.7346 3.7346 359.9847 269.9854 0.4998 0.7212 1.0000 1.1681 1.0404 -0.0022 7.1792 2 50.0000 -2.4900 0.0009 -3.7346 3.7346 179.9862
10 1 50.0000 2.4900 -0.0010 3.7346 3.7346 359.9847 269.9847 0.4998 0.7212 1.0000 1.1681 1.0404 -0.0022 7.1792 2 50.0000 -2.4900 0.0010 -3.7346 3.7346 179.9847
11 1 50.0000 2.4900 -0.0010 3.7346 3.7346 359.9847 89.9839 0.4998 0.6175 1.0000 1.1681 1.0346 0.0000 7.2195 2 50.0000 -2.4900 0.0011 -3.7346 3.7346 179.9831
12 1 50.0000 2.4900 -0.0010 3.7346 3.7346 359.9847 89.9831 0.4998 0.6175 1.0000 1.1681 1.0346 0.0000 7.2195 2 50.0000 -2.4900 0.0012 -3.7346 3.7346 179.9816
13 1 50.0000 -0.0010 2.4900 -0.0015 2.4900 90.0345 180.0328 0.4998 0.9779 1.0000 1.1121 1.0365 0.0000 4.8045 2 50.0000 0.0009 -2.4900 0.0013 2.4900 270.0311
14 1 50.0000 -0.0010 2.4900 -0.0015 2.4900 90.0345 180.0345 0.4998 0.9779 1.0000 1.1121 1.0365 0.0000 4.8045 2 50.0000 0.0010 -2.4900 0.0015 2.4900 270.0345
15 1 50.0000 -0.0010 2.4900 -0.0015 2.4900 90.0345 0.0362 0.4998 1.3197 1.0000 1.1121 1.0493 0.0000 4.7461 2 50.0000 0.0011 -2.4900 0.0016 2.4900 270.0380
16 1 50.0000 2.5000 0.0000 3.7496 3.7496 0.0000 315.0000 0.4998 0.8454 1.0000 1.1406 1.0396 -0.0001 4.3065 2 50.0000 0.0000 -2.5000 0.0000 2.5000 270.0000
17 1 50.0000 2.5000 0.0000 3.4569 3.4569 0.0000 346.2470 0.3827 1.4453 1.1608 1.9547 1.4599 -0.0003 27.1492 2 73.0000 25.0000 -18.0000 34.5687 38.9743 332.4939
18 1 50.0000 2.5000 0.0000 3.4954 3.4954 0.0000 51.7766 0.3981 0.6447 1.0640 1.7498 1.1612 0.0000 22.8977 2 61.0000 -5.0000 29.0000 -6.9907 29.8307 103.5532
19 1 50.0000 2.5000 0.0000 3.5514 3.5514 0.0000 272.2362 0.4206 0.6521 1.0251 1.9455 1.2055 -0.8219 31.9030 2 56.0000 -27.0000 -3.0000 -38.3556 38.4728 184.4723
20 1 50.0000 2.5000 0.0000 3.5244 3.5244 0.0000 11.9548 0.4098 1.1031 1.0400 1.9120 1.3353 0.0000 19.4535 2 58.0000 24.0000 15.0000 33.8342 37.0102 23.9095
21 1 50.0000 2.5000 0.0000 3.7494 3.7494 0.0000 3.5056 0.4997 1.2616 1.0000 1.1923 1.0808 0.0000 1.0000 2 50.0000 3.1736 0.5854 4.7596 4.7954 7.0113
22 1 50.0000 2.5000 0.0000 3.7493 3.7493 0.0000 0.0000 0.4997 1.3202 1.0000 1.1956 1.0861 0.0000 1.0000 2 50.0000 3.2972 0.0000 4.9450 4.9450 0.0000
23 1 50.0000 2.5000 0.0000 3.7497 3.7497 0.0000 5.8190 0.4999 1.2197 1.0000 1.1486 1.0604 0.0000 1.0000 2 50.0000 1.8634 0.5757 2.7949 2.8536 11.6380
24 1 50.0000 2.5000 0.0000 3.7493 3.7493 0.0000 1.9603 0.4997 1.2883 1.0000 1.1946 1.0836 0.0000 1.0000 2 50.0000 3.2592 0.3350 4.8879 4.8994 3.9206
25 1 60.2574 -34.0099 36.2677 -34.0678 49.7590 133.2085 132.0835 0.0017 1.3010 1.1427 3.2946 1.9951 0.0000 1.2644 2 60.4626 -34.1751 39.4387 -34.2333 52.2238 130.9584
26 1 63.0109 -31.0961 -5.8663 -32.6194 33.1427 190.1951 188.8221 0.0490 0.9402 1.1831 2.4549 1.4560 0.0000 1.2630 2 62.8187 -29.7946 -4.0864 -31.2542 31.5202 187.4490
27 1 61.2901 3.7196 -5.3901 5.5668 7.7487 315.9240 310.0313 0.4966 0.6952 1.1586 1.3092 1.0717 -0.0032 1.8731 2 61.4292 2.2480 -4.9620 3.3644 5.9950 304.1385
28 1 35.0831 -44.1164 3.7933 -44.3939 44.5557 175.1161 176.4290 0.0063 1.0168 1.2148 2.9105 1.6476 0.0000 1.8645 2 35.0232 -40.0716 1.5901 -40.3237 40.3550 177.7418
29 1 22.7233 20.0904 -46.6940 20.1424 50.8532 293.3339 291.3809 0.0026 0.3636 1.4014 3.1597 1.2617 -1.2537 2.0373 2 23.0331 14.9730 -42.5619 15.0118 45.1317 289.4279
30 1 36.4612 47.8580 18.3852 47.9197 51.3256 20.9901 21.8781 0.0013 0.9239 1.1943 3.3888 1.7357 0.0000 1.4146 2 36.2715 50.5065 21.2231 50.5716 54.8444 22.7660
31 1 90.8027 -2.0831 1.4410 -3.1245 3.4408 155.2410 167.1011 0.4999 1.1546 1.6110 1.1329 1.0511 0.0000 1.4441 2 91.1528 -1.6435 0.0447 -2.4651 2.4655 178.9612
32 1 90.9257 -0.5406 -0.9208 -0.8109 1.2270 228.6315 218.4363 0.5000 1.3916 1.5930 1.0620 1.0288 0.0000 1.5381 2 88.6381 -0.8985 -0.7239 -1.3477 1.5298 208.2412
33 1 6.7747 -0.2908 -2.4247 -0.4362 2.4636 259.8025 263.0049 0.4999 0.9556 1.6517 1.1057 1.0337 -0.0004 0.6377 2 5.8714 -0.0985 -2.2286 -0.1477 2.2335 266.2073
34 1 2.0776 0.0795 -1.1350 0.1192 1.1412 275.9978 268.0910 0.5000 0.7826 1.7246 1.0383 1.0100 0.0000 0.9082 2 0.9033 -0.0636 -0.5514 -0.0954 0.5596 260.18421
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration/deconvolution.py
|
"""Implementations restoration functions"""
import warnings
import cupy as cp
import numpy as np
from .._shared.utils import _supported_float_type, deprecate_kwarg
from . import uft
__keywords__ = "restoration, image, deconvolution"
def wiener(image, psf, balance, reg=None, is_real=True, clip=True):
r"""Wiener-Hunt deconvolution
Return the deconvolution with a Wiener-Hunt approach (i.e. with
Fourier diagonalisation).
Parameters
----------
image : cp.ndarray
Input degraded image (can be n-dimensional).
psf : ndarray
Point Spread Function. This is assumed to be the impulse
response (input image space) if the data-type is real, or the
transfer function (Fourier space) if the data-type is
        complex. There are no constraints on the shape of the impulse
response. The transfer function must be of shape
`(N1, N2, ..., ND)` if `is_real is True`,
`(N1, N2, ..., ND // 2 + 1)` otherwise (see `cp.fft.rfftn`).
balance : float
The regularisation parameter value that tunes the balance
        between the data adequacy that improves frequency restoration
        and the prior adequacy that reduces frequency restoration (to
avoid noise artifacts).
reg : ndarray, optional
The regularisation operator. The Laplacian by default. It can
be an impulse response or a transfer function, as for the
psf. Shape constraint is the same as for the `psf` parameter.
is_real : boolean, optional
        True by default. Specify if ``psf`` and ``reg`` are provided
        with the hermitian hypothesis, that is only half of the frequency
        plane is provided (due to the redundancy of the Fourier transform
        of a real signal). It applies only if ``psf`` and/or ``reg`` are
        provided as a transfer function. For the hermitian property see
        the ``uft`` module or ``cupy.fft.rfftn``.
clip : boolean, optional
True by default. If True, pixel values of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility.
Returns
-------
im_deconv : (M, N) ndarray
The deconvolved image.
Examples
--------
>>> import cupy as cp
>>> import cupyx.scipy.ndimage as ndi
>>> from cucim.skimage import color, restoration
>>> from skimage import data
>>> img = color.rgb2gray(cp.array(data.astronaut()))
>>> psf = cp.ones((5, 5)) / 25
>>> img = ndi.uniform_filter(img, size=psf.shape)
>>> img += 0.1 * img.std() * cp.random.standard_normal(img.shape)
>>> deconvolved_img = restoration.wiener(img, psf, 0.1)
Notes
-----
This function applies the Wiener filter to a noisy and degraded
image by an impulse response (or PSF). If the data model is
.. math:: y = Hx + n
where :math:`n` is noise, :math:`H` the PSF and :math:`x` the
unknown original image, the Wiener filter is
.. math::
\hat x = F^\dagger (|\Lambda_H|^2 + \lambda |\Lambda_D|^2)
\Lambda_H^\dagger F y
where :math:`F` and :math:`F^\dagger` are the Fourier and inverse
Fourier transforms respectively, :math:`\Lambda_H` the transfer
function (or the Fourier transform of the PSF, see [Hunt] below)
and :math:`\Lambda_D` the filter to penalize the restored image
frequencies (Laplacian by default, that is penalization of high
frequency). The parameter :math:`\lambda` tunes the balance
    between the data (which tends to increase high frequencies, even
    those coming from noise), and the regularization.
    These methods are then specific to a prior model. Consequently,
    the application or the true image nature must correspond to the
    prior model. By default, the prior model (Laplacian) introduces
    image smoothness or pixel correlation. It can also be interpreted
    as high-frequency penalization to compensate for the instability of
    the solution with respect to the data (sometimes called noise
    amplification or "explosive" solution).
Finally, the use of Fourier space implies a circulant property of
:math:`H`, see [2]_.
References
----------
.. [1] François Orieux, Jean-François Giovannelli, and Thomas
Rodet, "Bayesian estimation of regularization and point
spread function parameters for Wiener-Hunt deconvolution",
J. Opt. Soc. Am. A 27, 1593-1607 (2010)
https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593
https://hal.archives-ouvertes.fr/hal-00674508
.. [2] B. R. Hunt "A matrix theory proof of the discrete
convolution theorem", IEEE Trans. on Audio and
Electroacoustics, vol. au-19, no. 4, pp. 285-288, dec. 1971
"""
if reg is None:
reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
if not cp.iscomplexobj(reg):
reg = uft.ir2tf(reg, image.shape, is_real=is_real)
float_type = _supported_float_type(image.dtype)
image = image.astype(float_type, copy=False)
psf = psf.real.astype(float_type, copy=False)
reg = reg.real.astype(float_type, copy=False)
if psf.shape != reg.shape:
trans_func = uft.ir2tf(psf, image.shape, is_real=is_real)
else:
trans_func = psf
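    # Build the Wiener-Hunt filter in Fourier space following the Notes above:
    # conj(H) / (|H|^2 + balance * |D|^2), where H is the transfer function of
    # the PSF and D that of the regularisation operator (Laplacian by default).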
atf2 = cp.abs(trans_func)
atf2 *= atf2
areg2 = cp.abs(reg)
areg2 *= areg2
wiener_filter = cp.conj(trans_func) / (atf2 + balance * areg2)
if is_real:
deconv = uft.uirfftn(
wiener_filter * uft.urfftn(image), shape=image.shape
)
else:
deconv = uft.uifftn(wiener_filter * uft.ufftn(image))
if clip:
deconv[deconv > 1] = 1
deconv[deconv < -1] = -1
return deconv
@deprecate_kwarg(
{"random_state": "rng"},
removed_version="24.12.00",
deprecated_version="23.08.00",
)
@deprecate_kwarg(
{"seed": "rng"},
removed_version="24.12.00",
deprecated_version="23.12.00",
)
def unsupervised_wiener(
image,
psf,
reg=None,
user_params=None,
is_real=True,
clip=True,
*,
rng=None,
):
"""Unsupervised Wiener-Hunt deconvolution.
Return the deconvolution with a Wiener-Hunt approach, where the
hyperparameters are automatically estimated. The algorithm is a
stochastic iterative process (Gibbs sampler) described in the
reference below. See also ``wiener`` function.
Parameters
----------
image : (M, N) ndarray
The input degraded image.
psf : ndarray
The impulse response (input image's space) or the transfer
function (Fourier space). Both are accepted. The transfer
function is automatically recognized as being complex
(``cupy.iscomplexobj(psf)``).
reg : ndarray, optional
The regularisation operator. The Laplacian by default. It can
be an impulse response or a transfer function, as for the psf.
user_params : dict, optional
Dictionary of parameters for the Gibbs sampler. See below.
clip : boolean, optional
True by default. If true, pixel values of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility.
rng : {`cupy.random.Generator`, int}, optional
Pseudo-random number generator.
By default, a PCG64 generator is used
(see :func:`cupy.random.default_rng`).
If `rng` is an int, it is used to seed the generator.
Returns
-------
x_postmean : (M, N) ndarray
The deconvolved image (the posterior mean).
chains : dict
The keys ``noise`` and ``prior`` contain the chain list of
noise and prior precision respectively.
Other parameters
----------------
The keys of ``user_params`` are:
threshold : float
        The stopping criterion: the norm of the difference between two
        successive approximated solutions (empirical mean of object
samples, see Notes section). 1e-4 by default.
burnin : int
        The number of samples to ignore before starting to compute the
mean. 15 by default.
min_num_iter : int
The minimum number of iterations. 30 by default.
max_num_iter : int
The maximum number of iterations if ``threshold`` is not
satisfied. 200 by default.
callback : callable (None by default)
        A user-provided callable that, if given, is passed the current
        image sample at each iteration. The user can store the sample,
        or compute moments other than the mean. It has no influence on
        the algorithm execution and is only for inspection.
Examples
--------
>>> import cupy as cp
>>> import cupyx.scipy.ndimage as ndi
>>> from cucim.skimage import color, restoration
>>> from skimage import data
>>> img = color.rgb2gray(cp.array(data.astronaut()))
>>> psf = cp.ones((5, 5)) / 25
>>> img = ndi.uniform_filter(img, size=psf.shape)
>>> rng = cp.random.default_rng()
>>> img += 0.1 * img.std() * rng.standard_normal(img.shape)
>>> deconvolved_img = restoration.unsupervised_wiener(img, psf)
Notes
-----
    The estimated image is designed as the posterior mean of a
    probability law (from a Bayesian analysis). The mean is defined as
    a sum over all the possible images weighted by their respective
    probability. Given the size of the problem, the exact sum is not
    tractable. This algorithm uses MCMC to draw images under the
    posterior law. The practical idea is to only draw highly probable
    images since they have the biggest contribution to the mean. On the
    contrary, the less probable images are drawn less often since
    their contribution is low. Finally, the empirical mean of these
    samples gives us an estimate of the mean, which would become exact
    with an infinite sample set.
References
----------
.. [1] François Orieux, Jean-François Giovannelli, and Thomas
Rodet, "Bayesian estimation of regularization and point
spread function parameters for Wiener-Hunt deconvolution",
J. Opt. Soc. Am. A 27, 1593-1607 (2010)
https://www.osapublishing.org/josaa/abstract.cfm?URI=josaa-27-7-1593
https://hal.archives-ouvertes.fr/hal-00674508
"""
if user_params is not None:
for s in ("max", "min"):
if (s + "_iter") in user_params:
warning_msg = (
f"`{s}_iter` is a deprecated key for `user_params`. "
f"It will be removed in version 1.0. "
f"Use `{s}_num_iter` instead."
)
warnings.warn(warning_msg, FutureWarning)
user_params[s + "_num_iter"] = user_params.pop(s + "_iter")
params = {
"threshold": 1e-4,
"max_num_iter": 200,
"min_num_iter": 30,
"burnin": 15,
"callback": None,
}
params.update(user_params or {})
if reg is None:
reg, _ = uft.laplacian(image.ndim, image.shape, is_real=is_real)
if not cp.iscomplexobj(reg):
reg = uft.ir2tf(reg, image.shape, is_real=is_real)
float_type = _supported_float_type(image.dtype)
image = image.astype(float_type, copy=False)
psf = psf.real.astype(float_type, copy=False)
reg = reg.real.astype(float_type, copy=False)
if psf.shape != reg.shape:
trans_fct = uft.ir2tf(psf, image.shape, is_real=is_real)
else:
trans_fct = psf
# The mean of the object
x_postmean = cp.zeros(trans_fct.shape, dtype=float_type)
# The previous computed mean in the iterative loop
prev_x_postmean = cp.zeros(trans_fct.shape, dtype=float_type)
# Difference between two successive mean
    delta = np.nan
# Initial state of the chain
gn_chain, gx_chain = [1], [1]
# The correlation of the object in Fourier space (if size is big,
# this can reduce computation time in the loop)
areg2 = cp.abs(reg)
areg2 *= areg2
atf2 = cp.abs(trans_fct)
atf2 *= atf2
# The Fourier transform may change the image.size attribute, so we
# store it.
if is_real:
data_spectrum = uft.urfft2(image)
else:
data_spectrum = uft.ufft2(image)
rng = cp.random.default_rng(rng)
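    # Each Gibbs iteration below alternates between drawing an image sample in
    # Fourier space given the current precisions (Eq. 27) and drawing new noise
    # and prior precisions from their conditional Gamma laws (Eq. 31); the
    # posterior mean is accumulated once the burn-in period has passed.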
# Gibbs sampling
for iteration in range(params["max_num_iter"]):
# Sample of Eq. 27 p(circX^k | gn^k-1, gx^k-1, y).
# weighting (correlation in direct space)
precision = gn_chain[-1] * atf2 + gx_chain[-1] * areg2 # Eq. 29
# Note: Use astype instead of dtype argument to standard_normal to get
# similar random values across precisions, as needed for
# reference data used by test_unsupervised_wiener.
_rand1 = rng.standard_normal(data_spectrum.shape)
_rand1 = _rand1.astype(float_type, copy=False)
_rand2 = rng.standard_normal(data_spectrum.shape)
_rand2 = _rand2.astype(float_type, copy=False)
excursion = cp.sqrt(0.5 / precision) * (_rand1 + 1j * _rand2)
# mean Eq. 30 (RLS for fixed gn, gamma0 and gamma1 ...)
wiener_filter = gn_chain[-1] * cp.conj(trans_fct) / precision
# sample of X in Fourier space
x_sample = wiener_filter * data_spectrum + excursion
if params["callback"]:
params["callback"](x_sample)
# sample of Eq. 31 p(gn | x^k, gx^k, y)
gn_chain.append(
rng.gamma(
image.size / 2,
2 / uft.image_quad_norm(data_spectrum - x_sample * trans_fct),
).astype(float_type, copy=False)
)
# sample of Eq. 31 p(gx | x^k, gn^k-1, y)
gx_chain.append(
rng.gamma(
(image.size - 1) / 2, 2 / uft.image_quad_norm(x_sample * reg)
).astype(float_type, copy=False)
)
# current empirical average
if iteration > params["burnin"]:
x_postmean = prev_x_postmean + x_sample
if iteration > (params["burnin"] + 1):
current = x_postmean / (iteration - params["burnin"])
previous = prev_x_postmean / (iteration - params["burnin"] - 1)
delta = (
cp.sum(cp.abs(current - previous))
/ cp.sum(cp.abs(x_postmean))
/ (iteration - params["burnin"])
)
prev_x_postmean = x_postmean
# stop of the algorithm
if (iteration > params["min_num_iter"]) and (
delta < params["threshold"]
):
break
# Empirical average \approx POSTMEAN Eq. 44
x_postmean = x_postmean / (iteration - params["burnin"])
if is_real:
x_postmean = uft.uirfft2(x_postmean, shape=image.shape)
else:
x_postmean = uft.uifft2(x_postmean)
if clip:
x_postmean[x_postmean > 1] = 1
x_postmean[x_postmean < -1] = -1
return (x_postmean, {"noise": gn_chain, "prior": gx_chain})
def richardson_lucy(image, psf, num_iter=50, clip=True, filter_epsilon=None):
"""Richardson-Lucy deconvolution.
Parameters
----------
image : ndarray
Input degraded image (can be n-dimensional).
psf : ndarray
The point spread function.
num_iter : int, optional
Number of iterations. This parameter plays the role of
regularisation.
clip : boolean, optional
True by default. If true, pixel value of the result above 1 or
under -1 are thresholded for skimage pipeline compatibility.
filter_epsilon: float, optional
Value below which intermediate results become 0 to avoid division
by small numbers.
Returns
-------
im_deconv : ndarray
The deconvolved image.
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage import img_as_float, restoration
>>> from skimage import data
>>> camera = cp.asarray(img_as_float(cp.array(data.camera())))
>>> from cupyx.scipy.signal import convolve2d
>>> psf = cp.ones((5, 5)) / 25
>>> camera = convolve2d(camera, psf, 'same')
>>> camera += 0.1 * camera.std() * cp.random.standard_normal(camera.shape)
>>> deconvolved = restoration.richardson_lucy(camera, psf, 5)
References
----------
.. [1] https://en.wikipedia.org/wiki/Richardson%E2%80%93Lucy_deconvolution
"""
# TODO: use cupyx.scipy.signal once upstream fftconvolve and
# choose_conv_method for > 1d has been implemented.
from cucim.skimage import _vendored as signal
float_type = _supported_float_type(image.dtype)
image = image.astype(float_type, copy=False)
psf = psf.astype(float_type, copy=False)
im_deconv = cp.full(image.shape, 0.5, dtype=float_type)
psf_mirror = cp.ascontiguousarray(psf[::-1, ::-1])
# Small regularization parameter used to avoid 0 divisions
eps = 1e-12
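    # Each pass applies the multiplicative Richardson-Lucy update:
    # estimate *= convolve(image / convolve(estimate, psf), psf_mirror),
    # with ``eps`` guarding against divisions by zero.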
for _ in range(num_iter):
conv = signal.convolve(im_deconv, psf, mode="same") + eps
if filter_epsilon:
relative_blur = cp.where(conv < filter_epsilon, 0, image / conv)
else:
relative_blur = image / conv
im_deconv *= signal.convolve(relative_blur, psf_mirror, mode="same")
if clip:
im_deconv[im_deconv > 1] = 1
im_deconv[im_deconv < -1] = -1
return im_deconv
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration/_denoise.py
|
import functools
import cupy as cp
from cucim.skimage.util import img_as_float
from .._shared import utils
from .._shared.utils import _supported_float_type
def _denoise_tv_chambolle_nd(image, weight=0.1, eps=2.0e-4, max_num_iter=200):
"""Perform total-variation denoising on n-dimensional images.
Parameters
----------
image : ndarray
n-D input data to be denoised.
weight : float, optional
Denoising weight. The greater `weight`, the more denoising (at
the expense of fidelity to `input`).
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
max_num_iter : int, optional
Maximal number of iterations used for the optimization.
Returns
-------
out : ndarray
Denoised array of floats.
Notes
-----
Rudin, Osher and Fatemi algorithm.
"""
ndim = image.ndim
p = cp.zeros((image.ndim,) + image.shape, dtype=image.dtype)
g = cp.zeros_like(p)
d = cp.zeros_like(image)
i = 0
slices_g = [slice(None)] * (ndim + 1)
slices_d = [slice(None)] * ndim
slices_p = [slice(None)] * (ndim + 1)
while i < max_num_iter:
if i > 0:
# d will be the (negative) divergence of p
d = -p.sum(0)
for ax in range(ndim):
slices_d[ax] = slice(1, None)
slices_p[ax + 1] = slice(0, -1)
slices_p[0] = ax
d[tuple(slices_d)] += p[tuple(slices_p)]
slices_d[ax] = slice(None)
slices_p[ax + 1] = slice(None)
out = image + d
E = (d * d).sum()
else:
out = image
E = 0.0
# g stores the gradients of out along each axis
# e.g. g[0] is the first order finite difference along axis 0
for ax in range(ndim):
slices_g[ax + 1] = slice(0, -1)
slices_g[0] = ax
g[tuple(slices_g)] = cp.diff(out, axis=ax)
slices_g[ax + 1] = slice(None)
norm = (g * g).sum(axis=0, keepdims=True)
cp.sqrt(norm, out=norm)
E += weight * norm.sum()
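        # Update the dual variable ``p`` with Chambolle's semi-implicit scheme:
        # a gradient step of size tau = 1 / (2 * ndim) followed by a pointwise
        # normalisation by ``1 + (tau / weight) * |g|``.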
tau = 1.0 / (2.0 * ndim)
norm *= tau / weight
norm += 1.0
p -= tau * g
p /= norm
E /= float(image.size)
if i == 0:
E_init = E
E_previous = E
else:
if abs(E_previous - E) < eps * E_init:
break
else:
E_previous = E
i += 1
return out
@utils.deprecate_kwarg(
{"n_iter_max": "max_num_iter"},
removed_version="23.02.00",
deprecated_version="22.06.00",
)
def denoise_tv_chambolle(
image, weight=0.1, eps=2.0e-4, max_num_iter=200, *, channel_axis=None
):
r"""Perform total variation denoising in nD.
Given :math:`f`, a noisy image (input data),
total variation denoising (also known as total variation regularization)
aims to find an image :math:`u` with less total variation than :math:`f`,
under the constraint that :math:`u` remain similar to :math:`f`.
This can be expressed by the Rudin--Osher--Fatemi (ROF) minimization
problem:
.. math::
\min_{u} \sum_{i=0}^{N-1} \left( \left| \nabla{u_i} \right| + \frac{\lambda}{2}(f_i - u_i)^2 \right)
where :math:`\lambda` is a positive parameter.
The first term of this cost function is the total variation;
the second term represents data fidelity. As :math:`\lambda \to 0`,
the total variation term dominates, forcing the solution to have smaller
total variation, at the expense of looking less like the input data.
This code is an implementation of the algorithm proposed by Chambolle
in [1]_ to solve the ROF problem.
Parameters
----------
image : ndarray
Input image to be denoised. If its dtype is not float, it gets
converted with :func:`~.img_as_float`.
weight : float, optional
Denoising weight. It is equal to :math:`\frac{1}{\lambda}`. Therefore,
the greater the `weight`, the more denoising (at the expense of
fidelity to `image`).
eps : float, optional
Tolerance :math:`\varepsilon > 0` for the stop criterion (compares to
absolute value of relative difference of the cost function :math:`E`):
The algorithm stops when :math:`|E_{n-1} - E_n| < \varepsilon * E_0`.
max_num_iter : int, optional
Maximal number of iterations used for the optimization.
channel_axis : int or None, optional
If ``None``, the image is assumed to be grayscale (single-channel).
Otherwise, this parameter indicates which axis of the array corresponds
to channels.
.. versionadded:: 0.19
``channel_axis`` was added in 0.19.
Returns
-------
u : ndarray
Denoised image.
Notes
-----
Make sure to set the `channel_axis` parameter appropriately for color
images.
The principle of total variation denoising is explained in [2]_.
It is about minimizing the total variation of an image,
which can be roughly described as
the integral of the norm of the image gradient. Total variation
denoising tends to produce cartoon-like images, that is,
piecewise-constant images.
See Also
--------
denoise_tv_bregman : Perform total variation denoising using split-Bregman
optimization.
References
----------
.. [1] A. Chambolle, An algorithm for total variation minimization and
applications, Journal of Mathematical Imaging and Vision,
Springer, 2004, 20, 89-97.
.. [2] https://en.wikipedia.org/wiki/Total_variation_denoising
Examples
--------
2D example on astronaut image:
>>> import cupy as cp
>>> from cucim.skimage import color
>>> from skimage import data
>>> img = color.rgb2gray(cp.array(data.astronaut()[:50, :50]))
>>> img += 0.5 * img.std() * cp.random.randn(*img.shape)
>>> denoised_img = denoise_tv_chambolle(img, weight=60)
3D example on synthetic data:
>>> x, y, z = cp.ogrid[0:20, 0:20, 0:20]
>>> mask = (x - 22)**2 + (y - 20)**2 + (z - 17)**2 < 8**2
>>> mask = mask.astype(float)
>>> mask += 0.2*cp.random.randn(*mask.shape)
>>> res = denoise_tv_chambolle(mask, weight=100)
""" # noqa
im_type = image.dtype
if not im_type.kind == "f":
image = img_as_float(image)
# enforce float16->float32 and float128->float64
float_dtype = _supported_float_type(image.dtype)
image = image.astype(float_dtype, copy=False)
if channel_axis is not None:
channel_axis = channel_axis % image.ndim
_at = functools.partial(utils.slice_at_axis, axis=channel_axis)
out = cp.zeros_like(image)
for c in range(image.shape[channel_axis]):
out[_at(c)] = _denoise_tv_chambolle_nd(
image[_at(c)], weight, eps, max_num_iter
)
else:
out = _denoise_tv_chambolle_nd(image, weight, eps, max_num_iter)
return out
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration/__init__.py
|
from ._denoise import denoise_tv_chambolle
from .deconvolution import richardson_lucy, unsupervised_wiener, wiener
from .j_invariant import calibrate_denoiser, denoise_invariant
__all__ = [
"wiener",
"unsupervised_wiener",
"richardson_lucy",
"denoise_tv_chambolle",
"calibrate_denoiser",
"denoise_invariant",
]
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration/j_invariant.py
|
import functools
import itertools
import cupy as cp
import numpy as np
import cucim.skimage._vendored.ndimage as ndi
from .._shared.utils import _supported_float_type
from ..metrics import mean_squared_error
from ..util import img_as_float
def _interpolate_image(image, *, multichannel=False):
"""Replacing each pixel in ``image`` with the average of its neighbors.
Parameters
----------
image : ndarray
Input data to be interpolated.
multichannel : bool, optional
Whether the last axis of the image is to be interpreted as multiple
channels or another spatial dimension.
Returns
-------
interp : ndarray
Interpolated version of `image`.
"""
spatialdims = image.ndim if not multichannel else image.ndim - 1
conv_filter = ndi.generate_binary_structure(spatialdims, 1).astype(
image.dtype
)
conv_filter.ravel()[conv_filter.size // 2] = 0
conv_filter /= conv_filter.sum()
# CuPy Backend: refactored below to avoid for loop
if multichannel:
conv_filter = conv_filter[..., np.newaxis]
interp = ndi.convolve(image, conv_filter, mode="mirror")
return interp
def _generate_grid_slice(shape, *, offset, stride=3):
"""Generate slices of uniformly-spaced points in an array.
Parameters
----------
shape : tuple of int
Shape of the mask.
offset : int
The offset of the grid of ones. Iterating over ``offset`` will cover
the entire array. It should be between 0 and ``stride ** ndim``, not
inclusive, where ``ndim = len(shape)``.
stride : int, optional
The spacing between ones, used in each dimension.
Returns
-------
mask : ndarray
The mask.
Examples
--------
>>> shape = (4, 4)
>>> array = cp.zeros(shape, dtype=int)
>>> grid_slice = _generate_grid_slice(shape, offset=0, stride=2)
>>> array[grid_slice] = 1
>>> print(array)
[[1 0 1 0]
[0 0 0 0]
[1 0 1 0]
[0 0 0 0]]
Changing the offset moves the location of the 1s:
>>> array = cp.zeros(shape, dtype=int)
>>> grid_slice = _generate_grid_slice(shape, offset=3, stride=2)
>>> array[grid_slice] = 1
>>> print(array)
[[0 0 0 0]
[0 1 0 1]
[0 0 0 0]
[0 1 0 1]]
"""
phases = np.unravel_index(offset, (stride,) * len(shape))
mask = tuple(slice(p, None, stride) for p in phases)
return mask
def denoise_invariant(
image, denoise_function, *, stride=4, masks=None, denoiser_kwargs=None
):
"""Apply a J-invariant version of `denoise_function`.
Parameters
----------
image : ndarray ([M[, N[, ...P]][, C]) of ints, uints or floats
Input data to be denoised. `image` can be of any numeric type,
but it is cast into a ndarray of floats (using `img_as_float`) for the
computation of the denoised image.
denoise_function : function
Original denoising function.
stride : int, optional
Stride used in masking procedure that converts `denoise_function`
to J-invariance.
masks : list of ndarray, optional
Set of masks to use for computing J-invariant output. If `None`,
a full set of masks covering the image will be used.
denoiser_kwargs:
Keyword arguments passed to `denoise_function`.
Returns
-------
output : ndarray
Denoised image, of same shape as `image`.
Notes
-----
A denoising function is J-invariant if the prediction it makes for each
pixel does not depend on the value of that pixel in the original image.
The prediction for each pixel may instead use all the relevant information
contained in the rest of the image, which is typically quite significant.
Any function can be converted into a J-invariant one using a simple masking
procedure, as described in [1].
    The pixel-wise error of a J-invariant denoiser is uncorrelated with the
    noise, so long as the noise in each pixel is independent. Consequently, the
    average difference between the denoised image and the noisy image, the
*self-supervised loss*, is the same as the difference between the denoised
image and the original clean image, the *ground-truth loss* (up to a
constant).
This means that the best J-invariant denoiser for a given image can be found
using the noisy data alone, by selecting the denoiser minimizing the self-
supervised loss.
References
----------
.. [1] J. Batson & L. Royer. Noise2Self: Blind Denoising by
Self-Supervision, International Conference on Machine Learning,
p. 524-533 (2019).
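    Examples
    --------
    A minimal sketch (parameter values are illustrative) wrapping
    total-variation denoising:
    >>> import cupy as cp
    >>> from cucim.skimage.restoration import (denoise_invariant,
    ...                                        denoise_tv_chambolle)
    >>> noisy = cp.random.random((64, 64))
    >>> denoised = denoise_invariant(noisy, denoise_tv_chambolle,
    ...                              denoiser_kwargs={'weight': 0.1})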
"""
image = img_as_float(image)
# promote float16->float32 if needed
float_dtype = _supported_float_type(image.dtype)
image = image.astype(float_dtype, copy=False)
if denoiser_kwargs is None:
denoiser_kwargs = {}
if "multichannel" in denoiser_kwargs:
multichannel = denoiser_kwargs["multichannel"]
else:
multichannel = denoiser_kwargs.get("channel_axis", None) is not None
interp = _interpolate_image(image, multichannel=multichannel)
output = cp.zeros_like(image)
if masks is None:
spatialdims = image.ndim if not multichannel else image.ndim - 1
n_masks = stride**spatialdims
masks = (
_generate_grid_slice(
image.shape[:spatialdims], offset=idx, stride=stride
)
for idx in range(n_masks)
)
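    # For each mask, the masked pixels are first replaced by their interpolated
    # values, so the denoiser's prediction at those pixels never depends on
    # their original values; keeping only the masked outputs makes the
    # assembled result J-invariant.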
for mask in masks:
input_image = image.copy()
input_image[mask] = interp[mask]
output[mask] = denoise_function(input_image, **denoiser_kwargs)[mask]
return output
def _product_from_dict(dictionary):
"""Utility function to convert parameter ranges to parameter combinations.
Converts a dict of lists into a list of dicts whose values consist of the
cartesian product of the values in the original dict.
Parameters
----------
dictionary : dict of lists
Dictionary of lists to be multiplied.
Yields
------
selections : dicts of values
Dicts containing individual combinations of the values in the input
dict.
"""
keys = dictionary.keys()
for element in itertools.product(*dictionary.values()):
yield dict(zip(keys, element))
def calibrate_denoiser(
image,
denoise_function,
denoise_parameters,
*,
stride=4,
approximate_loss=True,
extra_output=False,
):
"""Calibrate a denoising function and return optimal J-invariant version.
The returned function is partially evaluated with optimal parameter values
set for denoising the input image.
Parameters
----------
image : ndarray
Input data to be denoised (converted using `img_as_float`).
denoise_function : function
Denoising function to be calibrated.
denoise_parameters : dict of list
Ranges of parameters for `denoise_function` to be calibrated over.
stride : int, optional
Stride used in masking procedure that converts `denoise_function`
to J-invariance.
approximate_loss : bool, optional
Whether to approximate the self-supervised loss used to evaluate the
denoiser by only computing it on one masked version of the image.
If False, the runtime will be a factor of `stride**image.ndim` longer.
extra_output : bool, optional
If True, return parameters and losses in addition to the calibrated
denoising function
Returns
-------
best_denoise_function : function
The optimal J-invariant version of `denoise_function`.
If `extra_output` is True, the following tuple is also returned:
    (parameters_tested, losses) : tuple (list of dict, list of float)
        List of parameters tested for `denoise_function`, as a dictionary of
        kwargs.
Self-supervised loss for each set of parameters in `parameters_tested`.
Notes
-----
The calibration procedure uses a self-supervised mean-square-error loss
to evaluate the performance of J-invariant versions of `denoise_function`.
The minimizer of the self-supervised loss is also the minimizer of the
ground-truth loss (i.e., the true MSE error) [1]. The returned function
can be used on the original noisy image, or other images with similar
characteristics.
Increasing the stride increases the performance of `best_denoise_function`
at the expense of increasing its runtime. It has no effect on the runtime
of the calibration.
References
----------
.. [1] J. Batson & L. Royer. Noise2Self: Blind Denoising by Self-Supervision,
International Conference on Machine Learning, p. 524-533 (2019).
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage import color
>>> from skimage import data
>>> from cucim.skimage.restoration import (denoise_tv_chambolle,
... calibrate_denoiser)
>>> img = color.rgb2gray(cp.array(data.astronaut()[:50, :50]))
>>> noisy = img + 0.5 * img.std() * cp.random.randn(*img.shape)
>>> parameters = {'weight': cp.arange(0.01, 0.5, 0.05)}
>>> denoising_function = calibrate_denoiser(noisy, denoise_tv_chambolle,
... denoise_parameters=parameters)
>>> denoised_img = denoising_function(img)
""" # noqa
parameters_tested, losses = _calibrate_denoiser_search(
image,
denoise_function,
denoise_parameters=denoise_parameters,
stride=stride,
approximate_loss=approximate_loss,
)
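    # Select the parameter set with the smallest self-supervised loss; per the
    # Notes, this also minimizes the ground-truth MSE, and the winning
    # parameters are baked into the returned J-invariant denoiser.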
idx = np.argmin(losses)
best_parameters = parameters_tested[idx]
best_denoise_function = functools.partial(
denoise_invariant,
denoise_function=denoise_function,
stride=stride,
denoiser_kwargs=best_parameters,
)
if extra_output:
return best_denoise_function, (parameters_tested, losses)
else:
return best_denoise_function
def _calibrate_denoiser_search(
image,
denoise_function,
denoise_parameters,
*,
stride=4,
approximate_loss=True,
):
"""Return a parameter search history with losses for a denoise function.
Parameters
----------
image : ndarray
Input data to be denoised (converted using `img_as_float`).
denoise_function : function
Denoising function to be calibrated.
denoise_parameters : dict of list
Ranges of parameters for `denoise_function` to be calibrated over.
stride : int, optional
Stride used in masking procedure that converts `denoise_function`
to J-invariance.
approximate_loss : bool, optional
Whether to approximate the self-supervised loss used to evaluate the
denoiser by only computing it on one masked version of the image.
If False, the runtime will be a factor of `stride**image.ndim` longer.
Returns
-------
parameters_tested : list of dict
List of parameters tested for `denoise_function`, as a dictionary of
kwargs.
    losses : list of float
Self-supervised loss for each set of parameters in `parameters_tested`.
"""
image = img_as_float(image)
parameters_tested = list(_product_from_dict(denoise_parameters))
losses = []
for denoiser_kwargs in parameters_tested:
if "multichannel" in denoiser_kwargs:
multichannel = denoiser_kwargs["multichannel"]
else:
multichannel = denoiser_kwargs.get("channel_axis", None) is not None
if not approximate_loss:
denoised = denoise_invariant(
image,
denoise_function,
stride=stride,
denoiser_kwargs=denoiser_kwargs,
)
loss = mean_squared_error(image, denoised)
else:
spatialdims = image.ndim if not multichannel else image.ndim - 1
n_masks = stride**spatialdims
mask = _generate_grid_slice(
image.shape[:spatialdims], offset=n_masks // 2, stride=stride
)
masked_denoised = denoise_invariant(
image,
denoise_function,
masks=[mask],
denoiser_kwargs=denoiser_kwargs,
)
loss = mean_squared_error(image[mask], masked_denoised[mask])
losses.append(float(loss))
return parameters_tested, losses
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration/uft.py
|
r"""Function of unitary fourier transform (uft) and utilities
This module implements the unitary fourier transform, also known as
the ortho-normal transform. It is especially useful for convolution
[1], as it respects the Parseval equality. The value of the null
frequency is equal to
.. math:: \frac{1}{\sqrt{n}} \sum_i x_i
so the Fourier transform has the same energy as the original image
(see ``image_quad_norm`` function). The transform is applied from the
last axis for performance (assuming a C-order array input).
References
----------
.. [1] B. R. Hunt "A matrix theory proof of the discrete convolution
theorem", IEEE Trans. on Audio and Electroacoustics,
vol. au-19, no. 4, pp. 285-288, dec. 1971
"""
import math
import cupy as cp
import cupyx.scipy.fft as fft
import numpy as np
from .._shared.utils import _supported_float_type
def ufftn(inarray, dim=None):
"""N-dimensional unitary Fourier transform.
Parameters
----------
inarray : ndarray
The array to transform.
dim : int, optional
        The number of trailing axes over which to compute the transform.
        All axes by default.
Returns
-------
    outarray : ndarray (same shape as inarray)
The unitary N-D Fourier transform of ``inarray``.
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((3, 3, 3))
>>> output = ufftn(input)
>>> cp.allclose(cp.sum(input) / cp.sqrt(input.size), output[0, 0, 0])
array(True)
>>> output.shape
(3, 3, 3)
"""
if dim is None:
dim = inarray.ndim
outarray = fft.fftn(inarray, axes=range(-dim, 0), norm="ortho")
return outarray
def uifftn(inarray, dim=None):
"""N-dimensional unitary inverse Fourier transform.
Parameters
----------
inarray : ndarray
The array to transform.
dim : int, optional
        The number of trailing axes over which to compute the transform.
        All axes by default.
Returns
-------
    outarray : ndarray (same shape as inarray)
The unitary inverse N-D Fourier transform of ``inarray``.
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((3, 3, 3))
>>> output = uifftn(input)
>>> cp.allclose(cp.sum(input) / cp.sqrt(input.size), output[0, 0, 0])
array(True)
>>> output.shape
(3, 3, 3)
"""
if dim is None:
dim = inarray.ndim
outarray = fft.ifftn(inarray, axes=range(-dim, 0), norm="ortho")
return outarray
def urfftn(inarray, dim=None):
"""N-dimensional real unitary Fourier transform.
This transform considers the Hermitian property of the transform on
real-valued input.
Parameters
----------
inarray : ndarray, shape (M, N, ..., P)
The array to transform.
dim : int, optional
        The number of trailing axes over which to compute the transform.
        All axes by default.
Returns
-------
outarray : ndarray, shape (M, N, ..., P / 2 + 1)
The unitary N-D real Fourier transform of ``inarray``.
Notes
-----
The ``urfft`` functions assume an input array of real
values. Consequently, the output has a Hermitian property and
redundant values are not computed or returned.
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((5, 5, 5))
>>> output = urfftn(input)
>>> cp.allclose(cp.sum(input) / cp.sqrt(input.size), output[0, 0, 0])
array(True)
>>> output.shape
(5, 5, 3)
"""
if dim is None:
dim = inarray.ndim
outarray = fft.rfftn(inarray, axes=range(-dim, 0), norm="ortho")
return outarray
def uirfftn(inarray, dim=None, shape=None):
"""N-dimensional inverse real unitary Fourier transform.
This transform considers the Hermitian property of the transform
from complex to real input.
Parameters
----------
inarray : ndarray
The array to transform.
dim : int, optional
        The number of trailing axes over which to compute the transform.
        All axes by default.
shape : tuple of int, optional
The shape of the output. The shape of ``rfft`` is ambiguous in
case of odd-valued input shape. In this case, this parameter
should be provided. See ``cupy.fft.irfftn``.
Returns
-------
outarray : ndarray
The unitary N-D inverse real Fourier transform of ``inarray``.
Notes
-----
The ``uirfft`` function assumes that the output array is
real-valued. Consequently, the input is assumed to have a Hermitian
property and redundant values are implicit.
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((5, 5, 5))
>>> output = uirfftn(urfftn(input), shape=input.shape)
>>> cp.allclose(input, output)
array(True)
>>> output.shape
(5, 5, 5)
"""
if dim is None:
dim = inarray.ndim
outarray = fft.irfftn(inarray, shape, axes=range(-dim, 0), norm="ortho")
return outarray
def ufft2(inarray):
"""2-dimensional unitary Fourier transform.
Compute the Fourier transform on the last 2 axes.
Parameters
----------
inarray : ndarray
The array to transform.
Returns
-------
outarray : ndarray (same shape as inarray)
The unitary 2-D Fourier transform of ``inarray``.
See Also
--------
uifft2, ufftn, urfftn
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((10, 128, 128))
>>> output = ufft2(input)
>>> cp.allclose(cp.sum(input[1, ...]) / cp.sqrt(input[1, ...].size),
... output[1, 0, 0])
array(True)
>>> output.shape
(10, 128, 128)
"""
return ufftn(inarray, 2)
def uifft2(inarray):
"""2-dimensional inverse unitary Fourier transform.
Compute the inverse Fourier transform on the last 2 axes.
Parameters
----------
inarray : ndarray
The array to transform.
Returns
-------
outarray : ndarray (same shape as inarray)
The unitary 2-D inverse Fourier transform of ``inarray``.
See Also
--------
uifft2, uifftn, uirfftn
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((10, 128, 128))
>>> output = uifft2(input)
>>> cp.allclose(cp.sum(input[1, ...]) / cp.sqrt(input[1, ...].size),
... output[0, 0, 0])
array(True)
>>> output.shape
(10, 128, 128)
"""
return uifftn(inarray, 2)
def urfft2(inarray):
"""2-dimensional real unitary Fourier transform
Compute the real Fourier transform on the last 2 axes. This
transform considers the Hermitian property of the transform from
complex to real-valued input.
Parameters
----------
inarray : ndarray, shape (M, N, ..., P)
The array to transform.
Returns
-------
    outarray : ndarray, shape (M, N, ..., P / 2 + 1)
The unitary 2-D real Fourier transform of ``inarray``.
See Also
--------
ufft2, ufftn, urfftn
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((10, 128, 128))
>>> output = urfft2(input)
>>> cp.allclose(cp.sum(input[1,...]) / cp.sqrt(input[1,...].size),
... output[1, 0, 0])
array(True)
>>> output.shape
(10, 128, 65)
"""
return urfftn(inarray, 2)
def uirfft2(inarray, shape=None):
"""2-dimensional inverse real unitary Fourier transform.
Compute the real inverse Fourier transform on the last 2 axes.
This transform considers the Hermitian property of the transform
from complex to real-valued input.
Parameters
----------
inarray : ndarray, shape (M, N, ..., P)
The array to transform.
shape : tuple of int, optional
The shape of the output. The shape of ``rfft`` is ambiguous in
case of odd-valued input shape. In this case, this parameter
should be provided. See ``cupy.fft.irfftn``.
Returns
-------
outarray : ndarray, shape (M, N, ..., 2 * (P - 1))
The unitary 2-D inverse real Fourier transform of ``inarray``.
See Also
--------
urfft2, uifftn, uirfftn
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((10, 128, 128))
>>> output = uirfftn(urfftn(input), shape=input.shape)
>>> cp.allclose(input, output)
array(True)
>>> output.shape
(10, 128, 128)
"""
return uirfftn(inarray, 2, shape=shape)
def image_quad_norm(inarray):
"""Return the quadratic norm of images in Fourier space.
    This function infers from the array shape whether the input is the
    half-spectrum of a real transform (Hermitian symmetry) and accounts
    for the implicit redundant coefficients accordingly.
Parameters
----------
inarray : ndarray
Input image. The image data should reside in the final two
axes.
Returns
-------
norm : float
The quadratic norm of ``inarray``.
Examples
--------
>>> import cupy as cp
>>> input = cp.ones((5, 5))
>>> image_quad_norm(ufft2(input)) == cp.sum(cp.abs(input)**2)
array(True)
>>> image_quad_norm(ufft2(input)) == image_quad_norm(urfft2(input))
array(True)
"""
# If there is a Hermitian symmetry
abs_sq = cp.abs(inarray)
abs_sq *= abs_sq
if inarray.shape[-1] != inarray.shape[-2]:
return 2 * cp.sum(cp.sum(abs_sq, axis=-1), axis=-1) - cp.sum(
cp.abs(inarray[..., 0]) ** 2, axis=-1
)
else:
return cp.sum(cp.sum(abs_sq, axis=-1), axis=-1)
def ir2tf(imp_resp, shape, dim=None, is_real=True):
"""Compute the transfer function of an impulse response (IR).
    This function applies the necessary zero-padding, shift to the
    zero-phase convention, Fourier transform, etc., to compute the
    transfer function of an IR. It is meant to be used with a unitary
    Fourier transform of the signal (``ufftn`` or equivalent).
Parameters
----------
imp_resp : ndarray
The impulse responses.
shape : tuple of int
        A tuple of integers giving the target shape of the transfer
        function.
    dim : int, optional
        The number of trailing axes along which to compute the
        transform. All axes by default.
    is_real : boolean, optional
        If True (default), ``imp_resp`` is assumed to be real-valued and
        the Hermitian property is exploited by using ``rfftn``.
Returns
-------
y : complex ndarray
The transfer function of shape ``shape``.
See Also
--------
ufftn, uifftn, urfftn, uirfftn
Examples
--------
>>> import cupy as cp
>>> cp.all(cp.array([[4, 0], [0, 0]]) == ir2tf(cp.ones((2, 2)), (2, 2)))
array(True)
>>> ir2tf(cp.ones((2, 2)), (512, 512)).shape == (512, 257)
True
>>> ir2tf(cp.ones((2, 2)), (512, 512), is_real=False).shape == (512, 512)
True
Notes
-----
The input array can be composed of multiple-dimensional IR with
an arbitrary number of IR. The individual IR must be accessed
through the first axes. The last ``dim`` axes contain the space
definition.
"""
if not dim:
dim = imp_resp.ndim
# Zero padding and fill
irpadded_dtype = _supported_float_type(imp_resp.dtype)
irpadded = cp.zeros(shape, dtype=irpadded_dtype)
irpadded[tuple([slice(0, s) for s in imp_resp.shape])] = imp_resp
# Roll for zero convention of the fft to avoid the phase
# problem. Work with odd and even size.
for axis, axis_size in enumerate(imp_resp.shape):
if axis >= imp_resp.ndim - dim:
irpadded = cp.roll(
irpadded, shift=-math.floor(axis_size / 2), axis=axis
)
func = fft.rfftn if is_real else fft.fftn
out = func(irpadded, axes=(range(-dim, 0)))
return out
def laplacian(ndim, shape, is_real=True, *, dtype=None):
"""Return the transfer function of the Laplacian.
    The Laplacian is the second-order difference operator, applied along
    each axis.
Parameters
----------
ndim : int
The dimension of the Laplacian.
shape : tuple
The support on which to compute the transfer function.
is_real : boolean, optional
If True (default), imp_resp is assumed to be real-valued and
the Hermitian property is used with rfftn Fourier transform
to return the transfer function.
Returns
-------
tf : array_like, complex
The transfer function.
impr : array_like, real
The Laplacian.
Examples
--------
>>> import cupy as cp
>>> tf, ir = laplacian(2, (32, 32))
>>> cp.all(ir == cp.array([[0, -1, 0], [-1, 4, -1], [0, -1, 0]]))
array(True)
>>> cp.all(tf == ir2tf(ir, (32, 32)))
array(True)
"""
if dtype is None:
dtype = cp.float64 if is_real else cp.complex128
elif np.dtype(dtype).kind != "f":
raise ValueError("dtype must be a floating point dtype")
# CuPy Backend: assemble the small kernel on the host and then transfer it
impr = np.zeros([3] * ndim)
for dim in range(ndim):
idx = tuple(
[slice(1, 2)] * dim
+ [slice(None)]
+ [slice(1, 2)] * (ndim - dim - 1)
)
impr[idx] = np.array([-1.0, 0.0, -1.0]).reshape(
[-1 if i == dim else 1 for i in range(ndim)]
)
impr[(slice(1, 2),) * ndim] = 2.0 * ndim
impr = cp.array(impr, dtype=dtype)
if shape is None: # filters.laplace only uses the spatial kernel
return impr
return ir2tf(impr, shape, is_real=is_real), impr
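# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal example of
# how the helpers above fit together, assuming a CuPy-capable environment.
# The helper name `_demo_uft_helpers` is hypothetical. It builds the Laplacian
# regularization transfer function for an image support and checks the
# unitary-norm convention used throughout this module.
def _demo_uft_helpers():
    import cupy as cp
    image = cp.random.rand(65, 65)  # odd size keeps the rfft-based norm exact
    # Transfer function of the Laplacian on the image support, plus the small
    # spatial kernel it was computed from.
    tf, kernel = laplacian(2, image.shape)
    assert tf.shape == (65, 33)  # half-spectrum along the last axis
    assert kernel.shape == (3, 3)
    # The transforms are unitary, so the energy measured in Fourier space
    # matches the energy of the image itself (Parseval's identity).
    energy = cp.sum(image * image)
    assert cp.allclose(image_quad_norm(ufft2(image)), energy)
    assert cp.allclose(image_quad_norm(urfft2(image)), energy)
    return tf, kernel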
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration/tests/test_denoise.py
|
import functools
import cupy as cp
import numpy as np
import pytest
from cupy.testing import assert_array_equal
from skimage import color, data, img_as_float
from cucim.skimage import restoration
from cucim.skimage._shared.utils import _supported_float_type, slice_at_axis
from cucim.skimage.metrics import structural_similarity
cp.random.seed(1234)
astro = img_as_float(data.astronaut()[:128, :128])
astro_gray = color.rgb2gray(astro)
checkerboard_gray = img_as_float(data.checkerboard())
checkerboard = color.gray2rgb(checkerboard_gray)
# versions with one odd-sized dimension
astro_gray_odd = astro_gray[:, :-1]
astro_odd = astro[:, :-1]
# transfer test images to the GPU
astro = cp.asarray(astro)
astro_gray = cp.asarray(astro_gray)
astro_gray_odd = cp.asarray(astro_gray_odd)
astro_odd = cp.asarray(astro_odd)
checkerboard = cp.asarray(checkerboard)
checkerboard_gray = cp.asarray(checkerboard_gray)
float_dtypes = [cp.float16, cp.float32, cp.float64]
try:
float_dtypes += [cp.float128]
except AttributeError:
pass
@pytest.mark.parametrize("dtype", float_dtypes)
def test_denoise_tv_chambolle_2d(dtype):
# astronaut image
img = astro_gray.astype(dtype, copy=True)
# add noise to astronaut
img += 0.5 * img.std() * cp.random.rand(*img.shape)
# clip noise so that it does not exceed allowed range for float images.
img = cp.clip(img, 0, 1)
# denoise
denoised_astro = restoration.denoise_tv_chambolle(img, weight=0.1)
assert denoised_astro.dtype == _supported_float_type(img.dtype)
# TODO: remove device to host transfers if cuda
# morphological_gradient is implemented
from scipy import ndimage as ndi # noqa
# Convert to a floating point type supported by scipy.ndimage
float_dtype = _supported_float_type(img.dtype)
img = img.astype(float_dtype, copy=False)
grad = ndi.morphological_gradient(cp.asnumpy(img), size=((3, 3)))
grad_denoised = ndi.morphological_gradient(
cp.asnumpy(denoised_astro), size=((3, 3))
)
# test if the total variation has decreased
assert grad_denoised.dtype == float_dtype
assert np.sqrt((grad_denoised**2).sum()) < np.sqrt((grad**2).sum())
@pytest.mark.parametrize("channel_axis", [0, 1, 2, -1])
def test_denoise_tv_chambolle_multichannel(channel_axis):
denoised0 = restoration.denoise_tv_chambolle(astro[..., 0], weight=0.1)
img = cp.moveaxis(astro, -1, channel_axis)
denoised = restoration.denoise_tv_chambolle(
img, weight=0.1, channel_axis=channel_axis
)
_at = functools.partial(slice_at_axis, axis=channel_axis % img.ndim)
assert_array_equal(denoised[_at(0)], denoised0)
# tile astronaut subset to generate 3D+channels data
astro3 = cp.tile(astro[:64, :64, cp.newaxis, :], [1, 1, 2, 1])
# modify along tiled dimension to give non-zero gradient on 3rd axis
astro3[:, :, 0, :] = 2 * astro3[:, :, 0, :]
denoised0 = restoration.denoise_tv_chambolle(astro3[..., 0], weight=0.1)
astro3 = cp.moveaxis(astro3, -1, channel_axis)
denoised = restoration.denoise_tv_chambolle(
astro3, weight=0.1, channel_axis=channel_axis
)
_at = functools.partial(slice_at_axis, axis=channel_axis % astro3.ndim)
assert_array_equal(denoised[_at(0)], denoised0)
def test_denoise_tv_chambolle_float_result_range():
# astronaut image
img = astro_gray
int_astro = cp.multiply(img, 255).astype(np.uint8)
assert cp.max(int_astro) > 1
denoised_int_astro = restoration.denoise_tv_chambolle(int_astro, weight=0.1)
# test if the value range of output float data is within [0.0:1.0]
assert denoised_int_astro.dtype == _supported_float_type(int_astro.dtype)
assert cp.max(denoised_int_astro) <= 1.0
assert cp.min(denoised_int_astro) >= 0.0
def test_denoise_tv_chambolle_3d():
"""Apply the TV denoising algorithm on a 3D image representing a sphere."""
x, y, z = cp.ogrid[0:40, 0:40, 0:40]
mask = (x - 22) ** 2 + (y - 20) ** 2 + (z - 17) ** 2 < 8**2
mask = 100 * mask.astype(float)
mask += 60
mask += 20 * cp.random.rand(*mask.shape)
mask[mask < 0] = 0
mask[mask > 255] = 255
mask = mask.astype(np.uint8)
res = restoration.denoise_tv_chambolle(mask, weight=0.1)
assert res.dtype == _supported_float_type(mask.dtype)
assert res.std() * 255 < mask.std()
def test_denoise_tv_chambolle_1d():
"""Apply the TV denoising algorithm on a 1D sinusoid."""
x = 125 + 100 * cp.sin(cp.linspace(0, 8 * cp.pi, 1000))
x += 20 * cp.random.rand(x.size)
x = cp.clip(x, 0, 255)
x = x.astype(np.uint8)
res = restoration.denoise_tv_chambolle(x, weight=0.1)
assert res.dtype == _supported_float_type(x.dtype)
assert res.std() * 255 < x.std()
def test_denoise_tv_chambolle_4d():
"""TV denoising for a 4D input."""
im = 255 * cp.random.rand(8, 8, 8, 8)
im = im.astype(np.uint8)
res = restoration.denoise_tv_chambolle(im, weight=0.1)
assert res.dtype == _supported_float_type(im.dtype)
assert res.std() * 255 < im.std()
def test_denoise_tv_chambolle_weighting():
# make sure a specified weight gives consistent results regardless of
# the number of input image dimensions
rstate = cp.random.RandomState(1234)
img2d = astro_gray.copy()
img2d += 0.15 * rstate.standard_normal(img2d.shape)
img2d = cp.clip(img2d, 0, 1)
# generate 4D image by tiling
img4d = cp.tile(img2d[..., None, None], (1, 1, 2, 2))
w = 0.2
denoised_2d = restoration.denoise_tv_chambolle(img2d, weight=w)
denoised_4d = restoration.denoise_tv_chambolle(img4d, weight=w)
assert (
structural_similarity(
denoised_2d, denoised_4d[:, :, 0, 0], data_range=1.0
)
> 0.98
)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration/tests/test_j_invariant.py
|
import cupy as cp
import numpy as np
import pytest
from skimage.data import camera, chelsea
# from cucim.skimage.restoration import denoise_wavelet
from skimage.restoration import denoise_wavelet
from cucim.skimage._shared.utils import _supported_float_type
from cucim.skimage.data import binary_blobs
from cucim.skimage.metrics import mean_squared_error as mse
from cucim.skimage.restoration import calibrate_denoiser, denoise_tv_chambolle
from cucim.skimage.restoration.j_invariant import denoise_invariant
from cucim.skimage.util import img_as_float, random_noise
test_img = img_as_float(cp.asarray(camera()))
test_img_color = img_as_float(cp.asarray(chelsea()))
test_img_3d = img_as_float(binary_blobs(64, n_dim=3)) / 2
noisy_img = random_noise(test_img, mode="gaussian", var=0.01)
noisy_img_color = random_noise(test_img_color, mode="gaussian", var=0.01)
noisy_img_3d = random_noise(test_img_3d, mode="gaussian", var=0.1)
# TODO: replace with CuPy version once completed
def _denoise_wavelet(image, rescale_sigma=True, **kwargs):
return cp.asarray(
denoise_wavelet(
cp.asnumpy(image), rescale_sigma=rescale_sigma, **kwargs
)
)
def test_denoise_invariant():
# denoised_img = denoise_invariant(noisy_img, _denoise_wavelet)
denoised_img = denoise_invariant(noisy_img, denoise_tv_chambolle)
denoised_mse = mse(denoised_img, test_img)
original_mse = mse(noisy_img, test_img)
assert denoised_mse < original_mse
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_denoise_invariant_color(dtype):
denoised_img_color = denoise_invariant(
noisy_img_color.astype(dtype),
_denoise_wavelet,
denoiser_kwargs=dict(channel_axis=-1),
)
denoised_mse = mse(denoised_img_color, test_img_color)
original_mse = mse(noisy_img_color, test_img_color)
assert denoised_mse < original_mse
assert denoised_img_color.dtype == _supported_float_type(dtype)
def test_denoise_invariant_3d():
denoised_img_3d = denoise_invariant(noisy_img_3d, _denoise_wavelet)
denoised_mse = mse(denoised_img_3d, test_img_3d)
original_mse = mse(noisy_img_3d, test_img_3d)
assert denoised_mse < original_mse
def test_calibrate_denoiser_extra_output():
parameter_ranges = {"sigma": np.linspace(0.1, 1, 5) / 2}
_, (parameters_tested, losses) = calibrate_denoiser(
noisy_img,
_denoise_wavelet,
denoise_parameters=parameter_ranges,
extra_output=True,
)
all_denoised = [
denoise_invariant(
noisy_img, _denoise_wavelet, denoiser_kwargs=denoiser_kwargs
)
for denoiser_kwargs in parameters_tested
]
ground_truth_losses = [float(mse(img, test_img)) for img in all_denoised]
assert np.argmin(losses) == np.argmin(ground_truth_losses)
def test_calibrate_denoiser():
parameter_ranges = {"sigma": np.linspace(0.1, 1, 5) / 2}
denoiser = calibrate_denoiser(
noisy_img, _denoise_wavelet, denoise_parameters=parameter_ranges
)
denoised_mse = mse(denoiser(noisy_img), test_img)
original_mse = mse(noisy_img, test_img)
assert denoised_mse < original_mse
def test_calibrate_denoiser_tv():
parameter_ranges = {"weight": np.linspace(0.01, 0.4, 10)}
denoiser = calibrate_denoiser(
noisy_img, denoise_tv_chambolle, denoise_parameters=parameter_ranges
)
denoised_mse = mse(denoiser(noisy_img), test_img)
original_mse = mse(noisy_img, test_img)
assert denoised_mse < original_mse
def test_input_image_not_modified():
input_image = noisy_img.copy()
parameter_ranges = {"sigma": np.random.random(5) / 2}
calibrate_denoiser(
input_image, _denoise_wavelet, denoise_parameters=parameter_ranges
)
assert cp.all(noisy_img == input_image)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/restoration/tests/test_restoration.py
|
import cupy as cp
import numpy as np
import pytest
from cupyx.scipy import ndimage as ndi
from scipy import signal
from cucim.skimage import restoration
from cucim.skimage._shared.testing import expected_warnings, fetch
from cucim.skimage._shared.utils import _supported_float_type
from cucim.skimage.color import rgb2gray
from cucim.skimage.restoration import uft
def camera():
import skimage
import skimage.data
return cp.asarray(skimage.img_as_float(skimage.data.camera()))
def astronaut():
import skimage
import skimage.data
return cp.asarray(skimage.img_as_float(skimage.data.astronaut()))
test_img = camera()
def _get_rtol_atol(dtype):
rtol = 1e-3
atol = 0
if dtype == np.float16:
rtol = 1e-2
atol = 1e-3
elif dtype == np.float32:
atol = 1e-5
return rtol, atol
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_wiener(dtype):
psf = np.ones((5, 5), dtype=dtype) / 25
data = signal.convolve2d(cp.asnumpy(test_img), psf, "same")
np.random.seed(0)
data += 0.1 * data.std() * np.random.standard_normal(data.shape)
psf = cp.asarray(psf, dtype=dtype)
data = cp.asarray(data, dtype=dtype)
deconvolved = restoration.wiener(data, psf, 0.05)
assert deconvolved.dtype == _supported_float_type(dtype)
rtol, atol = _get_rtol_atol(dtype)
path = fetch("restoration/tests/camera_wiener.npy")
cp.testing.assert_allclose(deconvolved, np.load(path), rtol=rtol, atol=atol)
_, laplacian = uft.laplacian(2, data.shape)
otf = uft.ir2tf(psf, data.shape, is_real=False)
assert otf.real.dtype == _supported_float_type(dtype)
deconvolved = restoration.wiener(
data, otf, 0.05, reg=laplacian, is_real=False
)
assert deconvolved.real.dtype == _supported_float_type(dtype)
cp.testing.assert_allclose(
cp.real(deconvolved), np.load(path), rtol=rtol, atol=atol
)
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_unsupervised_wiener(dtype):
psf = np.ones((5, 5), dtype=dtype) / 25
data = signal.convolve2d(cp.asnumpy(test_img), psf, "same")
seed = 16829302
# keep old-style RandomState here for compatibility with previously stored
# reference data in camera_unsup.npy and camera_unsup2.npy
rng = np.random.RandomState(seed)
data += 0.1 * data.std() * rng.standard_normal(data.shape)
psf = cp.asarray(psf, dtype=dtype)
data = cp.asarray(data, dtype=dtype)
deconvolved, _ = restoration.unsupervised_wiener(data, psf, rng=seed)
float_type = _supported_float_type(dtype)
assert deconvolved.dtype == float_type
rtol, atol = _get_rtol_atol(dtype)
# CuPy Backend: Cannot use the following comparison to scikit-image data
# due to different random values generated by cp.random
# within unsupervised_wiener.
# Verified similar appearance qualitatively.
# path = fetch("restoration/tests/camera_unsup.npy")
# cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3)
_, laplacian = uft.laplacian(2, data.shape)
otf = uft.ir2tf(psf, data.shape, is_real=False)
assert otf.real.dtype == float_type
np.random.seed(0)
deconvolved2 = restoration.unsupervised_wiener( # noqa
data,
otf,
reg=laplacian,
is_real=False,
user_params={
"callback": lambda x: None,
"max_num_iter": 200,
"min_num_iter": 30,
},
rng=seed,
)[0]
assert deconvolved2.real.dtype == float_type
# CuPy Backend: Cannot use the following comparison to scikit-image data
# due to different random values generated by cp.random
# within unsupervised_wiener.
# Verified similar appearance qualitatively.
# path = fetch("restoration/tests/camera_unsup2.npy")
# cp.testing.assert_allclose(cp.real(deconvolved), np.load(path), rtol=1e-3)
def test_unsupervised_wiener_deprecated_user_param():
psf = np.ones((5, 5), dtype=float) / 25
data = signal.convolve2d(cp.asnumpy(test_img), psf, "same")
data = cp.array(data)
psf = cp.array(psf)
otf = uft.ir2tf(psf, data.shape, is_real=False)
_, laplacian = uft.laplacian(2, data.shape)
with expected_warnings(
[
"`max_iter` is a deprecated key",
"`min_iter` is a deprecated key",
"`random_state` is a deprecated argument name",
]
):
restoration.unsupervised_wiener(
data,
otf,
reg=laplacian,
is_real=False,
user_params={"max_iter": 200, "min_iter": 30},
random_state=5,
)
with expected_warnings(
[
"`seed` is a deprecated argument name",
]
):
restoration.unsupervised_wiener(
data,
otf,
reg=laplacian,
is_real=False,
seed=5,
)
def test_image_shape():
"""Test that shape of output image in deconvolution is same as input.
This addresses issue #1172.
"""
point = cp.zeros((5, 5), float)
point[2, 2] = 1.0
psf = ndi.gaussian_filter(point, sigma=1.0)
    # crop a (100, 100) region (issue #1172 originally involved a (45, 45)
    # image)
image = cp.asarray(test_img[65:165, 215:315]) # just the face
image_conv = ndi.convolve(image, psf)
deconv_sup = restoration.wiener(image_conv, psf, 1)
deconv_un = restoration.unsupervised_wiener(image_conv, psf)[0]
# test the shape
assert image.shape == deconv_sup.shape
assert image.shape == deconv_un.shape
# test the reconstruction error
sup_relative_error = cp.abs(deconv_sup - image) / image
un_relative_error = cp.abs(deconv_un - image) / image
cp.testing.assert_array_less(cp.median(sup_relative_error), 0.1)
cp.testing.assert_array_less(cp.median(un_relative_error), 0.1)
def test_richardson_lucy():
rstate = np.random.RandomState(0)
psf = np.ones((5, 5)) / 25
data = signal.convolve2d(cp.asnumpy(test_img), psf, "same")
np.random.seed(0)
data += 0.1 * data.std() * rstate.standard_normal(data.shape)
data = cp.asarray(data)
psf = cp.asarray(psf)
deconvolved = restoration.richardson_lucy(data, psf, 5)
path = fetch("restoration/tests/camera_rl.npy")
cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-4)
@pytest.mark.parametrize("dtype_image", [cp.float16, cp.float32, cp.float64])
@pytest.mark.parametrize("dtype_psf", [cp.float32, cp.float64])
def test_richardson_lucy_filtered(dtype_image, dtype_psf):
if dtype_image == cp.float64:
atol = 1e-8
else:
atol = 1e-4
test_img_astro = rgb2gray(astronaut())
psf = cp.ones((5, 5), dtype=dtype_psf) / 25
data = cp.array(
signal.convolve2d(cp.asnumpy(test_img_astro), cp.asnumpy(psf), "same"),
dtype=dtype_image,
)
deconvolved = restoration.richardson_lucy(data, psf, 5, filter_epsilon=1e-6)
assert deconvolved.dtype == _supported_float_type(data.dtype)
path = fetch("restoration/tests/astronaut_rl.npy")
cp.testing.assert_allclose(deconvolved, np.load(path), rtol=1e-3, atol=atol)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/exposure/histogram_matching.py
|
import cupy as cp
from .._shared import utils
def _match_cumulative_cdf(source, template):
"""
    Return modified source array so that the cumulative distribution function
    of its values matches the cumulative distribution function of the template.
"""
if source.dtype.kind == "u":
src_lookup = source.reshape(-1)
src_counts = cp.bincount(src_lookup)
tmpl_counts = cp.bincount(template.reshape(-1))
# omit values where the count was 0
tmpl_values = cp.nonzero(tmpl_counts)[0]
tmpl_counts = tmpl_counts[tmpl_values]
else:
src_values, src_lookup, src_counts = cp.unique(
source.reshape(-1), return_inverse=True, return_counts=True
)
tmpl_values, tmpl_counts = cp.unique(
template.reshape(-1), return_counts=True
)
# calculate normalized quantiles for each array
src_quantiles = cp.cumsum(src_counts) / source.size
tmpl_quantiles = cp.cumsum(tmpl_counts) / template.size
interp_a_values = cp.interp(src_quantiles, tmpl_quantiles, tmpl_values)
return interp_a_values[src_lookup].reshape(source.shape)
@utils.channel_as_last_axis(channel_arg_positions=(0, 1))
def match_histograms(image, reference, *, channel_axis=None):
"""Adjust an image so that its cumulative histogram matches that of another.
The adjustment is applied separately for each channel.
Parameters
----------
image : ndarray
Input image. Can be gray-scale or in color.
reference : ndarray
Image to match histogram of. Must have the same number of channels as
image.
channel_axis : int or None, optional
If None, the image is assumed to be a grayscale (single channel) image.
Otherwise, this parameter indicates which axis of the array corresponds
to channels.
Returns
-------
matched : ndarray
Transformed input image.
Raises
------
ValueError
Thrown when the number of channels in the input image and the reference
differ.
References
----------
.. [1] http://paulbourke.net/miscellaneous/equalisation/
"""
if image.ndim != reference.ndim:
raise ValueError(
"Image and reference must have the same number " "of channels."
)
if channel_axis is not None:
if image.shape[channel_axis] != reference.shape[channel_axis]:
raise ValueError(
"Number of channels in the input image and "
"reference image must match!"
)
matched = cp.empty(image.shape, dtype=image.dtype)
for channel in range(image.shape[-1]):
matched_channel = _match_cumulative_cdf(
image[..., channel], reference[..., channel]
)
matched[..., channel] = matched_channel
else:
matched = _match_cumulative_cdf(image, reference)
if matched.dtype.kind == "f":
# output a float32 result when the input is float16 or float32
out_dtype = utils._supported_float_type(image.dtype)
matched = matched.astype(out_dtype, copy=False)
return matched
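# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): matching the
# histogram of one synthetic uint8 image to another, assuming a CuPy-capable
# environment. The helper name and demo data are hypothetical; the matched
# result is a float image whose values are drawn from the reference range.
def _demo_match_histograms():
    import cupy as cp
    image = (cp.random.rand(64, 64) * 128).astype(cp.uint8)
    reference = (64 + cp.random.rand(64, 64) * 191).astype(cp.uint8)
    matched = match_histograms(image, reference)
    assert matched.shape == image.shape
    # Matched values are interpolated among the reference values, so they
    # stay within the reference min/max.
    assert float(matched.min()) >= float(reference.min())
    assert float(matched.max()) <= float(reference.max())
    return matched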
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/exposure/_adapthist.py
|
"""
Adapted code from "Contrast Limited Adaptive Histogram Equalization" by Karel
Zuiderveld <[email protected]>, Graphics Gems IV, Academic Press, 1994.
http://tog.acm.org/resources/GraphicsGems/
Relicensed with permission of the author under the Modified BSD license.
"""
import functools
import itertools
import math
import numbers
import operator
import cupy as cp
import numpy as np
# TODO: replace _misc.prod with math.prod once minimum Python >= 3.8
from cucim import _misc
from cucim.skimage.exposure.exposure import rescale_intensity
from .._shared.utils import _supported_float_type
from .._vendored import pad
from ..color.adapt_rgb import adapt_rgb, hsv_value
from ..util import img_as_uint
NR_OF_GRAY = 2**14 # number of grayscale levels to use in CLAHE algorithm
@adapt_rgb(hsv_value)
def equalize_adapthist(image, kernel_size=None, clip_limit=0.01, nbins=256):
"""Contrast Limited Adaptive Histogram Equalization (CLAHE).
    An algorithm for local contrast enhancement that uses histograms computed
over different tile regions of the image. Local details can therefore be
enhanced even in regions that are darker or lighter than most of the image.
Parameters
----------
image : (N1, ...,NN[, C]) ndarray
Input image.
kernel_size : int or array_like, optional
Defines the shape of contextual regions used in the algorithm. If
iterable is passed, it must have the same number of elements as
``image.ndim`` (without color channel). If integer, it is broadcasted
        to each `image` dimension. By default, ``kernel_size`` is 1/8 of
        each ``image`` dimension.
clip_limit : float, optional
Clipping limit, normalized between 0 and 1 (higher values give more
contrast).
nbins : int, optional
Number of gray bins for histogram ("data range").
Returns
-------
out : (N1, ...,NN[, C]) ndarray
Equalized image with float64 dtype.
See Also
--------
equalize_hist, rescale_intensity
Notes
-----
* For color images, the following steps are performed:
- The image is converted to HSV color space
- The CLAHE algorithm is run on the V (Value) channel
- The image is converted back to RGB space and returned
* For RGBA images, the original alpha channel is removed.
.. versionchanged:: 0.17
The values returned by this function are slightly shifted upwards
because of an internal change in rounding behavior.
References
----------
.. [1] http://tog.acm.org/resources/GraphicsGems/
.. [2] https://en.wikipedia.org/wiki/CLAHE#CLAHE
"""
float_dtype = _supported_float_type(image.dtype)
image = img_as_uint(image)
image = cp.around(
rescale_intensity(image, out_range=(0, NR_OF_GRAY - 1))
).astype(cp.min_scalar_type(NR_OF_GRAY))
if kernel_size is None:
kernel_size = tuple([max(s // 8, 1) for s in image.shape])
elif isinstance(kernel_size, numbers.Number):
kernel_size = (kernel_size,) * image.ndim
elif len(kernel_size) != image.ndim:
raise ValueError(f"Incorrect value of `kernel_size`: {kernel_size}")
kernel_size = [int(k) for k in kernel_size]
image = _clahe(image, kernel_size, clip_limit, nbins)
image = image.astype(float_dtype, copy=False)
return rescale_intensity(image)
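# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): running CLAHE on a
# synthetic low-contrast image, assuming a CuPy-capable environment. The
# helper name and demo image are hypothetical; the point is only to show the
# call signature and the float output rescaled to [0, 1].
def _demo_equalize_adapthist():
    import cupy as cp
    # A dim gradient occupying a small part of the intensity range.
    image = cp.tile(cp.linspace(0.2, 0.4, 128), (128, 1))
    out = equalize_adapthist(image, kernel_size=32, clip_limit=0.02)
    assert out.shape == image.shape
    assert float(out.min()) >= 0.0 and float(out.max()) <= 1.0
    return out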
def _clahe(image, kernel_size, clip_limit, nbins):
"""Contrast Limited Adaptive Histogram Equalization.
Parameters
----------
image : (N1,...,NN) ndarray
Input image.
kernel_size : int or N-tuple of int
Defines the shape of contextual regions used in the algorithm.
clip_limit : float
Normalized clipping limit between 0 and 1 (higher values give more
contrast).
nbins : int
Number of gray bins for histogram ("data range").
Returns
-------
out : (N1,...,NN) ndarray
Equalized image.
The number of "effective" graylevels in the output image is set by `nbins`;
selecting a small value (e.g. 128) speeds up processing and still produces
an output image of good quality. A clip limit of 0 or larger than or equal
to 1 results in standard (non-contrast limited) AHE.
"""
ndim = image.ndim
dtype = image.dtype
# pad the image such that the shape in each dimension
# - is a multiple of the kernel_size and
# - is preceded by half a kernel size
pad_start_per_dim = [k // 2 for k in kernel_size]
pad_end_per_dim = [
(k - s % k) % k + math.ceil(k / 2.0)
for k, s in zip(kernel_size, image.shape)
]
image = pad(
image,
[[p_i, p_f] for p_i, p_f in zip(pad_start_per_dim, pad_end_per_dim)],
mode="reflect",
)
# determine gray value bins
bin_size = 1 + NR_OF_GRAY // nbins
lut = cp.arange(NR_OF_GRAY, dtype=cp.min_scalar_type(NR_OF_GRAY))
lut //= bin_size
image = lut[image]
# calculate graylevel mappings for each contextual region
# rearrange image into flattened contextual regions
ns_hist = [int(s / k) - 1 for s, k in zip(image.shape, kernel_size)]
hist_blocks_shape = functools.reduce(
operator.add, [(s, k) for s, k in zip(ns_hist, kernel_size)]
)
hist_blocks_axis_order = tuple(range(0, ndim * 2, 2)) + tuple(
range(1, ndim * 2, 2)
)
hist_slices = [
slice(k // 2, k // 2 + n * k) for k, n in zip(kernel_size, ns_hist)
]
hist_blocks = image[tuple(hist_slices)].reshape(hist_blocks_shape)
hist_blocks = hist_blocks.transpose(hist_blocks_axis_order)
hist_block_assembled_shape = hist_blocks.shape
hist_blocks = hist_blocks.reshape((_misc.prod(ns_hist), -1))
# Calculate actual clip limit
kernel_elements = _misc.prod(kernel_size)
if clip_limit > 0.0:
clim = int(max(clip_limit * kernel_elements, 1))
else:
# largest possible value, i.e., do not clip (AHE)
clim = kernel_elements
# Note: for 4096, 4096 input and default args, shapes are:
# hist_blocks.shape = (64, 262144)
# hist.shape = (64, 256)
hist = cp.apply_along_axis(cp.bincount, -1, hist_blocks, minlength=nbins)
if isinstance(hist_blocks, cp.ndarray):
# CuPy Backend:
# faster to loop over the arrays on the host
# (hist is small and clip_histogram has too much overhead)
# TODO: implement clip_histogram kernel to avoid synchronization?
hist = cp.asarray(
np.apply_along_axis( # synchronize!
clip_histogram, -1, cp.asnumpy(hist), clip_limit=clim
)
)
else:
hist = cp.apply_along_axis(clip_histogram, -1, hist, clip_limit=clim)
hist = map_histogram(hist, 0, NR_OF_GRAY - 1, kernel_elements)
hist = hist.reshape(hist_block_assembled_shape[:ndim] + (-1,))
# duplicate leading mappings in each dim
map_array = pad(hist, [(1, 1) for _ in range(ndim)] + [(0, 0)], mode="edge")
# Perform multilinear interpolation of graylevel mappings
# using the convention described here:
# https://en.wikipedia.org/w/index.php?title=Adaptive_histogram_
# equalization&oldid=936814673#Efficient_computation_by_interpolation
# rearrange image into blocks for vectorized processing
ns_proc = [int(s / k) for s, k in zip(image.shape, kernel_size)]
blocks_shape = functools.reduce(
operator.add, [(s, k) for s, k in zip(ns_proc, kernel_size)]
)
blocks_axis_order = hist_blocks_axis_order
blocks = image.reshape(blocks_shape)
blocks = blocks.transpose(blocks_axis_order)
blocks_flattened_shape = blocks.shape
blocks = blocks.reshape(
(_misc.prod(ns_proc), _misc.prod(blocks.shape[ndim:]))
)
# calculate interpolation coefficients
coeffs = cp.meshgrid(
*tuple([cp.arange(k) / k for k in kernel_size[::-1]]), indexing="ij"
)
coeffs = [cp.transpose(c).flatten() for c in coeffs]
inv_coeffs = [1 - c for c in coeffs]
# sum over contributions of neighboring contextual
# regions in each direction
result = cp.zeros(blocks.shape, dtype=cp.float32)
for iedge, edge in enumerate(itertools.product(*((range(2),) * ndim))):
edge_maps = map_array[
tuple(slice(e, e + n) for e, n in zip(edge, ns_proc))
]
edge_maps = edge_maps.reshape((_misc.prod(ns_proc), -1))
# apply map
edge_mapped = cp.take_along_axis(edge_maps, blocks, axis=-1)
# interpolate
edge_coeffs = functools.reduce(
operator.mul,
[[inv_coeffs, coeffs][e][d] for d, e in enumerate(edge[::-1])],
)
result += (edge_mapped * edge_coeffs).astype(result.dtype)
result = result.astype(dtype)
# rebuild result image from blocks
result = result.reshape(blocks_flattened_shape)
blocks_axis_rebuild_order = functools.reduce(
operator.add,
[(s, k) for s, k in zip(range(0, ndim), range(ndim, ndim * 2))],
)
result = result.transpose(blocks_axis_rebuild_order)
result = result.reshape(image.shape)
# undo padding
unpad_slices = tuple(
[
slice(p_i, s - p_f)
for p_i, p_f, s in zip(
pad_start_per_dim, pad_end_per_dim, image.shape
)
]
)
result = result[unpad_slices]
return result
# TODO: refactor this clip_histogram bottleneck.
def clip_histogram(hist, clip_limit):
"""Perform clipping of the histogram and redistribution of bins.
The histogram is clipped and the number of excess pixels is counted.
Afterwards the excess pixels are equally redistributed across the
    whole histogram (provided the bin count stays below the clip limit).
Parameters
----------
hist : ndarray
Histogram array.
clip_limit : int
Maximum allowed bin count.
Returns
-------
hist : ndarray
Clipped histogram.
"""
# calculate total number of excess pixels
excess_mask = hist > clip_limit
excess = hist[excess_mask]
n_excess = excess.sum() - excess.size * clip_limit
hist[excess_mask] = clip_limit
# Second part: clip histogram and redistribute excess pixels in each bin
bin_incr = n_excess // hist.size # average binincrement
xp = cp.get_array_module(hist)
upper = clip_limit - bin_incr # Bins larger than upper set to cliplimit
low_mask = hist < upper
n_excess -= hist[low_mask].size * bin_incr
hist[low_mask] += bin_incr
mid_mask = xp.logical_and(hist >= upper, hist < clip_limit)
mid = hist[mid_mask]
n_excess += mid.sum() - mid.size * clip_limit
hist[mid_mask] = clip_limit
while n_excess > 0: # Redistribute remaining excess
prev_n_excess = n_excess
for index in range(hist.size):
under_mask = hist < clip_limit
step_size = max(1, xp.count_nonzero(under_mask) // n_excess)
under_mask = under_mask[index::step_size]
hist[index::step_size][under_mask] += 1
n_excess -= xp.count_nonzero(under_mask)
if n_excess <= 0:
break
if prev_n_excess == n_excess:
break
return hist
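# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): clipping a single
# histogram on the host, the way ``_clahe`` applies it row by row via
# ``np.apply_along_axis``. The helper name and counts below are hypothetical;
# the sketch only shows that no bin exceeds the clip limit afterwards and
# that the excess counts are approximately redistributed over the other bins.
def _demo_clip_histogram():
    import numpy as np
    hist = np.array([50, 0, 3, 200, 7, 1, 90, 5], dtype=np.int64)
    total_before = int(hist.sum())
    clipped = clip_histogram(hist.copy(), clip_limit=60)
    assert int(clipped.max()) <= 60
    # Redistribution keeps the total count close to the original.
    assert abs(int(clipped.sum()) - total_before) <= clipped.size
    return clipped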
def map_histogram(hist, min_val, max_val, n_pixels):
"""Calculate the equalized lookup table (mapping).
    It does so by computing the cumulative sum of the input histogram.
Histogram bins are assumed to be represented by the last array dimension.
Parameters
----------
hist : ndarray
Clipped histogram.
min_val : int
Minimum value for mapping.
max_val : int
Maximum value for mapping.
n_pixels : int
Number of pixels in the region.
Returns
-------
out : ndarray
Mapped intensity LUT.
"""
xp = cp.get_array_module(hist)
out = xp.cumsum(hist, axis=-1).astype(float)
out *= (max_val - min_val) / n_pixels
out += min_val
    xp.clip(out, a_min=None, a_max=max_val, out=out)
return out.astype(int)
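# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): building the
# per-region lookup table from a clipped histogram, with hypothetical counts.
# The cumulative mapping is non-decreasing and stays within the requested
# output range.
def _demo_map_histogram():
    import cupy as cp
    hist = cp.array([10, 0, 25, 25, 5, 35])
    lut = map_histogram(hist, min_val=0, max_val=255, n_pixels=int(hist.sum()))
    assert bool(cp.all(cp.diff(lut) >= 0))  # monotonic mapping
    assert int(lut.min()) >= 0 and int(lut.max()) <= 255
    return lut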
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/exposure/exposure.py
|
import cupy as cp
import numpy as np
from .._shared import utils
from ..util.dtype import dtype_limits, dtype_range
__all__ = [
"histogram",
"cumulative_distribution",
"equalize_hist",
"rescale_intensity",
"adjust_gamma",
"adjust_log",
"adjust_sigmoid",
]
DTYPE_RANGE = dtype_range.copy()
DTYPE_RANGE.update((d.__name__, limits) for d, limits in dtype_range.items())
DTYPE_RANGE.update(
{
"uint10": (0, 2**10 - 1),
"uint12": (0, 2**12 - 1),
"uint14": (0, 2**14 - 1),
"bool": dtype_range[bool],
"float": dtype_range[np.float64],
}
)
def _offset_array(arr, low_boundary, high_boundary):
"""Offset the array to get the lowest value at 0 if negative."""
if low_boundary < 0:
offset = low_boundary
dyn_range = high_boundary - low_boundary
# get smallest dtype that can hold both minimum and offset maximum
offset_dtype = np.promote_types(
np.min_scalar_type(dyn_range), np.min_scalar_type(low_boundary)
)
if arr.dtype != offset_dtype:
# prevent overflow errors when offsetting
arr = arr.astype(offset_dtype)
arr = arr - offset
return arr
def _bincount_histogram_centers(image, source_range):
"""Compute bin centers for bincount-based histogram."""
if source_range not in ["image", "dtype"]:
raise ValueError(
f"Incorrect value for `source_range` argument: {source_range}"
)
if source_range == "image":
image_min = int(image.min().astype(np.int64)) # synchronize
image_max = int(image.max().astype(np.int64)) # synchronize
elif source_range == "dtype":
image_min, image_max = dtype_limits(image, clip_negative=False)
bin_centers = cp.arange(image_min, image_max + 1)
return bin_centers
def _bincount_histogram(image, source_range, bin_centers=None):
"""
Efficient histogram calculation for an image of integers.
This function is significantly more efficient than cupy.histogram but
works only on images of integers. It is based on cupy.bincount.
Parameters
----------
image : array
Input image.
source_range : string
'image' determines the range from the input image.
'dtype' determines the range from the expected range of the images
of that data type.
Returns
-------
hist : array
The values of the histogram.
bin_centers : array
The values at the center of the bins.
"""
if bin_centers is None:
bin_centers = _bincount_histogram_centers(image, source_range)
image_min, image_max = bin_centers[0].item(), bin_centers[-1].item()
image = _offset_array(image, image_min, image_max) # synchronize # noqa
# Casting back to unsigned dtype seems necessary to avoid incorrect
# results for larger integer ranges with CUDA 12.x.
unsigned_dtype_char = image.dtype.char.upper()
image = image.astype(unsigned_dtype_char, copy=False)
hist = cp.bincount(
image.ravel(), minlength=image_max - min(image_min, 0) + 1
)
if source_range == "image":
idx = max(image_min, 0)
hist = hist[idx:]
return hist, bin_centers
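# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the bincount path
# used for integer images, with hypothetical demo data. Each integer value
# gets its own bin, and the counts sum to the number of pixels.
def _demo_bincount_histogram():
    import cupy as cp
    image = cp.array([[2, 2, 5], [3, 5, 5]], dtype=cp.uint8)
    hist, centers = _bincount_histogram(image, source_range="image")
    assert int(centers[0]) == 2 and int(centers[-1]) == 5  # spans min..max
    assert int(hist.sum()) == image.size
    assert int(hist[-1]) == 3  # three pixels have the value 5
    return hist, centers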
def _get_outer_edges(image, hist_range):
"""Determine the outer bin edges to use for `numpy.histogram`.
These are obtained from either the image or hist_range.
Parameters
----------
image : ndarray
Image for which the histogram is to be computed.
hist_range: 2-tuple of int or None
Range of values covered by the histogram bins. If None, the minimum
and maximum values of `image` are used.
Returns
-------
first_edge, last_edge : int
The range spanned by the histogram bins.
Notes
-----
This function is adapted from ``np.lib.histograms._get_outer_edges``.
"""
if hist_range is not None:
first_edge, last_edge = hist_range
if first_edge > last_edge:
raise ValueError(
"max must be larger than min in hist_range parameter."
)
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
f"supplied hist_range of [{first_edge}, {last_edge}] is "
f"not finite"
)
elif image.size == 0:
# handle empty arrays. Can't determine hist_range, so use 0-1.
first_edge, last_edge = 0, 1
else:
first_edge, last_edge = float(image.min()), float(
image.max()
) # synchronize # noqa
if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
raise ValueError(
f"autodetected hist_range of [{first_edge}, {last_edge}] is "
f"not finite"
)
# expand empty hist_range to avoid divide by zero
if first_edge == last_edge:
first_edge = first_edge - 0.5
last_edge = last_edge + 0.5
return first_edge, last_edge
def _get_bin_edges(image, nbins, hist_range):
"""Computes histogram bins for use with `numpy.histogram`.
Parameters
----------
image : ndarray
Image for which the histogram is to be computed.
nbins : int
The number of bins.
hist_range: 2-tuple of int
Range of values covered by the histogram bins.
Returns
-------
bin_edges : ndarray
The histogram bin edges.
Notes
-----
This function is a simplified version of
``np.lib.histograms._get_bin_edges`` that only supports uniform bins.
"""
first_edge, last_edge = _get_outer_edges(image, hist_range)
# numpy/gh-10322 means that type resolution rules are dependent on array
# shapes. To avoid this causing problems, we pick a type now and stick
# with it throughout.
bin_type = np.result_type(first_edge, last_edge, image)
if np.issubdtype(bin_type, np.integer):
bin_type = np.result_type(bin_type, float)
# compute bin edges
bin_edges = np.linspace(
first_edge, last_edge, nbins + 1, endpoint=True, dtype=bin_type
)
return bin_edges
def _get_numpy_hist_range(image, source_range):
if source_range == "image":
hist_range = None
elif source_range == "dtype":
hist_range = dtype_limits(image, clip_negative=False)
else:
raise ValueError(
f"Incorrect value for `source_range` argument: {source_range}"
)
return hist_range
@utils.channel_as_last_axis(multichannel_output=False)
def histogram(
image,
nbins=256,
source_range="image",
normalize=False,
*,
channel_axis=None,
):
"""Return histogram of image.
Unlike `numpy.histogram`, this function returns the centers of bins and
does not rebin integer arrays. For integer arrays, each integer value has
its own bin, which improves speed and intensity-resolution.
If `channel_axis` is not set, the histogram is computed on the flattened
image. For color or multichannel images, set ``channel_axis`` to use a
common binning for all channels. Alternatively, one may apply the function
separately on each channel to obtain a histogram for each color channel
with separate binning.
Parameters
----------
image : array
Input image.
nbins : int, optional
Number of bins used to calculate histogram. This value is ignored for
integer arrays.
source_range : string, optional
'image' (default) determines the range from the input image.
'dtype' determines the range from the expected range of the images
of that data type.
normalize : bool, optional
If True, normalize the histogram by the sum of its values.
channel_axis : int or None, optional
If None, the image is assumed to be a grayscale (single channel) image.
Otherwise, this parameter indicates which axis of the array corresponds
to channels.
Returns
-------
hist : array
The values of the histogram. When ``channel_axis`` is not None, hist
will be a 2D array where the first axis corresponds to channels.
bin_centers : array
The values at the center of the bins.
See Also
--------
cumulative_distribution
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage import exposure, img_as_float
>>> image = img_as_float(cp.array(data.camera()))
>>> cp.histogram(image, bins=2)
(array([ 93585, 168559]), array([0. , 0.5, 1. ]))
>>> exposure.histogram(image, nbins=2)
(array([ 93585, 168559]), array([0.25, 0.75]))
"""
sh = image.shape
if len(sh) == 3 and sh[-1] < 4 and channel_axis is None:
utils.warn(
"This might be a color image. The histogram will be "
"computed on the flattened image. You can instead "
"apply this function to each color channel, or set "
"channel_axis."
)
if channel_axis is not None:
channels = sh[-1]
hist = []
# compute bins based on the raveled array
if cp.issubdtype(image.dtype, cp.integer):
# here bins corresponds to the bin centers
bins = _bincount_histogram_centers(image, source_range)
else:
# determine the bin edges for np.histogram
hist_range = _get_numpy_hist_range(image, source_range)
bins = _get_bin_edges(image, nbins, hist_range)
for chan in range(channels):
h, bc = _histogram(image[..., chan], bins, source_range, normalize)
hist.append(h)
        # Stack the per-channel results into CuPy arrays
bin_centers = cp.asarray(bc)
hist = cp.stack(hist, axis=0)
else:
hist, bin_centers = _histogram(image, nbins, source_range, normalize)
return hist, bin_centers
def _histogram(image, bins, source_range, normalize):
"""
Parameters
----------
image : ndarray
Image for which the histogram is to be computed.
bins : int or ndarray
The number of histogram bins. For images with integer dtype, an array
containing the bin centers can also be provided. For images with
floating point dtype, this can be an array of bin_edges for use by
``np.histogram``.
source_range : string, optional
'image' (default) determines the range from the input image.
'dtype' determines the range from the expected range of the images
of that data type.
normalize : bool, optional
If True, normalize the histogram by the sum of its values.
"""
image = image.flatten()
# For integer types, histogramming with bincount is more efficient.
if np.issubdtype(image.dtype, cp.integer):
bin_centers = bins if isinstance(bins, cp.ndarray) else None
hist, bin_centers = _bincount_histogram(
image, source_range, bin_centers
)
else:
hist_range = _get_numpy_hist_range(image, source_range)
hist, bin_edges = cp.histogram(image, bins=bins, range=hist_range)
bin_centers = (bin_edges[:-1] + bin_edges[1:]) / 2.0
if normalize:
hist = hist / cp.sum(hist)
return hist, bin_centers
def cumulative_distribution(image, nbins=256):
"""Return cumulative distribution function (cdf) for the given image.
Parameters
----------
image : array
Image array.
nbins : int, optional
Number of bins for image histogram.
Returns
-------
img_cdf : array
Values of cumulative distribution function.
bin_centers : array
Centers of bins.
See Also
--------
histogram
References
----------
.. [1] https://en.wikipedia.org/wiki/Cumulative_distribution_function
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage import exposure, img_as_float
>>> image = img_as_float(cp.array(data.camera()))
>>> hi = exposure.histogram(image)
>>> cdf = exposure.cumulative_distribution(image)
>>> cp.alltrue(cdf[0] == cp.cumsum(hi[0])/float(image.size))
array(True)
"""
hist, bin_centers = histogram(image, nbins)
img_cdf = hist.cumsum()
img_cdf = img_cdf / float(img_cdf[-1])
return img_cdf, bin_centers
def equalize_hist(image, nbins=256, mask=None):
"""Return image after histogram equalization.
Parameters
----------
image : array
Image array.
nbins : int, optional
Number of bins for image histogram. Note: this argument is
ignored for integer images, for which each integer is its own
bin.
mask: ndarray of bools or 0s and 1s, optional
Array of same shape as `image`. Only points at which mask == True
are used for the equalization, which is applied to the whole image.
Returns
-------
out : float array
Image array after histogram equalization.
Notes
-----
This function is adapted from [1]_ with the author's permission.
References
----------
.. [1] http://www.janeriksolem.net/histogram-equalization-with-python-and.html
.. [2] https://en.wikipedia.org/wiki/Histogram_equalization
""" # noqa
if mask is not None:
mask = mask.astype(bool, copy=False)
cdf, bin_centers = cumulative_distribution(image[mask], nbins)
else:
cdf, bin_centers = cumulative_distribution(image, nbins)
out = cp.interp(image.ravel(), bin_centers, cdf)
out = out.reshape(image.shape)
    # Unfortunately, cp.interp (like np.interp) always promotes to float64,
    # so we
# have to cast back to single precision when float32 output is desired
return out.astype(utils._supported_float_type(image.dtype), copy=False)
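# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): global histogram
# equalization on a synthetic image, assuming a CuPy-capable environment.
# The helper name is hypothetical. The output is a float image in [0, 1]
# whose values follow the cumulative distribution of the input, so its mean
# lands near 0.5.
def _demo_equalize_hist():
    import cupy as cp
    image = cp.random.rand(64, 64)
    out = equalize_hist(image, nbins=256)
    assert out.shape == image.shape
    assert float(out.min()) >= 0.0 and float(out.max()) <= 1.0
    assert 0.4 < float(out.mean()) < 0.6
    return out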
def intensity_range(image, range_values="image", clip_negative=False):
"""Return image intensity range (min, max) based on desired value type.
Parameters
----------
image : array
Input image.
range_values : str or 2-tuple, optional
The image intensity range is configured by this parameter.
The possible values for this parameter are enumerated below.
'image'
Return image min/max as the range.
'dtype'
Return min/max of the image's dtype as the range.
dtype-name
Return intensity range based on desired `dtype`. Must be valid key
in `DTYPE_RANGE`. Note: `image` is ignored for this range type.
2-tuple
Return `range_values` as min/max intensities. Note that there's no
reason to use this function if you just want to specify the
intensity range explicitly. This option is included for functions
that use `intensity_range` to support all desired range types.
clip_negative : bool, optional
If True, clip the negative range (i.e. return 0 for min intensity)
even if the image dtype allows negative values.
Returns
-------
i_range : tuple
A 2-tuple where the first element is the minimum and the second is the
maximum.
"""
if range_values == "dtype":
range_values = image.dtype.type
if range_values == "image":
i_min = image.min().item()
i_max = image.max().item()
elif range_values in DTYPE_RANGE:
i_min, i_max = DTYPE_RANGE[range_values]
if clip_negative:
i_min = 0
else:
i_min, i_max = range_values
return i_min, i_max
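# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the two most common
# `range_values` modes, using hypothetical demo data. 'image' reports the
# observed min/max, while 'dtype' reports the full range of the data type.
def _demo_intensity_range():
    import cupy as cp
    image = cp.array([10, 200], dtype=cp.uint8)
    assert intensity_range(image, "image") == (10, 200)
    assert intensity_range(image, "dtype") == (0, 255)
    return intensity_range(image, "image")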
def _output_dtype(dtype_or_range, image_dtype):
"""Determine the output dtype for rescale_intensity.
The dtype is determined according to the following rules:
- if ``dtype_or_range`` is a dtype, that is the output dtype.
- if ``dtype_or_range`` is a dtype string, that is the dtype used, unless
it is not a NumPy data type (e.g. 'uint12' for 12-bit unsigned integers),
in which case the data type that can contain it will be used
(e.g. uint16 in this case).
- if ``dtype_or_range`` is a pair of values, the output data type will be
``_supported_float_type(image_dtype)``. This preserves float32 output for
float32 inputs.
Parameters
----------
dtype_or_range : type, string, or 2-tuple of int/float
The desired range for the output, expressed as either a NumPy dtype or
as a (min, max) pair of numbers.
image_dtype : np.dtype
The input image dtype.
Returns
-------
out_dtype : type
The data type appropriate for the desired output.
"""
if type(dtype_or_range) in [list, tuple, np.ndarray]:
# pair of values: always return float.
return utils._supported_float_type(image_dtype)
if type(dtype_or_range) == type:
# already a type: return it
return dtype_or_range
if dtype_or_range in DTYPE_RANGE:
# string key in DTYPE_RANGE dictionary
try:
# if it's a canonical numpy dtype, convert
return np.dtype(dtype_or_range).type
except TypeError: # uint10, uint12, uint14
# otherwise, return uint16
return np.uint16
else:
raise ValueError(
"Incorrect value for out_range, should be a valid image data "
f"type or a pair of values, got {dtype_or_range}."
)
def rescale_intensity(image, in_range="image", out_range="dtype"):
"""Return image after stretching or shrinking its intensity levels.
The desired intensity range of the input and output, `in_range` and
`out_range` respectively, are used to stretch or shrink the intensity range
of the input image. See examples below.
Parameters
----------
image : array
Image array.
in_range, out_range : str or 2-tuple, optional
Min and max intensity values of input and output image.
The possible values for this parameter are enumerated below.
'image'
Use image min/max as the intensity range.
'dtype'
Use min/max of the image's dtype as the intensity range.
dtype-name
Use intensity range based on desired `dtype`. Must be valid key
in `DTYPE_RANGE`.
2-tuple
Use `range_values` as explicit min/max intensities.
Returns
-------
out : array
Image array after rescaling its intensity. This image is the same dtype
as the input image.
Notes
-----
.. versionchanged:: 0.17
The dtype of the output array has changed to match the output dtype, or
float if the output range is specified by a pair of values.
See Also
--------
equalize_hist
Examples
--------
By default, the min/max intensities of the input image are stretched to
the limits allowed by the image's dtype, since `in_range` defaults to
'image' and `out_range` defaults to 'dtype':
>>> image = cp.array([51, 102, 153], dtype=np.uint8)
>>> rescale_intensity(image)
array([ 0, 127, 255], dtype=uint8)
It's easy to accidentally convert an image dtype from uint8 to float:
>>> 1.0 * image
array([ 51., 102., 153.])
Use `rescale_intensity` to rescale to the proper range for float dtypes:
>>> image_float = 1.0 * image
>>> rescale_intensity(image_float)
array([0. , 0.5, 1. ])
To maintain the low contrast of the original, use the `in_range` parameter:
>>> rescale_intensity(image_float, in_range=(0, 255))
array([0.2, 0.4, 0.6])
If the min/max value of `in_range` is more/less than the min/max image
intensity, then the intensity levels are clipped:
>>> rescale_intensity(image_float, in_range=(0, 102))
array([0.5, 1. , 1. ])
If you have an image with signed integers but want to rescale the image to
just the positive range, use the `out_range` parameter. In that case, the
output dtype will be float:
>>> image = cp.asarray([-10, 0, 10], dtype=np.int8)
>>> rescale_intensity(image, out_range=(0, 127))
array([ 0. , 63.5, 127. ])
To get the desired range with a specific dtype, use ``.astype()``:
>>> rescale_intensity(image, out_range=(0, 127)).astype(np.int8)
array([ 0, 63, 127], dtype=int8)
If the input image is constant, the output will be clipped directly to the
output range:
>>> image = cp.asarray([130, 130, 130], dtype=np.int32)
>>> rescale_intensity(image, out_range=(0, 127)).astype(np.int32)
array([127, 127, 127], dtype=int32)
"""
if out_range in ["dtype", "image"]:
out_dtype = _output_dtype(image.dtype.type, image.dtype)
else:
out_dtype = _output_dtype(out_range, image.dtype)
imin, imax = map(float, intensity_range(image, in_range))
omin, omax = map(
float, intensity_range(image, out_range, clip_negative=(imin >= 0))
)
if np.any(np.isnan([imin, imax, omin, omax])):
utils.warn(
"One or more intensity levels are NaN. Rescaling will broadcast "
"NaN to the full image. Provide intensity levels yourself to "
"avoid this. E.g. with np.nanmin(image), np.nanmax(image).",
stacklevel=2,
)
image = cp.clip(image, imin, imax)
if imin != imax:
image = (image - imin) / (imax - imin)
return cp.asarray(image * (omax - omin) + omin, dtype=out_dtype)
else:
return cp.clip(image, omin, omax).astype(out_dtype, copy=False)
def _assert_non_negative(image):
if cp.any(image < 0): # synchronize!
raise ValueError(
"Image Correction methods work correctly only on "
"images with non-negative values. Use "
"skimage.exposure.rescale_intensity."
)
def _adjust_gamma_u8(image, gamma, gain):
"""LUT based implementation of gamma adjustment."""
lut = 255 * gain * (np.linspace(0, 1, 256) ** gamma)
lut = np.minimum(np.rint(lut), 255).astype("uint8")
lut = cp.asarray(lut)
return lut[image]
def adjust_gamma(image, gamma=1, gain=1):
"""Performs Gamma Correction on the input image.
Also known as Power Law Transform.
This function transforms the input image pixelwise according to the
equation ``O = I**gamma`` after scaling each pixel to the range 0 to 1.
Parameters
----------
image : ndarray
Input image.
gamma : float, optional
Non negative real number. Default value is 1.
gain : float, optional
The constant multiplier. Default value is 1.
Returns
-------
out : ndarray
Gamma corrected output image.
See Also
--------
adjust_log
Notes
-----
For gamma greater than 1, the histogram will shift towards left and
the output image will be darker than the input image.
For gamma less than 1, the histogram will shift towards right and
the output image will be brighter than the input image.
References
----------
.. [1] https://en.wikipedia.org/wiki/Gamma_correction
Examples
--------
>>> from skimage import data
>>> from cucim.skimage import exposure, img_as_float
>>> image = img_as_float(cp.array(data.moon()))
>>> gamma_corrected = exposure.adjust_gamma(image, 2)
>>> # Output is darker for gamma > 1
>>> image.mean() > gamma_corrected.mean()
array(True)
"""
if gamma < 0:
raise ValueError("Gamma should be a non-negative real number.")
dtype = image.dtype.type
if dtype is cp.uint8:
out = _adjust_gamma_u8(image, gamma, gain)
else:
_assert_non_negative(image)
scale = float(
dtype_limits(image, True)[1] - dtype_limits(image, True)[0]
)
out = (((image / scale) ** gamma) * scale * gain).astype(dtype)
return out
def adjust_log(image, gain=1, inv=False):
"""Performs Logarithmic correction on the input image.
This function transforms the input image pixelwise according to the
equation ``O = gain*log2(1 + I)`` after scaling each pixel to the range
0 to 1.
For inverse logarithmic correction, the equation is
``O = gain*(2**I - 1)``.
Parameters
----------
image : ndarray
Input image.
gain : float, optional
The constant multiplier. Default value is 1.
inv : float, optional
If True, it performs inverse logarithmic correction,
else correction will be logarithmic. Defaults to False.
Returns
-------
out : ndarray
Logarithm corrected output image.
See Also
--------
adjust_gamma
References
----------
.. [1] http://www.ece.ucsb.edu/Faculty/Manjunath/courses/ece178W03/EnhancePart1.pdf
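Examples
--------
A small sanity check on float input (for floating point images the scale
factor is 1, so the mapping reduces to ``log2(1 + I)``):
>>> import cupy as cp
>>> image = cp.array([0.0, 1.0, 3.0])
>>> adjust_log(image, 1)
array([0., 1., 2.])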
""" # noqa
_assert_non_negative(image)
dtype = image.dtype.type
scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])
if inv:
out = (2 ** (image / scale) - 1) * scale * gain
return out.astype(dtype, copy=False)
out = cp.log2(1 + image / scale) * scale * gain
return out.astype(dtype, copy=False)
def adjust_sigmoid(image, cutoff=0.5, gain=10, inv=False):
"""Performs Sigmoid Correction on the input image.
Also known as Contrast Adjustment.
This function transforms the input image pixelwise according to the
equation ``O = 1/(1 + exp(gain*(cutoff - I)))`` after scaling each pixel
to the range 0 to 1.
Parameters
----------
image : ndarray
Input image.
cutoff : float, optional
Cutoff of the sigmoid function that shifts the characteristic curve
in the horizontal direction. Default value is 0.5.
gain : float, optional
The constant multiplier in the exponent of the sigmoid function.
Default value is 10.
inv : bool, optional
If True, returns the negative sigmoid correction. Defaults to False.
Returns
-------
out : ndarray
Sigmoid corrected output image.
See Also
--------
adjust_gamma
References
----------
.. [1] Gustav J. Braun, "Image Lightness Rescaling Using Sigmoidal Contrast
Enhancement Functions",
http://markfairchild.org/PDFs/PAP07.pdf
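Examples
--------
A small sanity check on float input (scale factor 1 for floating point
images; a pixel exactly at ``cutoff`` maps to the midpoint of the range):
>>> import cupy as cp
>>> image = cp.array([0.0, 0.5, 1.0])
>>> out = adjust_sigmoid(image, cutoff=0.5, gain=10)
>>> float(out[1])
0.5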
"""
_assert_non_negative(image)
dtype = image.dtype.type
scale = float(dtype_limits(image, True)[1] - dtype_limits(image, True)[0])
if inv:
out = (1 - 1 / (1 + cp.exp(gain * (cutoff - image / scale)))) * scale
return out.astype(dtype, copy=False)
out = (1 / (1 + cp.exp(gain * (cutoff - image / scale)))) * scale
return out.astype(dtype, copy=False)
def is_low_contrast(
image,
fraction_threshold=0.05,
lower_percentile=1,
upper_percentile=99,
method="linear",
):
"""Determine if an image is low contrast.
Parameters
----------
image : array-like
The image under test.
fraction_threshold : float, optional
The low contrast fraction threshold. An image is considered low-
contrast when its range of brightness spans less than this
fraction of its data type's full range. [1]_
lower_percentile : float, optional
Disregard values below this percentile when computing image contrast.
upper_percentile : float, optional
Disregard values above this percentile when computing image contrast.
method : str, optional
The contrast determination method. Right now the only available
option is "linear".
Returns
-------
out : bool
True when the image is determined to be low contrast.
Notes
-----
For boolean images, this function returns True only if all values are
the same (the method, threshold, and percentile arguments are ignored).
References
----------
.. [1] https://scikit-image.org/docs/dev/user_guide/data_types.html
Examples
--------
>>> import cupy as cp
>>> image = cp.linspace(0, 0.04, 100)
>>> is_low_contrast(image)
array(True)
>>> image[-1] = 1
>>> is_low_contrast(image)
array(True)
>>> is_low_contrast(image, upper_percentile=100)
array(False)
"""
if image.dtype == bool:
return not ((image.max() == 1) and (image.min() == 0))
if image.ndim == 3:
from ..color import rgb2gray, rgba2rgb # avoid circular import
if image.shape[2] == 4:
image = rgba2rgb(image)
if image.shape[2] == 3:
image = rgb2gray(image)
dlimits = dtype_limits(image, clip_negative=False)
limits = cp.percentile(image, [lower_percentile, upper_percentile])
ratio = (limits[1] - limits[0]) / (dlimits[1] - dlimits[0])
return ratio < fraction_threshold
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/exposure/__init__.py
|
from ._adapthist import equalize_adapthist
from .exposure import (
adjust_gamma,
adjust_log,
adjust_sigmoid,
cumulative_distribution,
equalize_hist,
histogram,
is_low_contrast,
rescale_intensity,
)
from .histogram_matching import match_histograms
__all__ = [
"histogram",
"equalize_hist",
"equalize_adapthist",
"rescale_intensity",
"cumulative_distribution",
"adjust_gamma",
"adjust_sigmoid",
"adjust_log",
"is_low_contrast",
"match_histograms",
]
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/exposure
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/exposure/tests/test_exposure.py
|
import platform
import warnings
import cupy as cp
import numpy as np
import pytest
from cupy.testing import assert_array_almost_equal, assert_array_equal
from numpy.testing import assert_almost_equal
from skimage import data
from cucim.skimage import exposure, util
from cucim.skimage._shared._warnings import expected_warnings
from cucim.skimage._shared.utils import _supported_float_type
from cucim.skimage.color import rgb2gray
from cucim.skimage.exposure.exposure import intensity_range
from cucim.skimage.util.dtype import dtype_range
# TODO: Some tests fail unexpectedly on ARM.
ON_AARCH64 = platform.machine() == "aarch64"
ON_AARCH64_REASON = "TODO: Test fails unexpectedly on ARM."
# Test integer histograms
# =======================
def test_wrong_source_range():
im = cp.array([-1, 100], dtype=cp.int8)
match = "Incorrect value for `source_range` argument"
with pytest.raises(ValueError, match=match):
frequencies, bin_centers = exposure.histogram(im, source_range="foobar")
@pytest.mark.xfail(ON_AARCH64, reason=ON_AARCH64_REASON)
def test_negative_overflow():
im = cp.array([-1, 100], dtype=cp.int8)
frequencies, bin_centers = exposure.histogram(im)
assert_array_equal(bin_centers, cp.arange(-1, 101))
assert frequencies[0] == 1
assert frequencies[-1] == 1
assert_array_equal(frequencies[1:-1], 0)
def test_all_negative_image():
im = cp.array([-100, -1], dtype=cp.int8)
frequencies, bin_centers = exposure.histogram(im)
assert_array_equal(bin_centers, cp.arange(-100, 0))
assert frequencies[0] == 1
assert frequencies[-1] == 1
assert_array_equal(frequencies[1:-1], 0)
def test_int_range_image():
im = cp.array([10, 100], dtype=cp.int8)
frequencies, bin_centers = exposure.histogram(im)
assert len(bin_centers) == len(frequencies)
assert bin_centers[0] == 10
assert bin_centers[-1] == 100
def test_multichannel_int_range_image():
im = cp.array([[10, 5], [100, 102]], dtype=np.int8)
frequencies, bin_centers = exposure.histogram(im, channel_axis=-1)
for ch in range(im.shape[-1]):
assert len(frequencies[ch]) == len(bin_centers)
assert bin_centers[0] == 5
assert bin_centers[-1] == 102
def test_peak_uint_range_dtype():
im = cp.array([10, 100], dtype=cp.uint8)
frequencies, bin_centers = exposure.histogram(im, source_range="dtype")
assert_array_equal(bin_centers, cp.arange(0, 256))
assert frequencies[10] == 1
assert frequencies[100] == 1
assert frequencies[101] == 0
assert frequencies.shape == (256,)
def test_peak_int_range_dtype():
im = cp.array([10, 100], dtype=cp.int8)
frequencies, bin_centers = exposure.histogram(im, source_range="dtype")
assert_array_equal(bin_centers, cp.arange(-128, 128))
assert frequencies[128 + 10] == 1
assert frequencies[128 + 100] == 1
assert frequencies[128 + 101] == 0
assert frequencies.shape == (256,)
def test_flat_uint_range_dtype():
im = cp.linspace(0, 255, 256, dtype=cp.uint8)
frequencies, bin_centers = exposure.histogram(im, source_range="dtype")
assert_array_equal(bin_centers, cp.arange(0, 256))
assert frequencies.shape == (256,)
def test_flat_int_range_dtype():
im = cp.linspace(-128, 128, 256, dtype=cp.int8)
frequencies, bin_centers = exposure.histogram(im, source_range="dtype")
assert_array_equal(bin_centers, cp.arange(-128, 128))
assert frequencies.shape == (256,)
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_peak_float_out_of_range_image(dtype):
im = cp.array([10, 100], dtype=dtype)
frequencies, bin_centers = exposure.histogram(im, nbins=90)
# offset values by 0.5 for float...
assert_array_equal(bin_centers, cp.arange(10, 100) + 0.5)
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_peak_float_out_of_range_dtype(dtype):
im = cp.array([10, 100], dtype=dtype)
nbins = 10
frequencies, bin_centers = exposure.histogram(
im, nbins=nbins, source_range="dtype"
)
assert bin_centers.dtype == dtype
assert_almost_equal(cp.min(bin_centers).get(), -0.9, 3)
assert_almost_equal(cp.max(bin_centers).get(), 0.9, 3)
assert len(bin_centers) == 10
def test_normalize():
im = cp.array([0, 255, 255], dtype=cp.uint8)
frequencies, bin_centers = exposure.histogram(
im, source_range="dtype", normalize=False
)
expected = cp.zeros(256)
expected[0] = 1
expected[-1] = 2
assert_array_equal(frequencies, expected)
frequencies, bin_centers = exposure.histogram(
im, source_range="dtype", normalize=True
)
expected /= 3.0
assert_array_equal(frequencies, expected)
# Test multichannel histograms
# ============================
@pytest.mark.parametrize("source_range", ["dtype", "image"])
@pytest.mark.parametrize("dtype", [cp.uint8, cp.int16, cp.float64])
@pytest.mark.parametrize("channel_axis", [0, 1, -1])
def test_multichannel_hist_common_bins(dtype, source_range, channel_axis):
"""Check that all channels use the same binning."""
# Construct multichannel image with uniform values within each channel,
# but the full range of values across channels.
shape = (5, 5)
channel_size = shape[0] * shape[1]
imin, imax = dtype_range[dtype]
im = np.stack(
(
np.full(shape, imin, dtype=dtype),
np.full(shape, imax, dtype=dtype),
),
axis=channel_axis,
)
im = cp.asarray(im)
frequencies, bin_centers = exposure.histogram(
im, source_range=source_range, channel_axis=channel_axis
)
if cp.issubdtype(dtype, cp.integer):
assert_array_equal(bin_centers, np.arange(imin, imax + 1))
assert frequencies[0][0] == channel_size
assert frequencies[0][-1] == 0
assert frequencies[1][0] == 0
assert frequencies[1][-1] == channel_size
# Test histogram equalization
# ===========================
np.random.seed(0)
test_img_int = cp.array(data.camera())
# squeeze image intensities to lower image contrast
test_img = util.img_as_float(test_img_int)
test_img = exposure.rescale_intensity(test_img / 5.0 + 100)
test_img = cp.array(test_img)
def test_equalize_uint8_approx():
"""Check integer bins used for uint8 images."""
img_eq0 = exposure.equalize_hist(test_img_int)
img_eq1 = exposure.equalize_hist(test_img_int, nbins=3)
cp.testing.assert_allclose(img_eq0, img_eq1)
def test_equalize_ubyte():
img = util.img_as_ubyte(test_img)
img_eq = exposure.equalize_hist(img)
cdf, bin_edges = exposure.cumulative_distribution(img_eq)
check_cdf_slope(cdf)
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_equalize_float(dtype):
img = util.img_as_float(test_img).astype(dtype, copy=False)
img_eq = exposure.equalize_hist(img)
assert img_eq.dtype == _supported_float_type(dtype)
cdf, bin_edges = exposure.cumulative_distribution(img_eq)
check_cdf_slope(cdf)
assert bin_edges.dtype == _supported_float_type(dtype)
def test_equalize_masked():
img = util.img_as_float(test_img)
mask = cp.zeros(test_img.shape)
mask[100:400, 100:400] = 1
img_mask_eq = exposure.equalize_hist(img, mask=mask)
img_eq = exposure.equalize_hist(img)
cdf, bin_edges = exposure.cumulative_distribution(img_mask_eq)
check_cdf_slope(cdf)
assert not (img_eq == img_mask_eq).all()
def check_cdf_slope(cdf):
"""Slope of cdf which should equal 1 for an equalized histogram."""
norm_intensity = np.linspace(0, 1, len(cdf))
slope, intercept = np.polyfit(norm_intensity, cp.asnumpy(cdf), 1)
assert 0.9 < slope < 1.1
# Test intensity range
# ====================
@pytest.mark.parametrize(
"test_input,expected",
[("image", [0, 1]), ("dtype", [0, 255]), ((10, 20), [10, 20])],
)
def test_intensity_range_uint8(test_input, expected):
image = cp.array([0, 1], dtype=cp.uint8)
out = intensity_range(image, range_values=test_input)
assert_array_equal(out, cp.array(expected))
@pytest.mark.parametrize(
"test_input,expected",
[("image", [0.1, 0.2]), ("dtype", [-1, 1]), ((0.3, 0.4), [0.3, 0.4])],
)
def test_intensity_range_float(test_input, expected):
image = cp.array([0.1, 0.2], dtype=cp.float64)
out = intensity_range(image, range_values=test_input)
assert_array_equal(out, expected)
def test_intensity_range_clipped_float():
image = cp.array([0.1, 0.2], dtype=cp.float64)
out = intensity_range(image, range_values="dtype", clip_negative=True)
assert_array_equal(out, (0, 1))
# Test rescale intensity
# ======================
uint10_max = 2**10 - 1
uint12_max = 2**12 - 1
uint14_max = 2**14 - 1
uint16_max = 2**16 - 1
def test_rescale_stretch():
image = cp.array([51, 102, 153], dtype=cp.uint8)
out = exposure.rescale_intensity(image)
assert out.dtype == cp.uint8
assert_array_almost_equal(out, [0, 127, 255])
def test_rescale_shrink():
image = cp.array([51.0, 102.0, 153.0])
out = exposure.rescale_intensity(image)
assert_array_almost_equal(out, [0, 0.5, 1])
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_rescale_in_range(dtype):
image = cp.array([51.0, 102.0, 153.0], dtype=dtype)
out = exposure.rescale_intensity(image, in_range=(0, 255))
assert_array_almost_equal(out, [0.2, 0.4, 0.6], decimal=4)
# with out_range='dtype', the output has the same dtype
assert out.dtype == image.dtype
def test_rescale_in_range_clip():
image = cp.array([51.0, 102.0, 153.0])
out = exposure.rescale_intensity(image, in_range=(0, 102))
assert_array_almost_equal(out, [0.5, 1, 1])
@pytest.mark.parametrize(
"dtype", [cp.int8, cp.int32, cp.float16, cp.float32, cp.float64]
)
@pytest.mark.xfail(ON_AARCH64, reason=ON_AARCH64_REASON)
def test_rescale_out_range(dtype):
"""Check that output range is correct.
.. versionchanged:: 22.02.00
float16 and float32 inputs now result in float32 output. Formerly they
would give float64 outputs.
"""
image = cp.array([-10, 0, 10], dtype=cp.int8)
out = exposure.rescale_intensity(image, out_range=(0, 127))
assert out.dtype == _supported_float_type(image.dtype)
assert_array_almost_equal(out, [0, 63.5, 127])
def test_rescale_named_in_range():
image = cp.array([0, uint10_max, uint10_max + 100], dtype=cp.uint16)
out = exposure.rescale_intensity(image, in_range="uint10")
assert_array_almost_equal(out, [0, uint16_max, uint16_max])
def test_rescale_named_out_range():
image = cp.array([0, uint16_max], dtype=cp.uint16)
out = exposure.rescale_intensity(image, out_range="uint10")
assert_array_almost_equal(out, [0, uint10_max])
def test_rescale_uint12_limits():
image = cp.array([0, uint16_max], dtype=cp.uint16)
out = exposure.rescale_intensity(image, out_range="uint12")
assert_array_almost_equal(out, [0, uint12_max])
def test_rescale_uint14_limits():
image = cp.array([0, uint16_max], dtype=cp.uint16)
out = exposure.rescale_intensity(image, out_range="uint14")
assert_array_almost_equal(out, [0, uint14_max])
def test_rescale_all_zeros():
image = cp.zeros((2, 2), dtype=cp.uint8)
out = exposure.rescale_intensity(image)
assert ~cp.isnan(out).all()
assert_array_almost_equal(out, image)
def test_rescale_constant():
image = cp.array([130, 130], dtype=cp.uint16)
out = exposure.rescale_intensity(image, out_range=(0, 127))
assert_array_almost_equal(out, [127, 127])
def test_rescale_same_values():
image = cp.ones((2, 2))
out = exposure.rescale_intensity(image)
assert ~cp.isnan(out).all()
assert_array_almost_equal(out, image)
@pytest.mark.parametrize(
"in_range,out_range", [("image", "dtype"), ("dtype", "image")]
)
def test_rescale_nan_warning(in_range, out_range):
image = cp.arange(12, dtype=float).reshape(3, 4)
image[1, 1] = cp.nan
msg = (
r"One or more intensity levels are NaN\."
r" Rescaling will broadcast NaN to the full image\."
)
with expected_warnings([msg]):
exposure.rescale_intensity(image, in_range, out_range)
@pytest.mark.parametrize(
"out_range, out_dtype",
[
("uint8", cp.uint8),
("uint10", cp.uint16),
("uint12", cp.uint16),
("uint16", cp.uint16),
("float", float),
],
)
def test_rescale_output_dtype(out_range, out_dtype):
image = cp.array([-128, 0, 127], dtype=cp.int8)
output_image = exposure.rescale_intensity(image, out_range=out_range)
assert output_image.dtype == out_dtype
@pytest.mark.xfail(ON_AARCH64, reason=ON_AARCH64_REASON)
def test_rescale_no_overflow():
image = cp.array([-128, 0, 127], dtype=cp.int8)
output_image = exposure.rescale_intensity(image, out_range=cp.uint8)
cp.testing.assert_array_equal(output_image, [0, 128, 255])
assert output_image.dtype == cp.uint8
@pytest.mark.xfail(ON_AARCH64, reason=ON_AARCH64_REASON)
def test_rescale_float_output():
image = cp.array([-128, 0, 127], dtype=cp.int8)
output_image = exposure.rescale_intensity(image, out_range=(0, 255))
cp.testing.assert_array_equal(output_image, [0, 128, 255])
assert output_image.dtype == _supported_float_type(image.dtype)
def test_rescale_raises_on_incorrect_out_range():
image = cp.array([-128, 0, 127], dtype=cp.int8)
with pytest.raises(ValueError):
_ = exposure.rescale_intensity(image, out_range="flat")
# Test adaptive histogram equalization
# ====================================
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_adapthist_grayscale(dtype):
"""Test a grayscale float image"""
img = cp.array(data.astronaut())
img = util.img_as_float(img).astype(dtype, copy=False)
img = rgb2gray(img)
img = cp.dstack((img, img, img))
adapted = exposure.equalize_adapthist(
img, kernel_size=(57, 51), clip_limit=0.01, nbins=128
)
assert img.shape == adapted.shape
assert adapted.dtype == _supported_float_type(dtype)
snr_decimal = 3 if dtype != cp.float16 else 2
assert_almost_equal(float(peak_snr(img, adapted)), 100.140, snr_decimal)
assert_almost_equal(float(norm_brightness_err(img, adapted)), 0.0529, 3)
def test_adapthist_color():
"""Test an RGB color uint16 image"""
img = util.img_as_uint(cp.array(data.astronaut()))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
hist, bin_centers = exposure.histogram(img)
assert len(w) > 0
adapted = exposure.equalize_adapthist(img, clip_limit=0.01)
assert adapted.min() == 0
assert adapted.max() == 1.0
assert img.shape == adapted.shape
full_scale = exposure.rescale_intensity(img)
assert_almost_equal(float(peak_snr(full_scale, adapted)), 109.393, 1)
assert_almost_equal(
float(norm_brightness_err(full_scale, adapted)), 0.02, 2
)
def test_adapthist_alpha():
"""Test an RGBA color image"""
img = util.img_as_float(cp.array(data.astronaut()))
alpha = cp.ones((img.shape[0], img.shape[1]), dtype=float)
img = cp.dstack((img, alpha))
adapted = exposure.equalize_adapthist(img)
assert adapted.shape != img.shape
img = img[:, :, :3]
full_scale = exposure.rescale_intensity(img)
assert img.shape == adapted.shape
assert_almost_equal(float(peak_snr(full_scale, adapted)), 109.393, 2)
assert_almost_equal(
float(norm_brightness_err(full_scale, adapted)), 0.0248, 3
)
def test_adapthist_grayscale_Nd():
"""
Test for n-dimensional consistency with float images
Note: Currently if img.ndim == 3, img.shape[2] > 4 must hold for the image
not to be interpreted as a color image by @adapt_rgb
"""
# take 2d image, subsample and stack it
img = util.img_as_float(cp.array(data.astronaut()))
img = rgb2gray(img)
a = 15
img2d = util.img_as_float(img[0:-1:a, 0:-1:a])
img3d = cp.stack([img2d] * (img.shape[0] // a), axis=0)
# apply CLAHE
adapted2d = exposure.equalize_adapthist(
img2d, kernel_size=5, clip_limit=0.05
)
adapted3d = exposure.equalize_adapthist(
img3d, kernel_size=5, clip_limit=0.05
)
# check that dimensions of input and output match
assert img2d.shape == adapted2d.shape
assert img3d.shape == adapted3d.shape
# check that the result from the stack of 2d images is similar
# to the underlying 2d image
assert (
cp.mean(cp.abs(adapted2d - adapted3d[adapted3d.shape[0] // 2])) < 0.02
)
def test_adapthist_constant():
"""Test constant image, float and uint"""
img = cp.zeros((8, 8))
img += 2
img = img.astype(cp.uint16)
adapted = exposure.equalize_adapthist(img, 3)
assert cp.min(adapted) == cp.max(adapted)
img = cp.zeros((8, 8))
img += 0.1
img = img.astype(cp.float64)
adapted = exposure.equalize_adapthist(img, 3)
assert cp.min(adapted) == cp.max(adapted)
def test_adapthist_borders():
"""Test border processing"""
img = rgb2gray(util.img_as_float(cp.array(data.astronaut())))
# maximize difference between orig and processed img
img /= 100.0
img[img.shape[0] // 2, img.shape[1] // 2] = 1.0
# check borders are processed for different kernel sizes
border_index = -1
for kernel_size in range(51, 71, 2):
adapted = exposure.equalize_adapthist(img, kernel_size, clip_limit=0.5)
# Check last columns are processed
assert (
norm_brightness_err(adapted[:, border_index], img[:, border_index])
> 0.1
)
# Check last rows are processed
assert (
norm_brightness_err(adapted[border_index, :], img[border_index, :])
> 0.1
)
def test_adapthist_clip_limit():
img_u = cp.array(data.moon())
img_f = util.img_as_float(img_u)
# uint8 input
img_clahe0 = exposure.equalize_adapthist(img_u, clip_limit=0)
img_clahe1 = exposure.equalize_adapthist(img_u, clip_limit=1)
assert_array_equal(img_clahe0, img_clahe1)
# float64 input
img_clahe0 = exposure.equalize_adapthist(img_f, clip_limit=0)
img_clahe1 = exposure.equalize_adapthist(img_f, clip_limit=1)
assert_array_equal(img_clahe0, img_clahe1)
def peak_snr(img1, img2):
"""Peak signal to noise ratio of two images
Parameters
----------
img1 : array-like
img2 : array-like
Returns
-------
peak_snr : float
Peak signal to noise ratio
"""
if img1.ndim == 3:
img1, img2 = rgb2gray(img1.copy()), rgb2gray(img2.copy())
img1 = util.img_as_float(img1)
img2 = util.img_as_float(img2)
mse = 1.0 / img1.size * cp.square(img1 - img2).sum()
_, max_ = dtype_range[img1.dtype.type]
return 20 * cp.log(max_ / mse)
def norm_brightness_err(img1, img2):
"""Normalized Absolute Mean Brightness Error between two images
Parameters
----------
img1 : array-like
img2 : array-like
Returns
-------
norm_brightness_error : float
Normalized absolute mean brightness error
"""
if img1.ndim == 3:
img1, img2 = rgb2gray(img1), rgb2gray(img2)
ambe = cp.abs(img1.mean() - img2.mean())
nbe = ambe / dtype_range[img1.dtype.type][1]
return nbe
def test_adapthist_incorrect_kernel_size():
img = cp.ones((8, 8), dtype=float)
with pytest.raises(ValueError, match="Incorrect value of `kernel_size`"):
exposure.equalize_adapthist(img, (3, 3, 3))
# Test Gamma Correction
# =====================
def test_adjust_gamma_1x1_shape():
"""Check that the shape is maintained"""
img = cp.ones([1, 1])
result = exposure.adjust_gamma(img, 1.5)
assert img.shape == result.shape
def test_adjust_gamma_one():
"""Same image should be returned for gamma equal to one"""
image = cp.arange(0, 256, dtype=np.uint8).reshape((16, 16))
result = exposure.adjust_gamma(image, 1)
assert_array_almost_equal(result, image)
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_adjust_gamma_zero(dtype):
"""White image should be returned for gamma equal to zero"""
image = cp.random.uniform(0, 255, (8, 8)).astype(dtype, copy=False)
result = exposure.adjust_gamma(image, 0)
dtype = image.dtype.type
assert_array_almost_equal(result, dtype_range[dtype][1])
assert result.dtype == image.dtype
def test_adjust_gamma_less_one():
"""Verifying the output with expected results for gamma
correction with gamma equal to half"""
image = cp.arange(0, 256, dtype=np.uint8).reshape((16, 16))
# fmt: off
expected = cp.array([0, 16, 23, 28, 32, 36, 39, 42, 45, 48, 50,
53, 55, 58, 60, 62, 64, 66, 68, 70, 71, 73,
75, 77, 78, 80, 81, 83, 84, 86, 87, 89, 90,
92, 93, 94, 96, 97, 98, 100, 101, 102, 103,
105, 106, 107, 108, 109, 111, 112, 113, 114,
115, 116, 117, 118, 119, 121, 122, 123, 124,
125, 126, 127, 128, 129, 130, 131, 132, 133,
134, 135, 135, 136, 137, 138, 139, 140, 141,
142, 143, 144, 145, 145, 146, 147, 148, 149,
150, 151, 151, 152, 153, 154, 155, 156, 156,
157, 158, 159, 160, 160, 161, 162, 163, 164,
164, 165, 166, 167, 167, 168, 169, 170, 170,
171, 172, 173, 173, 174, 175, 176, 176, 177,
178, 179, 179, 180, 181, 181, 182, 183, 183,
184, 185, 186, 186, 187, 188, 188, 189, 190,
190, 191, 192, 192, 193, 194, 194, 195, 196,
196, 197, 198, 198, 199, 199, 200, 201, 201,
202, 203, 203, 204, 204, 205, 206, 206, 207,
208, 208, 209, 209, 210, 211, 211, 212, 212,
213, 214, 214, 215, 215, 216, 217, 217, 218,
218, 219, 220, 220, 221, 221, 222, 222, 223,
224, 224, 225, 225, 226, 226, 227, 228, 228,
229, 229, 230, 230, 231, 231, 232, 233, 233,
234, 234, 235, 235, 236, 236, 237, 237, 238,
238, 239, 240, 240, 241, 241, 242, 242, 243,
243, 244, 244, 245, 245, 246, 246, 247, 247,
248, 248, 249, 249, 250, 250, 251, 251, 252,
252, 253, 253, 254, 254, 255],
dtype=cp.uint8).reshape((16, 16))
# fmt: on
result = exposure.adjust_gamma(image, 0.5)
assert_array_equal(result, expected)
def test_adjust_gamma_greater_one():
"""Verifying the output with expected results for gamma
correction with gamma equal to two"""
image = np.arange(0, 256, dtype=np.uint8).reshape((16, 16))
# fmt: off
expected = cp.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1,
1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3,
4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8,
8, 8, 9, 9, 9, 10, 10, 11, 11, 11, 12, 12,
13, 13, 14, 14, 15, 15, 16, 16, 17, 17, 18,
18, 19, 19, 20, 20, 21, 21, 22, 23, 23, 24,
24, 25, 26, 26, 27, 28, 28, 29, 30, 30, 31,
32, 32, 33, 34, 35, 35, 36, 37, 38, 38, 39,
40, 41, 42, 42, 43, 44, 45, 46, 47, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 84, 85, 86, 87, 88, 89, 91, 92, 93, 94,
95, 97, 98, 99, 100, 102, 103, 104, 105,
107, 108, 109, 111, 112, 113, 115, 116, 117,
119, 120, 121, 123, 124, 126, 127, 128, 130,
131, 133, 134, 136, 137, 139, 140, 142, 143,
145, 146, 148, 149, 151, 152, 154, 155, 157,
158, 160, 162, 163, 165, 166, 168, 170, 171,
173, 175, 176, 178, 180, 181, 183, 185, 186,
188, 190, 192, 193, 195, 197, 199, 200, 202,
204, 206, 207, 209, 211, 213, 215, 217, 218,
220, 222, 224, 226, 228, 230, 232, 233, 235,
237, 239, 241, 243, 245, 247, 249, 251, 253,
255] , dtype=cp.uint8).reshape((16, 16))
# fmt: on
result = exposure.adjust_gamma(image, 2)
assert_array_equal(result, expected)
def test_adjust_gamma_negative():
image = cp.arange(0, 255, 4, cp.uint8).reshape((8, 8))
with pytest.raises(ValueError):
exposure.adjust_gamma(image, -1)
def test_adjust_gamma_u8_overflow():
img = 255 * cp.ones((2, 2), dtype=np.uint8)
assert cp.all(exposure.adjust_gamma(img, gamma=1, gain=1.1) == 255)
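def test_adjust_gamma_gain_float():
    """Minimal sanity check: for float input with gamma equal to one, the
    gain acts as a plain multiplier (the float scale factor is 1)."""
    image = cp.linspace(0, 0.5, 8)
    result = exposure.adjust_gamma(image, gamma=1, gain=2)
    assert_array_almost_equal(result, image * 2)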
# Test Logarithmic Correction
# ===========================
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_adjust_log_1x1_shape(dtype):
"""Check that the shape is maintained"""
img = cp.ones([1, 1], dtype=dtype)
result = exposure.adjust_log(img, 1)
assert img.shape == result.shape
assert result.dtype == dtype
def test_adjust_log():
"""Verifying the output with expected results for logarithmic
correction with the constant multiplier equal to unity"""
image = cp.arange(0, 255, 4, cp.uint8).reshape((8, 8))
# fmt: off
expected = cp.array([
[ 0, 5, 11, 16, 22, 27, 33, 38], # noqa
[ 43, 48, 53, 58, 63, 68, 73, 77], # noqa
[ 82, 86, 91, 95, 100, 104, 109, 113], # noqa
[117, 121, 125, 129, 133, 137, 141, 145],
[149, 153, 157, 160, 164, 168, 172, 175],
[179, 182, 186, 189, 193, 196, 199, 203],
[206, 209, 213, 216, 219, 222, 225, 228],
[231, 234, 238, 241, 244, 246, 249, 252]], dtype=cp.uint8)
# fmt: on
result = exposure.adjust_log(image, 1)
assert_array_equal(result, expected)
def test_adjust_inv_log():
"""Verifying the output with expected results for inverse logarithmic
correction with the constant multiplier equal to unity"""
image = cp.arange(0, 255, 4, cp.uint8).reshape((8, 8))
# fmt: off
expected = cp.array([
[ 0, 2, 5, 8, 11, 14, 17, 20], # noqa
[ 23, 26, 29, 32, 35, 38, 41, 45], # noqa
[ 48, 51, 55, 58, 61, 65, 68, 72], # noqa
[ 76, 79, 83, 87, 90, 94, 98, 102], # noqa
[106, 110, 114, 118, 122, 126, 130, 134],
[138, 143, 147, 151, 156, 160, 165, 170],
[174, 179, 184, 188, 193, 198, 203, 208],
[213, 218, 224, 229, 234, 239, 245, 250]], dtype=cp.uint8)
# fmt: on
result = exposure.adjust_log(image, 1, True)
assert_array_equal(result, expected)
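def test_adjust_log_float():
    """Minimal sanity check on float input: with gain equal to one the
    mapping is ``log2(1 + I)``, so 0 maps to 0 and 1 maps to 1."""
    image = cp.array([0.0, 1.0])
    result = exposure.adjust_log(image, 1)
    assert_array_almost_equal(result, cp.array([0.0, 1.0]))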
# Test Sigmoid Correction
# =======================
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_adjust_sigmoid_1x1_shape(dtype):
"""Check that the shape is maintained"""
img = cp.ones([1, 1], dtype=dtype)
result = exposure.adjust_sigmoid(img, 1, 5)
assert img.shape == result.shape
assert result.dtype == dtype
def test_adjust_sigmoid_cutoff_one():
"""Verifying the output with expected results for sigmoid correction
with cutoff equal to one and gain of 5"""
image = cp.arange(0, 255, 4, cp.uint8).reshape((8, 8))
# fmt: off
expected = cp.array([
[ 1, 1, 1, 2, 2, 2, 2, 2], # noqa
[ 3, 3, 3, 4, 4, 4, 5, 5], # noqa
[ 5, 6, 6, 7, 7, 8, 9, 10], # noqa
[ 10, 11, 12, 13, 14, 15, 16, 18], # noqa
[ 19, 20, 22, 24, 25, 27, 29, 32], # noqa
[ 34, 36, 39, 41, 44, 47, 50, 54], # noqa
[ 57, 61, 64, 68, 72, 76, 80, 85], # noqa
[ 89, 94, 99, 104, 108, 113, 118, 123]], dtype=cp.uint8) # noqa
# fmt: on
result = exposure.adjust_sigmoid(image, 1, 5)
assert_array_equal(result, expected)
def test_adjust_sigmoid_cutoff_zero():
"""Verifying the output with expected results for sigmoid correction
with cutoff equal to zero and gain of 10"""
image = cp.arange(0, 255, 4, cp.uint8).reshape((8, 8))
# fmt: off
expected = cp.array([
[127, 137, 147, 156, 166, 175, 183, 191],
[198, 205, 211, 216, 221, 225, 229, 232],
[235, 238, 240, 242, 244, 245, 247, 248],
[249, 250, 250, 251, 251, 252, 252, 253],
[253, 253, 253, 253, 254, 254, 254, 254],
[254, 254, 254, 254, 254, 254, 254, 254],
[254, 254, 254, 254, 254, 254, 254, 254],
[254, 254, 254, 254, 254, 254, 254, 254]], dtype=cp.uint8)
# fmt: on
result = exposure.adjust_sigmoid(image, 0, 10)
assert_array_equal(result, expected)
def test_adjust_sigmoid_cutoff_half():
"""Verifying the output with expected results for sigmoid correction
with cutoff equal to half and gain of 10"""
image = cp.arange(0, 255, 4, cp.uint8).reshape((8, 8))
# fmt: off
expected = cp.array([
[ 1, 1, 2, 2, 3, 3, 4, 5], # noqa
[ 5, 6, 7, 9, 10, 12, 14, 16], # noqa
[ 19, 22, 25, 29, 34, 39, 44, 50], # noqa
[ 57, 64, 72, 80, 89, 99, 108, 118], # noqa
[128, 138, 148, 158, 167, 176, 184, 192],
[199, 205, 211, 217, 221, 226, 229, 233],
[236, 238, 240, 242, 244, 246, 247, 248],
[249, 250, 250, 251, 251, 252, 252, 253]], dtype=cp.uint8)
# fmt: on
result = exposure.adjust_sigmoid(image, 0.5, 10)
assert_array_equal(result, expected)
def test_adjust_inv_sigmoid_cutoff_half():
"""Verifying the output with expected results for inverse sigmoid
correction with cutoff equal to half and gain of 10"""
image = cp.arange(0, 255, 4, cp.uint8).reshape((8, 8))
# fmt: off
expected = cp.array([
[253, 253, 252, 252, 251, 251, 250, 249],
[249, 248, 247, 245, 244, 242, 240, 238],
[235, 232, 229, 225, 220, 215, 210, 204],
[197, 190, 182, 174, 165, 155, 146, 136],
[126, 116, 106, 96, 87, 78, 70, 62], # noqa
[ 55, 49, 43, 37, 33, 28, 25, 21], # noqa
[ 18, 16, 14, 12, 10, 8, 7, 6], # noqa
[ 5, 4, 4, 3, 3, 2, 2, 1]], dtype=cp.uint8) # noqa
# fmt: on
result = exposure.adjust_sigmoid(image, 0.5, 10, True)
assert_array_equal(result, expected)
def test_is_low_contrast():
image = cp.linspace(0, 0.04, 100)
assert exposure.is_low_contrast(image)
image[-1] = 1
assert exposure.is_low_contrast(image)
assert not exposure.is_low_contrast(image, upper_percentile=100)
image = (image * 255).astype(cp.uint8)
assert exposure.is_low_contrast(image)
assert not exposure.is_low_contrast(image, upper_percentile=100)
image = (image.astype(cp.uint16)) * 2**8
assert exposure.is_low_contrast(image)
assert not exposure.is_low_contrast(image, upper_percentile=100)
def test_is_low_contrast_boolean():
image = cp.zeros((8, 8), dtype=bool)
assert exposure.is_low_contrast(image)
image[:5] = 1
assert not exposure.is_low_contrast(image)
# Test negative input
#####################
@pytest.mark.parametrize(
"exposure_func",
[exposure.adjust_gamma, exposure.adjust_log, exposure.adjust_sigmoid],
)
def test_negative_input(exposure_func):
image = cp.arange(-10, 245, 4).reshape((8, 8)).astype(cp.float64)
with pytest.raises(ValueError):
exposure_func(image)
# Test Dask Compatibility
# =======================
# TODO: this Dask-based test case does not work (segfault!)
# @pytest.mark.xfail(True, reason="dask case not currently supported")
@pytest.mark.skip("dask case not currently supported")
def test_dask_histogram():
pytest.importorskip("dask", reason="dask python library is not installed")
import dask.array as da
dask_array = da.from_array(cp.array([[0, 1], [1, 2]]), chunks=(1, 2))
output_hist, output_bins = exposure.histogram(dask_array)
expected_bins = [0, 1, 2]
expected_hist = [1, 2, 1]
assert cp.allclose(expected_bins, output_bins)
assert cp.allclose(expected_hist, output_hist)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/exposure
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/exposure/tests/test_histogram_matching.py
|
import cupy as cp
import numpy as np
import pytest
from cupy.testing import assert_array_almost_equal
from skimage import data
from cucim.skimage import exposure
from cucim.skimage._shared.utils import _supported_float_type
from cucim.skimage.exposure import histogram_matching
@pytest.mark.parametrize(
"array, template, expected_array",
[
(cp.arange(10), cp.arange(100), cp.arange(9, 100, 10)),
(cp.random.rand(4), cp.ones(3), cp.ones(4)),
],
)
def test_match_array_values(array, template, expected_array):
# when
matched = histogram_matching._match_cumulative_cdf(array, template)
# then
assert_array_almost_equal(matched, expected_array)
class TestMatchHistogram:
image_rgb = cp.asarray(data.chelsea())
template_rgb = cp.asarray(data.astronaut())
@pytest.mark.parametrize("channel_axis", (0, 1, -1))
def test_match_histograms_channel_axis(self, channel_axis):
"""Assert that pdf of matched image is close to the reference's pdf for
all channels and all values of matched"""
image = cp.moveaxis(self.image_rgb, -1, channel_axis)
reference = cp.moveaxis(self.template_rgb, -1, channel_axis)
matched = exposure.match_histograms(
image, reference, channel_axis=channel_axis
)
assert matched.dtype == image.dtype
matched = cp.moveaxis(matched, channel_axis, -1)
reference = cp.moveaxis(reference, channel_axis, -1)
matched = cp.asnumpy(matched)
reference = cp.asnumpy(reference)
matched_pdf = self._calculate_image_empirical_pdf(matched)
reference_pdf = self._calculate_image_empirical_pdf(reference)
for channel in range(len(matched_pdf)):
reference_values, reference_quantiles = reference_pdf[channel]
matched_values, matched_quantiles = matched_pdf[channel]
for i, matched_value in enumerate(matched_values):
closest_id = (np.abs(reference_values - matched_value)).argmin()
assert_array_almost_equal(
matched_quantiles[i],
reference_quantiles[closest_id],
decimal=1,
)
@pytest.mark.parametrize("dtype", [cp.float16, cp.float32, cp.float64])
def test_match_histograms_float_dtype(self, dtype):
"""float16 or float32 inputs give float32 output"""
image = self.image_rgb.astype(dtype, copy=False)
reference = self.template_rgb.astype(dtype, copy=False)
matched = exposure.match_histograms(image, reference)
assert matched.dtype == _supported_float_type(dtype)
@pytest.mark.parametrize(
"image, reference",
[
(image_rgb, template_rgb[:, :, 0]),
(image_rgb[:, :, 0], template_rgb),
],
)
def test_raises_value_error_on_channels_mismatch(self, image, reference):
with pytest.raises(ValueError):
exposure.match_histograms(image, reference)
@classmethod
def _calculate_image_empirical_pdf(cls, image):
"""Helper function for calculating empirical probability density
function of a given image for all channels"""
if image.ndim > 2:
image = image.transpose(2, 0, 1)
channels = np.array(image, copy=False, ndmin=3)
channels_pdf = []
for channel in channels:
channel_values, counts = np.unique(channel, return_counts=True)
channel_quantiles = np.cumsum(counts).astype(np.float64)
channel_quantiles /= channel_quantiles[-1]
channels_pdf.append((channel_values, channel_quantiles))
return np.asarray(channels_pdf, dtype=object)
def test_match_histograms_consistency(self):
"""ensure equivalent results for float and integer-based code paths"""
image_u8 = self.image_rgb
reference_u8 = self.template_rgb
image_f64 = self.image_rgb.astype(np.float64)
reference_f64 = self.template_rgb.astype(np.float64, copy=False)
matched_u8 = exposure.match_histograms(image_u8, reference_u8)
matched_f64 = exposure.match_histograms(image_f64, reference_f64)
assert_array_almost_equal(
matched_u8.astype(np.float64), matched_f64, decimal=5
)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/data/_binary_blobs.py
|
import cupy as cp
from .._shared.filters import gaussian
from .._shared.utils import deprecate_kwarg
@deprecate_kwarg(
{"seed": "rng"}, deprecated_version="23.12.00", removed_version="24.12.00"
)
def binary_blobs(
length=512, blob_size_fraction=0.1, n_dim=2, volume_fraction=0.5, rng=None
):
"""
Generate synthetic binary image with several rounded blob-like objects.
Parameters
----------
length : int, optional
Linear size of output image.
blob_size_fraction : float, optional
Typical linear size of blob, as a fraction of ``length``, should be
smaller than 1.
n_dim : int, optional
Number of dimensions of output image.
volume_fraction : float, default 0.5
Fraction of image pixels covered by the blobs (where the output is 1).
Should be in [0, 1].
rng : {`cupy.random.Generator`, int}, optional
Pseudo-random number generator.
By default, a PCG64 generator is used (see :func:`cupy.random.default_rng`).
If `rng` is an int, it is used to seed the generator.
Returns
-------
blobs : ndarray of bools
Output binary image
Notes
-----
Warning: CuPy does not give identical randomly generated numbers as NumPy,
so using a specific `rng` here will not give an identical pattern to the
scikit-image implementation.
The behavior for a given random seed may also change across CuPy major
versions.
See: https://docs.cupy.dev/en/stable/reference/random.html
Examples
--------
>>> from cucim.skimage import data
>>> # tiny size (5, 5)
>>> blobs = data.binary_blobs(length=5, blob_size_fraction=0.2)
>>> # larger size
>>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.1)
>>> # Finer structures
>>> blobs = data.binary_blobs(length=256, blob_size_fraction=0.05)
>>> # Blobs cover a smaller volume fraction of the image
>>> blobs = data.binary_blobs(length=256, volume_fraction=0.3)
""" # noqa: E501
rs = cp.random.default_rng(rng)
shape = tuple([length] * n_dim)
mask = cp.zeros(shape)
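# place roughly one random seed point per blob-sized cell of the image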
n_pts = max(int(1.0 / blob_size_fraction) ** n_dim, 1)
points = (length * rs.random((n_dim, n_pts))).astype(int)
mask[tuple(indices for indices in points)] = 1
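# smooth the seed points so each one grows into a rounded blob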
mask = gaussian(mask, sigma=0.25 * length * blob_size_fraction)
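# pick the threshold so that about volume_fraction of the pixels end up True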
threshold = cp.percentile(mask, 100 * (1 - volume_fraction))
return cp.logical_not(mask < threshold)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/data/__init__.py
|
import lazy_loader as lazy
__getattr__, __dir__, __all__ = lazy.attach_stub(__name__, __file__)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/data/__init__.pyi
|
__all__ = [
"binary_blobs",
]
from ._binary_blobs import binary_blobs
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/data
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/data/tests/test_data.py
|
import cupy as cp
import pytest
from numpy.testing import assert_almost_equal
from cucim.skimage import data
def test_binary_blobs():
blobs = data.binary_blobs(length=128)
assert_almost_equal(blobs.mean(), 0.5, decimal=1)
blobs = data.binary_blobs(length=128, volume_fraction=0.25)
assert_almost_equal(blobs.mean(), 0.25, decimal=1)
blobs = data.binary_blobs(length=32, volume_fraction=0.25, n_dim=3)
assert_almost_equal(blobs.mean(), 0.25, decimal=1)
other_realization = data.binary_blobs(
length=32, volume_fraction=0.25, n_dim=3
)
assert not cp.all(blobs == other_realization)
def test_binary_blobs_futurewarning():
with pytest.warns(FutureWarning):
data.binary_blobs(length=128, seed=5)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_ndimage_interp_kernels.py
|
import cupy
import numpy
from cucim.skimage._vendored import (
_ndimage_spline_kernel_weights as _spline_kernel_weights,
_ndimage_spline_prefilter_core as _spline_prefilter_core,
_ndimage_util as _util,
)
math_constants_preamble = r"""
// workaround for HIP: line begins with #include
#include <cupy/math_constants.h>
"""
spline_weights_inline = _spline_kernel_weights.spline_weights_inline
def _get_coord_map(ndim, nprepad=0):
"""Extract target coordinate from coords array (for map_coordinates).
Notes
-----
Assumes the following variables have been initialized on the device::
coords (ndarray): array of shape (ncoords, ndim) containing the target
coordinates.
c_j: variables to hold the target coordinates
computes::
c_j = coords[i + j * ncoords];
ncoords is determined by the size of the output array, y.
y will be indexed by the CIndexer, _ind.
Thus ncoords = _ind.size();
"""
ops = []
ops.append("ptrdiff_t ncoords = _ind.size();")
pre = f" + (W){nprepad}" if nprepad > 0 else ""
for j in range(ndim):
ops.append(
f"""
W c_{j} = coords[i + {j} * ncoords]{pre};"""
)
return ops
def _get_coord_zoom_and_shift(ndim, nprepad=0):
"""Compute target coordinate based on a shift followed by a zoom.
This version zooms from the center of the edge pixels.
Notes
-----
Assumes the following variables have been initialized on the device::
in_coord[ndim]: array containing the source coordinate
zoom[ndim]: array containing the zoom for each axis
shift[ndim]: array containing the shift for each axis
computes::
c_j = zoom[j] * (in_coord[j] - shift[j])
"""
ops = []
pre = f" + (W){nprepad}" if nprepad > 0 else ""
for j in range(ndim):
ops.append(
f"""
W c_{j} = zoom[{j}] * ((W)in_coord[{j}] - shift[{j}]){pre};"""
)
return ops
def _get_coord_zoom_and_shift_grid(ndim, nprepad=0):
"""Compute target coordinate based on a shift followed by a zoom.
This version zooms from the outer edges of the grid pixels.
Notes
-----
Assumes the following variables have been initialized on the device::
in_coord[ndim]: array containing the source coordinate
zoom[ndim]: array containing the zoom for each axis
shift[ndim]: array containing the shift for each axis
computes::
c_j = zoom[j] * (in_coord[j] - shift[j] + 0.5) - 0.5
"""
ops = []
pre = f" + (W){nprepad}" if nprepad > 0 else ""
for j in range(ndim):
ops.append(
f"""
W c_{j} = zoom[{j}] * ((W)in_coord[{j}] - shift[{j}] + 0.5) - 0.5{pre};"""
)
return ops
def _get_coord_zoom(ndim, nprepad=0):
"""Compute target coordinate based on a zoom.
This version zooms from the center of the edge pixels.
Notes
-----
Assumes the following variables have been initialized on the device::
in_coord[ndim]: array containing the source coordinate
zoom[ndim]: array containing the zoom for each axis
computes::
c_j = zoom[j] * in_coord[j]
"""
ops = []
pre = f" + (W){nprepad}" if nprepad > 0 else ""
for j in range(ndim):
ops.append(
f"""
W c_{j} = zoom[{j}] * (W)in_coord[{j}]{pre};"""
)
return ops
def _get_coord_zoom_grid(ndim, nprepad=0):
"""Compute target coordinate based on a zoom (grid_mode=True version).
This version zooms from the outer edges of the grid pixels.
Notes
-----
Assumes the following variables have been initialized on the device::
in_coord[ndim]: array containing the source coordinate
zoom[ndim]: array containing the zoom for each axis
computes::
c_j = zoom[j] * (in_coord[j] + 0.5) - 0.5
"""
ops = []
pre = f" + (W){nprepad}" if nprepad > 0 else ""
for j in range(ndim):
ops.append(
f"""
W c_{j} = zoom[{j}] * ((W)in_coord[{j}] + 0.5) - 0.5{pre};"""
)
return ops
def _get_coord_shift(ndim, nprepad=0):
"""Compute target coordinate based on a shift.
Notes
-----
Assumes the following variables have been initialized on the device::
in_coord[ndim]: array containing the source coordinate
shift[ndim]: array containing the shift for each axis
computes::
c_j = in_coord[j] - shift[j]
"""
ops = []
pre = f" + (W){nprepad}" if nprepad > 0 else ""
for j in range(ndim):
ops.append(
f"""
W c_{j} = (W)in_coord[{j}] - shift[{j}]{pre};"""
)
return ops
def _get_coord_affine(ndim, nprepad=0):
"""Compute target coordinate based on a homogeneous transformation matrix.
The homogeneous matrix has shape (ndim, ndim + 1). It corresponds to an
affine matrix whose last row is assumed to be ``[0] * ndim + [1]``.
Notes
-----
Assumes the following variables have been initialized on the device::
mat(array): array containing the (ndim, ndim + 1) transform matrix.
in_coords(array): coordinates of the input
For example, in 2D:
c_0 = mat[0] * in_coords[0] + mat[1] * in_coords[1] + mat[2];
c_1 = mat[3] * in_coords[0] + mat[4] * in_coords[1] + mat[5];
"""
ops = []
pre = f" + (W){nprepad}" if nprepad > 0 else ""
ncol = ndim + 1
for j in range(ndim):
ops.append(
f"""
W c_{j} = (W)0.0;"""
)
for k in range(ndim):
ops.append(
f"""
c_{j} += mat[{ncol * j + k}] * (W)in_coord[{k}];"""
)
ops.append(
f"""
c_{j} += mat[{ncol * j + ndim}]{pre};"""
)
return ops
def _unravel_loop_index(shape, uint_t="unsigned int"):
"""
declare a multi-index array in_coord and unravel the 1D index, i into it.
This code assumes that the array is a C-ordered array.
"""
ndim = len(shape)
code = [
f"""
{uint_t} in_coord[{ndim}];
{uint_t} s, t, idx = i;"""
]
for j in range(ndim - 1, 0, -1):
code.append(
f"""
s = {shape[j]};
t = idx / s;
in_coord[{j}] = idx - t * s;
idx = t;"""
)
code.append(
"""
in_coord[0] = idx;"""
)
return "\n".join(code)
def _generate_interp_custom(
coord_func,
ndim,
large_int,
yshape,
mode,
cval,
order,
name="",
integer_output=False,
nprepad=0,
omit_in_coord=False,
):
"""
Args:
coord_func (function): generates code to do the coordinate
transformation. See for example, `_get_coord_shift`.
ndim (int): The number of dimensions.
large_int (bool): If true use Py_ssize_t instead of int for indexing.
yshape (tuple): Shape of the output array.
mode (str): Signal extension mode to use at the array boundaries
cval (float): constant value used when `mode == 'constant'`.
name (str): base name for the interpolation kernel
integer_output (bool): boolean indicating whether the output has an
integer type.
nprepad (int): integer indicating the amount of prepadding at the
boundaries.
Returns:
operation (str): code body for the ElementwiseKernel
name (str): name for the ElementwiseKernel
"""
ops = []
internal_dtype = "double" if integer_output else "Y"
ops.append(f"{internal_dtype} out = 0.0;")
if large_int:
uint_t = "size_t"
int_t = "ptrdiff_t"
else:
uint_t = "unsigned int"
int_t = "int"
# determine strides for x along each axis
for j in range(ndim):
ops.append(f"const {int_t} xsize_{j} = x.shape()[{j}];")
ops.append(f"const {uint_t} sx_{ndim - 1} = 1;")
for j in range(ndim - 1, 0, -1):
ops.append(f"const {uint_t} sx_{j - 1} = sx_{j} * xsize_{j};")
if not omit_in_coord:
# create in_coords array to store the unraveled indices
ops.append(_unravel_loop_index(yshape, uint_t))
# compute the transformed (target) coordinates, c_j
ops = ops + coord_func(ndim, nprepad)
if cval is numpy.nan:
cval = "(Y)CUDART_NAN"
elif cval == numpy.inf:
cval = "(Y)CUDART_INF"
elif cval == -numpy.inf:
cval = "(Y)(-CUDART_INF)"
else:
cval = f"({internal_dtype}){cval}"
if mode == "constant":
# use cval if coordinate is outside the bounds of x
_cond = " || ".join(
[f"(c_{j} < 0) || (c_{j} > xsize_{j} - 1)" for j in range(ndim)]
)
ops.append(
f"""
if ({_cond})
{{
out = {cval};
}}
else
{{"""
)
if order == 0:
if mode == "wrap":
ops.append("double dcoord;") # mode 'wrap' requires this to work
for j in range(ndim):
# determine nearest neighbor
if mode == "wrap":
ops.append(
f"""
dcoord = c_{j};"""
)
else:
ops.append(
f"""
{int_t} cf_{j} = ({int_t})floor((double)c_{j} + 0.5);"""
)
# handle boundary
if mode != "constant":
if mode == "wrap":
ixvar = "dcoord"
float_ix = True
else:
ixvar = f"cf_{j}"
float_ix = False
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, f"xsize_{j}", int_t, float_ix
)
)
if mode == "wrap":
ops.append(
f"""
{int_t} cf_{j} = ({int_t})floor(dcoord + 0.5);"""
)
# sum over ic_j will give the raveled coordinate in the input
ops.append(
f"""
{int_t} ic_{j} = cf_{j} * sx_{j};"""
)
_coord_idx = " + ".join([f"ic_{j}" for j in range(ndim)])
if mode == "grid-constant":
_cond = " || ".join([f"(ic_{j} < 0)" for j in range(ndim)])
ops.append(
f"""
if ({_cond}) {{
out = {cval};
}} else {{
out = ({internal_dtype})x[{_coord_idx}];
}}"""
)
else:
ops.append(
f"""
out = ({internal_dtype})x[{_coord_idx}];"""
)
elif order == 1:
for j in range(ndim):
# get coordinates for linear interpolation along axis j
ops.append(
f"""
{int_t} cf_{j} = ({int_t})floor((double)c_{j});
{int_t} cc_{j} = cf_{j} + 1;
{int_t} n_{j} = (c_{j} == cf_{j}) ? 1 : 2; // points needed
"""
)
if mode == "wrap":
ops.append(
f"""
double dcoordf = c_{j};
double dcoordc = c_{j} + 1;"""
)
else:
# handle boundaries for extension modes.
ops.append(
f"""
{int_t} cf_bounded_{j} = cf_{j};
{int_t} cc_bounded_{j} = cc_{j};"""
)
if mode != "constant":
if mode == "wrap":
ixvar = "dcoordf"
float_ix = True
else:
ixvar = f"cf_bounded_{j}"
float_ix = False
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, f"xsize_{j}", int_t, float_ix
)
)
ixvar = "dcoordc" if mode == "wrap" else f"cc_bounded_{j}"
ops.append(
_util._generate_boundary_condition_ops(
mode, ixvar, f"xsize_{j}", int_t, float_ix
)
)
if mode == "wrap":
ops.append(
f"""
{int_t} cf_bounded_{j} = ({int_t})floor(dcoordf);
{int_t} cc_bounded_{j} = ({int_t})floor(dcoordf + 1);
"""
)
ops.append(
f"""
for (int s_{j} = 0; s_{j} < n_{j}; s_{j}++)
{{
W w_{j};
{int_t} ic_{j};
if (s_{j} == 0)
{{
w_{j} = (W)cc_{j} - c_{j};
ic_{j} = cf_bounded_{j} * sx_{j};
}} else
{{
w_{j} = c_{j} - (W)cf_{j};
ic_{j} = cc_bounded_{j} * sx_{j};
}}"""
)
elif order > 1:
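# orders 2-5: B-spline interpolation using order + 1 precomputed weights per axis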
if mode == "grid-constant":
spline_mode = "constant"
elif mode == "nearest":
spline_mode = "nearest"
else:
spline_mode = _spline_prefilter_core._get_spline_mode(mode)
# wx, wy are temporary variables used during spline weight computation
ops.append(
f"""
W wx, wy;
{int_t} start;"""
)
for j in range(ndim):
# determine weights along the current axis
ops.append(
f"""
W weights_{j}[{order + 1}];"""
)
ops.append(spline_weights_inline[order].format(j=j, order=order))
# get starting coordinate for spline interpolation along axis j
if mode in ["wrap"]:
ops.append(f"double dcoord = c_{j};")
coord_var = "dcoord"
ops.append(
_util._generate_boundary_condition_ops(
mode, coord_var, f"xsize_{j}", int_t, True
)
)
else:
coord_var = f"(double)c_{j}"
if order & 1:
op_str = """
start = ({int_t})floor({coord_var}) - {order_2};"""
else:
op_str = """
start = ({int_t})floor({coord_var} + 0.5) - {order_2};"""
ops.append(
op_str.format(
int_t=int_t, coord_var=coord_var, order_2=order // 2
)
)
# set of coordinate values within spline footprint along axis j
ops.append(f"""{int_t} ci_{j}[{order + 1}];""")
for k in range(order + 1):
ixvar = f"ci_{j}[{k}]"
ops.append(
f"""
{ixvar} = start + {k};"""
)
ops.append(
_util._generate_boundary_condition_ops(
spline_mode, ixvar, f"xsize_{j}", int_t
)
)
# loop over the order + 1 values in the spline filter
ops.append(
f"""
W w_{j};
{int_t} ic_{j};
for (int k_{j} = 0; k_{j} <= {order}; k_{j}++)
{{
w_{j} = weights_{j}[k_{j}];
ic_{j} = ci_{j}[k_{j}] * sx_{j};
"""
)
if order > 0:
_weight = " * ".join([f"w_{j}" for j in range(ndim)])
_coord_idx = " + ".join([f"ic_{j}" for j in range(ndim)])
if mode == "grid-constant" or (order > 1 and mode == "constant"):
_cond = " || ".join([f"(ic_{j} < 0)" for j in range(ndim)])
ops.append(
f"""
if ({_cond}) {{
out += {cval} * ({internal_dtype})({_weight});
}} else {{
{internal_dtype} val = ({internal_dtype})x[{_coord_idx}];
out += val * ({internal_dtype})({_weight});
}}"""
)
else:
ops.append(
f"""
{internal_dtype} val = ({internal_dtype})x[{_coord_idx}];
out += val * ({internal_dtype})({_weight});"""
)
ops.append("}" * ndim)
if mode == "constant":
ops.append("}")
if integer_output:
ops.append("y = (Y)rint((double)out);")
else:
ops.append("y = (Y)out;")
operation = "\n".join(ops)
mode_str = mode.replace("-", "_") # avoid hyphen in kernel name
name = "cupyx_scipy_ndimage_interpolate_{}_order{}_{}_{}d_y{}".format(
name,
order,
mode_str,
ndim,
"_".join([f"{j}" for j in yshape]),
)
if uint_t == "size_t":
name += "_i64"
return operation, name
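# The factory functions below build ElementwiseKernels for each combination of
# transform type, ndim, boundary mode, interpolation order and output shape,
# and cache them per device via cupy's memoization.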
@cupy._util.memoize(for_each_device=True)
def _get_map_kernel(
ndim,
large_int,
yshape,
mode,
cval=0.0,
order=1,
integer_output=False,
nprepad=0,
):
in_params = "raw X x, raw W coords"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_map,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="map",
integer_output=integer_output,
nprepad=nprepad,
omit_in_coord=True, # input image coordinates are not needed
)
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, preamble=math_constants_preamble
)
@cupy._util.memoize(for_each_device=True)
def _get_shift_kernel(
ndim,
large_int,
yshape,
mode,
cval=0.0,
order=1,
integer_output=False,
nprepad=0,
):
in_params = "raw X x, raw W shift"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_shift,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="shift",
integer_output=integer_output,
nprepad=nprepad,
)
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, preamble=math_constants_preamble
)
@cupy._util.memoize(for_each_device=True)
def _get_zoom_shift_kernel(
ndim,
large_int,
yshape,
mode,
cval=0.0,
order=1,
integer_output=False,
grid_mode=False,
nprepad=0,
):
in_params = "raw X x, raw W shift, raw W zoom"
out_params = "Y y"
if grid_mode:
zoom_shift_func = _get_coord_zoom_and_shift_grid
else:
zoom_shift_func = _get_coord_zoom_and_shift
operation, name = _generate_interp_custom(
coord_func=zoom_shift_func,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="zoom_shift_grid" if grid_mode else "zoom_shift",
integer_output=integer_output,
nprepad=nprepad,
)
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, preamble=math_constants_preamble
)
@cupy._util.memoize(for_each_device=True)
def _get_zoom_kernel(
ndim,
large_int,
yshape,
mode,
cval=0.0,
order=1,
integer_output=False,
grid_mode=False,
nprepad=0,
):
in_params = "raw X x, raw W zoom"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_zoom_grid if grid_mode else _get_coord_zoom,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="zoom_grid" if grid_mode else "zoom",
integer_output=integer_output,
nprepad=nprepad,
)
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, preamble=math_constants_preamble
)
@cupy._util.memoize(for_each_device=True)
def _get_affine_kernel(
ndim,
large_int,
yshape,
mode,
cval=0.0,
order=1,
integer_output=False,
nprepad=0,
):
in_params = "raw X x, raw W mat"
out_params = "Y y"
operation, name = _generate_interp_custom(
coord_func=_get_coord_affine,
ndim=ndim,
large_int=large_int,
yshape=yshape,
mode=mode,
cval=cval,
order=order,
name="affine",
integer_output=integer_output,
nprepad=nprepad,
)
return cupy.ElementwiseKernel(
in_params, out_params, operation, name, preamble=math_constants_preamble
)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_ndimage_spline_kernel_weights.py
|
"""Determination of spline kernel weights (adapted from SciPy)
See more verbose comments for each case there:
https://github.com/scipy/scipy/blob/eba29d69846ab1299976ff4af71c106188397ccc/scipy/ndimage/src/ni_splines.c#L7
``spline_weights_inline`` is a dict where the key is the spline order and the
value is the spline weight initialization code.
""" # noqa: E501
spline_weights_inline = {}
# Note: This order = 1 case is currently unused (order = 1 has a different code
# path in _interp_kernels.py). I think that existing code is a bit more
# efficient.
spline_weights_inline[
1
] = """
wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5);
weights_{j}[0] = 1.0 - wx;
weights_{j}[1] = wx;
"""
spline_weights_inline[
2
] = """
wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5);
weights_{j}[1] = 0.75 - wx * wx;
wy = 0.5 - wx;
weights_{j}[0] = 0.5 * wy * wy;
weights_{j}[2] = 1.0 - weights_{j}[0] - weights_{j}[1];
"""
spline_weights_inline[
3
] = """
wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5);
wy = 1.0 - wx;
weights_{j}[1] = (wx * wx * (wx - 2.0) * 3.0 + 4.0) / 6.0;
weights_{j}[2] = (wy * wy * (wy - 2.0) * 3.0 + 4.0) / 6.0;
weights_{j}[0] = wy * wy * wy / 6.0;
weights_{j}[3] = 1.0 - weights_{j}[0] - weights_{j}[1] - weights_{j}[2];
"""
spline_weights_inline[
4
] = """
wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5);
wy = wx * wx;
weights_{j}[2] = wy * (wy * 0.25 - 0.625) + 115.0 / 192.0;
wy = 1.0 + wx;
weights_{j}[1] = wy * (wy * (wy * (5.0 - wy) / 6.0 - 1.25) + 5.0 / 24.0) +
55.0 / 96.0;
wy = 1.0 - wx;
weights_{j}[3] = wy * (wy * (wy * (5.0 - wy) / 6.0 - 1.25) + 5.0 / 24.0) +
55.0 / 96.0;
wy = 0.5 - wx;
wy = wy * wy;
weights_{j}[0] = wy * wy / 24.0;
weights_{j}[4] = 1.0 - weights_{j}[0] - weights_{j}[1]
- weights_{j}[2] - weights_{j}[3];
"""
spline_weights_inline[
5
] = """
wx = c_{j} - floor({order} & 1 ? c_{j} : c_{j} + 0.5);
wy = wx * wx;
weights_{j}[2] = wy * (wy * (0.25 - wx / 12.0) - 0.5) + 0.55;
wy = 1.0 - wx;
wy = wy * wy;
weights_{j}[3] = wy * (wy * (0.25 - (1.0 - wx) / 12.0) - 0.5) + 0.55;
wy = wx + 1.0;
weights_{j}[1] = wy * (wy * (wy * (wy * (wy / 24.0 - 0.375) + 1.25) - 1.75)
+ 0.625) + 0.425;
wy = 2.0 - wx;
weights_{j}[4] = wy * (wy * (wy * (wy * (wy / 24.0 - 0.375) + 1.25) - 1.75)
+ 0.625) + 0.425;
wy = 1.0 - wx;
wy = wy * wy;
weights_{j}[0] = (1.0 - wx) * wy * wy / 120.0;
weights_{j}[5] = 1.0 - weights_{j}[0] - weights_{j}[1] - weights_{j}[2]
- weights_{j}[3] - weights_{j}[4];
"""
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/signaltools.py
|
"""A vendored subset of cupyx.scipy.signal.signaltools
Note:
The version of ``choose_conv_method`` here differs from the one in CuPy and
does not restrict the choice of fftconvolve to only 1D arrays.
"""
import timeit
import warnings
import cupy
import numpy as np
from cupyx.scipy.ndimage import rank_filter, uniform_filter
from cucim import _misc
from cucim.skimage._vendored import _signaltools_core as _st_core
from cucim.skimage._vendored._ndimage_util import _fix_sequence_arg
_prod = _misc.prod
def convolve(in1, in2, mode="full", method="auto"):
"""Convolve two N-dimensional arrays.
Convolve ``in1`` and ``in2``, with the output size determined by the
``mode`` argument.
Args:
in1 (cupy.ndarray): First input.
in2 (cupy.ndarray): Second input. Should have the same number of
dimensions as `in1`.
mode (str): Indicates the size of the output:
- ``'full'``: output is the full discrete linear convolution \
(default)
- ``'valid'``: output consists only of those elements that do \
not rely on the zero-padding. Either ``in1`` or ``in2`` must \
be at least as large as the other in every dimension.
- ``'same'``: output is the same size as ``in1``, centered with \
respect to the ``'full'`` output
method (str): Indicates which method to use for the computations:
- ``'direct'``: The convolution is determined directly from sums, \
the definition of convolution
- ``'fft'``: The Fourier Transform is used to perform the \
convolution by calling ``fftconvolve``.
- ``'auto'``: Automatically choose direct or FFT based on an \
estimate of which is faster for the arguments (default).
Returns:
cupy.ndarray: the result of convolution.
.. seealso:: :func:`cupyx.scipy.signal.choose_conv_method`
.. seealso:: :func:`cupyx.scipy.signal.correlate`
.. seealso:: :func:`cupyx.scipy.signal.fftconvolve`
.. seealso:: :func:`cupyx.scipy.signal.oaconvolve`
.. seealso:: :func:`cupyx.scipy.ndimage.convolve`
.. seealso:: :func:`scipy.signal.convolve`
.. note::
By default, ``convolve`` and ``correlate`` use ``method='auto'``, which
calls ``choose_conv_method`` to choose the fastest method using
pre-computed values. CuPy may not choose the same method to compute
the convolution as SciPy does given the same inputs.
"""
return _correlate(in1, in2, mode, method, True)
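# Illustrative usage (a minimal sketch; the result dtype and the method
# actually used depend on the inputs and on ``choose_conv_method``):
#
#   >>> import cupy as cp
#   >>> a = cp.asarray([1.0, 2.0, 3.0])
#   >>> k = cp.asarray([0.0, 1.0, 0.5])
#   >>> convolve(a, k, mode="full")   # -> array([0. , 1. , 2.5, 4. , 1.5])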
def correlate(in1, in2, mode="full", method="auto"):
"""Cross-correlate two N-dimensional arrays.
Cross-correlate ``in1`` and ``in2``, with the output size determined by the
``mode`` argument.
Args:
in1 (cupy.ndarray): First input.
in2 (cupy.ndarray): Second input. Should have the same number of
dimensions as ``in1``.
mode (str): Indicates the size of the output:
- ``'full'``: output is the full discrete linear convolution \
(default)
- ``'valid'``: output consists only of those elements that do \
not rely on the zero-padding. Either ``in1`` or ``in2`` must \
be at least as large as the other in every dimension.
- ``'same'``: output is the same size as ``in1``, centered with \
respect to the ``'full'`` output
method (str): Indicates which method to use for the computations:
- ``'direct'``: The convolution is determined directly from sums, \
the definition of convolution
- ``'fft'``: The Fourier Transform is used to perform the \
convolution by calling ``fftconvolve``.
- ``'auto'``: Automatically choose direct or FFT based on an \
estimate of which is faster for the arguments (default).
Returns:
cupy.ndarray: the result of correlation.
.. seealso:: :func:`cupyx.scipy.signal.choose_conv_method`
.. seealso:: :func:`cupyx.scipy.signal.convolve`
.. seealso:: :func:`cupyx.scipy.signal.fftconvolve`
.. seealso:: :func:`cupyx.scipy.signal.oaconvolve`
.. seealso:: :func:`cupyx.scipy.ndimage.correlate`
.. seealso:: :func:`scipy.signal.correlate`
.. note::
By default, ``convolve`` and ``correlate`` use ``method='auto'``, which
calls ``choose_conv_method`` to choose the fastest method using
pre-computed values. CuPy may not choose the same method to compute
the convolution as SciPy does given the same inputs.
"""
return _correlate(in1, in2, mode, method, False)
def _correlate(in1, in2, mode="full", method="auto", convolution=False):
quick_out = _st_core._check_conv_inputs(in1, in2, mode, convolution)
if quick_out is not None:
return quick_out
if method not in ("auto", "direct", "fft"):
raise ValueError('acceptable methods are "auto", "direct", or "fft"')
if method == "auto":
method = choose_conv_method(in1, in2, mode=mode)
if method == "direct":
return _st_core._direct_correlate(
in1, in2, mode, in1.dtype, convolution
)
# if method == 'fft':
inputs_swapped = _st_core._inputs_swap_needed(mode, in1.shape, in2.shape)
if inputs_swapped:
in1, in2 = in2, in1
if not convolution:
in2 = _st_core._reverse_and_conj(in2)
out = fftconvolve(in1, in2, mode)
result_type = cupy.result_type(in1, in2)
if result_type.kind in "ui":
out = out.round()
out = out.astype(result_type, copy=False)
if not convolution and inputs_swapped:
out = cupy.ascontiguousarray(_st_core._reverse_and_conj(out))
return out
def fftconvolve(in1, in2, mode="full", axes=None):
"""Convolve two N-dimensional arrays using FFT.
Convolve ``in1`` and ``in2`` using the fast Fourier transform method, with
the output size determined by the ``mode`` argument.
This is generally much faster than the ``'direct'`` method of ``convolve``
for large arrays, but can be slower when only a few output values are
needed, and can only output float arrays (int or object array inputs will
be cast to float).
Args:
in1 (cupy.ndarray): First input.
in2 (cupy.ndarray): Second input. Should have the same number of
dimensions as ``in1``.
mode (str): Indicates the size of the output:
``'full'``: output is the full discrete linear convolution
(default)
``'valid'``: output consists only of those elements that do not
rely on the zero-padding. Either ``in1`` or ``in2``
must be at least as large as the other in every
dimension.
``'same'``: output is the same size as ``in1``, centered
with respect to the 'full' output
axes (scalar or tuple of scalar or None): Axes over which to compute
the convolution. The default is over all axes.
Returns:
cupy.ndarray: the result of convolution
.. seealso:: :func:`cupyx.scipy.signal.choose_conv_method`
.. seealso:: :func:`cupyx.scipy.signal.correlate`
.. seealso:: :func:`cupyx.scipy.signal.convolve`
.. seealso:: :func:`cupyx.scipy.signal.oaconvolve`
.. seealso:: :func:`cupyx.scipy.ndimage.convolve`
.. seealso:: :func:`scipy.signal.fftconvolve`
"""
out = _st_core._check_conv_inputs(in1, in2, mode)
if out is not None:
return out
in1, in2, axes = _st_core._init_freq_conv_axes(in1, in2, mode, axes, False)
shape = [
max(x1, x2) if a not in axes else x1 + x2 - 1
for a, (x1, x2) in enumerate(zip(in1.shape, in2.shape))
]
out = _st_core._freq_domain_conv(in1, in2, axes, shape, calc_fast_len=True)
return _st_core._apply_conv_mode(out, in1.shape, in2.shape, mode, axes)
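# Note on the shape computation above (illustrative): along every convolved
# axis the FFT size is the "full" length ``x1 + x2 - 1``, while non-convolved
# axes keep their original extent. For example, assuming 2-D inputs of shape
# (4, 5) and (2, 3) convolved over both axes, the frequency-domain work is
# done on arrays padded to shape (5, 7) before cropping according to ``mode``.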
def _conv_ops(x_shape, h_shape, mode):
"""
Find the number of operations required for direct/fft methods of
convolution. The direct operations were recorded by making a dummy class to
record the number of operations by overriding ``__mul__`` and ``__add__``.
The FFT operations rely on the (well-known) computational complexity of the
FFT (and the implementation of ``_freq_domain_conv``).
"""
if mode == "full":
out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
elif mode == "valid":
out_shape = [abs(n - k) + 1 for n, k in zip(x_shape, h_shape)]
elif mode == "same":
out_shape = x_shape
else:
raise ValueError(
"Acceptable mode flags are 'valid',"
" 'same', or 'full', not mode={}".format(mode)
)
s1, s2 = x_shape, h_shape
if len(x_shape) == 1:
s1, s2 = s1[0], s2[0]
if mode == "full":
direct_ops = s1 * s2
elif mode == "valid":
direct_ops = (s2 - s1 + 1) * s1 if s2 >= s1 else (s1 - s2 + 1) * s2
elif mode == "same":
direct_ops = (
s1 * s2 if s1 < s2 else s1 * s2 - (s2 // 2) * ((s2 + 1) // 2)
)
else:
if mode == "full":
direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
elif mode == "valid":
direct_ops = min(_prod(s1), _prod(s2)) * _prod(out_shape)
elif mode == "same":
direct_ops = _prod(s1) * _prod(s2)
full_out_shape = [n + k - 1 for n, k in zip(x_shape, h_shape)]
N = _prod(full_out_shape)
fft_ops = 3 * N * np.log(N) # 3 separate FFTs of size full_out_shape
return fft_ops, direct_ops
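# Rough worked example (orders of magnitude only; the tuned constants that
# scale these counts live in ``_fftconv_faster``): for 1-D inputs with
# ``x_shape=(1000,)``, ``h_shape=(100,)`` and ``mode='full'`` the direct
# estimate is ``1000 * 100 = 1e5`` multiply-adds, while the FFT estimate is
# ``3 * 1099 * log(1099) ~= 2.3e4`` for the three length-1099 transforms.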
def _fftconv_faster(x, h, mode):
"""
See if using fftconvolve or convolve is faster.
Parameters
----------
x : cupy.ndarray
Signal
h : cupy.ndarray
Kernel
mode : str
Mode passed to convolve
Returns
-------
fft_faster : bool
Notes
-----
See docstring of `choose_conv_method` for details on tuning hardware.
See pull request 11031 for more detail:
https://github.com/scipy/scipy/pull/11031.
"""
fft_ops, direct_ops = _conv_ops(x.shape, h.shape, mode)
offset = -1e-3 if x.ndim == 1 else -1e-4
constants = (
{
"valid": (1.89095737e-9, 2.1364985e-10, offset),
"full": (1.7649070e-9, 2.1414831e-10, offset),
"same": (3.2646654e-9, 2.8478277e-10, offset)
if h.size <= x.size
else (3.21635404e-9, 1.1773253e-8, -1e-5),
}
if x.ndim == 1
else {
"valid": (1.85927e-9, 2.11242e-8, offset),
"full": (1.99817e-9, 1.66174e-8, offset),
"same": (2.04735e-9, 1.55367e-8, offset),
}
)
O_fft, O_direct, O_offset = constants[mode]
return O_fft * fft_ops < O_direct * direct_ops + O_offset
def _numeric_arrays(arrays, kinds="buifc"):
"""
See if a list of arrays are all numeric.
Parameters
----------
arrays : ndarray or list of ndarrays
arrays to check if numeric.
kinds : string-like
The dtypes of the arrays to be checked. If the dtype.kind of
the arrays is not in this string the function returns False and
otherwise returns True.
"""
if type(arrays) == cupy.ndarray:
return arrays.dtype.kind in kinds
for array_ in arrays:
if array_.dtype.kind not in kinds:
return False
return True
def _timeit_fast(stmt="pass", setup="pass", repeat=3):
"""
Returns the time the statement/function took, in seconds.
Faster, less precise version of IPython's timeit. `stmt` can be a statement
written as a string or a callable.
Will do only 1 loop (like IPython's timeit) with no repetitions
(unlike IPython) for very slow functions. For fast functions, only does
enough loops to take 5 ms, which seems to produce similar results (on
Windows at least), and avoids doing an extraneous cycle that isn't
measured.
"""
timer = timeit.Timer(stmt, setup)
# determine number of calls per rep so total time for 1 rep >= 5 ms
x = 0
for p in range(0, 10):
number = 10**p
x = timer.timeit(number) # seconds
if x >= 5e-3 / 10: # 5 ms for final test, 1/10th that for this one
break
if x > 1: # second
# If it's macroscopic, don't bother with repetitions
best = x
else:
number *= 10
r = timer.repeat(repeat, number)
best = min(r)
sec = best / number
return sec
# TODO: grlee77: tune this for CUDA when measure=False rather than falling
# back to the choices made by SciPy
def choose_conv_method(in1, in2, mode="full", measure=False):
"""
Find the fastest convolution/correlation method.
This primarily exists to be called during the ``method='auto'`` option in
`convolve` and `correlate`. It can also be used to determine the value of
``method`` for many different convolutions of the same dtype/shape.
In addition, it supports timing the convolution to adapt the value of
``method`` to a particular set of inputs and/or hardware.
Parameters
----------
in1 : array_like
The first argument passed into the convolution function.
in2 : array_like
The second argument passed into the convolution function.
mode : str {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
``full``
The output is the full discrete linear convolution
of the inputs. (Default)
``valid``
The output consists only of those elements that do not
rely on the zero-padding.
``same``
The output is the same size as `in1`, centered
with respect to the 'full' output.
measure : bool, optional
If True, run and time the convolution of `in1` and `in2` with both
methods and return the fastest. If False (default), predict the fastest
method using precomputed values.
Returns
-------
method : str
A string indicating which convolution method is fastest, either
'direct' or 'fft'
times : dict, optional
A dictionary containing the times (in seconds) needed for each method.
This value is only returned if ``measure=True``.
See Also
--------
convolve
correlate
Notes
-----
Generally, this method is 99% accurate for 2D signals and 85% accurate
for 1D signals for randomly chosen input sizes. For precision, use
``measure=True`` to find the fastest method by timing the convolution.
This can be used to avoid the minimal overhead of finding the fastest
``method`` later, or to adapt the value of ``method`` to a particular set
of inputs.
Experiments were run on an Amazon EC2 r5a.2xlarge machine to test this
function. These experiments measured the ratio between the time required
when using ``method='auto'`` and the time required for the fastest method
(i.e., ``ratio = time_auto / min(time_fft, time_direct)``). In these
experiments, we found:
* There is a 95% chance of this ratio being less than 1.5 for 1D signals
and a 99% chance of being less than 2.5 for 2D signals.
* The ratio was always less than 2.5/5 for 1D/2D signals respectively.
* This function is most inaccurate for 1D convolutions that take between 1
and 10 milliseconds with ``method='direct'``. A good proxy for this
(at least in our experiments) is ``1e6 <= in1.size * in2.size <= 1e7``.
The 2D results almost certainly generalize to 3D/4D/etc because the
implementation is the same (the 1D implementation is different).
All the numbers above are specific to the EC2 machine. However, we did find
that this function generalizes fairly decently across hardware. The speed
tests were of similar quality (and even slightly better) than the same
tests performed on the machine to tune this function's numbers (a mid-2014
15-inch MacBook Pro with 16GB RAM and a 2.5GHz Intel i7 processor).
There are cases when `fftconvolve` supports the inputs but this function
returns `direct` (e.g., to protect against floating point integer
precision).
.. versionadded:: 22.02.00
Examples
--------
Estimate the fastest method for a given input:
>>> from cucim.skimage import _vendored as signal
>>> img = cupy.random.rand(32, 32)
>>> filter = cupy.random.rand(8, 8)
>>> method = signal.choose_conv_method(img, filter, mode='same')
>>> method
'fft'
This can then be applied to other arrays of the same dtype and shape:
>>> img2 = cupy.random.rand(32, 32)
>>> filter2 = cupy.random.rand(8, 8)
>>> corr2 = signal.correlate(img2, filter2, mode='same', method=method)
>>> conv2 = signal.convolve(img2, filter2, mode='same', method=method)
The output of this function (``method``) works with `correlate` and
`convolve`.
"""
volume = cupy.asarray(in1)
kernel = cupy.asarray(in2)
if measure:
times = {}
for method in ["fft", "direct"]:
times[method] = _timeit_fast(
lambda: convolve(volume, kernel, mode=mode, method=method)
)
chosen_method = "fft" if times["fft"] < times["direct"] else "direct"
return chosen_method, times
# for integer input,
# catch when more precision required than float provides (representing an
# integer as float can lose precision in fftconvolve if larger than 2**52)
if any([_numeric_arrays([x], kinds="ui") for x in [volume, kernel]]):
max_value = int(cupy.abs(volume).max()) * int(cupy.abs(kernel).max())
max_value *= int(min(volume.size, kernel.size))
if max_value > 2 ** np.finfo("float").nmant - 1:
return "direct"
if _numeric_arrays([volume, kernel], kinds="b"):
return "direct"
if _numeric_arrays([volume, kernel]):
if _fftconv_faster(volume, kernel, mode):
return "fft"
return "direct"
def convolve2d(in1, in2, mode="full", boundary="fill", fillvalue=0):
"""Convolve two 2-dimensional arrays.
Convolve ``in1`` and ``in2`` with output size determined by ``mode``, and
boundary conditions determined by ``boundary`` and ``fillvalue``.
Args:
in1 (cupy.ndarray): First input.
in2 (cupy.ndarray): Second input. Should have the same number of
dimensions as ``in1``.
mode (str): Indicates the size of the output:
- ``'full'``: output is the full discrete linear convolution \
(default)
- ``'valid'``: output consists only of those elements that do \
not rely on the zero-padding. Either ``in1`` or ``in2`` must \
be at least as large as the other in every dimension.
- ``'same'``: output is the same size as ``in1``, centered with \
respect to the ``'full'`` output
boundary (str): Indicates how to handle boundaries:
- ``fill``: pad input arrays with fillvalue (default)
- ``wrap``: circular boundary conditions
- ``symm``: symmetrical boundary conditions
fillvalue (scalar): Value to fill pad input arrays with. Default is 0.
Returns:
cupy.ndarray: A 2-dimensional array containing a subset of the discrete
linear convolution of ``in1`` with ``in2``.
.. seealso:: :func:`cupyx.scipy.signal.convolve`
.. seealso:: :func:`cupyx.scipy.signal.fftconvolve`
.. seealso:: :func:`cupyx.scipy.signal.oaconvolve`
.. seealso:: :func:`cupyx.scipy.signal.correlate2d`
.. seealso:: :func:`cupyx.scipy.ndimage.convolve`
.. seealso:: :func:`scipy.signal.convolve2d`
"""
return _correlate2d(in1, in2, mode, boundary, fillvalue, True)
def correlate2d(in1, in2, mode="full", boundary="fill", fillvalue=0):
"""Cross-correlate two 2-dimensional arrays.
Cross correlate ``in1`` and ``in2`` with output size determined by
``mode``, and boundary conditions determined by ``boundary`` and
``fillvalue``.
Args:
in1 (cupy.ndarray): First input.
in2 (cupy.ndarray): Second input. Should have the same number of
dimensions as ``in1``.
mode (str): Indicates the size of the output:
- ``'full'``: output is the full discrete linear convolution \
(default)
- ``'valid'``: output consists only of those elements that do \
not rely on the zero-padding. Either ``in1`` or ``in2`` must \
be at least as large as the other in every dimension.
- ``'same'``: output is the same size as ``in1``, centered with \
respect to the ``'full'`` output
boundary (str): Indicates how to handle boundaries:
- ``fill``: pad input arrays with fillvalue (default)
- ``wrap``: circular boundary conditions
- ``symm``: symmetrical boundary conditions
fillvalue (scalar): Value to fill pad input arrays with. Default is 0.
Returns:
cupy.ndarray: A 2-dimensional array containing a subset of the discrete
linear cross-correlation of ``in1`` with ``in2``.
Note:
When using ``"same"`` mode with even-length inputs, the outputs of
``correlate`` and ``correlate2d`` differ: There is a 1-index offset
between them.
.. seealso:: :func:`cupyx.scipy.signal.correlate`
.. seealso:: :func:`cupyx.scipy.signal.convolve2d`
.. seealso:: :func:`cupyx.scipy.ndimage.correlate`
.. seealso:: :func:`scipy.signal.correlate2d`
"""
return _correlate2d(in1, in2, mode, boundary, fillvalue, False)
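# Illustrative usage (a minimal sketch assuming small float64 inputs; the
# value shown is the elementwise sum-product of the overlapping windows):
#
#   >>> import cupy as cp
#   >>> img = cp.asarray([[1.0, 2.0], [3.0, 4.0]])
#   >>> k = cp.asarray([[1.0, 0.0], [0.0, 1.0]])
#   >>> correlate2d(img, k, mode="valid")   # -> array([[5.]])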
def _correlate2d(in1, in2, mode, boundary, fillvalue, convolution=False):
if not (in1.ndim == in2.ndim == 2):
raise ValueError(
"{} inputs must both be 2-D arrays".format(
"convolve2d" if convolution else "correlate2d"
)
)
_boundaries = {
"fill": "constant",
"pad": "constant",
"wrap": "wrap",
"circular": "wrap",
"symm": "reflect",
"symmetric": "reflect",
}
boundary = _boundaries.get(boundary)
if boundary is None:
raise ValueError(
'Acceptable boundary flags are "fill" (or "pad"), '
'"circular" (or "wrap"), and '
'"symmetric" (or "symm").'
)
quick_out = _st_core._check_conv_inputs(in1, in2, mode, convolution)
if quick_out is not None:
return quick_out
return _st_core._direct_correlate(
in1,
in2,
mode,
in1.dtype,
convolution,
boundary,
fillvalue,
not convolution,
)
def wiener(im, mysize=None, noise=None):
"""Perform a Wiener filter on an N-dimensional array.
Apply a Wiener filter to the N-dimensional array `im`.
Args:
im (cupy.ndarray): An N-dimensional array.
mysize (int or cupy.ndarray, optional): A scalar or an N-length list
giving the size of the Wiener filter window in each dimension.
Elements of mysize should be odd. If mysize is a scalar, then this
scalar is used as the size in each dimension.
noise (float, optional): The noise-power to use. If None, then noise is
estimated as the average of the local variance of the input.
Returns:
cupy.ndarray: Wiener filtered result with the same shape as `im`.
.. seealso:: :func:`scipy.signal.wiener`
"""
if im.dtype.kind == "c":
# TODO: adding support for complex types requires ndimage filters
# to support complex types (which they could easily if not for the
# scipy compatibility requirement of forbidding complex and using
# float64 intermediates)
raise TypeError("complex types not currently supported")
if mysize is None:
mysize = 3
mysize = _fix_sequence_arg(mysize, im.ndim, "mysize", int)
im = im.astype(float, copy=False)
# Estimate the local mean
local_mean = uniform_filter(im, mysize, mode="constant")
# Estimate the local variance
local_var = uniform_filter(im * im, mysize, mode="constant")
local_var -= local_mean * local_mean
# Estimate the noise power if needed.
if noise is None:
noise = local_var.mean()
# Perform the filtering
res = im - local_mean
res *= 1 - noise / local_var
res += local_mean
return cupy.where(local_var < noise, local_mean, res)
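# Worked scalar example of the update above (illustrative): for a pixel with
# value 10, local mean 8, local variance 4 and noise power 1, the filtered
# value is ``8 + (1 - 1/4) * (10 - 8) = 9.5``; where the local variance falls
# below the noise estimate, the local mean is returned instead.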
def order_filter(a, domain, rank):
"""Perform an order filter on an N-D array.
Perform an order filter on the array in. The domain argument acts as a mask
centered over each pixel. The non-zero elements of domain are used to
select elements surrounding each input pixel which are placed in a list.
The list is sorted, and the output for that pixel is the element
corresponding to rank in the sorted list.
Args:
a (cupy.ndarray): The N-dimensional input array.
domain (cupy.ndarray): A mask array with the same number of dimensions
as `a`. Each dimension should have an odd number of elements.
rank (int): A non-negative integer which selects the element from the
sorted list (0 corresponds to the smallest element).
Returns:
cupy.ndarray: The results of the order filter in an array with the same
shape as `a`.
.. seealso:: :func:`cupyx.scipy.ndimage.rank_filter`
.. seealso:: :func:`scipy.signal.order_filter`
"""
if a.dtype.kind in "bc" or a.dtype == cupy.float16:
# scipy doesn't support these types
raise ValueError("data type not supported")
if any(x % 2 != 1 for x in domain.shape):
raise ValueError(
"Each dimension of domain argument "
" should have an odd number of elements."
)
return rank_filter(a, rank, footprint=domain, mode="constant")
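# Illustrative relationship to other filters (assuming an odd-sized all-ones
# domain): with a (3, 3) domain of ones there are 9 candidates per pixel, so
# ``rank=0`` reproduces a minimum filter, ``rank=8`` a maximum filter and
# ``rank=4`` a median filter over that neighborhood.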
def medfilt(volume, kernel_size=None):
"""Perform a median filter on an N-dimensional array.
Apply a median filter to the input array using a local window-size
given by `kernel_size`. The array will automatically be zero-padded.
Args:
volume (cupy.ndarray): An N-dimensional input array.
kernel_size (int or list of ints): Gives the size of the median filter
window in each dimension. Elements of `kernel_size` should be odd.
If `kernel_size` is a scalar, then this scalar is used as the size
in each dimension. Default size is 3 for each dimension.
Returns:
cupy.ndarray: An array the same size as input containing the median
filtered result.
.. seealso:: :func:`cupyx.scipy.ndimage.median_filter`
.. seealso:: :func:`scipy.signal.medfilt`
"""
if volume.dtype.kind == "c":
# scipy doesn't support complex
# (and rank_filter raise TypeError)
raise ValueError("complex types not supported")
# output is forced to float64 to match scipy
kernel_size = _get_kernel_size(kernel_size, volume.ndim)
if any(k > s for k, s in zip(kernel_size, volume.shape)):
warnings.warn(
"kernel_size exceeds volume extent: " "volume will be zero-padded"
)
size = np.prod(kernel_size)
return rank_filter(
volume, size // 2, size=kernel_size, output=float, mode="constant"
)
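# Illustrative usage (a minimal sketch; the output is float64 to match SciPy
# and the borders are zero-padded):
#
#   >>> import cupy as cp
#   >>> medfilt(cp.asarray([2.0, 80.0, 6.0, 3.0]), kernel_size=3)
#   ... # -> array([2., 6., 6., 3.])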
def medfilt2d(input, kernel_size=3):
"""Median filter a 2-dimensional array.
Apply a median filter to the `input` array using a local window-size given
by `kernel_size` (must be odd). The array is zero-padded automatically.
Args:
input (cupy.ndarray): A 2-dimensional input array.
kernel_size (int or list of ints of length 2): Gives the size of the
median filter window in each dimension. Elements of `kernel_size`
should be odd. If `kernel_size` is a scalar, then this scalar is
used as the size in each dimension. Default is a kernel of size
(3, 3).
Returns:
cupy.ndarray: An array the same size as input containing the median
filtered result.
.. seealso:: :func:`cupyx.scipy.ndimage.median_filter`
.. seealso:: :func:`cupyx.scipy.signal.medfilt`
.. seealso:: :func:`scipy.signal.medfilt2d`
"""
if input.dtype not in (cupy.uint8, cupy.float32, cupy.float64):
# Scipy's version only supports uint8, float32, and float64
raise ValueError("only supports uint8, float32, and float64")
if input.ndim != 2:
raise ValueError("input must be 2d")
kernel_size = _get_kernel_size(kernel_size, input.ndim)
order = kernel_size[0] * kernel_size[1] // 2
return rank_filter(input, order, size=kernel_size, mode="constant")
def _get_kernel_size(kernel_size, ndim):
if kernel_size is None:
kernel_size = (3,) * ndim
kernel_size = _fix_sequence_arg(kernel_size, ndim, "kernel_size", int)
if any((k % 2) != 1 for k in kernel_size):
raise ValueError("Each element of kernel_size should be odd")
return kernel_size
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/ndimage.py
|
# locally defined filters that are more efficient than in CuPy
# measurements
# fourier filters
# additional filters
from cupyx.scipy.ndimage import fourier_ellipsoid # NOQA
from cupyx.scipy.ndimage import fourier_gaussian # NOQA
from cupyx.scipy.ndimage import fourier_shift # NOQA
from cupyx.scipy.ndimage import fourier_uniform # NOQA
from cupyx.scipy.ndimage import generic_filter # NOQA
from cupyx.scipy.ndimage import generic_filter1d # NOQA
from cupyx.scipy.ndimage import label # NOQA
from cucim.skimage._vendored._ndimage_filters import convolve # NOQA
from cucim.skimage._vendored._ndimage_filters import convolve1d # NOQA
from cucim.skimage._vendored._ndimage_filters import correlate # NOQA
from cucim.skimage._vendored._ndimage_filters import correlate1d # NOQA
from cucim.skimage._vendored._ndimage_filters import gaussian_filter # NOQA
from cucim.skimage._vendored._ndimage_filters import gaussian_filter1d # NOQA
from cucim.skimage._vendored._ndimage_filters import gaussian_laplace # NOQA
from cucim.skimage._vendored._ndimage_filters import generic_laplace # NOQA
from cucim.skimage._vendored._ndimage_filters import laplace # NOQA
from cucim.skimage._vendored._ndimage_filters import maximum_filter # NOQA
from cucim.skimage._vendored._ndimage_filters import maximum_filter1d # NOQA
from cucim.skimage._vendored._ndimage_filters import median_filter # NOQA
from cucim.skimage._vendored._ndimage_filters import minimum_filter # NOQA
from cucim.skimage._vendored._ndimage_filters import minimum_filter1d # NOQA
from cucim.skimage._vendored._ndimage_filters import percentile_filter # NOQA
from cucim.skimage._vendored._ndimage_filters import prewitt # NOQA
from cucim.skimage._vendored._ndimage_filters import rank_filter # NOQA
from cucim.skimage._vendored._ndimage_filters import sobel # NOQA
from cucim.skimage._vendored._ndimage_filters import uniform_filter # NOQA
from cucim.skimage._vendored._ndimage_filters import uniform_filter1d # NOQA
from cucim.skimage._vendored._ndimage_filters import ( # NOQA
gaussian_gradient_magnitude,
generic_gradient_magnitude,
)
# interpolation
from cucim.skimage._vendored._ndimage_interpolation import rotate # NOQA
from cucim.skimage._vendored._ndimage_interpolation import shift # NOQA
from cucim.skimage._vendored._ndimage_interpolation import spline_filter # NOQA
from cucim.skimage._vendored._ndimage_interpolation import zoom # NOQA
from cucim.skimage._vendored._ndimage_interpolation import ( # NOQA
affine_transform,
map_coordinates,
spline_filter1d,
)
# morphology
from cucim.skimage._vendored._ndimage_morphology import binary_closing # NOQA
from cucim.skimage._vendored._ndimage_morphology import binary_dilation # NOQA
from cucim.skimage._vendored._ndimage_morphology import binary_erosion # NOQA
from cucim.skimage._vendored._ndimage_morphology import binary_opening # NOQA
from cucim.skimage._vendored._ndimage_morphology import black_tophat # NOQA
from cucim.skimage._vendored._ndimage_morphology import grey_closing # NOQA
from cucim.skimage._vendored._ndimage_morphology import grey_dilation # NOQA
from cucim.skimage._vendored._ndimage_morphology import grey_erosion # NOQA
from cucim.skimage._vendored._ndimage_morphology import grey_opening # NOQA
from cucim.skimage._vendored._ndimage_morphology import white_tophat # NOQA
from cucim.skimage._vendored._ndimage_morphology import ( # NOQA
binary_fill_holes,
binary_hit_or_miss,
binary_propagation,
generate_binary_structure,
iterate_structure,
morphological_gradient,
morphological_laplace,
)
# Import the rest of the cupyx.scipy.ndimage API here
try:
from cupyx.scipy.ndimage import sum_labels # NOQA
except ImportError:
from cupyx.scipy.ndimage import sum as sum_labels # NOQA
from cupyx.scipy.ndimage import center_of_mass # NOQA
from cupyx.scipy.ndimage import extrema # NOQA
from cupyx.scipy.ndimage import histogram # NOQA
from cupyx.scipy.ndimage import labeled_comprehension # NOQA
from cupyx.scipy.ndimage import maximum # NOQA
from cupyx.scipy.ndimage import maximum_position # NOQA
from cupyx.scipy.ndimage import mean # NOQA
from cupyx.scipy.ndimage import median # NOQA
from cupyx.scipy.ndimage import minimum # NOQA
from cupyx.scipy.ndimage import minimum_position # NOQA
from cupyx.scipy.ndimage import standard_deviation # NOQA
from cupyx.scipy.ndimage import variance # NOQA
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters_core.py
|
"""A vendored subset of cupyx.scipy.ndimage._filters_core"""
import warnings
import cupy
import numpy
from cucim.skimage._vendored import (
_internal as internal,
_ndimage_util as _util,
)
def _origins_to_offsets(origins, w_shape):
return tuple(x // 2 + o for x, o in zip(w_shape, origins))
def _check_size_footprint_structure(
ndim, size, footprint, structure, stacklevel=3, force_footprint=False
):
if structure is None and footprint is None:
if size is None:
raise RuntimeError("no footprint or filter size provided")
sizes = _util._fix_sequence_arg(size, ndim, "size", int)
if force_footprint:
return None, cupy.ones(sizes, bool), None
return sizes, None, None
if size is not None:
warnings.warn(
"ignoring size because {} is set".format(
"structure" if footprint is None else "footprint"
),
UserWarning,
stacklevel=stacklevel + 1,
)
if footprint is not None:
footprint = cupy.array(footprint, bool, True, "C")
if not footprint.any():
raise ValueError("all-zero footprint is not supported")
if structure is None:
if not force_footprint and footprint.all():
if footprint.ndim != ndim:
raise RuntimeError("size must have length equal to input rank")
return footprint.shape, None, None
return None, footprint, None
structure = cupy.ascontiguousarray(structure)
if footprint is None:
footprint = cupy.ones(structure.shape, bool)
return None, footprint, structure
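# Summary of the resolution rules above (illustrative): ``size=3`` with
# ``ndim=2`` yields ``sizes=(3, 3)`` and no footprint; an all-True footprint
# of shape (3, 3) is collapsed to just its shape (the cheaper size-based
# path); a footprint containing any False entries is kept as a boolean mask;
# and when a structure is supplied, a footprint of ones is created to
# accompany it.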
def _convert_1d_args(ndim, weights, origin, axis):
if weights.ndim != 1 or weights.size < 1:
raise RuntimeError("incorrect filter size")
axis = internal._normalize_axis_index(axis, ndim)
w_shape = [1] * ndim
w_shape[axis] = weights.size
weights = weights.reshape(w_shape)
origins = [0] * ndim
origins[axis] = _util._check_origin(origin, weights.size)
return weights, tuple(origins)
def _check_nd_args(
input, weights, mode, origin, wghts_name="filter weights", sizes=None
):
_util._check_mode(mode)
if weights is not None:
# Weights must always be less than 2 GiB
if weights.nbytes >= (1 << 31):
raise RuntimeError(
"weights must be 2 GiB or less, use FFTs instead"
)
weight_dims = [x for x in weights.shape if x != 0]
if len(weight_dims) != input.ndim:
raise RuntimeError(
"{} array has incorrect shape".format(wghts_name)
)
elif sizes is None:
raise ValueError("must specify either weights array or sizes")
else:
weight_dims = sizes
origins = _util._fix_sequence_arg(origin, len(weight_dims), "origin", int)
for origin, width in zip(origins, weight_dims):
_util._check_origin(origin, width)
return tuple(origins), _util._get_inttype(input)
def _run_1d_filters(
filters, input, args, output, mode, cval, origin=0, **filter_kwargs
):
"""
Runs a series of 1D filters forming an nd filter. The filters must be a
list of callables that take input, arg, axis, output, mode, cval, origin.
``args`` is a list of values that are passed as the ``arg`` value to the
corresponding filter. Individual filters can be None, causing that axis to
be skipped.
"""
output = _util._get_output(output, input)
modes = _util._fix_sequence_arg(mode, input.ndim, "mode", _util._check_mode)
# for filters, "wrap" is a synonym for "grid-wrap".
modes = ["grid-wrap" if m == "wrap" else m for m in modes]
origins = _util._fix_sequence_arg(origin, input.ndim, "origin", int)
n_filters = sum(filter is not None for filter in filters)
if n_filters == 0:
output[:] = input
return output
# We can't operate in-place efficiently, so use a 2-buffer system
temp = (
_util._get_output(output.dtype, input) if n_filters > 1 else None
) # noqa
iterator = zip(filters, args, modes, origins)
for axis, (fltr, arg, mode, origin) in enumerate(iterator):
if fltr is None:
continue
else:
break
if n_filters % 2 == 0:
fltr(input, arg, axis, temp, mode, cval, origin, **filter_kwargs)
input = temp
else:
fltr(input, arg, axis, output, mode, cval, origin, **filter_kwargs)
if n_filters == 1:
return output
input, output = output, temp
for axis, (fltr, arg, mode, origin) in enumerate(iterator, start=axis + 1):
if fltr is None:
continue
fltr(input, arg, axis, output, mode, cval, origin, **filter_kwargs)
input, output = output, input
return input
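# Note on the buffer swapping above (descriptive, not normative): the first
# active filter writes into ``temp`` when the number of active filters is
# even and into ``output`` when it is odd; the two buffers then alternate,
# which guarantees that the final pass always lands in the user-visible
# ``output`` array (returned as ``input`` after the last swap).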
def _call_kernel(
kernel,
input,
weights,
output,
structure=None,
weights_dtype=numpy.float64,
structure_dtype=numpy.float64,
):
"""
Calls a constructed ElementwiseKernel. The kernel must take an input image,
an optional array of weights, an optional array for the structure, and an
output array.
weights and structure can be given as None (structure defaults to None) in
which case they are not passed to the kernel at all. If the output is given
as None then it will be allocated in this function.
This function deals with making sure that the weights and structure are
contiguous and float64 (or bool for weights that are footprints)*, that the
output is allocated and appropriately shaped. This also deals with the
situation that the input and output arrays overlap in memory.
* weights is always cast to float64 or bool in order to get an output
compatible with SciPy, though float32 might be sufficient when input dtype
is low precision. If weights_dtype is passed as weights.dtype then no
dtype conversion will occur. The input and output are never converted.
"""
args = [input]
complex_output = input.dtype.kind == "c"
if weights is not None:
weights = cupy.ascontiguousarray(weights, weights_dtype)
complex_output = complex_output or weights.dtype.kind == "c"
args.append(weights)
if structure is not None:
structure = cupy.ascontiguousarray(structure, structure_dtype)
args.append(structure)
output = _util._get_output(output, input, None, complex_output) # noqa
needs_temp = cupy.shares_memory(output, input, "MAY_SHARE_BOUNDS")
if needs_temp:
output, temp = (
_util._get_output(output.dtype, input, None, complex_output),
output,
) # noqa
args.append(output)
kernel(*args)
if needs_temp:
output[:] = temp
output = temp
return output
_ndimage_includes = r"""
#include <type_traits> // let Jitify handle this
#include <cupy/math_constants.h>
template<> struct std::is_floating_point<float16> : std::true_type {};
template<> struct std::is_signed<float16> : std::true_type {};
template<class T> struct std::is_signed<complex<T>> : std::is_signed<T> {};
"""
_ndimage_CAST_FUNCTION = """
// Implements a casting function to make it compatible with scipy
// Use like cast<to_type>(value)
template <class B, class A>
__device__ __forceinline__
typename std::enable_if<(!std::is_floating_point<A>::value
|| std::is_signed<B>::value), B>::type
cast(A a) { return (B)a; }
template <class B, class A>
__device__ __forceinline__
typename std::enable_if<(std::is_floating_point<A>::value
&& (!std::is_signed<B>::value)), B>::type
cast(A a) { return (a >= 0) ? (B)a : -(B)(-a); }
template <class T>
__device__ __forceinline__ bool nonzero(T x) { return x != static_cast<T>(0); }
"""
def _generate_nd_kernel(
name,
pre,
found,
post,
mode,
w_shape,
int_type,
offsets,
cval,
ctype="X",
preamble="",
options=(),
has_weights=True,
has_structure=False,
has_mask=False,
binary_morphology=False,
all_weights_nonzero=False,
):
# Currently this code uses CArray for weights but avoids using CArray for
# the input data and instead does the indexing itself since it is faster.
# If CArray becomes faster than follow the comments that start with
# CArray: to switch over to using CArray for the input data as well.
ndim = len(w_shape)
in_params = "raw X x"
if has_weights:
in_params += ", raw W w"
if has_structure:
in_params += ", raw S s"
if has_mask:
in_params += ", raw M mask"
out_params = "Y y"
# for filters, "wrap" is a synonym for "grid-wrap"
mode = "grid-wrap" if mode == "wrap" else mode
# CArray: remove xstride_{j}=... from string
size = (
"%s xsize_{j}=x.shape()[{j}], ysize_{j} = _raw_y.shape()[{j}]"
", xstride_{j}=x.strides()[{j}];" % int_type
)
sizes = [size.format(j=j) for j in range(ndim)]
inds = _util._generate_indices_ops(ndim, int_type, offsets)
# CArray: remove expr entirely
expr = " + ".join(["ix_{}".format(j) for j in range(ndim)])
ws_init = ws_pre = ws_post = ""
if has_weights or has_structure:
ws_init = "int iws = 0;"
if has_structure:
ws_pre = "S sval = s[iws];\n"
if has_weights:
ws_pre += "W wval = w[iws];\n"
if not all_weights_nonzero:
ws_pre += "if (nonzero(wval))"
ws_post = "iws++;"
loops = []
for j in range(ndim):
if w_shape[j] == 1:
# CArray: string becomes 'inds[{j}] = ind_{j};', remove (int_)type
loops.append(
"{{ {type} ix_{j} = ind_{j} * xstride_{j};".format(
j=j, type=int_type
)
)
else:
boundary = _util._generate_boundary_condition_ops(
mode, "ix_{}".format(j), "xsize_{}".format(j), int_type
)
# CArray: last line of string becomes inds[{j}] = ix_{j};
loops.append(
"""
for (int iw_{j} = 0; iw_{j} < {wsize}; iw_{j}++)
{{
{type} ix_{j} = ind_{j} + iw_{j};
{boundary}
ix_{j} *= xstride_{j};
""".format(
j=j, wsize=w_shape[j], boundary=boundary, type=int_type
)
)
# CArray: string becomes 'x[inds]', no format call needed
value = "(*(X*)&data[{expr}])".format(expr=expr)
if mode == "constant":
cond = " || ".join(["(ix_{} < 0)".format(j) for j in range(ndim)])
if cval is numpy.nan:
cval = "CUDART_NAN"
elif cval == numpy.inf:
cval = "CUDART_INF"
elif cval == -numpy.inf:
cval = "-CUDART_INF"
if binary_morphology:
found = found.format(cond=cond, value=value)
else:
if mode == "constant":
value = "(({cond}) ? cast<{ctype}>({cval}) : {value})".format(
cond=cond, ctype=ctype, cval=cval, value=value
)
found = found.format(value=value)
# CArray: replace comment and next line in string with
# {type} inds[{ndim}] = {{0}};
# and add ndim=ndim, type=int_type to format call
operation = """
{sizes}
{inds}
// don't use a CArray for indexing (faster to deal with indexing ourselves)
const unsigned char* data = (const unsigned char*)&x[0];
{ws_init}
{pre}
{loops}
// inner-most loop
{ws_pre} {{
{found}
}}
{ws_post}
{end_loops}
{post}
""".format(
sizes="\n".join(sizes),
inds=inds,
pre=pre,
post=post,
ws_init=ws_init,
ws_pre=ws_pre,
ws_post=ws_post,
loops="\n".join(loops),
found=found,
end_loops="}" * ndim,
)
mode_str = mode.replace("-", "_") # avoid potential hyphen in kernel name
name = "cupyx_scipy_ndimage_{}_{}d_{}_w{}".format(
name, ndim, mode_str, "_".join(["{}".format(x) for x in w_shape])
)
if all_weights_nonzero:
name += "_all_nonzero"
if int_type == "ptrdiff_t":
name += "_i64"
if has_structure:
name += "_with_structure"
if has_mask:
name += "_with_mask"
preamble = _ndimage_includes + _ndimage_CAST_FUNCTION + preamble
options += ("--std=c++11", "-DCUPY_USE_JITIFY")
return cupy.ElementwiseKernel(
in_params,
out_params,
operation,
name,
reduce_dims=False,
preamble=preamble,
options=options,
)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_pearsonr.py
|
"""Implementation of cupyx.scipy.stats.pearsonr (currently missing in CuPy)
Simple port of SciPy's pearsonr in scipy/stats/_stats_py.py.
Note that this is based on the simpler implementation from SciPy<1.9 where the
return type is just a 2-tuple of (statistic, p-value). In SciPy>=1.9, the
return type changed to a PearsonRResult object that also has a
`confidence_interval` method. Since we do not need that method in the cuCIM
API, a simple tuple-based return is used.
"""
import warnings
import cupy as cp
import numpy as np
from scipy import special
class PearsonRConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is constant."""
def __init__(self, msg=None):
if msg is None:
msg = (
"An input array is constant; the correlation coefficient "
"is not defined."
)
self.args = (msg,)
class PearsonRNearConstantInputWarning(RuntimeWarning):
"""Warning generated by `pearsonr` when an input is nearly constant."""
def __init__(self, msg=None):
if msg is None:
msg = (
"An input array is nearly constant; the computed "
"correlation coefficient may be inaccurate."
)
self.args = (msg,)
# Note: adapted from scipy.stats._stats_py.pearsonr in SciPy 1.8.1
def pearsonr(x, y, *, disable_checks=False):
r"""
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient [1]_ measures the linear relationship
between two datasets. The calculation of the p-value relies on the
assumption that each dataset is normally distributed. (See Kowalski [3]_
for a discussion of the effects of non-normality of the input on the
distribution of the correlation coefficient.) Like other correlation
coefficients, this one varies between -1 and +1 with 0 implying no
correlation. Correlations of -1 or +1 imply an exact linear relationship.
Parameters
----------
x : (N,) array_like
Input array.
y : (N,) array_like
Input array.
Returns
-------
r : float
Pearson's correlation coefficient.
p-value : float
Two-tailed p-value.
Warns
-----
PearsonRConstantInputWarning
Raised if an input is a constant array. The correlation coefficient
is not defined in this case, so ``cp.nan`` is returned.
PearsonRNearConstantInputWarning
Raised if an input is "nearly" constant. The array ``x`` is considered
nearly constant if ``norm(x - mean(x)) < 1e-13 * abs(mean(x))``.
Numerical errors in the calculation ``x - mean(x)`` in this case might
result in an inaccurate calculation of r.
See Also
--------
spearmanr : Spearman rank-order correlation coefficient.
kendalltau : Kendall's tau, a correlation measure for ordinal data.
Notes
-----
The correlation coefficient is calculated as follows:
.. math::
r = \frac{\sum (x - m_x) (y - m_y)}
{\sqrt{\sum (x - m_x)^2 \sum (y - m_y)^2}}
where :math:`m_x` is the mean of the vector x and :math:`m_y` is
the mean of the vector y.
Under the assumption that x and y are drawn from
independent normal distributions (so the population correlation coefficient
is 0), the probability density function of the sample correlation
coefficient r is ([1]_, [2]_):
.. math::
f(r) = \frac{{(1-r^2)}^{n/2-2}}{\mathrm{B}(\frac{1}{2},\frac{n}{2}-1)}
where n is the number of samples, and B is the beta function. This
is sometimes referred to as the exact distribution of r. This is
the distribution that is used in `pearsonr` to compute the p-value.
The distribution is a beta distribution on the interval [-1, 1],
with equal shape parameters a = b = n/2 - 1. In terms of SciPy's
implementation of the beta distribution, the distribution of r is::
dist = scipy.stats.beta(n/2 - 1, n/2 - 1, loc=-1, scale=2)
The p-value returned by `pearsonr` is a two-sided p-value. The p-value
roughly indicates the probability of an uncorrelated system
producing datasets that have a Pearson correlation at least as extreme
as the one computed from these datasets. More precisely, for a
given sample with correlation coefficient r, the p-value is
the probability that abs(r') of a random sample x' and y' drawn from
the population with zero correlation would be greater than or equal
to abs(r). In terms of the object ``dist`` shown above, the p-value
for a given r and length n can be computed as::
p = 2*dist.cdf(-abs(r))
When n is 2, the above continuous distribution is not well-defined.
One can interpret the limit of the beta distribution as the shape
parameters a and b approach a = b = 0 as a discrete distribution with
equal probability masses at r = 1 and r = -1. More directly, one
can observe that, given the data x = [x1, x2] and y = [y1, y2], and
assuming x1 != x2 and y1 != y2, the only possible values for r are 1
and -1. Because abs(r') for any sample x' and y' with length 2 will
be 1, the two-sided p-value for a sample of length 2 is always 1.
References
----------
.. [1] "Pearson correlation coefficient", Wikipedia,
https://en.wikipedia.org/wiki/Pearson_correlation_coefficient
.. [2] Student, "Probable error of a correlation coefficient",
Biometrika, Volume 6, Issue 2-3, 1 September 1908, pp. 302-310.
.. [3] C. J. Kowalski, "On the Effects of Non-Normality on the Distribution
of the Sample Product-Moment Correlation Coefficient"
Journal of the Royal Statistical Society. Series C (Applied
Statistics), Vol. 21, No. 1 (1972), pp. 1-12.
Examples
--------
>>> from scipy import stats
>>> stats.pearsonr([1, 2, 3, 4, 5], [10, 9, 2.5, 6, 4])
(-0.7426106572325057, 0.1505558088534455)
There is a linear dependence between x and y if y = a + b*x + e, where
a,b are constants and e is a random error term, assumed to be independent
of x. For simplicity, assume that x is standard normal, a=0, b=1 and let
e follow a normal distribution with mean zero and standard deviation s>0.
>>> s = 0.5
>>> x = stats.norm.rvs(size=500)
>>> e = stats.norm.rvs(scale=s, size=500)
>>> y = x + e
>>> stats.pearsonr(x, y)
(0.9029601878969703, 8.428978827629898e-185) # may vary
This should be close to the exact value given by
>>> 1/cp.sqrt(1 + s**2)
0.8944271909999159
For s=0.5, we observe a high level of correlation. In general, a large
variance of the noise reduces the correlation, while the correlation
approaches one as the variance of the error goes to zero.
It is important to keep in mind that no correlation does not imply
independence unless (x, y) is jointly normal. Correlation can even be zero
when there is a very simple dependence structure: if X follows a
standard normal distribution, let y = abs(x). Note that the correlation
between x and y is zero. Indeed, since the expectation of x is zero,
cov(x, y) = E[x*y]. By definition, this equals E[x*abs(x)] which is zero
by symmetry. The following lines of code illustrate this observation:
>>> y = cp.abs(x)
>>> stats.pearsonr(x, y)
(-0.016172891856853524, 0.7182823678751942) # may vary
A non-zero correlation coefficient can be misleading. For example, if X has
a standard normal distribution, define y = x if x < 0 and y = 0 otherwise.
A simple calculation shows that corr(x, y) = sqrt(2/Pi) = 0.797...,
implying a high level of correlation:
>>> y = cp.where(x < 0, x, 0)
>>> stats.pearsonr(x, y)
(0.8537091583771509, 3.183461621422181e-143) # may vary
This is unintuitive since there is no dependence of x and y if x is larger
than zero which happens in about half of the cases if we sample x and y.
"""
# inputs must be 1D
n = len(x)
if n != len(y):
raise ValueError("x and y must have the same length.")
if n < 2:
raise ValueError("x and y must have length at least 2.")
if not disable_checks:
# If an input is constant, the correlation coefficient is not defined.
if (x == x[0]).all() or (y == y[0]).all():
warnings.warn(PearsonRConstantInputWarning())
return cp.nan, cp.nan
# dtype is the data type for the calculations. This expression ensures
# that the data type is at least 64 bit floating point. It might have
# more precision if the input is, for example, cp.longdouble.
dtype = cp.result_type(x.dtype, y.dtype, float)
if n == 2:
# faster on host for such small inputs
x = cp.asnumpy(x)
y = cp.asnumpy(y)
return dtype(np.sign(x[1] - x[0]) * np.sign(y[1] - y[0])), 1.0
xmean = x.mean(dtype=dtype)
ymean = y.mean(dtype=dtype)
# By using `astype(dtype)`, we ensure that the intermediate calculations
# use at least 64 bit floating point.
xm = x.astype(dtype) - xmean
ym = y.astype(dtype) - ymean
# TODO: use cupyx.scipy.linalg.norm from CuPy once available
# Unlike cp.linalg.norm or the expression sqrt((xm*xm).sum()),
# scipy.linalg.norm(xm) does not overflow if xm is, for example,
# [-5e210, 5e210, 3e200, -3e200]
normxm = cp.linalg.norm(xm)
normym = cp.linalg.norm(ym)
if not disable_checks:
threshold = 1e-13
if normxm < threshold * abs(xmean) or normym < threshold * abs(ymean):
# If all the values in x (likewise y) are very close to the mean,
# the loss of precision that occurs in the subtraction
# xm = x - xmean might result in large errors in r.
warnings.warn(PearsonRNearConstantInputWarning())
r = float(cp.dot(xm / normxm, ym / normym))
# Presumably, if abs(r) > 1, then it is only some small artifact of
# floating point arithmetic.
r = max(min(r, 1.0), -1.0)
# As explained in the docstring, the p-value can be computed as
# p = 2*dist.cdf(-abs(r))
# where dist is the beta distribution on [-1, 1] with shape parameters
# a = b = n/2 - 1. `special.btdtr` is the CDF for the beta distribution
# on [0, 1]. To use it, we make the transformation x = (r + 1)/2; the
# shape parameters do not change. Then -abs(r) used in `cdf(-abs(r))`
# becomes x = (-abs(r) + 1)/2 = 0.5*(1 - abs(r)). (r is cast to float64
# to avoid a TypeError raised by btdtr when r is higher precision.)
ab = n / 2 - 1
# scalar valued, so use special.btdtr from SciPy, not CuPy
prob = 2 * special.btdtr(ab, ab, 0.5 * (1.0 - abs(r)))
if disable_checks:
# warn only based on output values to avoid overhead of host/device
# synchronization needed for the disabled checks above.
if np.isnan(r) or np.isnan(prob):
warnings.warn(
"NaN encountered during Pearson R calculation. This may occur "
"in same cases such as a nearly constant-valued input."
)
return r, prob
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_ndimage_spline_prefilter_core.py
|
"""
Spline poles and boundary handling implemented as in SciPy
https://github.com/scipy/scipy/blob/ee6ae72f83a0995aeb34929aed881d3f36fccfda/scipy/ndimage/src/ni_splines.c
""" # noqa: E501
import functools
import math
import operator
import textwrap
import cupy
def get_poles(order):
if order == 2:
# sqrt(8.0) - 3.0
return (-0.171572875253809902396622551580603843,)
elif order == 3:
# sqrt(3.0) - 2.0
return (-0.267949192431122706472553658494127633,)
elif order == 4:
# sqrt(664.0 - sqrt(438976.0)) + sqrt(304.0) - 19.0
# sqrt(664.0 + sqrt(438976.0)) - sqrt(304.0) - 19.0
return (
-0.361341225900220177092212841325675255,
-0.013725429297339121360331226939128204,
)
elif order == 5:
# sqrt(67.5 - sqrt(4436.25)) + sqrt(26.25) - 6.5
# sqrt(67.5 + sqrt(4436.25)) - sqrt(26.25) - 6.5
return (
-0.430575347099973791851434783493520110,
-0.043096288203264653822712376822550182,
)
else:
raise ValueError("only order 2-5 supported")
def get_gain(poles):
return functools.reduce(
operator.mul, [(1.0 - z) * (1.0 - 1.0 / z) for z in poles]
)
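# Worked example (illustrative): for order = 3 the single pole is
# z = sqrt(3) - 2, so the gain is (1 - z) * (1 - 1/z) = 2 - (z + 1/z) = 6,
# the familiar scaling factor of the cubic B-spline prefilter.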
def _causal_init_code(mode):
"""Code for causal initialization step of IIR filtering.
c is a 1d array of length n and z is a filter pole
"""
code = f"""
// causal init for mode={mode}"""
if mode == "mirror":
code += """
z_i = z;
z_n_1 = pow(z, (P)(n - 1));
c[0] = c[0] + z_n_1 * c[(n - 1) * element_stride];
for (i = 1; i < min(n - 1, static_cast<idx_t>({n_boundary})); ++i) {{
c[0] += z_i * (c[i * element_stride] +
z_n_1 * c[(n - 1 - i) * element_stride]);
z_i *= z;
}}
c[0] /= 1 - z_n_1 * z_n_1;"""
elif mode == "grid-wrap":
code += """
z_i = z;
for (i = 1; i < min(n, static_cast<idx_t>({n_boundary})); ++i) {{
c[0] += z_i * c[(n - i) * element_stride];
z_i *= z;
}}
c[0] /= 1 - z_i; /* z_i = pow(z, n) */"""
elif mode == "reflect":
code += """
z_i = z;
z_n = pow(z, (P)n);
c0 = c[0];
c[0] = c[0] + z_n * c[(n - 1) * element_stride];
for (i = 1; i < min(n, static_cast<idx_t>({n_boundary})); ++i) {{
c[0] += z_i * (c[i * element_stride] +
z_n * c[(n - 1 - i) * element_stride]);
z_i *= z;
}}
c[0] *= z / (1 - z_n * z_n);
c[0] += c0;"""
else:
raise ValueError("invalid mode: {}".format(mode))
return code
def _anticausal_init_code(mode):
"""Code for the anti-causal initialization step of IIR filtering.
c is a 1d array of length n and z is a filter pole
"""
code = f"""
// anti-causal init for mode={mode}"""
if mode == "mirror":
code += """
c[(n - 1) * element_stride] = (
z * c[(n - 2) * element_stride] +
c[(n - 1) * element_stride]) * z / (z * z - 1);"""
elif mode == "grid-wrap":
code += """
z_i = z;
for (i = 0; i < min(n - 1, static_cast<idx_t>({n_boundary})); ++i) {{
c[(n - 1) * element_stride] += z_i * c[i * element_stride];
z_i *= z;
}}
c[(n - 1) * element_stride] *= z / (z_i - 1); /* z_i = pow(z, n) */"""
elif mode == "reflect":
code += """
c[(n - 1) * element_stride] *= z / (z - 1);"""
else:
raise ValueError("invalid mode: {}".format(mode))
return code
def _get_spline_mode(mode):
"""spline boundary mode for interpolation with order >= 2."""
if mode in ["mirror", "reflect", "grid-wrap"]:
# exact analytic boundary conditions exist for these modes.
return mode
elif mode == "grid-mirror":
# grid-mirror is a synonym for 'reflect'
return "reflect"
# No exact analytical spline boundary condition implemented. Reflect gives
# lower error than using mirror or wrap for mode 'nearest'. Otherwise, a
# mirror spline boundary condition is used.
return "reflect" if mode == "nearest" else "mirror"
def _get_spline1d_code(mode, poles, n_boundary):
"""Generates the code required for IIR filtering of a single 1d signal.
Prefiltering is done by causal filtering followed by anti-causal filtering.
Multiple boundary conditions have been implemented.
"""
code = [
"""
__device__ void spline_prefilter1d(
T* __restrict__ c, idx_t signal_length, idx_t element_stride)
{{"""
]
# variables common to all boundary modes
code.append(
"""
idx_t i, n = signal_length;
P z, z_i;"""
)
# retrieve the spline boundary extension mode to use
mode = _get_spline_mode(mode)
if mode == "mirror":
# variables specific to mirror boundary mode
code.append(
"""
P z_n_1;"""
)
elif mode == "reflect":
# variables specific to reflect boundary mode
code.append(
"""
P z_n;
T c0;"""
)
for pole in poles:
code.append(
f"""
// select the current pole
z = {pole};"""
)
# initialize and apply the causal filter
code.append(_causal_init_code(mode))
code.append(
"""
// apply the causal filter for the current pole
for (i = 1; i < n; ++i) {{
c[i * element_stride] += z * c[(i - 1) * element_stride];
}}"""
)
# initialize and apply the anti-causal filter
code.append(_anticausal_init_code(mode))
code.append(
"""
// apply the anti-causal filter for the current pole
for (i = n - 2; i >= 0; --i) {{
c[i * element_stride] = z * (c[(i + 1) * element_stride] -
c[i * element_stride]);
}}"""
)
code += [
"""
}}"""
]
return textwrap.dedent("\n".join(code)).format(n_boundary=n_boundary)
_FILTER_GENERAL = """
#include "cupy/carray.cuh"
#include "cupy/complex.cuh"
typedef {data_type} T;
typedef {pole_type} P;
typedef {index_type} idx_t;
template <typename T>
__device__ T* row(
T* ptr, idx_t i, idx_t axis, idx_t ndim, const idx_t* shape) {{
idx_t index = 0, stride = 1;
for (idx_t a = ndim - 1; a > 0; --a) {{
if (a != axis) {{
index += (i % shape[a]) * stride;
i /= shape[a];
}}
stride *= shape[a];
}}
return ptr + index + stride * i;
}}
"""
_batch_spline1d_strided_template = """
extern "C" __global__
__launch_bounds__({block_size})
void {kernel_name}(T* __restrict__ y, const idx_t* __restrict__ info) {{
const idx_t n_signals = info[0], n_samples = info[1],
* __restrict__ shape = info+2;
idx_t y_elem_stride = 1;
for (int a = {ndim} - 1; a > {axis}; --a) {{ y_elem_stride *= shape[a]; }}
idx_t unraveled_idx = blockDim.x * blockIdx.x + threadIdx.x;
idx_t batch_idx = unraveled_idx;
if (batch_idx < n_signals)
{{
T* __restrict__ y_i = row(y, batch_idx, {axis}, {ndim}, shape);
spline_prefilter1d(y_i, n_samples, y_elem_stride);
}}
}}
"""
@cupy.memoize(for_each_device=True)
def get_raw_spline1d_kernel(
axis,
ndim,
mode,
order,
index_type="int",
data_type="double",
pole_type="double",
block_size=128,
):
"""Generate a kernel for applying a spline prefilter along a given axis."""
poles = get_poles(order)
# determine number of samples for the boundary approximation
# (SciPy uses n_boundary = n_samples but this is excessive)
largest_pole = max([abs(p) for p in poles])
# tol < 1e-7 fails test cases comparing to SciPy at atol = rtol = 1e-5
tol = 1e-10 if pole_type == "float" else 1e-18
n_boundary = math.ceil(math.log(tol, largest_pole))
# headers and general utility function for extracting rows of data
code = _FILTER_GENERAL.format(
index_type=index_type, data_type=data_type, pole_type=pole_type
)
# generate source for a 1d function for a given boundary mode and poles
code += _get_spline1d_code(mode, poles, n_boundary)
# generate code handling batch operation of the 1d filter
mode_str = mode.replace("-", "_") # cannot have '-' in kernel name
kernel_name = (
f"cupyx_scipy_ndimage_spline_filter_{ndim}d_ord{order}_"
f"axis{axis}_{mode_str}"
)
code += _batch_spline1d_strided_template.format(
ndim=ndim, axis=axis, block_size=block_size, kernel_name=kernel_name
)
return cupy.RawKernel(code, kernel_name)
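# Illustrative sketch (not part of the vendored module): fetching one of the
# generated prefilter kernels. The result is a ``cupy.RawKernel`` compiled on
# first use; its launch arguments are ``(y, info)`` where ``info`` packs
# ``[n_signals, n_samples, *shape]`` as laid out in the template above.
#
#   >>> k = get_raw_spline1d_kernel(axis=0, ndim=2, mode="mirror", order=3)
#   >>> # ``k`` is a cupy.RawKernel named
#   >>> # "cupyx_scipy_ndimage_spline_filter_2d_ord3_axis0_mirror"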
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_ndimage_util.py
|
"""A vendored subset of cupyx.scipy.ndimage._util"""
import warnings
import cupy
import numpy
def _is_integer_output(output, input):
if output is None:
return input.dtype.kind in "iu"
elif isinstance(output, cupy.ndarray):
return output.dtype.kind in "iu"
return cupy.dtype(output).kind in "iu"
def _check_cval(mode, cval, integer_output):
if mode == "constant" and integer_output and not cupy.isfinite(cval):
raise NotImplementedError(
"Non-finite cval is not supported for "
"outputs with integer dtype."
)
def _get_weights_dtype(input, weights, use_cucim_casting=False):
if weights.dtype.kind == "c" or input.dtype.kind == "c":
return cupy.promote_types(input.real.dtype, cupy.complex64)
elif weights.dtype.kind in "iub":
if use_cucim_casting:
from cucim.skimage._shared.utils import _supported_float_type
return _supported_float_type(weights.dtype)
else:
# convert integer dtype weights to double as in SciPy
return cupy.float64
return cupy.promote_types(input.real.dtype, cupy.float32)
def _get_output(output, input, shape=None, complex_output=False):
shape = input.shape if shape is None else shape
if output is None:
if complex_output:
_dtype = cupy.promote_types(input.dtype, cupy.complex64)
else:
_dtype = input.dtype
output = cupy.empty(shape, dtype=_dtype)
elif isinstance(output, (type, cupy.dtype)):
if complex_output and cupy.dtype(output).kind != "c":
warnings.warn("promoting specified output dtype to complex")
output = cupy.promote_types(output, cupy.complex64)
output = cupy.empty(shape, dtype=output)
elif isinstance(output, str):
output = numpy.sctypeDict[output]
if complex_output and cupy.dtype(output).kind != "c":
raise RuntimeError("output must have complex dtype")
output = cupy.empty(shape, dtype=output)
elif output.shape != shape:
raise RuntimeError("output shape not correct")
elif complex_output and output.dtype.kind != "c":
raise RuntimeError("output must have complex dtype")
return output
def _fix_sequence_arg(arg, ndim, name, conv=lambda x: x):
if isinstance(arg, str):
return [conv(arg)] * ndim
try:
arg = iter(arg)
except TypeError:
return [conv(arg)] * ndim
lst = [conv(x) for x in arg]
if len(lst) != ndim:
msg = "{} must have length equal to input rank".format(name)
raise RuntimeError(msg)
return lst
def _check_origin(origin, width):
origin = int(origin)
if (width // 2 + origin < 0) or (width // 2 + origin >= width):
raise ValueError("invalid origin")
return origin
def _check_mode(mode):
if mode not in (
"reflect",
"constant",
"nearest",
"mirror",
"wrap",
"grid-mirror",
"grid-wrap",
"grid-reflect",
):
msg = f"boundary mode not supported (actual: {mode})"
raise RuntimeError(msg)
return mode
def _get_inttype(input):
# The integer type to use for indices in the input array
# The indices actually use byte positions and we can't just use
# input.nbytes since that won't tell us the number of bytes between the
# first and last elements when the array is non-contiguous
nbytes = (
sum(
(x - 1) * abs(stride)
for x, stride in zip(input.shape, input.strides)
)
+ input.dtype.itemsize
)
return "int" if nbytes < (1 << 31) else "ptrdiff_t"
def _generate_boundary_condition_ops(
mode, ix, xsize, int_t="int", float_ix=False, separate=False
):
"""Generate boundary conditions
If separate = True, a pair of conditions for the (lower, upper) boundary
are provided instead of a single expression.
"""
min_func = "fmin" if float_ix else "min"
max_func = "fmax" if float_ix else "max"
if mode in ["reflect", "grid-mirror"]:
if separate:
ops_upper = f"""
{ix} %= {xsize} * 2;
{ix} = {min_func}({ix}, 2 * {xsize} - 1 - {ix});
"""
ops_lower = (
f"""
if ({ix} < 0) {{
{ix} = - 1 -{ix};
}}
"""
+ ops_upper
)
ops = (ops_lower, ops_upper)
else:
ops = f"""
if ({ix} < 0) {{
{ix} = - 1 -{ix};
}}
{ix} %= {xsize} * 2;
{ix} = {min_func}({ix}, 2 * {xsize} - 1 - {ix});"""
elif mode == "mirror":
if separate:
temp1 = f"""
if ({xsize} == 1) {{
{ix} = 0;
}} else {{
"""
temp2 = f"""
if ({ix} < 0) {{
{ix} = -{ix};
}}
"""
temp3 = f"""
{ix} = 1 + ({ix} - 1) % (({xsize} - 1) * 2);
{ix} = {min_func}({ix}, 2 * {xsize} - 2 - {ix});
}}"""
ops_lower = temp1 + temp2 + temp3
ops_upper = temp1 + temp3
ops = (ops_lower, ops_upper)
else:
ops = f"""
if ({xsize} == 1) {{
{ix} = 0;
}} else {{
if ({ix} < 0) {{
{ix} = -{ix};
}}
{ix} = 1 + ({ix} - 1) % (({xsize} - 1) * 2);
{ix} = {min_func}({ix}, 2 * {xsize} - 2 - {ix});
}}"""
elif mode == "nearest":
T = "int" if int_t == "int" else "long long"
if separate:
ops_lower = f"""{ix} = {max_func}(({T}){ix}, ({T})0);"""
ops_upper = (
f"""{ix} = {min_func}(({T}){ix}, ({T})({xsize} - 1));""" # noqa
)
ops = (ops_lower, ops_upper)
else:
ops = f"""{ix} = {min_func}({max_func}(({T}){ix}, ({T})0), ({T})({xsize} - 1));""" # noqa
elif mode == "grid-wrap":
if separate:
ops_upper = f"""
{ix} %= {xsize};
"""
ops_lower = (
ops_upper
+ f"""
while ({ix} < 0) {{
{ix} += {xsize};
}}"""
)
ops = (ops_lower, ops_upper)
else:
ops = f"""
{ix} %= {xsize};
if ({ix} < 0) {{
{ix} += {xsize};
}}"""
elif mode == "wrap":
if separate:
ops_lower = f"""{ix} += ({xsize} - 1) * (({int_t})(-{ix} / ({xsize} - 1)) + 1);""" # noqa
ops_upper = f"""{ix} -= ({xsize} - 1) * ({int_t})({ix} / ({xsize} - 1));""" # noqa
ops = (ops_lower, ops_upper)
else:
ops = f"""
if ({ix} < 0) {{
{ix} += ({xsize} - 1) * (({int_t})(-{ix} / ({xsize} - 1)) + 1);
}} else if ({ix} > ({xsize} - 1)) {{
{ix} -= ({xsize} - 1) * ({int_t})({ix} / ({xsize} - 1));
}};"""
elif mode in ["constant", "grid-constant"]:
if separate:
ops_lower = f"""
if ({ix} < 0) {{
{ix} = -1;
}}"""
ops_upper = f"""
if ({ix} >= {xsize}) {{
{ix} = -1;
}}"""
ops = (ops_lower, ops_upper)
else:
ops = f"""
if (({ix} < 0) || {ix} >= {xsize}) {{
{ix} = -1;
}}"""
if separate:
ops = (ops, ops)
return ops
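# Illustrative sketch (not part of the vendored module): for mode="nearest"
# the generated C fragment simply clamps the index variable, e.g.
#
#   >>> print(_generate_boundary_condition_ops("nearest", "xi", "n_x"))
#   xi = min(max((int)xi, (int)0), (int)(n_x - 1));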
def _generate_indices_ops(ndim, int_type, offsets):
code = "{type} ind_{j} = _i % ysize_{j} - {offset}; _i /= ysize_{j};"
body = [
code.format(type=int_type, j=j, offset=offsets[j])
for j in range(ndim - 1, 0, -1)
]
return "{type} _i = i;\n{body}\n{type} ind_0 = _i - {offset};".format(
type=int_type, body="\n".join(body), offset=offsets[0]
)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_ndimage_filters.py
|
"""A vendored subset of cupyx.scipy.ndimage._filters"""
import warnings
import cupy
import numpy
from cucim.skimage._vendored import (
_internal as internal,
_ndimage_filters_core as _filters_core,
_ndimage_util as _util,
)
from cucim.skimage.filters._separable_filtering import (
ResourceLimitError,
_shmem_convolve1d,
)
try:
from cupy.cuda.compiler import CompileException
compile_errors = (ResourceLimitError, CompileException)
except ImportError:
compile_errors = (ResourceLimitError,)
def correlate(input, weights, output=None, mode="reflect", cval=0.0, origin=0):
"""Multi-dimensional correlate.
The array is correlated with the given kernel.
Args:
input (cupy.ndarray): The input array.
weights (cupy.ndarray): Array of weights, same number of dimensions as
input
output (cupy.ndarray, dtype or None): The array in which to place the
output.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``constant``. Default is ``0.0``.
origin (scalar or tuple of scalar): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of correlate.
.. seealso:: :func:`scipy.ndimage.correlate`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
return _correlate_or_convolve(input, weights, output, mode, cval, origin)
def convolve(input, weights, output=None, mode="reflect", cval=0.0, origin=0):
"""Multi-dimensional convolution.
The array is convolved with the given kernel.
Args:
input (cupy.ndarray): The input array.
weights (cupy.ndarray): Array of weights, same number of dimensions as
input
output (cupy.ndarray, dtype or None): The array in which to place the
output.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``constant``. Default is ``0.0``.
origin (scalar or tuple of scalar): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of convolution.
.. seealso:: :func:`scipy.ndimage.convolve`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
return _correlate_or_convolve(
input, weights, output, mode, cval, origin, True
)
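# Illustrative usage sketch (not part of the vendored module; assumes a
# CUDA-capable device and that this module is importable as
# ``cucim.skimage._vendored._ndimage_filters``):
#
#   >>> import cupy
#   >>> a = cupy.array([[1., 2., 0.], [0., 1., 3.], [4., 0., 1.]])
#   >>> w = cupy.array([[0., 1.], [1., 0.]])
#   >>> out_conv = convolve(a, w, mode="constant")   # flips the kernel
#   >>> out_corr = correlate(a, w, mode="constant")  # uses the kernel as-is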
def correlate1d(
input,
weights,
axis=-1,
output=None,
mode="reflect",
cval=0.0,
origin=0,
*,
algorithm=None,
):
"""One-dimensional correlate.
The array is correlated with the given kernel.
Args:
input (cupy.ndarray): The input array.
weights (cupy.ndarray): One-dimensional array of weights
axis (int): The axis of input along which to calculate. Default is -1.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int): The origin parameter controls the placement of the
filter, relative to the center of the current element of the
input. Default is ``0``.
Returns:
cupy.ndarray: The result of the 1D correlation.
.. seealso:: :func:`scipy.ndimage.correlate1d`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
return _correlate_or_convolve1d(
input, weights, axis, output, mode, cval, origin, False, algorithm
)
def convolve1d(
input,
weights,
axis=-1,
output=None,
mode="reflect",
cval=0.0,
origin=0,
*,
algorithm=None,
):
"""One-dimensional convolution.
The array is convolved with the given kernel.
Args:
input (cupy.ndarray): The input array.
weights (cupy.ndarray): One-dimensional array of weights
axis (int): The axis of input along which to calculate. Default is -1.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int): The origin parameter controls the placement of the
filter, relative to the center of the current element of the
input. Default is ``0``.
Returns:
cupy.ndarray: The result of the 1D convolution.
.. seealso:: :func:`scipy.ndimage.convolve1d`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
return _correlate_or_convolve1d(
input, weights, axis, output, mode, cval, origin, True, algorithm
)
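# Illustrative usage sketch (not part of the vendored module): the
# ``algorithm`` keyword is specific to this vendored variant and selects
# between the shared-memory and elementwise implementations; both should
# agree to floating-point tolerance.
#
#   >>> import cupy
#   >>> x = cupy.arange(12, dtype=cupy.float32).reshape(3, 4)
#   >>> w = cupy.array([1., 2., 1.], dtype=cupy.float32)
#   >>> y0 = convolve1d(x, w, axis=1)                           # automatic choice
#   >>> y1 = convolve1d(x, w, axis=1, algorithm="elementwise")  # forced fallback
#   >>> bool(cupy.allclose(y0, y1))
#   True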
def _correlate_or_convolve(
input, weights, output, mode, cval, origin, convolution=False
):
origins, int_type = _filters_core._check_nd_args(
input, weights, mode, origin
)
if weights.size == 0:
return cupy.zeros_like(input)
_util._check_cval(mode, cval, _util._is_integer_output(output, input))
if convolution:
weights = weights[tuple([slice(None, None, -1)] * weights.ndim)]
origins = list(origins)
for i, wsize in enumerate(weights.shape):
origins[i] = -origins[i]
if wsize % 2 == 0:
origins[i] -= 1
origins = tuple(origins)
elif weights.dtype.kind == "c":
# numpy.correlate conjugates weights rather than input.
weights = weights.conj()
weights_dtype = _util._get_weights_dtype(
input, weights, use_cucim_casting=True
) # noqa
offsets = _filters_core._origins_to_offsets(origins, weights.shape)
kernel = _get_correlate_kernel(mode, weights.shape, int_type, offsets, cval)
output = _filters_core._call_kernel(
kernel, input, weights, output, weights_dtype=weights_dtype
)
return output
def _correlate_or_convolve1d(
input,
weights,
axis,
output,
mode,
cval,
origin,
convolution=False,
algorithm=None,
):
# Calls fast shared-memory convolution when possible, otherwise falls back
# to the vendored elementwise _correlate_or_convolve
default_algorithm = False
if algorithm is None:
default_algorithm = True
if input.ndim == 2 and weights.size <= 256:
algorithm = "shared_memory"
else:
algorithm = "elementwise"
elif algorithm not in ["shared_memory", "elementwise"]:
raise ValueError(
"algorithm must be 'shared_memory', 'elementwise' or None"
)
if mode == "wrap":
mode = "grid-wrap"
if algorithm == "shared_memory":
if input.ndim not in [2, 3]:
raise NotImplementedError(
f"shared_memory not implemented for ndim={input.ndim}"
)
try:
out = _shmem_convolve1d(
input,
weights,
axis=axis,
output=output,
mode=mode,
cval=cval,
origin=origin,
convolution=convolution,
)
return out
except compile_errors:
# fallback to elementwise if inadequate shared memory available
if not default_algorithm:
# only warn if 'shared_memory' was explicitly requested
warnings.warn(
"Inadequate resources for algorithm='shared_memory: "
"falling back to the elementwise implementation"
)
algorithm = "elementwise"
if algorithm == "elementwise":
weights, origins = _filters_core._convert_1d_args(
input.ndim, weights, origin, axis
)
return _correlate_or_convolve(
input, weights, output, mode, cval, origins, convolution
)
@cupy.memoize(for_each_device=True)
def _get_correlate_kernel(mode, w_shape, int_type, offsets, cval):
return _filters_core._generate_nd_kernel(
"correlate",
"W sum = (W)0;",
"sum += cast<W>({value}) * wval;",
"y = cast<Y>(sum);",
mode,
w_shape,
int_type,
offsets,
cval,
ctype="W",
)
def _run_1d_correlates(
input, params, get_weights, output, mode, cval, origin=0, **filter_kwargs
):
"""
Enhanced version of _run_1d_filters that uses correlate1d as the filter
function. The params are a list of values to pass to the get_weights
callable given. If duplicate param values are found, the weights are
reused from the first invocation of get_weights. The get_weights callable
must return a 1D array of weights to give to correlate1d.
"""
wghts = {}
for param in params:
if param not in wghts:
wghts[param] = get_weights(param)
wghts = [wghts[param] for param in params]
return _filters_core._run_1d_filters(
[None if w is None else correlate1d for w in wghts],
input,
wghts,
output,
mode,
cval,
origin,
**filter_kwargs,
)
def uniform_filter1d(
input,
size,
axis=-1,
output=None,
mode="reflect",
cval=0.0,
origin=0,
*,
algorithm=None,
):
"""One-dimensional uniform filter along the given axis.
The lines of the array along the given axis are filtered with a uniform
filter of the given size.
Args:
input (cupy.ndarray): The input array.
size (int): Length of the uniform filter.
axis (int): The axis of input along which to calculate. Default is -1.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int): The origin parameter controls the placement of the
filter, relative to the center of the current element of the
input. Default is ``0``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.uniform_filter1d`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
weights_dtype = cupy.promote_types(input.dtype, cupy.float32)
weights = cupy.full(size, 1 / size, dtype=weights_dtype)
return correlate1d(
input, weights, axis, output, mode, cval, origin, algorithm=algorithm
)
def uniform_filter(
input,
size=3,
output=None,
mode="reflect",
cval=0.0,
origin=0,
*,
algorithm=None,
):
"""Multi-dimensional uniform filter.
Args:
input (cupy.ndarray): The input array.
size (int or sequence of int): Lengths of the uniform filter for each
dimension. A single value applies to all axes.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int or sequence of int): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of ``0`` is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.uniform_filter`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
sizes = _util._fix_sequence_arg(size, input.ndim, "size", int)
weights_dtype = cupy.promote_types(input.dtype, cupy.float32)
def get(size):
return (
None
if size <= 1
else cupy.full(size, 1 / size, dtype=weights_dtype)
) # noqa
return _run_1d_correlates(
input, sizes, get, output, mode, cval, origin, algorithm=algorithm
)
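# Illustrative usage sketch (not part of the vendored module): a size-3
# uniform filter is a separable moving average built from per-axis 1D passes.
#
#   >>> import cupy
#   >>> img = cupy.random.random((32, 32), dtype=cupy.float32)
#   >>> smoothed = uniform_filter(img, size=3, mode="nearest")
#   >>> smoothed.shape
#   (32, 32)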
def gaussian_filter1d(
input,
sigma,
axis=-1,
order=0,
output=None,
mode="reflect",
cval=0.0,
truncate=4.0,
*,
algorithm=None,
):
"""One-dimensional Gaussian filter along the given axis.
The lines of the array along the given axis are filtered with a Gaussian
filter of the given standard deviation.
Args:
input (cupy.ndarray): The input array.
sigma (scalar): Standard deviation for Gaussian kernel.
axis (int): The axis of input along which to calculate. Default is -1.
order (int): An order of ``0``, the default, corresponds to convolution
with a Gaussian kernel. A positive order corresponds to convolution
with that derivative of a Gaussian.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
truncate (float): Truncate the filter at this many standard deviations.
Default is ``4.0``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.gaussian_filter1d`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
radius = int(float(truncate) * float(sigma) + 0.5)
weights_dtype = cupy.promote_types(input.dtype, cupy.float32)
weights = _gaussian_kernel1d(sigma, int(order), radius, weights_dtype)
return correlate1d(
input, weights, axis, output, mode, cval, algorithm=algorithm
)
def gaussian_filter(
input,
sigma,
order=0,
output=None,
mode="reflect",
cval=0.0,
truncate=4.0,
*,
algorithm=None,
):
"""Multi-dimensional Gaussian filter.
Args:
input (cupy.ndarray): The input array.
sigma (scalar or sequence of scalar): Standard deviations for each axis
of Gaussian kernel. A single value applies to all axes.
order (int or sequence of scalar): An order of ``0``, the default,
corresponds to convolution with a Gaussian kernel. A positive order
corresponds to convolution with that derivative of a Gaussian. A
single value applies to all axes.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
truncate (float): Truncate the filter at this many standard deviations.
Default is ``4.0``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.gaussian_filter`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
sigmas = _util._fix_sequence_arg(sigma, input.ndim, "sigma", float)
orders = _util._fix_sequence_arg(order, input.ndim, "order", int)
truncate = float(truncate)
    weights_dtype = cupy.promote_types(input.dtype, cupy.float32)
def get(param, dtype=weights_dtype):
sigma, order = param
radius = int(truncate * float(sigma) + 0.5)
if radius <= 0:
return None
return _gaussian_kernel1d(sigma, order, radius, dtype)
return _run_1d_correlates(
input,
list(zip(sigmas, orders)),
get,
output,
mode,
cval,
0,
algorithm=algorithm,
)
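# Illustrative usage sketch (not part of the vendored module): smoothing and
# Gaussian derivatives via the ``order`` argument.
#
#   >>> import cupy
#   >>> img = cupy.random.random((64, 64), dtype=cupy.float32)
#   >>> low_pass = gaussian_filter(img, sigma=2.0)
#   >>> d_axis0 = gaussian_filter(img, sigma=2.0, order=(1, 0))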
def _gaussian_kernel1d(sigma, order, radius, dtype=cupy.float64):
"""
Computes a 1-D Gaussian correlation kernel.
"""
if order < 0:
raise ValueError("order must be non-negative")
sigma2 = sigma * sigma
x = numpy.arange(-radius, radius + 1)
phi_x = numpy.exp(-0.5 / sigma2 * x**2)
phi_x /= phi_x.sum()
if order == 0:
return cupy.asarray(phi_x)
# f(x) = q(x) * phi(x) = q(x) * exp(p(x))
# f'(x) = (q'(x) + q(x) * p'(x)) * phi(x)
# p'(x) = -1 / sigma ** 2
# Implement q'(x) + q(x) * p'(x) as a matrix operator and apply to the
# coefficients of q(x)
exponent_range = numpy.arange(order + 1)
q = numpy.zeros(order + 1)
q[0] = 1
D = numpy.diag(exponent_range[1:], 1) # D @ q(x) = q'(x)
P = numpy.diag(numpy.ones(order) / -sigma2, -1) # P @ q(x) = q(x) * p'(x)
Q_deriv = D + P
for _ in range(order):
q = Q_deriv.dot(q)
q = (x[:, None] ** exponent_range).dot(q)
return cupy.asarray((q * phi_x)[::-1], order="C", dtype=dtype)
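# Illustrative check (not part of the vendored module): the order-0 kernel is
# normalized to sum to 1, while the order-1 (derivative) kernel is
# antisymmetric and therefore sums to (numerically) zero.
#
#   >>> k0 = _gaussian_kernel1d(2.0, 0, 8)
#   >>> float(k0.sum())        # ~1.0
#   >>> k1 = _gaussian_kernel1d(2.0, 1, 8)
#   >>> float(k1.sum())        # ~0.0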
def prewitt(
input, axis=-1, output=None, mode="reflect", cval=0.0, *, algorithm=None
):
"""Compute a Prewitt filter along the given axis.
Args:
input (cupy.ndarray): The input array.
axis (int): The axis of input along which to calculate. Default is -1.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.prewitt`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
weights_dtype = cupy.promote_types(input.dtype, cupy.float32)
smooth = cupy.ones(3, dtype=weights_dtype)
return _prewitt_or_sobel(input, axis, output, mode, cval, smooth, algorithm)
def sobel(
input, axis=-1, output=None, mode="reflect", cval=0.0, *, algorithm=None
):
"""Compute a Sobel filter along the given axis.
Args:
input (cupy.ndarray): The input array.
axis (int): The axis of input along which to calculate. Default is -1.
output (cupy.ndarray, dtype or None): The array in which to place the
output. Default is is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.sobel`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
weights_dtype = cupy.promote_types(input.dtype, cupy.float32)
smooth = cupy.array([1, 2, 1], dtype=weights_dtype)
return _prewitt_or_sobel(input, axis, output, mode, cval, smooth, algorithm)
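# Illustrative usage sketch (not part of the vendored module): per-axis Sobel
# responses combined into a gradient-magnitude image.
#
#   >>> import cupy
#   >>> img = cupy.random.random((64, 64), dtype=cupy.float32)
#   >>> gx = sobel(img, axis=1)
#   >>> gy = sobel(img, axis=0)
#   >>> magnitude = cupy.hypot(gx, gy)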
def _prewitt_or_sobel(input, axis, output, mode, cval, weights, algorithm):
axis = internal._normalize_axis_index(axis, input.ndim)
weights_dtype = cupy.promote_types(input.dtype, cupy.float32)
def get(is_diff, dtype=weights_dtype):
return (
cupy.array([-1, 0, 1], dtype=dtype) if is_diff else weights
) # noqa
return _run_1d_correlates(
input,
[a == axis for a in range(input.ndim)],
get,
output,
mode,
cval,
algorithm=algorithm,
)
def generic_laplace(
input,
derivative2,
output=None,
mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords=None,
):
"""Multi-dimensional Laplace filter using a provided second derivative
function.
Args:
input (cupy.ndarray): The input array.
derivative2 (callable): Function or other callable with the following
signature that is called once per axis::
derivative2(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
where ``input`` and ``output`` are ``cupy.ndarray``, ``axis`` is an
``int`` from ``0`` to the number of dimensions, and ``mode``,
``cval``, ``extra_arguments``, ``extra_keywords`` are the values
given to this function.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
extra_arguments (sequence, optional):
Sequence of extra positional arguments to pass to ``derivative2``.
extra_keywords (dict, optional):
            dict of extra keyword arguments to pass to ``derivative2``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.generic_laplace`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
if extra_keywords is None:
extra_keywords = {}
ndim = input.ndim
modes = _util._fix_sequence_arg(mode, ndim, "mode", _util._check_mode)
output = _util._get_output(output, input)
if ndim == 0:
output[:] = input
return output
derivative2(
input, 0, output, modes[0], cval, *extra_arguments, **extra_keywords
)
if ndim > 1:
tmp = _util._get_output(output.dtype, input)
for i in range(1, ndim):
derivative2(
input,
i,
tmp,
modes[i],
cval,
*extra_arguments,
**extra_keywords,
)
output += tmp
return output
def laplace(input, output=None, mode="reflect", cval=0.0, *, algorithm=None):
"""Multi-dimensional Laplace filter based on approximate second
derivatives.
Args:
input (cupy.ndarray): The input array.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.laplace`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
weights_dtype = cupy.promote_types(input.dtype, cupy.float32)
weights = cupy.array([1, -2, 1], dtype=weights_dtype)
def derivative2(input, axis, output, mode, cval):
return correlate1d(
input, weights, axis, output, mode, cval, algorithm=algorithm
)
return generic_laplace(input, derivative2, output, mode, cval)
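# Illustrative usage sketch (not part of the vendored module): ``laplace`` is
# ``generic_laplace`` specialized to the [1, -2, 1] second-difference stencil
# applied along every axis and summed.
#
#   >>> import cupy
#   >>> img = cupy.random.random((32, 32), dtype=cupy.float32)
#   >>> response = laplace(img, mode="reflect")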
def gaussian_laplace(
input,
sigma,
output=None,
mode="reflect",
cval=0.0,
*,
algorithm=None,
**kwargs,
):
"""Multi-dimensional Laplace filter using Gaussian second derivatives.
Args:
input (cupy.ndarray): The input array.
sigma (scalar or sequence of scalar): Standard deviations for each axis
of Gaussian kernel. A single value applies to all axes.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
kwargs (dict, optional):
            dict of extra keyword arguments to pass to ``gaussian_filter()``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.gaussian_laplace`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
def derivative2(input, axis, output, mode, cval):
order = [0] * input.ndim
order[axis] = 2
return gaussian_filter(
input,
sigma,
order,
output,
mode,
cval,
algorithm=algorithm,
**kwargs,
)
return generic_laplace(input, derivative2, output, mode, cval)
def generic_gradient_magnitude(
input,
derivative,
output=None,
mode="reflect",
cval=0.0,
extra_arguments=(),
extra_keywords=None,
):
"""Multi-dimensional gradient magnitude filter using a provided derivative
function.
Args:
input (cupy.ndarray): The input array.
derivative (callable): Function or other callable with the following
signature that is called once per axis::
derivative(input, axis, output, mode, cval,
*extra_arguments, **extra_keywords)
where ``input`` and ``output`` are ``cupy.ndarray``, ``axis`` is an
``int`` from ``0`` to the number of dimensions, and ``mode``,
``cval``, ``extra_arguments``, ``extra_keywords`` are the values
given to this function.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
extra_arguments (sequence, optional):
            Sequence of extra positional arguments to pass to ``derivative``.
        extra_keywords (dict, optional):
            dict of extra keyword arguments to pass to ``derivative``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.generic_gradient_magnitude`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
if extra_keywords is None:
extra_keywords = {}
ndim = input.ndim
modes = _util._fix_sequence_arg(mode, ndim, "mode", _util._check_mode)
output = _util._get_output(output, input)
if ndim == 0:
output[:] = input
return output
derivative(
input, 0, output, modes[0], cval, *extra_arguments, **extra_keywords
)
output *= output
if ndim > 1:
tmp = _util._get_output(output.dtype, input)
for i in range(1, ndim):
derivative(
input,
i,
tmp,
modes[i],
cval,
*extra_arguments,
**extra_keywords,
)
tmp *= tmp
output += tmp
return cupy.sqrt(output, output, casting="unsafe")
def gaussian_gradient_magnitude(
input,
sigma,
output=None,
mode="reflect",
cval=0.0,
*,
algorithm=None,
**kwargs,
):
"""Multi-dimensional gradient magnitude using Gaussian derivatives.
Args:
input (cupy.ndarray): The input array.
sigma (scalar or sequence of scalar): Standard deviations for each axis
of Gaussian kernel. A single value applies to all axes.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
kwargs (dict, optional):
            dict of extra keyword arguments to pass to ``gaussian_filter()``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.gaussian_gradient_magnitude`
.. note::
When the output data type is integral (or when no output is provided
and input is integral) the results may not perfectly match the results
from SciPy due to floating-point rounding of intermediate results.
"""
def derivative(input, axis, output, mode, cval):
order = [0] * input.ndim
order[axis] = 1
return gaussian_filter(
input,
sigma,
order,
output,
mode,
cval,
algorithm=algorithm,
**kwargs,
)
return generic_gradient_magnitude(input, derivative, output, mode, cval)
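# Illustrative usage sketch (not part of the vendored module): equivalent to
# the square root of the sum of squared first-order Gaussian derivatives.
#
#   >>> import cupy
#   >>> img = cupy.random.random((64, 64), dtype=cupy.float32)
#   >>> gm = gaussian_gradient_magnitude(img, sigma=1.5)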
def minimum_filter(
input,
size=None,
footprint=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""Multi-dimensional minimum filter.
Args:
input (cupy.ndarray): The input array.
size (int or sequence of int): One of ``size`` or ``footprint`` must be
provided. If ``footprint`` is given, ``size`` is ignored. Otherwise
``footprint = cupy.ones(size)`` with ``size`` automatically made to
match the number of dimensions in ``input``.
footprint (cupy.ndarray): a boolean array which specifies which of the
elements within this shape will get passed to the filter function.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int or sequence of int): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.minimum_filter`
"""
return _min_or_max_filter(
input, size, footprint, None, output, mode, cval, origin, "min"
)
def maximum_filter(
input,
size=None,
footprint=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""Multi-dimensional maximum filter.
Args:
input (cupy.ndarray): The input array.
size (int or sequence of int): One of ``size`` or ``footprint`` must be
provided. If ``footprint`` is given, ``size`` is ignored. Otherwise
``footprint = cupy.ones(size)`` with ``size`` automatically made to
match the number of dimensions in ``input``.
footprint (cupy.ndarray): a boolean array which specifies which of the
elements within this shape will get passed to the filter function.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int or sequence of int): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.maximum_filter`
"""
return _min_or_max_filter(
input, size, footprint, None, output, mode, cval, origin, "max"
)
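# Illustrative usage sketch (not part of the vendored module): a scalar
# ``size`` gives a separable filter, while ``footprint`` selects an arbitrary
# neighborhood (here a 3x3 cross).
#
#   >>> import cupy
#   >>> img = cupy.random.random((32, 32), dtype=cupy.float32)
#   >>> dil_square = maximum_filter(img, size=3)
#   >>> cross = cupy.array([[0, 1, 0], [1, 1, 1], [0, 1, 0]], dtype=bool)
#   >>> dil_cross = maximum_filter(img, footprint=cross)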
def _min_or_max_filter(
input, size, ftprnt, structure, output, mode, cval, origin, func
):
# structure is used by morphology.grey_erosion() and grey_dilation()
# and not by the regular min/max filters
if isinstance(ftprnt, tuple) and size is None:
size = ftprnt
ftprnt = None
sizes, ftprnt, structure = _filters_core._check_size_footprint_structure(
input.ndim, size, ftprnt, structure
)
if cval is cupy.nan:
raise NotImplementedError("NaN cval is unsupported")
if sizes is not None:
# Separable filter, run as a series of 1D filters
fltr = minimum_filter1d if func == "min" else maximum_filter1d
return _filters_core._run_1d_filters(
[fltr if size > 1 else None for size in sizes],
input,
sizes,
output,
mode,
cval,
origin,
)
origins, int_type = _filters_core._check_nd_args(
input, ftprnt, mode, origin, "footprint", sizes=sizes
)
if structure is not None and structure.ndim != input.ndim:
raise RuntimeError("structure array has incorrect shape")
if ftprnt.size == 0:
return cupy.zeros_like(input)
offsets = _filters_core._origins_to_offsets(origins, ftprnt.shape)
kernel = _get_min_or_max_kernel(
mode,
ftprnt.shape,
func,
offsets,
float(cval),
int_type,
has_structure=structure is not None,
has_central_value=bool(ftprnt[offsets]),
)
return _filters_core._call_kernel(
kernel, input, ftprnt, output, structure, weights_dtype=bool
)
def minimum_filter1d(
input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0
):
"""Compute the minimum filter along a single axis.
Args:
input (cupy.ndarray): The input array.
size (int): Length of the minimum filter.
axis (int): The axis of input along which to calculate. Default is -1.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int): The origin parameter controls the placement of the
filter, relative to the center of the current element of the
input. Default is ``0``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.minimum_filter1d`
"""
return _min_or_max_1d(input, size, axis, output, mode, cval, origin, "min")
def maximum_filter1d(
input, size, axis=-1, output=None, mode="reflect", cval=0.0, origin=0
):
"""Compute the maximum filter along a single axis.
Args:
input (cupy.ndarray): The input array.
size (int): Length of the maximum filter.
axis (int): The axis of input along which to calculate. Default is -1.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int): The origin parameter controls the placement of the
filter, relative to the center of the current element of the
input. Default is ``0``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.maximum_filter1d`
"""
return _min_or_max_1d(input, size, axis, output, mode, cval, origin, "max")
def _min_or_max_1d(
input,
size,
axis=-1,
output=None,
mode="reflect",
cval=0.0,
origin=0,
func="min",
):
ftprnt = cupy.ones(size, dtype=bool)
ftprnt, origin = _filters_core._convert_1d_args(
input.ndim, ftprnt, origin, axis
)
origins, int_type = _filters_core._check_nd_args(
input, ftprnt, mode, origin, "footprint"
)
offsets = _filters_core._origins_to_offsets(origins, ftprnt.shape)
kernel = _get_min_or_max_kernel(
mode,
ftprnt.shape,
func,
offsets,
float(cval),
int_type,
has_weights=False,
)
return _filters_core._call_kernel(
kernel, input, None, output, weights_dtype=bool
)
@cupy._util.memoize(for_each_device=True)
def _get_min_or_max_kernel(
mode,
w_shape,
func,
offsets,
cval,
int_type,
has_weights=True,
has_structure=False,
has_central_value=True,
):
# When there are no 'weights' (the footprint, for the 1D variants) then
# we need to make sure intermediate results are stored as doubles for
# consistent results with scipy.
ctype = "X" if has_weights else "double"
value = "{value}"
if not has_weights:
value = "cast<double>({})".format(value)
# Having a non-flat structure biases the values
if has_structure:
value += ("-" if func == "min" else "+") + "cast<X>(sval)"
if has_central_value:
pre = "{} value = x[i];"
found = "value = {func}({value}, value);"
else:
# If the central pixel is not included in the footprint we cannot
# assume `x[i]` is not below the min or above the max and thus cannot
# seed with that value. Instead we keep track of having set `value`.
pre = "{} value; bool set = false;"
found = "value = set ? {func}({value}, value) : {value}; set=true;"
return _filters_core._generate_nd_kernel(
func,
pre.format(ctype),
found.format(func=func, value=value),
"y = cast<Y>(value);",
mode,
w_shape,
int_type,
offsets,
cval,
ctype=ctype,
has_weights=has_weights,
has_structure=has_structure,
)
def rank_filter(
input,
rank,
size=None,
footprint=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""Multi-dimensional rank filter.
Args:
input (cupy.ndarray): The input array.
rank (int): The rank of the element to get. Can be negative to count
from the largest value, e.g. ``-1`` indicates the largest value.
size (int or sequence of int): One of ``size`` or ``footprint`` must be
provided. If ``footprint`` is given, ``size`` is ignored. Otherwise
``footprint = cupy.ones(size)`` with ``size`` automatically made to
match the number of dimensions in ``input``.
footprint (cupy.ndarray): a boolean array which specifies which of the
elements within this shape will get passed to the filter function.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int or sequence of int): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.rank_filter`
"""
rank = int(rank)
return _rank_filter(
input,
lambda fs: rank + fs if rank < 0 else rank,
size,
footprint,
output,
mode,
cval,
origin,
)
def median_filter(
input,
size=None,
footprint=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""Multi-dimensional median filter.
Args:
input (cupy.ndarray): The input array.
size (int or sequence of int): One of ``size`` or ``footprint`` must be
provided. If ``footprint`` is given, ``size`` is ignored. Otherwise
``footprint = cupy.ones(size)`` with ``size`` automatically made to
match the number of dimensions in ``input``.
footprint (cupy.ndarray): a boolean array which specifies which of the
elements within this shape will get passed to the filter function.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int or sequence of int): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.median_filter`
"""
return _rank_filter(
input, lambda fs: fs // 2, size, footprint, output, mode, cval, origin
)
def percentile_filter(
input,
percentile,
size=None,
footprint=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""Multi-dimensional percentile filter.
Args:
input (cupy.ndarray): The input array.
percentile (scalar): The percentile of the element to get (from ``0``
to ``100``). Can be negative, thus ``-20`` equals ``80``.
size (int or sequence of int): One of ``size`` or ``footprint`` must be
provided. If ``footprint`` is given, ``size`` is ignored. Otherwise
``footprint = cupy.ones(size)`` with ``size`` automatically made to
match the number of dimensions in ``input``.
footprint (cupy.ndarray): a boolean array which specifies which of the
elements within this shape will get passed to the filter function.
output (cupy.ndarray, dtype or None): The array in which to place the
            output. Default is same dtype as the input.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``'constant'``. Default is ``0.0``.
origin (int or sequence of int): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of the filtering.
.. seealso:: :func:`scipy.ndimage.percentile_filter`
"""
percentile = float(percentile)
if percentile < 0.0:
percentile += 100.0
if percentile < 0.0 or percentile > 100.0:
raise RuntimeError("invalid percentile")
if percentile == 100.0:
def get_rank(fs):
return fs - 1
else:
def get_rank(fs):
return int(float(fs) * percentile / 100.0)
return _rank_filter(
input, get_rank, size, footprint, output, mode, cval, origin
)
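# Illustrative check (not part of the vendored module): for an odd filter
# size, the 50th percentile selects the same rank as ``median_filter``
# (rank 4 of 9 for a 3x3 window), so the outputs match exactly.
#
#   >>> import cupy
#   >>> img = cupy.random.random((32, 32), dtype=cupy.float32)
#   >>> m = median_filter(img, size=3)
#   >>> p = percentile_filter(img, 50, size=3)
#   >>> bool(cupy.all(m == p))
#   True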
def _rank_filter(
input,
get_rank,
size=None,
footprint=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
sizes, footprint, _ = _filters_core._check_size_footprint_structure(
input.ndim, size, footprint, None, force_footprint=False
)
if cval is cupy.nan:
raise NotImplementedError("NaN cval is unsupported")
origins, int_type = _filters_core._check_nd_args(
input, footprint, mode, origin, "footprint", sizes=sizes
)
has_weights = True
if sizes is not None:
has_weights = False
filter_size = internal.prod(sizes)
if filter_size == 0:
return cupy.zeros_like(input)
footprint_shape = tuple(sizes)
elif footprint.size == 0:
return cupy.zeros_like(input)
else:
footprint_shape = footprint.shape
filter_size = int(footprint.sum())
if filter_size == footprint.size:
# can omit passing the footprint if it is all ones
sizes = footprint.shape
has_weights = False
if not has_weights:
footprint = None
rank = get_rank(filter_size)
if rank < 0 or rank >= filter_size:
raise RuntimeError("rank not within filter footprint size")
if rank == 0:
min_max_op = "min"
elif rank == filter_size - 1:
min_max_op = "max"
else:
min_max_op = None
if min_max_op is not None:
if sizes is not None:
return _min_or_max_filter(
input,
sizes[0],
None,
None,
output,
mode,
cval,
origins,
min_max_op,
)
else:
return _min_or_max_filter(
input,
None,
footprint,
None,
output,
mode,
cval,
origins,
min_max_op,
)
offsets = _filters_core._origins_to_offsets(origins, footprint_shape)
kernel = _get_rank_kernel(
filter_size,
rank,
mode,
footprint_shape,
offsets,
float(cval),
int_type,
has_weights=has_weights,
)
return _filters_core._call_kernel(
kernel, input, footprint, output, weights_dtype=bool
)
__SHELL_SORT = """
__device__ void sort(X *array, int size) {{
int gap = {gap};
while (gap > 1) {{
gap /= 3;
for (int i = gap; i < size; ++i) {{
X value = array[i];
int j = i - gap;
while (j >= 0 && value < array[j]) {{
array[j + gap] = array[j];
j -= gap;
}}
array[j + gap] = value;
}}
}}
}}"""
@cupy._util.memoize()
def _get_shell_gap(filter_size):
gap = 1
while gap < filter_size:
gap = 3 * gap + 1
return gap
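# Illustrative note (not part of the vendored module): the gap sequence grows
# as 1, 4, 13, 40, 121, ... so e.g. ``_get_shell_gap(100)`` returns 121 and
# the first gap actually used by the shell sort above (after ``gap /= 3``)
# is 40.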
@cupy._util.memoize(for_each_device=True)
def _get_rank_kernel(
filter_size, rank, mode, w_shape, offsets, cval, int_type, has_weights
):
s_rank = min(rank, filter_size - rank - 1)
# The threshold was set based on the measurements on a V100
# TODO(leofang, anaruse): Use Optuna to automatically tune the threshold,
# as it may vary depending on the GPU in use, compiler version, dtype,
# filter size, etc.
if s_rank <= 80:
# When s_rank is small and register usage is low, this partial
# selection sort approach is faster than general sorting approach
# using shell sort.
if s_rank == rank:
comp_op = "<"
else:
comp_op = ">"
array_size = s_rank + 2
found_post = """
if (iv > {rank} + 1) {{{{
int target_iv = 0;
X target_val = values[0];
for (int jv = 1; jv <= {rank} + 1; jv++) {{{{
if (target_val {comp_op} values[jv]) {{{{
target_val = values[jv];
target_iv = jv;
}}}}
}}}}
if (target_iv <= {rank}) {{{{
values[target_iv] = values[{rank} + 1];
}}}}
iv = {rank} + 1;
}}}}""".format(
rank=s_rank, comp_op=comp_op
)
post = """
X target_val = values[0];
for (int jv = 1; jv <= {rank}; jv++) {{
if (target_val {comp_op} values[jv]) {{
target_val = values[jv];
}}
}}
y=cast<Y>(target_val);""".format(
rank=s_rank, comp_op=comp_op
)
sorter = ""
else:
array_size = filter_size
found_post = ""
post = "sort(values,{});\ny=cast<Y>(values[{}]);".format(
filter_size, rank
)
sorter = __SHELL_SORT.format(gap=_get_shell_gap(filter_size))
return _filters_core._generate_nd_kernel(
"rank_{}_{}".format(filter_size, rank),
"int iv = 0;\nX values[{}];".format(array_size),
"values[iv++] = {value};" + found_post,
post,
mode,
w_shape,
int_type,
offsets,
cval,
has_weights=has_weights,
preamble=sorter,
)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/pad_elementwise.py
|
import cupy
def _pad_boundary_ops(mode, var_name, size, int_t="int", no_singleton=False):
T = "int" if int_t == "int" else "long long"
min_func = "min"
max_func = "max"
if mode == "constant":
ops = f"""
if (({var_name} < 0) || {var_name} >= {size}) {{
{var_name} = -1;
}}"""
elif mode == "symmetric":
ops = f"""
if ({var_name} < 0) {{
{var_name} = - 1 -{var_name};
}}
{var_name} %= {size} * 2;
{var_name} = {min_func}({var_name}, 2 * {size} - 1 - {var_name});
"""
elif mode == "reflect":
ops = f"""
if ({size} == 1) {{
{var_name} = 0;
}} else {{
if ({var_name} < 0) {{
{var_name} = -{var_name};
}}
if ({var_name} >= {size}) {{
{var_name} = 1 + ({var_name} - 1) % (({size} - 1) * 2);
{var_name} = {min_func}({var_name},
2 * {size} - 2 - {var_name});
}}
}}""" # noqa
elif mode == "reflect_no_singleton_dim":
# the same as reflect, but without the extra `{size} == 1` check
ops = f"""
if ({var_name} < 0) {{
{var_name} = -{var_name};
}}
if ({var_name} >= {size}) {{
{var_name} = 1 + ({var_name} - 1) % (({size} - 1) * 2);
{var_name} = {min_func}({var_name}, 2 * {size} - 2 - {var_name});
}}
"""
elif mode == "edge":
ops = f"""
{var_name} = {min_func}(
{max_func}(static_cast<{T}>({var_name}), static_cast<{T}>(0)),
static_cast<{T}>({size} - 1));
"""
elif mode == "wrap":
ops = f"""
{var_name} %= {size};
if ({var_name} < 0) {{
{var_name} += {size};
}}
"""
return ops + "\n"
def _generate_size_vars(
ndim, arr_name="arr", size_prefix="size", int_type="int"
):
"""Store shape of a raw array into individual variables.
Examples
--------
>>> print(_generate_size_vars(3, 'arr', 'size', 'int'))
int size_0 = arr.shape()[0];
int size_1 = arr.shape()[1];
int size_2 = arr.shape()[2];
"""
set_size_vars = [
f"{int_type} {size_prefix}_{i} = {arr_name}.shape()[{i}];"
for i in range(ndim)
]
return "\n".join(set_size_vars) + "\n"
def _generate_stride_vars(
ndim, arr_name="arr", size_prefix="stride", int_type="int"
):
"""Store stride (in bytes) of a raw array into individual variables.
Examples
--------
    >>> print(_generate_stride_vars(3, 'arr', 'stride', 'int'))
int stride_0 = arr.strides()[0];
int stride_1 = arr.strides()[1];
int stride_2 = arr.strides()[2];
"""
set_size_vars = [
f"{int_type} {size_prefix}_{i} = {arr_name}.strides()[{i}];"
for i in range(ndim)
]
return "\n".join(set_size_vars) + "\n"
def _generate_indices_ops(
ndim,
size_prefix="size",
int_type="int",
index_prefix="ind",
order="C",
):
"""Generate indices based existing variables.
Assumes variables f'{size_prefix}_{i}' has the size along axis, i.
Examples
--------
>>> print(_generate_indices_ops(3, 'size', 'int', 'ind', 'C'))
int _i = i;
int ind_2 = _i % size_2; _i /= size_2;
int ind_1 = _i % size_1; _i /= size_1;
int ind_0 = _i;
"""
if order == "C":
_range = range(ndim - 1, 0, -1)
idx_largest_stride = 0
elif order == "F":
_range = range(ndim - 1)
idx_largest_stride = ndim - 1
else:
raise ValueError(f"Unknown order: {order}. Must be one of {'C', 'F'}.")
body = [
f"{int_type} {index_prefix}_{j} = _i % {size_prefix}_{j}; _i /= {size_prefix}_{j};" # noqa
for j in _range
]
body = "\n".join(body)
code = f"{int_type} _i = i;\n"
code += body + "\n"
code += f"{int_type} {index_prefix}_{idx_largest_stride} = _i;\n"
return code
def _gen_raveled(ndim, stride_prefix="stride", index_prefix="i", order=None):
"""Generate raveled index for c-ordered memory layout
For index_prefix='i', the indices are (i_0, i_1, ....)
For stride_prefix='stride', the stride is (stride_0, stride_1, ....)
"""
return " + ".join(
f"{stride_prefix}_{j} * {index_prefix}_{j}" for j in range(ndim)
)
def _get_pad_kernel_code(pad_starts, int_type="int", mode="edge", order="C"):
# variables storing shape of the output array
ndim = len(pad_starts)
out_size_prefix = "shape"
operation = _generate_size_vars(
ndim, arr_name="out", size_prefix=out_size_prefix, int_type=int_type
)
# variables storing shape of the input array
in_size_prefix = "ishape"
in_stride_prefix = "istride"
operation += _generate_size_vars(
ndim, arr_name="arr", size_prefix=in_size_prefix, int_type=int_type
)
operation += _generate_stride_vars(
ndim, arr_name="arr", size_prefix=in_stride_prefix, int_type=int_type
)
# unraveled indices into the output array
out_index_prefix = "oi"
# Note: Regardless of actual memory layout, need order='C' here to match
# the behavior of the index raveling used by ElementwiseKernel.
operation += _generate_indices_ops(
ndim,
size_prefix=out_size_prefix,
int_type=int_type,
index_prefix=out_index_prefix,
order="C",
)
# compute unraveled indices into the input array
# (i_0, i_1, ...)
in_index_prefix = "i"
operation += "\n".join(
[
f"{int_type} {in_index_prefix}_{j} = {out_index_prefix}_{j} - {pad_starts[j]};" # noqa
for j in range(ndim)
]
)
operation += "\n"
input_indices = tuple(f"{in_index_prefix}_{j}" for j in range(ndim))
# impose boundary condition
range_cond = " || ".join(
f"({coord} < 0) || ({coord} >= {in_size_prefix}_{j})"
for j, coord in enumerate(input_indices)
)
operation += f"bool range_cond = {range_cond};"
operation += "if (range_cond) {\n"
if mode == "constant":
for j, coord in enumerate(input_indices):
operation += _pad_boundary_ops(
mode, coord, f"{in_size_prefix}_{j}", int_type
)
operation += f"""
if ({coord} == -1) {{
out[i] = static_cast<F>(cval);
return;
}}
"""
else:
for j, coord in enumerate(input_indices):
operation += _pad_boundary_ops(
mode, coord, f"{in_size_prefix}_{j}", int_type
)
operation += "}\n"
raveled_idx = _gen_raveled(
ndim,
stride_prefix=in_stride_prefix,
index_prefix=in_index_prefix,
order=order,
)
operation += f"""
// set output based on raveled index into the input array
const char* char_arr = reinterpret_cast<const char*>(&arr[0]);
out[i] = *reinterpret_cast<const F*>(char_arr + {raveled_idx});
"""
return operation
@cupy._util.memoize(for_each_device=True)
def _get_pad_kernel(pad_starts, int_type="int", mode="edge", order="C"):
in_params = "raw F arr"
if mode == "constant":
in_params += ", float64 cval"
kernel_name = f"pad_{len(pad_starts)}d_order{order}_{mode}"
if int_type != "int":
kernel_name += f"_{int_type.replace(' ', '_')}_idx"
return cupy.ElementwiseKernel(
in_params=in_params,
out_params="raw F out",
operation=_get_pad_kernel_code(pad_starts, int_type, mode, order),
name=kernel_name,
)
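# Hedged usage sketch (added for illustration; not part of the vendored code).
# `_example_edge_pad` is a hypothetical helper showing how the generated kernel
# might be launched; all of its parameters are declared `raw`, so the launch
# size must be passed explicitly via ``size=``.
def _example_edge_pad(arr):  # pragma: no cover - illustrative sketch
    """Pad a 2D CuPy array by one element on each side of each axis."""
    pad_starts = (1, 1)
    out = cupy.empty(tuple(s + 2 for s in arr.shape), dtype=arr.dtype)
    kern = _get_pad_kernel(pad_starts, int_type="int", mode="edge")
    kern(arr, out, size=out.size)
    # For mode="constant", the fill value is an extra argument:
    # kern(arr, cval, out, size=out.size)
    return out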
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_signaltools_core.py
|
"""A vendored subset of cupyx.scipy.signal._signaltools_core"""
import cupy
from cupyx.scipy import fft
from cucim.skimage._vendored._ndimage_filters import _get_correlate_kernel
from . import _internal as internal, _ndimage_util as _util
def _check_conv_inputs(in1, in2, mode, convolution=True):
if in1.ndim == in2.ndim == 0:
return in1 * (in2 if convolution else in2.conj())
if in1.ndim != in2.ndim:
raise ValueError("in1 and in2 should have the same dimensionality")
if in1.size == 0 or in2.size == 0:
return cupy.array([], dtype=in1.dtype)
if mode not in ("full", "same", "valid"):
raise ValueError('acceptable modes are "valid", "same", or "full"')
return None
def _direct_correlate(
in1,
in2,
mode="full",
output=float,
convolution=False,
boundary="constant",
fillvalue=0.0,
shift=False,
):
if in1.ndim != 1 and (
in1.dtype.kind == "b"
or (in1.dtype.kind == "f" and in1.dtype.itemsize < 4)
):
raise ValueError("unsupported type in SciPy")
# Swaps inputs so smaller one is in2:
# NOTE: when mode != 'valid' we can only swap with a constant-0 boundary
swapped_inputs = False
orig_in1_shape = in1.shape
if _inputs_swap_needed(mode, in1.shape, in2.shape) or (
in2.size > in1.size and boundary == "constant" and fillvalue == 0
):
in1, in2 = in2, in1
swapped_inputs = not convolution
# Due to several optimizations, the second array can only be 2 GiB
if in2.nbytes >= (1 << 31):
raise RuntimeError(
"smaller array must be 2 GiB or less, " 'use method="fft" instead'
)
# At this point, in1.size > in2.size
# (except some cases when boundary != 'constant' or fillvalue != 0)
# Figure out the output shape and the origin of the kernel
if mode == "full":
out_shape = tuple(x1 + x2 - 1 for x1, x2 in zip(in1.shape, in2.shape))
offsets = tuple(x - 1 for x in in2.shape)
elif mode == "valid":
out_shape = tuple(x1 - x2 + 1 for x1, x2 in zip(in1.shape, in2.shape))
offsets = (0,) * in1.ndim
else: # mode == 'same':
# In correlate2d: When using "same" mode with even-length inputs, the
# outputs of correlate and correlate2d differ: There is a 1-index
# offset between them.
# This is dealt with by using "shift" parameter.
out_shape = orig_in1_shape
if orig_in1_shape == in1.shape:
offsets = tuple((x - shift) // 2 for x in in2.shape)
else:
offsets = tuple(
(2 * x2 - x1 - (not convolution) + shift) // 2
for x1, x2 in zip(in1.shape, in2.shape)
)
# Check the output
if not isinstance(output, cupy.ndarray):
output = cupy.empty(out_shape, output)
elif output.shape != out_shape:
raise ValueError("out has wrong shape")
# Get and run the CuPy kernel
int_type = _util._get_inttype(in1)
kernel = _get_correlate_kernel(
boundary, in2.shape, int_type, offsets, fillvalue
)
in2 = _reverse_and_conj(in2) if convolution else in2
if not swapped_inputs:
kernel(in1, in2, output)
elif output.dtype.kind != "c":
# Avoids one array copy
kernel(in1, in2, _reverse_and_conj(output))
else:
kernel(in1, in2, output)
output = cupy.ascontiguousarray(_reverse_and_conj(output))
return output
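# Illustrative note (added): the output-shape arithmetic above follows SciPy's
# conventions. For 1D inputs of lengths 5 and 3, mode='full' yields
# 5 + 3 - 1 = 7 samples, mode='valid' yields 5 - 3 + 1 = 3, and mode='same'
# keeps the original 5 samples of `in1`.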
def _reverse_and_conj(x):
# Reverse array `x` in all dimensions and perform the complex conjugate
return x[(slice(None, None, -1),) * x.ndim].conj()
def _inputs_swap_needed(mode, shape1, shape2, axes=None):
# See scipy's documentation in scipy.signal.signaltools
if mode != "valid" or not shape1:
return False
if axes is None:
axes = range(len(shape1))
not_ok1 = any(shape1[i] < shape2[i] for i in axes)
not_ok2 = any(shape1[i] > shape2[i] for i in axes)
if not_ok1 and not_ok2:
raise ValueError(
'For "valid" mode, one must be at least '
"as large as the other in every dimension"
)
return not_ok1
def _init_freq_conv_axes(in1, in2, mode, axes, sorted_axes=False):
# See scipy's documentation in scipy.signal.signaltools
s1, s2 = in1.shape, in2.shape
axes = _init_nd_and_axes(in1, axes)
# Length-1 axes can rely on broadcasting rules, no fft needed
axes = [ax for ax in axes if s1[ax] != 1 and s2[ax] != 1]
if sorted_axes:
axes.sort()
# Check that unused axes are either 1 (broadcast) or the same length
for ax, (dim1, dim2) in enumerate(zip(s1, s2)):
if ax not in axes and dim1 != dim2 and dim1 != 1 and dim2 != 1:
raise ValueError(
"incompatible shapes for in1 and in2:"
" {} and {}".format(s1, s2)
)
# Check that input sizes are compatible with 'valid' mode.
if _inputs_swap_needed(mode, s1, s2, axes=axes):
# Convolution is commutative
in1, in2 = in2, in1
return in1, in2, axes
def _init_nd_and_axes(x, axes):
# See documentation in scipy.fft._helper._init_nd_shape_and_axes
# except shape argument is always None and doesn't return new shape
try:
axes = internal._normalize_axis_indices(axes, x.ndim, sort_axes=False)
except TypeError:
axes = internal._normalize_axis_indices(axes, x.ndim)
if not len(axes):
raise ValueError("when provided, axes cannot be empty")
if any(x.shape[ax] < 1 for ax in axes):
raise ValueError("invalid number of data points specified")
return axes
def _freq_domain_conv(in1, in2, axes, shape, calc_fast_len=False):
# See scipy's documentation in scipy.signal.signaltools
real = in1.dtype.kind != "c" and in2.dtype.kind != "c"
fshape = (
[fft.next_fast_len(shape[a], real) for a in axes]
if calc_fast_len
else shape
)
fftn, ifftn = (fft.rfftn, fft.irfftn) if real else (fft.fftn, fft.ifftn)
# Perform the convolution
sp1 = fftn(in1, fshape, axes=axes)
sp2 = fftn(in2, fshape, axes=axes)
out = ifftn(sp1 * sp2, fshape, axes=axes)
return out[tuple(slice(x) for x in shape)] if calc_fast_len else out
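# Explanatory note (added): `fft.next_fast_len` rounds each transform length up
# to a size with small prime factors so the FFTs run fast; when `calc_fast_len`
# is True, the final `out[tuple(slice(x) for x in shape)]` crops the result
# back to the requested shape. Real-valued inputs take the rfftn/irfftn path.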
def _apply_conv_mode(full, s1, s2, mode, axes):
# See scipy's documentation in scipy.signal.signaltools
if mode == "full":
return cupy.ascontiguousarray(full)
if mode == "valid":
s1 = [
full.shape[a] if a not in axes else s1[a] - s2[a] + 1
for a in range(full.ndim)
]
starts = [(cur - new) // 2 for cur, new in zip(full.shape, s1)]
slices = tuple(
slice(start, start + length) for start, length in zip(starts, s1)
)
return cupy.ascontiguousarray(full[slices])
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/time.py
|
"""Timing utility copied from cupyx.time
added kwargs support to repeat
removed experimental warning
"""
import math
import time
import cupy
import numpy
class _PerfCaseResult(object):
def __init__(self, name, ts, devices):
assert ts.ndim == 2
assert ts.shape[0] == len(devices) + 1
assert ts.shape[1] > 0
self.name = name
self._ts = ts
self._devices = devices
@property
def cpu_times(self):
return self._ts[0]
@property
def gpu_times(self):
return self._ts[1:]
@staticmethod
def _to_str_per_item(device_name, t):
assert t.ndim == 1
assert t.size > 0
t_us = t * 1e6
s = " {}:{:9.03f} us".format(device_name, t_us.mean())
if t.size > 1:
s += " +/-{:6.03f} (min:{:9.03f} / max:{:9.03f}) us".format(
t_us.std(), t_us.min(), t_us.max()
)
return s
def to_str(self, show_gpu=False):
results = [self._to_str_per_item("CPU", self._ts[0])]
if show_gpu:
for i, d in enumerate(self._devices):
results.append(
self._to_str_per_item("GPU-{}".format(d), self._ts[1 + i])
)
return "{:<20s}:{}".format(self.name, " ".join(results))
def __str__(self):
return self.to_str(show_gpu=True)
def repeat(
func,
args=(),
kwargs={},
n_repeat=10000,
*,
name=None,
n_warmup=10,
max_duration=math.inf,
devices=None,
):
if name is None:
try:
name = func.__name__
except AttributeError:
name = "unknown"
if devices is None:
devices = (cupy.cuda.get_device_id(),)
if not callable(func):
raise ValueError("`func` should be a callable object.")
if not isinstance(args, tuple):
raise ValueError("`args` should be of tuple type.")
if not isinstance(kwargs, dict):
raise ValueError("`kwargs` should be of dict type.")
if not isinstance(n_repeat, int):
raise ValueError("`n_repeat` should be an integer.")
if not isinstance(name, str):
raise ValueError("`str` should be a string.")
if not isinstance(n_warmup, int):
raise ValueError("`n_warmup` should be an integer.")
if not isinstance(devices, tuple):
raise ValueError("`devices` should be of tuple type")
return _repeat(
func, args, kwargs, n_repeat, name, n_warmup, max_duration, devices
)
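# Hedged usage sketch (added for illustration; assumes a CUDA device is
# available). `_example_repeat` is a hypothetical helper, not part of the
# vendored module:
def _example_repeat():  # pragma: no cover - illustrative sketch
    """Time ``cupy.sum`` on a random array and print the measured times."""
    x = cupy.random.random((1000, 1000))
    perf = repeat(cupy.sum, (x,), n_repeat=100, n_warmup=10)
    print(perf)  # mean/min/max CPU and GPU times via _PerfCaseResult.__str__
    return perf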
def _repeat(
func, args, kwargs, n_repeat, name, n_warmup, max_duration, devices
):
events_1 = []
events_2 = []
for i in devices:
with cupy.cuda.Device(i):
events_1.append(cupy.cuda.stream.Event())
events_2.append(cupy.cuda.stream.Event())
ev1 = cupy.cuda.stream.Event()
ev2 = cupy.cuda.stream.Event()
for i in range(n_warmup):
func(*args, **kwargs)
for event, device in zip(events_1, devices):
with cupy.cuda.Device(device):
event.record()
event.synchronize()
cpu_times = []
gpu_times = [[] for i in events_1]
duration = 0
for i in range(n_repeat):
for event, device in zip(events_1, devices):
with cupy.cuda.Device(device):
event.record()
t1 = time.perf_counter()
func(*args, **kwargs)
t2 = time.perf_counter()
cpu_time = t2 - t1
cpu_times.append(cpu_time)
for event, device in zip(events_2, devices):
with cupy.cuda.Device(device):
event.record()
for event, device in zip(events_2, devices):
with cupy.cuda.Device(device):
event.synchronize()
for i, (ev1, ev2) in enumerate(zip(events_1, events_2)):
gpu_time = cupy.cuda.get_elapsed_time(ev1, ev2) * 1e-3
gpu_times[i].append(gpu_time)
duration += time.perf_counter() - t1
if duration > max_duration:
break
ts = numpy.asarray([cpu_times] + gpu_times, dtype=numpy.float64)
return _PerfCaseResult(name, ts, devices=devices)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_texture.py
|
import cupy
from cupy import _core
from cupy.cuda import runtime, texture
_affine_transform_2d_array_kernel = _core.ElementwiseKernel(
"U texObj, raw float32 m, uint64 width",
"T transformed_image",
"""
float3 pixel = make_float3(
(float)(i / width),
(float)(i % width),
1.0f
);
float x = dot(pixel, make_float3(m[0], m[1], m[2])) + .5f;
float y = dot(pixel, make_float3(m[3], m[4], m[5])) + .5f;
transformed_image = tex2D<T>(texObj, y, x);
""",
"cupyx_texture_affine_transformation_2d_array",
preamble="""
inline __host__ __device__ float dot(float3 a, float3 b)
{
return a.x * b.x + a.y * b.y + a.z * b.z;
}
""",
)
_affine_transform_3d_array_kernel = _core.ElementwiseKernel(
"U texObj, raw float32 m, uint64 height, uint64 width",
"T transformed_volume",
"""
float4 voxel = make_float4(
(float)(i / (width * height)),
(float)((i % (width * height)) / width),
(float)((i % (width * height)) % width),
1.0f
);
float x = dot(voxel, make_float4(m[0], m[1], m[2], m[3])) + .5f;
float y = dot(voxel, make_float4(m[4], m[5], m[6], m[7])) + .5f;
float z = dot(voxel, make_float4(m[8], m[9], m[10], m[11])) + .5f;
transformed_volume = tex3D<T>(texObj, z, y, x);
""",
"cupyx_texture_affine_transformation_3d_array",
preamble="""
inline __host__ __device__ float dot(float4 a, float4 b)
{
return a.x * b.x + a.y * b.y + a.z * b.z + a.w * b.w;
}
""",
)
def _create_texture_object(
data, address_mode: str, filter_mode: str, read_mode: str, border_color=0
):
if cupy.issubdtype(data.dtype, cupy.unsignedinteger):
fmt_kind = runtime.cudaChannelFormatKindUnsigned
elif cupy.issubdtype(data.dtype, cupy.integer):
fmt_kind = runtime.cudaChannelFormatKindSigned
elif cupy.issubdtype(data.dtype, cupy.floating):
fmt_kind = runtime.cudaChannelFormatKindFloat
else:
raise ValueError(f"Unsupported data type {data.dtype}")
if address_mode == "nearest":
address_mode = runtime.cudaAddressModeClamp
elif address_mode == "constant":
address_mode = runtime.cudaAddressModeBorder
else:
raise ValueError(
f"Unsupported address mode {address_mode} "
"(supported: constant, nearest)"
)
if filter_mode == "nearest":
filter_mode = runtime.cudaFilterModePoint
elif filter_mode == "linear":
filter_mode = runtime.cudaFilterModeLinear
else:
raise ValueError(
f"Unsupported filter mode {filter_mode} "
f"(supported: nearest, linear)"
)
if read_mode == "element_type":
read_mode = runtime.cudaReadModeElementType
elif read_mode == "normalized_float":
read_mode = runtime.cudaReadModeNormalizedFloat
else:
raise ValueError(
f"Unsupported read mode {read_mode} "
"(supported: element_type, normalized_float)"
)
texture_fmt = texture.ChannelFormatDescriptor(
data.itemsize * 8, 0, 0, 0, fmt_kind
)
# CUDAArray: last dimension is the fastest changing dimension
array = texture.CUDAarray(texture_fmt, *data.shape[::-1])
res_desc = texture.ResourceDescriptor(
runtime.cudaResourceTypeArray, cuArr=array
)
# TODO(the-lay): each dimension can have a different addressing mode
# TODO(the-lay): border color/value can be defined for up to 4 channels
tex_desc = texture.TextureDescriptor(
(address_mode,) * data.ndim,
filter_mode,
read_mode,
borderColors=(border_color,),
)
tex_obj = texture.TextureObject(res_desc, tex_desc)
array.copy_from(data)
return tex_obj
def affine_transformation(
data,
transformation_matrix,
output_shape=None,
output=None,
interpolation: str = "linear",
mode: str = "constant",
border_value=0,
):
"""
Apply an affine transformation.
The method uses texture memory and supports only 2D and 3D float32 arrays
without channel dimension.
Args:
data (cupy.ndarray): The input array or texture object.
transformation_matrix (cupy.ndarray): Affine transformation matrix.
Must be homogeneous and have shape ``(ndim + 1, ndim + 1)``.
output_shape (tuple of ints): Shape of output. If not specified,
the input array shape is used. Default is None.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array. If not specified,
creates the output array with shape of ``output_shape``. Default is
None.
interpolation (str): Specifies interpolation mode: ``'linear'`` or
``'nearest'``. Default is ``'linear'``.
mode (str): Specifies addressing mode for points outside of the array:
(``'constant'``, ``'nearest'``). Default is ``'constant'``.
border_value: Specifies value to be used for coordinates outside
of the array for ``'constant'`` mode. Default is 0.
Returns:
cupy.ndarray:
The transformed input.
.. seealso:: :func:`cupyx.scipy.ndimage.affine_transform`
"""
ndim = data.ndim
if (ndim < 2) or (ndim > 3):
raise ValueError(
"Texture memory affine transformation is defined only for "
"2D and 3D arrays without channel dimension."
)
dtype = data.dtype
if dtype != cupy.float32:
raise ValueError(
f"Texture memory affine transformation is available "
f"only for float32 data type (not {dtype})"
)
if interpolation not in ["linear", "nearest"]:
raise ValueError(
f"Unsupported interpolation {interpolation} "
f"(supported: linear, nearest)"
)
if transformation_matrix.shape != (ndim + 1, ndim + 1):
raise ValueError("Matrix must be have shape (ndim + 1, ndim + 1)")
texture_object = _create_texture_object(
data,
address_mode=mode,
filter_mode=interpolation,
read_mode="element_type",
border_color=border_value,
)
if ndim == 2:
kernel = _affine_transform_2d_array_kernel
else:
kernel = _affine_transform_3d_array_kernel
if output_shape is None:
output_shape = data.shape
if output is None:
output = cupy.zeros(output_shape, dtype=dtype)
elif isinstance(output, (type, cupy.dtype)):
if output != cupy.float32:
raise ValueError(
f"Texture memory affine transformation is "
f"available only for float32 data type (not "
f"{output})"
)
output = cupy.zeros(output_shape, dtype=output)
elif isinstance(output, cupy.ndarray):
if output.shape != output_shape:
raise ValueError("Output shapes do not match")
else:
raise ValueError("Output must be None, cupy.ndarray or cupy.dtype")
kernel(texture_object, transformation_matrix, *output_shape[1:], output)
return output
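# Hedged usage sketch (added for illustration; assumes a CUDA, non-HIP device).
# `_example_texture_translation` is a hypothetical helper, not part of the
# vendored module:
def _example_texture_translation():  # pragma: no cover - illustrative sketch
    """Apply a homogeneous translation through texture memory.
    With this matrix, ``warped[r, c]`` samples the input at ``(r + 5, c + 3)``.
    """
    image = cupy.random.random((64, 64)).astype(cupy.float32)
    matrix = cupy.asarray(
        [[1, 0, 5],
         [0, 1, 3],
         [0, 0, 1]], dtype=cupy.float32)
    return affine_transformation(image, matrix, interpolation="nearest")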
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_ndimage_interpolation.py
|
import cmath
import math
import warnings
import cupy
import numpy
from cupy import _core
from cupy.cuda import runtime
from cucim.skimage._vendored import (
_ndimage_interp_kernels as _interp_kernels,
_ndimage_spline_prefilter_core as _spline_prefilter_core,
_ndimage_util as _util,
pad,
)
from cucim.skimage._vendored._internal import _normalize_axis_index, prod
def _check_parameter(func_name, order, mode):
if order is None:
warnings.warn(
f"Currently the default order of {func_name} is 1. In a "
"future release this may change to 3 to match "
"scipy.ndimage "
)
elif order < 0 or 5 < order:
raise ValueError("spline order is not supported")
if mode not in (
"constant",
"grid-constant",
"nearest",
"mirror",
"reflect",
"grid-mirror",
"wrap",
"grid-wrap",
"opencv",
"_opencv_edge",
):
raise ValueError("boundary mode ({}) is not supported".format(mode))
def _get_spline_output(input, output):
"""Create workspace array, temp, and the final dtype for the output.
Differs from SciPy by not always forcing the internal floating point dtype
to be double precision.
"""
complex_data = input.dtype.kind == "c"
if complex_data:
min_float_dtype = cupy.complex64
else:
min_float_dtype = cupy.float32
if isinstance(output, cupy.ndarray):
if complex_data and output.dtype.kind != "c":
raise ValueError(
"output must have complex dtype for complex inputs"
)
float_dtype = cupy.promote_types(output.dtype, min_float_dtype)
output_dtype = output.dtype
else:
if output is None:
output = output_dtype = input.dtype
else:
output_dtype = cupy.dtype(output)
float_dtype = cupy.promote_types(output, min_float_dtype)
if (
isinstance(output, cupy.ndarray)
and output.dtype == float_dtype == output_dtype
and output.flags.c_contiguous
):
if output is not input:
_core.elementwise_copy(input, output)
temp = output
else:
temp = input.astype(float_dtype, copy=False)
temp = cupy.ascontiguousarray(temp)
if cupy.shares_memory(temp, input, "MAY_SHARE_BOUNDS"):
temp = temp.copy()
return temp, float_dtype, output_dtype
def spline_filter1d(
input, order=3, axis=-1, output=cupy.float64, mode="mirror"
):
"""
Calculate a 1-D spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Args:
input (cupy.ndarray): The input array.
order (int): The order of the spline interpolation, default is 3. Must
be in the range 0-5.
axis (int): The axis along which the spline filter is applied. Default
is the last axis.
output (cupy.ndarray or dtype, optional): The array in which to place
the output, or the dtype of the returned array. Default is
``numpy.float64``.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
Returns:
cupy.ndarray: The result of prefiltering the input.
.. seealso:: :func:`scipy.ndimage.spline_filter1d`
"""
if order < 0 or order > 5:
raise RuntimeError("spline order not supported")
x = input
ndim = x.ndim
axis = _normalize_axis_index(axis, ndim)
# order 0, 1 don't require reshaping as no CUDA kernel will be called
# scalar or size 1 arrays also don't need to be filtered
run_kernel = not (order < 2 or x.ndim == 0 or x.shape[axis] == 1)
if not run_kernel:
output = _util._get_output(output, input)
_core.elementwise_copy(x, output)
return output
temp, data_dtype, output_dtype = _get_spline_output(x, output)
data_type = cupy._core._scalar.get_typename(temp.dtype)
pole_type = cupy._core._scalar.get_typename(temp.real.dtype)
index_type = _util._get_inttype(input)
index_dtype = cupy.int32 if index_type == "int" else cupy.int64
n_samples = x.shape[axis]
n_signals = x.size // n_samples
info = cupy.array((n_signals, n_samples) + x.shape, dtype=index_dtype)
# empirical choice of block size that seemed to work well
block_size = max(2 ** math.ceil(numpy.log2(n_samples / 32)), 8)
kern = _spline_prefilter_core.get_raw_spline1d_kernel(
axis,
ndim,
mode,
order=order,
index_type=index_type,
data_type=data_type,
pole_type=pole_type,
block_size=block_size,
)
# Due to recursive nature, a given line of data must be processed by a
# single thread. n_signals lines will be processed in total.
block = (block_size,)
grid = ((n_signals + block[0] - 1) // block[0],)
# apply prefilter gain
poles = _spline_prefilter_core.get_poles(order=order)
temp *= _spline_prefilter_core.get_gain(poles)
# apply causal + anti-causal IIR spline filters
kern(grid, block, (temp, info))
if isinstance(output, cupy.ndarray) and temp is not output:
# copy kernel output into the user-provided output array
_core.elementwise_copy(temp, output)
return output
return temp.astype(output_dtype, copy=False)
def spline_filter(input, order=3, output=cupy.float64, mode="mirror"):
"""Multidimensional spline filter.
Args:
input (cupy.ndarray): The input array.
order (int): The order of the spline interpolation, default is 3. Must
be in the range 0-5.
output (cupy.ndarray or dtype, optional): The array in which to place
the output, or the dtype of the returned array. Default is
``numpy.float64``.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
Returns:
cupy.ndarray: The result of prefiltering the input.
.. seealso:: :func:`scipy.ndimage.spline_filter`
"""
if order < 2 or order > 5:
raise RuntimeError("spline order not supported")
x = input
temp, data_dtype, output_dtype = _get_spline_output(x, output)
if order not in [0, 1] and input.ndim > 0:
for axis in range(x.ndim):
spline_filter1d(x, order, axis, output=temp, mode=mode)
x = temp
if isinstance(output, cupy.ndarray):
_core.elementwise_copy(temp, output)
else:
output = temp
if output.dtype != output_dtype:
output = output.astype(output_dtype)
return output
def _check_coordinates(coordinates, order, allow_float32=True):
if coordinates.dtype.kind == "f":
if allow_float32:
coord_dtype = cupy.promote_types(coordinates.dtype, cupy.float32)
else:
coord_dtype = cupy.promote_types(coordinates.dtype, cupy.float64)
coordinates = coordinates.astype(coord_dtype, copy=False)
elif coordinates.dtype.kind in "iu":
if order > 1:
# order > 1 (spline) kernels require floating-point coordinates
if allow_float32:
coord_dtype = cupy.promote_types(
coordinates.dtype, cupy.float32
)
else:
coord_dtype = cupy.promote_types(
coordinates.dtype, cupy.float64
)
coordinates = coordinates.astype(coord_dtype)
else:
raise ValueError("coordinates should have floating point dtype")
if not coordinates.flags.c_contiguous:
coordinates = cupy.ascontiguousarray(coordinates)
return coordinates
def _prepad_for_spline_filter(input, mode, cval):
if mode in ["nearest", "grid-constant"]:
# these modes need padding to get accurate boundary values
npad = 12 # empirical factor chosen by SciPy
if mode == "grid-constant":
kwargs = dict(mode="constant", constant_values=cval)
else:
kwargs = dict(mode="edge")
padded = pad(input, npad, **kwargs)
else:
npad = 0
padded = input
return padded, npad
def _filter_input(image, prefilter, mode, cval, order):
"""Perform spline prefiltering when needed.
Spline orders > 1 need a prefiltering stage to preserve resolution.
For boundary modes without analytical spline boundary conditions, some
prepadding of the input with pad is used to maintain accuracy.
``npad`` is an integer corresponding to the amount of padding at each edge
of the array.
"""
if not prefilter or order < 2:
return (cupy.ascontiguousarray(image), 0)
padded, npad = _prepad_for_spline_filter(image, mode, cval)
float_dtype = cupy.promote_types(image.dtype, cupy.float32)
filtered = spline_filter(padded, order, output=float_dtype, mode=mode)
return cupy.ascontiguousarray(filtered), npad
def map_coordinates(
input,
coordinates,
output=None,
order=3,
mode="constant",
cval=0.0,
prefilter=True,
):
"""Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output, the
corresponding coordinates in the input. The value of the input at those
coordinates is determined by spline interpolation of the requested order.
The shape of the output is derived from that of the coordinate array by
dropping the first axis. The values of the array along the first axis are
the coordinates in the input array at which the output value is found.
Args:
input (cupy.ndarray): The input array.
coordinates (array_like): The coordinates at which ``input`` is
evaluated.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation, default is 3. Must
be in the range 0-5.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): Determines if the input array is prefiltered with
``spline_filter`` before interpolation. Default is True.
Returns:
cupy.ndarray:
The result of transforming the input. The shape of the output is
derived from that of ``coordinates`` by dropping the first axis.
.. seealso:: :func:`scipy.ndimage.map_coordinates`
"""
_check_parameter("map_coordinates", order, mode)
if mode == "opencv" or mode == "_opencv_edge":
input = pad(
input, [(1, 1)] * input.ndim, "constant", constant_values=cval
)
coordinates = cupy.add(coordinates, 1)
mode = "constant"
ret = _util._get_output(output, input, coordinates.shape[1:])
integer_output = ret.dtype.kind in "iu"
_util._check_cval(mode, cval, integer_output)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
coordinates = _check_coordinates(coordinates, order)
filtered, nprepad = _filter_input(input, prefilter, mode, cval, order)
large_int = max(prod(input.shape), coordinates.shape[0]) > 1 << 31
kern = _interp_kernels._get_map_kernel(
input.ndim,
large_int,
yshape=coordinates.shape,
mode=mode,
cval=cval,
order=order,
integer_output=integer_output,
nprepad=nprepad,
)
kern(filtered, coordinates, ret)
return ret
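# Hedged usage sketch (added): `coordinates` has shape ``(ndim, ...)`` and the
# output shape is ``coordinates.shape[1:]``. `_example_map_coordinates` is a
# hypothetical helper, not part of the vendored module:
def _example_map_coordinates():  # pragma: no cover - illustrative sketch
    """Sample a 2D image at two fractional points; returns an array of shape (2,)."""
    img = cupy.arange(16, dtype=cupy.float32).reshape(4, 4)
    coords = cupy.asarray([[0.5, 2.0],   # row coordinates
                           [1.5, 3.0]])  # column coordinates
    return map_coordinates(img, coords, order=1)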
def affine_transform(
input,
matrix,
offset=0.0,
output_shape=None,
output=None,
order=3,
mode="constant",
cval=0.0,
prefilter=True,
*,
texture_memory=False,
):
"""Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value is
determined from the input image at position
``cupy.dot(matrix, o) + offset``.
Args:
input (cupy.ndarray): The input array.
matrix (cupy.ndarray): The inverse coordinate transformation matrix,
mapping output coordinates to input coordinates. If ``ndim`` is the
number of dimensions of ``input``, the given matrix must have one
of the following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2D transformation matrix is
diagonal, with the diagonal specified by the given value.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always
``[0, 0, ..., 1]``, and may be omitted.
offset (float or sequence): The offset into the array where the
transform is applied. If a float, ``offset`` is the same for each
axis. If a sequence, ``offset`` should contain one value for each
axis.
output_shape (tuple of ints): Shape tuple.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation, default is 3. Must
be in the range 0-5.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): Determines if the input array is prefiltered with
``spline_filter`` before interpolation. Default is True.
texture_memory (bool): If True, uses GPU texture memory. Supports only:
- 2D and 3D float32 arrays as input
- ``(ndim + 1, ndim + 1)`` homogeneous float32 transformation
matrix
- ``mode='constant'`` and ``mode='nearest'``
- ``order=0`` (nearest neighbor) and ``order=1`` (linear
interpolation)
- NVIDIA CUDA GPUs
Returns:
cupy.ndarray or None:
The transformed input. If ``output`` is given as a parameter,
``None`` is returned.
.. seealso:: :func:`scipy.ndimage.affine_transform`
"""
if texture_memory:
# _texture only available in CuPy 10.x so delay the import
# We do not use this texture-based implementation in cuCIM.
from cucim.skimage._vendored import _texture
if runtime.is_hip:
raise RuntimeError(
"HIP currently does not support texture acceleration"
)
tm_interp = "linear" if order > 0 else "nearest"
return _texture.affine_transformation(
data=input,
transformation_matrix=matrix,
output_shape=output_shape,
output=output,
interpolation=tm_interp,
mode=mode,
border_value=cval,
)
_check_parameter("affine_transform", order, mode)
offset = _util._fix_sequence_arg(offset, input.ndim, "offset", float)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError("no proper affine matrix provided")
if matrix.ndim == 2:
if matrix.shape[0] == matrix.shape[1] - 1:
offset = matrix[:, -1]
matrix = matrix[:, :-1]
elif matrix.shape[0] == input.ndim + 1:
offset = matrix[:-1, -1]
matrix = matrix[:-1, :-1]
if matrix.shape != (input.ndim, input.ndim):
raise RuntimeError("improper affine shape")
if mode == "opencv":
m = cupy.zeros((input.ndim + 1, input.ndim + 1))
m[:-1, :-1] = matrix
m[:-1, -1] = offset
m[-1, -1] = 1
m = cupy.linalg.inv(m)
m[:2] = cupy.roll(m[:2], 1, axis=0)
m[:2, :2] = cupy.roll(m[:2, :2], 1, axis=1)
matrix = m[:-1, :-1]
offset = m[:-1, -1]
if output_shape is None:
output_shape = input.shape
if mode == "opencv" or mode == "_opencv_edge":
if matrix.ndim == 1:
matrix = cupy.diag(matrix)
coordinates = cupy.indices(output_shape, dtype=cupy.float64)
coordinates = cupy.dot(matrix, coordinates.reshape((input.ndim, -1)))
coordinates += cupy.expand_dims(cupy.asarray(offset), -1)
ret = _util._get_output(output, input, shape=output_shape)
ret[:] = map_coordinates(
input, coordinates, ret.dtype, order, mode, cval, prefilter
).reshape(output_shape)
return ret
matrix = matrix.astype(cupy.float64, copy=False)
ndim = input.ndim
output = _util._get_output(output, input, shape=output_shape)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
filtered, nprepad = _filter_input(input, prefilter, mode, cval, order)
integer_output = output.dtype.kind in "iu"
_util._check_cval(mode, cval, integer_output)
large_int = max(prod(input.shape), prod(output_shape)) > 1 << 31
if matrix.ndim == 1:
offset = cupy.asarray(offset, dtype=cupy.float64)
offset = -offset / matrix
kern = _interp_kernels._get_zoom_shift_kernel(
ndim,
large_int,
output_shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
nprepad=nprepad,
)
kern(filtered, offset, matrix, output)
else:
kern = _interp_kernels._get_affine_kernel(
ndim,
large_int,
output_shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
nprepad=nprepad,
)
m = cupy.zeros((ndim, ndim + 1), dtype=cupy.float64)
m[:, :-1] = matrix
m[:, -1] = cupy.asarray(offset, dtype=cupy.float64)
kern(filtered, m, output)
return output
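# Hedged usage sketch (added): as documented above, `matrix` maps output
# coordinates to input coordinates, so a diagonal value of 2.0 samples the
# input at twice the output coordinate (2x subsampling).
# `_example_affine_downscale` is a hypothetical helper, not part of the
# vendored module:
def _example_affine_downscale():  # pragma: no cover - illustrative sketch
    """Downscale an 8x8 float32 image to 4x4 with a diagonal matrix."""
    img = cupy.random.random((8, 8)).astype(cupy.float32)
    return affine_transform(img, cupy.asarray([2.0, 2.0]),
                            output_shape=(4, 4), order=1)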
def _minmax(coor, minc, maxc):
if coor[0] < minc[0]:
minc[0] = coor[0]
elif coor[0] > maxc[0]:
maxc[0] = coor[0]
if coor[1] < minc[1]:
minc[1] = coor[1]
elif coor[1] > maxc[1]:
maxc[1] = coor[1]
return minc, maxc
def rotate(
input,
angle,
axes=(1, 0),
reshape=True,
output=None,
order=3,
mode="constant",
cval=0.0,
prefilter=True,
):
"""Rotate an array.
The array is rotated in the plane defined by the two axes given by the
``axes`` parameter using spline interpolation of the requested order.
Args:
input (cupy.ndarray): The input array.
angle (float): The rotation angle in degrees.
axes (tuple of 2 ints): The two axes that define the plane of rotation.
Default is the first two axes.
reshape (bool): If ``reshape`` is True, the output shape is adapted so
that the input array is contained completely in the output. Default
is True.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation, default is 3. Must
be in the range 0-5.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): Determines if the input array is prefiltered with
``spline_filter`` before interpolation. Default is True.
Returns:
cupy.ndarray or None:
The rotated input.
.. seealso:: :func:`scipy.ndimage.rotate`
"""
_check_parameter("rotate", order, mode)
if mode == "opencv":
mode = "_opencv_edge"
input_arr = input
axes = list(axes)
if axes[0] < 0:
axes[0] += input_arr.ndim
if axes[1] < 0:
axes[1] += input_arr.ndim
if axes[0] > axes[1]:
axes = [axes[1], axes[0]]
if axes[0] < 0 or input_arr.ndim <= axes[1]:
raise ValueError("invalid rotation plane specified")
ndim = input_arr.ndim
rad = math.radians(angle)
sincos = cmath.rect(1, rad)
cos, sin = sincos.real, sincos.imag
# determine offsets and output shape as in scipy.ndimage.rotate
rot_matrix = numpy.array([[cos, sin], [-sin, cos]])
img_shape = numpy.asarray(input_arr.shape)
in_plane_shape = img_shape[axes]
if reshape:
# Compute transformed input bounds
iy, ix = in_plane_shape
out_bounds = rot_matrix @ [[0, 0, iy, iy], [0, ix, 0, ix]]
# Compute the shape of the transformed input plane
out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(cupy.int64)
else:
out_plane_shape = img_shape[axes]
out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
in_center = (in_plane_shape - 1) / 2
output_shape = img_shape
output_shape[axes] = out_plane_shape
output_shape = tuple(output_shape)
matrix = numpy.identity(ndim)
matrix[axes[0], axes[0]] = cos
matrix[axes[0], axes[1]] = sin
matrix[axes[1], axes[0]] = -sin
matrix[axes[1], axes[1]] = cos
offset = numpy.zeros(ndim, dtype=cupy.float64)
offset[axes] = in_center - out_center
matrix = cupy.asarray(matrix)
offset = cupy.asarray(offset)
return affine_transform(
input,
matrix,
offset,
output_shape,
output,
order,
mode,
cval,
prefilter,
)
def shift(
input,
shift,
output=None,
order=3,
mode="constant",
cval=0.0,
prefilter=True,
):
"""Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Args:
input (cupy.ndarray): The input array.
shift (float or sequence): The shift along the axes. If a float,
``shift`` is the same for each axis. If a sequence, ``shift``
should contain one value for each axis.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation, default is 3. Must
be in the range 0-5.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): Determines if the input array is prefiltered with
``spline_filter`` before interpolation. Default is True.
Returns:
cupy.ndarray or None:
The shifted input.
.. seealso:: :func:`scipy.ndimage.shift`
"""
_check_parameter("shift", order, mode)
shift = _util._fix_sequence_arg(shift, input.ndim, "shift", float)
if mode == "opencv":
mode = "_opencv_edge"
output = affine_transform(
input,
cupy.ones(input.ndim, input.dtype),
cupy.negative(cupy.asarray(shift)),
None,
output,
order,
mode,
cval,
prefilter,
)
else:
output = _util._get_output(output, input)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
filtered, nprepad = _filter_input(input, prefilter, mode, cval, order)
integer_output = output.dtype.kind in "iu"
_util._check_cval(mode, cval, integer_output)
large_int = prod(input.shape) > 1 << 31
kern = _interp_kernels._get_shift_kernel(
input.ndim,
large_int,
input.shape,
mode,
cval=cval,
order=order,
integer_output=integer_output,
nprepad=nprepad,
)
shift = cupy.asarray(shift, dtype=cupy.float64, order="C")
if shift.ndim != 1:
raise ValueError("shift must be 1d")
if shift.size != filtered.ndim:
raise ValueError("len(shift) must equal input.ndim")
kern(filtered, shift, output)
return output
def zoom(
input,
zoom,
output=None,
order=3,
mode="constant",
cval=0.0,
prefilter=True,
*,
grid_mode=False,
):
"""Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Args:
input (cupy.ndarray): The input array.
zoom (float or sequence): The zoom factor along the axes. If a float,
``zoom`` is the same for each axis. If a sequence, ``zoom`` should
contain one value for each axis.
output (cupy.ndarray or ~cupy.dtype): The array in which to place the
output, or the dtype of the returned array.
order (int): The order of the spline interpolation, default is 3. Must
be in the range 0-5.
mode (str): Points outside the boundaries of the input are filled
according to the given mode (``'constant'``, ``'nearest'``,
``'mirror'``, ``'reflect'``, ``'wrap'``, ``'grid-mirror'``,
``'grid-wrap'``, ``'grid-constant'`` or ``'opencv'``).
cval (scalar): Value used for points outside the boundaries of
the input if ``mode='constant'`` or ``mode='opencv'``. Default is
0.0
prefilter (bool): Determines if the input array is prefiltered with
``spline_filter`` before interpolation. Default is True.
grid_mode (bool, optional): If False, the distance from the pixel
centers is zoomed. Otherwise, the distance including the full pixel
extent is used. For example, a 1d signal of length 5 is considered
to have length 4 when ``grid_mode`` is False, but length 5 when
``grid_mode`` is True. See the following visual illustration:
.. code-block:: text
| pixel 1 | pixel 2 | pixel 3 | pixel 4 | pixel 5 |
|<-------------------------------------->|
vs.
|<----------------------------------------------->|
The starting point of the arrow in the diagram above corresponds to
coordinate location 0 in each mode.
Returns:
cupy.ndarray or None:
The zoomed input.
.. seealso:: :func:`scipy.ndimage.zoom`
"""
_check_parameter("zoom", order, mode)
zoom = _util._fix_sequence_arg(zoom, input.ndim, "zoom", float)
output_shape = []
for s, z in zip(input.shape, zoom):
output_shape.append(int(round(s * z)))
output_shape = tuple(output_shape)
if mode == "opencv":
zoom = []
offset = []
for in_size, out_size in zip(input.shape, output_shape):
if out_size > 1:
zoom.append(float(in_size) / out_size)
offset.append((zoom[-1] - 1) / 2.0)
else:
zoom.append(0)
offset.append(0)
mode = "nearest"
output = affine_transform(
input,
cupy.asarray(zoom),
offset,
output_shape,
output,
order,
mode,
cval,
prefilter,
)
else:
if grid_mode:
# warn about modes that may have surprising behavior
suggest_mode = None
if mode == "constant":
suggest_mode = "grid-constant"
elif mode == "wrap":
suggest_mode = "grid-wrap"
if suggest_mode is not None:
warnings.warn(
f"It is recommended to use mode = {suggest_mode} instead "
f"of {mode} when grid_mode is True."
)
zoom = []
for in_size, out_size in zip(input.shape, output_shape):
if grid_mode and out_size > 0:
zoom.append(in_size / out_size)
elif out_size > 1:
zoom.append((in_size - 1) / (out_size - 1))
else:
zoom.append(0)
output = _util._get_output(output, input, shape=output_shape)
if input.dtype.kind in "iu":
input = input.astype(cupy.float32)
filtered, nprepad = _filter_input(input, prefilter, mode, cval, order)
integer_output = output.dtype.kind in "iu"
_util._check_cval(mode, cval, integer_output)
large_int = max(prod(input.shape), prod(output_shape)) > 1 << 31
kern = _interp_kernels._get_zoom_kernel(
input.ndim,
large_int,
output_shape,
mode,
order=order,
integer_output=integer_output,
grid_mode=grid_mode,
nprepad=nprepad,
)
zoom = cupy.asarray(zoom, dtype=cupy.float64)
kern(filtered, zoom, output)
return output
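# Hedged usage sketch (added): with the default ``grid_mode=False`` the
# coordinate scaling uses ``(in_size - 1) / (out_size - 1)``, so the endpoints
# of the zoomed signal coincide with the original endpoints; with
# ``grid_mode=True`` the full pixel extent is zoomed instead.
# `_example_zoom_grid_mode` is a hypothetical helper, not part of the vendored
# module:
def _example_zoom_grid_mode():  # pragma: no cover - illustrative sketch
    """Zoom a length-5 signal by 2 with and without grid_mode."""
    x = cupy.arange(5, dtype=cupy.float32)
    y = zoom(x, 2, order=1)  # y.shape == (10,)
    y2 = zoom(x, 2, order=1, mode="grid-constant", grid_mode=True)
    return y, y2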
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/__init__.py
|
"""
This module will hold copies of any upstream CuPy code that is needed, but has
not yet been merged to CuPy master.
"""
from cucim.skimage._vendored._pearsonr import pearsonr
from cucim.skimage._vendored.pad import pad
from cucim.skimage._vendored.signaltools import * # noqa
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_internal.py
|
import math
from functools import reduce
from operator import mul
import cupy
import numpy
try:
# try importing Cython-based private axis handling functions from CuPy
if hasattr(cupy, "_core"):
# CuPy 10 renames core->_core
from cupy._core.internal import _normalize_axis_index # NOQA
from cupy._core.internal import _normalize_axis_indices # NOQA
else:
from cupy.core.internal import _normalize_axis_index # NOQA
from cupy.core.internal import _normalize_axis_indices # NOQA
except ImportError:
# Fallback to local Python implementations
def _normalize_axis_index(axis, ndim): # NOQA
"""
Normalizes an axis index, ``axis``, such that it is a valid positive
index into the shape of an array with ``ndim`` dimensions. Raises a
ValueError with an appropriate message if this is not possible.
Args:
axis (int):
The un-normalized index of the axis. Can be negative
ndim (int):
The number of dimensions of the array that ``axis`` should
be normalized against
Returns:
int:
The normalized axis index, such that
`0 <= normalized_axis < ndim`
"""
if axis < 0:
axis += ndim
if not (0 <= axis < ndim):
raise numpy.AxisError("axis out of bounds")
return axis
def _normalize_axis_indices(axes, ndim): # NOQA
"""Normalize axis indices.
Args:
axes (int, tuple of int or None):
The un-normalized indices of the axis. Can be negative.
ndim (int):
The number of dimensions of the array that ``axis`` should
be normalized against
Returns:
tuple of int:
The tuple of normalized axis indices.
"""
if axes is None:
axes = tuple(range(ndim))
elif not isinstance(axes, tuple):
axes = (axes,)
res = []
for axis in axes:
axis = _normalize_axis_index(axis, ndim)
if axis in res:
raise ValueError("Duplicate value in 'axis'")
res.append(axis)
return tuple(sorted(res))
if hasattr(math, "prod"):
prod = math.prod
else:
def prod(iterable, *, start=1):
return reduce(mul, iterable, start)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/pad.py
|
"""version of cupy.pad that dispatches to elementwise kernels for some boundary
modes: {'edge', 'wrap', 'symmetric', 'reflect'}.
New utility _use_elementwise_kernel determines when to use the new elementwise
kernels, otherwise the existing implementations as in cupy.pad are used.
"""
import numbers
import cupy
import numpy
###############################################################################
# Private utility functions.
def _round_if_needed(arr, dtype):
"""Rounds arr inplace if the destination dtype is an integer."""
if cupy.issubdtype(dtype, cupy.integer):
arr.round(out=arr)  # in-place rounding before integer cast (see cupy/cupy#2330)
def _slice_at_axis(sl, axis):
"""Constructs a tuple of slices to slice an array in the given dimension.
Args:
sl(slice): The slice for the given dimension.
axis(int): The axis to which `sl` is applied. All other dimensions are
left "unsliced".
Returns:
tuple of slices: A tuple with slices matching `shape` in length.
"""
return (slice(None),) * axis + (sl,) + (Ellipsis,)
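# Illustrative note (added): _slice_at_axis(slice(None, 2), axis=1) returns
# (slice(None), slice(None, 2), Ellipsis), which selects the first two entries
# along axis 1 and leaves every other axis untouched.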
def _view_roi(array, original_area_slice, axis):
"""Gets a view of the current region of interest during iterative padding.
When padding multiple dimensions iteratively corner values are
unnecessarily overwritten multiple times. This function reduces the
working area for the first dimensions so that corners are excluded.
Args:
array(cupy.ndarray): The array with the region of interest.
original_area_slice(tuple of slices): Denotes the area with original
values of the unpadded array.
axis(int): The currently padded dimension assuming that `axis` is padded
before `axis` + 1.
Returns:
cupy.ndarray: The region of interest of the original `array`.
"""
axis += 1
sl = (slice(None),) * axis + original_area_slice[axis:]
return array[sl]
def _pad_simple(array, pad_width, fill_value=None):
"""Pads an array on all sides with either a constant or undefined values.
Args:
array(cupy.ndarray): Array to grow.
pad_width(sequence of tuple[int, int]): Pad width on both sides for each
dimension in `arr`.
fill_value(scalar, optional): If provided the padded area is
filled with this value, otherwise the pad area is left undefined.
(Default value = None)
"""
# Allocate grown array
new_shape = tuple(
left + size + right
for size, (left, right) in zip(array.shape, pad_width)
)
order = "F" if array.flags.fnc else "C" # Fortran and not also C-order
padded = cupy.empty(new_shape, dtype=array.dtype, order=order)
if fill_value is not None:
padded.fill(fill_value)
# Copy old array into correct space
original_area_slice = tuple(
slice(left, left + size)
for size, (left, right) in zip(array.shape, pad_width)
)
padded[original_area_slice] = array
return padded, original_area_slice
def _set_pad_area(padded, axis, width_pair, value_pair):
"""Set an empty-padded area in given dimension."""
left_slice = _slice_at_axis(slice(None, width_pair[0]), axis)
padded[left_slice] = value_pair[0]
right_slice = _slice_at_axis(
slice(padded.shape[axis] - width_pair[1], None), axis
)
padded[right_slice] = value_pair[1]
def _get_edges(padded, axis, width_pair):
"""Retrieves edge values from an empty-padded array along a given axis.
Args:
padded(cupy.ndarray): Empty-padded array.
axis(int): Dimension in which the edges are considered.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
"""
left_index = width_pair[0]
left_slice = _slice_at_axis(slice(left_index, left_index + 1), axis)
left_edge = padded[left_slice]
right_index = padded.shape[axis] - width_pair[1]
right_slice = _slice_at_axis(slice(right_index - 1, right_index), axis)
right_edge = padded[right_slice]
return left_edge, right_edge
def _get_linear_ramps(padded, axis, width_pair, end_value_pair):
"""Constructs linear ramps for an empty-padded array along a given axis.
Args:
padded(cupy.ndarray): Empty-padded array.
axis(int): Dimension in which the ramps are constructed.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
end_value_pair((scalar, scalar)): End values for the linear ramps which
form the edge of the fully padded array. These values are included in
the linear ramps.
"""
edge_pair = _get_edges(padded, axis, width_pair)
left_ramp = cupy.linspace(
start=end_value_pair[0],
# squeeze axis replaced by linspace
stop=edge_pair[0].squeeze(axis),
num=width_pair[0],
endpoint=False,
dtype=padded.dtype,
axis=axis,
)
right_ramp = cupy.linspace(
start=end_value_pair[1],
# squeeze axis replaced by linspace
stop=edge_pair[1].squeeze(axis),
num=width_pair[1],
endpoint=False,
dtype=padded.dtype,
axis=axis,
)
# Reverse linear space in appropriate dimension
right_ramp = right_ramp[_slice_at_axis(slice(None, None, -1), axis)]
return left_ramp, right_ramp
def _get_stats(padded, axis, width_pair, length_pair, stat_func):
"""Calculates a statistic for an empty-padded array along a given axis.
Args:
padded(cupy.ndarray): Empty-padded array.
axis(int): Dimension in which the statistic is calculated.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
length_pair(2-element sequence of None or int): Gives the number of
values in valid area from each side that is taken into account when
calculating the statistic. If None the entire valid area in `padded`
is considered.
stat_func(function): Function to compute statistic. The expected
signature is
``stat_func(x: ndarray, axis: int, keepdims: bool) -> ndarray``.
"""
# Calculate indices of the edges of the area with original values
left_index = width_pair[0]
right_index = padded.shape[axis] - width_pair[1]
# as well as its length
max_length = right_index - left_index
# Limit stat_lengths to max_length
left_length, right_length = length_pair
if left_length is None or max_length < left_length:
left_length = max_length
if right_length is None or max_length < right_length:
right_length = max_length
# Calculate statistic for the left side
left_slice = _slice_at_axis(
slice(left_index, left_index + left_length), axis
)
left_chunk = padded[left_slice]
left_stat = stat_func(left_chunk, axis=axis, keepdims=True)
_round_if_needed(left_stat, padded.dtype)
if left_length == right_length == max_length:
# return early as right_stat must be identical to left_stat
return left_stat, left_stat
# Calculate statistic for the right side
right_slice = _slice_at_axis(
slice(right_index - right_length, right_index), axis
)
right_chunk = padded[right_slice]
right_stat = stat_func(right_chunk, axis=axis, keepdims=True)
_round_if_needed(right_stat, padded.dtype)
return left_stat, right_stat
def _set_reflect_both(padded, axis, width_pair, method, include_edge=False):
"""Pads an `axis` of `arr` using reflection.
Args:
padded(cupy.ndarray): Input array of arbitrary shape.
axis(int): Axis along which to pad `arr`.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
method(str): Controls method of reflection; options are 'even' or 'odd'.
include_edge(bool, optional): If true, edge value is included in
reflection, otherwise the edge value forms the symmetric axis to the
reflection. (Default value = False)
"""
left_pad, right_pad = width_pair
old_length = padded.shape[axis] - right_pad - left_pad
if include_edge:
# Edge is included, we need to offset the pad amount by 1
edge_offset = 1
else:
edge_offset = 0 # Edge is not included, no need to offset pad amount
old_length -= 1 # but must be omitted from the chunk
if left_pad > 0:
# Pad with reflected values on left side:
# First limit chunk size which can't be larger than pad area
chunk_length = min(old_length, left_pad)
# Slice right to left, stop on or next to edge, start relative to stop
stop = left_pad - edge_offset
start = stop + chunk_length
left_slice = _slice_at_axis(slice(start, stop, -1), axis)
left_chunk = padded[left_slice]
if method == "odd":
# Negate chunk and align with edge
edge_slice = _slice_at_axis(slice(left_pad, left_pad + 1), axis)
left_chunk = 2 * padded[edge_slice] - left_chunk
# Insert chunk into padded area
start = left_pad - chunk_length
stop = left_pad
pad_area = _slice_at_axis(slice(start, stop), axis)
padded[pad_area] = left_chunk
# Adjust pointer to left edge for next iteration
left_pad -= chunk_length
if right_pad > 0:
# Pad with reflected values on right side:
# First limit chunk size which can't be larger than pad area
chunk_length = min(old_length, right_pad)
# Slice right to left, start on or next to edge, stop relative to start
start = -right_pad + edge_offset - 2
stop = start - chunk_length
right_slice = _slice_at_axis(slice(start, stop, -1), axis)
right_chunk = padded[right_slice]
if method == "odd":
# Negate chunk and align with edge
edge_slice = _slice_at_axis(slice(-right_pad - 1, -right_pad), axis)
right_chunk = 2 * padded[edge_slice] - right_chunk
# Insert chunk into padded area
start = padded.shape[axis] - right_pad
stop = start + chunk_length
pad_area = _slice_at_axis(slice(start, stop), axis)
padded[pad_area] = right_chunk
# Adjust pointer to right edge for next iteration
right_pad -= chunk_length
return left_pad, right_pad
def _set_wrap_both(padded, axis, width_pair):
"""Pads an `axis` of `arr` with wrapped values.
Args:
padded(cupy.ndarray): Input array of arbitrary shape.
axis(int): Axis along which to pad `arr`.
width_pair((int, int)): Pair of widths that mark the pad area on both
sides in the given dimension.
"""
left_pad, right_pad = width_pair
period = padded.shape[axis] - right_pad - left_pad
# If the current dimension of `arr` doesn't contain enough valid values
# (not part of the undefined pad area) we need to pad multiple times.
# Each time the pad area shrinks on both sides which is communicated with
# these variables.
new_left_pad = 0
new_right_pad = 0
if left_pad > 0:
# Pad with wrapped values on left side
# First slice chunk from right side of the non-pad area.
# Use min(period, left_pad) to ensure that chunk is not larger than
# pad area
right_slice = _slice_at_axis(
slice(
-right_pad - min(period, left_pad),
-right_pad if right_pad != 0 else None,
),
axis,
)
right_chunk = padded[right_slice]
if left_pad > period:
# Chunk is smaller than pad area
pad_area = _slice_at_axis(slice(left_pad - period, left_pad), axis)
new_left_pad = left_pad - period
else:
# Chunk matches pad area
pad_area = _slice_at_axis(slice(None, left_pad), axis)
padded[pad_area] = right_chunk
if right_pad > 0:
# Pad with wrapped values on right side
# First slice chunk from left side of the non-pad area.
# Use min(period, right_pad) to ensure that chunk is not larger than
# pad area
left_slice = _slice_at_axis(
slice(left_pad, left_pad + min(period, right_pad)), axis
)
left_chunk = padded[left_slice]
if right_pad > period:
# Chunk is smaller than pad area
pad_area = _slice_at_axis(
slice(-right_pad, -right_pad + period), axis
)
new_right_pad = right_pad - period
else:
# Chunk matches pad area
pad_area = _slice_at_axis(slice(-right_pad, None), axis)
padded[pad_area] = left_chunk
return new_left_pad, new_right_pad
def _as_pairs(x, ndim, as_index=False):
"""Broadcasts `x` to an array with shape (`ndim`, 2).
A helper function for `pad` that prepares and validates arguments like
`pad_width` for iteration in pairs.
Args:
x(scalar or array-like, optional): The object to broadcast to the shape
(`ndim`, 2).
ndim(int): Number of pairs the broadcasted `x` will have.
as_index(bool, optional): If `x` is not None, try to round each
element of `x` to an integer (dtype `cupy.intp`) and ensure every
element is positive. (Default value = False)
Returns:
nested iterables, shape (`ndim`, 2): The broadcasted version of `x`.
"""
if x is None:
# Pass through None as a special case, otherwise cupy.round(x) fails
# with an AttributeError
return ((None, None),) * ndim
elif isinstance(x, numbers.Number):
if as_index:
x = round(x)
return ((x, x),) * ndim
x = numpy.array(x)
if as_index:
x = numpy.asarray(numpy.round(x), dtype=numpy.intp)
if x.ndim < 3:
# Optimization: Possibly use faster paths for cases where `x` has
# only 1 or 2 elements. `numpy.broadcast_to` could handle these as well
# but is currently slower
if x.size == 1:
# x was supplied as a single value
x = x.ravel() # Ensure x[0] works for x.ndim == 0, 1, 2
if as_index and x < 0:
raise ValueError("index can't contain negative values")
return ((x[0], x[0]),) * ndim
if x.size == 2 and x.shape != (2, 1):
# x was supplied with a single value for each side
# but except case when each dimension has a single value
# which should be broadcasted to a pair,
# e.g. [[1], [2]] -> [[1, 1], [2, 2]] not [[1, 2], [1, 2]]
x = x.ravel() # Ensure x[0], x[1] works
if as_index and (x[0] < 0 or x[1] < 0):
raise ValueError("index can't contain negative values")
return ((x[0], x[1]),) * ndim
if as_index and x.min() < 0:
raise ValueError("index can't contain negative values")
# Converting the array with `tolist` seems to improve performance
# when iterating and indexing the result (see usage in `pad`)
x_view = x.view()
x_view.shape = (ndim, 2)
return x_view.tolist()
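# Illustrative sketch (added for exposition; values follow the fast paths
# above): _as_pairs normalizes the accepted `pad_width` spellings into one
# (before, after) pair per axis, e.g.
#   _as_pairs(2, 3, as_index=True)                 # -> ((2, 2), (2, 2), (2, 2))
#   _as_pairs((1, 2), 3, as_index=True)            # -> ((1, 2), (1, 2), (1, 2))
#   _as_pairs(((1, 2), (3, 4)), 2, as_index=True)  # -> [[1, 2], [3, 4]]
#   _as_pairs(None, 2)                             # -> ((None, None), (None, None))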
def _use_elementwise_kernel(arr, mode, kwargs):
"""Determine if we can use an ElementwiseKernel from pad_elementwise.py"""
use_elementwise = False
if arr.ndim == 0 or arr.size == 0:
return False
if mode in ("edge", "wrap"):
use_elementwise = True
# elif mode == 'constant':
# # Only a uniform constant is supported in the Elementwise kernel.
# # A per-axis constant is not currently supported.
# return isinstance(kwargs.get('constant_values', 0), numbers.Number)
elif mode in ("symmetric", "reflect"):
# only the default 'even' reflect type is supported
use_elementwise = kwargs.get("reflect_type", "even") == "even"
if use_elementwise:
if arr.ndim > 2 and (arr.flags.fnc and arr.nbytes > 5_000_000):
# Empirically found slower performance for large Fortran-ordered
# arrays with ndim > 2.
return False
return True
return False
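# Illustrative sketch (added for exposition; assumes a small C-contiguous
# cupy array) of which modes take the fused ElementwiseKernel fast path:
#   import cupy
#   img = cupy.arange(12, dtype=cupy.float32).reshape(3, 4)
#   _use_elementwise_kernel(img, "edge", {})                          # True
#   _use_elementwise_kernel(img, "reflect", {"reflect_type": "odd"})  # False
#   _use_elementwise_kernel(img, "constant", {})                      # False (generic path)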
###############################################################################
# Public functions
# @array_function_dispatch(_pad_dispatcher, module='numpy')
def pad(array, pad_width, mode="constant", **kwargs):
"""Pads an array with specified widths and values.
Args:
array(cupy.ndarray): The array to pad.
pad_width(sequence, array_like or int): Number of values padded to the
edges of each axis. ((before_1, after_1), ... (before_N, after_N))
unique pad widths for each axis. ((before, after),) yields same
before and after pad for each axis. (pad,) or int is a shortcut for
before = after = pad width for all axes. You cannot specify
``cupy.ndarray``.
mode(str or function, optional): One of the following string values or a
user supplied function
'constant' (default)
Pads with a constant value.
'edge'
Pads with the edge values of array.
'linear_ramp'
Pads with the linear ramp between end_value and the array edge
value.
'maximum'
Pads with the maximum value of all or part of the vector along
each axis.
'mean'
Pads with the mean value of all or part of the vector along each
axis.
'median'
Pads with the median value of all or part of the vector along
each axis. (Not Implemented)
'minimum'
Pads with the minimum value of all or part of the vector along
each axis.
'reflect'
Pads with the reflection of the vector mirrored on the first and
last values of the vector along each axis.
'symmetric'
Pads with the reflection of the vector mirrored along the edge
of the array.
'wrap'
Pads with the wrap of the vector along the axis. The first
values are used to pad the end and the end values are used to
pad the beginning.
'empty'
Pads with undefined values.
<function>
Padding function, see Notes.
stat_length(sequence or int, optional): Used in 'maximum', 'mean',
'median', and 'minimum'. Number of values at edge of each axis used
to calculate the statistic value.
((before_1, after_1), ... (before_N, after_N)) unique statistic
lengths for each axis. ((before, after),) yields same before and
after statistic lengths for each axis. (stat_length,) or int is a
shortcut for before = after = statistic length for all axes.
Default is ``None``, to use the entire axis. You cannot specify
``cupy.ndarray``.
constant_values(sequence or scalar, optional): Used in 'constant'. The
values to set the padded values for each axis.
((before_1, after_1), ... (before_N, after_N)) unique pad constants
for each axis.
((before, after),) yields same before and after constants for each
axis.
(constant,) or constant is a shortcut for before = after = constant
for all axes.
Default is 0. You cannot specify ``cupy.ndarray``.
end_values(sequence or scalar, optional): Used in 'linear_ramp'. The
values used for the ending value of the linear_ramp and that will
form the edge of the padded array.
((before_1, after_1), ... (before_N, after_N)) unique end values
for each axis.
((before, after),) yields same before and after end
values for each axis.
(constant,) or constant is a shortcut for before = after = constant
for all axes.
Default is 0. You cannot specify ``cupy.ndarray``.
reflect_type({'even', 'odd'}, optional): Used in 'reflect', and
'symmetric'. The 'even' style is the default with an unaltered
reflection around the edge value. For the 'odd' style, the extended
part of the array is created by subtracting the reflected values from
two times the edge value.
Returns:
cupy.ndarray: Padded array with shape extended by ``pad_width``.
.. note::
For an array with rank greater than 1, some of the padding of later
axes is calculated from padding of previous axes. This is easiest to
think about with a rank 2 array where the corners of the padded array
are calculated by using padded values from the first axis.
The padding function, if used, should modify a rank 1 array in-place.
It has the following signature:
``padding_func(vector, iaxis_pad_width, iaxis, kwargs)``
where
vector (cupy.ndarray)
A rank 1 array already padded with zeros. Padded values are
``vector[:iaxis_pad_width[0]]`` and
``vector[-iaxis_pad_width[1]:]``.
iaxis_pad_width (tuple)
A 2-tuple of ints, ``iaxis_pad_width[0]`` represents the number of
values padded at the beginning of vector where
``iaxis_pad_width[1]`` represents the number of values padded at
the end of vector.
iaxis (int)
The axis currently being calculated.
kwargs (dict)
Any keyword arguments the function requires.
Examples
--------
>>> a = cupy.array([1, 2, 3, 4, 5])
>>> cupy.pad(a, (2, 3), 'constant', constant_values=(4, 6))
array([4, 4, 1, ..., 6, 6, 6])
>>> cupy.pad(a, (2, 3), 'edge')
array([1, 1, 1, ..., 5, 5, 5])
>>> cupy.pad(a, (2, 3), 'linear_ramp', end_values=(5, -4))
array([ 5, 3, 1, 2, 3, 4, 5, 2, -1, -4])
>>> cupy.pad(a, (2,), 'maximum')
array([5, 5, 1, 2, 3, 4, 5, 5, 5])
>>> cupy.pad(a, (2,), 'mean')
array([3, 3, 1, 2, 3, 4, 5, 3, 3])
>>> a = cupy.array([[1, 2], [3, 4]])
>>> cupy.pad(a, ((3, 2), (2, 3)), 'minimum')
array([[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1],
[3, 3, 3, 4, 3, 3, 3],
[1, 1, 1, 2, 1, 1, 1],
[1, 1, 1, 2, 1, 1, 1]])
>>> a = cupy.array([1, 2, 3, 4, 5])
>>> cupy.pad(a, (2, 3), 'reflect')
array([3, 2, 1, 2, 3, 4, 5, 4, 3, 2])
>>> cupy.pad(a, (2, 3), 'reflect', reflect_type='odd')
array([-1, 0, 1, 2, 3, 4, 5, 6, 7, 8])
>>> cupy.pad(a, (2, 3), 'symmetric')
array([2, 1, 1, 2, 3, 4, 5, 5, 4, 3])
>>> cupy.pad(a, (2, 3), 'symmetric', reflect_type='odd')
array([0, 1, 1, 2, 3, 4, 5, 5, 6, 7])
>>> cupy.pad(a, (2, 3), 'wrap')
array([4, 5, 1, 2, 3, 4, 5, 1, 2, 3])
>>> def pad_with(vector, pad_width, iaxis, kwargs):
... pad_value = kwargs.get('padder', 10)
... vector[:pad_width[0]] = pad_value
... vector[-pad_width[1]:] = pad_value
>>> a = cupy.arange(6)
>>> a = a.reshape((2, 3))
>>> cupy.pad(a, 2, pad_with)
array([[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]])
>>> cupy.pad(a, 2, pad_with, padder=100)
array([[100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100],
[100, 100, 0, 1, 2, 100, 100],
[100, 100, 3, 4, 5, 100, 100],
[100, 100, 100, 100, 100, 100, 100],
[100, 100, 100, 100, 100, 100, 100]])
"""
if isinstance(pad_width, numbers.Integral):
pad_width = ((pad_width, pad_width),) * array.ndim
else:
pad_width = numpy.asarray(pad_width)
if not pad_width.dtype.kind == "i":
raise TypeError("`pad_width` must be of integral type.")
# Broadcast to shape (array.ndim, 2)
pad_width = _as_pairs(pad_width, array.ndim, as_index=True)
if callable(mode):
# Old behavior: Use user-supplied function with numpy.apply_along_axis
function = mode
# Create a new zero padded array
padded, _ = _pad_simple(array, pad_width, fill_value=0)
# And apply along each axis
for axis in range(padded.ndim):
# Iterate using ndindex as in apply_along_axis, but assuming that
# function operates inplace on the padded array.
# view with the iteration axis at the end
view = cupy.moveaxis(padded, axis, -1)
# compute indices for the iteration axes, and append a trailing
# ellipsis to prevent 0d arrays decaying to scalars (gh-8642)
inds = numpy.ndindex(view.shape[:-1])
inds = (ind + (Ellipsis,) for ind in inds)
for ind in inds:
function(view[ind], pad_width[axis], axis, kwargs)
return padded
# Make sure that no unsupported keywords were passed for the current mode
allowed_kwargs = {
"empty": [],
"edge": [],
"wrap": [],
"constant": ["constant_values"],
"linear_ramp": ["end_values"],
"maximum": ["stat_length"],
"mean": ["stat_length"],
# 'median': ['stat_length'],
"minimum": ["stat_length"],
"reflect": ["reflect_type"],
"symmetric": ["reflect_type"],
}
try:
unsupported_kwargs = set(kwargs) - set(allowed_kwargs[mode])
except KeyError:
raise ValueError("mode '{}' is not supported".format(mode))
if unsupported_kwargs:
raise ValueError(
"unsupported keyword arguments for mode '{}': {}".format(
mode, unsupported_kwargs
)
)
if _use_elementwise_kernel(array, mode, kwargs):
# import here to avoid circular import
from cucim.skimage._vendored.pad_elementwise import _get_pad_kernel
if mode == "reflect" and min(array.shape) > 1:
mode = "reflect_no_singleton_dim"
if not array.flags.forc:
# make non-contiguous input C-contiguous
array = cupy.ascontiguousarray(array)
# Allocate grown array
new_shape = tuple(
left + size + right
for size, (left, right) in zip(array.shape, pad_width)
)
order = "F" if array.flags.fnc else "C" # Fortran and not also C-order
padded = cupy.empty(new_shape, dtype=array.dtype, order=order)
(int_type, np_type) = (
("int", cupy.int32)
if padded.size < (1 << 31)
else ("ptrdiff_t", cupy.intp)
)
kern = _get_pad_kernel(
pad_starts=tuple(p[0] for p in pad_width),
mode=mode,
int_type=int_type,
order=order,
)
# pad_width must be C-contiguous
if mode == "constant":
# `_use_elementwise_kernel` excludes cases with non-scalar cval
cval = float(kwargs.get("constant_values", 0))
kern(array, cval, padded, size=padded.size)
else:
kern(array, padded, size=padded.size)
return padded
if mode == "constant":
values = kwargs.get("constant_values", 0)
if (
isinstance(values, numbers.Number)
and values == 0
and (array.ndim == 1 or array.size < 4e6)
):
# faster path for 1d arrays or small n-dimensional arrays
return _pad_simple(array, pad_width, 0)[0]
stat_functions = {
"maximum": cupy.max,
"minimum": cupy.min,
"mean": cupy.mean,
# 'median': cupy.median,
}
# Create array with final shape and original values
# (padded area is undefined)
padded, original_area_slice = _pad_simple(array, pad_width)
# And prepare iteration over all dimensions
# (zipping may be more readable than using enumerate)
axes = range(padded.ndim)
if mode == "constant":
values = _as_pairs(values, padded.ndim)
for axis, width_pair, value_pair in zip(axes, pad_width, values):
roi = _view_roi(padded, original_area_slice, axis)
_set_pad_area(roi, axis, width_pair, value_pair)
elif mode == "empty":
pass # Do nothing as _pad_simple already returned the correct result
elif array.size == 0:
# Only modes 'constant' and 'empty' can extend empty axes, all other
# modes depend on `array` not being empty
# -> ensure every empty axis is only 'padded with 0'
for axis, width_pair in zip(axes, pad_width):
if array.shape[axis] == 0 and any(width_pair):
raise ValueError(
"can't extend empty axis {} using modes other than "
"'constant' or 'empty'".format(axis)
)
# passed, don't need to do anything more as _pad_simple already
# returned the correct result
elif mode == "edge":
for axis, width_pair in zip(axes, pad_width):
roi = _view_roi(padded, original_area_slice, axis)
edge_pair = _get_edges(roi, axis, width_pair)
_set_pad_area(roi, axis, width_pair, edge_pair)
elif mode == "linear_ramp":
end_values = kwargs.get("end_values", 0)
end_values = _as_pairs(end_values, padded.ndim)
for axis, width_pair, value_pair in zip(axes, pad_width, end_values):
roi = _view_roi(padded, original_area_slice, axis)
ramp_pair = _get_linear_ramps(roi, axis, width_pair, value_pair)
_set_pad_area(roi, axis, width_pair, ramp_pair)
elif mode in stat_functions:
func = stat_functions[mode]
length = kwargs.get("stat_length", None)
length = _as_pairs(length, padded.ndim, as_index=True)
for axis, width_pair, length_pair in zip(axes, pad_width, length):
roi = _view_roi(padded, original_area_slice, axis)
stat_pair = _get_stats(roi, axis, width_pair, length_pair, func)
_set_pad_area(roi, axis, width_pair, stat_pair)
elif mode in {"reflect", "symmetric"}:
method = kwargs.get("reflect_type", "even")
include_edge = True if mode == "symmetric" else False
for axis, (left_index, right_index) in zip(axes, pad_width):
if array.shape[axis] == 1 and (left_index > 0 or right_index > 0):
# Extending singleton dimension for 'reflect' is legacy
# behavior; it really should raise an error.
edge_pair = _get_edges(padded, axis, (left_index, right_index))
_set_pad_area(
padded, axis, (left_index, right_index), edge_pair
)
continue
roi = _view_roi(padded, original_area_slice, axis)
while left_index > 0 or right_index > 0:
# Iteratively pad until dimension is filled with reflected
# values. This is necessary if the pad area is larger than
# the length of the original values in the current dimension.
left_index, right_index = _set_reflect_both(
roi, axis, (left_index, right_index), method, include_edge
)
elif mode == "wrap":
for axis, (left_index, right_index) in zip(axes, pad_width):
roi = _view_roi(padded, original_area_slice, axis)
while left_index > 0 or right_index > 0:
# Iteratively pad until dimension is filled with wrapped
# values. This is necessary if the pad area is larger than
# the length of the original values in the current dimension.
left_index, right_index = _set_wrap_both(
roi, axis, (left_index, right_index)
)
return padded
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_vendored/_ndimage_morphology.py
|
import operator
import warnings
import cupy
import numpy
from cupy import _core
from cucim.skimage._vendored import (
_internal as internal,
_ndimage_filters as _filters,
_ndimage_filters_core as _filters_core,
_ndimage_util as _util,
)
@cupy.memoize(for_each_device=True)
def _get_binary_erosion_kernel(
w_shape,
int_type,
offsets,
center_is_true,
border_value,
invert,
masked,
all_weights_nonzero,
):
if invert:
border_value = int(not border_value)
true_val = 0
false_val = 1
else:
true_val = 1
false_val = 0
if masked:
pre = """
bool mv = (bool)mask[i];
bool _in = (bool)x[i];
if (!mv) {{
y = cast<Y>(_in);
return;
}} else if ({center_is_true} && _in == {false_val}) {{
y = cast<Y>(_in);
return;
}}""".format(
center_is_true=int(center_is_true), false_val=false_val
)
else:
pre = """
bool _in = (bool)x[i];
if ({center_is_true} && _in == {false_val}) {{
y = cast<Y>(_in);
return;
}}""".format(
center_is_true=int(center_is_true), false_val=false_val
)
pre = (
pre
+ """
y = cast<Y>({true_val});""".format(
true_val=true_val
)
)
# {{{{ required because format is called again within _generate_nd_kernel
found = """
if ({{cond}}) {{{{
if (!{border_value}) {{{{
y = cast<Y>({false_val});
return;
}}}}
}}}} else {{{{
bool nn = {{value}} ? {true_val} : {false_val};
if (!nn) {{{{
y = cast<Y>({false_val});
return;
}}}}
}}}}""".format(
true_val=int(true_val),
false_val=int(false_val),
border_value=int(border_value),
)
name = "binary_erosion"
if false_val:
name += "_invert"
has_weights = not all_weights_nonzero
return _filters_core._generate_nd_kernel(
name,
pre,
found,
"",
"constant",
w_shape,
int_type,
offsets,
0,
ctype="Y",
has_weights=has_weights,
has_structure=False,
has_mask=masked,
binary_morphology=True,
)
def _center_is_true(structure, origin):
coor = tuple([oo + ss // 2 for ss, oo in zip(structure.shape, origin)])
return bool(structure[coor]) # device synchronization
def iterate_structure(structure, iterations, origin=None):
"""Iterate a structure by dilating it with itself.
Args:
structure(array_like): Structuring element (an array of bools,
for example), to be dilated with itself.
iterations(int): The number of dilations performed on the structure
with itself.
origin(int or tuple of int, optional): If origin is None, only the
iterated structure is returned. If not, a tuple of the iterated
structure and the modified origin is returned.
Returns:
cupy.ndarray: A new structuring element obtained by dilating
``structure`` (``iterations`` - 1) times with itself.
.. seealso:: :func:`scipy.ndimage.iterate_structure`
"""
if iterations < 2:
return structure.copy()
ni = iterations - 1
shape = [ii + ni * (ii - 1) for ii in structure.shape]
pos = [ni * (structure.shape[ii] // 2) for ii in range(len(shape))]
slc = tuple(
slice(pos[ii], pos[ii] + structure.shape[ii], None)
for ii in range(len(shape))
)
out = cupy.zeros(shape, bool)
out[slc] = structure != 0
out = binary_dilation(out, structure, iterations=ni)
if origin is None:
return out
else:
origin = _util._fix_sequence_arg(origin, structure.ndim, "origin", int)
origin = [iterations * o for o in origin]
return out, origin
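# Illustrative usage sketch (added for exposition; assumes a CUDA device):
#   struct = generate_binary_structure(2, 1)   # 3x3 cross
#   big = iterate_structure(struct, 2)          # 5x5 city-block "diamond"
#   big.shape                                   # -> (5, 5)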
def generate_binary_structure(rank, connectivity):
"""Generate a binary structure for binary morphological operations.
Args:
rank(int): Number of dimensions of the array to which the structuring
element will be applied, as returned by ``np.ndim``.
connectivity(int): ``connectivity`` determines which elements of the
output array belong to the structure, i.e., are considered as
neighbors of the central element. Elements up to a squared distance
of ``connectivity`` from the center are considered neighbors.
``connectivity`` may range from 1 (no diagonal elements are
neighbors) to ``rank`` (all elements are neighbors).
Returns:
cupy.ndarray: Structuring element which may be used for binary
morphological operations, with ``rank`` dimensions and all
dimensions equal to 3.
.. seealso:: :func:`scipy.ndimage.generate_binary_structure`
"""
if connectivity < 1:
connectivity = 1
if rank < 1:
return cupy.asarray(True, dtype=bool)
output = numpy.fabs(numpy.indices([3] * rank) - 1)
output = numpy.add.reduce(output, 0)
output = output <= connectivity
return cupy.asarray(output)
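# Illustrative sketch (added for exposition):
#   generate_binary_structure(2, 1)   # 3x3 cross (no diagonal neighbors)
#   generate_binary_structure(2, 2)   # full 3x3 array of True (diagonals too)
#   generate_binary_structure(3, 1)   # 3x3x3 element with 6-connectivity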
def _binary_erosion(
input,
structure,
iterations,
mask,
output,
border_value,
origin,
invert,
brute_force=True,
):
try:
iterations = operator.index(iterations)
except TypeError:
raise TypeError("iterations parameter should be an integer")
if input.dtype.kind == "c":
raise TypeError("Complex type not supported")
default_structure = False
if structure is None:
structure = generate_binary_structure(input.ndim, 1)
all_weights_nonzero = input.ndim == 1
center_is_true = True
structure_shape = structure.shape
elif isinstance(structure, tuple):
# For a structure that is true everywhere, can just provide the shape
structure_shape = structure
if len(structure_shape) == 0:
raise RuntimeError("structure must not be empty")
else:
structure = structure.astype(dtype=bool, copy=False)
structure_shape = structure.shape
# transfer to CPU for use in determining if it is fully dense
# structure_cpu = cupy.asnumpy(structure)
if structure.ndim != input.ndim:
raise RuntimeError(
"structure and input must have same dimensionality"
)
if not structure.flags.c_contiguous:
structure = cupy.ascontiguousarray(structure)
if structure.size < 1:
raise RuntimeError("structure must not be empty")
if mask is not None:
if mask.shape != input.shape:
raise RuntimeError("mask and input must have equal sizes")
if not mask.flags.c_contiguous:
mask = cupy.ascontiguousarray(mask)
masked = True
else:
masked = False
origin = _util._fix_sequence_arg(origin, input.ndim, "origin", int)
if isinstance(output, cupy.ndarray):
if output.dtype.kind == "c":
raise TypeError("Complex output type not supported")
else:
output = bool
output = _util._get_output(output, input)
temp_needed = cupy.shares_memory(output, input, "MAY_SHARE_BOUNDS")
if temp_needed:
# input and output arrays cannot share memory
temp = output
output = _util._get_output(output.dtype, input)
if len(structure_shape) == 0:
# kernel doesn't handle ndim=0, so special case it here
if isinstance(structure, tuple) or float(structure):
output[...] = cupy.asarray(input, dtype=bool)
else:
output[...] = ~cupy.asarray(input, dtype=bool)
return output
origin = tuple(origin)
int_type = _util._get_inttype(input)
offsets = _filters_core._origins_to_offsets(origin, structure_shape)
if not default_structure:
if isinstance(structure, tuple):
nnz = internal.prod(structure_shape)
all_weights_nonzero = True
center_is_true = True
else:
# synchronize required to determine if all weights are non-zero
nnz = int(cupy.count_nonzero(structure))
all_weights_nonzero = nnz == structure.size
if all_weights_nonzero:
center_is_true = True
else:
center_is_true = _center_is_true(structure, origin)
erode_kernel = _get_binary_erosion_kernel(
structure_shape,
int_type,
offsets,
center_is_true,
border_value,
invert,
masked,
all_weights_nonzero,
)
if all_weights_nonzero:
if masked:
in_args = (input, mask)
else:
in_args = (input,)
else:
if masked:
in_args = (input, structure, mask)
else:
in_args = (input, structure)
if iterations == 1:
output = erode_kernel(*in_args, output)
elif center_is_true and not brute_force:
raise NotImplementedError(
"only brute_force iteration has been implemented"
)
else:
if cupy.shares_memory(output, input, "MAY_SHARE_BOUNDS"):
raise ValueError("output and input may not overlap in memory")
tmp_in = cupy.empty_like(input, dtype=output.dtype)
tmp_out = output
if iterations >= 1 and not iterations & 1:
tmp_in, tmp_out = tmp_out, tmp_in
tmp_out = erode_kernel(*in_args, tmp_out)
# TODO: kernel doesn't return the changed status, so determine it here
changed = not (input == tmp_out).all() # synchronize!
ii = 1
while ii < iterations or ((iterations < 1) and changed):
tmp_in, tmp_out = tmp_out, tmp_in
if all_weights_nonzero:
if masked:
in_args = (tmp_in, mask)
else:
in_args = (tmp_in,)
else:
if masked:
in_args = (tmp_in, structure, mask)
else:
in_args = (tmp_in, structure)
tmp_out = erode_kernel(*in_args, tmp_out)
changed = not (tmp_in == tmp_out).all()
ii += 1
if not changed and (not ii & 1): # synchronize!
# can exit early if nothing changed
# (only do this after even number of tmp_in/out swaps)
break
output = tmp_out
if temp_needed:
_core.elementwise_copy(output, temp)
output = temp
return output
def _prep_structure(structure, ndim):
if structure is None:
structure = generate_binary_structure(ndim, 1)
return structure, structure.shape, True
if isinstance(structure, int):
structure = (structure,) * ndim
elif isinstance(structure, list):
structure = tuple(structure)
if isinstance(structure, tuple):
symmetric_structure = True
structure_shape = structure
else:
# if user-provided, it is not guaranteed to be symmetric
symmetric_structure = False
structure_shape = structure.shape
return structure, structure_shape, symmetric_structure
def binary_erosion(
input,
structure=None,
iterations=1,
mask=None,
output=None,
border_value=0,
origin=0,
brute_force=False,
):
"""Multidimensional binary erosion with a given structuring element.
Binary erosion is a mathematical morphology operation used for image
processing.
Args:
input(cupy.ndarray): The input binary array_like to be eroded.
Non-zero (True) elements form the subset to be eroded.
structure(cupy.ndarray or tuple or int, optional): The structuring
element used for the erosion. Non-zero elements are considered
True. If no structuring element is provided an element is generated
with a square connectivity equal to one. (Default value = None). If
a tuple of integers is provided, a structuring element of the
specified shape is used (all elements True). If an integer is
provided, the structuring element will have the same size along all
axes.
iterations(int, optional): The erosion is repeated ``iterations`` times
(one, by default). If iterations is less than 1, the erosion is
repeated until the result does not change anymore. Only an integer
number of iterations is accepted.
mask(cupy.ndarray or None, optional): If a mask is given, only those
elements with a True value at the corresponding mask element are
modified at each iteration. (Default value = None)
output(cupy.ndarray, optional): Array of the same shape as input, into
which the output is placed. By default, a new array is created.
border_value(int (cast to 0 or 1), optional): Value at the
border in the output array. (Default value = 0)
origin(int or tuple of ints, optional): Placement of the filter, by
default 0.
brute_force(boolean, optional): Memory condition: if False, only the
pixels whose value was changed in the last iteration are tracked as
candidates to be updated (eroded) in the current iteration; if
True all pixels are considered as candidates for erosion,
regardless of what happened in the previous iteration.
Returns:
cupy.ndarray: The result of binary erosion.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`scipy.ndimage.binary_erosion`
"""
structure, _, _ = _prep_structure(structure, input.ndim)
return _binary_erosion(
input,
structure,
iterations,
mask,
output,
border_value,
origin,
0,
brute_force,
)
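# Illustrative usage sketch (added for exposition; assumes a CUDA device):
#   import cupy
#   a = cupy.zeros((7, 7), dtype=bool)
#   a[1:6, 1:6] = True                    # 5x5 block of True
#   binary_erosion(a)                     # default 3x3 cross -> 3x3 interior remains
#   binary_erosion(a, structure=(3, 3))   # all-True 3x3 footprint -> 3x3 interior remains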
def binary_dilation(
input,
structure=None,
iterations=1,
mask=None,
output=None,
border_value=0,
origin=0,
brute_force=False,
):
"""Multidimensional binary dilation with the given structuring element.
Args:
input(cupy.ndarray): The input binary array_like to be dilated.
Non-zero (True) elements form the subset to be dilated.
structure(cupy.ndarray or tuple or int, optional): The structuring
element used for the dilation. Non-zero elements are considered
True. If no structuring element is provided an element is generated
with a square connectivity equal to one. (Default value = None). If
a tuple of integers is provided, a structuring element of the
specified shape is used (all elements True). If an integer is
provided, the structuring element will have the same size along all
axes.
iterations(int, optional): The dilation is repeated ``iterations``
times (one, by default). If iterations is less than 1, the dilation
is repeated until the result does not change anymore. Only an
integer number of iterations is accepted.
mask(cupy.ndarray or None, optional): If a mask is given, only those
elements with a True value at the corresponding mask element are
modified at each iteration. (Default value = None)
output(cupy.ndarray, optional): Array of the same shape as input, into
which the output is placed. By default, a new array is created.
border_value(int (cast to 0 or 1), optional): Value at the
border in the output array. (Default value = 0)
origin(int or tuple of ints, optional): Placement of the filter, by
default 0.
brute_force(boolean, optional): Memory condition: if False, only the
pixels whose value was changed in the last iteration are tracked as
candidates to be updated (dilated) in the current iteration; if
True all pixels are considered as candidates for dilation,
regardless of what happened in the previous iteration.
Returns:
cupy.ndarray: The result of binary dilation.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`scipy.ndimage.binary_dilation`
"""
structure, structure_shape, symmetric = _prep_structure(
structure, input.ndim
)
origin = _util._fix_sequence_arg(origin, input.ndim, "origin", int)
if not symmetric:
structure = structure[tuple([slice(None, None, -1)] * structure.ndim)]
for ii in range(len(origin)):
origin[ii] = -origin[ii]
if not structure_shape[ii] & 1:
origin[ii] -= 1
return _binary_erosion(
input,
structure,
iterations,
mask,
output,
border_value,
origin,
1,
brute_force,
)
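# Illustrative usage sketch (added for exposition):
#   import cupy
#   a = cupy.zeros((5, 5), dtype=bool)
#   a[2, 2] = True
#   binary_dilation(a)   # the single pixel grows into a 3x3 cross
#   # multi-iteration dilation currently requires brute_force=True:
#   binary_dilation(a, iterations=2, brute_force=True)   # city-block radius-2 diamond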
def binary_opening(
input,
structure=None,
iterations=1,
output=None,
origin=0,
mask=None,
border_value=0,
brute_force=False,
):
"""
Multidimensional binary opening with the given structuring element.
The *opening* of an input image by a structuring element is the
*dilation* of the *erosion* of the image by the structuring element.
Args:
input(cupy.ndarray): The input binary array to be opened.
Non-zero (True) elements form the subset to be opened.
structure(cupy.ndarray or tuple or int, optional): The structuring
element used for the opening. Non-zero elements are considered
True. If no structuring element is provided an element is generated
with a square connectivity equal to one. (Default value = None). If
a tuple of integers is provided, a structuring element of the
specified shape is used (all elements True). If an integer is
provided, the structuring element will have the same size along all
axes.
iterations(int, optional): The opening is repeated ``iterations`` times
(one, by default). If iterations is less than 1, the opening is
repeated until the result does not change anymore. Only an integer
number of iterations is accepted.
output(cupy.ndarray, optional): Array of the same shape as input, into
which the output is placed. By default, a new array is created.
origin(int or tuple of ints, optional): Placement of the filter, by
default 0.
mask(cupy.ndarray or None, optional): If a mask is given, only those
elements with a True value at the corresponding mask element are
modified at each iteration. (Default value = None)
border_value(int (cast to 0 or 1), optional): Value at the
border in the output array. (Default value = 0)
brute_force(boolean, optional): Memory condition: if False, only the
pixels whose value was changed in the last iteration are tracked as
candidates to be updated (dilated) in the current iteration; if
True all pixels are considered as candidates for opening,
regardless of what happened in the previous iteration.
Returns:
cupy.ndarray: The result of binary opening.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`scipy.ndimage.binary_opening`
"""
structure, _, _ = _prep_structure(structure, input.ndim)
tmp = binary_erosion(
input,
structure,
iterations,
mask,
None,
border_value,
origin,
brute_force,
)
return binary_dilation(
tmp,
structure,
iterations,
mask,
output,
border_value,
origin,
brute_force,
)
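# Illustrative usage sketch (added for exposition): opening removes foreground
# features smaller than the structuring element while preserving larger ones.
#   import cupy
#   a = cupy.zeros((9, 9), dtype=bool)
#   a[1:6, 1:6] = True     # 5x5 block: survives a full 3x3 opening
#   a[7, 7] = True         # isolated pixel: removed by the erosion step
#   binary_opening(a, structure=(3, 3))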
def binary_closing(
input,
structure=None,
iterations=1,
output=None,
origin=0,
mask=None,
border_value=0,
brute_force=False,
):
"""
Multidimensional binary closing with the given structuring element.
The *closing* of an input image by a structuring element is the
*erosion* of the *dilation* of the image by the structuring element.
Args:
input(cupy.ndarray): The input binary array to be closed.
Non-zero (True) elements form the subset to be closed.
structure(cupy.ndarray or tuple or int, optional): The structuring
element used for the closing. Non-zero elements are considered
True. If no structuring element is provided an element is generated
with a square connectivity equal to one. (Default value = None). If
a tuple of integers is provided, a structuring element of the
specified shape is used (all elements True). If an integer is
provided, the structuring element will have the same size along all
axes.
iterations(int, optional): The closing is repeated ``iterations`` times
(one, by default). If iterations is less than 1, the closing is
repeated until the result does not change anymore. Only an integer
number of iterations is accepted.
output(cupy.ndarray, optional): Array of the same shape as input, into
which the output is placed. By default, a new array is created.
origin(int or tuple of ints, optional): Placement of the filter, by
default 0.
mask(cupy.ndarray or None, optional): If a mask is given, only those
elements with a True value at the corresponding mask element are
modified at each iteration. (Default value = None)
border_value(int (cast to 0 or 1), optional): Value at the
border in the output array. (Default value = 0)
brute_force(boolean, optional): Memory condition: if False, only the
pixels whose value was changed in the last iteration are tracked as
candidates to be updated (dilated) in the current iteration; if
True all pixels are considered as candidates for closing,
regardless of what happened in the previous iteration.
Returns:
cupy.ndarray: The result of binary closing.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`scipy.ndimage.binary_closing`
"""
structure, _, _ = _prep_structure(structure, input.ndim)
tmp = binary_dilation(
input,
structure,
iterations,
mask,
None,
border_value,
origin,
brute_force,
)
return binary_erosion(
tmp,
structure,
iterations,
mask,
output,
border_value,
origin,
brute_force,
)
def binary_hit_or_miss(
input,
structure1=None,
structure2=None,
output=None,
origin1=0,
origin2=None,
):
"""
Multidimensional binary hit-or-miss transform.
The hit-or-miss transform finds the locations of a given pattern
inside the input image.
Args:
input (cupy.ndarray): Binary image where a pattern is to be detected.
structure1 (cupy.ndarray, optional): Part of the structuring element to
be fitted to the foreground (non-zero elements) of ``input``. If no
value is provided, a structure of square connectivity 1 is chosen.
structure2 (cupy.ndarray, optional): Second part of the structuring
element that has to miss completely the foreground. If no value is
provided, the complementary of ``structure1`` is taken.
output (cupy.ndarray, dtype or None, optional): Array of the same shape
as input, into which the output is placed. By default, a new array
is created.
origin1 (int or tuple of ints, optional): Placement of the first part
of the structuring element ``structure1``, by default 0 for a
centered structure.
origin2 (int or tuple of ints or None, optional): Placement of the
second part of the structuring element ``structure2``, by default 0
for a centered structure. If a value is provided for ``origin1``
and not for ``origin2``, then ``origin2`` is set to ``origin1``.
Returns:
cupy.ndarray: Hit-or-miss transform of ``input`` with the given
structuring element (``structure1``, ``structure2``).
.. warning::
This function may synchronize the device.
.. seealso:: :func:`scipy.ndimage.binary_hit_or_miss`
"""
if structure1 is None:
structure1 = generate_binary_structure(input.ndim, 1)
if structure2 is None:
structure2 = cupy.logical_not(structure1)
origin1 = _util._fix_sequence_arg(origin1, input.ndim, "origin1", int)
if origin2 is None:
origin2 = origin1
else:
origin2 = _util._fix_sequence_arg(origin2, input.ndim, "origin2", int)
tmp1 = _binary_erosion(
input, structure1, 1, None, None, 0, origin1, 0, False
)
inplace = isinstance(output, cupy.ndarray)
result = _binary_erosion(
input, structure2, 1, None, output, 0, origin2, 1, False
)
if inplace:
cupy.logical_not(output, output)
cupy.logical_and(tmp1, output, output)
else:
cupy.logical_not(result, result)
return cupy.logical_and(tmp1, result)
def binary_propagation(
input, structure=None, mask=None, output=None, border_value=0, origin=0
):
"""
Multidimensional binary propagation with the given structuring element.
Args:
input (cupy.ndarray): Binary image to be propagated inside ``mask``.
structure (cupy.ndarray, optional): Structuring element used in the
successive dilations. The output may depend on the structuring
element, especially if ``mask`` has several connected components. If
no structuring element is provided, an element is generated with a
squared connectivity equal to one.
mask (cupy.ndarray, optional): Binary mask defining the region into
which ``input`` is allowed to propagate.
output (cupy.ndarray, optional): Array of the same shape as input, into
which the output is placed. By default, a new array is created.
border_value (int, optional): Value at the border in the output array.
The value is cast to 0 or 1.
origin (int or tuple of ints, optional): Placement of the filter.
Returns:
cupy.ndarray : Binary propagation of ``input`` inside ``mask``.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`scipy.ndimage.binary_propagation`
"""
return binary_dilation(
input,
structure,
-1,
mask,
output,
border_value,
origin,
brute_force=True,
)
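# Illustrative usage sketch (added for exposition): propagation reconstructs
# the connected components of ``mask`` that contain at least one seed pixel.
#   import cupy
#   mask = cupy.zeros((1, 9), dtype=bool)
#   mask[0, 0:3] = True
#   mask[0, 5:8] = True                   # two separate segments
#   seed = cupy.zeros_like(mask)
#   seed[0, 1] = True                     # seed lies in the first segment only
#   binary_propagation(seed, mask=mask)   # first segment True, second stays False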
def binary_fill_holes(input, structure=None, output=None, origin=0):
"""Fill the holes in binary objects.
Args:
input (cupy.ndarray): N-D binary array with holes to be filled.
structure (cupy.ndarray, optional): Structuring element used in the
computation; large-size elements make computations faster but may
miss holes separated from the background by thin regions. The
default element (with a square connectivity equal to one) yields
the intuitive result where all holes in the input have been filled.
output (cupy.ndarray, dtype or None, optional): Array of the same shape
as input, into which the output is placed. By default, a new array
is created.
origin (int, tuple of ints, optional): Position of the structuring
element.
Returns:
cupy.ndarray: Transformation of the initial image ``input`` where holes
have been filled.
.. warning::
This function may synchronize the device.
.. seealso:: :func:`scipy.ndimage.binary_fill_holes`
"""
mask = cupy.logical_not(input)
tmp = cupy.zeros(mask.shape, bool)
inplace = isinstance(output, cupy.ndarray)
# TODO (grlee77): set brute_force=False below once implemented
if inplace:
binary_dilation(
tmp, structure, -1, mask, output, 1, origin, brute_force=True
)
cupy.logical_not(output, output)
else:
output = binary_dilation(
tmp, structure, -1, mask, None, 1, origin, brute_force=True
)
cupy.logical_not(output, output)
return output
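# Illustrative usage sketch (added for exposition):
#   import cupy
#   ring = cupy.zeros((7, 7), dtype=bool)
#   ring[1:6, 1:6] = True
#   ring[2:5, 2:5] = False        # 5x5 square with a 3x3 hole
#   binary_fill_holes(ring)       # hole is filled: solid 5x5 square again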
def grey_erosion(
input,
size=None,
footprint=None,
structure=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""Calculates a greyscale erosion.
Args:
input (cupy.ndarray): The input array.
size (tuple of ints): Shape of a flat and full structuring element used
for the greyscale erosion. Optional if ``footprint`` or
``structure`` is provided.
footprint (array of ints): Positions of non-infinite elements of a flat
structuring element used for greyscale erosion. Non-zero values
give the set of neighbors of the center over which minimum is
chosen.
structure (array of ints): Structuring element used for the greyscale
erosion. ``structure`` may be a non-flat structuring element.
output (cupy.ndarray, dtype or None): The array in which to place the
output.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``constant``. Default is ``0.0``.
origin (scalar or tuple of scalar): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of greyscale erosion.
.. seealso:: :func:`scipy.ndimage.grey_erosion`
"""
if size is None and footprint is None and structure is None:
raise ValueError("size, footprint or structure must be specified")
return _filters._min_or_max_filter(
input, size, footprint, structure, output, mode, cval, origin, "min"
)
def grey_dilation(
input,
size=None,
footprint=None,
structure=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""Calculates a greyscale dilation.
Args:
input (cupy.ndarray): The input array.
size (tuple of ints): Shape of a flat and full structuring element used
for the greyscale dilation. Optional if ``footprint`` or
``structure`` is provided.
footprint (array of ints): Positions of non-infinite elements of a flat
structuring element used for greyscale dilation. Non-zero values
give the set of neighbors of the center over which maximum is
chosen.
structure (array of ints): Structuring element used for the greyscale
dilation. ``structure`` may be a non-flat structuring element.
output (cupy.ndarray, dtype or None): The array in which to place the
output.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``constant``. Default is ``0.0``.
origin (scalar or tuple of scalar): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of greyscale dilation.
.. seealso:: :func:`scipy.ndimage.grey_dilation`
"""
if size is None and footprint is None and structure is None:
raise ValueError("size, footprint or structure must be specified")
if structure is not None:
structure = cupy.array(structure)
structure = structure[tuple([slice(None, None, -1)] * structure.ndim)]
if footprint is not None:
footprint = cupy.array(footprint)
footprint = footprint[tuple([slice(None, None, -1)] * footprint.ndim)]
origin = _util._fix_sequence_arg(origin, input.ndim, "origin", int)
for i in range(len(origin)):
origin[i] = -origin[i]
if footprint is not None:
sz = footprint.shape[i]
elif structure is not None:
sz = structure.shape[i]
elif numpy.isscalar(size):
sz = size
else:
sz = size[i]
if sz % 2 == 0:
origin[i] -= 1
return _filters._min_or_max_filter(
input, size, footprint, structure, output, mode, cval, origin, "max"
)
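# Illustrative usage sketch (added for exposition): grey dilation/erosion act
# as moving maximum/minimum filters for flat structuring elements.
#   import cupy
#   img = cupy.asarray([[1., 2., 3.],
#                       [4., 5., 6.],
#                       [7., 8., 9.]])
#   grey_dilation(img, size=(3, 3))   # centre becomes 9 (local maximum)
#   grey_erosion(img, size=(3, 3))    # centre becomes 1 (local minimum)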
def grey_closing(
input,
size=None,
footprint=None,
structure=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""Calculates a multi-dimensional greyscale closing.
Args:
input (cupy.ndarray): The input array.
size (tuple of ints): Shape of a flat and full structuring element used
for the greyscale closing. Optional if ``footprint`` or
``structure`` is provided.
footprint (array of ints): Positions of non-infinite elements of a flat
structuring element used for greyscale closing. Non-zero values
give the set of neighbors of the center over which closing is
chosen.
structure (array of ints): Structuring element used for the greyscale
closing. ``structure`` may be a non-flat structuring element.
output (cupy.ndarray, dtype or None): The array in which to place the
output.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``constant``. Default is ``0.0``.
origin (scalar or tuple of scalar): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of greyscale closing.
.. seealso:: :func:`scipy.ndimage.grey_closing`
"""
if (size is not None) and (footprint is not None):
warnings.warn(
"ignoring size because footprint is set", UserWarning, stacklevel=2
)
tmp = grey_dilation(
input, size, footprint, structure, None, mode, cval, origin
)
return grey_erosion(
tmp, size, footprint, structure, output, mode, cval, origin
)
def grey_opening(
input,
size=None,
footprint=None,
structure=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""Calculates a multi-dimensional greyscale opening.
Args:
input (cupy.ndarray): The input array.
size (tuple of ints): Shape of a flat and full structuring element used
for the greyscale opening. Optional if ``footprint`` or
``structure`` is provided.
footprint (array of ints): Positions of non-infinite elements of a flat
structuring element used for greyscale opening. Non-zero values
give the set of neighbors of the center over which opening is
chosen.
structure (array of ints): Structuring element used for the greyscale
opening. ``structure`` may be a non-flat structuring element.
output (cupy.ndarray, dtype or None): The array in which to place the
output.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``constant``. Default is ``0.0``.
origin (scalar or tuple of scalar): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The result of greyscale opening.
.. seealso:: :func:`scipy.ndimage.grey_opening`
"""
if (size is not None) and (footprint is not None):
warnings.warn(
"ignoring size because footprint is set", UserWarning, stacklevel=2
)
tmp = grey_erosion(
input, size, footprint, structure, None, mode, cval, origin
)
return grey_dilation(
tmp, size, footprint, structure, output, mode, cval, origin
)
def morphological_gradient(
input,
size=None,
footprint=None,
structure=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""
Multidimensional morphological gradient.
The morphological gradient is calculated as the difference between a
dilation and an erosion of the input with a given structuring element.
Args:
input (cupy.ndarray): The input array.
size (tuple of ints): Shape of a flat and full structuring element used
for the morphological gradient. Optional if ``footprint`` or
``structure`` is provided.
footprint (array of ints): Positions of non-infinite elements of a flat
structuring element used for morphological gradient. Non-zero
values give the set of neighbors of the center over which opening
is chosen.
structure (array of ints): Structuring element used for the
morphological gradient. ``structure`` may be a non-flat
structuring element.
output (cupy.ndarray, dtype or None): The array in which to place the
output.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``constant``. Default is ``0.0``.
origin (scalar or tuple of scalar): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The morphological gradient of the input.
.. seealso:: :func:`scipy.ndimage.morphological_gradient`
"""
tmp = grey_dilation(
input, size, footprint, structure, None, mode, cval, origin
)
if isinstance(output, cupy.ndarray):
grey_erosion(
input, size, footprint, structure, output, mode, cval, origin
)
return cupy.subtract(tmp, output, output)
else:
return tmp - grey_erosion(
input, size, footprint, structure, None, mode, cval, origin
)
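# Illustrative usage sketch (added for exposition): the gradient is non-zero
# only where dilation and erosion differ, i.e. on and around object edges.
#   import cupy
#   a = cupy.zeros((6, 6), dtype=cupy.uint8)
#   a[2:4, 2:4] = 1
#   morphological_gradient(a, size=(3, 3))   # 1 on/around the 2x2 block, 0 elsewhere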
def morphological_laplace(
input,
size=None,
footprint=None,
structure=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""
Multidimensional morphological laplace.
Args:
input (cupy.ndarray): The input array.
size (tuple of ints): Shape of a flat and full structuring element used
for the morphological laplace. Optional if ``footprint`` or
``structure`` is provided.
footprint (array of ints): Positions of non-infinite elements of a flat
structuring element used for morphological laplace. Non-zero
values give the set of neighbors of the center over which opening
is chosen.
structure (array of ints): Structuring element used for the
morphological laplace. ``structure`` may be a non-flat
structuring element.
output (cupy.ndarray, dtype or None): The array in which to place the
output.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``constant``. Default is ``0.0``.
origin (scalar or tuple of scalar): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: The morphological laplace of the input.
.. seealso:: :func:`scipy.ndimage.morphological_laplace`
"""
tmp1 = grey_dilation(
input, size, footprint, structure, None, mode, cval, origin
)
if isinstance(output, cupy.ndarray):
grey_erosion(
input, size, footprint, structure, output, mode, cval, origin
)
cupy.add(tmp1, output, output)
cupy.subtract(output, input, output)
return cupy.subtract(output, input, output)
else:
tmp2 = grey_erosion(
input, size, footprint, structure, None, mode, cval, origin
)
cupy.add(tmp1, tmp2, tmp2)
cupy.subtract(tmp2, input, tmp2)
cupy.subtract(tmp2, input, tmp2)
return tmp2
def white_tophat(
input,
size=None,
footprint=None,
structure=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""
Multidimensional white tophat filter.
Args:
input (cupy.ndarray): The input array.
size (tuple of ints): Shape of a flat and full structuring element used
for the white tophat. Optional if ``footprint`` or ``structure`` is
provided.
footprint (array of ints): Positions of non-infinite elements of a flat
structuring element used for the white tophat. Non-zero values
give the set of neighbors of the center over which opening is
chosen.
structure (array of ints): Structuring element used for the white
tophat. ``structure`` may be a non-flat structuring element.
output (cupy.ndarray, dtype or None): The array in which to place the
output.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``constant``. Default is ``0.0``.
origin (scalar or tuple of scalar): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray: Result of the filter of ``input`` with ``structure``.
.. seealso:: :func:`scipy.ndimage.white_tophat`
"""
if (size is not None) and (footprint is not None):
warnings.warn(
"ignoring size because footprint is set", UserWarning, stacklevel=2
)
tmp = grey_erosion(
input, size, footprint, structure, None, mode, cval, origin
)
tmp = grey_dilation(
tmp, size, footprint, structure, output, mode, cval, origin
)
if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_:
cupy.bitwise_xor(input, tmp, out=tmp)
else:
cupy.subtract(input, tmp, out=tmp)
return tmp
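# Illustrative usage sketch (added for exposition): the white tophat equals
# input minus its grey opening, so it keeps bright features smaller than the
# structuring element.
#   import cupy
#   img = cupy.zeros((7, 7), dtype=cupy.float32)
#   img[3, 3] = 10.0                  # one bright pixel on a dark background
#   white_tophat(img, size=(3, 3))    # the isolated peak is preserved (value 10)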
def black_tophat(
input,
size=None,
footprint=None,
structure=None,
output=None,
mode="reflect",
cval=0.0,
origin=0,
):
"""
Multidimensional black tophat filter.
Args:
input (cupy.ndarray): The input array.
size (tuple of ints): Shape of a flat and full structuring element used
for the black tophat. Optional if ``footprint`` or ``structure`` is
provided.
footprint (array of ints): Positions of non-infinite elements of a flat
structuring element used for the black tophat. Non-zero values
give the set of neighbors of the center over which opening is
chosen.
structure (array of ints): Structuring element used for the black
tophat. ``structure`` may be a non-flat structuring element.
output (cupy.ndarray, dtype or None): The array in which to place the
output.
mode (str): The array borders are handled according to the given mode
(``'reflect'``, ``'constant'``, ``'nearest'``, ``'mirror'``,
``'wrap'``). Default is ``'reflect'``.
cval (scalar): Value to fill past edges of input if mode is
``constant``. Default is ``0.0``.
origin (scalar or tuple of scalar): The origin parameter controls the
placement of the filter, relative to the center of the current
element of the input. Default of 0 is equivalent to
``(0,)*input.ndim``.
Returns:
cupy.ndarray : Result of the filter of ``input`` with ``structure``.
.. seealso:: :func:`scipy.ndimage.black_tophat`
"""
if (size is not None) and (footprint is not None):
warnings.warn(
"ignoring size because footprint is set", UserWarning, stacklevel=2
)
tmp = grey_dilation(
input, size, footprint, structure, None, mode, cval, origin
)
tmp = grey_erosion(
tmp, size, footprint, structure, output, mode, cval, origin
)
if input.dtype == numpy.bool_ and tmp.dtype == numpy.bool_:
cupy.bitwise_xor(tmp, input, out=tmp)
else:
cupy.subtract(tmp, input, out=tmp)
return tmp
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/grey.py
|
import warnings
from .gray import ( # noqa
black_tophat,
closing,
dilation,
erosion,
opening,
white_tophat,
)
__all__ = [
"erosion",
"dilation",
"opening",
"closing",
"white_tophat",
"black_tophat",
]
warnings.warn(
"Importing from cucim.skimage.morphology.grey is deprecated. "
"Please import from cucim.skimage.morphology instead.",
FutureWarning,
stacklevel=2,
)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/footprints.py
|
import os
from collections.abc import Sequence
from numbers import Integral
import cupy as cp
import numpy as np
from cucim.skimage import morphology
# Precomputed ball and disk decompositions were saved as 2D arrays where the
# radius of the desired decomposition is used to index into the first axis of
# the array. The values at a given radius correspond to the number of
# repetitions of 3 different types of elementary structuring elements.
#
# See _nsphere_series_decomposition for full details.
_nsphere_decompositions = {}
_nsphere_decompositions[2] = np.load(
os.path.join(os.path.dirname(__file__), "disk_decompositions.npy")
)
_nsphere_decompositions[3] = np.load(
os.path.join(os.path.dirname(__file__), "ball_decompositions.npy")
)
def _footprint_is_sequence(footprint):
if hasattr(footprint, "__cuda_array_interface__"):
return False
def _validate_sequence_element(t):
return (
isinstance(t, Sequence)
and len(t) == 2
and (
hasattr(t[0], "__cuda_array_interface__")
# can be a shape tuple for square/rectangular footprints
or isinstance(t[0], tuple)
)
and isinstance(t[1], Integral)
)
if isinstance(footprint, Sequence):
if all(isinstance(t, int) for t in footprint):
# allow pass through of a single shape tuple
return False
elif not all(_validate_sequence_element(t) for t in footprint):
raise ValueError(
"All elements of footprint sequence must be a 2-tuple where "
"the first element of the tuple is an ndarray and the second "
"is an integer indicating the number of iterations."
)
else:
raise ValueError("footprint must be either an ndarray or Sequence")
return True
def _footprint_shape(footprint):
if isinstance(footprint, tuple):
return footprint
return footprint.shape
def _footprint_ndim(footprint):
if isinstance(footprint, tuple):
return len(footprint)
return footprint.ndim
def _shape_from_sequence(footprints, require_odd_size=False):
"""Determine the shape of composite footprint
In the future if we only want to support odd-sized square, we may want to
change this to require_odd_size
"""
if not _footprint_is_sequence(footprints):
raise ValueError("expected a sequence of footprints")
ndim = _footprint_ndim(footprints[0][0])
shape = [0] * ndim
def _odd_size(size, require_odd_size):
if require_odd_size and size % 2 == 0:
raise ValueError("expected all footprint elements to have odd size")
for d in range(ndim):
fp, nreps = footprints[0]
fp_shape = _footprint_shape(fp)
_odd_size(fp_shape[d], require_odd_size)
shape[d] = fp_shape[d] + (nreps - 1) * (fp_shape[d] - 1)
for fp, nreps in footprints[1:]:
fp_shape = _footprint_shape(fp)
_odd_size(fp_shape[d], require_odd_size)
shape[d] += nreps * (fp_shape[d] - 1)
return tuple(shape)
def footprint_from_sequence(footprints):
"""Convert a footprint sequence into an equivalent ndarray.
Parameters
----------
footprints : tuple of 2-tuples
A sequence of footprint tuples where the first element of each tuple
is an array corresponding to a footprint and the second element is the
number of times it is to be applied. Currently all footprints should
have odd size.
Returns
-------
footprint : ndarray
A single array equivalent to applying the sequence of `footprints`.
"""
# Create a single pixel image of sufficient size and apply binary dilation.
shape = _shape_from_sequence(footprints)
imag = cp.zeros(shape, dtype=bool)
imag[tuple(s // 2 for s in shape)] = 1
return morphology.binary_dilation(imag, footprints)
def square(width, dtype=None, *, decomposition=None):
"""Generates a flat, square-shaped footprint.
Every pixel along the perimeter has a chessboard distance
no greater than radius (radius=floor(width/2)) pixels.
Parameters
----------
width : int
The width and height of the square.
Other Parameters
----------------
dtype : data-type or None, optional
The data type of the footprint. When None, a tuple will be returned in
place of the actual footprint array. This can be passed to grayscale
and binary morphology functions in place of an explicit array to avoid
array allocation overhead.
decomposition : {None, 'separable', 'sequence'}, optional
If None, a single array is returned. For 'sequence', a tuple of smaller
footprints is returned. Applying this series of smaller footprints will
give an identical result to a single, larger footprint, but often with
better computational performance. See Notes for more details.
With 'separable', this function uses separable 1D footprints for each
axis. Whether 'sequence' or 'separable' is computationally faster may
be architecture-dependent.
Returns
-------
footprint : cupy.ndarray
The footprint where elements of the neighborhood are 1 and 0 otherwise.
When `decomposition` is None, this is just a cupy.ndarray. Otherwise,
this will be a tuple whose length is equal to the number of unique
structuring elements to apply (see Notes for more detail)
Notes
-----
When `decomposition` is not None, each element of the `footprint`
tuple is a 2-tuple of the form ``(ndarray, num_iter)`` that specifies a
footprint array and the number of iterations it is to be applied.
For binary morphology, using ``decomposition='sequence'`` or
``decomposition='separable'`` were observed to give better performance than
``decomposition=None``, with the magnitude of the performance increase
rapidly increasing with footprint size. For grayscale morphology with
square footprints, it is recommended to use ``decomposition=None`` since
the internal SciPy functions that are called already have a fast
implementation based on separable 1D sliding windows.
The 'sequence' decomposition mode only supports odd valued `width`. If
`width` is even, the sequence used will be identical to the 'separable'
mode.
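Examples
--------
>>> # illustrative example (expected output follows from the code above)
>>> import cupy as cp
>>> from cucim.skimage.morphology import square
>>> square(3, dtype=cp.uint8)
array([[1, 1, 1],
       [1, 1, 1],
       [1, 1, 1]], dtype=uint8)
>>> square(4)  # default ``dtype=None`` returns just the shape tuple
(4, 4)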
"""
if decomposition is None:
if dtype is None:
return (width, width)
else:
return cp.ones((width, width), dtype=dtype)
if decomposition == "separable" or width % 2 == 0:
if dtype is None:
sequence = (((width, 1), 1), ((1, width), 1))
else:
sequence = (
(cp.ones((width, 1), dtype=dtype), 1),
(cp.ones((1, width), dtype=dtype), 1),
)
elif decomposition == "sequence":
# only handles odd widths
if dtype is None:
sequence = (((3, 3), _decompose_size(width, 3)),)
else:
sequence = (
(cp.ones((3, 3), dtype=dtype), _decompose_size(width, 3)),
)
else:
raise ValueError(f"Unrecognized decomposition: {decomposition}")
return sequence
def _decompose_size(size, kernel_size=3):
"""Determine number of repeated iterations for a `kernel_size` kernel.
Returns how many repeated morphology operations with an element of size
`kernel_size` are equivalent to a single morphology operation with a
kernel of size `size`.
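Examples
--------
>>> # e.g. three passes of a length-3 kernel span the same extent as a
>>> # single length-7 kernel (3 + 2 + 2 = 7)
>>> _decompose_size(7, 3)
3
>>> _decompose_size(9, 3)
4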
"""
if kernel_size % 2 != 1:
raise ValueError("only odd length kernel_size is supported")
return 1 + (size - kernel_size) // (kernel_size - 1)
def rectangle(nrows, ncols, dtype=None, *, decomposition=None):
"""Generates a flat, rectangular-shaped footprint.
Every pixel in the rectangle generated for a given width and given height
belongs to the neighborhood.
Parameters
----------
nrows : int
The number of rows of the rectangle.
ncols : int
The number of columns of the rectangle.
Other Parameters
----------------
dtype : data-type or None, optional
The data type of the footprint. When None, a tuple will be returned in
place of the actual footprint array. This can be passed to grayscale
and binary morphology functions in place of an explicit array to avoid
array allocation overhead.
decomposition : {None, 'separable', 'sequence'}, optional
If None, a single array is returned. For 'sequence', a tuple of smaller
footprints is returned. Applying this series of smaller footprints will
give an identical result to a single, larger footprint, but often with
better computational performance. See Notes for more details.
With 'separable', this function uses separable 1D footprints for each
axis. Whether 'sequence' or 'separable' is computationally faster may
be architecture-dependent.
Returns
-------
footprint : cupy.ndarray
A footprint consisting only of ones, i.e. every pixel belongs to the
neighborhood. When `decomposition` is None, this is just a
cupy.ndarray. Otherwise, this will be a tuple whose length is equal to
the number of unique structuring elements to apply (see Notes for more
detail)
Notes
-----
When `decomposition` is not None, each element of the `footprint`
tuple is a 2-tuple of the form ``(ndarray, num_iter)`` that specifies a
footprint array and the number of iterations it is to be applied.
For binary morphology, using ``decomposition='sequence'``
was observed to give better performance, with the magnitude of the
performance increase rapidly increasing with footprint size. For grayscale
morphology with rectangular footprints, it is recommended to use
``decomposition=None`` since the internal SciPy functions that are called
already have a fast implementation based on separable 1D sliding windows.
The `sequence` decomposition mode only supports odd valued `nrows` and
`ncols`. If either `nrows` or `ncols` is even, the sequence used will be
identical to ``decomposition='separable'``.
- The use of ``width`` and ``height`` has been deprecated in
version 0.18.0. Use ``nrows`` and ``ncols`` instead.
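Examples
--------
>>> # illustrative example (expected output follows from the code above)
>>> import cupy as cp
>>> from cucim.skimage.morphology import rectangle
>>> rectangle(3, 5, dtype=cp.uint8)
array([[1, 1, 1, 1, 1],
       [1, 1, 1, 1, 1],
       [1, 1, 1, 1, 1]], dtype=uint8)
>>> rectangle(3, 5)  # default ``dtype=None`` returns just the shape tuple
(3, 5)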
"""
if decomposition is None: # TODO: check optimal width setting here
if dtype is None:
return (nrows, ncols)
else:
return cp.ones((nrows, ncols), dtype=dtype)
even_rows = nrows % 2 == 0
even_cols = ncols % 2 == 0
if decomposition == "separable" or even_rows or even_cols:
if dtype is None:
sequence = [((nrows, 1), 1), ((1, ncols), 1)]
else:
sequence = [
(cp.ones((nrows, 1), dtype=dtype), 1),
(cp.ones((1, ncols), dtype=dtype), 1),
]
elif decomposition == "sequence":
# this branch only supports odd nrows, ncols
sq_size = 3
sq_reps = _decompose_size(min(nrows, ncols), sq_size)
if dtype is None:
sequence = [((3, 3), sq_reps)]
else:
sequence = [(cp.ones((3, 3), dtype=dtype), sq_reps)]
if nrows > ncols:
nextra = nrows - ncols
if dtype is None:
sequence.append(((nextra + 1, 1), 1))
else:
sequence.append((cp.ones((nextra + 1, 1), dtype=dtype), 1))
elif ncols > nrows:
nextra = ncols - nrows
if dtype is None:
sequence.append(((1, nextra + 1), 1))
else:
sequence.append((cp.ones((1, nextra + 1), dtype=dtype), 1))
else:
raise ValueError(f"Unrecognized decomposition: {decomposition}")
return tuple(sequence)
def diamond(radius, dtype=cp.uint8, *, decomposition=None):
"""Generates a flat, diamond-shaped footprint.
A pixel is part of the neighborhood (i.e. labeled 1) if
the city block/Manhattan distance between it and the center of
the neighborhood is no greater than radius.
Parameters
----------
radius : int
The radius of the diamond-shaped footprint.
Other Parameters
----------------
dtype : data-type, optional
The data type of the footprint.
decomposition : {None, 'sequence'}, optional
If None, a single array is returned. For 'sequence', a tuple of smaller
footprints is returned. Applying this series of smaller footprints will
give an identical result to a single, larger footprint, but with
better computational performance. See Notes for more details.
Returns
-------
footprint : cupy.ndarray
The footprint where elements of the neighborhood are 1 and 0 otherwise.
When `decomposition` is None, this is just a cupy.ndarray. Otherwise,
this will be a tuple whose length is equal to the number of unique
structuring elements to apply (see Notes for more detail)
Notes
-----
When `decomposition` is not None, each element of the `footprint`
tuple is a 2-tuple of the form ``(ndarray, num_iter)`` that specifies a
footprint array and the number of iterations it is to be applied.
For either binary or grayscale morphology, using
``decomposition='sequence'`` was observed to have a performance benefit,
with the magnitude of the benefit increasing with increasing footprint
size.
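Examples
--------
>>> # illustrative example: all pixels within Manhattan distance 2 of the
>>> # center are set (output derived from the code above)
>>> from cucim.skimage.morphology import diamond
>>> diamond(2)
array([[0, 0, 1, 0, 0],
       [0, 1, 1, 1, 0],
       [1, 1, 1, 1, 1],
       [0, 1, 1, 1, 0],
       [0, 0, 1, 0, 0]], dtype=uint8)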
"""
if decomposition is None:
# CuPy Backend: grid is usually small -> faster to generate it in NumPy
sz = np.arange(0, radius * 2 + 1)
ii, jj = np.meshgrid(sz, sz, sparse=True)
return cp.asarray(
np.abs(ii - radius) + np.abs(jj - radius) <= radius, dtype=dtype
)
elif decomposition == "sequence":
fp = diamond(1, dtype=dtype, decomposition=None)
nreps = _decompose_size(2 * radius + 1, fp.shape[0])
footprint = ((fp, nreps),)
else:
raise ValueError(f"Unrecognized decomposition: {decomposition}")
return footprint
def _nsphere_series_decomposition(radius, ndim, dtype=None):
"""Generate a sequence of footprints approximating an n-sphere.
Morphological operations with an n-sphere (hypersphere) footprint can be
approximated by applying a series of smaller footprints of extent 3 along
each axis. Specific solutions for this are given in [1]_ for the case of
2D disks with radius 2 through 10.
Here we used n-dimensional extensions of the "square", "diamond" and
"t-shaped" elements from that publication. All of these elementary elements
have size ``(3,) * ndim``. We numerically computed the number of
repetitions of each element that gives the closest match to the disk
(in 2D) or ball (in 3D) computed with ``decomposition=None``.
The approach can be extended to higher dimensions, but we have only stored
results for 2D and 3D at this point.
Empirically, the shapes at large radius approach a hexadecagon
(16-sides [2]_) in 2D and a rhombicuboctahedron (26-faces, [3]_) in 3D.
References
----------
.. [1] Park, H and Chin R.T. Decomposition of structuring elements for
optimal implementation of morphological operations. In Proceedings:
1997 IEEE Workshop on Nonlinear Signal and Image Processing, London,
UK.
https://www.iwaenc.org/proceedings/1997/nsip97/pdf/scan/ns970226.pdf
.. [2] https://en.wikipedia.org/wiki/Hexadecagon
.. [3] https://en.wikipedia.org/wiki/Rhombicuboctahedron
"""
if radius == 1:
# for radius 1 just use the exact shape (3,) * ndim solution
kwargs = dict(dtype=dtype, strict_radius=False, decomposition=None)
if ndim == 2:
return ((disk(1, **kwargs), 1),)
elif ndim == 3:
return ((ball(1, **kwargs), 1),)
# load precomputed decompositions
if ndim not in _nsphere_decompositions:
raise ValueError(
"sequence decompositions are only currently available for "
"2d disks or 3d balls"
)
precomputed_decompositions = _nsphere_decompositions[ndim]
max_radius = precomputed_decompositions.shape[0]
if radius > max_radius:
raise ValueError(
f"precomputed {ndim}D decomposition unavailable for "
f"radius > {max_radius}"
)
num_t_series, num_diamond, num_square = precomputed_decompositions[radius]
sequence = []
if dtype is None:
_dtype = cp.uint8
else:
_dtype = dtype
if num_t_series > 0:
# shape (3, ) * ndim "T-shaped" footprints
all_t = _t_shaped_element_series(ndim=ndim, dtype=_dtype)
[sequence.append((t, num_t_series)) for t in all_t]
if num_diamond > 0:
d = np.zeros((3,) * ndim, dtype=_dtype)
sl = [slice(1, 2)] * ndim
for ax in range(ndim):
sl[ax] = slice(None)
d[tuple(sl)] = 1
sl[ax] = slice(1, 2)
sequence.append((cp.asarray(d), num_diamond))
if num_square > 0:
if dtype is None:
sequence.append(((3,) * ndim, num_square))
else:
sequence.append((cp.ones((3,) * ndim, dtype=_dtype), num_square))
return tuple(sequence)
def _t_shaped_element_series(ndim=2, dtype=cp.uint8):
"""A series of T-shaped structuring elements.
In the 2D case this is a T-shaped element and its rotation at multiples of
90 degrees. This series is used in efficient decompositions of disks of
various radius as published in [1]_.
The generalization to the n-dimensional case can be performed by having the
"top" of the T to extend in (ndim - 1) dimensions and then producing a
series of rotations such that the bottom end of the T points along each of
``2 * ndim`` orthogonal directions.
"""
if ndim == 2:
# The n-dimensional case produces the same set of footprints, but
# the 2D example is retained here for clarity.
t0 = np.array([[1, 1, 1], [0, 1, 0], [0, 1, 0]], dtype=dtype)
t90 = cp.asarray(np.rot90(t0, 1))
t180 = cp.asarray(np.rot90(t0, 2))
t270 = cp.asarray(np.rot90(t0, 3))
t0 = cp.asarray(t0)
return t0, t90, t180, t270
else:
# ndimensional generalization of the 2D case above
all_t = []
for ax in range(ndim):
for idx in [0, 2]:
t = np.zeros((3,) * ndim, dtype=dtype)
sl = [slice(None)] * ndim
sl[ax] = slice(idx, idx + 1)
t[tuple(sl)] = 1
sl = [slice(1, 2)] * ndim
sl[ax] = slice(None)
t[tuple(sl)] = 1
all_t.append(cp.asarray(t))
return tuple(all_t)
def disk(radius, dtype=cp.uint8, *, strict_radius=True, decomposition=None):
"""Generates a flat, disk-shaped footprint.
A pixel is within the neighborhood if the Euclidean distance between
it and the origin is no greater than radius (this is only approximately
true when ``decomposition == 'sequence'``).
Parameters
----------
radius : int
The radius of the disk-shaped footprint.
Other Parameters
----------------
dtype : data-type, optional
The data type of the footprint.
strict_radius : bool, optional
If False, extend the radius by 0.5. This allows the circle to expand
further within a cube that remains of size ``2 * radius + 1`` along
each axis. This parameter is ignored if decomposition is not None.
decomposition : {None, 'sequence', 'crosses'}, optional
If None, a single array is returned. For 'sequence', a tuple of smaller
footprints is returned. Applying this series of smaller footprints will
give a result equivalent to a single, larger footprint, but with
better computational performance. For disk footprints, the 'sequence'
or 'crosses' decompositions are not always exactly equivalent to
``decomposition=None``. See Notes for more details.
Returns
-------
footprint : cupy.ndarray
The footprint where elements of the neighborhood are 1 and 0 otherwise.
Notes
-----
When `decomposition` is not None, each element of the `footprint`
tuple is a 2-tuple of the form ``(ndarray, num_iter)`` that specifies a
footprint array and the number of iterations it is to be applied.
The disk produced by the ``decomposition='sequence'`` mode may not be
identical to that with ``decomposition=None``. A disk footprint can be
approximated by applying a series of smaller footprints of extent 3 along
each axis. Specific solutions for this are given in [1]_ for the case of
2D disks with radius 2 through 10. Here, we numerically computed the number
of repetitions of each element that gives the closest match to the disk
computed with kwargs ``strict_radius=False, decomposition=None``.
Empirically, the series decomposition at large radius approaches a
hexadecagon (a 16-sided polygon [2]_). In [3]_, the authors demonstrate
that a hexadecagon is the closest approximation to a disk that can be
achieved for decomposition with footprints of shape (3, 3).
The disk produced by the ``decomposition='crosses'`` is often but not
always identical to that with ``decomposition=None``. It tends to give a
closer approximation than ``decomposition='sequence'``, at a performance
that is fairly comparable. The individual cross-shaped elements are not
limited to extent (3, 3) in size. Unlike the 'sequence' decomposition, the
'crosses' decomposition can also accurately approximate the shape of disks
with ``strict_radius=True``. The method is based on an adaptation of
algorithm 1 given in [4]_.
References
----------
.. [1] Park, H and Chin R.T. Decomposition of structuring elements for
optimal implementation of morphological operations. In Proceedings:
1997 IEEE Workshop on Nonlinear Signal and Image Processing, London,
UK.
https://www.iwaenc.org/proceedings/1997/nsip97/pdf/scan/ns970226.pdf
.. [2] https://en.wikipedia.org/wiki/Hexadecagon
.. [3] Vanrell, M and Vitrià, J. Optimal 3 × 3 decomposable disks for
morphological transformations. Image and Vision Computing, Vol. 15,
Issue 11, 1997.
:DOI:`10.1016/S0262-8856(97)00026-7`
.. [4] Li, D. and Ritter, G.X. Decomposition of Separable and Symmetric
Convex Templates. Proc. SPIE 1350, Image Algebra and Morphological
Image Processing, (1 November 1990).
:DOI:`10.1117/12.23608`
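Examples
--------
>>> # illustrative example showing the effect of ``strict_radius``
>>> # (expected output derived from the implementation above)
>>> from cucim.skimage.morphology import disk
>>> disk(2)
array([[0, 0, 1, 0, 0],
       [0, 1, 1, 1, 0],
       [1, 1, 1, 1, 1],
       [0, 1, 1, 1, 0],
       [0, 0, 1, 0, 0]], dtype=uint8)
>>> disk(2, strict_radius=False)
array([[0, 1, 1, 1, 0],
       [1, 1, 1, 1, 1],
       [1, 1, 1, 1, 1],
       [1, 1, 1, 1, 1],
       [0, 1, 1, 1, 0]], dtype=uint8)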
"""
if decomposition is None:
# CuPy Backend: grid is usually small -> faster to generate it in NumPy
L = np.arange(-radius, radius + 1)
X, Y = np.meshgrid(L, L, sparse=True)
if not strict_radius:
radius += 0.5
return cp.asarray((X * X + Y * Y) <= radius * radius, dtype=dtype)
elif decomposition == "sequence":
sequence = _nsphere_series_decomposition(radius, ndim=2, dtype=dtype)
elif decomposition == "crosses":
fp = disk(
radius, dtype, strict_radius=strict_radius, decomposition=None
)
sequence = _cross_decomposition(fp)
return sequence
def _cross(r0, r1, dtype=cp.uint8):
"""Cross-shaped structuring element of shape (r0, r1).
Only the central row and column are ones.
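Examples
--------
>>> # illustrative example: r0=1, r1=2 gives a 3 x 5 cross
>>> _cross(1, 2)
array([[0, 0, 1, 0, 0],
       [1, 1, 1, 1, 1],
       [0, 0, 1, 0, 0]], dtype=uint8)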
"""
s0 = int(2 * r0 + 1)
s1 = int(2 * r1 + 1)
c = cp.zeros((s0, s1), dtype=dtype)
if r1 != 0:
c[r0, :] = 1
if r0 != 0:
c[:, r1] = 1
return c
def _cross_decomposition(footprint, dtype=cp.uint8):
"""Decompose a symmetric convex footprint into cross-shaped elements.
This is a decomposition of the footprint into a sequence of
(possibly asymmetric) cross-shaped elements. This technique was proposed in
[1]_ and corresponds roughly to algorithm 1 of that publication (some
details had to be modified to get reliable operation).
.. [1] Li, D. and Ritter, G.X. Decomposition of Separable and Symmetric
Convex Templates. Proc. SPIE 1350, Image Algebra and Morphological
Image Processing, (1 November 1990).
:DOI:`10.1117/12.23608`
"""
footprint = cp.asnumpy(footprint)
quadrant = footprint[footprint.shape[0] // 2 :, footprint.shape[1] // 2 :]
col_sums = quadrant.sum(0, dtype=int)
col_sums = np.concatenate((col_sums, np.asarray([0], dtype=int)))
i_prev = 0
idx = {}
sum0 = 0
for i in range(col_sums.size - 1):
if col_sums[i] > col_sums[i + 1]:
if i == 0:
continue
key = (col_sums[i_prev] - col_sums[i], i - i_prev)
sum0 += key[0]
if key not in idx:
idx[key] = 1
else:
idx[key] += 1
i_prev = i
n = quadrant.shape[0] - 1 - sum0
if n > 0:
key = (n, 0)
idx[key] = idx.get(key, 0) + 1
return tuple([(_cross(r0, r1, dtype), n) for (r0, r1), n in idx.items()])
def ellipse(width, height, dtype=cp.uint8, *, decomposition=None):
"""Generates a flat, ellipse-shaped footprint.
Every pixel along the perimeter of the ellipse satisfies
the equation ``(x / (width + 1))**2 + (y / (height + 1))**2 = 1``.
Parameters
----------
width : int
The width of the ellipse-shaped footprint.
height : int
The height of the ellipse-shaped footprint.
Other Parameters
----------------
dtype : data-type, optional
The data type of the footprint.
decomposition : {None, 'crosses'}, optional
If None, a single array is returned. For 'crosses', a tuple of smaller
footprints is returned. Applying this series of smaller footprints will
give an identical result to a single, larger footprint, but with
better computational performance. See Notes for more details.
Returns
-------
footprint : cupy.ndarray
The footprint where elements of the neighborhood are 1 and 0 otherwise.
The footprint will have shape ``(2 * height + 1, 2 * width + 1)``.
Notes
-----
When `decomposition` is not None, each element of the `footprint`
tuple is a 2-tuple of the form ``(ndarray, num_iter)`` that specifies a
footprint array and the number of iterations it is to be applied.
The ellipse produced by the ``decomposition='crosses'`` is often but not
always identical to that with ``decomposition=None``. The method is based
on an adaptation of algorithm 1 given in [1]_.
References
----------
.. [1] Li, D. and Ritter, G.X. Decomposition of Separable and Symmetric
Convex Templates. Proc. SPIE 1350, Image Algebra and Morphological
Image Processing, (1 November 1990).
:DOI:`10.1117/12.23608`
Examples
--------
>>> from cucim.skimage.morphology import footprints
>>> footprints.ellipse(5, 3)
array([[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0]], dtype=uint8)
"""
try:
from skimage import draw
except ImportError:
raise ImportError("ellipse requires scikit-image")
if decomposition is None:
footprint = np.zeros((2 * height + 1, 2 * width + 1), dtype=dtype)
rows, cols = draw.ellipse(height, width, height + 1, width + 1)
footprint[rows, cols] = 1
# Note: no CUDA counterpart for draw.ellipse so compute in NumPy
# CuPy Backend: grid is usually small -> faster to generate it in NumPy
return cp.asarray(footprint)
elif decomposition == "crosses":
fp = ellipse(width, height, dtype, decomposition=None)
sequence = _cross_decomposition(fp)
return sequence
def cube(width, dtype=None, *, decomposition=None):
"""Generates a cube-shaped footprint.
This is the 3D equivalent of a square.
Every pixel along the perimeter has a chessboard distance
no greater than radius (radius=floor(width/2)) pixels.
Parameters
----------
width : int
The width, height and depth of the cube.
Other Parameters
----------------
dtype : data-type or None, optional
The data type of the footprint. When None, a tuple will be returned in
place of the actual footprint array. This can be passed to grayscale
and binary morphology functions in place of an explicit array to avoid
array allocation overhead.
decomposition : {None, 'separable', 'sequence'}, optional
If None, a single array is returned. For 'sequence', a tuple of smaller
footprints is returned. Applying this series of smaller footprints will
give an identical result to a single, larger footprint, but often with
better computational performance. See Notes for more details.
Returns
-------
footprint : cupy.ndarray
The footprint where elements of the neighborhood are 1 and 0 otherwise.
When `decomposition` is None, this is just a cupy.ndarray. Otherwise,
this will be a tuple whose length is equal to the number of unique
structuring elements to apply (see Notes for more detail)
Notes
-----
When `decomposition` is not None, each element of the `footprint`
tuple is a 2-tuple of the form ``(ndarray, num_iter)`` that specifies a
footprint array and the number of iterations it is to be applied.
For binary morphology, using ``decomposition='sequence'``
was observed to give better performance, with the magnitude of the
performance increase rapidly increasing with footprint size. For grayscale
morphology with cube footprints, it is recommended to use
``decomposition=None`` since the internal SciPy functions that are called
already have a fast implementation based on separable 1D sliding windows.
The 'sequence' decomposition mode only supports odd valued `width`. If
`width` is even, the sequence used will be identical to the 'separable'
mode.
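Examples
--------
>>> # illustrative example (expected output follows from the code above)
>>> from cucim.skimage.morphology import cube
>>> cube(3)  # default ``dtype=None`` returns just the shape tuple
(3, 3, 3)
>>> cube(5, decomposition="sequence")  # two passes of a 3x3x3 cube
(((3, 3, 3), 2),)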
"""
if decomposition is None:
if dtype is None:
return (width, width, width)
else:
return cp.ones((width, width, width), dtype=dtype)
if decomposition == "separable" or width % 2 == 0:
if dtype is None:
sequence = (
((width, 1, 1), 1),
((1, width, 1), 1),
((1, 1, width), 1),
)
else:
sequence = (
(cp.ones((width, 1, 1), dtype=dtype), 1),
(cp.ones((1, width, 1), dtype=dtype), 1),
(cp.ones((1, 1, width), dtype=dtype), 1),
)
elif decomposition == "sequence":
# only handles odd widths
if dtype is None:
sequence = (((3, 3, 3), _decompose_size(width, 3)),)
else:
sequence = (
(cp.ones((3, 3, 3), dtype=dtype), _decompose_size(width, 3)),
)
else:
raise ValueError(f"Unrecognized decomposition: {decomposition}")
return sequence
def octahedron(radius, dtype=cp.uint8, *, decomposition=None):
"""Generates a octahedron-shaped footprint.
This is the 3D equivalent of a diamond.
A pixel is part of the neighborhood (i.e. labeled 1) if
the city block/Manhattan distance between it and the center of
the neighborhood is no greater than radius.
Parameters
----------
radius : int
The radius of the octahedron-shaped footprint.
Other Parameters
----------------
dtype : data-type, optional
The data type of the footprint.
decomposition : {None, 'sequence'}, optional
If None, a single array is returned. For 'sequence', a tuple of smaller
footprints is returned. Applying this series of smaller footprints will
give an identical result to a single, larger footprint, but with
better computational performance. See Notes for more details.
Returns
-------
footprint : cupy.ndarray
The footprint where elements of the neighborhood are 1 and 0 otherwise.
When `decomposition` is None, this is just a cupy.ndarray. Otherwise,
this will be a tuple whose length is equal to the number of unique
structuring elements to apply (see Notes for more detail)
Notes
-----
When `decomposition` is not None, each element of the `footprint`
tuple is a 2-tuple of the form ``(ndarray, num_iter)`` that specifies a
footprint array and the number of iterations it is to be applied.
For either binary or grayscale morphology, using
``decomposition='sequence'`` was observed to have a performance benefit,
with the magnitude of the benefit increasing with increasing footprint
size.
"""
# note that in contrast to diamond(), this method allows non-integer radii
if decomposition is None:
n = 2 * radius + 1
Z, Y, X = np.ogrid[
-radius : radius : n * 1j,
-radius : radius : n * 1j,
-radius : radius : n * 1j,
]
s = np.abs(X) + np.abs(Y) + np.abs(Z)
footprint = cp.array(s <= radius, dtype=dtype)
elif decomposition == "sequence":
fp = octahedron(1, dtype=dtype, decomposition=None)
nreps = _decompose_size(2 * radius + 1, fp.shape[0])
footprint = ((fp, nreps),)
else:
raise ValueError(f"Unrecognized decomposition: {decomposition}")
return footprint
def ball(radius, dtype=cp.uint8, *, strict_radius=True, decomposition=None):
"""Generates a ball-shaped footprint.
This is the 3D equivalent of a disk.
A pixel is within the neighborhood if the Euclidean distance between
it and the origin is no greater than radius.
Parameters
----------
radius : int
The radius of the ball-shaped footprint.
Other Parameters
----------------
dtype : data-type, optional
The data type of the footprint.
strict_radius : bool, optional
If False, extend the radius by 0.5. This allows the circle to expand
further within a cube that remains of size ``2 * radius + 1`` along
each axis. This parameter is ignored if decomposition is not None.
decomposition : {None, 'sequence'}, optional
If None, a single array is returned. For 'sequence', a tuple of smaller
footprints is returned. Applying this series of smaller footprints will
give a result equivalent to a single, larger footprint, but with
better computational performance. For ball footprints, the sequence
decomposition is not exactly equivalent to decomposition=None.
See Notes for more details.
Returns
-------
footprint : cupy.ndarray
The footprint where elements of the neighborhood are 1 and 0 otherwise.
Notes
-----
The ball produced by the decomposition='sequence' mode is not identical
to that with decomposition=None. Here we extend the approach taken in [1]_
for disks to the 3D case, using 3-dimensional extensions of the "square",
"diamond" and "t-shaped" elements from that publication. All of these
elementary elements have size ``(3,) * ndim``. We numerically computed the
number of repetitions of each element that gives the closest match to the
ball computed with kwargs ``strict_radius=False, decomposition=None``.
Empirically, the equivalent composite footprint to the sequence
decomposition approaches a rhombicuboctahedron (26-faces [2]_).
References
----------
.. [1] Park, H and Chin R.T. Decomposition of structuring elements for
optimal implementation of morphological operations. In Proceedings:
1997 IEEE Workshop on Nonlinear Signal and Image Processing, London,
UK.
https://www.iwaenc.org/proceedings/1997/nsip97/pdf/scan/ns970226.pdf
.. [2] https://en.wikipedia.org/wiki/Rhombicuboctahedron
"""
if decomposition is None:
n = 2 * radius + 1
Z, Y, X = np.ogrid[
-radius : radius : n * 1j,
-radius : radius : n * 1j,
-radius : radius : n * 1j,
]
s = X * X + Y * Y + Z * Z
if not strict_radius:
radius += 0.5
return cp.array(s <= radius * radius, dtype=dtype)
elif decomposition == "sequence":
sequence = _nsphere_series_decomposition(radius, ndim=3, dtype=dtype)
else:
raise ValueError(f"Unrecognized decomposition: {decomposition}")
return sequence
def octagon(m, n, dtype=cp.uint8, *, decomposition=None):
"""Generates an octagon shaped footprint.
An octagon is generated for a given size ``m`` of the horizontal and
vertical sides and a given height or width ``n`` of the slanted sides.
The slanted sides are 45 or 135 degrees to the horizontal axis
and hence the widths and heights are equal. The overall size of the
footprint along a single axis will be ``m + 2 * n``.
Parameters
----------
m : int
The size of the horizontal and vertical sides.
n : int
The height or width of the slanted sides.
Other Parameters
----------------
dtype : data-type, optional
The data type of the footprint.
decomposition : {None, 'sequence'}, optional
If None, a single array is returned. For 'sequence', a tuple of smaller
footprints is returned. Applying this series of smaller footprints will
give an identical result to a single, larger footprint, but with
better computational performance. See Notes for more details.
Returns
-------
footprint : cupy.ndarray
The footprint where elements of the neighborhood are 1 and 0 otherwise.
When `decomposition` is None, this is just a cupy.ndarray. Otherwise,
this will be a tuple whose length is equal to the number of unique
structuring elements to apply (see Notes for more detail)
Notes
-----
When `decomposition` is not None, each element of the `footprint`
tuple is a 2-tuple of the form ``(ndarray, num_iter)`` that specifies a
footprint array and the number of iterations it is to be applied.
For either binary or grayscale morphology, using
``decomposition='sequence'`` was observed to have a performance benefit,
with the magnitude of the benefit increasing with increasing footprint
size.
"""
try:
from skimage.morphology import convex_hull_image
except ImportError:
raise ImportError("octagon requires scikit-image")
if m == n == 0:
raise ValueError("m and n cannot both be zero")
if decomposition is None:
footprint = np.zeros((m + 2 * n, m + 2 * n))
footprint[0, n] = 1
footprint[n, 0] = 1
footprint[0, m + n - 1] = 1
footprint[m + n - 1, 0] = 1
footprint[-1, n] = 1
footprint[n, -1] = 1
footprint[-1, m + n - 1] = 1
footprint[m + n - 1, -1] = 1
footprint = convex_hull_image(footprint).astype(dtype)
footprint = cp.array(footprint)
elif decomposition == "sequence":
# special handling for edge cases with small m and/or n
if m <= 2 and n <= 2:
return ((octagon(m, n, dtype=dtype, decomposition=None), 1),)
# general approach for larger m and/or n
if m == 0:
m = 2
n -= 1
sequence = []
if m > 1:
sequence += list(square(m, dtype=dtype, decomposition="sequence"))
if n > 0:
sequence += [(diamond(1, dtype=dtype, decomposition=None), n)]
footprint = tuple(sequence)
else:
raise ValueError(f"Unrecognized decomposition: {decomposition}")
return footprint
def star(a, dtype=cp.uint8):
"""Generates a star shaped footprint.
Star has 8 vertices and is an overlap of a square of size `2*a + 1`
with its 45 degree rotated version.
The slanted sides are 45 or 135 degrees to the horizontal axis.
Parameters
----------
a : int
Parameter deciding the size of the star structural element. The side
of the square array returned is `2*a + 1 + 2*floor(a / 2)`.
Other Parameters
----------------
dtype : data-type, optional
The data type of the footprint.
Returns
-------
footprint : cupy.ndarray
The footprint where elements of the neighborhood are 1 and 0 otherwise.
"""
try:
from skimage.morphology import convex_hull_image
except ImportError:
raise ImportError("star requires scikit-image")
if a == 1:
bfilter = cp.zeros((3, 3), dtype)
bfilter[:] = 1
return bfilter
m = 2 * a + 1
n = a // 2
footprint_square = np.zeros((m + 2 * n, m + 2 * n))
footprint_square[n : m + n, n : m + n] = 1
c = (m + 2 * n - 1) // 2
footprint_rotated = np.zeros((m + 2 * n, m + 2 * n))
footprint_rotated[0, c] = footprint_rotated[-1, c] = 1
footprint_rotated[c, 0] = footprint_rotated[c, -1] = 1
footprint_rotated = convex_hull_image(footprint_rotated).astype(int)
footprint = footprint_square + footprint_rotated
footprint[footprint > 0] = 1
return cp.array(footprint.astype(dtype, copy=False))
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/binary.py
|
"""
Binary morphological operations
"""
import functools
import cupy as cp
import cucim.skimage._vendored.ndimage as ndi
from .._shared.utils import deprecate_kwarg
from .footprints import _footprint_is_sequence
from .misc import default_footprint
def _iterate_binary_func(binary_func, image, footprint, out):
"""Helper to call `binary_func` for each footprint in a sequence.
binary_func is a binary morphology function that accepts "structure",
"output" and "iterations" keyword arguments
(e.g. `scipy.ndimage.binary_erosion`).
"""
# TODO (performance):
# `cupyx.scipy.ndimage` binary morphology implementation only supports
# `brute_force=True`. Update here if a more efficient method for
# `iterations > 1` is added.
fp, num_iter = footprint[0]
binary_func(
image, structure=fp, output=out, iterations=num_iter, brute_force=True
)
for fp, num_iter in footprint[1:]:
# Note: out.copy() because the computation cannot be in-place!
# SciPy <= 1.7 did not automatically make a copy if needed.
binary_func(
out.copy(),
structure=fp,
output=out,
iterations=num_iter,
brute_force=True,
)
return out
# The default_footprint decorator provides a diamond footprint as
# default with the same dimension as the input image and size 3 along each
# axis.
@default_footprint
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
def binary_erosion(image, footprint=None, out=None):
"""Return fast binary morphological erosion of an image.
This function returns the same result as grayscale erosion but performs
faster for binary images.
Morphological erosion sets a pixel at ``(i,j)`` to the minimum over all
pixels in the neighborhood centered at ``(i,j)``. Erosion shrinks bright
regions and enlarges dark regions.
Parameters
----------
image : ndarray
Binary input image.
footprint : ndarray or tuple, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped footprint (connectivity=1). The footprint
can also be provided as a sequence of smaller footprints as described
in the notes below.
out : ndarray of bool, optional
The array to store the result of the morphology. If None is
passed, a new array will be allocated.
Returns
-------
eroded : ndarray of bool or uint
The result of the morphological erosion taking values in
``[False, True]``.
Notes
-----
The footprint can also be provided as a sequence of 2-tuples where the
first element of each 2-tuple is a footprint ndarray and the second element
is an integer describing the number of times it should be iterated. For
example ``footprint=[(cp.ones((9, 1)), 1), (cp.ones((1, 9)), 1)]``
would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net
effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower
computational cost. Most of the builtin footprints such as
``skimage.morphology.disk`` provide an option to automatically generate a
footprint sequence of this type.
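Examples
--------
>>> # illustrative example: erosion with a cross shrinks a 3x3 block to a
>>> # single pixel (output derived from the implementation above)
>>> import cupy as cp
>>> from cucim.skimage.morphology import binary_erosion, diamond
>>> bright_square = cp.zeros((5, 5), dtype=bool)
>>> bright_square[1:4, 1:4] = True
>>> binary_erosion(bright_square, diamond(1))
array([[False, False, False, False, False],
       [False, False, False, False, False],
       [False, False,  True, False, False],
       [False, False, False, False, False],
       [False, False, False, False, False]])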
"""
if out is None:
out = cp.empty(image.shape, dtype=bool)
if _footprint_is_sequence(footprint):
binary_func = functools.partial(ndi.binary_erosion, border_value=True)
return _iterate_binary_func(binary_func, image, footprint, out)
ndi.binary_erosion(
image, structure=footprint, output=out, border_value=True
)
return out
@default_footprint
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
def binary_dilation(image, footprint=None, out=None):
"""Return fast binary morphological dilation of an image.
This function returns the same result as grayscale dilation but performs
faster for binary images.
Morphological dilation sets a pixel at ``(i,j)`` to the maximum over all
pixels in the neighborhood centered at ``(i,j)``. Dilation enlarges bright
regions and shrinks dark regions.
Parameters
----------
image : ndarray
Binary input image.
footprint : ndarray or tuple, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped footprint (connectivity=1). The footprint
can also be provided as a sequence of smaller footprints as described
in the notes below.
out : ndarray of bool, optional
The array to store the result of the morphology. If None is
passed, a new array will be allocated.
Returns
-------
dilated : ndarray of bool or uint
The result of the morphological dilation with values in
``[False, True]``.
Notes
-----
The footprint can also be provided as a sequence of 2-tuples where the
first element of each 2-tuple is a footprint ndarray and the second element
is an integer describing the number of times it should be iterated. For
example ``footprint=[(cp.ones((9, 1)), 1), (cp.ones((1, 9)), 1)]``
would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net
effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower
computational cost. Most of the builtin footprints such as
``skimage.morphology.disk`` provide an option to automatically generate a
footprint sequence of this type.
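Examples
--------
>>> # illustrative example: dilation with a cross grows a single pixel into
>>> # a small diamond (output derived from the implementation above)
>>> import cupy as cp
>>> from cucim.skimage.morphology import binary_dilation, diamond
>>> bright_pixel = cp.zeros((5, 5), dtype=bool)
>>> bright_pixel[2, 2] = True
>>> binary_dilation(bright_pixel, diamond(1))
array([[False, False, False, False, False],
       [False, False,  True, False, False],
       [False,  True,  True,  True, False],
       [False, False,  True, False, False],
       [False, False, False, False, False]])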
"""
if out is None:
out = cp.empty(image.shape, dtype=bool)
if _footprint_is_sequence(footprint):
return _iterate_binary_func(ndi.binary_dilation, image, footprint, out)
ndi.binary_dilation(image, structure=footprint, output=out)
return out
@default_footprint
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
def binary_opening(image, footprint=None, out=None):
"""Return fast binary morphological opening of an image.
This function returns the same result as grayscale opening but performs
faster for binary images.
The morphological opening on an image is defined as an erosion followed by
a dilation. Opening can remove small bright spots (i.e. "salt") and connect
small dark cracks. This tends to "open" up (dark) gaps between (bright)
features.
Parameters
----------
image : ndarray
Binary input image.
footprint : ndarray or tuple, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped footprint (connectivity=1). The footprint
can also be provided as a sequence of smaller footprints as described
in the notes below.
out : ndarray of bool, optional
The array to store the result of the morphology. If None
is passed, a new array will be allocated.
Returns
-------
opening : ndarray of bool
The result of the morphological opening.
Notes
-----
The footprint can also be provided as a sequence of 2-tuples where the
first element of each 2-tuple is a footprint ndarray and the second element
is an integer describing the number of times it should be iterated. For
example ``footprint=[(cp.ones((9, 1)), 1), (cp.ones((1, 9)), 1)]``
would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net
effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower
computational cost. Most of the builtin footprints such as
``skimage.morphology.disk`` provide an option to automatically generate a
footprint sequence of this type.
"""
eroded = binary_erosion(image, footprint)
out = binary_dilation(eroded, footprint, out=out)
return out
@default_footprint
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
def binary_closing(image, footprint=None, out=None):
"""Return fast binary morphological closing of an image.
This function returns the same result as grayscale closing but performs
faster for binary images.
The morphological closing on an image is defined as a dilation followed by
an erosion. Closing can remove small dark spots (i.e. "pepper") and connect
small bright cracks. This tends to "close" up (dark) gaps between (bright)
features.
Parameters
----------
image : ndarray
Binary input image.
footprint : ndarray or tuple, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped footprint (connectivity=1). The footprint
can also be provided as a sequence of smaller footprints as described
in the notes below.
out : ndarray of bool, optional
The array to store the result of the morphology. If None is
passed, a new array will be allocated.
Returns
-------
closing : ndarray of bool
The result of the morphological closing.
Notes
-----
The footprint can also be provided as a sequence of 2-tuples where the
first element of each 2-tuple is a footprint ndarray and the second element
is an integer describing the number of times it should be iterated. For
example ``footprint=[(cp.ones((9, 1)), 1), (cp.ones((1, 9)), 1)]``
would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net
effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower
computational cost. Most of the builtin footprints such as
``skimage.morphology.disk`` provide an option to automatically generate a
footprint sequence of this type.
"""
dilated = binary_dilation(image, footprint)
out = binary_erosion(dilated, footprint, out=out)
return out
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/gray.py
|
"""
Grayscale morphological operations
"""
import functools
import cupy as cp
import cucim.skimage._vendored.ndimage as ndi
from .._shared.utils import deprecate_kwarg
from .._vendored import pad
from ..util import crop
from .footprints import _footprint_is_sequence, _shape_from_sequence
from .misc import default_footprint
__all__ = [
"erosion",
"dilation",
"opening",
"closing",
"white_tophat",
"black_tophat",
]
def _iterate_gray_func(gray_func, image, footprints, out):
"""Helper to call `binary_func` for each footprint in a sequence.
binary_func is a binary morphology function that accepts "structure",
"output" and "iterations" keyword arguments
(e.g. `scipy.ndimage.binary_erosion`).
"""
fp, num_iter = footprints[0]
tuple_fp = isinstance(fp, tuple)
if tuple_fp:
gray_func(image, size=fp, output=out)
else:
gray_func(image, footprint=fp, output=out)
for _ in range(1, num_iter):
if tuple_fp:
gray_func(out.copy(), size=fp, output=out)
else:
gray_func(out.copy(), footprint=fp, output=out)
for fp, num_iter in footprints[1:]:
tuple_fp = isinstance(fp, tuple)
# Note: out.copy() because the computation cannot be in-place!
for _ in range(num_iter):
if tuple_fp:
gray_func(out.copy(), size=fp, output=out)
else:
gray_func(out.copy(), footprint=fp, output=out)
return out
def _shift_footprint(footprint, shift_x, shift_y):
"""Shift the binary image `footprint` in the left and/or up.
This only affects 2D footprints with even number of rows
or columns.
Parameters
----------
footprint : 2D array, shape (M, N)
The input footprint.
shift_x, shift_y : bool
Whether to move `footprint` along each axis.
Returns
-------
out : 2D array, shape (M + int(shift_x), N + int(shift_y))
The shifted footprint.
"""
if isinstance(footprint, tuple):
if len(footprint) == 2 and any(s % 2 == 0 for s in footprint):
# have to use an explicit array to shift the footprint below
footprint = cp.ones(footprint, dtype=bool)
else:
# no shift needed
return footprint
if footprint.ndim != 2:
# do nothing for 1D or 3D or higher footprints
return footprint
m, n = footprint.shape
if m % 2 == 0:
extra_row = cp.zeros((1, n), footprint.dtype)
if shift_x:
footprint = cp.vstack((footprint, extra_row))
else:
footprint = cp.vstack((extra_row, footprint))
m += 1
if n % 2 == 0:
extra_col = cp.zeros((m, 1), footprint.dtype)
if shift_y:
footprint = cp.hstack((footprint, extra_col))
else:
footprint = cp.hstack((extra_col, footprint))
return footprint
def _invert_footprint(footprint):
"""Change the order of the values in `footprint`.
This is a patch for the *weird* footprint inversion in
`ndi.grey_morphology` [1]_.
Parameters
----------
footprint : array
The input footprint.
Returns
-------
inverted : array, same shape and type as `footprint`
The footprint, in opposite order.
Examples
--------
>>> footprint = cp.asarray([[0, 0, 0], [0, 1, 1], [0, 1, 1]], cp.uint8)
>>> _invert_footprint(footprint)
array([[1, 1, 0],
[1, 1, 0],
[0, 0, 0]], dtype=uint8)
References
----------
.. [1] https://github.com/scipy/scipy/blob/ec20ababa400e39ac3ffc9148c01ef86d5349332/scipy/ndimage/morphology.py#L1285
""" # noqa: E501
if isinstance(footprint, tuple):
# fully populated rectangle is symmetric
return footprint
inverted = footprint[(slice(None, None, -1),) * footprint.ndim]
return inverted
def pad_for_eccentric_footprints(func):
"""Pad input images for certain morphological operations.
Parameters
----------
func : callable
A morphological function, either opening or closing, that
supports eccentric footprints. Its parameters must
include at least `image`, `footprint`, and `out`.
Returns
-------
func_out : callable
The same function, but correctly padding the input image before
applying the input function.
See Also
--------
opening, closing.
"""
@functools.wraps(func)
def func_out(image, footprint, out=None, *args, **kwargs):
pad_widths = []
padding = False
if out is None:
out = cp.empty_like(image)
if _footprint_is_sequence(footprint):
# Note: in practice none of our built-in footprint sequences will
# require padding (all are symmetric and have odd sizes)
footprint_shape = _shape_from_sequence(footprint)
elif isinstance(footprint, tuple):
footprint_shape = footprint
else:
footprint_shape = footprint.shape
for axis_len in footprint_shape:
if axis_len % 2 == 0:
axis_pad_width = axis_len - 1
padding = True
else:
axis_pad_width = 0
pad_widths.append((axis_pad_width,) * 2)
if padding:
image = pad(image, pad_widths, mode="edge")
out_temp = cp.empty_like(image)
else:
out_temp = out
out_temp = func(image, footprint, out=out_temp, *args, **kwargs)
if padding:
out[:] = crop(out_temp, pad_widths)
else:
out = out_temp
return out
return func_out
@default_footprint
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
def erosion(image, footprint=None, out=None, shift_x=False, shift_y=False):
"""Return grayscale morphological erosion of an image.
Morphological erosion sets a pixel at (i,j) to the minimum over all pixels
in the neighborhood centered at (i,j). Erosion shrinks bright regions and
enlarges dark regions.
Parameters
----------
image : cupy.ndarray
Image array.
footprint : cupy.ndarray, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped footprint (connectivity=1). The footprint
can also be provided as a sequence of smaller footprints as described
in the notes below.
out : cupy.ndarray, optional
The array to store the result of the morphology. If None is
passed, a new array will be allocated.
shift_x, shift_y : bool, optional
Shift footprint about center point. This only affects 2D
eccentric footprints (i.e., footprints with even-numbered
sides).
Returns
-------
eroded : cupy.ndarray, same shape as `image`
The result of the morphological erosion.
Notes
-----
For ``uint8`` (and ``uint16`` up to a certain bit-depth) data, the
lower algorithm complexity makes the `skimage.filters.rank.minimum`
function more efficient for larger images and footprints.
The footprint can also be provided as a sequence of 2-tuples where the
first element of each 2-tuple is a footprint ndarray and the second element
is an integer describing the number of times it should be iterated. For
example ``footprint=[(cp.ones((9, 1)), 1), (cp.ones((1, 9)), 1)]``
would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net
effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower
computational cost. Most of the builtin footprints such as
``skimage.morphology.disk`` provide an option to automatically generate a
footprint sequence of this type.
Examples
--------
>>> # Erosion shrinks bright regions
>>> import cupy as cp
>>> from cucim.skimage.morphology import square
>>> bright_square = cp.asarray([[0, 0, 0, 0, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 1, 1, 1, 0],
... [0, 0, 0, 0, 0]], dtype=cp.uint8)
>>> erosion(bright_square, square(3))
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
if out is None:
out = cp.empty_like(image)
if _footprint_is_sequence(footprint):
footprints = tuple(
(_shift_footprint(fp, shift_x, shift_y), n) for fp, n in footprint
)
return _iterate_gray_func(ndi.grey_erosion, image, footprints, out)
if isinstance(footprint, tuple):
if len(footprint) != image.ndim:
raise ValueError(
"footprint.ndim={len(footprint)}, image.ndim={image.ndim}"
)
if image.ndim == 2 and any(s % 2 == 0 for s in footprint):
# only odd-shaped footprints are properly handled for tuples
footprint = cp.ones(footprint, dtype=bool)
else:
ndi.grey_erosion(image, size=footprint, output=out)
return out
footprint = _shift_footprint(footprint, shift_x, shift_y)
ndi.grey_erosion(image, footprint=footprint, output=out)
return out
@default_footprint
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
def dilation(image, footprint=None, out=None, shift_x=False, shift_y=False):
"""Return grayscale morphological dilation of an image.
Morphological dilation sets the value of a pixel to the maximum over all
pixel values within a local neighborhood centered about it. The values
where the footprint is 1 define this neighborhood.
Dilation enlarges bright regions and shrinks dark regions.
Parameters
----------
image : cupy.ndarray
Image array.
footprint : cupy.ndarray, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped footprint (connectivity=1). The footprint
can also be provided as a sequence of smaller footprints as described
in the notes below.
out : cupy.ndarray, optional
The array to store the result of the morphology. If None is
passed, a new array will be allocated.
shift_x, shift_y : bool, optional
Shift footprint about center point. This only affects 2D
eccentric footprints (i.e., footprints with even-numbered
sides).
Returns
-------
dilated : cupy.ndarray, same shape and type as `image`
The result of the morphological dilation.
Notes
-----
For `uint8` (and `uint16` up to a certain bit-depth) data, the lower
algorithm complexity makes the `skimage.filters.rank.maximum` function more
efficient for larger images and footprints.
The footprint can also be provided as a sequence of 2-tuples where the
first element of each 2-tuple is a footprint ndarray and the second element
is an integer describing the number of times it should be iterated. For
example ``footprint=[(cp.ones((9, 1)), 1), (cp.ones((1, 9)), 1)]``
would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net
effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower
computational cost. Most of the builtin footprints such as
``skimage.morphology.disk`` provide an option to automatically generate a
footprint sequence of this type.
Examples
--------
>>> # Dilation enlarges bright regions
>>> import cupy as cp
>>> from cucim.skimage.morphology import square
>>> bright_pixel = cp.asarray([[0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0],
... [0, 0, 1, 0, 0],
... [0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0]], dtype=cp.uint8)
>>> dilation(bright_pixel, square(3))
array([[0, 0, 0, 0, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 1, 1, 1, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
if out is None:
out = cp.empty_like(image)
if _footprint_is_sequence(footprint):
# shift and invert (see comment below) each footprint
footprints = tuple(
(_invert_footprint(_shift_footprint(fp, shift_x, shift_y)), n)
for fp, n in footprint
)
return _iterate_gray_func(ndi.grey_dilation, image, footprints, out)
if isinstance(footprint, tuple):
if len(footprint) != image.ndim:
raise ValueError(
"footprint.ndim={len(footprint)}, image.ndim={image.ndim}"
)
if image.ndim == 2 and any(s % 2 == 0 for s in footprint):
# only odd-shaped footprints are properly handled for tuples
footprint = cp.ones(footprint, dtype=bool)
else:
ndi.grey_dilation(image, size=footprint, output=out)
return out
footprint = _shift_footprint(footprint, shift_x, shift_y)
# Inside ndi.grey_dilation, the footprint is inverted,
# e.g. `footprint = footprint[::-1, ::-1]` for 2D [1]_, for reasons unknown
# to this author (@jni). To "patch" this behaviour, we invert our own
# footprint before passing it to `ndi.grey_dilation`.
# [1] https://github.com/scipy/scipy/blob/ec20ababa400e39ac3ffc9148c01ef86d5349332/scipy/ndimage/morphology.py#L1285 # noqa
footprint = _invert_footprint(footprint)
ndi.grey_dilation(image, footprint=footprint, output=out)
return out
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
@default_footprint
@pad_for_eccentric_footprints
def opening(image, footprint=None, out=None):
"""Return grayscale morphological opening of an image.
The morphological opening of an image is defined as an erosion followed by
a dilation. Opening can remove small bright spots (i.e. "salt") and connect
small dark cracks. This tends to "open" up (dark) gaps between (bright)
features.
Parameters
----------
image : cupy.ndarray
Image array.
footprint : cupy.ndarray, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped footprint (connectivity=1). The footprint
can also be provided as a sequence of smaller footprints as described
in the notes below.
out : cupy.ndarray, optional
The array to store the result of the morphology. If None
is passed, a new array will be allocated.
Returns
-------
opening : cupy.ndarray, same shape and type as `image`
The result of the morphological opening.
Notes
-----
The footprint can also be provided as a sequence of 2-tuples where the
first element of each 2-tuple is a footprint ndarray and the second element
is an integer describing the number of times it should be iterated. For
example ``footprint=[(cp.ones((9, 1)), 1), (cp.ones((1, 9)), 1)]``
would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net
effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower
computational cost. Most of the builtin footprints such as
``skimage.morphology.disk`` provide an option to automatically generate a
footprint sequence of this type.
Examples
--------
>>> # Open up gap between two bright regions (but also shrink regions)
>>> import cupy as cp
>>> from cucim.skimage.morphology import square
>>> bad_connection = cp.asarray([[1, 0, 0, 0, 1],
... [1, 1, 0, 1, 1],
... [1, 1, 1, 1, 1],
... [1, 1, 0, 1, 1],
... [1, 0, 0, 0, 1]], dtype=cp.uint8)
>>> opening(bad_connection, square(3))
array([[0, 0, 0, 0, 0],
[1, 1, 0, 1, 1],
[1, 1, 0, 1, 1],
[1, 1, 0, 1, 1],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
eroded = erosion(image, footprint)
# note: shift_x, shift_y do nothing if footprint side length is odd
out = dilation(eroded, footprint, out=out, shift_x=True, shift_y=True)
return out
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
@default_footprint
@pad_for_eccentric_footprints
def closing(image, footprint=None, out=None):
"""Return grayscale morphological closing of an image.
The morphological closing of an image is defined as a dilation followed by
an erosion. Closing can remove small dark spots (i.e. "pepper") and connect
small bright cracks. This tends to "close" up (dark) gaps between (bright)
features.
Parameters
----------
image : cupy.ndarray
Image array.
footprint : cupy.ndarray, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped footprint (connectivity=1). The footprint
can also be provided as a sequence of smaller footprints as described
in the notes below.
out : cupy.ndarray, optional
The array to store the result of the morphology. If None,
a new array will be allocated.
Returns
-------
closing : cupy.ndarray, same shape and type as `image`
The result of the morphological closing.
Notes
-----
The footprint can also be provided as a sequence of 2-tuples where the
first element of each 2-tuple is a footprint ndarray and the second element
is an integer describing the number of times it should be iterated. For
example ``footprint=[(cp.ones((9, 1)), 1), (cp.ones((1, 9)), 1)]``
would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net
effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower
computational cost. Most of the builtin footprints such as
``skimage.morphology.disk`` provide an option to automatically generate a
footprint sequence of this type.
Examples
--------
>>> # Close a gap between two bright lines
>>> import cupy as cp
>>> from cucim.skimage.morphology import square
>>> broken_line = cp.asarray([[0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0],
... [1, 1, 0, 1, 1],
... [0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0]], dtype=cp.uint8)
>>> closing(broken_line, square(3))
array([[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0],
[1, 1, 1, 1, 1],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
dilated = dilation(image, footprint)
# note: shift_x, shift_y do nothing if footprint side length is odd
out = erosion(dilated, footprint, out=out, shift_x=True, shift_y=True)
return out
def _white_tophat_sequence(image, footprints, out):
"""Return white top hat for a sequence of footprints.
Like SciPy's implementation, but with ``ndi.grey_erosion`` and
``ndi.grey_dilation`` wrapped with ``_iterate_gray_func``.
"""
tmp = _iterate_gray_func(ndi.grey_erosion, image, footprints, out)
tmp = _iterate_gray_func(ndi.grey_dilation, tmp.copy(), footprints, out)
if tmp is None:
tmp = out
if image.dtype == cp.bool_ and tmp.dtype == cp.bool_:
cp.bitwise_xor(image, tmp, out=tmp)
else:
cp.subtract(image, tmp, out=tmp)
return tmp
@default_footprint
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
def white_tophat(image, footprint=None, out=None):
"""Return white top hat of an image.
The white top hat of an image is defined as the image minus its
morphological opening. This operation returns the bright spots of the image
that are smaller than the footprint.
Parameters
----------
image : cupy.ndarray
Image array.
footprint : cupy.ndarray, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped footprint (connectivity=1). The footprint
can also be provided as a sequence of smaller footprints as described
in the notes below.
out : cupy.ndarray, optional
The array to store the result of the morphology. If None
is passed, a new array will be allocated.
Returns
-------
out : cupy.ndarray, same shape and type as `image`
The result of the morphological white top hat.
Notes
-----
    The footprint can also be provided as a sequence of 2-tuples where the
first element of each 2-tuple is a footprint ndarray and the second element
is an integer describing the number of times it should be iterated. For
example ``footprint=[(cp.ones((9, 1)), 1), (cp.ones((1, 9)), 1)]``
would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net
effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower
computational cost. Most of the builtin footprints such as
``skimage.morphology.disk`` provide an option to automatically generate a
footprint sequence of this type.
See Also
--------
black_tophat
References
----------
.. [1] https://en.wikipedia.org/wiki/Top-hat_transform
Examples
--------
>>> # Subtract grey background from bright peak
>>> import cupy as cp
>>> from cucim.skimage.morphology import square
>>> bright_on_grey = cp.asarray([[2, 3, 3, 3, 2],
... [3, 4, 5, 4, 3],
... [3, 5, 9, 5, 3],
... [3, 4, 5, 4, 3],
... [2, 3, 3, 3, 2]], dtype=cp.uint8)
>>> white_tophat(bright_on_grey, square(3))
array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 5, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
if out is image:
opened = opening(image, footprint)
if cp.issubdtype(opened.dtype, cp.bool_):
cp.logical_xor(out, opened, out=out)
else:
out -= opened
return out
elif out is None:
out = cp.empty_like(image)
# work-around for NumPy deprecation warning for arithmetic
# operations on bool arrays
if isinstance(image, cp.ndarray) and image.dtype == bool:
image_ = image.view(dtype=cp.uint8)
else:
image_ = image
if isinstance(out, cp.ndarray) and out.dtype == bool:
out_ = out.view(dtype=cp.uint8)
else:
out_ = out
if _footprint_is_sequence(footprint):
        return _white_tophat_sequence(image_, footprint, out_)
elif isinstance(footprint, tuple):
if len(footprint) != image.ndim:
raise ValueError(
"footprint.ndim={len(footprint)}, image.ndim={image.ndim}"
)
ndi.white_tophat(image, size=footprint, output=out)
return out
out_ = ndi.white_tophat(image_, footprint=footprint, output=out_)
return out
@default_footprint
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
def black_tophat(image, footprint=None, out=None):
"""Return black top hat of an image.
The black top hat of an image is defined as its morphological closing minus
the original image. This operation returns the dark spots of the image that
are smaller than the footprint. Note that dark spots in the
original image are bright spots after the black top hat.
Parameters
----------
image : cupy.ndarray
Image array.
footprint : cupy.ndarray, optional
The neighborhood expressed as a 2-D array of 1's and 0's.
If None, use a cross-shaped footprint (connectivity=1). The footprint
can also be provided as a sequence of smaller footprints as described
in the notes below.
out : cupy.ndarray, optional
The array to store the result of the morphology. If None
is passed, a new array will be allocated.
Returns
-------
out : cupy.ndarray, same shape and type as `image`
The result of the morphological black top hat.
Notes
-----
    The footprint can also be provided as a sequence of 2-tuples where the
first element of each 2-tuple is a footprint ndarray and the second element
is an integer describing the number of times it should be iterated. For
example ``footprint=[(cp.ones((9, 1)), 1), (cp.ones((1, 9)), 1)]``
would apply a 9x1 footprint followed by a 1x9 footprint resulting in a net
effect that is the same as ``footprint=cp.ones((9, 9))``, but with lower
computational cost. Most of the builtin footprints such as
``skimage.morphology.disk`` provide an option to automatically generate a
footprint sequence of this type.
See Also
--------
white_tophat
References
----------
.. [1] https://en.wikipedia.org/wiki/Top-hat_transform
Examples
--------
>>> # Change dark peak to bright peak and subtract background
>>> import cupy as cp
>>> from cucim.skimage.morphology import square
>>> dark_on_grey = cp.asarray([[7, 6, 6, 6, 7],
... [6, 5, 4, 5, 6],
... [6, 4, 0, 4, 6],
... [6, 5, 4, 5, 6],
... [7, 6, 6, 6, 7]], dtype=cp.uint8)
>>> black_tophat(dark_on_grey, square(3))
array([[0, 0, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 1, 5, 1, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 0, 0]], dtype=uint8)
"""
if out is image:
original = image.copy()
else:
original = image
out = closing(image, footprint, out=out)
if cp.issubdtype(out.dtype, bool):
cp.logical_xor(out, original, out=out)
else:
out -= original
return out
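# --- Illustrative sketch (not part of the public API) ---
# A hedged demo of the footprint-decomposition note in the docstrings above:
# a 9x1 footprint followed by a 1x9 footprint is described as having the same
# net effect as a full 9x9 footprint at lower cost. The random test image
# below is hypothetical and is only used to exercise that claim.
if __name__ == "__main__":
    cp.random.seed(0)
    demo_img = cp.random.randint(0, 256, (64, 64)).astype(cp.uint8)
    full = opening(demo_img, cp.ones((9, 9), dtype=cp.uint8))
    decomposed = opening(
        demo_img,
        [(cp.ones((9, 1), dtype=cp.uint8), 1),
         (cp.ones((1, 9), dtype=cp.uint8), 1)],
    )
    print("sequence matches full 9x9 footprint:",
          bool((full == decomposed).all()))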
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/misc.py
|
"""Miscellaneous morphology functions."""
import functools
import cupy as cp
import cucim.skimage._vendored.ndimage as ndi
from .._shared.utils import warn
# Our function names don't exactly correspond to ndimages.
# This dictionary translates from our names to scipy's.
funcs = ("erosion", "dilation", "opening", "closing")
skimage2ndimage = {x: "grey_" + x for x in funcs}
# These function names are the same in ndimage.
funcs = (
"binary_erosion",
"binary_dilation",
"binary_opening",
"binary_closing",
"black_tophat",
"white_tophat",
)
skimage2ndimage.update({x: x for x in funcs})
def default_footprint(func):
"""Decorator to add a default footprint to morphology functions.
Parameters
----------
func : function
A morphology function such as erosion, dilation, opening, closing,
white_tophat, or black_tophat.
Returns
-------
func_out : function
The function, using a default footprint of same dimension
as the input image with connectivity 1.
"""
@functools.wraps(func)
def func_out(image, footprint=None, *args, **kwargs):
if footprint is None:
footprint = ndi.generate_binary_structure(image.ndim, 1)
return func(image, footprint=footprint, *args, **kwargs)
return func_out
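# Illustration (hedged): for a 2-D image, the default footprint produced by
# ``ndi.generate_binary_structure(2, 1)`` is the 3x3 cross referred to as
# "connectivity 1" in the docstrings of this module:
#
#     [[False,  True, False],
#      [ True,  True,  True],
#      [False,  True, False]]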
def _check_dtype_supported(ar):
# Should use `issubdtype` for bool below, but there's a bug in numpy 1.7
if not (ar.dtype == bool or cp.issubdtype(ar.dtype, cp.integer)):
raise TypeError(
"Only bool or integer image types are supported. "
f"Got {ar.dtype}."
)
def remove_small_objects(ar, min_size=64, connectivity=1, *, out=None):
"""Remove objects smaller than the specified size.
Expects ar to be an array with labeled objects, and removes objects
smaller than min_size. If `ar` is bool, the image is first labeled.
This leads to potentially different behavior for bool and 0-and-1
arrays.
Parameters
----------
ar : ndarray (arbitrary shape, int or bool type)
The array containing the objects of interest. If the array type is
int, the ints must be non-negative.
min_size : int, optional (default: 64)
The smallest allowable object size.
connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
The connectivity defining the neighborhood of a pixel. Used during
labelling if `ar` is bool.
out : ndarray
Array of the same shape as `ar`, into which the output is
placed. By default, a new array is created.
Raises
------
TypeError
If the input array is of an invalid type, such as float or string.
ValueError
If the input array contains negative values.
Returns
-------
out : ndarray, same shape and type as input `ar`
The input array with small connected components removed.
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage import morphology
>>> a = cp.array([[0, 0, 0, 1, 0],
... [1, 1, 1, 0, 0],
... [1, 1, 1, 0, 1]], bool)
>>> b = morphology.remove_small_objects(a, 6)
>>> b
array([[False, False, False, False, False],
[ True, True, True, False, False],
[ True, True, True, False, False]])
>>> c = morphology.remove_small_objects(a, 7, connectivity=2)
>>> c
array([[False, False, False, True, False],
[ True, True, True, False, False],
[ True, True, True, False, False]])
>>> d = morphology.remove_small_objects(a, 6, out=a)
>>> d is a
True
"""
# Raising type error if not int or bool
_check_dtype_supported(ar)
if out is None:
out = ar.copy()
else:
out[:] = ar
if min_size == 0: # shortcut for efficiency
return out
if out.dtype == bool:
footprint = ndi.generate_binary_structure(ar.ndim, connectivity)
ccs = cp.zeros_like(ar, dtype=cp.int32)
ndi.label(ar, footprint, output=ccs)
else:
ccs = out
try:
component_sizes = cp.bincount(ccs.ravel())
except ValueError:
raise ValueError(
"Negative value labels are not supported. Try "
"relabeling the input with `scipy.ndimage.label` or "
"`skimage.morphology.label`."
)
if len(component_sizes) == 2 and out.dtype != bool:
warn(
"Only one label was provided to `remove_small_objects`. "
"Did you mean to use a boolean array?"
)
too_small = component_sizes < min_size
too_small_mask = too_small[ccs]
out[too_small_mask] = 0
return out
def remove_small_holes(ar, area_threshold=64, connectivity=1, *, out=None):
"""Remove contiguous holes smaller than the specified size.
Parameters
----------
ar : ndarray (arbitrary shape, int or bool type)
The array containing the connected components of interest.
area_threshold : int, optional (default: 64)
The maximum area, in pixels, of a contiguous hole that will be filled.
Replaces `min_size`.
connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
The connectivity defining the neighborhood of a pixel.
out : ndarray
Array of the same shape as `ar` and bool dtype, into which the
output is placed. By default, a new array is created.
Raises
------
TypeError
If the input array is of an invalid type, such as float or string.
ValueError
If the input array contains negative values.
Returns
-------
out : ndarray, same shape and type as input `ar`
The input array with small holes within connected components removed.
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage import morphology
>>> a = cp.array([[1, 1, 1, 1, 1, 0],
... [1, 1, 1, 0, 1, 0],
... [1, 0, 0, 1, 1, 0],
... [1, 1, 1, 1, 1, 0]], bool)
>>> b = morphology.remove_small_holes(a, 2)
>>> b
array([[ True, True, True, True, True, False],
[ True, True, True, True, True, False],
[ True, False, False, True, True, False],
[ True, True, True, True, True, False]])
>>> c = morphology.remove_small_holes(a, 2, connectivity=2)
>>> c
array([[ True, True, True, True, True, False],
[ True, True, True, False, True, False],
[ True, False, False, True, True, False],
[ True, True, True, True, True, False]])
>>> d = morphology.remove_small_holes(a, 2, out=a)
>>> d is a
True
Notes
-----
If the array type is int, it is assumed that it contains already-labeled
objects. The labels are not kept in the output image (this function always
outputs a bool image). It is suggested that labeling is completed after
using this function.
"""
_check_dtype_supported(ar)
# Creates warning if image is an integer image
if ar.dtype != bool:
warn(
"Any labeled images will be returned as a boolean array. "
"Did you mean to use a boolean array?",
UserWarning,
)
if out is not None:
if out.dtype != bool:
raise TypeError("out dtype must be bool")
else:
out = ar.astype(bool, copy=True)
# Creating the inverse of ar
cp.logical_not(ar, out=out)
# removing small objects from the inverse of ar
out = remove_small_objects(out, area_threshold, connectivity, out=out)
cp.logical_not(out, out=out)
return out
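# --- Illustrative sketch (not part of the public API) ---
# A hedged demo of the note in ``remove_small_objects``: boolean input is
# labelled first, so each connected component is measured separately, while
# an integer 0-and-1 array is treated as a single label (and triggers the
# "Only one label" warning). The tiny array below is hypothetical demo data.
if __name__ == "__main__":
    demo = cp.asarray([[0, 0, 1],
                       [1, 0, 0],
                       [1, 0, 1]])
    print(remove_small_objects(demo.astype(bool), min_size=2))
    print(remove_small_objects(demo, min_size=2))  # warns; removes nothing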
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/grayreconstruct.py
|
"""
This morphological reconstruction routine was adapted from CellProfiler, code
licensed under both GPL and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
import cupy as cp
import numpy as np
import skimage
from packaging.version import Version
from .._shared.utils import deprecate_kwarg
old_reconstruction_pyx = Version(skimage.__version__) < Version("0.20.0")
@deprecate_kwarg(
kwarg_mapping={"selem": "footprint"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
def reconstruction(seed, mask, method="dilation", footprint=None, offset=None):
"""Perform a morphological reconstruction of an image.
Morphological reconstruction by dilation is similar to basic morphological
dilation: high-intensity values will replace nearby low-intensity values.
The basic dilation operator, however, uses a footprint to
determine how far a value in the input image can spread. In contrast,
reconstruction uses two images: a "seed" image, which specifies the values
that spread, and a "mask" image, which gives the maximum allowed value at
each pixel. The mask image, like the footprint, limits the spread
of high-intensity values. Reconstruction by erosion is simply the inverse:
low-intensity values spread from the seed image and are limited by the mask
image, which represents the minimum allowed value.
Alternatively, you can think of reconstruction as a way to isolate the
connected regions of an image. For dilation, reconstruction connects
regions marked by local maxima in the seed image: neighboring pixels
less-than-or-equal-to those seeds are connected to the seeded region.
Local maxima with values larger than the seed image will get truncated to
the seed value.
Parameters
----------
seed : ndarray
The seed image (a.k.a. marker image), which specifies the values that
are dilated or eroded.
mask : ndarray
The maximum (dilation) / minimum (erosion) allowed value at each pixel.
method : {'dilation'|'erosion'}, optional
Perform reconstruction by dilation or erosion. In dilation (or
erosion), the seed image is dilated (or eroded) until limited by the
mask image. For dilation, each seed value must be less than or equal
to the corresponding mask value; for erosion, the reverse is true.
Default is 'dilation'.
footprint : ndarray, optional
The neighborhood expressed as an n-D array of 1's and 0's.
Default is the n-D square of radius equal to 1 (i.e. a 3x3 square
for 2D images, a 3x3x3 cube for 3D images, etc.)
offset : ndarray, optional
The coordinates of the center of the footprint.
Default is located on the geometrical center of the footprint, in that
case footprint dimensions must be odd.
Returns
-------
reconstructed : ndarray
The result of morphological reconstruction.
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.morphology import reconstruction
First, we create a sinusoidal mask image with peaks at middle and ends.
    >>> x = cp.linspace(0, 4 * cp.pi)
>>> y_mask = cp.cos(x)
Then, we create a seed image initialized to the minimum mask value (for
reconstruction by dilation, min-intensity values don't spread) and add
"seeds" to the left and right peak, but at a fraction of peak value (1).
>>> y_seed = y_mask.min() * cp.ones_like(x)
>>> y_seed[0] = 0.5
>>> y_seed[-1] = 0
>>> y_rec = reconstruction(y_seed, y_mask)
The reconstructed image (or curve, in this case) is exactly the same as the
mask image, except that the peaks are truncated to 0.5 and 0. The middle
peak disappears completely: Since there were no seed values in this peak
region, its reconstructed value is truncated to the surrounding value (-1).
As a more practical example, we try to extract the bright features of an
image by subtracting a background image created by reconstruction.
>>> y, x = cp.mgrid[:20:0.5, :20:0.5]
>>> bumps = cp.sin(x) + cp.sin(y)
To create the background image, set the mask image to the original image,
and the seed image to the original image with an intensity offset, `h`.
>>> h = 0.3
>>> seed = bumps - h
>>> background = reconstruction(seed, bumps)
The resulting reconstructed image looks exactly like the original image,
but with the peaks of the bumps cut off. Subtracting this reconstructed
image from the original image leaves just the peaks of the bumps
>>> hdome = bumps - background
This operation is known as the h-dome of the image and leaves features
of height `h` in the subtracted image.
Notes
-----
The algorithm is taken from [1]_. Applications for grayscale reconstruction
are discussed in [2]_ and [3]_.
References
----------
.. [1] Robinson, "Efficient morphological reconstruction: a downhill
filter", Pattern Recognition Letters 25 (2004) 1759-1767.
.. [2] Vincent, L., "Morphological Grayscale Reconstruction in Image
Analysis: Applications and Efficient Algorithms", IEEE Transactions
on Image Processing (1993)
.. [3] Soille, P., "Morphological Image Analysis: Principles and
Applications", Chapter 6, 2nd edition (2003), ISBN 3540429883.
"""
from ..filters._rank_order import rank_order
assert tuple(seed.shape) == tuple(mask.shape)
if method == "dilation" and cp.any(seed > mask): # synchronize!
raise ValueError(
"Intensity of seed image must be less than that "
"of the mask image for reconstruction by dilation."
)
elif method == "erosion" and cp.any(seed < mask): # synchronize!
raise ValueError(
"Intensity of seed image must be greater than that "
"of the mask image for reconstruction by erosion."
)
try:
from skimage.morphology._grayreconstruct import reconstruction_loop
except ImportError:
try:
from skimage.morphology._greyreconstruct import reconstruction_loop
except ImportError:
raise ImportError("reconstruction requires scikit-image")
if footprint is None:
footprint = np.ones([3] * seed.ndim, dtype=bool)
else:
if isinstance(footprint, cp.ndarray):
footprint = cp.asnumpy(footprint)
footprint = footprint.astype(bool, copy=True)
if offset is None:
if not all([d % 2 == 1 for d in footprint.shape]):
raise ValueError("Footprint dimensions must all be odd")
offset = np.array([d // 2 for d in footprint.shape])
else:
if isinstance(offset, cp.ndarray):
offset = cp.asnumpy(offset)
if offset.ndim != footprint.ndim:
raise ValueError("Offset and footprint ndims must be equal.")
if not all([(0 <= o < d) for o, d in zip(offset, footprint.shape)]):
raise ValueError("Offset must be included inside footprint")
# Cross out the center of the footprint
footprint[tuple(slice(d, d + 1) for d in offset)] = False
# Make padding for edges of reconstructed image so we can ignore boundaries
dims = (2,) + tuple(
s1 + s2 - 1 for s1, s2 in zip(seed.shape, footprint.shape)
)
inside_slices = tuple(slice(o, o + s) for o, s in zip(offset, seed.shape))
# Set padded region to minimum image intensity and mask along first axis so
# we can interleave image and mask pixels when sorting.
if method == "dilation":
pad_value = cp.min(seed).item()
elif method == "erosion":
pad_value = cp.max(seed).item()
else:
raise ValueError(
"Reconstruction method can be one of 'erosion' "
f"or 'dilation'. Got '{method}'."
)
# CuPy Backend: modified to allow images_dtype based on input dtype
# instead of float64
images_dtype = np.promote_types(seed.dtype, mask.dtype)
images = cp.full(dims, pad_value, dtype=images_dtype)
images[(0, *inside_slices)] = seed
images[(1, *inside_slices)] = mask
isize = images.size
if old_reconstruction_pyx:
# scikit-image < 0.20 Cython code only supports int32_t
signed_int_dtype = np.int32
unsigned_int_dtype = np.uint32
else:
# determine whether image is large enough to require 64-bit integers
# use -isize so we get a signed dtype rather than an unsigned one
signed_int_dtype = np.result_type(np.min_scalar_type(-isize), np.int32)
# the corresponding unsigned type has same char, but uppercase
unsigned_int_dtype = np.dtype(signed_int_dtype.char.upper())
# Create a list of strides across the array to get the neighbors within
# a flattened array
value_stride = np.array(images.strides[1:]) // images.dtype.itemsize
image_stride = images.strides[0] // images.dtype.itemsize
footprint_mgrid = np.mgrid[
[slice(-o, d - o) for d, o in zip(footprint.shape, offset)]
]
footprint_offsets = footprint_mgrid[:, footprint].transpose()
nb_strides = np.array(
[
np.sum(value_stride * footprint_offset)
for footprint_offset in footprint_offsets
],
signed_int_dtype,
)
# CuPy Backend: changed flatten to ravel to avoid copy
images = images.ravel()
# Erosion goes smallest to largest; dilation goes largest to smallest.
index_sorted = cp.argsort(images).astype(signed_int_dtype, copy=False)
if method == "dilation":
index_sorted = index_sorted[::-1]
# Make a linked list of pixels sorted by value. -1 is the list terminator.
index_sorted = cp.asnumpy(index_sorted)
prev = np.full(isize, -1, signed_int_dtype)
next = np.full(isize, -1, signed_int_dtype)
prev[index_sorted[1:]] = index_sorted[:-1]
next[index_sorted[:-1]] = index_sorted[1:]
# Cython inner-loop compares the rank of pixel values.
if method == "dilation":
value_rank, value_map = rank_order(images)
elif method == "erosion":
value_rank, value_map = rank_order(-images)
value_map = -value_map
# TODO: implement reconstruction_loop on the GPU? For now, run it on host.
start = index_sorted[0]
value_rank = cp.asnumpy(value_rank.astype(unsigned_int_dtype, copy=False))
reconstruction_loop(value_rank, prev, next, nb_strides, start, image_stride)
# Reshape reconstructed image to original image shape and remove padding.
value_rank = cp.asarray(value_rank[:image_stride])
rec_img = value_map[value_rank]
rec_img.shape = dims[1:]
return rec_img[inside_slices]
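# --- Illustrative sketch (not part of the public API) ---
# A hedged, runnable version of the h-dome example from the docstring above:
# reconstructing ``bumps - h`` under ``bumps`` yields a background whose peaks
# are clipped, so the difference isolates features of height roughly ``h``.
if __name__ == "__main__":
    y, x = cp.mgrid[:20:0.5, :20:0.5]
    bumps = cp.sin(x) + cp.sin(y)
    h = 0.3
    background = reconstruction(bumps - h, bumps)
    hdome = bumps - background
    print(float(hdome.max()))  # expected to be close to h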
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/greyreconstruct.py
|
import warnings
from .grayreconstruct import reconstruction # noqa
warnings.warn(
"Importing from cucim.skimage.morphology.greyreconstruct is deprecated. "
"Please import from cucim.skimage.morphology instead.",
FutureWarning,
stacklevel=2,
)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/_medial_axis_lookup.py
|
import numpy as np
# medial axis lookup tables (independent of image content)
#
# Note: lookup table generated using scikit-image code from
# https://github.com/scikit-image/scikit-image/blob/38b595d60befe3a0b4c0742995b9737200a079c6/skimage/morphology/_skeletonize.py#L449-L458 # noqa
# fmt: off
lookup_table = np.array(
[
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1,
0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0,
0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0,
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0,
0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0
],
dtype=bool,
)
cornerness_table = np.array(
[
9, 8, 8, 7, 8, 7, 7, 6, 8, 7, 7, 6, 7, 6, 6, 5, 8, 7, 7, 6, 7, 6,
6, 5, 7, 6, 6, 5, 6, 5, 5, 4, 8, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5,
6, 5, 5, 4, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 8, 7,
7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 4, 7, 6, 6, 5, 6, 5, 5, 4,
6, 5, 5, 4, 5, 4, 4, 3, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4,
4, 3, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 8, 7, 7, 6,
7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 4, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5,
5, 4, 5, 4, 4, 3, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3,
6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5,
5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3,
4, 3, 3, 2, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 5, 4,
4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 8, 7, 7, 6, 7, 6, 6, 5,
7, 6, 6, 5, 6, 5, 5, 4, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4,
4, 3, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3, 6, 5, 5, 4,
5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5,
5, 4, 5, 4, 4, 3, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2,
6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 5, 4, 4, 3, 4, 3,
3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4,
5, 4, 4, 3, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 6, 5,
5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3, 3, 2, 5, 4, 4, 3, 4, 3, 3, 2,
4, 3, 3, 2, 3, 2, 2, 1, 6, 5, 5, 4, 5, 4, 4, 3, 5, 4, 4, 3, 4, 3,
3, 2, 5, 4, 4, 3, 4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 5, 4, 4, 3,
4, 3, 3, 2, 4, 3, 3, 2, 3, 2, 2, 1, 4, 3, 3, 2, 3, 2, 2, 1, 3, 2,
2, 1, 2, 1, 1, 0
],
dtype=np.uint8,
)
# fmt: on
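# Note (hedged): both tables above have 512 entries, one per 3x3 binary
# neighborhood configuration. The index of a configuration is assumed to be
# formed as in ``_skeletonize._table_lookup``: each true pixel numbered k
# (row-major, 0..8) contributes 2**k. ``lookup_table`` encodes the keep/remove
# decision used by the medial-axis thinning loop, while ``cornerness_table``
# gives the number of background pixels in the neighborhood (9 for
# all-background, 0 for all-foreground).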
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/__init__.py
|
from ._skeletonize import medial_axis, thin
from .binary import (
binary_closing,
binary_dilation,
binary_erosion,
binary_opening,
)
from .footprints import (
ball,
cube,
diamond,
disk,
octagon,
octahedron,
rectangle,
square,
star,
)
from .gray import (
black_tophat,
closing,
dilation,
erosion,
opening,
white_tophat,
)
from .grayreconstruct import reconstruction
from .isotropic import (
isotropic_closing,
isotropic_dilation,
isotropic_erosion,
isotropic_opening,
)
from .misc import remove_small_holes, remove_small_objects
__all__ = [
"binary_erosion",
"binary_dilation",
"binary_opening",
"binary_closing",
"isotropic_dilation",
"isotropic_erosion",
"isotropic_opening",
"isotropic_closing",
"erosion",
"dilation",
"opening",
"closing",
"white_tophat",
"black_tophat",
"square",
"rectangle",
"diamond",
"disk",
"cube",
"octahedron",
"ball",
"octagon",
"star",
"reconstruction",
"remove_small_objects",
"remove_small_holes",
"thin",
"medial_axis",
]
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/_skeletonize.py
|
import warnings
import cupy as cp
import numpy as np
import cucim.skimage._vendored.ndimage as ndi
from cucim.core.operations.morphology import distance_transform_edt
from .._shared.utils import check_nD, deprecate_kwarg
from ._medial_axis_lookup import (
cornerness_table as _medial_axis_cornerness_table,
lookup_table as _medial_axis_lookup_table,
)
# --------- Skeletonization and thinning based on Guo and Hall 1989 ---------
# fmt: off
_G123_LUT = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0,
0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1,
0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1,
1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0,
0, 1, 1, 0, 0, 1, 0, 0, 0], dtype=bool)
_G123P_LUT = np.array([0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0,
0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0,
1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1,
0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=bool)
# fmt: on
@deprecate_kwarg(
{"max_iter": "max_num_iter"},
removed_version="23.02.00",
deprecated_version="22.02.00",
)
def thin(image, max_num_iter=None):
"""
Perform morphological thinning of a binary image.
Parameters
----------
image : binary (M, N) ndarray
The image to be thinned.
max_num_iter : int, number of iterations, optional
Regardless of the value of this parameter, the thinned image
is returned immediately if an iteration produces no change.
If this parameter is specified it thus sets an upper bound on
the number of iterations performed.
Returns
-------
out : ndarray of bool
Thinned image.
See Also
--------
medial_axis
Notes
-----
This algorithm [1]_ works by making multiple passes over the image,
removing pixels matching a set of criteria designed to thin
connected regions while preserving eight-connected components and
2 x 2 squares [2]_. In each of the two sub-iterations the algorithm
correlates the intermediate skeleton image with a neighborhood mask,
then looks up each neighborhood in a lookup table indicating whether
the central pixel should be deleted in that sub-iteration.
References
----------
.. [1] Z. Guo and R. W. Hall, "Parallel thinning with
two-subiteration algorithms," Comm. ACM, vol. 32, no. 3,
pp. 359-373, 1989. :DOI:`10.1145/62065.62074`
.. [2] Lam, L., Seong-Whan Lee, and Ching Y. Suen, "Thinning
Methodologies-A Comprehensive Survey," IEEE Transactions on
Pattern Analysis and Machine Intelligence, Vol 14, No. 9,
p. 879, 1992. :DOI:`10.1109/34.161346`
Examples
--------
>>> square = np.zeros((7, 7), dtype=np.uint8)
>>> square[1:-1, 2:-2] = 1
>>> square[0, 1] = 1
>>> square
array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> skel = thin(square)
>>> skel.astype(np.uint8)
array([[0, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
"""
# check that image is 2d
check_nD(image, 2)
# convert image to uint8 with values in {0, 1}
skel = cp.asarray(image, dtype=bool).astype(cp.uint8)
# neighborhood mask
mask = cp.asarray(
        [[8, 4, 2], [16, 0, 1], [32, 64, 128]], dtype=cp.uint8  # noqa
)
G123_LUT = cp.asarray(_G123_LUT)
G123P_LUT = cp.asarray(_G123P_LUT)
# iterate until convergence, up to the iteration limit
max_num_iter = max_num_iter or cp.inf
num_iter = 0
n_pts_old, n_pts_new = cp.inf, cp.sum(skel)
while n_pts_old != n_pts_new and num_iter < max_num_iter:
n_pts_old = n_pts_new
# perform the two "subiterations" described in the paper
for lut in [G123_LUT, G123P_LUT]:
# correlate image with neighborhood mask
N = ndi.correlate(skel, mask, mode="constant")
# take deletion decision from this subiteration's LUT
D = cp.take(lut, N)
# perform deletion
skel[D] = 0
n_pts_new = cp.sum(skel) # count points after thinning
num_iter += 1
return skel.astype(bool)
# --------- Skeletonization by medial axis transform --------
def _get_tiebreaker(n, seed):
# CuPy generator doesn't currently have the permutation method, so
# fall back to cp.random.permutation instead.
cp.random.seed(seed)
    if n < (1 << 31):
        # all index values (0 .. n - 1) fit in a signed 32-bit integer
        dtype = np.int32
else:
dtype = np.intp
tiebreaker = cp.random.permutation(cp.arange(n, dtype=dtype))
return tiebreaker
@deprecate_kwarg(
{"random_state": "rng"},
deprecated_version="23.08",
removed_version="24.06",
)
@deprecate_kwarg(
{"seed": "rng"}, deprecated_version="23.12", removed_version="24.12"
)
def medial_axis(image, mask=None, return_distance=False, *, rng=None):
"""Compute the medial axis transform of a binary image.
Parameters
----------
image : binary ndarray, shape (M, N)
The image of the shape to be skeletonized.
mask : binary ndarray, shape (M, N), optional
If a mask is given, only those elements in `image` with a true
value in `mask` are used for computing the medial axis.
return_distance : bool, optional
If true, the distance transform is returned as well as the skeleton.
rng : {`numpy.random.Generator`, int}, optional
Pseudo-random number generator.
By default, a PCG64 generator is used
(see :func:`numpy.random.default_rng`).
If `rng` is an int, it is used to seed the generator.
The PRNG determines the order in which pixels are processed for
tiebreaking.
Note: Due to a missing `permute` method on CuPy's random Generator
class, only a `numpy.random.Generator` is currently supported.
Returns
-------
out : ndarray of bools
Medial axis transform of the image
dist : ndarray of ints, optional
Distance transform of the image (only returned if `return_distance`
is True)
See Also
--------
skeletonize
Notes
-----
This algorithm computes the medial axis transform of an image
as the ridges of its distance transform.
The different steps of the algorithm are as follows
* A lookup table is used, that assigns 0 or 1 to each configuration of
the 3x3 binary square, whether the central pixel should be removed
or kept. We want a point to be removed if it has more than one neighbor
and if removing it does not change the number of connected components.
* The distance transform to the background is computed, as well as
the cornerness of the pixel.
* The foreground (value of 1) points are ordered by
the distance transform, then the cornerness.
* A cython function is called to reduce the image to its skeleton. It
processes pixels in the order determined at the previous step, and
removes or maintains a pixel according to the lookup table. Because
of the ordering, it is possible to process all pixels in only one
pass.
Examples
--------
>>> square = np.zeros((7, 7), dtype=np.uint8)
>>> square[1:-1, 2:-2] = 1
>>> square
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
>>> medial_axis(square).astype(np.uint8)
array([[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0]], dtype=uint8)
"""
try:
from skimage.morphology._skeletonize_cy import _skeletonize_loop
except ImportError as e:
warnings.warn(
"Could not find required private skimage Cython function:\n"
"\tskimage.morphology._skeletonize_cy._skeletonize_loop\n"
)
raise e
if mask is None:
# masked_image is modified in-place later so make a copy of the input
masked_image = image.astype(bool, copy=True)
else:
masked_image = image.astype(bool, copy=True)
masked_image[~mask] = False
# Load precomputed lookup table based on three conditions:
# 1. Keep only positive pixels
# AND
# 2. Keep if removing the pixel results in a different connectivity
# (if the number of connected components is different with and
# without the central pixel)
# OR
# 3. Keep if # pixels in neighborhood is 2 or less
# Note that this table is independent of the image
table = _medial_axis_lookup_table
# Build distance transform
distance = distance_transform_edt(masked_image)
if return_distance:
store_distance = distance.copy()
# Corners
# The processing order along the edge is critical to the shape of the
# resulting skeleton: if you process a corner first, that corner will
# be eroded and the skeleton will miss the arm from that corner. Pixels
# with fewer neighbors are more "cornery" and should be processed last.
# We use a cornerness_table lookup table where the score of a
# configuration is the number of background (0-value) pixels in the
# 3x3 neighborhood
cornerness_table = cp.asarray(_medial_axis_cornerness_table)
corner_score = _table_lookup(masked_image, cornerness_table)
# Define arrays for inner loop
distance = distance[masked_image]
i, j = cp.where(masked_image)
# Determine the order in which pixels are processed.
# We use a random # for tiebreaking. Assign each pixel in the image a
# predictable, random # so that masking doesn't affect arbitrary choices
# of skeletons
if rng is None or isinstance(rng, int):
tiebreaker = _get_tiebreaker(n=distance.size, seed=rng)
elif isinstance(rng, np.random.Generator):
generator = np.random.default_rng(rng)
tiebreaker = cp.asarray(generator.permutation(np.arange(distance.size)))
else:
raise ValueError(
f"{type(rng)} class not yet supported for use in " "`medial_axis`."
)
order = cp.lexsort(
cp.stack((tiebreaker, corner_score[masked_image], distance), axis=0)
)
# Call _skeletonize_loop on the CPU. It requires a single pass over the
# full array using a specific pixel order, so cannot be run multithreaded!
order = cp.asnumpy(order.astype(cp.int32, copy=False))
table = cp.asnumpy(table.astype(cp.uint8, copy=False))
i = cp.asnumpy(i).astype(dtype=np.intp, copy=False)
j = cp.asnumpy(j).astype(dtype=np.intp, copy=False)
result = cp.asnumpy(masked_image)
# Remove pixels not belonging to the medial axis
_skeletonize_loop(result.view(np.uint8), i, j, order, table)
result = cp.asarray(result.view(bool), dtype=bool)
if mask is not None:
result[~mask] = image[~mask]
if return_distance:
return result, store_distance
else:
return result
def _table_lookup(image, table):
"""
Perform a morphological transform on an image, directed by its
neighbors
Parameters
----------
image : ndarray
A binary image
table : ndarray
A 512-element table giving the transform of each pixel given
the values of that pixel and its 8-connected neighbors.
Returns
-------
result : ndarray of same shape as `image`
Transformed image
Notes
-----
The pixels are numbered like this::
0 1 2
3 4 5
6 7 8
The index at a pixel is the sum of 2**<pixel-number> for pixels
that evaluate to true.
"""
#
# We accumulate into the indexer to get the index into the table
# at each point in the image
#
    # max possible value of indexer is 511, so just use int16 dtype
kernel = cp.array([[256, 128, 64], [32, 16, 8], [4, 2, 1]], dtype=cp.int16)
indexer = ndi.convolve(image, kernel, output=np.int16, mode="constant")
image = table[indexer]
return image
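# --- Illustrative sketch (not part of the public API) ---
# A hedged check of the neighborhood numbering described in ``_table_lookup``:
# because ``ndi.convolve`` flips its kernel, a true pixel numbered k in
#
#     0 1 2
#     3 4 5
#     6 7 8
#
# contributes 2**k to the index at the central pixel. With only the centre
# (pixel 4) and its right-hand neighbour (pixel 5) set, the centre index is
# expected to be 2**4 + 2**5 == 48. The identity table below is hypothetical
# and simply echoes the computed index.
if __name__ == "__main__":
    patch = cp.zeros((3, 3), dtype=bool)
    patch[1, 1] = True  # pixel 4 (centre)
    patch[1, 2] = True  # pixel 5 (right-hand neighbour)
    identity_table = cp.arange(512, dtype=cp.int16)
    print(int(_table_lookup(patch, identity_table)[1, 1]))  # expected: 48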
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/isotropic.py
|
"""
Binary morphological operations
"""
import cupy as cp
from cucim.core.operations.morphology import distance_transform_edt
def _check_output(out, shape):
"""Check shape and dtype of output array.
Parameters
----------
out : cp.ndarray or None
The array to check
shape : tuple of int
The expected shape
Returns
-------
out : cp.ndarray
The original array (or boolean view of a uint8 array).
"""
if out is None:
return None
if out.shape != shape:
raise ValueError("out.shape must match image.shape")
if not out.flags.c_contiguous:
raise ValueError("out array must have C-contiguous memory layout")
if out.dtype == bool:
return out
elif out.dtype == cp.uint8:
# view uint8 as bool
return out.view(bool)
else:
raise ValueError("provided out array should have boolean type")
def isotropic_erosion(image, radius, out=None, spacing=None):
"""Return binary morphological erosion of an image.
This function returns the same result as
:func:`skimage.morphology.binary_erosion` but performs faster for large
circular structuring elements. This works by applying a threshold to the
exact Euclidean distance map of the image [1]_, [2]_. The implementation is
    based on
    :func:`cucim.core.operations.morphology.distance_transform_edt`.
Parameters
----------
image : ndarray
Binary input image.
radius : float
The radius by which regions should be eroded.
out : ndarray of bool, optional
The array to store the result of the morphology. If None,
a new array will be allocated.
spacing : float, or sequence of float, optional
Spacing of elements along each dimension.
If a sequence, must be of length equal to the input's dimension
(number of axes). If a single number, this value is used for all axes.
If not specified, a grid spacing of unity is implied.
Returns
-------
eroded : ndarray of bool
The result of the morphological erosion taking values in
``[False, True]``.
Notes
-----
Empirically, on an RTX A6000 GPU, it was observed that
``isotropic_erosion`` is faster than ``binary_erosion`` with
``decomposition=None`` at radius 12 in 2D and radius 3 in 3D. It becomes
faster than ``binary_erosion`` with ``decomposition="sequence"`` at radius
14 in 2D and radius 5 in 3D. In practice, the exact point at which these
isotropic functions become faster than their binary counterparts will also
be dependent on image shape and content.
References
----------
.. [1] Cuisenaire, O. and Macq, B., "Fast Euclidean morphological operators
using local distance transformation by propagation, and applications,"
Image Processing And Its Applications, 1999. Seventh International
Conference on (Conf. Publ. No. 465), 1999, pp. 856-860 vol.2.
:DOI:`10.1049/cp:19990446`
.. [2] Ingemar Ragnemalm, Fast erosion and dilation by contour processing
and thresholding of distance maps, Pattern Recognition Letters,
Volume 13, Issue 3, 1992, Pages 161-166.
:DOI:`10.1016/0167-8655(92)90055-5`
"""
out = _check_output(out, image.shape)
dist = distance_transform_edt(image, sampling=spacing)
if out is not None:
cp.greater(dist, radius, out=out)
else:
out = cp.greater(dist, radius)
return out
def isotropic_dilation(image, radius, out=None, spacing=None):
"""Return binary morphological dilation of an image.
This function returns the same result as
:func:`skimage.morphology.binary_dilation` but performs faster for large
circular structuring elements. This works by applying a threshold to the
exact Euclidean distance map of the inverted image [1]_, [2]_. The
    implementation is based on
    :func:`cucim.core.operations.morphology.distance_transform_edt`.
Parameters
----------
image : ndarray
Binary input image.
radius : float
The radius by which regions should be dilated.
out : ndarray of bool, optional
The array to store the result of the morphology. If None is
passed, a new array will be allocated.
spacing : float, or sequence of float, optional
Spacing of elements along each dimension.
If a sequence, must be of length equal to the input's dimension
(number of axes).
If a single number, this value is used for all axes.
If not specified, a grid spacing of unity is implied.
Returns
-------
dilated : ndarray of bool
The result of the morphological dilation with values in
``[False, True]``.
Notes
-----
Empirically, on an RTX A6000 GPU, it was observed that
``isotropic_dilation`` is faster than ``binary_dilation`` with
``decomposition=None`` at radius 12 in 2D and radius 3 in 3D. It becomes
faster than ``binary_dilation`` with ``decomposition="sequence"`` at radius
14 in 2D and radius 5 in 3D. In practice, the exact point at which these
isotropic functions become faster than their binary counterparts will also
be dependent on image shape and content.
References
----------
.. [1] Cuisenaire, O. and Macq, B., "Fast Euclidean morphological operators
using local distance transformation by propagation, and applications,"
Image Processing And Its Applications, 1999. Seventh International
Conference on (Conf. Publ. No. 465), 1999, pp. 856-860 vol.2.
:DOI:`10.1049/cp:19990446`
.. [2] Ingemar Ragnemalm, Fast erosion and dilation by contour processing
and thresholding of distance maps, Pattern Recognition Letters,
Volume 13, Issue 3, 1992, Pages 161-166.
:DOI:`10.1016/0167-8655(92)90055-5`
"""
out = _check_output(out, image.shape)
dist = distance_transform_edt(cp.logical_not(image), sampling=spacing)
if out is not None:
cp.less_equal(dist, radius, out=out)
else:
out = cp.less_equal(dist, radius)
return out
def isotropic_opening(image, radius, out=None, spacing=None):
"""Return binary morphological opening of an image.
This function returns the same result as
:func:`skimage.morphology.binary_opening` but performs faster for large
circular structuring elements. This works by thresholding the exact
    Euclidean distance map [1]_, [2]_. The implementation is based on
    :func:`cucim.core.operations.morphology.distance_transform_edt`.
Parameters
----------
image : ndarray
Binary input image.
radius : float
The radius with which the regions should be opened.
out : ndarray of bool, optional
The array to store the result of the morphology. If None
is passed, a new array will be allocated.
spacing : float, or sequence of float, optional
Spacing of elements along each dimension.
If a sequence, must be of length equal to the input's dimension
(number of axes).
If a single number, this value is used for all axes.
If not specified, a grid spacing of unity is implied.
Returns
-------
opened : ndarray of bool
The result of the morphological opening.
Notes
-----
Empirically, on an RTX A6000 GPU, it was observed that
``isotropic_opening`` is faster than ``binary_opening`` with
``decomposition=None`` at radius 12 in 2D and radius 3 in 3D. It becomes
    faster than ``binary_opening`` with ``decomposition="sequence"`` at radius
14 in 2D and radius 5 in 3D. In practice, the exact point at which these
isotropic functions become faster than their binary counterparts will also
be dependent on image shape and content.
References
----------
.. [1] Cuisenaire, O. and Macq, B., "Fast Euclidean morphological operators
using local distance transformation by propagation, and applications,"
Image Processing And Its Applications, 1999. Seventh International
Conference on (Conf. Publ. No. 465), 1999, pp. 856-860 vol.2.
:DOI:`10.1049/cp:19990446`
.. [2] Ingemar Ragnemalm, Fast erosion and dilation by contour processing
and thresholding of distance maps, Pattern Recognition Letters,
Volume 13, Issue 3, 1992, Pages 161-166.
:DOI:`10.1016/0167-8655(92)90055-5`
"""
out = _check_output(out, image.shape)
eroded = isotropic_erosion(image, radius, spacing=spacing)
return isotropic_dilation(eroded, radius, out=out, spacing=spacing)
def isotropic_closing(image, radius, out=None, spacing=None):
"""Return binary morphological closing of an image.
    This function returns the same result as
:func:`skimage.morphology.binary_closing` but performs faster for large
circular structuring elements. This works by thresholding the exact
    Euclidean distance map [1]_, [2]_. The implementation is based on
    :func:`cucim.core.operations.morphology.distance_transform_edt`.
Parameters
----------
image : ndarray
Binary input image.
radius : float
The radius with which the regions should be closed.
out : ndarray of bool, optional
        The array to store the result of the morphology. If None
is passed, a new array will be allocated.
spacing : float, or sequence of float, optional
Spacing of elements along each dimension.
If a sequence, must be of length equal to the input's dimension
(number of axes).
If a single number, this value is used for all axes.
If not specified, a grid spacing of unity is implied.
Returns
-------
closed : ndarray of bool
The result of the morphological closing.
Notes
-----
Empirically, on an RTX A6000 GPU, it was observed that
``isotropic_closing`` is faster than ``binary_closing`` with
``decomposition=None`` at radius 12 in 2D and radius 3 in 3D. It becomes
    faster than ``binary_closing`` with ``decomposition="sequence"`` at radius
14 in 2D and radius 5 in 3D. In practice, the exact point at which these
isotropic functions become faster than their binary counterparts will also
be dependent on image shape and content.
References
----------
.. [1] Cuisenaire, O. and Macq, B., "Fast Euclidean morphological operators
using local distance transformation by propagation, and applications,"
Image Processing And Its Applications, 1999. Seventh International
Conference on (Conf. Publ. No. 465), 1999, pp. 856-860 vol.2.
:DOI:`10.1049/cp:19990446`
.. [2] Ingemar Ragnemalm, Fast erosion and dilation by contour processing
and thresholding of distance maps, Pattern Recognition Letters,
Volume 13, Issue 3, 1992, Pages 161-166.
:DOI:`10.1016/0167-8655(92)90055-5`
"""
out = _check_output(out, image.shape)
dilated = isotropic_dilation(image, radius, spacing=spacing)
return isotropic_erosion(dilated, radius, out=out, spacing=spacing)
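# --- Illustrative sketch (not part of the public API) ---
# A hedged usage example for the isotropic operators above (their docstrings
# do not include doctest examples). The filled disk and the radius below are
# hypothetical demo values only.
if __name__ == "__main__":
    yy, xx = cp.mgrid[:64, :64]
    blob = (yy - 32) ** 2 + (xx - 32) ** 2 <= 20 ** 2
    eroded = isotropic_erosion(blob, radius=5)
    opened = isotropic_opening(blob, radius=5)
    print(int(blob.sum()), int(eroded.sum()), int(opened.sum()))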
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/tests/test_reconstruction.py
|
"""
These tests are originally part of CellProfiler, code licensed under both GPL
and BSD licenses.
Website: http://www.cellprofiler.org
Copyright (c) 2003-2009 Massachusetts Institute of Technology
Copyright (c) 2009-2011 Broad Institute
All rights reserved.
Original author: Lee Kamentsky
"""
import cupy as cp
import numpy as np
import pytest
from cupy.testing import assert_array_almost_equal
from skimage.morphology import reconstruction as reconstruction_cpu
from cucim.skimage.morphology import reconstruction
def test_zeros():
"""Test reconstruction with image and mask of zeros"""
assert_array_almost_equal(
reconstruction(cp.zeros((5, 7)), cp.zeros((5, 7))), 0
)
def test_image_equals_mask():
"""Test reconstruction where the image and mask are the same"""
assert_array_almost_equal(
reconstruction(cp.ones((7, 5)), cp.ones((7, 5))), 1
)
def test_image_less_than_mask():
"""Test reconstruction where the image is uniform and less than mask"""
image = cp.ones((5, 5))
mask = cp.ones((5, 5)) * 2
assert_array_almost_equal(reconstruction(image, mask), 1)
def test_one_image_peak():
"""Test reconstruction with one peak pixel"""
image = cp.ones((5, 5))
image[2, 2] = 2
mask = cp.ones((5, 5)) * 3
assert_array_almost_equal(reconstruction(image, mask), 2)
def test_two_image_peaks():
"""Test reconstruction with two peak pixels isolated by the mask"""
# fmt: off
image = cp.array([[1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 3, 1],
[1, 1, 1, 1, 1, 1, 1, 1]])
mask = cp.array([[4, 4, 4, 1, 1, 1, 1, 1],
[4, 4, 4, 1, 1, 1, 1, 1],
[4, 4, 4, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 4, 4, 4],
[1, 1, 1, 1, 1, 4, 4, 4],
[1, 1, 1, 1, 1, 4, 4, 4]])
expected = cp.array([[2, 2, 2, 1, 1, 1, 1, 1],
[2, 2, 2, 1, 1, 1, 1, 1],
[2, 2, 2, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 3, 3, 3],
[1, 1, 1, 1, 1, 3, 3, 3],
[1, 1, 1, 1, 1, 3, 3, 3]])
# fmt: on
assert_array_almost_equal(reconstruction(image, mask), expected)
def test_zero_image_one_mask():
"""Test reconstruction with an image of all zeros and a mask that's not"""
result = reconstruction(cp.zeros((10, 10)), cp.ones((10, 10)))
assert_array_almost_equal(result, 0)
def test_fill_hole():
"""Test reconstruction by erosion, which should fill holes in mask."""
seed = cp.array([0, 8, 8, 8, 8, 8, 8, 8, 8, 0])
mask = cp.array([0, 3, 6, 2, 1, 1, 1, 4, 2, 0])
result = reconstruction(seed, mask, method="erosion")
assert_array_almost_equal(result, cp.array([0, 3, 6, 4, 4, 4, 4, 4, 2, 0]))
def test_invalid_seed():
seed = cp.ones((5, 5))
mask = cp.ones((5, 5))
with pytest.raises(ValueError):
reconstruction(seed * 2, mask, method="dilation")
with pytest.raises(ValueError):
reconstruction(seed * 0.5, mask, method="erosion")
def test_invalid_footprint():
seed = cp.ones((5, 5))
mask = cp.ones((5, 5))
with pytest.raises(ValueError):
reconstruction(seed, mask, footprint=np.ones((4, 4)))
with pytest.raises(ValueError):
reconstruction(seed, mask, footprint=np.ones((3, 4)))
reconstruction(seed, mask, footprint=np.ones((3, 3)))
def test_invalid_method():
seed = cp.array([0, 8, 8, 8, 8, 8, 8, 8, 8, 0])
mask = cp.array([0, 3, 6, 2, 1, 1, 1, 4, 2, 0])
with pytest.raises(ValueError):
reconstruction(seed, mask, method="foo")
def test_invalid_offset_not_none():
"""Test reconstruction with invalid not None offset parameter"""
# fmt: off
image = cp.array([[1, 1, 1, 1, 1, 1, 1, 1],
[1, 2, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 3, 1],
[1, 1, 1, 1, 1, 1, 1, 1]])
mask = cp.array([[4, 4, 4, 1, 1, 1, 1, 1],
[4, 4, 4, 1, 1, 1, 1, 1],
[4, 4, 4, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 4, 4, 4],
[1, 1, 1, 1, 1, 4, 4, 4],
[1, 1, 1, 1, 1, 4, 4, 4]])
# fmt: on
with pytest.raises(ValueError):
reconstruction(
image,
mask,
method="dilation",
footprint=cp.ones((3, 3)),
offset=cp.array([3, 0]),
)
def test_offset_not_none():
"""Test reconstruction with valid offset parameter"""
seed = cp.array([0, 3, 6, 2, 1, 1, 1, 4, 2, 0])
mask = cp.array([0, 8, 6, 8, 8, 8, 8, 4, 4, 0])
expected = cp.array([0, 3, 6, 6, 6, 6, 6, 4, 4, 0])
assert_array_almost_equal(
reconstruction(
seed,
mask,
method="dilation",
footprint=cp.ones(3),
offset=cp.array([0]),
),
expected,
)
def test_reconstruction_float_inputs():
"""Verifies fix for: https://github.com/rapidsai/cuci/issues/36
Run the 2D example from the reconstruction docstring and compare the output
to scikit-image.
"""
y, x = np.mgrid[:20:0.5, :20:0.5]
bumps = np.sin(x) + np.sin(y)
h = 0.3
seed = bumps - h
background_cpu = reconstruction_cpu(seed, bumps)
background = reconstruction(cp.asarray(seed), cp.asarray(bumps))
cp.testing.assert_allclose(background, background_cpu)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/tests/test_misc.py
|
import cupy as cp
import pytest
from cupy.testing import assert_array_equal
from numpy.testing import assert_equal
from cucim.skimage._shared._warnings import expected_warnings
from cucim.skimage.morphology import remove_small_holes, remove_small_objects
# fmt: off
test_image = cp.array([[0, 0, 0, 1, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 1]], bool)
# fmt: on
def test_one_connectivity():
# fmt: off
expected = cp.array([[0, 0, 0, 0, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0]], bool)
# fmt: on
observed = remove_small_objects(test_image, min_size=6)
assert_array_equal(observed, expected)
def test_two_connectivity():
# fmt: off
expected = cp.array([[0, 0, 0, 1, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0]], bool)
# fmt: on
observed = remove_small_objects(test_image, min_size=7, connectivity=2)
assert_array_equal(observed, expected)
def test_in_place():
image = test_image.copy()
observed = remove_small_objects(image, min_size=6, out=image)
assert_equal(
observed is image,
True,
"remove_small_objects in_place argument failed.",
)
@pytest.mark.parametrize("in_dtype", [bool, int, cp.int32])
@pytest.mark.parametrize("out_dtype", [bool, int, cp.int32])
def test_out(in_dtype, out_dtype):
image = test_image.astype(in_dtype, copy=True)
expected_out = cp.empty_like(test_image, dtype=out_dtype)
if out_dtype != bool:
# object with only 1 label will warn on non-bool output dtype
exp_warn = ["Only one label was provided"]
else:
exp_warn = []
with expected_warnings(exp_warn):
out = remove_small_objects(image, min_size=6, out=expected_out)
assert out is expected_out
def test_labeled_image():
# fmt: off
labeled_image = cp.array([[2, 2, 2, 0, 1],
[2, 2, 2, 0, 1],
[2, 0, 0, 0, 0],
[0, 0, 3, 3, 3]], dtype=int)
expected = cp.array([[2, 2, 2, 0, 0],
[2, 2, 2, 0, 0],
[2, 0, 0, 0, 0],
[0, 0, 3, 3, 3]], dtype=int)
# fmt: on
observed = remove_small_objects(labeled_image, min_size=3)
assert_array_equal(observed, expected)
def test_uint_image():
# fmt: off
labeled_image = cp.array([[2, 2, 2, 0, 1],
[2, 2, 2, 0, 1],
[2, 0, 0, 0, 0],
[0, 0, 3, 3, 3]], dtype=cp.uint8)
expected = cp.array([[2, 2, 2, 0, 0],
[2, 2, 2, 0, 0],
[2, 0, 0, 0, 0],
[0, 0, 3, 3, 3]], dtype=cp.uint8)
# fmt: on
observed = remove_small_objects(labeled_image, min_size=3)
assert_array_equal(observed, expected)
def test_single_label_warning():
# fmt: off
image = cp.array([[0, 0, 0, 1, 0],
[1, 1, 1, 0, 0],
[1, 1, 1, 0, 0]], int)
# fmt: on
with expected_warnings(["use a boolean array?"]):
remove_small_objects(image, min_size=6)
def test_float_input():
float_test = cp.random.rand(5, 5)
with pytest.raises(TypeError):
remove_small_objects(float_test)
def test_negative_input():
negative_int = cp.random.randint(-4, -1, size=(5, 5))
with pytest.raises(ValueError):
remove_small_objects(negative_int)
# fmt: off
test_holes_image = cp.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], bool)
# fmt: on
def test_one_connectivity_holes():
# fmt: off
expected = cp.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], bool)
# fmt: on
observed = remove_small_holes(test_holes_image, area_threshold=3)
assert_array_equal(observed, expected)
def test_two_connectivity_holes():
# fmt: off
expected = cp.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], bool)
# fmt: on
observed = remove_small_holes(
test_holes_image, area_threshold=3, connectivity=2
)
assert_array_equal(observed, expected)
def test_in_place_holes():
image = test_holes_image.copy()
observed = remove_small_holes(image, area_threshold=3, out=image)
assert_equal(
observed is image, True, "remove_small_holes in_place argument failed."
)
def test_out_remove_small_holes():
image = test_holes_image.copy()
expected_out = cp.empty_like(image)
out = remove_small_holes(image, area_threshold=3, out=expected_out)
assert out is expected_out
def test_non_bool_out():
image = test_holes_image.copy()
expected_out = cp.empty_like(image, dtype=int)
with pytest.raises(TypeError):
remove_small_holes(image, area_threshold=3, out=expected_out)
def test_labeled_image_holes():
# fmt: off
labeled_holes_image = cp.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 2, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2]],
dtype=int)
expected = cp.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=bool)
# fmt: on
with expected_warnings(["returned as a boolean array"]):
observed = remove_small_holes(labeled_holes_image, area_threshold=3)
assert_array_equal(observed, expected)
def test_uint_image_holes():
# fmt: off
labeled_holes_image = cp.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 2, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2]],
dtype=cp.uint8)
expected = cp.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1]], dtype=bool)
# fmt: on
with expected_warnings(["returned as a boolean array"]):
observed = remove_small_holes(labeled_holes_image, area_threshold=3)
assert_array_equal(observed, expected)
def test_label_warning_holes():
# fmt: off
labeled_holes_image = cp.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2],
[0, 0, 0, 0, 0, 0, 0, 2, 0, 2],
[0, 0, 0, 0, 0, 0, 0, 2, 2, 2]],
dtype=int)
# fmt: on
with expected_warnings(["use a boolean array?"]):
remove_small_holes(labeled_holes_image, area_threshold=3)
remove_small_holes(labeled_holes_image.astype(bool), area_threshold=3)
def test_float_input_holes():
float_test = cp.random.rand(5, 5)
with pytest.raises(TypeError):
remove_small_holes(float_test)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/tests/test_footprints.py
|
"""
Tests for Morphological footprints
(skimage.morphology.footprint)
Author: Damian Eads
"""
import cupy as cp
import numpy as np
import pytest
from cupy.testing import assert_array_equal
from cucim.skimage._shared.testing import fetch
from cucim.skimage.morphology import footprints
class TestSElem:
def test_square_footprint(self):
"""Test square footprints"""
for k in range(0, 5):
actual_mask = footprints.square(k, dtype=cp.uint8)
expected_mask = np.ones((k, k), dtype="uint8")
assert_array_equal(expected_mask, actual_mask)
def test_rectangle_footprint(self):
"""Test rectangle footprints"""
for i in range(0, 5):
for j in range(0, 5):
actual_mask = footprints.rectangle(i, j, dtype=cp.uint8)
expected_mask = np.ones((i, j), dtype="uint8")
assert_array_equal(expected_mask, actual_mask)
def test_cube_footprint(self):
"""Test cube footprints"""
for k in range(0, 5):
actual_mask = footprints.cube(k, dtype=cp.uint8)
expected_mask = np.ones((k, k, k), dtype="uint8")
assert_array_equal(expected_mask, actual_mask)
def strel_worker(self, fn, func):
matlab_masks = np.load(fetch(fn))
k = 0
for arrname in sorted(matlab_masks):
expected_mask = matlab_masks[arrname]
actual_mask = func(k)
if expected_mask.shape == (1,):
expected_mask = expected_mask[:, np.newaxis]
assert_array_equal(expected_mask, actual_mask)
k = k + 1
def strel_worker_3d(self, fn, func):
matlab_masks = np.load(fetch(fn))
k = 0
for arrname in sorted(matlab_masks):
expected_mask = matlab_masks[arrname]
actual_mask = func(k)
if expected_mask.shape == (1,):
expected_mask = expected_mask[:, np.newaxis]
# Test center slice for each dimension. This gives a good
# indication of validity without the need for a 3D reference
# mask.
c = int(expected_mask.shape[0] / 2)
assert_array_equal(expected_mask, actual_mask[c, :, :])
assert_array_equal(expected_mask, actual_mask[:, c, :])
assert_array_equal(expected_mask, actual_mask[:, :, c])
k = k + 1
def test_footprint_disk(self):
"""Test disk footprints"""
self.strel_worker("data/disk-matlab-output.npz", footprints.disk)
def test_footprint_diamond(self):
"""Test diamond footprints"""
self.strel_worker("data/diamond-matlab-output.npz", footprints.diamond)
def test_footprint_ball(self):
"""Test ball footprints"""
self.strel_worker_3d("data/disk-matlab-output.npz", footprints.ball)
def test_footprint_octahedron(self):
"""Test octahedron footprints"""
self.strel_worker_3d(
"data/diamond-matlab-output.npz", footprints.octahedron
)
def test_footprint_octagon(self):
"""Test octagon footprints"""
# fmt: off
expected_mask1 = cp.array([[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0]],
dtype=cp.uint8)
actual_mask1 = footprints.octagon(5, 3)
expected_mask2 = cp.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]], dtype=cp.uint8)
# fmt: on
actual_mask2 = footprints.octagon(1, 1)
assert_array_equal(expected_mask1, actual_mask1)
assert_array_equal(expected_mask2, actual_mask2)
def test_footprint_ellipse(self):
"""Test ellipse footprints"""
# fmt: off
expected_mask1 = cp.array([[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0]],
dtype=cp.uint8)
actual_mask1 = footprints.ellipse(5, 3)
expected_mask2 = cp.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]], dtype=cp.uint8)
# fmt: on
actual_mask2 = footprints.ellipse(1, 1)
assert_array_equal(expected_mask1, actual_mask1)
assert_array_equal(expected_mask2, actual_mask2)
assert_array_equal(expected_mask1, footprints.ellipse(3, 5).T)
assert_array_equal(expected_mask2, footprints.ellipse(1, 1).T)
def test_footprint_star(self):
"""Test star footprints"""
# fmt: off
expected_mask1 = cp.array([[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0]],
dtype=cp.uint8)
actual_mask1 = footprints.star(4)
expected_mask2 = cp.array([[1, 1, 1],
[1, 1, 1],
[1, 1, 1]], dtype=cp.uint8)
# fmt: on
actual_mask2 = footprints.star(1)
assert_array_equal(expected_mask1, actual_mask1)
assert_array_equal(expected_mask2, actual_mask2)
@pytest.mark.parametrize(
"function, args, supports_sequence_decomposition",
[
(footprints.disk, (3,), True),
(footprints.ball, (3,), True),
(footprints.square, (3,), True),
(footprints.cube, (3,), True),
(footprints.diamond, (3,), True),
(footprints.octahedron, (3,), True),
(footprints.rectangle, (3, 4), True),
(footprints.ellipse, (3, 4), False),
(footprints.octagon, (3, 4), True),
(footprints.star, (3,), False),
],
)
@pytest.mark.parametrize("dtype", [np.uint8, np.float64])
def test_footprint_dtype(
function, args, supports_sequence_decomposition, dtype
):
# make sure footprint dtype matches what was requested
footprint = function(*args, dtype=dtype)
assert footprint.dtype == dtype
if supports_sequence_decomposition:
sequence = function(*args, dtype=dtype, decomposition="sequence")
assert all([fp_tuple[0].dtype == dtype for fp_tuple in sequence])
@pytest.mark.parametrize("function", ["disk", "ball"])
@pytest.mark.parametrize(
"radius", [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 20, 50, 75, 100]
)
def test_nsphere_series_approximation(function, radius):
fp_func = getattr(footprints, function)
expected = fp_func(radius, strict_radius=False, decomposition=None)
footprint_sequence = fp_func(
radius, strict_radius=False, decomposition="sequence"
)
approximate = footprints.footprint_from_sequence(footprint_sequence)
assert approximate.shape == expected.shape
# verify that maximum error does not exceed some fraction of the size
error = np.sum(np.abs(expected.astype(int) - approximate.astype(int)))
if radius == 1:
assert error == 0
else:
max_error = 0.1 if function == "disk" else 0.15
assert error / expected.size <= max_error
@pytest.mark.parametrize("radius", [1, 2, 3, 4, 5, 10, 20, 50, 75])
@pytest.mark.parametrize("strict_radius", [False, True])
def test_disk_crosses_approximation(radius, strict_radius):
fp_func = footprints.disk
expected = fp_func(radius, strict_radius=strict_radius, decomposition=None)
footprint_sequence = fp_func(
radius, strict_radius=strict_radius, decomposition="crosses"
)
approximate = footprints.footprint_from_sequence(footprint_sequence)
assert approximate.shape == expected.shape
# verify that maximum error does not exceed some fraction of the size
error = cp.sum(cp.abs(expected.astype(int) - approximate.astype(int)))
max_error = 0.05
assert error / expected.size <= max_error
@pytest.mark.parametrize("width", [3, 8, 20, 50])
@pytest.mark.parametrize("height", [3, 8, 20, 50])
def test_ellipse_crosses_approximation(width, height):
fp_func = footprints.ellipse
expected = fp_func(width, height, decomposition=None)
footprint_sequence = fp_func(width, height, decomposition="crosses")
approximate = footprints.footprint_from_sequence(footprint_sequence)
assert approximate.shape == expected.shape
# verify that maximum error does not exceed some fraction of the size
error = cp.sum(cp.abs(expected.astype(int) - approximate.astype(int)))
max_error = 0.05
assert error / expected.size <= max_error
def test_disk_series_approximation_unavailable():
# ValueError if radius is too large (only precomputed up to radius=250)
with pytest.raises(ValueError):
footprints.disk(radius=10000, decomposition="sequence")
def test_ball_series_approximation_unavailable():
# ValueError if radius is too large (only precomputed up to radius=100)
with pytest.raises(ValueError):
footprints.ball(radius=10000, decomposition="sequence")
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/tests/test_gray.py
|
import cupy as cp
import numpy as np
import pytest
from cupy import testing
from cupyx.scipy import ndimage as ndi
from skimage import data
from cucim.skimage import color, morphology, transform
from cucim.skimage._shared._warnings import expected_warnings
from cucim.skimage._shared.testing import fetch
from cucim.skimage.util import img_as_ubyte, img_as_uint
@pytest.fixture
def cam_image():
from skimage import data
return cp.ascontiguousarray(cp.array(data.camera()[64:112, 64:96]))
@pytest.fixture
def cell3d_image():
from skimage import data
return cp.ascontiguousarray(
cp.array(data.cells3d()[30:48, 0, 20:36, 20:32])
)
class TestMorphology:
# These expected outputs were generated with skimage v0.12.1
# using:
#
# from skimage.morphology.tests.test_gray import TestMorphology
# import numpy as np
# output = TestMorphology()._build_expected_output()
# np.savez_compressed('gray_morph_output.npz', **output)
def _build_expected_output(self):
funcs = (
morphology.erosion,
morphology.dilation,
morphology.opening,
morphology.closing,
morphology.white_tophat,
morphology.black_tophat,
)
footprints_2D = (
morphology.square,
morphology.diamond,
morphology.disk,
morphology.star,
)
image = img_as_ubyte(
transform.downscale_local_mean(
color.rgb2gray(cp.array(data.coffee())), (20, 20)
)
)
output = {}
for n in range(1, 4):
for footprint in footprints_2D:
for func in funcs:
key = "{0}_{1}_{2}".format(
footprint.__name__, n, func.__name__
)
output[key] = func(image, footprint(n))
return output
def test_gray_morphology(self):
expected = dict(np.load(fetch("data/gray_morph_output.npz")))
calculated = self._build_expected_output()
for k, v in calculated.items():
cp.testing.assert_array_equal(cp.asarray(expected[k]), v)
class TestEccentricStructuringElements:
def setup_method(self):
self.black_pixel = 255 * cp.ones((4, 4), dtype=cp.uint8)
self.black_pixel[1, 1] = 0
self.white_pixel = 255 - self.black_pixel
self.footprints = [
morphology.square(2),
morphology.rectangle(2, 2),
morphology.rectangle(2, 1),
morphology.rectangle(1, 2),
]
def test_dilate_erode_symmetry(self):
for s in self.footprints:
c = morphology.erosion(self.black_pixel, s)
d = morphology.dilation(self.white_pixel, s)
assert cp.all(c == (255 - d))
def test_open_black_pixel(self):
for s in self.footprints:
gray_open = morphology.opening(self.black_pixel, s)
assert cp.all(gray_open == self.black_pixel)
def test_close_white_pixel(self):
for s in self.footprints:
gray_close = morphology.closing(self.white_pixel, s)
assert cp.all(gray_close == self.white_pixel)
def test_open_white_pixel(self):
for s in self.footprints:
assert cp.all(morphology.opening(self.white_pixel, s) == 0)
def test_close_black_pixel(self):
for s in self.footprints:
assert cp.all(morphology.closing(self.black_pixel, s) == 255)
def test_white_tophat_white_pixel(self):
for s in self.footprints:
tophat = morphology.white_tophat(self.white_pixel, s)
cp.testing.assert_array_equal(tophat, self.white_pixel)
def test_black_tophat_black_pixel(self):
for s in self.footprints:
tophat = morphology.black_tophat(self.black_pixel, s)
cp.testing.assert_array_equal(tophat, 255 - self.black_pixel)
def test_white_tophat_black_pixel(self):
for s in self.footprints:
tophat = morphology.white_tophat(self.black_pixel, s)
assert cp.all(tophat == 0)
def test_black_tophat_white_pixel(self):
for s in self.footprints:
tophat = morphology.black_tophat(self.white_pixel, s)
assert cp.all(tophat == 0)
gray_functions = [
morphology.erosion,
morphology.dilation,
morphology.opening,
morphology.closing,
morphology.white_tophat,
morphology.black_tophat,
]
@pytest.mark.parametrize("function", gray_functions)
def test_default_footprint(function):
footprint = morphology.diamond(radius=1)
# fmt: off
image = cp.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], cp.uint8)
# fmt: on
im_expected = function(image, footprint)
im_test = function(image)
cp.testing.assert_array_equal(im_expected, im_test)
def test_3d_fallback_default_footprint():
# 3x3x3 cube inside a 7x7x7 image:
image = cp.zeros((7, 7, 7), bool)
image[2:-2, 2:-2, 2:-2] = 1
opened = morphology.opening(image)
# expect a "hyper-cross" centered in the 5x5x5:
image_expected = cp.zeros((7, 7, 7), dtype=bool)
image_expected[2:5, 2:5, 2:5] = ndi.generate_binary_structure(3, 1)
cp.testing.assert_array_equal(opened, image_expected)
gray_3d_fallback_functions = [morphology.closing, morphology.opening]
@pytest.mark.parametrize("function", gray_3d_fallback_functions)
def test_3d_fallback_cube_footprint(function):
# 3x3x3 cube inside a 7x7x7 image:
image = cp.zeros((7, 7, 7), bool)
image[2:-2, 2:-2, 2:-2] = 1
cube = cp.ones((3, 3, 3), dtype=cp.uint8)
new_image = function(image, cube)
cp.testing.assert_array_equal(new_image, image)
def test_3d_fallback_white_tophat():
image = cp.zeros((7, 7, 7), dtype=bool)
image[2, 2:4, 2:4] = 1
image[3, 2:5, 2:5] = 1
image[4, 3:5, 3:5] = 1
with expected_warnings([r"operator.*deprecated|\A\Z"]):
new_image = morphology.white_tophat(image)
footprint = ndi.generate_binary_structure(3, 1)
with expected_warnings([r"operator.*deprecated|\A\Z"]):
image_expected = ndi.white_tophat(
image.view(dtype=cp.uint8), footprint=footprint
)
cp.testing.assert_array_equal(new_image, image_expected)
def test_3d_fallback_black_tophat():
image = cp.ones((7, 7, 7), dtype=bool)
image[2, 2:4, 2:4] = 0
image[3, 2:5, 2:5] = 0
image[4, 3:5, 3:5] = 0
with expected_warnings([r"operator.*deprecated|\A\Z"]):
new_image = morphology.black_tophat(image)
footprint = ndi.generate_binary_structure(3, 1)
with expected_warnings([r"operator.*deprecated|\A\Z"]):
image_expected = ndi.black_tophat(
image.view(dtype=cp.uint8), footprint=footprint
)
cp.testing.assert_array_equal(new_image, image_expected)
def test_2d_ndimage_equivalence():
image = cp.zeros((9, 9), cp.uint8)
image[2:-2, 2:-2] = 128
image[3:-3, 3:-3] = 196
image[4, 4] = 255
opened = morphology.opening(image)
closed = morphology.closing(image)
footprint = ndi.generate_binary_structure(2, 1)
ndimage_opened = ndi.grey_opening(image, footprint=footprint)
ndimage_closed = ndi.grey_closing(image, footprint=footprint)
cp.testing.assert_array_equal(opened, ndimage_opened)
cp.testing.assert_array_equal(closed, ndimage_closed)
# float test images
# fmt: off
im = cp.array([[0.55, 0.72, 0.6 , 0.54, 0.42], # noqa
[0.65, 0.44, 0.89, 0.96, 0.38],
[0.79, 0.53, 0.57, 0.93, 0.07],
[0.09, 0.02, 0.83, 0.78, 0.87],
[0.98, 0.8 , 0.46, 0.78, 0.12]]) # noqa
eroded = cp.array([[0.55, 0.44, 0.54, 0.42, 0.38],
[0.44, 0.44, 0.44, 0.38, 0.07],
[0.09, 0.02, 0.53, 0.07, 0.07],
[0.02, 0.02, 0.02, 0.78, 0.07],
[0.09, 0.02, 0.46, 0.12, 0.12]])
dilated = cp.array([[0.72, 0.72, 0.89, 0.96, 0.54],
[0.79, 0.89, 0.96, 0.96, 0.96],
[0.79, 0.79, 0.93, 0.96, 0.93],
[0.98, 0.83, 0.83, 0.93, 0.87],
[0.98, 0.98, 0.83, 0.78, 0.87]])
opened = cp.array([[0.55, 0.55, 0.54, 0.54, 0.42],
[0.55, 0.44, 0.54, 0.44, 0.38],
[0.44, 0.53, 0.53, 0.78, 0.07],
[0.09, 0.02, 0.78, 0.78, 0.78],
[0.09, 0.46, 0.46, 0.78, 0.12]])
closed = cp.array([[0.72, 0.72, 0.72, 0.54, 0.54],
[0.72, 0.72, 0.89, 0.96, 0.54],
[0.79, 0.79, 0.79, 0.93, 0.87],
[0.79, 0.79, 0.83, 0.78, 0.87],
[0.98, 0.83, 0.78, 0.78, 0.78]])
# fmt: on
def test_float():
cp.testing.assert_allclose(morphology.erosion(im), eroded)
cp.testing.assert_allclose(morphology.dilation(im), dilated)
cp.testing.assert_allclose(morphology.opening(im), opened)
cp.testing.assert_allclose(morphology.closing(im), closed)
def test_uint16():
im16, eroded16, dilated16, opened16, closed16 = map(
img_as_uint, [im, eroded, dilated, opened, closed]
)
cp.testing.assert_allclose(morphology.erosion(im16), eroded16)
cp.testing.assert_allclose(morphology.dilation(im16), dilated16)
cp.testing.assert_allclose(morphology.opening(im16), opened16)
cp.testing.assert_allclose(morphology.closing(im16), closed16)
def test_discontiguous_out_array():
# fmt: off
image = cp.array([[5, 6, 2],
[7, 2, 2],
[3, 5, 1]], cp.uint8)
# fmt: on
out_array_big = cp.zeros((5, 5), cp.uint8)
out_array = out_array_big[::2, ::2]
# fmt: off
expected_dilation = cp.array([[7, 0, 6, 0, 6],
[0, 0, 0, 0, 0],
[7, 0, 7, 0, 2],
[0, 0, 0, 0, 0],
[7, 0, 5, 0, 5]], cp.uint8)
expected_erosion = cp.array([[5, 0, 2, 0, 2],
[0, 0, 0, 0, 0],
[2, 0, 2, 0, 1],
[0, 0, 0, 0, 0],
[3, 0, 1, 0, 1]], cp.uint8)
# fmt: on
morphology.dilation(image, out=out_array)
cp.testing.assert_array_equal(out_array_big, expected_dilation)
morphology.erosion(image, out=out_array)
cp.testing.assert_array_equal(out_array_big, expected_erosion)
def test_1d_erosion():
image = cp.array([1, 2, 3, 2, 1])
expected = cp.array([1, 1, 2, 1, 1])
eroded = morphology.erosion(image)
cp.testing.assert_array_equal(eroded, expected)
def test_deprecated_import():
msg = "Importing from cucim.skimage.morphology.grey is deprecated."
with expected_warnings([msg + r"|\A\Z"]):
from cucim.skimage.morphology.grey import erosion # noqa
@pytest.mark.parametrize(
"function",
[
"erosion",
"dilation",
"closing",
"opening",
"white_tophat",
"black_tophat",
],
)
def test_selem_kwarg_deprecation(function):
with expected_warnings(["`selem` is a deprecated argument name"]):
getattr(morphology, function)(cp.zeros((4, 4)), selem=cp.ones((3, 3)))
@pytest.mark.parametrize(
"function",
[
"erosion",
"dilation",
"closing",
"opening",
"white_tophat",
"black_tophat",
],
)
@pytest.mark.parametrize("size", (7,))
@pytest.mark.parametrize("decomposition", ["separable", "sequence"])
def test_square_decomposition(cam_image, function, size, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = morphology.square(size, decomposition=None)
footprint = morphology.square(size, decomposition=decomposition)
func = getattr(morphology, function)
expected = func(cam_image, footprint=footprint_ndarray)
out = func(cam_image, footprint=footprint)
cp.testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
[
"erosion",
"dilation",
"closing",
"opening",
"white_tophat",
"black_tophat",
],
)
@pytest.mark.parametrize("nrows", (3, 11))
@pytest.mark.parametrize("ncols", (3, 11))
@pytest.mark.parametrize("decomposition", ["separable", "sequence"])
def test_rectangle_decomposition(
cam_image, function, nrows, ncols, decomposition
):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = morphology.rectangle(nrows, ncols, decomposition=None)
footprint = morphology.rectangle(nrows, ncols, decomposition=decomposition)
func = getattr(morphology, function)
expected = func(cam_image, footprint=footprint_ndarray)
out = func(cam_image, footprint=footprint)
cp.testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
[
"erosion",
"dilation",
"closing",
"opening",
"white_tophat",
"black_tophat",
],
)
@pytest.mark.parametrize("radius", (2, 3))
@pytest.mark.parametrize("decomposition", ["sequence"])
def test_diamond_decomposition(cam_image, function, radius, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = morphology.diamond(radius, decomposition=None)
footprint = morphology.diamond(radius, decomposition=decomposition)
func = getattr(morphology, function)
expected = func(cam_image, footprint=footprint_ndarray)
out = func(cam_image, footprint=footprint)
cp.testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
[
"erosion",
"dilation",
"closing",
"opening",
"white_tophat",
"black_tophat",
],
)
@pytest.mark.parametrize("m", (0, 1, 3, 5))
@pytest.mark.parametrize("n", (0, 1, 2, 3))
@pytest.mark.parametrize("decomposition", ["sequence"])
def test_octagon_decomposition(cam_image, function, m, n, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
if m == 0 and n == 0:
with pytest.raises(ValueError):
morphology.octagon(m, n, decomposition=decomposition)
else:
footprint_ndarray = morphology.octagon(m, n, decomposition=None)
footprint = morphology.octagon(m, n, decomposition=decomposition)
func = getattr(morphology, function)
expected = func(cam_image, footprint=footprint_ndarray)
out = func(cam_image, footprint=footprint)
cp.testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
[
"erosion",
"dilation",
"closing",
"opening",
"white_tophat",
"black_tophat",
],
)
@pytest.mark.parametrize("size", (5,))
@pytest.mark.parametrize("decomposition", ["separable", "sequence"])
def test_cube_decomposition(cell3d_image, function, size, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = morphology.cube(size, decomposition=None)
footprint = morphology.cube(size, decomposition=decomposition)
func = getattr(morphology, function)
expected = func(cell3d_image, footprint=footprint_ndarray)
out = func(cell3d_image, footprint=footprint)
cp.testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
[
"erosion",
"dilation",
"closing",
"opening",
"white_tophat",
"black_tophat",
],
)
@pytest.mark.parametrize("radius", (3,))
@pytest.mark.parametrize("decomposition", ["sequence"])
def test_octahedron_decomposition(
cell3d_image, function, radius, decomposition
):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = morphology.octahedron(radius, decomposition=None)
footprint = morphology.octahedron(radius, decomposition=decomposition)
func = getattr(morphology, function)
expected = func(cell3d_image, footprint=footprint_ndarray)
out = func(cell3d_image, footprint=footprint)
cp.testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
["erosion", "dilation", "closing", "opening"],
)
@pytest.mark.parametrize("ndim", [2, 3])
@pytest.mark.parametrize("odd_only", [False, True])
def test_tuple_as_footprint(function, ndim, odd_only):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
if odd_only:
footprint_shape = (3,) * ndim
else:
footprint_shape = tuple(range(2, 2 + ndim))
footprint_ndarray = cp.ones(footprint_shape, dtype=bool)
rng = cp.random.default_rng(5)
img = rng.standard_normal((16,) * ndim, dtype=cp.float32)
func = getattr(morphology, function)
expected = func(img, footprint=footprint_ndarray)
out = func(img, footprint=footprint_shape)
testing.assert_array_equal(expected, out)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/tests/test_binary.py
|
import cupy as cp
import numpy as np
import pytest
from cupy import testing
from cupyx.scipy import ndimage as ndi
from skimage import data
from cucim.skimage import color, morphology
from cucim.skimage._shared._warnings import expected_warnings
from cucim.skimage.util import img_as_bool
img = color.rgb2gray(cp.array(data.astronaut()))
bw_img = img > 100 / 255.0
def test_non_square_image():
footprint = morphology.square(3)
binary_res = morphology.binary_erosion(bw_img[:100, :200], footprint)
gray_res = img_as_bool(morphology.erosion(bw_img[:100, :200], footprint))
testing.assert_array_equal(binary_res, gray_res)
@pytest.mark.parametrize(
"function",
["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"],
)
def test_selem_kwarg_deprecation(function):
with expected_warnings(["`selem` is a deprecated argument name"]):
getattr(morphology, function)(bw_img, selem=morphology.square(3))
def test_binary_erosion():
footprint = morphology.square(3)
binary_res = morphology.binary_erosion(bw_img, footprint)
gray_res = img_as_bool(morphology.erosion(bw_img, footprint))
testing.assert_array_equal(binary_res, gray_res)
def test_binary_dilation():
footprint = morphology.square(3)
binary_res = morphology.binary_dilation(bw_img, footprint)
gray_res = img_as_bool(morphology.dilation(bw_img, footprint))
testing.assert_array_equal(binary_res, gray_res)
def test_binary_closing():
footprint = morphology.square(3)
binary_res = morphology.binary_closing(bw_img, footprint)
gray_res = img_as_bool(morphology.closing(bw_img, footprint))
testing.assert_array_equal(binary_res, gray_res)
def test_binary_opening():
footprint = morphology.square(3)
binary_res = morphology.binary_opening(bw_img, footprint)
gray_res = img_as_bool(morphology.opening(bw_img, footprint))
testing.assert_array_equal(binary_res, gray_res)
def _get_decomp_test_data(function, ndim=2):
if function == "binary_erosion":
img = cp.ones((17,) * ndim, dtype=cp.uint8)
img[(8,) * ndim] = 0
elif function == "binary_dilation":
img = cp.zeros((17,) * ndim, dtype=cp.uint8)
img[(8,) * ndim] = 1
else:
img = cp.asarray(data.binary_blobs(32, n_dim=ndim, rng=1))
return img
@pytest.mark.parametrize(
"function",
["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"],
)
@pytest.mark.parametrize("size", (3, 4, 11))
@pytest.mark.parametrize("decomposition", ["separable", "sequence"])
def test_square_decomposition(function, size, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = morphology.square(size, decomposition=None)
footprint = morphology.square(size, decomposition=decomposition)
img = _get_decomp_test_data(function)
func = getattr(morphology, function)
expected = func(img, footprint=footprint_ndarray)
out = func(img, footprint=footprint)
testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"],
)
@pytest.mark.parametrize("nrows", (3, 4, 11))
@pytest.mark.parametrize("ncols", (3, 4, 11))
@pytest.mark.parametrize("decomposition", ["separable", "sequence"])
def test_rectangle_decomposition(function, nrows, ncols, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = morphology.rectangle(nrows, ncols, decomposition=None)
footprint = morphology.rectangle(nrows, ncols, decomposition=decomposition)
img = _get_decomp_test_data(function)
func = getattr(morphology, function)
expected = func(img, footprint=footprint_ndarray)
out = func(img, footprint=footprint)
testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"],
)
@pytest.mark.parametrize("m", (0, 1, 2, 3, 4, 5))
@pytest.mark.parametrize("n", (0, 1, 2, 3, 4, 5))
@pytest.mark.parametrize("decomposition", ["sequence"])
def test_octagon_decomposition(function, m, n, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
if m == 0 and n == 0:
with pytest.raises(ValueError):
morphology.octagon(m, n, decomposition=decomposition)
else:
footprint_ndarray = morphology.octagon(m, n, decomposition=None)
footprint = morphology.octagon(m, n, decomposition=decomposition)
img = _get_decomp_test_data(function)
func = getattr(morphology, function)
expected = func(img, footprint=footprint_ndarray)
out = func(img, footprint=footprint)
testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"],
)
@pytest.mark.parametrize("radius", (1, 2, 5))
@pytest.mark.parametrize("decomposition", ["sequence"])
def test_diamond_decomposition(function, radius, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = morphology.diamond(radius, decomposition=None)
footprint = morphology.diamond(radius, decomposition=decomposition)
img = _get_decomp_test_data(function)
func = getattr(morphology, function)
expected = func(img, footprint=footprint_ndarray)
out = func(img, footprint=footprint)
testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"],
)
@pytest.mark.parametrize("size", (3, 4, 5))
@pytest.mark.parametrize("decomposition", ["separable", "sequence"])
def test_cube_decomposition(function, size, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = morphology.cube(size, decomposition=None)
footprint = morphology.cube(size, decomposition=decomposition)
img = _get_decomp_test_data(function, ndim=3)
func = getattr(morphology, function)
expected = func(img, footprint=footprint_ndarray)
out = func(img, footprint=footprint)
testing.assert_array_equal(expected, out)
@pytest.mark.parametrize(
"function",
["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"],
)
@pytest.mark.parametrize("radius", (1, 2, 3))
@pytest.mark.parametrize("decomposition", ["sequence"])
def test_octahedron_decomposition(function, radius, decomposition):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_ndarray = morphology.octahedron(radius, decomposition=None)
footprint = morphology.octahedron(radius, decomposition=decomposition)
img = _get_decomp_test_data(function, ndim=3)
func = getattr(morphology, function)
expected = func(img, footprint=footprint_ndarray)
out = func(img, footprint=footprint)
testing.assert_array_equal(expected, out)
def test_footprint_overflow():
footprint = cp.ones((17, 17), dtype=cp.uint8)
img = cp.zeros((20, 20), dtype=bool)
img[2:19, 2:19] = True
binary_res = morphology.binary_erosion(img, footprint)
gray_res = img_as_bool(morphology.erosion(img, footprint))
testing.assert_array_equal(binary_res, gray_res)
def test_out_argument():
for func in (morphology.binary_erosion, morphology.binary_dilation):
footprint = cp.ones((3, 3), dtype=cp.uint8)
img = cp.ones((10, 10))
out = cp.zeros_like(img)
out_saved = out.copy()
func(img, footprint, out=out)
assert cp.any(out != out_saved)
testing.assert_array_equal(out, func(img, footprint))
binary_functions = [
morphology.binary_erosion,
morphology.binary_dilation,
morphology.binary_opening,
morphology.binary_closing,
]
@pytest.mark.parametrize("function", binary_functions)
def test_default_footprint(function):
footprint = morphology.diamond(radius=1)
# fmt: off
image = cp.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], cp.uint8)
# fmt: on
im_expected = function(image, footprint)
im_test = function(image)
testing.assert_array_equal(im_expected, im_test)
def test_3d_fallback_default_footprint():
# 3x3x3 cube inside a 7x7x7 image:
image = cp.zeros((7, 7, 7), bool)
image[2:-2, 2:-2, 2:-2] = 1
opened = morphology.binary_opening(image)
# expect a "hyper-cross" centered in the 5x5x5:
image_expected = cp.zeros((7, 7, 7), dtype=bool)
image_expected[2:5, 2:5, 2:5] = ndi.generate_binary_structure(3, 1)
testing.assert_array_equal(opened, image_expected)
binary_3d_fallback_functions = [
morphology.binary_opening,
morphology.binary_closing,
]
@pytest.mark.parametrize("function", binary_3d_fallback_functions)
def test_3d_fallback_cube_footprint(function):
# 3x3x3 cube inside a 7x7x7 image:
image = cp.zeros((7, 7, 7), bool)
image[2:-2, 2:-2, 2:-2] = 1
cube = cp.ones((3, 3, 3), dtype=cp.uint8)
new_image = function(image, cube)
testing.assert_array_equal(new_image, image)
def test_2d_ndimage_equivalence():
image = cp.zeros((9, 9), cp.uint16)
image[2:-2, 2:-2] = 2**14
image[3:-3, 3:-3] = 2**15
image[4, 4] = 2**16 - 1
bin_opened = morphology.binary_opening(image)
bin_closed = morphology.binary_closing(image)
footprint = ndi.generate_binary_structure(2, 1)
ndimage_opened = ndi.binary_opening(image, structure=footprint)
ndimage_closed = ndi.binary_closing(image, structure=footprint)
testing.assert_array_equal(bin_opened, ndimage_opened)
testing.assert_array_equal(bin_closed, ndimage_closed)
def test_binary_output_2d():
image = cp.zeros((9, 9), cp.uint16)
image[2:-2, 2:-2] = 2**14
image[3:-3, 3:-3] = 2**15
image[4, 4] = 2**16 - 1
bin_opened = morphology.binary_opening(image)
bin_closed = morphology.binary_closing(image)
int_opened = cp.empty_like(image, dtype=cp.uint8)
int_closed = cp.empty_like(image, dtype=cp.uint8)
morphology.binary_opening(image, out=int_opened)
morphology.binary_closing(image, out=int_closed)
np.testing.assert_equal(bin_opened.dtype, bool)
np.testing.assert_equal(bin_closed.dtype, bool)
np.testing.assert_equal(int_opened.dtype, np.uint8)
np.testing.assert_equal(int_closed.dtype, np.uint8)
def test_binary_output_3d():
image = cp.zeros((9, 9, 9), cp.uint16)
image[2:-2, 2:-2, 2:-2] = 2**14
image[3:-3, 3:-3, 3:-3] = 2**15
image[4, 4, 4] = 2**16 - 1
bin_opened = morphology.binary_opening(image)
bin_closed = morphology.binary_closing(image)
int_opened = cp.empty_like(image, dtype=cp.uint8)
int_closed = cp.empty_like(image, dtype=cp.uint8)
morphology.binary_opening(image, out=int_opened)
morphology.binary_closing(image, out=int_closed)
np.testing.assert_equal(bin_opened.dtype, bool)
np.testing.assert_equal(bin_closed.dtype, bool)
np.testing.assert_equal(int_opened.dtype, np.uint8)
np.testing.assert_equal(int_closed.dtype, np.uint8)
@pytest.mark.parametrize(
"function",
["binary_erosion", "binary_dilation", "binary_closing", "binary_opening"],
)
@pytest.mark.parametrize("ndim", [1, 2, 3])
def test_tuple_as_footprint(function, ndim):
"""Validate footprint decomposition for various shapes.
comparison is made to the case without decomposition.
"""
footprint_shape = tuple(range(2, ndim + 2))
footprint_ndarray = cp.ones(footprint_shape, dtype=bool)
img = _get_decomp_test_data(function, ndim=ndim)
func = getattr(morphology, function)
expected = func(img, footprint=footprint_ndarray)
out = func(img, footprint=footprint_shape)
testing.assert_array_equal(expected, out)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/tests/test_isotropic.py
|
import cupy as cp
import numpy as np
import pytest
from cupy.testing import assert_array_equal
from skimage import data
from cucim.skimage import color, morphology
from cucim.skimage.util import img_as_bool
img = color.rgb2gray(cp.asarray(data.astronaut()))
bw_img = img > 100 / 255.0
def test_non_square_image():
isotropic_res = morphology.isotropic_erosion(bw_img[:100, :200], 3)
binary_res = img_as_bool(
morphology.binary_erosion(bw_img[:100, :200], morphology.disk(3))
)
assert_array_equal(isotropic_res, binary_res)
def test_isotropic_erosion():
isotropic_res = morphology.isotropic_erosion(bw_img, 3)
binary_res = img_as_bool(
morphology.binary_erosion(bw_img, morphology.disk(3))
)
assert_array_equal(isotropic_res, binary_res)
def _disk_with_spacing(
radius, dtype=cp.uint8, *, strict_radius=True, spacing=None
):
# Identical to morphology.disk, but with a spacing parameter and without
# decomposition. This is different from morphology.ellipse which produces a
# slightly different footprint.
L = np.arange(-radius, radius + 1)
X, Y = np.meshgrid(L, L)
if spacing is not None:
X *= spacing[1]
Y *= spacing[0]
if not strict_radius:
radius += 0.5
return cp.asarray((X**2 + Y**2) <= radius**2, dtype=dtype)
def test_isotropic_erosion_spacing():
isotropic_res = morphology.isotropic_dilation(bw_img, 6, spacing=(1, 2))
binary_res = img_as_bool(
morphology.binary_dilation(
bw_img, _disk_with_spacing(6, spacing=(1, 2))
)
)
assert_array_equal(isotropic_res, binary_res)
def test_isotropic_dilation():
isotropic_res = morphology.isotropic_dilation(bw_img, 3)
binary_res = img_as_bool(
morphology.binary_dilation(bw_img, morphology.disk(3))
)
assert_array_equal(isotropic_res, binary_res)
def test_isotropic_closing():
isotropic_res = morphology.isotropic_closing(bw_img, 3)
binary_res = img_as_bool(
morphology.binary_closing(bw_img, morphology.disk(3))
)
assert_array_equal(isotropic_res, binary_res)
def test_isotropic_opening():
isotropic_res = morphology.isotropic_opening(bw_img, 3)
binary_res = img_as_bool(
morphology.binary_opening(bw_img, morphology.disk(3))
)
assert_array_equal(isotropic_res, binary_res)
def test_footprint_overflow():
img = cp.zeros((20, 20), dtype=bool)
img[2:19, 2:19] = True
isotropic_res = morphology.isotropic_erosion(img, 9)
binary_res = img_as_bool(morphology.binary_erosion(img, morphology.disk(9)))
assert_array_equal(isotropic_res, binary_res)
@pytest.mark.parametrize("out_dtype", [bool, cp.uint8, cp.int32])
def test_out_argument(out_dtype):
for func in (
morphology.isotropic_erosion,
morphology.isotropic_dilation,
morphology.isotropic_opening,
morphology.isotropic_closing,
):
radius = 3
img = cp.ones((10, 10), dtype=bool)
img[2:5, 2:5] = 0
out = cp.zeros_like(img, dtype=out_dtype)
out_saved = out.copy()
if out_dtype not in [bool, cp.uint8]:
with pytest.raises(ValueError):
func(img, radius, out=out)
else:
func(img, radius, out=out)
assert cp.any(out != out_saved)
assert_array_equal(out, func(img, radius))
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/morphology/tests/test_skeletonize.py
|
import cupy as cp
import numpy as np
import pytest
from cupy.testing import assert_array_equal
from skimage import data
from skimage.morphology import thin as thin_cpu
from cucim.skimage._shared._warnings import expected_warnings
from cucim.skimage.morphology import medial_axis, thin
class TestThin:
@property
def input_image(self):
"""image to test thinning with"""
ii = cp.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
],
dtype=cp.uint8,
)
return ii
def test_zeros(self):
assert cp.all(thin(cp.zeros((10, 10))) == 0)
def test_iter_1(self):
result = thin(self.input_image, 1).astype(cp.uint8)
expected = cp.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
dtype=cp.uint8,
)
assert_array_equal(result, expected)
def test_max_iter_kwarg_deprecation(self):
result1 = thin(self.input_image, max_num_iter=1).astype(cp.uint8)
with expected_warnings(["`max_iter` is a deprecated argument name"]):
result2 = thin(self.input_image, max_iter=1).astype(cp.uint8)
assert_array_equal(result1, result2)
def test_noiter(self):
result = thin(self.input_image).astype(cp.uint8)
expected = cp.array(
[
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
],
dtype=cp.uint8,
)
assert_array_equal(result, expected)
def test_baddim(self):
for ii in [cp.zeros((3)), cp.zeros((3, 3, 3))]:
with pytest.raises(ValueError):
thin(ii)
@pytest.mark.parametrize("invert", [False, True])
def test_compare_skimage(self, invert):
h = data.horse()
if invert:
h = ~h
result = thin(cp.asarray(h))
expected = thin_cpu(h)
assert_array_equal(result, expected)
class TestMedialAxis:
def test_00_00_zeros(self):
"""Test skeletonize on an array of all zeros"""
result = medial_axis(cp.zeros((10, 10), bool))
assert not cp.any(result)
def test_00_01_zeros_masked(self):
"""Test skeletonize on an array that is completely masked"""
result = medial_axis(cp.zeros((10, 10), bool), cp.zeros((10, 10), bool))
assert not cp.any(result)
def _test_vertical_line(self, **kwargs):
"""Test a thick vertical line, issue #3861"""
img = cp.zeros((9, 9))
img[:, 2] = 1
img[:, 3] = 1
img[:, 4] = 1
expected = cp.full(img.shape, False)
expected[:, 3] = True
result = medial_axis(img, **kwargs)
assert_array_equal(result, expected)
def test_vertical_line(self):
"""Test a thick vertical line, issue #3861"""
self._test_vertical_line()
def test_rng_numpy(self):
# NumPy Generator allowed
self._test_vertical_line(rng=np.random.default_rng())
def test_rng_cupy(self):
# CuPy Generator not currently supported
with pytest.raises(ValueError):
self._test_vertical_line(rng=cp.random.default_rng())
def test_rng_int(self):
self._test_vertical_line(rng=15)
def test_vertical_line_seed(self):
"""seed was deprecated (now use rng)"""
with pytest.warns(FutureWarning):
self._test_vertical_line(seed=15)
def test_vertical_line_random_state(self):
"""random_state was deprecated (now use rng)"""
with pytest.warns(FutureWarning):
self._test_vertical_line(random_state=15)
def test_01_01_rectangle(self):
"""Test skeletonize on a rectangle"""
image = cp.zeros((9, 15), bool)
image[1:-1, 1:-1] = True
#
# The result should be four diagonals from the
# corners, meeting in a horizontal line
#
expected = cp.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
result = medial_axis(image)
assert cp.all(result == expected)
result, distance = medial_axis(image, return_distance=True)
assert distance.max() == 4
def test_01_02_hole(self):
"""Test skeletonize on a rectangle with a hole in the middle"""
image = cp.zeros((9, 15), bool)
image[1:-1, 1:-1] = True
image[4, 4:-4] = False
expected = cp.array(
[
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
],
dtype=bool,
)
result = medial_axis(image)
assert cp.all(result == expected)
def test_narrow_image(self):
"""Test skeletonize on a 1-pixel thin strip"""
image = cp.zeros((1, 5), bool)
image[:, 1:-1] = True
result = medial_axis(image)
assert cp.all(result == image)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/_dependency_checks.py
|
from .version_requirements import is_installed
has_mpl = is_installed("matplotlib", ">=3.3")
if has_mpl:
try:
# will fail with
# ImportError: Failed to import any qt binding
# if only matplotlib-base is installed
from matplotlib import pyplot # noqa
except ImportError:
has_mpl = False
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/_gradient.py
|
"""
Simplified version of cupy.gradient
This version doesn't support non-unit spacing or 2nd order edges.
Importantly, this version does not promote all integer dtypes to float64, but
instead will promote 8 and 16-bit integer types to float32.
"""
import cupy
from cucim.skimage._shared.utils import _supported_float_type
def gradient(f, axis=None, output_as_array=False):
"""Return the gradient of an N-dimensional array.
The gradient is computed using second order accurate central differences
    in the interior points and first order accurate one-sided
    (forward or backwards) differences at the boundaries.
The returned gradient hence has the same shape as the input array.
Args:
f (cupy.ndarray): An N-dimensional array containing samples of a scalar
function.
axis (None or int or tuple of ints, optional): The gradient is
calculated only along the given axis or axes. The default
(axis = None) is to calculate the gradient for all the axes of the
input array. axis may be negative, in which case it counts from the
last to the first axis.
        output_as_array (bool, optional): If True, return the gradients stacked
            into a single array along a new leading axis instead of as a list
            of per-axis arrays. Defaults to False.
Returns:
gradient (cupy.ndarray or list of cupy.ndarray): A set of ndarrays
(or a single ndarray if there is only one dimension) corresponding
to the derivatives of f with respect to each dimension. Each
derivative has the same shape as f.
"""
ndim = f.ndim # number of dimensions
if axis is None:
axes = tuple(range(ndim))
else:
if cupy.isscalar(axis):
axis = (axis,)
for ax in axis:
            if ax < -ndim or ax >= ndim:
raise ValueError(f"invalid axis: {ax}")
axes = tuple(ax + ndim if ax < 0 else ax for ax in axis)
len_axes = len(axes)
    # use central differences on the interior and first order one-sided
    # differences at the endpoints (this simplified version does not support
    # second order edges).
# create slice objects --- initially all are [:, :, ..., :]
slice1 = [slice(None)] * ndim
slice2 = [slice(None)] * ndim
slice3 = [slice(None)] * ndim
slice4 = [slice(None)] * ndim
otype = f.dtype
if cupy.issubdtype(otype, cupy.inexact):
pass
else:
# All other types convert to floating point.
float_dtype = _supported_float_type(otype)
if cupy.issubdtype(otype, cupy.integer):
f = f.astype(float_dtype)
otype = float_dtype
if output_as_array:
out = cupy.empty((ndim,) + f.shape, dtype=otype)
outvals = out
else:
outvals = []
for axis in axes:
if f.shape[axis] < 2:
raise ValueError(
"Shape of array too small to calculate a numerical gradient, "
"at least 2 elements are required."
)
# result allocation
if not output_as_array:
out = cupy.empty_like(f, dtype=otype)
# Numerical differentiation: 2nd order interior
slice1[axis] = slice(1, -1)
slice2[axis] = slice(None, -2)
slice3[axis] = slice(1, -1)
slice4[axis] = slice(2, None)
out_sl = (axis,) + tuple(slice1) if output_as_array else tuple(slice1)
out[out_sl] = (f[tuple(slice4)] - f[tuple(slice2)]) / 2.0
# Numerical differentiation: 1st order edges
slice1[axis] = 0
slice2[axis] = 1
slice3[axis] = 0
# 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
out_sl = (axis,) + tuple(slice1) if output_as_array else tuple(slice1)
out[out_sl] = f[tuple(slice2)] - f[tuple(slice3)]
slice1[axis] = -1
slice2[axis] = -1
slice3[axis] = -2
# 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
out_sl = (axis,) + tuple(slice1) if output_as_array else tuple(slice1)
out[out_sl] = f[tuple(slice2)] - f[tuple(slice3)]
if not output_as_array:
outvals.append(out)
# reset the slice object in this dimension to ":"
slice1[axis] = slice(None)
slice2[axis] = slice(None)
slice3[axis] = slice(None)
slice4[axis] = slice(None)
if len_axes == 1:
return outvals[0]
else:
return outvals
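if __name__ == "__main__":
    # Minimal usage sketch (illustrative only, not part of the original module).
    # With unit spacing the result matches numpy.gradient: second order central
    # differences in the interior, first order one-sided differences at the ends.
    _f = cupy.asarray([1.0, 2.0, 4.0, 7.0], dtype=cupy.float32)
    print(gradient(_f))  # [1.0, 1.5, 2.5, 3.0]
    _img = cupy.arange(12, dtype=cupy.float32).reshape(3, 4)
    print(gradient(_img, output_as_array=True).shape)  # (2, 3, 4)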
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/coord.py
|
import cupy as cp
import numpy as np
from scipy.spatial import cKDTree, distance
# TODO: avoid host/device transfers (currently needed for cKDTree)
def _ensure_spacing(coord, spacing, p_norm, max_out):
"""Returns a subset of coord where a minimum spacing is guaranteed.
Parameters
----------
coord : ndarray
The coordinates of the considered points.
spacing : float
        The minimum allowed spacing between the points.
p_norm : float
Which Minkowski p-norm to use. Should be in the range [1, inf].
A finite large p may cause a ValueError if overflow can occur.
``inf`` corresponds to the Chebyshev distance and 2 to the
Euclidean distance.
    max_out : int
If not None, at most the first ``max_out`` candidates are
returned.
Returns
-------
output : ndarray
A subset of coord where a minimum spacing is guaranteed.
"""
# Use KDtree to find the peaks that are too close to each other
tree = cKDTree(coord)
indices = tree.query_ball_point(coord, r=spacing, p=p_norm)
rejected_peaks_indices = set()
naccepted = 0
for idx, candidates in enumerate(indices):
if idx not in rejected_peaks_indices:
# keep current point and the points at exactly spacing from it
candidates.remove(idx)
dist = distance.cdist(
[coord[idx]], coord[candidates], distance.minkowski, p=p_norm
).reshape(-1)
candidates = [c for c, d in zip(candidates, dist) if d < spacing]
# candidates.remove(keep)
rejected_peaks_indices.update(candidates)
naccepted += 1
if max_out is not None and naccepted >= max_out:
break
# Remove the peaks that are too close to each other
output = np.delete(coord, tuple(rejected_peaks_indices), axis=0)
if max_out is not None:
output = output[:max_out]
return output
def ensure_spacing(
coords,
spacing=1,
p_norm=np.inf,
min_split_size=50,
max_out=None,
*,
max_split_size=2000,
):
"""Returns a subset of coord where a minimum spacing is guaranteed.
Parameters
----------
coords : array_like
The coordinates of the considered points.
spacing : float
        The minimum allowed spacing between the points.
p_norm : float
Which Minkowski p-norm to use. Should be in the range [1, inf].
A finite large p may cause a ValueError if overflow can occur.
``inf`` corresponds to the Chebyshev distance and 2 to the
Euclidean distance.
min_split_size : int
Minimum split size used to process ``coords`` by batch to save
memory. If None, the memory saving strategy is not applied.
max_out : int
If not None, only the first ``max_out`` candidates are returned.
max_split_size : int
Maximum split size used to process ``coords`` by batch to save
memory. This number was decided by profiling with a large number
of points. Too small a number results in too much looping in
Python instead of C, slowing down the process, while too large
a number results in large memory allocations, slowdowns, and,
potentially, in the process being killed -- see gh-6010. See
benchmark results `here
<https://github.com/scikit-image/scikit-image/pull/6035#discussion_r751518691>`_.
Returns
-------
output : array_like
A subset of coord where a minimum spacing is guaranteed.
"""
output = coords
if len(coords):
coords = cp.atleast_2d(coords)
coords = cp.asnumpy(coords)
if min_split_size is None:
batch_list = [coords]
else:
coord_count = len(coords)
split_idx = [min_split_size]
split_size = min_split_size
while coord_count - split_idx[-1] > max_split_size:
split_size *= 2
split_idx.append(
split_idx[-1] + min(split_size, max_split_size)
)
batch_list = np.array_split(coords, split_idx)
output = np.zeros((0, coords.shape[1]), dtype=coords.dtype)
for batch in batch_list:
output = _ensure_spacing(
np.vstack([output, batch]), spacing, p_norm, max_out
)
if max_out is not None and len(output) >= max_out:
break
return cp.asarray(output)
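# Minimal usage sketch (illustrative only; the coordinates below are made up).
# Points closer than ``spacing`` to an already accepted point are dropped, so
# only two of the three points survive here.
if __name__ == "__main__":
    _pts = cp.asarray([[0.0, 0.0], [0.5, 0.0], [5.0, 5.0]])
    _kept = ensure_spacing(_pts, spacing=1.0)
    assert _kept.shape == (2, 2)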
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/testing.py
|
import pytest
from numpy.testing import ( # noqa
TestCase,
assert_,
assert_allclose,
assert_almost_equal,
assert_array_almost_equal,
assert_array_almost_equal_nulp,
assert_array_equal,
assert_array_less,
assert_equal,
assert_no_warnings,
assert_warns,
)
from ._warnings import expected_warnings # noqa
skipif = pytest.mark.skipif
xfail = pytest.mark.xfail
parametrize = pytest.mark.parametrize
raises = pytest.raises
fixture = pytest.fixture
have_fetch = True
try:
# scikit-image >=0.19
from skimage.data._fetchers import _fetch
except ImportError:
# skip this test if private API changed on scikit-image end
have_fetch = False
def fetch(data_filename):
"""Attempt to fetch data, but if unavailable, skip the tests."""
if have_fetch:
try:
# CuPy Backend: TODO: avoid call to non-public _fetch method
return _fetch(data_filename)
except (ConnectionError, ModuleNotFoundError):
pytest.skip(f"Unable to download {data_filename}")
else:
pytest.skip("skimage _fetch utility not found")
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/fft.py
|
"""Prefer FFTs via the new scipy.fft module when available (SciPy 1.4+)
Otherwise fall back to numpy.fft.
Like numpy 1.15+ scipy 1.3+ is also using pocketfft, but a newer
C++/pybind11 version called pypocketfft
"""
import cupyx.scipy.fft
from cupyx.scipy.fft import next_fast_len
fftmodule = cupyx.scipy.fft
__all__ = ["fftmodule", "next_fast_len"]
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/filters.py
|
"""Filters used across multiple skimage submodules.
These are defined here to avoid circular imports.
The unit tests remain under skimage/filters/tests/
"""
from collections.abc import Iterable
import cupy as cp
import cucim.skimage._vendored.ndimage as ndi
from .._shared.utils import _supported_float_type, convert_to_float, warn
class _PatchClassRepr(type):
"""Control class representations in rendered signatures."""
def __repr__(cls):
return f"<{cls.__name__}>"
class ChannelAxisNotSet(metaclass=_PatchClassRepr):
"""Signal that the `channel_axis` parameter is not set.
This is a proxy object, used to signal to `skimage.filters.gaussian` that
the `channel_axis` parameter has not been set, in which case the function
will determine whether a color channel is present. We cannot use ``None``
for this purpose as it has its own meaning which indicates that the given
image is grayscale.
This automatic behavior was broken in v0.19, recovered but deprecated in
v0.20 and will be removed in v0.21.
"""
def gaussian(
image,
sigma=1,
output=None,
mode="nearest",
cval=0,
preserve_range=False,
truncate=4.0,
*,
channel_axis=ChannelAxisNotSet,
):
"""Multi-dimensional Gaussian filter.
Parameters
----------
image : array-like
Input image (grayscale or color) to filter.
sigma : scalar or sequence of scalars, optional
Standard deviation for Gaussian kernel. The standard
deviations of the Gaussian filter are given for each axis as a
sequence, or as a single number, in which case it is equal for
all axes.
output : array, optional
The ``output`` parameter passes an array in which to store the
filter output.
mode : {'reflect', 'constant', 'nearest', 'mirror', 'wrap'}, optional
The ``mode`` parameter determines how the array borders are
handled, where ``cval`` is the value when mode is equal to
'constant'. Default is 'nearest'.
cval : scalar, optional
Value to fill past edges of input if ``mode`` is 'constant'. Default
is 0.0
preserve_range : bool, optional
Whether to keep the original range of values. Otherwise, the input
image is converted according to the conventions of ``img_as_float``.
Also see
https://scikit-image.org/docs/dev/user_guide/data_types.html
truncate : float, optional
Truncate the filter at this many standard deviations.
channel_axis : int or None, optional
If None, the image is assumed to be a grayscale (single channel) image.
Otherwise, this parameter indicates which axis of the array corresponds
to channels.
.. warning::
Automatic detection of the color channel based on the old deprecated
``multichannel=None`` was broken in version 0.19. In 0.20 this
behavior is recovered. The last axis of an `image` with dimensions
(M, N, 3) is interpreted as a color channel if `channel_axis` is
not set. Starting with release 23.04.02, ``channel_axis=None`` will
be used as the new default value.
Returns
-------
filtered_image : ndarray
the filtered array
Notes
-----
    This function is a wrapper around :func:`scipy.ndimage.gaussian_filter`.
Integer arrays are converted to float.
    ``output`` should have a floating-point data type, since ``gaussian``
    converts the provided ``image`` to float. If ``output`` is not provided,
    a new array will be allocated and returned as the result.
The multi-dimensional filter is implemented as a sequence of
one-dimensional convolution filters. The intermediate arrays are
stored in the same data type as the output. Therefore, for output
types with a limited precision, the results may be imprecise
because intermediate results may be stored with insufficient
precision.
Examples
--------
>>> import cupy as cp
>>> a = cp.zeros((3, 3))
>>> a[1, 1] = 1
>>> a
array([[0., 0., 0.],
[0., 1., 0.],
[0., 0., 0.]])
>>> gaussian(a, sigma=0.4) # mild smoothing
array([[0.00163116, 0.03712502, 0.00163116],
[0.03712502, 0.84496158, 0.03712502],
[0.00163116, 0.03712502, 0.00163116]])
>>> gaussian(a, sigma=1) # more smoothing
array([[0.05855018, 0.09653293, 0.05855018],
[0.09653293, 0.15915589, 0.09653293],
[0.05855018, 0.09653293, 0.05855018]])
>>> # Several modes are possible for handling boundaries
>>> gaussian(a, sigma=1, mode='reflect')
array([[0.08767308, 0.12075024, 0.08767308],
[0.12075024, 0.16630671, 0.12075024],
[0.08767308, 0.12075024, 0.08767308]])
>>> # For RGB images, each is filtered separately
>>> from skimage.data import astronaut
>>> image = cp.array(astronaut())
>>> filtered_img = gaussian(image, sigma=1, channel_axis=-1)
"""
if channel_axis is ChannelAxisNotSet:
if image.ndim == 3 and image.shape[-1] == 3:
warn(
"Automatic detection of the color channel was deprecated in "
"v0.19, and `channel_axis=None` will be the new default in "
"v0.21. Set `channel_axis=-1` explicitly to silence this "
"warning.",
FutureWarning,
stacklevel=2,
)
channel_axis = -1
else:
channel_axis = None
# CuPy Backend: refactor to avoid overhead of cp.any(cp.asarray(sigma))
sigma_msg = "Sigma values less than zero are not valid"
if not isinstance(sigma, Iterable):
if sigma < 0:
raise ValueError(sigma_msg)
elif any(s < 0 for s in sigma):
raise ValueError(sigma_msg)
if channel_axis is not None:
# do not filter across channels
if not isinstance(sigma, Iterable):
sigma = [sigma] * (image.ndim - 1)
if len(sigma) == image.ndim - 1:
sigma = list(sigma)
sigma.insert(channel_axis % image.ndim, 0)
image = convert_to_float(image, preserve_range)
float_dtype = _supported_float_type(image.dtype)
image = image.astype(float_dtype, copy=False)
if output is None:
output = cp.empty_like(image)
elif not cp.issubdtype(output.dtype, cp.floating):
raise ValueError("Provided output data type is not float")
ndi.gaussian_filter(
image, sigma, output=output, mode=mode, cval=cval, truncate=truncate
)
return output
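# Hedged sketch (illustrative shapes and values): passing a preallocated
# floating-point ``output`` array avoids an extra allocation; the same array
# is returned.
if __name__ == "__main__":
    _img = cp.random.random((64, 64)).astype(cp.float32)
    _out = cp.empty_like(_img)
    _result = gaussian(_img, sigma=2.0, output=_out)
    assert _result is _out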
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/_warnings.py
|
import functools
import os
import re
import sys
import warnings
from contextlib import contextmanager
__all__ = ["all_warnings", "expected_warnings", "warn"]
# A version of `warnings.warn` with a default stacklevel of 2.
# functool is used so as not to increase the call stack accidentally
warn = functools.partial(warnings.warn, stacklevel=2)
@contextmanager
def all_warnings():
"""
Context for use in testing to ensure that all warnings are raised.
Examples
--------
>>> import warnings
>>> def foo():
... warnings.warn(RuntimeWarning("bar"), stacklevel=2)
We raise the warning once, while the warning filter is set to "once".
Hereafter, the warning is invisible, even with custom filters:
>>> with warnings.catch_warnings():
... warnings.simplefilter('once')
... foo() # doctest: +SKIP
We can now run ``foo()`` without a warning being raised:
>>> from numpy.testing import assert_warns
>>> foo() # doctest: +SKIP
To catch the warning, we call in the help of ``all_warnings``:
>>> with all_warnings():
... assert_warns(RuntimeWarning, foo)
"""
# _warnings.py is on the critical import path.
# Since this is a testing only function, we lazy import inspect.
import inspect
# Whenever a warning is triggered, Python adds a __warningregistry__
# member to the *calling* module. The exercise here is to find
# and eradicate all those breadcrumbs that were left lying around.
#
# We proceed by first searching all parent calling frames and explicitly
# clearing their warning registries (necessary for the doctests above to
# pass). Then, we search for all submodules of skimage and clear theirs
# as well (necessary for the skimage test suite to pass).
frame = inspect.currentframe()
if frame:
for f in inspect.getouterframes(frame):
f[0].f_locals["__warningregistry__"] = {}
del frame
for mod_name, mod in list(sys.modules.items()):
try:
mod.__warningregistry__.clear()
except AttributeError:
pass
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
yield w
@contextmanager
def expected_warnings(matching):
r"""Context for use in testing to catch known warnings matching regexes
Parameters
----------
matching : None or a list of strings or compiled regexes
Regexes for the desired warning to catch
If matching is None, this behaves as a no-op.
Examples
--------
>>> import numpy as np
>>> image = np.random.randint(0, 2**16, size=(100, 100), dtype=np.uint16)
>>> # rank filters are slow when bit-depth exceeds 10 bits
>>> from skimage import filters
>>> with expected_warnings(['Bad rank filter performance']):
... median_filtered = filters.rank.median(image)
Notes
-----
Uses `all_warnings` to ensure all warnings are raised.
Upon exiting, it checks the recorded warnings for the desired matching
pattern(s).
Raises a ValueError if any match was not found or an unexpected
warning was raised.
Allows for three types of behaviors: `and`, `or`, and `optional` matches.
This is done to accommodate different build environments or loop conditions
that may produce different warnings. The behaviors can be combined.
If you pass multiple patterns, you get an orderless `and`, where all of the
warnings must be raised.
If you use the `|` operator in a pattern, you can catch one of several
warnings.
Finally, you can use `|\A\Z` in a pattern to signify it as optional.
"""
if isinstance(matching, str):
raise ValueError(
"``matching`` should be a list of strings and not "
"a string itself."
)
# Special case for disabling the context manager
if matching is None:
yield None
return
strict_warnings = os.environ.get("SKIMAGE_TEST_STRICT_WARNINGS", "1")
if strict_warnings.lower() == "true":
strict_warnings = True
elif strict_warnings.lower() == "false":
strict_warnings = False
else:
strict_warnings = bool(int(strict_warnings))
with all_warnings() as w:
# enter context
yield w
# exited user context, check the recorded warnings
# Allow users to provide None
while None in matching:
matching.remove(None)
remaining = [m for m in matching if r"\A\Z" not in m.split("|")]
for warn in w:
found = False
for match in matching:
if re.search(match, str(warn.message)) is not None:
found = True
if match in remaining:
remaining.remove(match)
if strict_warnings and not found:
raise ValueError(f"Unexpected warning: {str(warn.message)}")
if strict_warnings and (len(remaining) > 0):
newline = "\n"
msg = (
f"No warning raised matching:{newline}{newline.join(remaining)}"
)
raise ValueError(msg)
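# Hedged illustration (the warning text is made up): a ``|\A\Z`` suffix marks
# a pattern as optional, which is useful when a warning only appears in some
# environments; the block below passes even though no warning is raised.
def _example_expected_warnings_usage():
    with expected_warnings([r"might not appear|\A\Z"]):
        pass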
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/utils.py
|
import functools
import inspect
import numbers
import warnings
from collections.abc import Iterable
import cupy as cp
import numpy as np
from ._warnings import all_warnings, warn # noqa
__all__ = [
"deprecate_func",
"get_bound_method_class",
"all_warnings",
"safe_as_int",
"check_shape_equality",
"check_nD",
"warn",
"reshape_nd",
"identity",
"slice_at_axis",
]
def _get_stack_rank(func):
"""Return function rank in the call stack."""
if _is_wrapped(func):
return 1 + _get_stack_rank(func.__wrapped__)
else:
return 0
def _is_wrapped(func):
return "__wrapped__" in dir(func)
def _get_stack_length(func):
"""Return function call stack length."""
return _get_stack_rank(func.__globals__.get(func.__name__, func))
class _DecoratorBaseClass:
"""Used to manage decorators' warnings stacklevel.
The `_stack_length` class variable is used to store the number of
times a function is wrapped by a decorator.
Let `stack_length` be the total number of times a decorated
function is wrapped, and `stack_rank` be the rank of the decorator
in the decorators stack. The stacklevel of a warning is then
`stacklevel = 1 + stack_length - stack_rank`.
"""
_stack_length = {}
def get_stack_length(self, func):
return self._stack_length.get(func.__name__, _get_stack_length(func))
class change_default_value(_DecoratorBaseClass):
"""Decorator for changing the default value of an argument.
Parameters
----------
arg_name: str
The name of the argument to be updated.
new_value: any
The argument new value.
changed_version : str
The package version in which the change will be introduced.
warning_msg: str
Optional warning message. If None, a generic warning message
is used.
"""
def __init__(
self, arg_name, *, new_value, changed_version, warning_msg=None
):
self.arg_name = arg_name
self.new_value = new_value
self.warning_msg = warning_msg
self.changed_version = changed_version
def __call__(self, func):
parameters = inspect.signature(func).parameters
arg_idx = list(parameters.keys()).index(self.arg_name)
old_value = parameters[self.arg_name].default
stack_rank = _get_stack_rank(func)
if self.warning_msg is None:
self.warning_msg = (
f"The new recommended value for {self.arg_name} is "
f"{self.new_value}. Until version {self.changed_version}, "
f"the default {self.arg_name} value is {old_value}. "
f"From version {self.changed_version}, the {self.arg_name} "
f"default value will be {self.new_value}. To avoid "
f"this warning, please explicitly set {self.arg_name} value."
)
@functools.wraps(func)
def fixed_func(*args, **kwargs):
stacklevel = 1 + self.get_stack_length(func) - stack_rank
if len(args) < arg_idx + 1 and self.arg_name not in kwargs.keys():
# warn that arg_name default value changed:
warnings.warn(
self.warning_msg, FutureWarning, stacklevel=stacklevel
)
return func(*args, **kwargs)
return fixed_func
class remove_arg(_DecoratorBaseClass):
"""Decorator to remove an argument from function's signature.
Parameters
----------
arg_name: str
The name of the argument to be removed.
changed_version : str
The package version in which the warning will be replaced by
an error.
help_msg: str
Optional message appended to the generic warning message.
"""
def __init__(self, arg_name, *, changed_version, help_msg=None):
self.arg_name = arg_name
self.help_msg = help_msg
self.changed_version = changed_version
def __call__(self, func):
parameters = inspect.signature(func).parameters
arg_idx = list(parameters.keys()).index(self.arg_name)
warning_msg = (
f"{self.arg_name} argument is deprecated in upstream scikit-image "
f"and will be removed in cuCIM {self.changed_version}. To avoid "
f"this warning, please do not use the {self.arg_name} argument. "
f"Please see {func.__name__} documentation for more details."
)
if self.help_msg is not None:
warning_msg += f" {self.help_msg}"
stack_rank = _get_stack_rank(func)
@functools.wraps(func)
def fixed_func(*args, **kwargs):
stacklevel = 1 + self.get_stack_length(func) - stack_rank
if len(args) > arg_idx or self.arg_name in kwargs.keys():
# warn that arg_name is deprecated
warnings.warn(warning_msg, FutureWarning, stacklevel=stacklevel)
return func(*args, **kwargs)
return fixed_func
def _docstring_add_deprecated(func, kwarg_mapping, deprecated_version):
"""Add deprecated kwarg(s) to the "Other Params" section of a docstring.
Parameters
----------
func : function
The function whose docstring we wish to update.
kwarg_mapping : dict
A dict containing {old_arg: new_arg} key/value pairs as used by
`deprecate_kwarg`.
deprecated_version : str
A major.minor version string specifying when old_arg was
deprecated.
Returns
-------
new_doc : str
The updated docstring. Returns the original docstring if numpydoc is
not available.
"""
if func.__doc__ is None:
return None
try:
from numpydoc.docscrape import FunctionDoc, Parameter
except ImportError:
# Return an unmodified docstring if numpydoc is not available.
return func.__doc__
Doc = FunctionDoc(func)
for old_arg, new_arg in kwarg_mapping.items():
desc = [
f"Deprecated in favor of `{new_arg}`.",
"",
f".. deprecated:: {deprecated_version}",
]
Doc["Other Parameters"].append(
Parameter(name=old_arg, type="DEPRECATED", desc=desc)
)
new_docstring = str(Doc)
# new_docstring will have a header starting with:
#
# .. function:: func.__name__
#
# and some additional blank lines. We strip these off below.
split = new_docstring.split("\n")
no_header = split[1:]
while not no_header[0].strip():
no_header.pop(0)
# Store the initial description before any of the Parameters fields.
# Usually this is a single line, but the while loop covers any case
# where it is not.
descr = no_header.pop(0)
while no_header[0].strip():
descr += "\n " + no_header.pop(0)
descr += "\n\n"
# '\n ' rather than '\n' here to restore the original indentation.
final_docstring = descr + "\n ".join(no_header)
# strip any extra spaces from ends of lines
final_docstring = "\n".join(
[line.rstrip() for line in final_docstring.split("\n")]
)
return final_docstring
class deprecate_kwarg(_DecoratorBaseClass):
"""Decorator ensuring backward compatibility when argument names are
modified in a function definition.
Parameters
----------
kwarg_mapping: dict
Mapping between the function's old argument names and the new
ones.
deprecated_version : str
The package version in which the argument was first deprecated.
warning_msg: str
Optional warning message. If None, a generic warning message
is used.
removed_version : str
The package version in which the deprecated argument will be
removed.
"""
def __init__(
self,
kwarg_mapping,
deprecated_version,
warning_msg=None,
removed_version=None,
):
self.kwarg_mapping = kwarg_mapping
if warning_msg is None:
self.warning_msg = (
"`{old_arg}` is a deprecated argument name "
"for `{func_name}`. "
)
if removed_version is not None:
self.warning_msg += (
f"It will be removed in cuCIM "
f"version {removed_version}."
)
self.warning_msg += "Please use `{new_arg}` instead."
else:
self.warning_msg = warning_msg
self.deprecated_version = deprecated_version
def __call__(self, func):
stack_rank = _get_stack_rank(func)
@functools.wraps(func)
def fixed_func(*args, **kwargs):
stacklevel = 1 + self.get_stack_length(func) - stack_rank
for old_arg, new_arg in self.kwarg_mapping.items():
if old_arg in kwargs:
# warn that the function interface has changed:
warnings.warn(
self.warning_msg.format(
old_arg=old_arg,
func_name=func.__name__,
new_arg=new_arg,
),
FutureWarning,
stacklevel=stacklevel,
)
# Substitute new_arg to old_arg
kwargs[new_arg] = kwargs.pop(old_arg)
# Call the function with the fixed arguments
return func(*args, **kwargs)
if func.__doc__ is not None:
newdoc = _docstring_add_deprecated(
func, self.kwarg_mapping, self.deprecated_version
)
fixed_func.__doc__ = newdoc
return fixed_func
class channel_as_last_axis:
"""Decorator for automatically making channels axis last for all arrays.
This decorator reorders axes for compatibility with functions that only
support channels along the last axis. After the function call is complete
the channels axis is restored back to its original position.
Parameters
----------
channel_arg_positions : tuple of int, optional
Positional arguments at the positions specified in this tuple are
assumed to be multichannel arrays. The default is to assume only the
first argument to the function is a multichannel array.
channel_kwarg_names : tuple of str, optional
A tuple containing the names of any keyword arguments corresponding to
multichannel arrays.
multichannel_output : bool, optional
        A boolean that should be True if the output of the function is a
multichannel array and False otherwise. This decorator does not
currently support the general case of functions with multiple outputs
where some or all are multichannel.
"""
def __init__(
self,
channel_arg_positions=(0,),
channel_kwarg_names=(),
multichannel_output=True,
):
self.arg_positions = set(channel_arg_positions)
self.kwarg_names = set(channel_kwarg_names)
self.multichannel_output = multichannel_output
def __call__(self, func):
@functools.wraps(func)
def fixed_func(*args, **kwargs):
channel_axis = kwargs.get("channel_axis", None)
if channel_axis is None:
return func(*args, **kwargs)
# TODO: convert scalars to a tuple in anticipation of eventually
# supporting a tuple of channel axes. Right now, only an
# integer or a single-element tuple is supported, though.
if np.isscalar(channel_axis):
channel_axis = (channel_axis,)
if len(channel_axis) > 1:
raise ValueError(
"only a single channel axis is currently supported"
)
if channel_axis == (-1,) or channel_axis == -1:
return func(*args, **kwargs)
if self.arg_positions:
new_args = []
for pos, arg in enumerate(args):
if pos in self.arg_positions:
new_args.append(np.moveaxis(arg, channel_axis[0], -1))
else:
new_args.append(arg)
new_args = tuple(new_args)
else:
new_args = args
for name in self.kwarg_names:
kwargs[name] = np.moveaxis(kwargs[name], channel_axis[0], -1)
# now that we have moved the channels axis to the last position,
# change the channel_axis argument to -1
kwargs["channel_axis"] = -1
# Call the function with the fixed arguments
out = func(*new_args, **kwargs)
if self.multichannel_output:
out = np.moveaxis(out, -1, channel_axis[0])
return out
return fixed_func
class deprecate_func(_DecoratorBaseClass):
"""Decorate a deprecated function and warn when it is called.
Adapted from <http://wiki.python.org/moin/PythonDecoratorLibrary>.
Parameters
----------
deprecated_version : str
The package version when the deprecation was introduced.
removed_version : str
The package version in which the deprecated function will be removed.
hint : str, optional
A hint on how to address this deprecation,
e.g., "Use `skimage.submodule.alternative_func` instead."
Examples
--------
>>> @deprecate_func(
... deprecated_version="1.0.0",
... removed_version="1.2.0",
... hint="Use `bar` instead."
... )
... def foo():
... pass
Calling ``foo`` will warn with::
FutureWarning: `foo` is deprecated since version 1.0.0
and will be removed in version 1.2.0. Use `bar` instead.
"""
def __init__(self, *, deprecated_version, removed_version=None, hint=None):
self.deprecated_version = deprecated_version
self.removed_version = removed_version
self.hint = hint
def __call__(self, func):
message = (
f"`{func.__name__}` is deprecated since version "
f"{self.deprecated_version}"
)
if self.removed_version:
message += (
f" and will be removed in version {self.removed_version}."
)
if self.hint:
message += f" {self.hint.rstrip('.')}."
stack_rank = _get_stack_rank(func)
@functools.wraps(func)
def wrapped(*args, **kwargs):
stacklevel = 1 + self.get_stack_length(func) - stack_rank
warnings.warn(
message, category=FutureWarning, stacklevel=stacklevel
)
return func(*args, **kwargs)
# modify doc string to display deprecation warning
doc = f"**Deprecated:** {message}"
if wrapped.__doc__ is None:
wrapped.__doc__ = doc
else:
wrapped.__doc__ = doc + "\n\n " + wrapped.__doc__
return wrapped
def get_bound_method_class(m):
"""Return the class for a bound method."""
return m.__self__.__class__
def safe_as_int(val, atol=1e-3):
"""
Attempt to safely cast values to integer format.
Parameters
----------
val : scalar or iterable of scalars
Number or container of numbers which are intended to be interpreted as
integers, e.g., for indexing purposes, but which may not carry integer
type.
atol : float
Absolute tolerance away from nearest integer to consider values in
``val`` functionally integers.
Returns
-------
val_int : NumPy scalar or ndarray of dtype `np.int64`
Returns the input value(s) coerced to dtype `np.int64` assuming all
were within ``atol`` of the nearest integer.
Notes
-----
    This operation calculates ``val`` modulo 1, which returns the fractional
    part of all values. Then all fractional parts greater than 0.5 are
    subtracted from one.
Finally, the absolute tolerance from zero is calculated. If it is less
than ``atol`` for all value(s) in ``val``, they are rounded and returned
in an integer array. Or, if ``val`` was a scalar, a NumPy scalar type is
returned.
If any value(s) are outside the specified tolerance, an informative error
is raised.
Examples
--------
>>> safe_as_int(7.0)
7
>>> safe_as_int([9, 4, 2.9999999999])
array([9, 4, 3])
>>> safe_as_int(53.1)
Traceback (most recent call last):
...
ValueError: Integer argument required but received 53.1, check inputs.
>>> safe_as_int(53.01, atol=0.01)
53
"""
mod = np.asarray(val) % 1 # Extract mantissa
# Check for and subtract any mod values > 0.5 from 1
if mod.ndim == 0: # Scalar input, cannot be indexed
if mod > 0.5:
mod = 1 - mod
else: # Iterable input, now ndarray
mod[mod > 0.5] = 1 - mod[mod > 0.5] # Test on each side of nearest int
try:
np.testing.assert_allclose(mod, 0, atol=atol)
except AssertionError:
raise ValueError(
f"Integer argument required but received " f"{val}, check inputs."
)
return np.round(val).astype(np.int64)
def check_shape_equality(*images):
"""Check that all images have the same shape"""
image0 = images[0]
if not all(image0.shape == image.shape for image in images[1:]):
raise ValueError("Input images must have the same dimensions.")
return
def slice_at_axis(sl, axis):
"""
Construct tuple of slices to slice an array in the given dimension.
Parameters
----------
sl : slice
The slice for the given dimension.
axis : int
The axis to which `sl` is applied. All other dimensions are left
"unsliced".
Returns
-------
sl : tuple of slices
A tuple with slices matching `shape` in length.
Examples
--------
>>> slice_at_axis(slice(None, 3, -1), 1)
(slice(None, None, None), slice(None, 3, -1), Ellipsis)
"""
return (slice(None),) * axis + (sl,) + (...,)
def reshape_nd(arr, ndim, dim):
"""Reshape a 1D array to have n dimensions, all singletons but one.
Parameters
----------
arr : array, shape (N,)
Input array
ndim : int
Number of desired dimensions of reshaped array.
dim : int
Which dimension/axis will not be singleton-sized.
Returns
-------
arr_reshaped : array, shape ([1, ...], N, [1,...])
View of `arr` reshaped to the desired shape.
Examples
--------
>>> rng = np.random.default_rng()
>>> arr = rng.random(7)
>>> reshape_nd(arr, 2, 0).shape
(7, 1)
>>> reshape_nd(arr, 3, 1).shape
(1, 7, 1)
>>> reshape_nd(arr, 4, -1).shape
(1, 1, 1, 7)
"""
if arr.ndim != 1:
raise ValueError("arr must be a 1D array")
new_shape = [1] * ndim
new_shape[dim] = -1
return np.reshape(arr, new_shape)
def check_nD(array, ndim, arg_name="image"):
"""
Verify an array meets the desired ndims and array isn't empty.
Parameters
----------
array : array-like
Input array to be validated
ndim : int or iterable of ints
Allowable ndim or ndims for the array.
arg_name : str, optional
The name of the array in the original function.
"""
msg_incorrect_dim = "The parameter `%s` must be a %s-dimensional array"
msg_empty_array = "The parameter `%s` cannot be an empty array"
if isinstance(ndim, int):
ndim = [ndim]
if array.size == 0:
raise ValueError(msg_empty_array % (arg_name))
if array.ndim not in ndim:
raise ValueError(
msg_incorrect_dim % (arg_name, "-or-".join([str(n) for n in ndim]))
)
def check_random_state(seed):
"""Turn seed into a `cupy.random.RandomState` instance.
Parameters
----------
seed : None, int or cupy.random.RandomState
        If `seed` is None, return the RandomState singleton used by `cupy.random`.
If `seed` is an int, return a new RandomState instance seeded with `seed`.
If `seed` is already a RandomState instance, return it.
Raises
------
ValueError
If `seed` is of the wrong type.
""" # noqa
# Function originally from scikit-learn's module sklearn.utils.validation
if seed is None or seed is cp.random:
return cp.random.mtrand._rand
if isinstance(seed, (numbers.Integral, cp.integer)):
return cp.random.RandomState(seed)
if isinstance(seed, cp.random.RandomState):
return seed
raise ValueError(
f"{seed} cannot be used to seed a cupy.random.RandomState instance"
)
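# Minimal usage sketch: integer seeds create a new ``RandomState`` while an
# existing instance is passed through unchanged.
def _example_check_random_state():
    rs = check_random_state(42)
    assert isinstance(rs, cp.random.RandomState)
    assert check_random_state(rs) is rs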
def convert_to_float(image, preserve_range):
"""Convert input image to float image with the appropriate range.
Parameters
----------
image : ndarray
Input image.
preserve_range : bool
Determines if the range of the image should be kept or transformed
using img_as_float. Also see
https://scikit-image.org/docs/dev/user_guide/data_types.html
Notes
-----
* Input images with `float32` data type are not upcast.
Returns
-------
image : ndarray
Transformed version of the input.
"""
if image.dtype == np.float16:
return image.astype(np.float32)
if preserve_range:
# Convert image to double only if it is not single or double
# precision float
if image.dtype.char not in "df":
image = image.astype(_supported_float_type(image.dtype))
else:
from ..util.dtype import img_as_float
image = img_as_float(image)
return image
def _validate_interpolation_order(image_dtype, order):
"""Validate and return spline interpolation's order.
Parameters
----------
image_dtype : dtype
Image dtype.
order : int, optional
The order of the spline interpolation. The order has to be in
the range 0-5. See `skimage.transform.warp` for detail.
Returns
-------
order : int
if input order is None, returns 0 if image_dtype is bool and 1
otherwise. Otherwise, image_dtype is checked and input order
is validated accordingly (order > 0 is not supported for bool
image dtype)
"""
if order is None:
return 0 if image_dtype == bool else 1
if order < 0 or order > 5:
raise ValueError(
"Spline interpolation order has to be in the " "range 0-5."
)
if image_dtype == bool and order != 0:
raise ValueError(
"Input image dtype is bool. Interpolation is not defined "
"with bool data type. Please set order to 0 or explicitly "
"cast input image to another data type."
)
return order
def _to_np_mode(mode):
"""Convert padding modes from `ndi.correlate` to `np.pad`."""
mode_translation_dict = dict(
nearest="edge", reflect="symmetric", mirror="reflect"
)
if mode in mode_translation_dict:
mode = mode_translation_dict[mode]
return mode
def _to_ndimage_mode(mode):
"""Convert from `numpy.pad` mode name to the corresponding ndimage mode."""
mode_translation_dict = dict(
constant="constant",
edge="nearest",
symmetric="reflect",
reflect="mirror",
wrap="wrap",
)
if mode not in mode_translation_dict:
raise ValueError(
f"Unknown mode: '{mode}', or cannot translate mode. The "
f"mode should be one of 'constant', 'edge', 'symmetric', "
f"'reflect', or 'wrap'. See the documentation of numpy.pad for "
f"more info."
)
return _fix_ndimage_mode(mode_translation_dict[mode])
def _fix_ndimage_mode(mode):
# SciPy 1.6.0 introduced grid variants of constant and wrap which
# have less surprising behavior for images. Use these when available
grid_modes = {"constant": "grid-constant", "wrap": "grid-wrap"}
return grid_modes.get(mode, mode)
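# Minimal usage sketch: ``numpy.pad`` mode names translate to their ndimage
# equivalents, with 'constant' promoted to the less surprising
# 'grid-constant' variant.
def _example_mode_translation():
    assert _to_ndimage_mode("edge") == "nearest"
    assert _to_ndimage_mode("constant") == "grid-constant"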
new_float_type = {
# preserved types
"f": cp.float32, # float32
"d": cp.float64, # float64
"F": cp.complex64, # complex64
"D": cp.complex128, # complex128
# promoted float types
"e": cp.float32, # float16
# truncated float types
"g": cp.float64, # float128 (doesn't exist on windows)
"G": cp.complex128, # complex256 (doesn't exist on windows)
# integer types that can be exactly represented in float32
"b": cp.float32, # int8
"B": cp.float32, # uint8
"h": cp.float32, # int16
"H": cp.float32, # uint16
"?": cp.float32, # bool
}
def _supported_float_type(input_dtype, allow_complex=False):
"""Return an appropriate floating-point dtype for a given dtype.
float32, float64, complex64, complex128 are preserved.
float16 is promoted to float32.
complex256 is demoted to complex128.
Other types are cast to float64.
Parameters
----------
input_dtype : np.dtype or Iterable of np.dtype
The input dtype. If a sequence of multiple dtypes is provided, each
dtype is first converted to a supported floating point type and the
final dtype is then determined by applying `np.result_type` on the
sequence of supported floating point types.
allow_complex : bool, optional
If False, raise a ValueError on complex-valued inputs.
Returns
-------
float_type : dtype
Floating-point dtype for the image.
"""
if isinstance(input_dtype, Iterable) and not isinstance(input_dtype, str):
return cp.result_type(*(_supported_float_type(d) for d in input_dtype))
input_dtype = cp.dtype(input_dtype)
if not allow_complex and input_dtype.kind == "c":
raise ValueError("complex valued input is not supported")
return new_float_type.get(input_dtype.char, cp.float64)
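# Minimal usage sketch: small integer types map to float32, and a sequence of
# dtypes resolves to the widest supported float type.
def _example_supported_float_type():
    assert _supported_float_type(cp.uint8) == cp.float32
    assert _supported_float_type([cp.float32, cp.float64]) == cp.float64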
def identity(image, *args, **kwargs):
"""Returns the first argument unmodified."""
return image
def as_binary_ndarray(array, *, variable_name):
"""Return `array` as a numpy.ndarray of dtype bool.
Raises
------
ValueError:
An error including the given `variable_name` if `array` can not be
safely cast to a boolean array.
"""
array = cp.asarray(array)
if array.dtype != bool:
if cp.any((array != 1) & (array != 0)):
raise ValueError(
f"{variable_name} array is not of dtype boolean or "
f"contains values other than 0 and 1 so cannot be "
f"safely cast to boolean array."
)
return cp.asarray(array, dtype=bool)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/version_requirements.py
|
import sys
from packaging import version as _version
def _check_version(actver, version, cmp_op):
"""
Check version string of an active module against a required version.
If dev/prerelease tags result in TypeError for string-number comparison,
it is assumed that the dependency is satisfied.
Users on dev branches are responsible for keeping their own packages up to
date.
Copyright (C) 2013 The IPython Development Team
Distributed under the terms of the BSD License.
"""
try:
if cmp_op == ">":
return _version.parse(actver) > _version.parse(version)
elif cmp_op == ">=":
return _version.parse(actver) >= _version.parse(version)
elif cmp_op == "=":
return _version.parse(actver) == _version.parse(version)
elif cmp_op == "<":
return _version.parse(actver) < _version.parse(version)
else:
return False
except TypeError:
return True
def get_module_version(module_name):
"""Return module version or None if version can't be retrieved."""
mod = __import__(module_name, fromlist=[module_name.rpartition(".")[-1]])
return getattr(mod, "__version__", getattr(mod, "VERSION", None))
def is_installed(name, version=None):
"""Test if *name* is installed.
Parameters
----------
name : str
Name of module or "python"
version : str, optional
Version string to test against.
If version is not None, checking version
(must have an attribute named '__version__' or 'VERSION')
Version may start with =, >=, > or < to specify the exact requirement
Returns
-------
out : bool
True if `name` is installed matching the optional version.
Notes
-----
Original Copyright (C) 2009-2011 Pierre Raybaut
Licensed under the terms of the MIT License.
"""
if name.lower() == "python":
actver = sys.version[:6]
else:
try:
actver = get_module_version(name)
except ImportError:
return False
if version is None:
return True
else:
# since version_requirements is in the critical import path,
# we lazy import re
import re
match = re.search("[0-9]", version)
assert match is not None, "Invalid version number"
symb = version[: match.start()]
if not symb:
symb = "="
assert symb in (">=", ">", "=", "<"), (
"Invalid version condition '%s'" % symb
)
version = version[match.start() :]
return _check_version(actver, version, symb)
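# Hedged usage sketch (the NumPy constraint is an arbitrary illustration and
# assumes NumPy is installed): "python" is special-cased and always present.
def _example_is_installed():
    assert is_installed("python")
    assert is_installed("numpy", ">=1.0")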
def require(name, version=None):
"""Return decorator that forces a requirement for a function or class.
Parameters
----------
name : str
Name of module or "python".
version : str, optional
Version string to test against.
If version is not None, checking version
(must have an attribute named '__version__' or 'VERSION')
Version may start with =, >=, > or < to specify the exact requirement
Returns
-------
func : function
A decorator that raises an ImportError if a function is run
in the absence of the input dependency.
"""
# since version_requirements is in the critical import path, we lazy import
# functools
import functools
def decorator(obj):
@functools.wraps(obj)
def func_wrapped(*args, **kwargs):
if is_installed(name, version):
return obj(*args, **kwargs)
else:
msg = '"%s" in "%s" requires "%s'
msg = msg % (obj, obj.__module__, name)
if version is not None:
msg += " %s" % version
raise ImportError(msg + '"')
return func_wrapped
return decorator
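# Hedged usage sketch: guard a helper behind an (illustrative) NumPy
# requirement; the check runs at call time and raises ImportError when the
# requirement is unmet.
@require("numpy", ">=1.0")
def _example_require_numpy():
    return True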
def get_module(module_name, version=None):
"""Return a module object of name *module_name* if installed.
Parameters
----------
module_name : str
Name of module.
version : str, optional
Version string to test against.
If version is not None, checking version
(must have an attribute named '__version__' or 'VERSION')
Version may start with =, >=, > or < to specify the exact requirement
Returns
-------
mod : module or None
Module if *module_name* is installed matching the optional version
or None otherwise.
"""
if not is_installed(module_name, version):
return None
return __import__(module_name, fromlist=[module_name.rpartition(".")[-1]])
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/tests/test_warnings.py
|
import os
import pytest
from cucim.skimage._shared._warnings import expected_warnings
@pytest.fixture(scope="function")
def setup():
# Remove any environment variable if it exists
old_strictness = os.environ.pop("SKIMAGE_TEST_STRICT_WARNINGS", None)
yield
# Add the user's desired strictness
if old_strictness is not None:
os.environ["SKIMAGE_TEST_STRICT_WARNINGS"] = old_strictness
def test_strict_warnings_default(setup):
# By default we should fail on missing expected warnings
with pytest.raises(ValueError):
with expected_warnings(["some warnings"]):
pass
@pytest.mark.parametrize("strictness", ["1", "true", "True", "TRUE"])
def test_strict_warning_true(setup, strictness):
os.environ["SKIMAGE_TEST_STRICT_WARNINGS"] = strictness
with pytest.raises(ValueError):
with expected_warnings(["some warnings"]):
pass
@pytest.mark.parametrize("strictness", ["0", "false", "False", "FALSE"])
def test_strict_warning_false(setup, strictness):
    # If the user doesn't wish to be strict about warnings
# the following shouldn't raise any error
os.environ["SKIMAGE_TEST_STRICT_WARNINGS"] = strictness
with expected_warnings(["some warnings"]):
pass
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/_shared/tests/test_utils.py
|
import sys
import warnings
import cupy as cp
import numpy as np
import pytest
from cucim.skimage._shared.utils import (
_supported_float_type,
_validate_interpolation_order,
change_default_value,
channel_as_last_axis,
check_nD,
deprecate_kwarg,
)
complex_dtypes = [np.complex64, np.complex128]
if hasattr(np, "complex256"):
complex_dtypes += [np.complex256]
have_numpydoc = False
try:
import numpydoc # noqa
have_numpydoc = True
except ImportError:
pass
def test_change_default_value():
@change_default_value("arg1", new_value=-1, changed_version="0.12")
def foo(arg0, arg1=0, arg2=1):
"""Expected docstring"""
return arg0, arg1, arg2
@change_default_value(
"arg1",
new_value=-1,
changed_version="0.12",
warning_msg="Custom warning message",
)
def bar(arg0, arg1=0, arg2=1):
"""Expected docstring"""
return arg0, arg1, arg2
# Assert warning messages
with pytest.warns(FutureWarning) as record:
assert foo(0) == (0, 0, 1)
assert bar(0) == (0, 0, 1)
expected_msg = (
"The new recommended value for arg1 is -1. Until "
"version 0.12, the default arg1 value is 0. From "
"version 0.12, the arg1 default value will be -1. "
"To avoid this warning, please explicitly set arg1 value."
)
assert str(record[0].message) == expected_msg
assert str(record[1].message) == "Custom warning message"
# Assert that nothing happens if arg1 is set
with warnings.catch_warnings(record=True) as recorded:
# No kwargs
assert foo(0, 2) == (0, 2, 1)
assert foo(0, arg1=0) == (0, 0, 1)
# Function name and doc is preserved
assert foo.__name__ == "foo"
if sys.flags.optimize < 2:
# if PYTHONOPTIMIZE is set to 2, docstrings are stripped
assert foo.__doc__ == "Expected docstring"
# Assert no warnings were raised
assert len(recorded) == 0
def test_deprecate_kwarg():
@deprecate_kwarg({"old_arg1": "new_arg1"}, "22.02.00")
def foo(arg0, new_arg1=1, arg2=None):
"""Expected docstring"""
return arg0, new_arg1, arg2
@deprecate_kwarg(
{"old_arg1": "new_arg1"},
deprecated_version="22.02.00",
warning_msg="Custom warning message",
)
def bar(arg0, new_arg1=1, arg2=None):
"""Expected docstring"""
return arg0, new_arg1, arg2
    # Assert that the FutureWarning is raised when the deprecated
    # argument name is used and that the result is valid
with pytest.warns(FutureWarning) as record:
assert foo(0, old_arg1=1) == (0, 1, None)
assert bar(0, old_arg1=1) == (0, 1, None)
msg = (
"`old_arg1` is a deprecated argument name "
"for `foo`. Please use `new_arg1` instead."
)
assert str(record[0].message) == msg
assert str(record[1].message) == "Custom warning message"
# Assert that nothing happens when the function is called with the
# new API
with warnings.catch_warnings(record=True) as recorded:
# No kwargs
assert foo(0) == (0, 1, None)
assert foo(0, 2) == (0, 2, None)
assert foo(0, 1, 2) == (0, 1, 2)
# Kwargs without deprecated argument
assert foo(0, new_arg1=1, arg2=2) == (0, 1, 2)
assert foo(0, new_arg1=2) == (0, 2, None)
assert foo(0, arg2=2) == (0, 1, 2)
assert foo(0, 1, arg2=2) == (0, 1, 2)
# Function name and doc is preserved
assert foo.__name__ == "foo"
if sys.flags.optimize < 2:
# if PYTHONOPTIMIZE is set to 2, docstrings are stripped
if not have_numpydoc:
assert foo.__doc__ == """Expected docstring"""
else:
assert (
foo.__doc__
== """Expected docstring
Other Parameters
----------------
old_arg1 : DEPRECATED
Deprecated in favor of `new_arg1`.
.. deprecated:: 22.02.00
"""
)
assert len(recorded) == 0
def test_check_nD():
z = np.random.random(200**2).reshape((200, 200))
x = z[10:30, 30:10]
with pytest.raises(ValueError):
check_nD(x, 2)
@pytest.mark.parametrize(
"dtype", [bool, int, np.uint8, np.uint16, float, np.float32, np.float64]
)
@pytest.mark.parametrize("order", [None, -1, 0, 1, 2, 3, 4, 5, 6])
def test_validate_interpolation_order(dtype, order):
if order is None:
# Default order
        assert _validate_interpolation_order(dtype, None) == (
            0 if dtype == bool else 1
        )
elif order < 0 or order > 5:
# Order not in valid range
with pytest.raises(ValueError):
_validate_interpolation_order(dtype, order)
elif dtype == bool and order != 0:
# Deprecated order for bool array
with pytest.raises(ValueError):
_validate_interpolation_order(bool, order)
else:
# Valid use case
assert _validate_interpolation_order(dtype, order) == order
@pytest.mark.parametrize(
"dtype",
[
bool,
np.float16,
np.float32,
np.float64,
np.uint8,
np.uint16,
np.uint32,
np.uint64,
np.int8,
np.int16,
np.int32,
np.int64,
],
)
def test_supported_float_dtype_real(dtype):
float_dtype = _supported_float_type(dtype)
if dtype in [
np.float16,
np.float32,
np.int8,
np.uint8,
np.int16,
np.uint16,
bool,
]:
assert float_dtype == np.float32
else:
assert float_dtype == np.float64
@pytest.mark.parametrize("dtype", complex_dtypes)
@pytest.mark.parametrize("allow_complex", [False, True])
def test_supported_float_dtype_complex(dtype, allow_complex):
if allow_complex:
float_dtype = _supported_float_type(dtype, allow_complex=allow_complex)
if dtype == np.complex64:
assert float_dtype == np.complex64
else:
assert float_dtype == np.complex128
else:
with pytest.raises(ValueError):
_supported_float_type(dtype, allow_complex=allow_complex)
@pytest.mark.parametrize(
"dtype", ["f", "float32", np.float32, np.dtype(np.float32)]
)
def test_supported_float_dtype_input_kinds(dtype):
assert _supported_float_type(dtype) == np.float32
@pytest.mark.parametrize(
"dtypes, expected",
[
((np.float16, np.float64), np.float64),
([np.float32, np.uint16, np.int8], np.float32),
([np.float32, bool], np.float32),
([np.float32, np.uint32, np.int16], np.float64),
({np.float32, np.float16}, np.float32),
],
)
def test_supported_float_dtype_sequence(dtypes, expected):
float_dtype = _supported_float_type(dtypes)
assert float_dtype == expected
@channel_as_last_axis(multichannel_output=False)
def _decorated_channel_axis_size(x, *, channel_axis=None):
if channel_axis is None:
return None
assert channel_axis == -1
return x.shape[-1]
@pytest.mark.parametrize("channel_axis", [None, 0, 1, 2, -1, -2, -3])
def test_decorated_channel_axis_shape(channel_axis):
# Verify that channel_as_last_axis modifies the channel_axis as expected
# need unique size per axis here
x = cp.zeros((2, 3, 4))
size = _decorated_channel_axis_size(x, channel_axis=channel_axis)
if channel_axis is None:
assert size is None
else:
assert size == x.shape[channel_axis]
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/_label.py
|
import cupy as cp
import scipy.ndimage as cpu_ndi
from ._label_kernels import _label
def _get_structure(ndim, connectivity):
if connectivity is None:
# use the full connectivity by default
connectivity = ndim
if not 1 <= connectivity <= ndim:
raise ValueError("Connectivity below 1 or above %d is illegal." % ndim)
return cpu_ndi.generate_binary_structure(ndim, connectivity)
# TODO: currently uses int32 for the labels. should add int64 option as well
def label(label_image, background=None, return_num=False, connectivity=None):
r"""Label connected regions of an integer array.
Two pixels are connected when they are neighbors and have the same value.
In 2D, they can be neighbors either in a 1- or 2-connected sense.
The value refers to the maximum number of orthogonal hops to consider a
pixel/voxel a neighbor::
1-connectivity 2-connectivity diagonal connection close-up
[ ] [ ] [ ] [ ] [ ]
| \ | / | <- hop 2
[ ]--[x]--[ ] [ ]--[x]--[ ] [x]--[ ]
| / | \ hop 1
[ ] [ ] [ ] [ ]
Parameters
----------
label_image : ndarray of dtype int
Image to label.
background : int, optional
Consider all pixels with this value as background pixels, and label
them as 0. By default, 0-valued pixels are considered as background
pixels.
return_num : bool, optional
Whether to return the number of assigned labels.
connectivity : int, optional
Maximum number of orthogonal hops to consider a pixel/voxel
as a neighbor.
Accepted values are ranging from 1 to input.ndim. If ``None``, a full
connectivity of ``input.ndim`` is used.
Returns
-------
labels : ndarray of dtype int
Labeled array, where all connected regions are assigned the
same integer value.
num : int, optional
Number of labels, which equals the maximum label index and is only
returned if return_num is `True`.
See Also
--------
regionprops
regionprops_table
References
----------
.. [1] Christophe Fiorio and Jens Gustedt, "Two linear time Union-Find
strategies for image processing", Theoretical Computer Science
154 (1996), pp. 165-181.
.. [2] Kensheng Wu, Ekow Otoo and Arie Shoshani, "Optimizing connected
component labeling algorithms", Paper LBNL-56864, 2005,
Lawrence Berkeley National Laboratory (University of California),
http://repositories.cdlib.org/lbnl/LBNL-56864
Notes
-----
Currently the cucim implementation of this function always uses 32-bit
integers for the label array. This is done for performance. In the future
64-bit integer support may also be added for better skimage compatibility.
Examples
--------
>>> import cupy as cp
>>> x = cp.eye(3).astype(int)
>>> print(x)
[[1 0 0]
[0 1 0]
[0 0 1]]
>>> print(label(x, connectivity=1))
[[1 0 0]
[0 2 0]
[0 0 3]]
>>> print(label(x, connectivity=2))
[[1 0 0]
[0 1 0]
[0 0 1]]
>>> print(label(x, background=-1))
[[1 2 2]
[2 1 2]
[2 2 1]]
>>> x = cp.asarray([[1, 0, 0],
... [1, 1, 5],
... [0, 0, 0]])
>>> print(label(x))
[[1 0 0]
[1 1 2]
[0 0 0]]
"""
ndim = label_image.ndim
structure = _get_structure(ndim, connectivity)
if background is None:
background = 0
elif background != 0:
# offset so that background becomes 0 as expected by _label below
label_image = label_image - background
if label_image.dtype.kind not in "bui":
# skimage always copies the input into a np.intp dtype array so do the
# same here for non-integer dtypes.
label_image = label_image.astype(cp.intp)
labels = cp.empty(label_image.shape, order="C", dtype=cp.int32)
num = _label(label_image, structure, labels, greyscale_mode=True)
if return_num:
return labels, num
return labels
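# Minimal usage sketch (illustrative input): ``return_num=True`` also returns
# the number of connected components found.
def _example_label_return_num():
    x = cp.zeros((4, 4), dtype=int)
    x[0, 0] = 1
    x[3, 3] = 1
    labels, num = label(x, return_num=True)
    assert int(num) == 2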
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/_moments.py
|
import itertools
import cupy as cp
import numpy as np
from .._shared.utils import _supported_float_type, check_nD
from ._moments_analytical import moments_raw_to_central
def moments_coords(coords, order=3):
"""Calculate all raw image moments up to a certain order.
The following properties can be calculated from raw image moments:
* Area as: ``M[0, 0]``.
* Centroid as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}.
Note that raw moments are neither translation, scale nor rotation
invariant.
Parameters
----------
coords : (N, D) double or uint8 array
Array of N points that describe an image of D dimensionality in
Cartesian space.
order : int, optional
Maximum order of moments. Default is 3.
Returns
-------
M : (``order + 1``, ``order + 1``, ...) array
Raw image moments. (D dimensions)
References
----------
.. [1] Johannes Kilian. Simple Image Analysis By Moments. Durham
University, version 0.2, Durham, 2001.
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.measure import moments_coords
>>> coords = cp.array([[row, col]
... for row in range(13, 17)
... for col in range(14, 18)], dtype=cp.float64)
>>> M = moments_coords(coords)
>>> centroid = (M[1, 0] / M[0, 0], M[0, 1] / M[0, 0])
>>> centroid
(array(14.5), array(15.5))
"""
return moments_coords_central(coords, 0, order=order)
def moments_coords_central(coords, center=None, order=3):
"""Calculate all central image moments up to a certain order.
The following properties can be calculated from raw image moments:
* Area as: ``M[0, 0]``.
* Centroid as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}.
Note that raw moments are neither translation, scale nor rotation
invariant.
Parameters
----------
coords : (N, D) double or uint8 array
Array of N points that describe an image of D dimensionality in
Cartesian space. A tuple of coordinates as returned by
``cp.nonzero`` is also accepted as input.
center : tuple of float, optional
Coordinates of the image centroid. This will be computed if it
is not provided.
order : int, optional
Maximum order of moments. Default is 3.
Returns
-------
Mc : (``order + 1``, ``order + 1``, ...) array
Central image moments. (D dimensions)
References
----------
.. [1] Johannes Kilian. Simple Image Analysis By Moments. Durham
University, version 0.2, Durham, 2001.
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.measure import moments_coords_central
>>> coords = cp.array([[row, col]
... for row in range(13, 17)
... for col in range(14, 18)])
>>> moments_coords_central(coords)
array([[16., 0., 20., 0.],
[ 0., 0., 0., 0.],
[20., 0., 25., 0.],
[ 0., 0., 0., 0.]])
As seen above, for symmetric objects, odd-order moments (columns 1 and 3,
rows 1 and 3) are zero when centered on the centroid, or center of mass,
of the object (the default). If we break the symmetry by adding a new
point, this no longer holds:
>>> coords2 = cp.concatenate((coords, cp.array([[17, 17]])), axis=0)
>>> cp.around(moments_coords_central(coords2),
... decimals=2) # doctest: +NORMALIZE_WHITESPACE
array([[17. , 0. , 22.12, -2.49],
[ 0. , 3.53, 1.73, 7.4 ],
[25.88, 6.02, 36.63, 8.83],
[ 4.15, 19.17, 14.8 , 39.6 ]])
Image moments and central image moments are equivalent (by definition)
when the center is (0, 0):
>>> cp.allclose(moments_coords(coords),
... moments_coords_central(coords, (0, 0)))
array(True)
"""
if isinstance(coords, tuple):
# This format corresponds to coordinate tuples as returned by
# e.g. cp.nonzero: (row_coords, column_coords).
# We represent them as an npoints x ndim array.
coords = cp.stack(coords, axis=-1)
check_nD(coords, 2)
ndim = coords.shape[1]
float_type = _supported_float_type(coords.dtype)
if center is None:
center = cp.mean(coords, axis=0, dtype=float)
center = center.astype(float_type, copy=False)
else:
center = cp.asarray(center, dtype=float_type)
# center the coordinates
coords = coords.astype(float_type, copy=False)
coords -= center
# CuPy backend: for efficiency, sum over the last axis
# (which is memory contiguous)
# generate all possible exponents for each axis in the given set of points
# produces a matrix of shape (order + 1, D, N)
coords = coords.T
powers = cp.arange(order + 1, dtype=float_type)[:, np.newaxis, np.newaxis]
coords = coords[cp.newaxis, ...] ** powers
# add extra dimensions for proper broadcasting
coords = coords.reshape((1,) * (ndim - 1) + coords.shape)
calc = cp.moveaxis(coords[..., 0, :], -2, 0)
for axis in range(1, ndim):
# isolate each point's axis
isolated_axis = coords[..., axis, :]
# rotate orientation of matrix for proper broadcasting
isolated_axis = cp.moveaxis(isolated_axis, -2, axis)
# calculate the moments for each point, one axis at a time
calc = calc * isolated_axis
# sum all individual point moments to get our final answer
Mc = cp.sum(calc, axis=-1)
return Mc
def moments(image, order=3, *, spacing=None):
"""Calculate all raw image moments up to a certain order.
The following properties can be calculated from raw image moments:
* Area as: ``M[0, 0]``.
* Centroid as: {``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}.
Note that raw moments are neither translation, scale nor rotation
invariant.
Parameters
----------
image : nD double or uint8 array
Rasterized shape as image.
order : int, optional
Maximum order of moments. Default is 3.
    spacing : tuple of float, shape (ndim, )
The pixel spacing along each axis of the image.
Returns
-------
m : (``order + 1``, ``order + 1``) array
Raw image moments.
References
----------
.. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
Core Algorithms. Springer-Verlag, London, 2009.
.. [2] B. Jähne. Digital Image Processing. Springer-Verlag,
Berlin-Heidelberg, 6. edition, 2005.
.. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
Features, from Lecture notes in computer science, p. 676. Springer,
Berlin, 1993.
.. [4] https://en.wikipedia.org/wiki/Image_moment
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.measure import moments
>>> image = cp.zeros((20, 20), dtype=cp.float64)
>>> image[13:17, 13:17] = 1
>>> M = moments(image)
>>> centroid = (M[1, 0] / M[0, 0], M[0, 1] / M[0, 0])
>>> centroid
(array(14.5), array(14.5))
"""
float_dtype = _supported_float_type(image.dtype)
calc = image.astype(float_dtype, copy=False)
powers = cp.arange(order + 1, dtype=float_dtype)
_delta = cp.arange(max(image.shape), dtype=float_dtype)[:, cp.newaxis]
if spacing is None:
# when spacing is not used can compute the powers outside the loop
_powers_of_delta = _delta**powers
for dim, dim_length in enumerate(image.shape):
if spacing is None:
powers_of_delta = _powers_of_delta[:dim_length]
else:
delta = _delta[:dim_length] * spacing[dim]
powers_of_delta = delta**powers
calc = cp.moveaxis(calc, source=dim, destination=-1)
calc = cp.dot(calc, powers_of_delta)
calc = cp.moveaxis(calc, source=-1, destination=dim)
return calc
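# Hedged usage sketch (illustrative values): anisotropic pixel ``spacing``
# scales the raw moments, so the row centroid doubles when the row spacing
# is 2.
def _example_moments_with_spacing():
    image = cp.zeros((20, 20), dtype=cp.float64)
    image[13:17, 13:17] = 1
    m = moments(image, spacing=(2.0, 1.0))
    centroid = (float(m[1, 0] / m[0, 0]), float(m[0, 1] / m[0, 0]))
    assert centroid == (29.0, 14.5)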
def moments_central(image, center=None, order=3, *, spacing=None, **kwargs):
"""Calculate all central image moments up to a certain order.
The center coordinates (cr, cc) can be calculated from the raw moments as:
{``M[1, 0] / M[0, 0]``, ``M[0, 1] / M[0, 0]``}.
Note that central moments are translation invariant but not scale and
rotation invariant.
Parameters
----------
image : nD double or uint8 array
Rasterized shape as image.
center : tuple of float, optional
Coordinates of the image centroid. This will be computed if it
is not provided.
order : int, optional
The maximum order of moments computed.
    spacing : tuple of float, shape (ndim, )
The pixel spacing along each axis of the image.
Returns
-------
mu : (``order + 1``, ``order + 1``) array
Central image moments.
References
----------
.. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
Core Algorithms. Springer-Verlag, London, 2009.
.. [2] B. Jähne. Digital Image Processing. Springer-Verlag,
Berlin-Heidelberg, 6. edition, 2005.
.. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
Features, from Lecture notes in computer science, p. 676. Springer,
Berlin, 1993.
.. [4] https://en.wikipedia.org/wiki/Image_moment
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.measure import moments, moments_central
>>> image = cp.zeros((20, 20), dtype=cp.float64)
>>> image[13:17, 13:17] = 1
>>> M = moments(image)
>>> centroid = (M[1, 0] / M[0, 0], M[0, 1] / M[0, 0])
>>> moments_central(image, centroid)
array([[16., 0., 20., 0.],
[ 0., 0., 0., 0.],
[20., 0., 25., 0.],
[ 0., 0., 0., 0.]])
"""
if center is None:
# Note: No need for an explicit call to centroid.
# The centroid will be obtained from the raw moments.
moments_raw = moments(image, order=order, spacing=spacing)
return moments_raw_to_central(moments_raw)
if spacing is None:
spacing = np.ones(image.ndim)
float_dtype = _supported_float_type(image.dtype)
calc = image.astype(float_dtype, copy=False)
powers = cp.arange(order + 1, dtype=float_dtype)
_delta = cp.arange(max(image.shape), dtype=float_dtype)[:, cp.newaxis]
for dim, dim_length in enumerate(image.shape):
delta = _delta[:dim_length] * spacing[dim] - center[dim]
powers_of_delta = delta**powers
calc = cp.moveaxis(calc, source=dim, destination=-1)
calc = cp.dot(calc, powers_of_delta)
calc = cp.moveaxis(calc, source=-1, destination=dim)
return calc
def _get_moments_norm_operation(ndim, order, unit_scale=True):
"""Full normalization computation kernel for 2D or 3D cases.
Variants with or without scaling are provided.
"""
operation = f"""
double mu0 = static_cast<double>(mu[0]);
double ndim = {ndim};
int _i = i;
int coord_i;
int order_of_current_index = 0;
int n_rows = order + 1;
double denom;
"""
if not unit_scale:
operation += """
double s_pow;"""
operation += f"""
for (int d=0; d<{ndim}; d++)"""
operation += """
{
// This loop computes the coordinate index along each axis of the
// matrix in turn and sums them up to get the order of the moment
// at the current index in mu.
coord_i = _i % n_rows;
_i /= n_rows;
order_of_current_index += coord_i;
}
if ((order_of_current_index > order) || (order_of_current_index < 2))
{
continue;
}
"""
if unit_scale:
operation += """
denom = pow(mu0, static_cast<double>(order_of_current_index) / ndim + 1);
nu = mu[i] / denom;""" # noqa
else:
operation += """
s_pow = pow(scale, static_cast<double>(order_of_current_index));
denom = pow(mu0, static_cast<double>(order_of_current_index) / ndim + 1);
nu = (mu[i] / s_pow) / denom;""" # noqa
return operation
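# Hedged sketch of the normalization the generated kernel performs for every index
# whose total order lies in [2, order]:
#     nu[i] = (mu[i] / scale**order_i) / mu0**(order_i / ndim + 1)
# where order_i is the sum of the index coordinates and mu0 = mu[(0,) * ndim];
# with unit_scale=True the division by scale**order_i is omitted.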
@cp.memoize()
def _get_normalize_kernel(ndim, order, unit_scale=True):
return cp.ElementwiseKernel(
"raw F mu, int32 order, float64 scale",
"F nu",
operation=_get_moments_norm_operation(ndim, order, unit_scale),
name="moments_normmalize_2d_kernel",
)
def moments_normalized(mu, order=3, spacing=None):
"""Calculate all normalized central image moments up to a certain order.
Note that normalized central moments are translation and scale invariant
but not rotation invariant.
Parameters
----------
mu : (M,[ ...,] M) array
Central image moments, where M must be greater than ``order``.
order : int, optional
Maximum order of moments. Default is 3.
spacing : tuple of float, shape (ndim, )
The pixel spacing along each axis of the image.
Returns
-------
nu : (``order + 1``,[ ...,] ``order + 1``) array
Normalized central image moments.
Notes
-----
Differs from the scikit-image implementation in that any moments greater
than the requested `order` will be set to ``nan``.
References
----------
.. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
Core Algorithms. Springer-Verlag, London, 2009.
.. [2] B. Jähne. Digital Image Processing. Springer-Verlag,
Berlin-Heidelberg, 6. edition, 2005.
.. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
Features, from Lecture notes in computer science, p. 676. Springer,
Berlin, 1993.
.. [4] https://en.wikipedia.org/wiki/Image_moment
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.measure import (moments, moments_central,
... moments_normalized)
>>> image = cp.zeros((20, 20), dtype=cp.float64)
>>> image[13:17, 13:17] = 1
>>> m = moments(image)
>>> centroid = (m[1, 0] / m[0, 0], m[0, 1] / m[0, 0])
>>> mu = moments_central(image, centroid)
>>> moments_normalized(mu)
array([[ nan, nan, 0.078125 , 0. ],
[ nan, 0. , 0. , 0. ],
[0.078125 , 0. , 0.00610352, 0. ],
[0. , 0. , 0. , 0. ]])
"""
if any(s <= order for s in mu.shape):
raise ValueError("Shape of image moments must be >= `order`")
if spacing is None:
scale = 1.0
else:
if isinstance(spacing, cp.ndarray):
scale = spacing.min()
else:
scale = min(spacing)
# compute using a single kernel for the 2D or 3D cases
unit_scale = scale == 1.0
kernel = _get_normalize_kernel(mu.ndim, order, unit_scale)
nu = cp.full(mu.shape, cp.nan, dtype=mu.dtype)
kernel(mu, order, scale, nu)
return nu
def moments_hu(nu):
"""Calculate Hu's set of image moments (2D-only).
Note that this set of moments is proved to be translation, scale and
rotation invariant.
Parameters
----------
nu : (M, M) array
Normalized central image moments, where M must be >= 4.
Returns
-------
nu : (7,) array
Hu's set of image moments.
Notes
-----
Due to the small array sizes, this function will be faster on the CPU.
Consider transferring ``nu`` to the host and running
``skimage.measure.moments_hu`` if the moments are not needed on the
device.
References
----------
.. [1] M. K. Hu, "Visual Pattern Recognition by Moment Invariants",
IRE Trans. Info. Theory, vol. IT-8, pp. 179-187, 1962
.. [2] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
Core Algorithms. Springer-Verlag, London, 2009.
.. [3] B. Jähne. Digital Image Processing. Springer-Verlag,
Berlin-Heidelberg, 6. edition, 2005.
.. [4] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
Features, from Lecture notes in computer science, p. 676. Springer,
Berlin, 1993.
.. [5] https://en.wikipedia.org/wiki/Image_moment
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.measure import (moments_central, moments_hu,
... moments_normalized)
>>> image = cp.zeros((20, 20), dtype=cp.float64)
>>> image[13:17, 13:17] = 0.5
>>> image[10:12, 10:12] = 1
>>> mu = moments_central(image)
>>> nu = moments_normalized(mu)
>>> moments_hu(nu)
array([7.45370370e-01, 3.51165981e-01, 1.04049179e-01, 4.06442107e-02,
2.64312299e-03, 2.40854582e-02, 6.50521303e-19])
"""
try:
from skimage.measure import moments_hu
except ImportError:
raise ImportError("moments_hu requires scikit-image.")
# CuPy Backend: TODO: Due to small arrays involved, just transfer to/from
# the CPU implementation.
float_dtype = cp.float32 if nu.dtype == cp.float32 else cp.float64
return cp.asarray(moments_hu(cp.asnumpy(nu)), dtype=float_dtype)
def centroid(image, *, spacing=None):
"""Return the (weighted) centroid of an image.
Parameters
----------
image : array
The input image.
spacing : tuple of float, shape (ndim, )
The pixel spacing along each axis of the image.
Returns
-------
center : tuple of float, length ``image.ndim``
The centroid of the (nonzero) pixels in ``image``.
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.measure import centroid
>>> image = cp.zeros((20, 20), dtype=cp.float64)
>>> image[13:17, 13:17] = 0.5
>>> image[10:12, 10:12] = 1
>>> centroid(image)
array([13.16666667, 13.16666667])
"""
mu = moments(image, order=1, spacing=spacing)
ndim = image.ndim
mu0 = mu[(0,) * ndim]
center = mu[
tuple(
(0,) * dim + (1,) + (0,) * (ndim - dim - 1) for dim in range(ndim)
)
]
center /= mu0
return center
def _get_inertia_tensor_2x2_kernel():
operation = """
F mu0, mxx, mxy, myy;
mu0 = mu[0];
mxx = mu[6];
myy = mu[2];
mxy = mu[4];
result[0] = myy / mu0;
result[1] = result[2] = -mxy / mu0;
result[3] = mxx / mu0;
"""
return cp.ElementwiseKernel(
in_params="raw F mu",
out_params="raw F result",
operation=operation,
name="cucim_skimage_measure_inertia_tensor_2x2",
)
def _get_inertia_tensor_3x3_kernel():
operation = """
F mu0, mxx, myy, mzz, mxy, mxz, myz;
mu0 = mu[0]; // mu[0, 0, 0]
mxx = mu[18]; // mu[2, 0, 0]
myy = mu[6]; // mu[0, 2, 0]
mzz = mu[2]; // mu[0, 0, 2]
mxy = mu[12]; // mu[1, 1, 0]
mxz = mu[10]; // mu[1, 0, 1]
myz = mu[4]; // mu[0, 1, 1]
result[0] = (myy + mzz) / mu0;
result[4] = (mxx + mzz) / mu0;
result[8] = (mxx + myy) / mu0;
result[1] = result[3] = -mxy / mu0;
result[2] = result[6] = -mxz / mu0;
result[5] = result[7] = -myz / mu0;
"""
return cp.ElementwiseKernel(
in_params="raw F mu",
out_params="raw F result",
operation=operation,
name="cucim_skimage_measure_inertia_tensor_3x3",
)
def inertia_tensor(image, mu=None, *, spacing=None):
"""Compute the inertia tensor of the input image.
Parameters
----------
image : array
The input image.
mu : array, optional
The pre-computed central moments of ``image``. The inertia tensor
computation requires the central moments of the image. If an
application requires both the central moments and the inertia tensor
(for example, `skimage.measure.regionprops`), then it is more
efficient to pre-compute them and pass them to the inertia tensor
call.
spacing : tuple of float, optional
The pixel spacing along each axis of the image.
Returns
-------
T : array, shape ``(image.ndim, image.ndim)``
The inertia tensor of the input image. :math:`T_{i, j}` contains
the covariance of image intensity along axes :math:`i` and :math:`j`.
References
----------
.. [1] https://en.wikipedia.org/wiki/Moment_of_inertia#Inertia_tensor
.. [2] Bernd Jähne. Spatio-Temporal Image Processing: Theory and
Scientific Applications. (Chapter 8: Tensor Methods) Springer, 1993.
"""
if mu is None:
# don't need higher-order moments
mu = moments_central(image, order=2, spacing=spacing)
else:
if mu.shape[0] < 3:
raise ValueError("mu must contain second order moments")
if mu.shape[0] > 3:
# if higher than 2nd order moments are present trim the array to
# match the expectations of the _get_inertia_tensor* kernels.
mu = mu[(slice(0, 3),) * mu.ndim]
mu = cp.ascontiguousarray(mu)
if image.ndim == 2:
result = cp.empty((2, 2), dtype=mu.dtype)
kern = _get_inertia_tensor_2x2_kernel()
kern(mu, result, size=1)
elif image.ndim == 3:
result = cp.empty((3, 3), dtype=mu.dtype)
kern = _get_inertia_tensor_3x3_kernel()
kern(mu, result, size=1)
else:
# CuPy Backend: mu and result are tiny, so faster on the CPU
mu = cp.asnumpy(mu)
mu0 = mu[(0,) * image.ndim]
# nD expression to get coordinates ([2, 0], [0, 2]) (2D),
# ([2, 0, 0], [0, 2, 0], [0, 0, 2]) (3D), etc.
corners2 = tuple(2 * np.eye(image.ndim, dtype=int))
# See https://ocw.mit.edu/courses/aeronautics-and-astronautics/
# 16-07-dynamics-fall-2009/lecture-notes/MIT16_07F09_Lec26.pdf
# Iii is the sum of second-order moments of every axis *except* i, not
# the second order moment of axis i.
# See also https://github.com/scikit-image/scikit-image/issues/3229
result = np.diag((np.sum(mu[corners2]) - mu[corners2]) / mu0)
for dims in itertools.combinations(range(image.ndim), 2):
mu_index = np.zeros(image.ndim, dtype=int)
mu_index[list(dims)] = 1
result[dims] = -mu[tuple(mu_index)] / mu0
result.T[dims] = -mu[tuple(mu_index)] / mu0
result = cp.asarray(result)
return result
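# Illustrative usage (a hedged sketch, not part of the original module):
#   T = inertia_tensor(img)                   # (ndim, ndim) tensor on the GPU
#   evs = inertia_tensor_eigvals(img, T=T)    # eigenvalues in descending order
# A strongly elongated bright object yields a large ratio evs[0] / evs[-1];
# regionprops derives its axis lengths from these eigenvalues.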
def inertia_tensor_eigvals(image, mu=None, T=None, *, spacing=None):
"""Compute the eigenvalues of the inertia tensor of the image.
The inertia tensor measures covariance of the image intensity along
the image axes. (See `inertia_tensor`.) The relative magnitude of the
eigenvalues of the tensor is thus a measure of the elongation of a
(bright) object in the image.
Parameters
----------
image : array
The input image.
mu : array, optional
The pre-computed central moments of ``image``.
T : array, shape ``(image.ndim, image.ndim)``
The pre-computed inertia tensor. If ``T`` is given, ``mu`` and
``image`` are ignored.
spacing : tuple of float, optional
The pixel spacing along each axis of the image.
Returns
-------
eigvals : list of float, length ``image.ndim``
The eigenvalues of the inertia tensor of ``image``, in descending
order.
Notes
-----
Computing the eigenvalues requires the inertia tensor of the input image.
This is much faster if the central moments (``mu``) are provided, or,
alternatively, one can provide the inertia tensor (``T``) directly.
"""
# avoid circular import
from ..feature.corner import (
_image_orthogonal_matrix22_eigvals,
_image_orthogonal_matrix33_eigvals,
)
if T is None:
T = inertia_tensor(image, mu, spacing=spacing)
if image.ndim == 2:
eigvals = _image_orthogonal_matrix22_eigvals(
T[0, 0], T[0, 1], T[1, 1], sort="descending", abs_sort=False
)
cp.maximum(eigvals, 0.0, out=eigvals)
elif image.ndim == 3:
# fmt: off
eigvals = _image_orthogonal_matrix33_eigvals(
T[0, 0], T[0, 1], T[0, 2], T[1, 1], T[1, 2], T[2, 2],
sort='descending', abs_sort=False
)
# fmt: on
cp.maximum(eigvals, 0.0, out=eigvals)
else:
# sort in descending order
eigvals = cp.sort(cp.linalg.eigvalsh(T))[::-1]
# call without out argument so copy will be made -> positive strides
eigvals = cp.maximum(eigvals, 0.0)
# Floating point precision problems could make a positive
# semidefinite matrix have an eigenvalue that is very slightly
# negative. This can cause problems down the line, so set values
# very near zero to zero.
return eigvals
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/_polygon.py
|
# TODO: use cupyx.scipy.signal once upstream fftconvolve and
# choose_conv_method for > 1d has been implemented.
import cupy as cp
def approximate_polygon(coords, tolerance):
"""Approximate a polygonal chain with the specified tolerance.
It is based on the Douglas-Peucker algorithm.
Note that the approximated polygon is always within the convex hull of the
original polygon.
Parameters
----------
coords : (N, 2) array
Coordinate array.
tolerance : float
Maximum distance from original points of polygon to approximated
polygonal chain. If tolerance is 0, the original coordinate array
is returned.
Returns
-------
coords : (M, 2) array
Approximated polygonal chain where M <= N.
References
----------
.. [1] https://en.wikipedia.org/wiki/Ramer-Douglas-Peucker_algorithm
"""
if tolerance <= 0:
return coords
chain = cp.zeros(coords.shape[0], "bool")
# pre-allocate distance array for all points
dists = cp.zeros(coords.shape[0])
chain[0] = True
chain[-1] = True
pos_stack = [(0, chain.shape[0] - 1)]
end_of_chain = False
while not end_of_chain:
start, end = pos_stack.pop()
# determine properties of current line segment
r0, c0 = cp.asnumpy(coords[start, :])
r1, c1 = cp.asnumpy(coords[end, :])
dr = r1 - r0
dc = c1 - c0
segment_angle = -cp.arctan2(dr, dc)
segment_dist = c0 * cp.sin(segment_angle) + r0 * cp.cos(segment_angle)
# select points in-between line segment
segment_coords = coords[start + 1 : end, :]
segment_dists = dists[start + 1 : end]
# check whether to take perpendicular or euclidean distance with
# inner product of vectors
# vectors from points -> start and end
dr0 = segment_coords[:, 0] - r0
dc0 = segment_coords[:, 1] - c0
dr1 = segment_coords[:, 0] - r1
dc1 = segment_coords[:, 1] - c1
# vectors points -> start and end projected on start -> end vector
projected_lengths0 = dr0 * dr + dc0 * dc
projected_lengths1 = -dr1 * dr - dc1 * dc
perp = cp.logical_and(projected_lengths0 > 0, projected_lengths1 > 0)
eucl = cp.logical_not(perp)
segment_dists[perp] = cp.abs(
segment_coords[perp, 0] * cp.cos(segment_angle)
+ segment_coords[perp, 1] * cp.sin(segment_angle)
- segment_dist
)
segment_dists[eucl] = cp.minimum(
# distance to start point
cp.sqrt(dc0[eucl] ** 2 + dr0[eucl] ** 2),
# distance to end point
cp.sqrt(dc1[eucl] ** 2 + dr1[eucl] ** 2),
)
if cp.any(segment_dists > tolerance):
# select point with maximum distance to line
new_end = start + cp.argmax(segment_dists) + 1
pos_stack.append((new_end, end))
pos_stack.append((start, new_end))
chain[new_end] = True
if len(pos_stack) == 0:
end_of_chain = True
return coords[chain, :]
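# Illustrative usage (hedged sketch; the exact number of retained points depends on
# the geometry and the chosen tolerance):
#   coords = cp.array([[0., 0.], [1., 0.1], [2., -0.1], [6., 4.], [8., 4.1]])
#   simplified = approximate_polygon(coords, tolerance=0.5)
# The first and last points are always kept; interior points survive only when
# their distance to the current simplified segment exceeds ``tolerance``.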
# B-Spline subdivision
_SUBDIVISION_MASKS = {
# degree: (mask_even, mask_odd)
# extracted from (degree + 2)th row of Pascal's triangle
1: ([1, 1], [1, 1]),
2: ([3, 1], [1, 3]),
3: ([1, 6, 1], [0, 4, 4]),
4: ([5, 10, 1], [1, 10, 5]),
5: ([1, 15, 15, 1], [0, 6, 20, 6]),
6: ([7, 35, 21, 1], [1, 21, 35, 7]),
7: ([1, 28, 70, 28, 1], [0, 8, 56, 56, 8]),
}
def subdivide_polygon(coords, degree=2, preserve_ends=False):
"""Subdivision of polygonal curves using B-Splines.
Note that the resulting curve is always within the convex hull of the
original polygon. Circular polygons stay closed after subdivision.
Parameters
----------
coords : (N, 2) array
Coordinate array.
degree : {1, 2, 3, 4, 5, 6, 7}, optional
Degree of B-Spline. Default is 2.
preserve_ends : bool, optional
Preserve first and last coordinate of non-circular polygon. Default is
False.
Returns
-------
coords : (M, 2) array
Subdivided coordinate array.
References
----------
.. [1] http://mrl.nyu.edu/publications/subdiv-course2000/coursenotes00.pdf
"""
from cucim.skimage import _vendored as signal
if degree not in _SUBDIVISION_MASKS:
raise ValueError(
"Invalid B-Spline degree. Only degree 1 - 7 is " "supported."
)
circular = cp.all(coords[0, :] == coords[-1, :])
method = "valid"
if circular:
# remove last coordinate because of wrapping
coords = coords[:-1, :]
# circular convolution by wrapping boundaries
method = "same"
mask_even, mask_odd = _SUBDIVISION_MASKS[degree]
# divide by total weight
float_dtype = coords.dtype if coords.dtype.kind == "f" else cp.float64
mask_even = cp.array(mask_even, float_dtype) / (2**degree)
mask_odd = cp.array(mask_odd, float_dtype) / (2**degree)
even = signal.convolve2d(
coords.T, cp.atleast_2d(mask_even), mode=method, boundary="wrap"
)
odd = signal.convolve2d(
coords.T, cp.atleast_2d(mask_odd), mode=method, boundary="wrap"
)
out = cp.empty((even.shape[1] + odd.shape[1], 2), dtype=float_dtype)
out[1::2] = even.T
out[::2] = odd.T
if circular:
# close polygon
out = cp.vstack([out, out[0, :]])
if preserve_ends and not circular:
out = cp.vstack([coords[0, :], out, coords[-1, :]])
return out
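# Illustrative usage (hedged sketch; output values not asserted here):
#   square = cp.array([[0., 0.], [0., 1.], [1., 1.], [1., 0.], [0., 0.]])
#   smoothed = subdivide_polygon(square, degree=2)  # still closed: first == last row
# Repeated calls converge towards the B-spline limit curve, which always lies
# inside the convex hull of the original polygon.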
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/_label_kernels.py
|
"""Kernels for scikit-image label.
These are copied from CuPy, with modification to add a greyscale_mode
parameter as needed for scikit-image.
"""
import cupy
import numpy
def _label(x, structure, y, greyscale_mode=False):
elems = numpy.where(structure != 0)
vecs = [elems[dm] - 1 for dm in range(x.ndim)]
offset = vecs[0]
for dm in range(1, x.ndim):
offset = offset * 3 + vecs[dm]
indxs = numpy.where(offset < 0)[0]
dirs = [[vecs[dm][dr] for dm in range(x.ndim)] for dr in indxs]
dirs = cupy.array(dirs, dtype=numpy.int32)
ndirs = indxs.shape[0]
y_shape = cupy.array(y.shape, dtype=numpy.int32)
count = cupy.zeros(2, dtype=numpy.int32)
_kernel_init()(x, y)
if greyscale_mode:
_kernel_connect(True)(x, y_shape, dirs, ndirs, x.ndim, y, size=y.size)
else:
_kernel_connect(False)(y_shape, dirs, ndirs, x.ndim, y, size=y.size)
_kernel_count()(y, count, size=y.size)
maxlabel = int(count[0]) # synchronize
labels = cupy.empty(maxlabel, dtype=numpy.int32)
_kernel_labels()(y, count, labels, size=y.size)
_kernel_finalize()(maxlabel, cupy.sort(labels), y, size=y.size)
return maxlabel
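# Hedged sketch of the offset bookkeeping above for a fully-connected 2D structure
# (numpy.ones((3, 3))): `vecs` holds per-axis offsets in {-1, 0, 1}, `offset` is the
# linearized value vecs[0] * 3 + vecs[1] in [-4, 4], and only the negative half
# (the four "earlier" neighbours) is kept in `dirs`, relying on the structuring
# element being centro-symmetric as noted in _kernel_connect below.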
"""
Elementwise kernels for use by label
"""
def _kernel_init():
return cupy.ElementwiseKernel(
"X x",
"Y y",
"if (x == 0) { y = -1; } else { y = i; }",
"cucim_skimage_measure_label_init",
)
def _kernel_connect(greyscale_mode=False, int_t="int"):
"""
Notes
-----
dirs is a (n_neig//2, ndim) of relative offsets to the neighboring voxels.
For example, for structure = np.ones((3, 3)):
dirs = array([[-1, -1],
[-1, 0],
[-1, 1],
[ 0, -1]], dtype=int32)
(Implementation assumes a centro-symmetric structure)
ndirs = dirs.shape[0]
In the dirs loop below, there is a loop over the ndim neighbors:
Here, index j corresponds to the current pixel and k is the current
neighbor location.
"""
in_params = "raw int32 shape, raw int32 dirs, int32 ndirs, int32 ndim"
if greyscale_mode:
# greyscale mode -> different values receive different labels
x_condition = "if (x[k] != x[j]) continue;"
in_params = "raw X x, " + in_params
else:
# binary mode -> all non-background voxels treated the same
x_condition = ""
# Note: atomicCAS is implemented for int, unsigned short, unsigned int, and
# unsigned long long
code = """
if (y[i] < 0) continue;
for (int dr = 0; dr < ndirs; dr++) {{
{int_t} j = i;
{int_t} rest = j;
{int_t} stride = 1;
{int_t} k = 0;
for (int dm = ndim-1; dm >= 0; dm--) {{
int pos = rest % shape[dm] + dirs[dm + dr * ndim];
if (pos < 0 || pos >= shape[dm]) {{
k = -1;
break;
}}
k += pos * stride;
rest /= shape[dm];
stride *= shape[dm];
}}
if (k < 0) continue;
if (y[k] < 0) continue;
{x_condition}
while (1) {{
while (j != y[j]) {{ j = y[j]; }}
while (k != y[k]) {{ k = y[k]; }}
if (j == k) break;
if (j < k) {{
{int_t} old = atomicCAS( &y[k], (Y)k, (Y)j );
if (old == k) break;
k = old;
}}
else {{
{int_t} old = atomicCAS( &y[j], (Y)j, (Y)k );
if (old == j) break;
j = old;
}}
}}
}}
""".format(
x_condition=x_condition, int_t=int_t
)
return cupy.ElementwiseKernel(
in_params,
"raw Y y",
code,
"cucim_skimage_measure_label_connect",
)
def _kernel_count():
return cupy.ElementwiseKernel(
"",
"raw Y y, raw int32 count",
"""
if (y[i] < 0) continue;
int j = i;
while (j != y[j]) { j = y[j]; }
if (j != i) y[i] = j;
else atomicAdd(&count[0], 1);
""",
"cucim_skimage_measure_label_count",
)
def _kernel_labels():
return cupy.ElementwiseKernel(
"",
"raw Y y, raw int32 count, raw int32 labels",
"""
if (y[i] != i) continue;
int j = atomicAdd(&count[1], 1);
labels[j] = i;
""",
"cucim_skimage_measure_label_labels",
)
def _kernel_finalize():
return cupy.ElementwiseKernel(
"int32 maxlabel",
"raw int32 labels, raw Y y",
"""
if (y[i] < 0) {
y[i] = 0;
continue;
}
int yi = y[i];
int j_min = 0;
int j_max = maxlabel - 1;
int j = (j_min + j_max) / 2;
while (j_min < j_max) {
if (yi == labels[j]) break;
if (yi < labels[j]) j_max = j - 1;
else j_min = j + 1;
j = (j_min + j_max) / 2;
}
y[i] = j + 1;
""",
"cucim_skimage_measure_label_finalize",
)
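# Hedged note on _kernel_finalize: `labels` receives the root indices collected by
# _kernel_labels (sorted by the caller in _label), so the binary search above maps
# each pixel's root index to its rank, yielding consecutive labels 1..maxlabel,
# while background pixels (set to -1 by _kernel_init) become 0.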
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/profile.py
|
import math
import cupy as cp
import numpy as np
import cucim.skimage._vendored.ndimage as ndi
from .._shared.utils import _fix_ndimage_mode, _validate_interpolation_order
def profile_line(
image,
src,
dst,
linewidth=1,
order=None,
mode="reflect",
cval=0.0,
*,
reduce_func=cp.mean,
):
"""Return the intensity profile of an image measured along a scan line.
Parameters
----------
image : ndarray, shape (M, N[, C])
The image, either grayscale (2D array) or multichannel
(3D array, where the final axis contains the channel
information).
src : array_like, shape (2, )
The coordinates of the start point of the scan line.
dst : array_like, shape (2, )
The coordinates of the end point of the scan
line. The destination point is *included* in the profile, in
contrast to standard numpy indexing.
linewidth : int, optional
Width of the scan, perpendicular to the line
order : int in {0, 1, 2, 3, 4, 5}, optional
The order of the spline interpolation, default is 0 if
image.dtype is bool and 1 otherwise. The order has to be in
the range 0-5. See `skimage.transform.warp` for detail.
mode : {'constant', 'nearest', 'reflect', 'mirror', 'wrap'}, optional
How to compute any values falling outside of the image.
cval : float, optional
If `mode` is 'constant', what constant value to use outside the image.
reduce_func : callable, optional
Function used to calculate the aggregation of pixel values
perpendicular to the profile_line direction when `linewidth` > 1.
If set to None the unreduced array will be returned.
Returns
-------
return_value : array
The intensity profile along the scan line. The length of the profile
is the ceil of the computed length of the scan line.
Examples
--------
>>> import cupy as cp
>>> x = cp.asarray([[1, 1, 1, 2, 2, 2]])
>>> img = cp.vstack([cp.zeros_like(x), x, x, x, cp.zeros_like(x)])
>>> img
array([[0, 0, 0, 0, 0, 0],
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
[1, 1, 1, 2, 2, 2],
[0, 0, 0, 0, 0, 0]])
>>> profile_line(img, (2, 1), (2, 4))
array([1., 1., 2., 2.])
>>> profile_line(img, (1, 0), (1, 6), cval=4)
array([1., 1., 1., 2., 2., 2., 2.])
The destination point is included in the profile, in contrast to
standard numpy indexing.
For example:
>>> profile_line(img, (1, 0), (1, 6)) # The final point is out of bounds
array([1., 1., 1., 2., 2., 2., 2.])
>>> profile_line(img, (1, 0), (1, 5)) # This accesses the full first row
array([1., 1., 1., 2., 2., 2.])
For different reduce_func inputs:
>>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.mean)
array([0.66666667, 0.66666667, 0.66666667, 1.33333333])
>>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.max)
array([1, 1, 1, 2])
>>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.sum)
array([2, 2, 2, 4])
The unreduced array will be returned when `reduce_func` is None or when
`reduce_func` acts on each pixel value individually.
>>> profile_line(img, (1, 2), (4, 2), linewidth=3, order=0,
... reduce_func=None)
array([[1, 1, 2],
[1, 1, 2],
[1, 1, 2],
[0, 0, 0]])
>>> profile_line(img, (1, 0), (1, 3), linewidth=3, reduce_func=cp.sqrt)
array([[1. , 1. , 0. ],
[1. , 1. , 0. ],
[1. , 1. , 0. ],
[1.41421356, 1.41421356, 0. ]])
"""
order = _validate_interpolation_order(image.dtype, order)
mode = _fix_ndimage_mode(mode)
perp_lines = _line_profile_coordinates(src, dst, linewidth=linewidth)
if image.ndim == 3:
pixels = [
ndi.map_coordinates(
image[..., i],
perp_lines,
prefilter=order > 1,
order=order,
mode=mode,
cval=cval,
)
for i in range(image.shape[2])
]
pixels = cp.transpose(cp.stack(pixels, axis=0), (1, 2, 0))
else:
pixels = ndi.map_coordinates(
image,
perp_lines,
prefilter=order > 1,
order=order,
mode=mode,
cval=cval,
)
# The outputted array with reduce_func=None gives an array where the
# row values (axis=1) are flipped. Here, we make this consistent.
pixels = np.flip(pixels, axis=1)
if reduce_func is None:
intensities = pixels
else:
try:
intensities = reduce_func(pixels, axis=1)
except TypeError: # function doesn't allow axis kwarg
intensities = cp.apply_along_axis(reduce_func, arr=pixels, axis=1)
return intensities
def _line_profile_coordinates(src, dst, linewidth=1):
"""Return the coordinates of the profile of an image along a scan line.
Parameters
----------
src : 2-tuple of numeric scalar (float or int)
The start point of the scan line.
dst : 2-tuple of numeric scalar (float or int)
The end point of the scan line.
linewidth : int, optional
Width of the scan, perpendicular to the line
Returns
-------
coords : array, shape (2, N, C), float
The coordinates of the profile along the scan line. The length of the
profile is the ceil of the computed length of the scan line.
Notes
-----
This is a utility method meant to be used internally by skimage functions.
The destination point is included in the profile, in contrast to
standard numpy indexing.
"""
src_row, src_col = src
dst_row, dst_col = dst
d_row, d_col = (d - s for d, s in zip(dst, src))
theta = math.atan2(d_row, d_col)
length = math.ceil(math.hypot(d_row, d_col) + 1)
# we add one above because we include the last point in the profile
# (in contrast to standard numpy indexing)
line_col = cp.linspace(src_col, dst_col, length)
line_row = cp.linspace(src_row, dst_row, length)
# we subtract 1 from linewidth to change from pixel-counting
# (make this line 3 pixels wide) to point distances (the
# distance between pixel centers)
col_width = (linewidth - 1) * cp.sin(-theta) / 2
row_width = (linewidth - 1) * cp.cos(theta) / 2
perp_rows = cp.stack(
[
cp.linspace(row_i - row_width, row_i + row_width, linewidth)
for row_i in line_row
]
)
perp_cols = cp.stack(
[
cp.linspace(col_i - col_width, col_i + col_width, linewidth)
for col_i in line_col
]
)
return cp.stack([perp_rows, perp_cols])
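# Hedged worked example of the width handling above: for src=(1, 1), dst=(1, 4) and
# linewidth=3 the scan line is horizontal (theta = 0), so col_width = 0 and
# row_width = 1; each of the four sampled positions then gets the perpendicular
# rows [row_i - 1, row_i, row_i + 1].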
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/_regionprops.py
|
import inspect
import math
from functools import wraps
from math import pi as PI
from warnings import warn
import cupy as cp
import numpy as np
from cupyx.scipy import ndimage as ndi
from scipy.ndimage import find_objects as cpu_find_objects
from cucim.skimage._vendored import pad
from . import _moments
from ._regionprops_utils import euler_number, perimeter, perimeter_crofton
__all__ = ["regionprops", "euler_number", "perimeter", "perimeter_crofton"]
# All values in this PROPS dict correspond to current scikit-image property
# names. The keys in this PROPS dict correspond to older names used in prior
# releases. For backwards compatibility, these older names will continue to
# work, but will not be documented.
PROPS = {
"Area": "area",
"BoundingBox": "bbox",
"BoundingBoxArea": "area_bbox",
"bbox_area": "area_bbox",
"CentralMoments": "moments_central",
"Centroid": "centroid",
"ConvexArea": "area_convex",
"convex_area": "area_convex",
# 'ConvexHull',
"ConvexImage": "image_convex",
"convex_image": "image_convex",
"Coordinates": "coords",
"Eccentricity": "eccentricity",
"EquivDiameter": "equivalent_diameter_area",
"equivalent_diameter": "equivalent_diameter_area",
"EulerNumber": "euler_number",
"Extent": "extent",
# 'Extrema',
"FeretDiameter": "feret_diameter_max",
"FeretDiameterMax": "feret_diameter_max",
"FilledArea": "area_filled",
"filled_area": "area_filled",
"FilledImage": "image_filled",
"filled_image": "image_filled",
"HuMoments": "moments_hu",
"Image": "image",
"InertiaTensor": "inertia_tensor",
"InertiaTensorEigvals": "inertia_tensor_eigvals",
"IntensityImage": "image_intensity",
"intensity_image": "image_intensity",
"Label": "label",
"LocalCentroid": "centroid_local",
"local_centroid": "centroid_local",
"MajorAxisLength": "axis_major_length",
"major_axis_length": "axis_major_length",
"MaxIntensity": "intensity_max",
"max_intensity": "intensity_max",
"MeanIntensity": "intensity_mean",
"mean_intensity": "intensity_mean",
"MinIntensity": "intensity_min",
"min_intensity": "intensity_min",
"MinorAxisLength": "axis_minor_length",
"minor_axis_length": "axis_minor_length",
"Moments": "moments",
"NormalizedMoments": "moments_normalized",
"Orientation": "orientation",
"Perimeter": "perimeter",
"CroftonPerimeter": "perimeter_crofton",
# 'PixelIdxList',
# 'PixelList',
"Slice": "slice",
"Solidity": "solidity",
# 'SubarrayIdx'
"WeightedCentralMoments": "moments_weighted_central",
"weighted_moments_central": "moments_weighted_central",
"WeightedCentroid": "centroid_weighted",
"weighted_centroid": "centroid_weighted",
"WeightedHuMoments": "moments_weighted_hu",
"weighted_moments_hu": "moments_weighted_hu",
"WeightedLocalCentroid": "centroid_weighted_local",
"weighted_local_centroid": "centroid_weighted_local",
"WeightedMoments": "moments_weighted",
"weighted_moments": "moments_weighted",
"WeightedNormalizedMoments": "moments_weighted_normalized",
"weighted_moments_normalized": "moments_weighted_normalized",
}
COL_DTYPES = {
"area": float,
"area_bbox": float,
"area_convex": float,
"area_filled": float,
"axis_major_length": float,
"axis_minor_length": float,
"bbox": int,
"centroid": float,
"centroid_local": float,
"centroid_weighted": float,
"centroid_weighted_local": float,
"coords": object,
"eccentricity": float,
"equivalent_diameter_area": float,
"euler_number": int,
"extent": float,
"feret_diameter_max": float,
"image": object,
"image_convex": object,
"image_filled": object,
"image_intensity": object,
"inertia_tensor": float,
"inertia_tensor_eigvals": float,
"intensity_max": float,
"intensity_mean": float,
"intensity_min": float,
"label": int,
"moments": float,
"moments_central": float,
"moments_hu": float,
"moments_normalized": float,
"moments_weighted": float,
"moments_weighted_central": float,
"moments_weighted_hu": float,
"moments_weighted_normalized": float,
"orientation": float,
"perimeter": float,
"perimeter_crofton": float,
"slice": object,
"solidity": float,
}
OBJECT_COLUMNS = [col for col, dtype in COL_DTYPES.items() if dtype == object]
PROP_VALS = set(PROPS.values())
_require_intensity_image = (
"image_intensity",
"intensity_max",
"intensity_mean",
"intensity_min",
"moments_weighted",
"moments_weighted_central",
"centroid_weighted",
"centroid_weighted_local",
"moments_weighted_hu",
"moments_weighted_normalized",
)
def _infer_number_of_required_args(func):
"""Infer the number of required arguments for a function
Parameters
----------
func : callable
The function that is being inspected.
Returns
-------
n_args : int
The number of required arguments of func.
"""
argspec = inspect.getfullargspec(func)
n_args = len(argspec.args)
if argspec.defaults is not None:
n_args -= len(argspec.defaults)
return n_args
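# Hedged example: _infer_number_of_required_args(lambda mask, intensity, q=2: 0)
# returns 2, since the single default value removes one of the three positional
# arguments from the required count.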
def _infer_regionprop_dtype(func, *, intensity, ndim):
"""Infer the dtype of a region property calculated by func.
If a region property function always returns the same shape and type of
output regardless of input size, then the dtype is the dtype of the
returned array. Otherwise, the property has object dtype.
Parameters
----------
func : callable
Function to be tested. The signature should be array[bool] -> Any if
intensity is False, or *(array[bool], array[float]) -> Any otherwise.
intensity : bool
Whether the regionprop is calculated on an intensity image.
ndim : int
The number of dimensions for which to check func.
Returns
-------
dtype : NumPy data type
The data type of the returned property.
"""
labels = [1, 2]
sample = cp.zeros((3,) * ndim, dtype=np.intp)
sample[(0,) * ndim] = labels[0]
sample[(slice(1, None),) * ndim] = labels[1]
propmasks = [(sample == n) for n in labels]
rng = cp.random.default_rng()
if intensity and _infer_number_of_required_args(func) == 2:
def _func(mask):
return func(mask, rng.random(sample.shape))
else:
_func = func
props1, props2 = map(_func, propmasks)
if (
cp.isscalar(props1)
and cp.isscalar(props2)
or cp.asarray(props1).shape == cp.asarray(props2).shape
):
dtype = cp.asarray(props1).dtype.type
else:
dtype = np.object_
return dtype
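# Hedged example: an extra property such as ``lambda mask: float(mask.sum())``
# yields the same scalar shape for both sample regions and is therefore assigned a
# float column dtype, whereas one returning the region's nonzero coordinates (whose
# length differs between the two sample regions) falls back to object dtype.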
def _cached(f):
@wraps(f)
def wrapper(obj):
cache = obj._cache
prop = f.__name__
if not ((prop in cache) and obj._cache_active):
cache[prop] = f(obj)
return cache[prop]
return wrapper
def only2d(method):
@wraps(method)
def func2d(self, *args, **kwargs):
if self._ndim > 2:
raise NotImplementedError(
f"Property {method.__name__} is not implemented for 3D images"
)
return method(self, *args, **kwargs)
return func2d
def _inertia_eigvals_to_axes_lengths_3D(inertia_tensor_eigvals):
"""Compute ellipsoid axis lengths from inertia tensor eigenvalues.
Parameters
----------
inertia_tensor_eigvals : sequence of float
A sequence of 3 floating point eigenvalues, sorted in descending order.
Returns
-------
axis_lengths : list of float
The ellipsoid axis lengths sorted in descending order.
Notes
-----
Let a >= b >= c be the ellipsoid semi-axes and s1 >= s2 >= s3 be the
inertia tensor eigenvalues.
The inertia tensor eigenvalues are given for a solid ellipsoid in [1]_.
s1 = 1 / 5 * (a**2 + b**2)
s2 = 1 / 5 * (a**2 + c**2)
s3 = 1 / 5 * (b**2 + c**2)
Rearranging to solve for a, b, c in terms of s1, s2, s3 gives
a = math.sqrt(5 / 2 * ( s1 + s2 - s3))
b = math.sqrt(5 / 2 * ( s1 - s2 + s3))
c = math.sqrt(5 / 2 * (-s1 + s2 + s3))
We can then simply replace sqrt(5/2) by sqrt(10) to get the full axes
lengths rather than the semi-axes lengths.
References
----------
.. [1] https://en.wikipedia.org/wiki/List_of_moments_of_inertia#List_of_3D_inertia_tensors
""" # noqa: E501
axis_lengths = []
for ax in range(2, -1, -1):
w = sum(
v * -1 if i == ax else v
for i, v in enumerate(inertia_tensor_eigvals)
)
axis_lengths.append(math.sqrt(10 * w))
return axis_lengths
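# Hedged sanity check of the formula above: for a solid sphere of radius r,
# s1 = s2 = s3 = 2/5 * r**2, so every axis length evaluates to
# sqrt(10 * 2/5 * r**2) = 2 * r, i.e. the sphere's diameter, as expected for a
# "full" (rather than semi-) axis length.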
class RegionProperties:
"""Please refer to `skimage.measure.regionprops` for more information
on the available region properties.
"""
def __init__(
self,
slice,
label,
label_image,
intensity_image,
cache_active,
*,
extra_properties=None,
spacing=None,
):
if intensity_image is not None:
ndim = label_image.ndim
if not (
intensity_image.shape[:ndim] == label_image.shape
and intensity_image.ndim in [ndim, ndim + 1]
):
raise ValueError(
"Label and intensity image shapes must match,"
" except for channel (last) axis."
)
multichannel = label_image.shape < intensity_image.shape
else:
multichannel = False
self.label = label
self._slice = slice
self.slice = slice
self._label_image = label_image
self._intensity_image = intensity_image
self._cache_active = cache_active
self._cache = {}
self._ndim = label_image.ndim
self._multichannel = multichannel
self._spatial_axes = tuple(range(self._ndim))
self._spacing = spacing if spacing is not None else (1.0,) * self._ndim
if isinstance(self._spacing, cp.ndarray):
self._pixel_area = cp.product(self._spacing)
else:
self._pixel_area = math.prod(self._spacing)
self._extra_properties = {}
if extra_properties is not None:
for func in extra_properties:
name = func.__name__
if hasattr(self, name):
msg = (
f"Extra property '{name}' is shadowed by existing "
f"property and will be inaccessible. Consider "
f"renaming it."
)
warn(msg)
self._extra_properties = {
func.__name__: func for func in extra_properties
}
def __getattr__(self, attr):
if self._intensity_image is None and attr in _require_intensity_image:
raise AttributeError(
f"Attribute '{attr}' unavailable when `intensity_image` "
f"has not been specified."
)
if attr in self._extra_properties:
func = self._extra_properties[attr]
n_args = _infer_number_of_required_args(func)
# determine whether func requires intensity image
if n_args == 2:
if self._intensity_image is not None:
if self._multichannel:
multichannel_list = [
func(self.image, self.image_intensity[..., i])
for i in range(self.image_intensity.shape[-1])
]
return cp.stack(multichannel_list, axis=-1)
else:
return func(self.image, self.image_intensity)
else:
raise AttributeError(
f"intensity image required to calculate {attr}"
)
elif n_args == 1:
return func(self.image)
else:
raise AttributeError(
f"Custom regionprop function's number of arguments must "
f"be 1 or 2, but {attr} takes {n_args} arguments."
)
elif attr in PROPS and attr.lower() == attr:
if (
self._intensity_image is None
and PROPS[attr] in _require_intensity_image
):
raise AttributeError(
f"Attribute '{attr}' unavailable when `intensity_image` "
f"has not been specified."
)
# retrieve deprecated property (excluding old CamelCase ones)
return getattr(self, PROPS[attr])
else:
raise AttributeError(
f"'{type(self)}' object has no attribute '{attr}'"
)
def __setattr__(self, name, value):
if name in PROPS:
super().__setattr__(PROPS[name], value)
else:
super().__setattr__(name, value)
@property
@_cached
def num_pixels(self):
return np.sum(self.image)
@property
@_cached
def area(self):
return cp.sum(self.image) * self._pixel_area
@property
def bbox(self):
"""
Returns
-------
A tuple of the bounding box's start coordinates for each dimension,
followed by the end coordinates for each dimension
"""
return tuple(
[self.slice[i].start for i in range(self._ndim)]
+ [self.slice[i].stop for i in range(self._ndim)]
)
@property
def area_bbox(self):
return self.image.size * self._pixel_area
@property
def centroid(self):
return tuple(cp.asnumpy(self.coords_scaled.mean(axis=0)))
@property
@_cached
def area_convex(self):
return cp.sum(self.image_convex) * self._pixel_area
@property
@_cached
def image_convex(self):
# TODO: grlee77: avoid host/device transfers
# from ..morphology.convex_hull import convex_hull_image
from skimage.morphology.convex_hull import convex_hull_image
# CuPy Backend: explicitly cast to uint8 to avoid the issue
# reported in https://github.com/cupy/cupy/issues/4354
return cp.asarray(convex_hull_image(cp.asnumpy(self.image))).astype(
cp.uint8
)
@property
def coords_scaled(self):
indices = cp.nonzero(self.image)
return cp.vstack(
[
(indices[i] + self.slice[i].start) * s
for i, s in zip(range(self._ndim), self._spacing)
]
).T
@property
def coords(self):
indices = cp.nonzero(self.image)
return cp.vstack(
[indices[i] + self.slice[i].start for i in range(self._ndim)]
).T
@property
@only2d
def eccentricity(self):
l1, l2 = self.inertia_tensor_eigvals
if l1 == 0:
return 0
return math.sqrt(1 - l2 / l1)
@property
def equivalent_diameter_area(self):
if self._ndim == 2:
return math.sqrt(4 * self.area / PI)
return (2 * self._ndim * self.area / PI) ** (1 / self._ndim)
@property
def euler_number(self):
if self._ndim not in [2, 3]:
raise NotImplementedError(
"Euler number is implemented for " "2D or 3D images only"
)
return euler_number(self.image, self._ndim)
@property
def extent(self):
return self.area / self.area_bbox
@property
def feret_diameter_max(self):
from scipy.spatial.distance import pdist
from skimage.measure import find_contours, marching_cubes
# TODO: implement marching cubes, etc.
warn("feret diameter_max currently not implemented on GPU.")
identity_convex_hull = pad(
self.image_convex, 2, mode="constant", constant_values=0
)
identity_convex_hull = cp.asnumpy(identity_convex_hull)
if self._ndim == 2:
coordinates = np.vstack(
find_contours(identity_convex_hull, 0.5, fully_connected="high")
)
elif self._ndim == 3:
coordinates, _, _, _ = marching_cubes(
identity_convex_hull, level=0.5
)
distances = pdist(coordinates * self._spacing, "sqeuclidean")
return math.sqrt(np.max(distances))
@property
def area_filled(self):
return cp.sum(self.image_filled) * self._pixel_area
@property
@_cached
def image_filled(self):
structure = cp.ones((3,) * self._ndim)
return ndi.binary_fill_holes(self.image, structure)
@property
@_cached
def image(self):
return self._label_image[self.slice] == self.label
@property
@_cached
def inertia_tensor(self):
mu = self.moments_central
return _moments.inertia_tensor(self.image, mu, spacing=self._spacing)
@property
@_cached
def inertia_tensor_eigvals(self):
return _moments.inertia_tensor_eigvals(
self.image, T=self.inertia_tensor
)
@property
@_cached
def image_intensity(self):
if self._intensity_image is None:
raise AttributeError("No intensity image specified.")
image = (
self.image
if not self._multichannel
else cp.expand_dims(self.image, self._ndim)
)
return self._intensity_image[self.slice] * image
def _image_intensity_double(self):
return self.image_intensity.astype(cp.double, copy=False)
@property
def centroid_local(self):
M = self.moments
return tuple(
M[tuple(cp.eye(self._ndim, dtype=int))] / M[(0,) * self._ndim]
)
@property
def intensity_max(self):
vals = self.image_intensity[self.image]
return cp.max(vals, axis=0).astype(cp.float64, copy=False)
@property
def intensity_mean(self):
return cp.mean(self.image_intensity[self.image], axis=0)
@property
def intensity_min(self):
vals = self.image_intensity[self.image]
return cp.min(vals, axis=0).astype(cp.float64, copy=False)
@property
def axis_major_length(self):
if self._ndim == 2:
l1 = self.inertia_tensor_eigvals[0]
return 4 * math.sqrt(l1)
elif self._ndim == 3:
# equivalent to _inertia_eigvals_to_axes_lengths_3D(ev)[0]
ev = self.inertia_tensor_eigvals
return math.sqrt(10 * (ev[0] + ev[1] - ev[2]))
else:
raise ValueError("axis_major_length only available in 2D and 3D")
@property
def axis_minor_length(self):
if self._ndim == 2:
l2 = self.inertia_tensor_eigvals[-1]
return 4 * math.sqrt(l2)
elif self._ndim == 3:
# equivalent to _inertia_eigvals_to_axes_lengths_3D(ev)[-1]
ev = self.inertia_tensor_eigvals
return math.sqrt(10 * (-ev[0] + ev[1] + ev[2]))
else:
raise ValueError("axis_minor_length only available in 2D and 3D")
@property
@_cached
def moments(self):
M = _moments.moments(
self.image.astype(cp.uint8), 3, spacing=self._spacing
)
return M
@property
@_cached
def moments_central(self):
mu = _moments.moments_central(
self.image.astype(cp.uint8),
self.centroid_local,
order=3,
spacing=self._spacing,
)
return mu
@property
@only2d
def moments_hu(self):
if any(s != 1.0 for s in self._spacing):
raise NotImplementedError(
"`moments_hu` supports spacing = (1, 1) only"
)
return _moments.moments_hu(self.moments_normalized)
@property
@_cached
def moments_normalized(self):
return _moments.moments_normalized(
self.moments_central, 3, spacing=self._spacing
)
@property
@only2d
def orientation(self):
a, b, b, c = self.inertia_tensor.ravel()
if a - c == 0:
if b < 0:
return -PI / 4.0
else:
return PI / 4.0
else:
return 0.5 * math.atan2(-2 * b, c - a)
@property
@only2d
def perimeter(self):
if len(np.unique(self._spacing)) != 1:
raise NotImplementedError(
"`perimeter` supports isotropic spacings only"
)
return perimeter(self.image, 4) * self._spacing[0]
@property
@only2d
def perimeter_crofton(self):
if len(np.unique(self._spacing)) != 1:
raise NotImplementedError(
"`perimeter` supports isotropic spacings only"
)
return perimeter_crofton(self.image, 4) * self._spacing[0]
@property
def solidity(self):
return self.area / self.area_convex
@property
def centroid_weighted(self):
ctr = self.centroid_weighted_local
return tuple(idx + slc.start for idx, slc in zip(ctr, self.slice))
@property
def centroid_weighted_local(self):
M = self.moments_weighted
return M[tuple(cp.eye(self._ndim, dtype=int))] / M[(0,) * self._ndim]
@property
@_cached
def moments_weighted(self):
image = self._image_intensity_double()
if self._multichannel:
moments = cp.stack(
[
_moments.moments(
image[..., i], order=3, spacing=self._spacing
)
for i in range(image.shape[-1])
],
axis=-1,
)
else:
moments = _moments.moments(image, order=3, spacing=self._spacing)
return moments
@property
@_cached
def moments_weighted_central(self):
ctr = self.centroid_weighted_local
image = self._image_intensity_double()
if self._multichannel:
moments_list = [
_moments.moments_central(
image[..., i],
center=ctr[..., i],
order=3,
spacing=self._spacing,
)
for i in range(image.shape[-1])
]
moments = cp.stack(moments_list, axis=-1)
else:
moments = _moments.moments_central(
image, ctr, order=3, spacing=self._spacing
)
return moments
@property
@only2d
def moments_weighted_hu(self):
if not (np.array(self._spacing) == np.array([1, 1])).all():
raise NotImplementedError(
"`moments_hu` supports spacing = (1, 1) only"
)
nu = self.moments_weighted_normalized
if self._multichannel:
nchannels = self._intensity_image.shape[-1]
return cp.stack(
[_moments.moments_hu(nu[..., i]) for i in range(nchannels)],
axis=-1,
)
else:
return _moments.moments_hu(nu)
@property
@_cached
def moments_weighted_normalized(self):
mu = self.moments_weighted_central
if self._multichannel:
nchannels = self._intensity_image.shape[-1]
return cp.stack(
[
_moments.moments_normalized(
mu[..., i], order=3, spacing=self._spacing
)
for i in range(nchannels)
],
axis=-1,
)
else:
return _moments.moments_normalized(
mu, order=3, spacing=self._spacing
)
def __iter__(self):
props = PROP_VALS
if self._intensity_image is None:
unavailable_props = _require_intensity_image
props = props.difference(unavailable_props)
return iter(sorted(props))
def __getitem__(self, key):
value = getattr(self, key, None)
if value is not None:
return value
else: # backwards compatibility
return getattr(self, PROPS[key])
def __eq__(self, other):
if not isinstance(other, RegionProperties):
return False
for key in PROP_VALS:
try:
v1 = getattr(self, key, None)
v2 = getattr(other, key, None)
if isinstance(v1, tuple):
np.testing.assert_equal(v1, v2)
else:
# so that NaNs are equal
cp.testing.assert_array_equal(
getattr(self, key, None), getattr(other, key, None)
)
except AssertionError:
return False
return True
# For compatibility with code written prior to 0.16
_RegionProperties = RegionProperties
def _props_to_dict(regions, properties=("label", "bbox"), separator="-"):
"""Convert image region properties list into a column dictionary.
Parameters
----------
regions : (N,) list
List of RegionProperties objects as returned by :func:`regionprops`.
properties : tuple or list of str, optional
Properties that will be included in the resulting dictionary
For a list of available properties, please see :func:`regionprops`.
Users should remember to add "label" to keep track of region
identities.
separator : str, optional
For non-scalar properties not listed in OBJECT_COLUMNS, each element
will appear in its own column, with the index of that element separated
from the property name by this separator. For example, the inertia
tensor of a 2D region will appear in four columns:
``inertia_tensor-0-0``, ``inertia_tensor-0-1``, ``inertia_tensor-1-0``,
and ``inertia_tensor-1-1`` (where the separator is ``-``).
Object columns are those that cannot be split in this way because the
number of columns would change depending on the object. For example,
``image`` and ``coords``.
Returns
-------
out_dict : dict
Dictionary mapping property names to an array of values of that
property, one value per region. This dictionary can be used as input to
pandas ``DataFrame`` to map property names to columns in the frame and
regions to rows.
Notes
-----
Each column contains either a scalar property, an object property, or an
element in a multidimensional array.
Properties with scalar values for each region, such as "eccentricity", will
appear as a float or int array with that property name as key.
Multidimensional properties *of fixed size* for a given image dimension,
such as "centroid" (every centroid will have three elements in a 3D image,
no matter the region size), will be split into that many columns, with the
name {property_name}{separator}{element_num} (for 1D properties),
{property_name}{separator}{elem_num0}{separator}{elem_num1} (for 2D
properties), and so on.
For multidimensional properties that don't have a fixed size, such as
"image" (the image of a region varies in size depending on the region
size), an object array will be used, with the corresponding property name
as the key.
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage import util, measure
>>> image = cp.array(data.coins())
>>> label_image = measure.label(image > 110, connectivity=image.ndim)
>>> proplist = regionprops(label_image, image)
>>> props = _props_to_dict(proplist, properties=['label', 'inertia_tensor',
... 'inertia_tensor_eigvals'])
>>> props # doctest: +ELLIPSIS +SKIP
{'label': array([ 1, 2, ...]), ...
'inertia_tensor-0-0': array([ 4.012...e+03, 8.51..., ...]), ...
...,
'inertia_tensor_eigvals-1': array([ 2.67...e+02, 2.83..., ...])}
The resulting dictionary can be directly passed to pandas, if installed, to
obtain a clean DataFrame:
>>> import pandas as pd # doctest: +SKIP
>>> data = pd.DataFrame(props) # doctest: +SKIP
>>> data.head() # doctest: +SKIP
label inertia_tensor-0-0 ... inertia_tensor_eigvals-1
0 1 4012.909888 ... 267.065503
1 2 8.514739 ... 2.834806
2 3 0.666667 ... 0.000000
3 4 0.000000 ... 0.000000
4 5 0.222222 ... 0.111111
"""
out = {}
n = len(regions)
for prop in properties:
r = regions[0]
# Copy the original property name so the output will have the
# user-provided property name in the case of deprecated names.
orig_prop = prop
# determine the current property name for any deprecated property.
prop = PROPS.get(prop, prop)
rp = getattr(r, prop)
if prop in COL_DTYPES:
dtype = COL_DTYPES[prop]
else:
func = r._extra_properties[prop]
dtype = _infer_regionprop_dtype(
func,
intensity=r._intensity_image is not None,
ndim=r.image.ndim,
)
is_0dim_array = isinstance(rp, cp.ndarray) and rp.ndim == 0
# scalars and objects are dedicated one column per prop
# array properties are raveled into multiple columns
# for more info, refer to notes 1
if (
cp.isscalar(rp)
or is_0dim_array
or prop in OBJECT_COLUMNS
or dtype is np.object_
):
if prop in OBJECT_COLUMNS:
# keep objects in a NumPy array
column_buffer = np.empty(n, dtype=dtype)
for i in range(n):
column_buffer[i] = regions[i][prop]
out[orig_prop] = np.copy(column_buffer)
else:
column_buffer = []
for i in range(n):
p = regions[i][prop]
column_buffer.append(p)
column_buffer = cp.array(column_buffer)
out[orig_prop] = column_buffer
else:
if isinstance(rp, cp.ndarray):
shape = rp.shape
else:
shape = (len(rp),)
# precompute property column names and locations
modified_props = []
locs = []
for ind in np.ndindex(shape):
modified_props.append(
separator.join(map(str, (orig_prop,) + ind))
)
locs.append(ind if len(ind) > 1 else ind[0])
# fill temporary column data_array
n_columns = len(locs)
column_data = cp.empty((n, n_columns), dtype=dtype)
for k in range(n):
rp = regions[k][prop]
for i, loc in enumerate(locs):
column_data[k, i] = rp[loc]
# add the columns to the output dictionary
for i, modified_prop in enumerate(modified_props):
out[modified_prop] = column_data[:, i]
return out
def regionprops_table(
label_image,
intensity_image=None,
properties=("label", "bbox"),
*,
cache=True,
separator="-",
extra_properties=None,
spacing=None,
):
"""Compute image properties and return them as a pandas-compatible table.
The table is a dictionary mapping column names to value arrays. See Notes
section below for details.
.. versionadded:: 0.16
Parameters
----------
label_image : (N, M[, P]) ndarray
Labeled input image. Labels with value 0 are ignored.
intensity_image : (M, N[, P][, C]) ndarray, optional
Intensity (i.e., input) image with same size as labeled image, plus
optionally an extra dimension for multichannel data. Currently,
this extra channel dimension, if present, must be the last axis.
Default is None.
.. versionchanged:: 0.18.0
The ability to provide an extra dimension for channels was added.
properties : tuple or list of str, optional
Properties that will be included in the resulting dictionary
For a list of available properties, please see :func:`regionprops`.
Users should remember to add "label" to keep track of region
identities.
cache : bool, optional
Determine whether to cache calculated properties. The computation is
much faster for cached properties, whereas the memory consumption
increases.
separator : str, optional
For non-scalar properties not listed in OBJECT_COLUMNS, each element
will appear in its own column, with the index of that element separated
from the property name by this separator. For example, the inertia
tensor of a 2D region will appear in four columns:
``inertia_tensor-0-0``, ``inertia_tensor-0-1``, ``inertia_tensor-1-0``,
and ``inertia_tensor-1-1`` (where the separator is ``-``).
Object columns are those that cannot be split in this way because the
number of columns would change depending on the object. For example,
``image`` and ``coords``.
extra_properties : Iterable of callables
Add extra property computation functions that are not included with
skimage. The name of the property is derived from the function name,
the dtype is inferred by calling the function on a small sample.
If the name of an extra property clashes with the name of an existing
property the extra property will not be visible and a UserWarning is
issued. A property computation function must take a region mask as its
first argument. If the property requires an intensity image, it must
accept the intensity image as the second argument.
spacing : tuple of float, shape (ndim, )
The pixel spacing along each axis of the image.
Returns
-------
out_dict : dict
Dictionary mapping property names to an array of values of that
property, one value per region. This dictionary can be used as input to
pandas ``DataFrame`` to map property names to columns in the frame and
regions to rows. If the image has no regions,
the arrays will have length 0, but the correct type.
Notes
-----
Each column contains either a scalar property, an object property, or an
element in a multidimensional array.
Properties with scalar values for each region, such as "eccentricity", will
appear as a float or int array with that property name as key.
Multidimensional properties *of fixed size* for a given image dimension,
such as "centroid" (every centroid will have three elements in a 3D image,
no matter the region size), will be split into that many columns, with the
name {property_name}{separator}{element_num} (for 1D properties),
{property_name}{separator}{elem_num0}{separator}{elem_num1} (for 2D
properties), and so on.
For multidimensional properties that don't have a fixed size, such as
"image" (the image of a region varies in size depending on the region
size), an object array will be used, with the corresponding property name
as the key.
Examples
--------
>>> from skimage import data, util, measure
>>> image = data.coins()
>>> label_image = measure.label(image > 110, connectivity=image.ndim)
>>> props = measure.regionprops_table(label_image, image,
... properties=['label', 'inertia_tensor',
... 'inertia_tensor_eigvals'])
>>> props # doctest: +ELLIPSIS +SKIP
{'label': array([ 1, 2, ...]), ...
'inertia_tensor-0-0': array([ 4.012...e+03, 8.51..., ...]), ...
...,
'inertia_tensor_eigvals-1': array([ 2.67...e+02, 2.83..., ...])}
The resulting dictionary can be directly passed to pandas, if installed, to
obtain a clean DataFrame:
>>> import pandas as pd # doctest: +SKIP
>>> data = pd.DataFrame(props) # doctest: +SKIP
>>> data.head() # doctest: +SKIP
label inertia_tensor-0-0 ... inertia_tensor_eigvals-1
0 1 4012.909888 ... 267.065503
1 2 8.514739 ... 2.834806
2 3 0.666667 ... 0.000000
3 4 0.000000 ... 0.000000
4 5 0.222222 ... 0.111111
[5 rows x 7 columns]
If we want to measure a feature that does not come as a built-in
property, we can define custom functions and pass them as
``extra_properties``. For example, we can create a custom function
that measures the intensity quartiles in a region:
>>> from skimage import data, util, measure
>>> import numpy as np
>>> def quartiles(regionmask, intensity):
... return np.percentile(intensity[regionmask], q=(25, 50, 75))
>>>
>>> image = data.coins()
>>> label_image = measure.label(image > 110, connectivity=image.ndim)
>>> props = measure.regionprops_table(label_image, intensity_image=image,
... properties=('label',),
... extra_properties=(quartiles,))
>>> import pandas as pd # doctest: +SKIP
>>> pd.DataFrame(props).head() # doctest: +SKIP
label quartiles-0 quartiles-1 quartiles-2
0 1 117.00 123.0 130.0
1 2 111.25 112.0 114.0
2 3 111.00 111.0 111.0
3 4 111.00 111.5 112.5
4 5 112.50 113.0 114.0
"""
regions = regionprops(
label_image,
intensity_image=intensity_image,
cache=cache,
extra_properties=extra_properties,
spacing=spacing,
)
if extra_properties is not None:
properties = list(properties) + [
prop.__name__ for prop in extra_properties
]
if len(regions) == 0:
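# No labeled regions were found: build a tiny placeholder image with a
# single one-pixel label so that regionprops/_props_to_dict can still
# infer the dtype and column layout of every requested property; the
# resulting columns are then sliced down to length 0 below.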
ndim = label_image.ndim
label_image = np.zeros((3,) * ndim, dtype=int)
label_image[(1,) * ndim] = 1
label_image = cp.asarray(label_image)
if intensity_image is not None:
intensity_image = cp.zeros(
label_image.shape + intensity_image.shape[ndim:],
dtype=intensity_image.dtype,
)
regions = regionprops(
label_image,
intensity_image=intensity_image,
cache=cache,
extra_properties=extra_properties,
spacing=spacing,
)
out_d = _props_to_dict(
regions, properties=properties, separator=separator
)
return {k: v[:0] for k, v in out_d.items()}
return _props_to_dict(regions, properties=properties, separator=separator)
def regionprops(
label_image,
intensity_image=None,
cache=True,
*,
extra_properties=None,
spacing=None,
):
r"""Measure properties of labeled image regions.
Parameters
----------
label_image : (M, N[, P]) ndarray
Labeled input image. Labels with value 0 are ignored.
.. versionchanged:: 0.14.1
Previously, ``label_image`` was processed by ``numpy.squeeze`` and
so any number of singleton dimensions was allowed. This resulted in
inconsistent handling of images with singleton dimensions. To
recover the old behaviour, use
``regionprops(np.squeeze(label_image), ...)``.
intensity_image : (M, N[, P][, C]) ndarray, optional
Intensity (i.e., input) image with same size as labeled image, plus
optionally an extra dimension for multichannel data. Currently,
this extra channel dimension, if present, must be the last axis.
Default is None.
.. versionchanged:: 0.18.0
The ability to provide an extra dimension for channels was added.
cache : bool, optional
Determine whether to cache calculated properties. The computation is
much faster for cached properties, whereas the memory consumption
increases.
extra_properties : Iterable of callables
Add extra property computation functions that are not included with
skimage. The name of the property is derived from the function name,
the dtype is inferred by calling the function on a small sample.
If the name of an extra property clashes with the name of an existing
property the extra property will not be visible and a UserWarning is
issued. A property computation function must take a region mask as its
first argument. If the property requires an intensity image, it must
accept the intensity image as the second argument.
spacing: tuple of float, shape (ndim, )
The pixel spacing along each axis of the image.
Returns
-------
properties : list of RegionProperties
Each item describes one labeled region, and can be accessed using the
attributes listed below.
Notes
-----
The following properties can be accessed as attributes or keys:
**num_pixels** : int
Number of foreground pixels.
**area** : float
Area of the region i.e. number of pixels of the region scaled by pixel-area.
**area_bbox** : float
Area of the bounding box i.e. number of pixels of bounding box scaled by pixel-area.
**area_convex** : float
Area of the convex hull image, which is the smallest convex
polygon that encloses the region.
**area_filled** : float
Area of the region with all the holes filled in.
**axis_major_length** : float
The length of the major axis of the ellipse that has the same
normalized second central moments as the region.
**axis_minor_length** : float
The length of the minor axis of the ellipse that has the same
normalized second central moments as the region.
**bbox** : tuple
Bounding box ``(min_row, min_col, max_row, max_col)``.
Pixels belonging to the bounding box are in the half-open interval
``[min_row; max_row)`` and ``[min_col; max_col)``.
**centroid** : array
Centroid coordinate tuple ``(row, col)``.
**centroid_local** : array
Centroid coordinate tuple ``(row, col)``, relative to region bounding
box.
**centroid_weighted** : array
Centroid coordinate tuple ``(row, col)`` weighted with intensity
image.
**centroid_weighted_local** : array
Centroid coordinate tuple ``(row, col)``, relative to region bounding
box, weighted with intensity image.
**coords_scaled** : (N, 2) ndarray
Coordinate list ``(row, col)`` of the region scaled by ``spacing``.
**coords** : (N, 2) ndarray
Coordinate list ``(row, col)`` of the region.
**eccentricity** : float
Eccentricity of the ellipse that has the same second-moments as the
region. The eccentricity is the ratio of the focal distance
(distance between focal points) over the major axis length.
The value is in the interval [0, 1).
When it is 0, the ellipse becomes a circle.
**equivalent_diameter_area** : float
The diameter of a circle with the same area as the region.
**euler_number** : int
Euler characteristic of the set of non-zero pixels.
Computed as number of connected components subtracted by number of
holes (input.ndim connectivity). In 3D, number of connected
components plus number of holes subtracted by number of tunnels.
**extent** : float
Ratio of pixels in the region to pixels in the total bounding box.
Computed as ``area / (rows * cols)``
**feret_diameter_max** : float
Maximum Feret's diameter computed as the longest distance between
points around a region's convex hull contour as determined by
``find_contours``. [5]_
**image** : (H, J) ndarray
Sliced binary region image which has the same size as bounding box.
**image_convex** : (H, J) ndarray
Binary convex hull image which has the same size as bounding box.
**image_filled** : (H, J) ndarray
Binary region image with filled holes which has the same size as
bounding box.
**image_intensity** : ndarray
Image inside region bounding box.
**inertia_tensor** : ndarray
Inertia tensor of the region for the rotation around its mass.
**inertia_tensor_eigvals** : tuple
The eigenvalues of the inertia tensor in decreasing order.
**intensity_max** : float
Value with the greatest intensity in the region.
**intensity_mean** : float
Value with the mean intensity in the region.
**intensity_min** : float
Value with the least intensity in the region.
**label** : int
The label in the labeled input image.
**moments** : (3, 3) ndarray
Spatial moments up to 3rd order::
m_ij = sum{ array(row, col) * row^i * col^j }
where the sum is over the `row`, `col` coordinates of the region.
**moments_central** : (3, 3) ndarray
Central moments (translation invariant) up to 3rd order::
mu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j }
where the sum is over the `row`, `col` coordinates of the region,
and `row_c` and `col_c` are the coordinates of the region's centroid.
**moments_hu** : tuple
Hu moments (translation, scale and rotation invariant).
**moments_normalized** : (3, 3) ndarray
Normalized moments (translation and scale invariant) up to 3rd order::
nu_ij = mu_ij / m_00^[(i+j)/2 + 1]
where `m_00` is the zeroth spatial moment.
**moments_weighted** : (3, 3) ndarray
Spatial moments of intensity image up to 3rd order::
wm_ij = sum{ array(row, col) * row^i * col^j }
where the sum is over the `row`, `col` coordinates of the region.
**moments_weighted_central** : (3, 3) ndarray
Central moments (translation invariant) of intensity image up to
3rd order::
wmu_ij = sum{ array(row, col) * (row - row_c)^i * (col - col_c)^j }
where the sum is over the `row`, `col` coordinates of the region,
and `row_c` and `col_c` are the coordinates of the region's weighted
centroid.
**moments_weighted_hu** : tuple
Hu moments (translation, scale and rotation invariant) of intensity
image.
**moments_weighted_normalized** : (3, 3) ndarray
Normalized moments (translation and scale invariant) of intensity
image up to 3rd order::
wnu_ij = wmu_ij / wm_00^[(i+j)/2 + 1]
where ``wm_00`` is the zeroth spatial moment (intensity-weighted area).
**orientation** : float
Angle between the 0th axis (rows) and the major
axis of the ellipse that has the same second moments as the region,
ranging from `-pi/2` to `pi/2` counter-clockwise.
**perimeter** : float
Perimeter of object which approximates the contour as a line
through the centers of border pixels using a 4-connectivity.
**perimeter_crofton** : float
Perimeter of object approximated by the Crofton formula in 4
directions.
**slice** : tuple of slices
A slice to extract the object from the source image.
**solidity** : float
Ratio of pixels in the region to pixels of the convex hull image.
Each region also supports iteration, so that you can do::
for prop in region:
print(prop, region[prop])
See Also
--------
label
References
----------
.. [1] Wilhelm Burger, Mark Burge. Principles of Digital Image Processing:
Core Algorithms. Springer-Verlag, London, 2009.
.. [2] B. Jähne. Digital Image Processing. Springer-Verlag,
Berlin-Heidelberg, 6. edition, 2005.
.. [3] T. H. Reiss. Recognizing Planar Objects Using Invariant Image
Features, from Lecture notes in computer science, p. 676. Springer,
Berlin, 1993.
.. [4] https://en.wikipedia.org/wiki/Image_moment
.. [5] W. Pabst, E. Gregorová. Characterization of particles and particle
systems, pp. 27-28. ICT Prague, 2007.
https://old.vscht.cz/sil/keramika/Characterization_of_particles/CPPS%20_English%20version_.pdf
Examples
--------
>>> from skimage import data, util
>>> from cucim.skimage.measure import label, regionprops
>>> img = cp.asarray(util.img_as_ubyte(data.coins()) > 110)
>>> label_img = label(img, connectivity=img.ndim)
>>> props = regionprops(label_img)
>>> # centroid of first labeled object
>>> props[0].centroid
(22.72987986048314, 81.91228523446583)
>>> # centroid of first labeled object
>>> props[0]['centroid']
(22.72987986048314, 81.91228523446583)
Add custom measurements by passing functions as ``extra_properties``
>>> from skimage import data, util
>>> from cucim.skimage.measure import label, regionprops
>>> import numpy as np
>>> img = cp.asarray(util.img_as_ubyte(data.coins()) > 110)
>>> label_img = label(img, connectivity=img.ndim)
>>> def pixelcount(regionmask):
... return np.sum(regionmask)
>>> props = regionprops(label_img, extra_properties=(pixelcount,))
>>> props[0].pixelcount
array(7741)
>>> props[1]['pixelcount']
array(42)
""" # noqa
if label_image.ndim not in (2, 3):
raise TypeError("Only 2-D and 3-D images supported.")
if not cp.issubdtype(label_image.dtype, cp.integer):
if cp.issubdtype(label_image.dtype, bool):
raise TypeError(
"Non-integer image types are ambiguous: "
"use skimage.measure.label to label the connected"
"components of label_image,"
"or label_image.astype(np.uint8) to interpret"
"the True values as a single label."
)
else:
raise TypeError("Non-integer label_image types are ambiguous")
regions = []
# CuPy Backend: ndimage.find_objects not implemented
objects = cpu_find_objects(cp.asnumpy(label_image)) # synchronize!
for i, sl in enumerate(objects):
if sl is None:
continue
label = i + 1
props = RegionProperties(
sl,
label,
label_image,
intensity_image,
cache,
spacing=spacing,
extra_properties=extra_properties,
)
regions.append(props)
return regions
def _parse_docs():
import re
import textwrap
doc = regionprops.__doc__ or ""
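# Extract every "**property** : type" entry from the Notes section of the
# regionprops docstring above, capturing the indented description that
# follows each entry.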
matches = re.finditer(
r"\*\*(\w+)\*\* \:.*?\n(.*?)(?=\n [\*\S]+)", doc, flags=re.DOTALL
)
prop_doc = {m.group(1): textwrap.dedent(m.group(2)) for m in matches}
return prop_doc
def _install_properties_docs():
prop_doc = _parse_docs()
for p in [
member for member in dir(RegionProperties) if not member.startswith("_")
]:
getattr(RegionProperties, p).__doc__ = prop_doc[p]
if __debug__:
# don't install docstrings when in optimized/non-debug mode
_install_properties_docs()
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/_regionprops_utils.py
|
import math
import cupy as cp
import cupyx.scipy.ndimage as ndi
import numpy as np
from cucim.skimage._vendored import pad
# Don't allocate STREL_* on GPU as we don't know in advance which device
# fmt: off
STREL_4 = np.array([[0, 1, 0],
[1, 1, 1],
[0, 1, 0]], dtype=np.uint8)
STREL_8 = np.ones((3, 3), dtype=np.uint8)
# fmt: on
# Coefficients from
# Ohser J., Nagel W., Schladitz K. (2002) The Euler Number of Discretized Sets
# - On the Choice of Adjacency in Homogeneous Lattices.
# In: Mecke K., Stoyan D. (eds) Morphology of Condensed Matter. Lecture Notes
# in Physics, vol 600. Springer, Berlin, Heidelberg.
# The value of coefficients correspond to the contributions to the Euler number
# of specific voxel configurations, which are themselves encoded thanks to a
# LUT. Computing the Euler number from the addition of the contributions of
# local configurations is possible thanks to an integral geometry formula
# (see the paper by Ohser et al. for more details).
EULER_COEFS2D_4 = [0, 1, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, 0]
EULER_COEFS2D_8 = [0, 0, 0, 0, 0, 0, -1, 0, 1, 0, 0, 0, 0, 0, -1, 0]
# fmt: off
EULER_COEFS3D_26 = np.array([0, 1, 1, 0, 1, 0, -2, -1,
1, -2, 0, -1, 0, -1, -1, 0,
1, 0, -2, -1, -2, -1, -1, -2,
-6, -3, -3, -2, -3, -2, 0, -1,
1, -2, 0, -1, -6, -3, -3, -2,
-2, -1, -1, -2, -3, 0, -2, -1,
0, -1, -1, 0, -3, -2, 0, -1,
-3, 0, -2, -1, 0, 1, 1, 0,
1, -2, -6, -3, 0, -1, -3, -2,
-2, -1, -3, 0, -1, -2, -2, -1,
0, -1, -3, -2, -1, 0, 0, -1,
-3, 0, 0, 1, -2, -1, 1, 0,
-2, -1, -3, 0, -3, 0, 0, 1,
-1, 4, 0, 3, 0, 3, 1, 2,
-1, -2, -2, -1, -2, -1, 1,
0, 0, 3, 1, 2, 1, 2, 2, 1,
1, -6, -2, -3, -2, -3, -1, 0,
0, -3, -1, -2, -1, -2, -2, -1,
-2, -3, -1, 0, -1, 0, 4, 3,
-3, 0, 0, 1, 0, 1, 3, 2,
0, -3, -1, -2, -3, 0, 0, 1,
-1, 0, 0, -1, -2, 1, -1, 0,
-1, -2, -2, -1, 0, 1, 3, 2,
-2, 1, -1, 0, 1, 2, 2, 1,
0, -3, -3, 0, -1, -2, 0, 1,
-1, 0, -2, 1, 0, -1, -1, 0,
-1, -2, 0, 1, -2, -1, 3, 2,
-2, 1, 1, 2, -1, 0, 2, 1,
-1, 0, -2, 1, -2, 1, 1, 2,
-2, 3, -1, 2, -1, 2, 0, 1,
0, -1, -1, 0, -1, 0, 2, 1,
-1, 2, 0, 1, 0, 1, 1, 0, ])
# fmt: on
def euler_number(image, connectivity=None):
"""Calculate the Euler characteristic in binary image.
For 2D objects, the Euler number is the number of objects minus the number
of holes. For 3D objects, the Euler number is obtained as the number of
objects plus the number of holes, minus the number of tunnels, or loops.
Parameters
----------
image : (N, M) ndarray or (N, M, D) ndarray
2D or 3D input image.
If image is not binary, all values strictly greater than zero
are considered as the object.
connectivity : int, optional
Maximum number of orthogonal hops to consider a pixel/voxel
as a neighbor.
Accepted values range from 1 to ``input.ndim``. If ``None``, a full
connectivity of ``input.ndim`` is used.
4 or 8 neighborhoods are defined for 2D images (connectivity 1 and 2,
respectively).
6 or 26 neighborhoods are defined for 3D images (connectivity 1 and 3,
respectively). Connectivity 2 is not defined.
Returns
-------
euler_number : int
Euler characteristic of the set of all objects in the image.
Notes
-----
The Euler characteristic is an integer number that describes the
topology of the set of all objects in the input image. If object is
4-connected, then background is 8-connected, and conversely.
The computation of the Euler characteristic is based on an integral
geometry formula in discretized space. In practice, a neighborhood
configuration is constructed, and a LUT is applied for each
configuration. The coefficients used are the ones of Ohser et al.
It can be useful to compute the Euler characteristic for several
connectivities. A large relative difference between results
for different connectivities suggests that the image resolution
(with respect to the size of objects and holes) is too low.
References
----------
.. [1] S. Rivollier. Analyse d’image geometrique et morphometrique par
diagrammes de forme et voisinages adaptatifs generaux. PhD thesis,
2010. Ecole Nationale Superieure des Mines de Saint-Etienne.
https://tel.archives-ouvertes.fr/tel-00560838
.. [2] Ohser J., Nagel W., Schladitz K. (2002) The Euler Number of
Discretized Sets - On the Choice of Adjacency in Homogeneous
Lattices. In: Mecke K., Stoyan D. (eds) Morphology of Condensed
Matter. Lecture Notes in Physics, vol 600. Springer, Berlin,
Heidelberg.
Examples
--------
>>> import cupy as cp
>>> SAMPLE = cp.zeros((100,100,100))
>>> SAMPLE[40:60, 40:60, 40:60] = 1
>>> euler_number(SAMPLE) # doctest: +ELLIPSIS
1...
>>> SAMPLE[45:55,45:55,45:55] = 0;
>>> euler_number(SAMPLE) # doctest: +ELLIPSIS
2...
>>> SAMPLE = cp.array([[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
... [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
... [1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0],
... [0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1],
... [0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]])
>>> euler_number(SAMPLE) # doctest:
array(0)
>>> euler_number(SAMPLE, connectivity=1) # doctest:
array(2)
""" # noqa
# as image can be a label image, transform it to binary
image = (image > 0).astype(int)
image = pad(image, pad_width=1, mode="constant")
# check connectivity
if connectivity is None:
connectivity = image.ndim
# config variable is an adjacency configuration. A coefficient given by
# variable coefs is attributed to each configuration in order to get
# the Euler characteristic.
if image.ndim == 2:
config = cp.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]])
if connectivity == 1:
coefs = EULER_COEFS2D_4
else:
coefs = EULER_COEFS2D_8
bins = 16
else: # 3D images
if connectivity == 2:
raise NotImplementedError(
"For 3D images, Euler number is implemented "
"for connectivities 1 and 3 only"
)
# fmt: off
config = cp.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
[[0, 0, 0], [0, 1, 4], [0, 2, 8]],
[[0, 0, 0], [0, 16, 64], [0, 32, 128]]])
# fmt: on
if connectivity == 1:
coefs = EULER_COEFS3D_26[::-1]
else:
coefs = EULER_COEFS3D_26
bins = 256
# XF has values in the 0-255 range in 3D, and in the 0-15 range in 2D,
# with one unique value for each binary configuration of the
# 27-voxel cube in 3D / 8-pixel square in 2D, up to symmetries
XF = ndi.convolve(image, config, mode="constant", cval=0)
h = cp.bincount(XF.ravel(), minlength=bins)
coefs = cp.asarray(coefs)
if image.ndim == 2:
return coefs @ h
else:
return int(0.125 * coefs @ h)
def perimeter(image, neighborhood=4):
"""Calculate total perimeter of all objects in binary image.
Parameters
----------
image : (N, M) ndarray
2D binary image.
neighborhood : 4 or 8, optional
Neighborhood connectivity for border pixel determination. It is used to
compute the contour. A higher neighborhood widens the border on which
the perimeter is computed.
Returns
-------
perimeter : float
Total perimeter of all objects in binary image.
References
----------
.. [1] K. Benkrid, D. Crookes. Design and FPGA Implementation of
a Perimeter Estimator. The Queen's University of Belfast.
http://www.cs.qub.ac.uk/~d.crookes/webpubs/papers/perimeter.doc
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage import util
>>> from cucim.skimage.measure import label
>>> # coins image (binary)
>>> img_coins = cp.array(data.coins() > 110)
>>> # total perimeter of all objects in the image
>>> perimeter(img_coins, neighborhood=4) # doctest: +ELLIPSIS
array(7796.86799644)
>>> perimeter(img_coins, neighborhood=8) # doctest: +ELLIPSIS
array(8806.26807333)
"""
if image.ndim != 2:
raise NotImplementedError("`perimeter` supports 2D images only")
if neighborhood == 4:
strel = STREL_4
else:
strel = STREL_8
strel = cp.asarray(strel)
image = image.astype(cp.uint8)
eroded_image = ndi.binary_erosion(image, strel, border_value=0)
border_image = image - eroded_image
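# Score each border pixel by its local configuration: after convolving
# with the kernel below, a border pixel's value is
# 1 + 2 * (number of 4-connected border neighbors)
# + 10 * (number of diagonal border neighbors).
# Only such odd-valued configurations receive a nonzero weight; straight
# runs contribute 1, diagonal runs sqrt(2), and corners the average of
# the two, following the perimeter estimator referenced in the docstring.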
perimeter_weights = cp.zeros(50, dtype=cp.float64)
perimeter_weights[[5, 7, 15, 17, 25, 27]] = 1
perimeter_weights[[21, 33]] = math.sqrt(2)
perimeter_weights[[13, 23]] = (1 + math.sqrt(2)) / 2
perimeter_image = ndi.convolve(
border_image,
cp.array([[10, 2, 10], [2, 1, 2], [10, 2, 10]]),
mode="constant",
cval=0,
)
# You can also write
# return perimeter_weights[perimeter_image].sum()
# but that was measured as taking much longer than bincount + cp.dot (5x
# as much time)
perimeter_histogram = cp.bincount(perimeter_image.ravel(), minlength=50)
total_perimeter = perimeter_histogram @ perimeter_weights
return total_perimeter
def perimeter_crofton(image, directions=4):
"""Calculate total Crofton perimeter of all objects in binary image.
Parameters
----------
image : (N, M) ndarray
2D image. If image is not binary, all values strictly greater than zero
are considered as the object.
directions : 2 or 4, optional
Number of directions used to approximate the Crofton perimeter. By
default, 4 is used: it should be more accurate than 2.
Computation time is the same in both cases.
Returns
-------
perimeter : float
Total perimeter of all objects in binary image.
Notes
-----
This measure is based on the Crofton formula [1]_, which is a measure from
integral geometry. It is defined for general curve length evaluation via
a double integral along all directions. In a discrete
space, 2 or 4 directions give a quite good approximation, 4 being more
accurate than 2 for more complex shapes.
Similar to :func:`~.measure.perimeter`, this function returns an
approximation of the perimeter in continuous space.
References
----------
.. [1] https://en.wikipedia.org/wiki/Crofton_formula
.. [2] S. Rivollier. Analyse d’image geometrique et morphometrique par
diagrammes de forme et voisinages adaptatifs generaux. PhD thesis,
2010.
Ecole Nationale Superieure des Mines de Saint-Etienne.
https://tel.archives-ouvertes.fr/tel-00560838
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage import util
>>> from skimage import data
>>> from skimage.measure import label
>>> # coins image (binary)
>>> img_coins = cp.array(data.coins() > 110)
>>> # total perimeter of all objects in the image
>>> perimeter_crofton(img_coins, directions=2) # doctest: +ELLIPSIS
array(8144.57895443)
>>> perimeter_crofton(img_coins, directions=4) # doctest: +ELLIPSIS
array(7837.07740694)
"""
if image.ndim != 2:
raise NotImplementedError("`perimeter_crofton` supports 2D images only")
# as image could be a label image, transform it to binary image
image = (image > 0).astype(cp.uint8)
image = pad(image, pad_width=1, mode="constant")
XF = ndi.convolve(
image,
cp.array([[0, 0, 0], [0, 1, 4], [0, 2, 8]]),
mode="constant",
cval=0,
)
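# Each pixel of XF encodes its 2 x 2 neighborhood (itself, right, down,
# down-right) as a 4-bit code in [0, 15]; h counts the occurrences of each
# configuration so a LUT of per-configuration contributions can be applied
# below.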
h = cp.bincount(XF.ravel(), minlength=16)
# definition of the LUT
# fmt: off
if directions == 2:
coefs = [0, np.pi / 2, 0, 0, 0, np.pi / 2, 0, 0,
np.pi / 2, np.pi, 0, 0, np.pi / 2, np.pi, 0, 0]
else:
sq2 = math.sqrt(2)
coefs = [0, np.pi / 4 * (1 + 1 / sq2),
np.pi / (4 * sq2),
np.pi / (2 * sq2), 0,
np.pi / 4 * (1 + 1 / sq2),
0, np.pi / (4 * sq2), np.pi / 4, np.pi / 2,
np.pi / (4 * sq2), np.pi / (4 * sq2),
np.pi / 4, np.pi / 2, 0, 0]
# fmt: on
total_perimeter = cp.asarray(coefs) @ h
return total_perimeter
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/_moments_analytical.py
|
import itertools
import math
import cupy as cp
import numpy as np
_order0_or_1 = """
mc[0] = m[0];
"""
_order2_2d = """
/* Implementation of the commented code below with C-order raveled
* indices into 3 x 3 matrices, m and mc.
*
* mc[0, 0] = m[0, 0];
* cx = m[1, 0] / m[0, 0];
* cy = m[0, 1] / m[0, 0];
* mc[1, 1] = m[1, 1] - cx*m[0, 1];
* mc[2, 0] = m[2, 0] - cx*m[1, 0];
* mc[0, 2] = m[0, 2] - cy*m[0, 1];
*/
mc[0] = m[0];
F cx = m[3] / m[0];
F cy = m[1] / m[0];
mc[4] = m[4] - cx*m[1];
mc[6] = m[6] - cx*m[3];
mc[2] = m[2] - cy*m[1];
"""
_order3_2d = """
/* Implementation of the commented code below with C-order raveled
* indices into 4 x 4 matrices, m and mc.
*
* mc[0, 0] = m[0, 0];
* cx = m[1, 0] / m[0, 0];
* cy = m[0, 1] / m[0, 0];
* mc[1, 1] = m[1, 1] - cx*m[0, 1];
* mc[2, 0] = m[2, 0] - cx*m[1, 0];
* mc[0, 2] = m[0, 2] - cy*m[0, 1];
* mc[2, 1] = (m[2, 1] - 2*cx*m[1, 1] - cy*m[2, 0] + cx*cx*m[0, 1] + cy*cx*m[1, 0]);
* mc[1, 2] = (m[1, 2] - 2*cy*m[1, 1] - cx*m[0, 2] + 2*cy*cx*m[0, 1]);
* mc[3, 0] = m[3, 0] - 3*cx*m[2, 0] + 2*cx*cx*m[1, 0];
* mc[0, 3] = m[0, 3] - 3*cy*m[0, 2] + 2*cy*cy*m[0, 1];
*/
mc[0] = m[0];
F cx = m[4] / m[0];
F cy = m[1] / m[0];
// 2nd order moments
mc[5] = m[5] - cx*m[1];
mc[8] = m[8] - cx*m[4];
mc[2] = m[2] - cy*m[1];
// 3rd order moments
mc[9] = (m[9] - 2*cx*m[5] - cy*m[8] + cx*cx*m[1] + cy*cx*m[4]);
mc[6] = (m[6] - 2*cy*m[5] - cx*m[2] + 2*cy*cx*m[1]);
mc[12] = m[12] - 3*cx*m[8] + 2*cx*cx*m[4];
mc[3] = m[3] - 3*cy*m[2] + 2*cy*cy*m[1];
""" # noqa
# Note: the 3D kernels below likewise use C-order raveled indices.
_order2_3d = """
/* Implementation of the commented code below with C-order raveled
* indices into shape (3, 3, 3) matrices, m and mc.
*
* mc[0, 0, 0] = m[0, 0, 0];
* cx = m[1, 0, 0] / m[0, 0, 0];
* cy = m[0, 1, 0] / m[0, 0, 0];
* cz = m[0, 0, 1] / m[0, 0, 0];
* mc[0, 0, 2] = -cz*m[0, 0, 1] + m[0, 0, 2];
* mc[0, 1, 1] = -cy*m[0, 0, 1] + m[0, 1, 1];
* mc[0, 2, 0] = -cy*m[0, 1, 0] + m[0, 2, 0];
* mc[1, 0, 1] = -cx*m[0, 0, 1] + m[1, 0, 1];
* mc[1, 1, 0] = -cx*m[0, 1, 0] + m[1, 1, 0];
* mc[2, 0, 0] = -cx*m[1, 0, 0] + m[2, 0, 0];
*/
mc[0] = m[0];
F cx = m[9] / m[0];
F cy = m[3] / m[0];
F cz = m[1] / m[0];
// 2nd order moments
mc[2] = -cz*m[1] + m[2];
mc[4] = -cy*m[1] + m[4];
mc[6] = -cy*m[3] + m[6];
mc[10] = -cx*m[1] + m[10];
mc[12] = -cx*m[3] + m[12];
mc[18] = -cx*m[9] + m[18];
"""
_order3_3d = """
/* Implementation of the commented code below with C-order raveled
* indices into shape (4, 4, 4) matrices, m and mc.
*
* mc[0, 0, 0] = m[0, 0, 0];
* cx = m[1, 0, 0] / m[0, 0, 0];
* cy = m[0, 1, 0] / m[0, 0, 0];
* cz = m[0, 0, 1] / m[0, 0, 0];
* // 2nd order moments
* mc[0, 0, 2] = -cz*m[0, 0, 1] + m[0, 0, 2];
* mc[0, 1, 1] = -cy*m[0, 0, 1] + m[0, 1, 1];
* mc[0, 2, 0] = -cy*m[0, 1, 0] + m[0, 2, 0];
* mc[1, 0, 1] = -cx*m[0, 0, 1] + m[1, 0, 1];
* mc[1, 1, 0] = -cx*m[0, 1, 0] + m[1, 1, 0];
* mc[2, 0, 0] = -cx*m[1, 0, 0] + m[2, 0, 0];
* // 3rd order moments
* mc[0, 0, 3] = (2*cz*cz*m[0, 0, 1] - 3*cz*m[0, 0, 2] + m[0, 0, 3]);
* mc[0, 1, 2] = (-cy*m[0, 0, 2] + 2*cz*(cy*m[0, 0, 1] - m[0, 1, 1]) + m[0, 1, 2]);
* mc[0, 2, 1] = (cy*cy*m[0, 0, 1] - 2*cy*m[0, 1, 1] + cz*(cy*m[0, 1, 0] - m[0, 2, 0]) + m[0, 2, 1]);
* mc[0, 3, 0] = (2*cy*cy*m[0, 1, 0] - 3*cy*m[0, 2, 0] + m[0, 3, 0]);
* mc[1, 0, 2] = (-cx*m[0, 0, 2] + 2*cz*(cx*m[0, 0, 1] - m[1, 0, 1]) + m[1, 0, 2]);
* mc[1, 1, 1] = (-cx*m[0, 1, 1] + cy*(cx*m[0, 0, 1] - m[1, 0, 1]) + cz*(cx*m[0, 1, 0] - m[1, 1, 0]) + m[1, 1, 1]);
* mc[1, 2, 0] = (-cx*m[0, 2, 0] - 2*cy*(-cx*m[0, 1, 0] + m[1, 1, 0]) + m[1, 2, 0]);
* mc[2, 0, 1] = (cx*cx*m[0, 0, 1] - 2*cx*m[1, 0, 1] + cz*(cx*m[1, 0, 0] - m[2, 0, 0]) + m[2, 0, 1]);
* mc[2, 1, 0] = (cx*cx*m[0, 1, 0] - 2*cx*m[1, 1, 0] + cy*(cx*m[1, 0, 0] - m[2, 0, 0]) + m[2, 1, 0]);
* mc[3, 0, 0] = (2*cx*cx*m[1, 0, 0] - 3*cx*m[2, 0, 0] + m[3, 0, 0]);
*/
mc[0] = m[0];
F cx = m[16] / m[0];
F cy = m[4] / m[0];
F cz = m[1] / m[0];
// 2nd order moments
mc[2] = -cz*m[1] + m[2];
mc[5] = -cy*m[1] + m[5];
mc[8] = -cy*m[4] + m[8];
mc[17] = -cx*m[1] + m[17];
mc[20] = -cx*m[4] + m[20];
mc[32] = -cx*m[16] + m[32];
// 3rd order moments
mc[3] = (2*cz*cz*m[1] - 3*cz*m[2] + m[3]);
mc[6] = (-cy*m[2] + 2*cz*(cy*m[1] - m[5]) + m[6]);
mc[9] = (cy*cy*m[1] - 2*cy*m[5] + cz*(cy*m[4] - m[8]) + m[9]);
mc[12] = (2*cy*cy*m[4] - 3*cy*m[8] + m[12]);
mc[18] = (-cx*m[2] + 2*cz*(cx*m[1] - m[17]) + m[18]);
mc[21] = (-cx*m[5] + cy*(cx*m[1] - m[17]) + cz*(cx*m[4] - m[20]) + m[21]);
mc[24] = (-cx*m[8] - 2*cy*(-cx*m[4] + m[20]) + m[24]);
mc[33] = (cx*cx*m[1] - 2*cx*m[17] + cz*(cx*m[16] - m[32]) + m[33]);
mc[36] = (cx*cx*m[4] - 2*cx*m[20] + cy*(cx*m[16] - m[32]) + m[36]);
mc[48] = (2*cx*cx*m[16] - 3*cx*m[32] + m[48]);
""" # noqa
def _moments_raw_to_central_fast(moments_raw):
"""Analytical formulae for 2D and 3D central moments of order < 4.
`moments_raw_to_central` will automatically call this function when
ndim < 4 and order < 4.
Parameters
----------
moments_raw : ndarray
The raw moments.
Returns
-------
moments_central : ndarray
The central moments.
"""
ndim = moments_raw.ndim
order = moments_raw.shape[0] - 1
# convert to float64 during the computation for better accuracy
moments_raw = moments_raw.astype(cp.float64, copy=False)
moments_central = cp.zeros_like(moments_raw)
if order >= 4 or ndim not in [2, 3]:
raise ValueError(
"This function only supports 2D or 3D moments of order < 4."
)
if ndim == 2:
if order < 2:
operation = _order0_or_1
elif order == 2:
operation = _order2_2d
elif order == 3:
operation = _order3_2d
elif ndim == 3:
if order < 2:
operation = _order0_or_1
elif order == 2:
operation = _order2_3d
elif order == 3:
operation = _order3_3d
kernel = cp.ElementwiseKernel(
"raw F m",
"raw F mc",
operation=operation,
name=f"order{order}_{ndim}d_kernel",
)
# run a single-threaded kernel, so we can avoid device->host->device copy
kernel(moments_raw, moments_central, size=1)
return moments_central
def moments_raw_to_central(moments_raw):
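"""Convert raw image moments to central moments.
For 2D or 3D moments of order < 4 this dispatches to the analytical GPU
kernels above; otherwise the general formula is evaluated on the host,
incurring a device->host transfer.
Parameters
----------
moments_raw : ndarray
The raw moments.
Returns
-------
moments_central : ndarray
The central moments.
Examples
--------
A minimal usage sketch (illustrative only, so the doctest is skipped):
>>> import cupy as cp
>>> from cucim.skimage.measure import moments
>>> image = cp.zeros((20, 20))
>>> image[13:17, 13:17] = 1
>>> mu = moments_raw_to_central(moments(image))  # doctest: +SKIP
"""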
ndim = moments_raw.ndim
order = moments_raw.shape[0] - 1
if ndim in [2, 3] and order < 4:
# fast path with analytical GPU kernels
# (avoids any host/device transfers)
moments_central = _moments_raw_to_central_fast(moments_raw)
return moments_central.astype(moments_raw.dtype, copy=False)
# Fallback to general formula applied on the host
m = cp.asnumpy(moments_raw) # synchronize
moments_central = np.zeros_like(moments_raw)
# centers as computed in centroid above
centers = tuple(m[tuple(np.eye(ndim, dtype=int))] / m[(0,) * ndim])
if ndim == 2:
# This is the general 2D formula from
# https://en.wikipedia.org/wiki/Image_moment#Central_moments
for p in range(order + 1):
for q in range(order + 1):
if p + q > order:
continue
for i in range(p + 1):
term1 = math.comb(p, i)
term1 *= (-centers[0]) ** (p - i)
for j in range(q + 1):
term2 = math.comb(q, j)
term2 *= (-centers[1]) ** (q - j)
moments_central[p, q] += term1 * term2 * m[i, j]
return moments_central
# The nested loops below are an n-dimensional extension of the 2D formula
# given at https://en.wikipedia.org/wiki/Image_moment#Central_moments
# iterate over all [0, order] (inclusive) on each axis
for orders in itertools.product(*((range(order + 1),) * ndim)):
# `orders` here is the index into the `moments_central` output array
if sum(orders) > order:
# skip any moment that is higher than the requested order
continue
# loop over terms from `m` contributing to `moments_central[orders]`
for idxs in itertools.product(*[range(o + 1) for o in orders]):
val = m[idxs]
for i_order, c, idx in zip(orders, centers, idxs):
val *= math.comb(i_order, idx)
val *= (-c) ** (i_order - idx)
moments_central[orders] += val
return cp.asarray(moments_central)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/entropy.py
|
import cupy as cp
from cupyx.scipy.stats import entropy as scipy_entropy
def shannon_entropy(image, base=2):
"""Calculate the Shannon entropy of an image.
The Shannon entropy is defined as S = -sum(pk * log(pk)),
where pk are frequency/probability of pixels of value k.
Parameters
----------
image : (N, M) ndarray
Grayscale input image.
base : float, optional
The logarithmic base to use.
Returns
-------
entropy : 0-dimensional float cupy.ndarray
Notes
-----
The returned value is measured in bits or shannon (Sh) for base=2, natural
unit (nat) for base=np.e and hartley (Hart) for base=10.
References
----------
.. [1] https://en.wikipedia.org/wiki/Entropy_(information_theory)
.. [2] https://en.wiktionary.org/wiki/Shannon_entropy
Examples
--------
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage.measure import shannon_entropy
>>> shannon_entropy(cp.array(data.camera()))
array(7.23169501)
""" # noqa: E501
_, counts = cp.unique(image, return_counts=True)
return scipy_entropy(counts, base=base)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/_blur_effect.py
|
import cupy as cp
import cucim.skimage._vendored.ndimage as ndi
from ..color import rgb2gray
from ..util import img_as_float
__all__ = ["blur_effect"]
def blur_effect(image, h_size=11, channel_axis=None, reduce_func=max):
"""Compute a metric that indicates the strength of blur in an image
(0 for no blur, 1 for maximal blur).
Parameters
----------
image : ndarray
RGB or grayscale nD image. The input image is converted to grayscale
before computing the blur metric.
h_size : int, optional
Size of the re-blurring filter.
channel_axis : int or None, optional
If None, the image is assumed to be grayscale (single-channel).
Otherwise, this parameter indicates which axis of the array
corresponds to color channels.
reduce_func : callable, optional
Function used to calculate the aggregation of blur metrics along all
axes. If set to None, the entire list is returned, where the i-th
element is the blur metric along the i-th axis. This function should be
a host function that operates on standard python floats.
Returns
-------
blur : float (0 to 1) or list of floats
Blur metric: by default, the maximum of blur metrics along all axes.
Notes
-----
`h_size` must keep the same value in order to compare results between
images. Most of the time, the default size (11) is enough. This means that
the metric can clearly discriminate blur up to an average 11x11 filter; if
blur is higher, the metric still gives good results but its values tend
towards an asymptote.
References
----------
.. [1] Frederique Crete, Thierry Dolmiere, Patricia Ladret, and Marina
Nicolas "The blur effect: perception and estimation with a new
no-reference perceptual blur metric" Proc. SPIE 6492, Human Vision and
Electronic Imaging XII, 64920I (2007)
https://hal.archives-ouvertes.fr/hal-00232709
:DOI:`10.1117/12.702790`
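Examples
--------
A minimal usage sketch mirroring the accompanying tests (values are not
reproduced here, so the computation lines are skipped under doctest):
>>> import cupy as cp
>>> from skimage import data
>>> from cucim.skimage.filters import gaussian
>>> from cucim.skimage.measure import blur_effect
>>> image = cp.array(data.astronaut())
>>> b_sharp = blur_effect(image, channel_axis=-1)  # doctest: +SKIP
>>> b_blur = blur_effect(gaussian(image, sigma=4, channel_axis=-1),
...                      channel_axis=-1)  # doctest: +SKIP
>>> bool(b_sharp < b_blur)  # doctest: +SKIP
True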
"""
if channel_axis is not None:
try:
# ensure color channels are in the final dimension
image = cp.moveaxis(image, channel_axis, -1)
except cp.AxisError:
print("channel_axis must be one of the image array dimensions")
raise
except TypeError:
print("channel_axis must be an integer")
raise
image = rgb2gray(image)
n_axes = image.ndim
image = img_as_float(image)
shape = image.shape
B = []
from ..filters import sobel
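# Reduce the per-axis sums to host scalars (plain Python floats) so that
# built-in ``reduce_func`` callables such as ``max`` receive ordinary
# numbers; this costs one device->host synchronization per axis.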
host_scalars = True
slices = tuple([slice(2, s - 1) for s in shape])
for ax in range(n_axes):
filt_im = ndi.uniform_filter1d(image, h_size, axis=ax)
im_sharp = cp.abs(sobel(image, axis=ax))
im_blur = cp.abs(sobel(filt_im, axis=ax))
T = cp.maximum(0, im_sharp - im_blur)
if host_scalars:
M1 = float(cp.sum(im_sharp[slices])) # synchronize
M2 = float(cp.sum(T[slices])) # synchronize
B.append(abs(M1 - M2) / M1)
else:
M1 = cp.sum(im_sharp[slices])
M2 = cp.sum(T[slices])
B.append(cp.abs(M1 - M2) / M1)
return B if reduce_func is None else reduce_func(B)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/__init__.py
|
from ._blur_effect import blur_effect
from ._colocalization import (
intersection_coeff,
manders_coloc_coeff,
manders_overlap_coeff,
pearson_corr_coeff,
)
from ._label import label
from ._moments import (
centroid,
inertia_tensor,
inertia_tensor_eigvals,
moments,
moments_central,
moments_coords,
moments_coords_central,
moments_hu,
moments_normalized,
)
from ._polygon import approximate_polygon, subdivide_polygon
from ._regionprops import (
euler_number,
perimeter,
perimeter_crofton,
regionprops,
regionprops_table,
)
from .block import block_reduce
from .entropy import shannon_entropy
from .profile import profile_line
__all__ = [
"blur_effect",
"regionprops",
"regionprops_table",
"perimeter",
"approximate_polygon",
"subdivide_polygon",
"block_reduce",
"centroid",
"moments",
"moments_central",
"moments_coords",
"moments_coords_central",
"moments_normalized",
"moments_hu",
"inertia_tensor",
"inertia_tensor_eigvals",
"profile_line",
"label",
"shannon_entropy",
"intersection_coeff",
"manders_coloc_coeff",
"manders_overlap_coeff",
"pearson_corr_coeff",
]
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/_colocalization.py
|
import cupy as cp
from .._shared.utils import as_binary_ndarray, check_shape_equality
from .._vendored import pearsonr
__all__ = [
"pearson_corr_coeff",
"manders_coloc_coeff",
"manders_overlap_coeff",
"intersection_coeff",
]
def pearson_corr_coeff(image0, image1, mask=None):
r"""Calculate Pearson's Correlation Coefficient between pixel intensities
in channels.
Parameters
----------
image0 : (M, N) ndarray
Image of channel A.
image1 : (M, N) ndarray
Image of channel B, to be correlated with channel A.
Must have same dimensions as `image0`.
mask : (M, N) ndarray of dtype bool, optional
Only `image0` and `image1` pixels within this region of interest mask
are included in the calculation. Must have same dimensions as `image0`.
Returns
-------
pcc : float
Pearson's correlation coefficient of the pixel intensities between
the two images, within the mask if provided.
p-value : float
Two-tailed p-value.
Notes
-----
Pearson's Correlation Coefficient (PCC) measures the linear correlation
between the pixel intensities of the two images. Its value ranges from -1
for perfect linear anti-correlation to +1 for perfect linear correlation.
The calculation of the p-value assumes that the intensities of pixels in
each input image are normally distributed.
Scipy's implementation of Pearson's correlation coefficient is used. Please
refer to it for further information and caveats [1]_.
.. math::
r = \frac{\sum (A_i - m_A) (B_i - m_B)}
{\sqrt{\sum (A_i - m_A)^2 \sum (B_i - m_B)^2}}
where
:math:`A_i` is the value of the :math:`i^{th}` pixel in `image0`
:math:`B_i` is the value of the :math:`i^{th}` pixel in `image1`,
:math:`m_A` is the mean of the pixel values in `image0`
:math:`m_B` is the mean of the pixel values in `image1`
A low PCC value does not necessarily mean that there is no correlation
between the two channel intensities, just that there is no linear
correlation. You may wish to plot the pixel intensities of each of the two
channels in a 2D scatterplot and use Spearman's rank correlation if a
non-linear correlation is visually identified [2]_. Also consider if you
are interested in correlation or co-occurence, in which case a method
involving segmentation masks (e.g. MCC or intersection coefficient) may be
more suitable [3]_ [4]_.
Providing the mask of only relevant sections of the image (e.g., cells, or
particular cellular compartments) and removing noise is important as the
PCC is sensitive to these measures [3]_ [4]_.
References
----------
.. [1] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html
.. [2] https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html
.. [3] Dunn, K. W., Kamocka, M. M., & McDonald, J. H. (2011). A practical
guide to evaluating colocalization in biological microscopy.
American journal of physiology. Cell physiology, 300(4), C723–C742.
https://doi.org/10.1152/ajpcell.00462.2010
.. [4] Bolte, S. and Cordelières, F.P. (2006), A guided tour into
subcellular colocalization analysis in light microscopy. Journal of
Microscopy, 224: 213-232.
https://doi.org/10.1111/j.1365-2818.2006.01706.x
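Examples
--------
A small illustrative sketch with synthetic, hypothetical images (skipped
under doctest since exact values are not reproduced here):
>>> import cupy as cp
>>> from cucim.skimage.measure import pearson_corr_coeff
>>> image0 = cp.arange(100, dtype=float).reshape(10, 10)
>>> image1 = 2.0 * image0 + 1.0  # perfectly linearly related
>>> pcc, pval = pearson_corr_coeff(image0, image1)  # doctest: +SKIP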
""" # noqa: E501
if mask is not None:
mask = as_binary_ndarray(mask, variable_name="mask")
check_shape_equality(image0, image1, mask)
image0 = image0[mask]
image1 = image1[mask]
else:
check_shape_equality(image0, image1)
# scipy pearsonr function only takes flattened arrays
image0 = image0.reshape(-1)
image1 = image1.reshape(-1)
return pearsonr(image0, image1, disable_checks=True)
def manders_coloc_coeff(image0, image1_mask, mask=None):
r"""Manders' colocalization coefficient between two channels.
Parameters
----------
image0 : (M, N) ndarray
Image of channel A. All pixel values should be non-negative.
image1_mask : (M, N) ndarray of dtype bool
Binary mask with segmented regions of interest in channel B.
Must have same dimensions as `image0`.
mask : (M, N) ndarray of dtype bool, optional
Only `image0` pixel values within this region of interest mask are
included in the calculation.
Must have same dimensions as `image0`.
Returns
-------
mcc : float
Manders' colocalization coefficient.
Notes
-----
Manders' Colocalization Coefficient (MCC) is the fraction of total
intensity of a certain channel (channel A) that is within the segmented
region of a second channel (channel B) [1]_. It ranges from 0 for no
colocalisation to 1 for complete colocalization. It is also referred to
as M1 and M2.
MCC is commonly used to measure the colocalization of a particular protein
in a subcellular compartment. Typically a segmentation mask for channel B
is generated by setting a threshold that the pixel values must be above
to be included in the MCC calculation. In this implementation,
the channel B mask is provided as the argument `image1_mask`, allowing
the exact segmentation method to be decided by the user beforehand.
The implemented equation is:
.. math::
r = \frac{\sum A_{i,coloc}}{\sum A_i}
where
:math:`A_i` is the value of the :math:`i^{th}` pixel in `image0`
:math:`A_{i,coloc} = A_i` if :math:`Bmask_i > 0`
:math:`Bmask_i` is the value of the :math:`i^{th}` pixel in
`image1_mask`
MCC is sensitive to noise, with diffuse signal in the first channel
inflating its value. Images should be processed to remove out of focus and
background light before the MCC is calculated [2]_.
References
----------
.. [1] Manders, E.M.M., Verbeek, F.J. and Aten, J.A. (1993), Measurement of
co-localization of objects in dual-colour confocal images. Journal
of Microscopy, 169: 375-382.
https://doi.org/10.1111/j.1365-2818.1993.tb03313.x
https://imagej.net/media/manders.pdf
.. [2] Dunn, K. W., Kamocka, M. M., & McDonald, J. H. (2011). A practical
guide to evaluating colocalization in biological microscopy.
American journal of physiology. Cell physiology, 300(4), C723–C742.
https://doi.org/10.1152/ajpcell.00462.2010
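Examples
--------
A small illustrative sketch with hypothetical synthetic data (skipped
under doctest):
>>> import cupy as cp
>>> from cucim.skimage.measure import manders_coloc_coeff
>>> image0 = cp.ones((5, 5))
>>> image1_mask = cp.zeros((5, 5), dtype=bool)
>>> image1_mask[:, :2] = True  # channel B covers 10 of the 25 pixels
>>> manders_coloc_coeff(image0, image1_mask)  # doctest: +SKIP
array(0.4)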
"""
image1_mask = as_binary_ndarray(image1_mask, variable_name="image1_mask")
if mask is not None:
mask = as_binary_ndarray(mask, variable_name="mask")
check_shape_equality(image0, image1_mask, mask)
image0 = image0[mask]
image1_mask = image1_mask[mask]
else:
check_shape_equality(image0, image1_mask)
# check non-negative image
if image0.min() < 0:
raise ValueError("image contains negative values")
img_sum = cp.sum(image0)
if img_sum == 0:
return 0
return cp.sum(image0 * image1_mask) / img_sum
def manders_overlap_coeff(image0, image1, mask=None):
r"""Manders' overlap coefficient
Parameters
----------
image0 : (M, N) ndarray
Image of channel A. All pixel values should be non-negative.
image1 : (M, N) ndarray
Image of channel B. All pixel values should be non-negative.
Must have same dimensions as `image0`
mask : (M, N) ndarray of dtype bool, optional
Only `image0` and `image1` pixel values within this region of interest
mask are included in the calculation.
Must have same dimensions as `image0`.
Returns
-------
moc: float
Manders' Overlap Coefficient of pixel intensities between the two
images.
Notes
-----
Manders' Overlap Coefficient (MOC) is given by the equation [1]_:
.. math::
r = \frac{\sum A_i B_i}{\sqrt{\sum A_i^2 \sum B_i^2}}
where
:math:`A_i` is the value of the :math:`i^{th}` pixel in `image0`
:math:`B_i` is the value of the :math:`i^{th}` pixel in `image1`
It ranges between 0 for no colocalization and 1 for complete colocalization
of all pixels.
MOC does not take into account pixel intensities, just the fraction of
pixels that have positive values for both channels [2]_ [3]_. Its usefulness
has been criticized as it changes in response to differences in both
co-occurence and correlation and so a particular MOC value could indicate
a wide range of colocalization patterns [4]_ [5]_.
References
----------
.. [1] Manders, E.M.M., Verbeek, F.J. and Aten, J.A. (1993), Measurement of
co-localization of objects in dual-colour confocal images. Journal
of Microscopy, 169: 375-382.
https://doi.org/10.1111/j.1365-2818.1993.tb03313.x
https://imagej.net/media/manders.pdf
.. [2] Dunn, K. W., Kamocka, M. M., & McDonald, J. H. (2011). A practical
guide to evaluating colocalization in biological microscopy.
American journal of physiology. Cell physiology, 300(4), C723–C742.
https://doi.org/10.1152/ajpcell.00462.2010
.. [3] Bolte, S. and Cordelières, F.P. (2006), A guided tour into
subcellular colocalization analysis in light microscopy. Journal of
Microscopy, 224: 213-232.
https://doi.org/10.1111/j.1365-2818.2006.01706.x
.. [4] Adler J, Parmryd I. (2010), Quantifying colocalization by
correlation: the Pearson correlation coefficient is
superior to the Mander's overlap coefficient. Cytometry A.
Aug;77(8):733-42. https://doi.org/10.1002/cyto.a.20896
.. [5] Adler, J, Parmryd, I. Quantifying colocalization: The case for
discarding the Manders overlap coefficient. Cytometry. 2021; 99:
910–920. https://doi.org/10.1002/cyto.a.24336
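Examples
--------
A small illustrative sketch with hypothetical synthetic channels (skipped
under doctest):
>>> import cupy as cp
>>> from cucim.skimage.measure import manders_overlap_coeff
>>> image0 = cp.array([[1.0, 2.0], [3.0, 4.0]])
>>> image1 = 2.0 * image0  # proportional channels overlap completely
>>> manders_overlap_coeff(image0, image1)  # doctest: +SKIP
array(1.)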
"""
if mask is not None:
mask = as_binary_ndarray(mask, variable_name="mask")
check_shape_equality(image0, image1, mask)
image0 = image0[mask]
image1 = image1[mask]
else:
check_shape_equality(image0, image1)
# check non-negative image
if image0.min() < 0:
raise ValueError("image0 contains negative values")
if image1.min() < 0:
raise ValueError("image1 contains negative values")
denom = cp.linalg.norm(image0) * cp.linalg.norm(image1)
return cp.vdot(image0, image1) / denom
def intersection_coeff(image0_mask, image1_mask, mask=None):
r"""Fraction of a channel's segmented binary mask that overlaps with a
second channel's segmented binary mask.
Parameters
----------
image0_mask : (M, N) ndarray of dtype bool
Image mask of channel A.
image1_mask : (M, N) ndarray of dtype bool
Image mask of channel B.
Must have same dimensions as `image0_mask`.
mask : (M, N) ndarray of dtype bool, optional
Only `image0_mask` and `image1_mask` pixels within this region of
interest mask are included in the calculation.
Must have same dimensions as `image0_mask`.
Returns
-------
Intersection coefficient, float
Fraction of `image0_mask` that overlaps with `image1_mask`.
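Examples
--------
A small illustrative sketch with hypothetical masks (skipped under
doctest):
>>> import cupy as cp
>>> from cucim.skimage.measure import intersection_coeff
>>> image0_mask = cp.zeros((4, 4), dtype=bool)
>>> image0_mask[:2] = True  # 8 pixels in channel A
>>> image1_mask = cp.zeros((4, 4), dtype=bool)
>>> image1_mask[:1] = True  # 4 of them overlap channel B
>>> intersection_coeff(image0_mask, image1_mask)  # doctest: +SKIP
array(0.5)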
"""
image0_mask = as_binary_ndarray(image0_mask, variable_name="image0_mask")
image1_mask = as_binary_ndarray(image1_mask, variable_name="image1_mask")
if mask is not None:
mask = as_binary_ndarray(mask, variable_name="mask")
check_shape_equality(image0_mask, image1_mask, mask)
image0_mask = image0_mask[mask]
image1_mask = image1_mask[mask]
else:
check_shape_equality(image0_mask, image1_mask)
nonzero_image0 = cp.count_nonzero(image0_mask)
if nonzero_image0 == 0:
return 0
nonzero_joint = cp.count_nonzero(cp.logical_and(image0_mask, image1_mask))
return nonzero_joint / nonzero_image0
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/block.py
|
import cupy as cp
import numpy as np
from .._vendored import pad
from ..util import view_as_blocks
def block_reduce(image, block_size=2, func=cp.sum, cval=0, func_kwargs=None):
"""Downsample image by applying function `func` to local blocks.
This function is useful for max and mean pooling, for example.
Parameters
----------
image : ndarray
N-dimensional input image.
block_size : array_like or int
Array containing down-sampling integer factor along each axis.
Default block_size is 2.
func : callable
Function object which is used to calculate the return value for each
local block. This function must implement an ``axis`` parameter.
Primary functions are ``numpy.sum``, ``numpy.min``, ``numpy.max``,
``numpy.mean`` and ``numpy.median``. See also `func_kwargs`.
cval : float
Constant padding value if image is not perfectly divisible by the
block size.
func_kwargs : dict
Keyword arguments passed to `func`. Notably useful for passing dtype
argument to ``np.mean``. Takes dictionary of inputs, e.g.:
``func_kwargs={'dtype': np.float16})``.
Returns
-------
image : ndarray
Down-sampled image with same number of dimensions as input image.
Examples
--------
>>> import cupy as cp
>>> from cucim.skimage.measure import block_reduce
>>> image = cp.arange(3*3*4).reshape(3, 3, 4)
>>> image # doctest: +NORMALIZE_WHITESPACE
array([[[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11]],
[[12, 13, 14, 15],
[16, 17, 18, 19],
[20, 21, 22, 23]],
[[24, 25, 26, 27],
[28, 29, 30, 31],
[32, 33, 34, 35]]])
>>> block_reduce(image, block_size=(3, 3, 1), func=cp.mean)
array([[[16., 17., 18., 19.]]])
>>> image_max1 = block_reduce(image, block_size=(1, 3, 4), func=cp.max)
>>> image_max1 # doctest: +NORMALIZE_WHITESPACE
array([[[11]],
[[23]],
[[35]]])
>>> image_max2 = block_reduce(image, block_size=(3, 1, 4), func=cp.max)
>>> image_max2 # doctest: +NORMALIZE_WHITESPACE
array([[[27],
[31],
[35]]])
"""
if np.isscalar(block_size):
block_size = (block_size,) * image.ndim
elif len(block_size) != image.ndim:
raise ValueError(
"`block_size` must be a scalar or have "
"the same length as `image.shape`"
)
if func_kwargs is None:
func_kwargs = {}
pad_width = []
for i in range(len(block_size)):
if block_size[i] < 1:
raise ValueError(
"Down-sampling factors must be >= 1. Use "
"`skimage.transform.resize` to up-sample an "
"image."
)
if image.shape[i] % block_size[i] != 0:
after_width = block_size[i] - (image.shape[i] % block_size[i])
else:
after_width = 0
pad_width.append((0, after_width))
image = pad(
image, pad_width=pad_width, mode="constant", constant_values=cval
)
blocked = view_as_blocks(image, block_size)
return func(
blocked, axis=tuple(range(image.ndim, blocked.ndim)), **func_kwargs
)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/tests/test_profile.py
|
import cupy as cp
import numpy as np
from cupy.testing import assert_array_almost_equal, assert_array_equal
from cucim.skimage.measure import profile_line
image = cp.arange(100).reshape((10, 10)).astype(float)
def test_horizontal_rightward():
prof = profile_line(image, (0, 2), (0, 8), order=0, mode="constant")
expected_prof = cp.arange(2, 9)
assert_array_equal(prof, expected_prof)
def test_horizontal_leftward():
prof = profile_line(image, (0, 8), (0, 2), order=0, mode="constant")
expected_prof = cp.arange(8, 1, -1)
assert_array_equal(prof, expected_prof)
def test_vertical_downward():
prof = profile_line(image, (2, 5), (8, 5), order=0, mode="constant")
expected_prof = cp.arange(25, 95, 10)
assert_array_equal(prof, expected_prof)
def test_vertical_upward():
prof = profile_line(image, (8, 5), (2, 5), order=0, mode="constant")
expected_prof = cp.arange(85, 15, -10)
assert_array_equal(prof, expected_prof)
def test_45deg_right_downward():
prof = profile_line(image, (2, 2), (8, 8), order=0, mode="constant")
expected_prof = cp.array([22, 33, 33, 44, 55, 55, 66, 77, 77, 88])
# repeats are due to aliasing using nearest neighbor interpolation.
# to see this, imagine a diagonal line with markers every unit of
# length traversing a checkerboard pattern of squares also of unit
# length. Because the line is diagonal, sometimes more than one
# marker will fall on the same checkerboard box.
assert_array_almost_equal(prof, expected_prof)
def test_45deg_right_downward_interpolated():
prof = profile_line(image, (2, 2), (8, 8), order=1, mode="constant")
expected_prof = cp.linspace(22, 88, 10)
assert_array_almost_equal(prof, expected_prof)
def test_45deg_right_upward():
prof = profile_line(image, (8, 2), (2, 8), order=1, mode="constant")
expected_prof = cp.arange(82, 27, -6)
assert_array_almost_equal(prof, expected_prof)
def test_45deg_left_upward():
prof = profile_line(image, (8, 8), (2, 2), order=1, mode="constant")
expected_prof = cp.arange(88, 21, -22.0 / 3)
assert_array_almost_equal(prof, expected_prof)
def test_45deg_left_downward():
prof = profile_line(image, (2, 8), (8, 2), order=1, mode="constant")
expected_prof = cp.arange(28, 83, 6)
assert_array_almost_equal(prof, expected_prof)
def test_pythagorean_triangle_right_downward():
prof = profile_line(image, (1, 1), (7, 9), order=0, mode="constant")
expected_prof = cp.array([11, 22, 23, 33, 34, 45, 56, 57, 67, 68, 79])
assert_array_equal(prof, expected_prof)
def test_pythagorean_triangle_right_downward_interpolated():
prof = profile_line(image, (1, 1), (7, 9), order=1, mode="constant")
expected_prof = cp.linspace(11, 79, 11)
assert_array_almost_equal(prof, expected_prof)
pyth_image = np.zeros((6, 7), float)
line = ((1, 2, 2, 3, 3, 4), (1, 2, 3, 3, 4, 5))
below = ((2, 2, 3, 4, 4, 5), (0, 1, 2, 3, 4, 4))
above = ((0, 1, 1, 2, 3, 3), (2, 2, 3, 4, 5, 6))
pyth_image[line] = 1.8
pyth_image[below] = 0.6
pyth_image[above] = 0.6
pyth_image = cp.asarray(pyth_image)
def test_pythagorean_triangle_right_downward_linewidth():
prof = profile_line(
pyth_image, (1, 1), (4, 5), linewidth=3, order=0, mode="constant"
)
expected_prof = cp.ones(6)
assert_array_almost_equal(prof, expected_prof)
def test_pythagorean_triangle_right_upward_linewidth():
prof = profile_line(
pyth_image[::-1, :],
(4, 1),
(1, 5),
linewidth=3,
order=0,
mode="constant",
)
expected_prof = cp.ones(6)
assert_array_almost_equal(prof, expected_prof)
def test_pythagorean_triangle_transpose_left_down_linewidth():
prof = profile_line(
pyth_image.T[:, ::-1],
(1, 4),
(5, 1),
linewidth=3,
order=0,
mode="constant",
)
expected_prof = np.ones(6)
assert_array_almost_equal(prof, expected_prof)
def test_reduce_func_mean():
prof = profile_line(
pyth_image,
(0, 1),
(3, 1),
linewidth=3,
order=0,
reduce_func=np.mean,
mode="reflect",
)
expected_prof = pyth_image[:4, :3].mean(1)
assert_array_almost_equal(prof, expected_prof)
def test_reduce_func_max():
prof = profile_line(
pyth_image,
(0, 1),
(3, 1),
linewidth=3,
order=0,
reduce_func=np.max,
mode="reflect",
)
expected_prof = pyth_image[:4, :3].max(1)
assert_array_almost_equal(prof, expected_prof)
def test_reduce_func_sum():
prof = profile_line(
pyth_image,
(0, 1),
(3, 1),
linewidth=3,
order=0,
reduce_func=np.sum,
mode="reflect",
)
expected_prof = pyth_image[:4, :3].sum(1)
assert_array_almost_equal(prof, expected_prof)
def test_reduce_func_mean_linewidth_1():
prof = profile_line(
pyth_image,
(0, 1),
(3, 1),
linewidth=1,
order=0,
reduce_func=np.mean,
mode="constant",
)
expected_prof = pyth_image[:4, 1]
assert_array_almost_equal(prof, expected_prof)
def test_reduce_func_None_linewidth_1():
prof = profile_line(
pyth_image,
(1, 2),
(4, 2),
linewidth=1,
order=0,
reduce_func=None,
mode="constant",
)
expected_prof = pyth_image[1:5, 2, np.newaxis]
assert_array_almost_equal(prof, expected_prof)
def test_reduce_func_None_linewidth_3():
prof = profile_line(
pyth_image,
(1, 2),
(4, 2),
linewidth=3,
order=0,
reduce_func=None,
mode="constant",
)
expected_prof = pyth_image[1:5, 1:4]
assert_array_almost_equal(prof, expected_prof)
def test_reduce_func_lambda_linewidth_3():
def reduce_func(x):
return x + x**2
prof = profile_line(
pyth_image,
(1, 2),
(4, 2),
linewidth=3,
order=0,
reduce_func=reduce_func,
mode="constant",
)
expected_prof = cp.apply_along_axis(
reduce_func, arr=pyth_image[1:5, 1:4], axis=1
)
assert_array_almost_equal(prof, expected_prof)
def test_reduce_func_sqrt_linewidth_3():
def reduce_func(x):
return x**0.5
prof = profile_line(
pyth_image,
(1, 2),
(4, 2),
linewidth=3,
order=0,
reduce_func=reduce_func,
mode="constant",
)
expected_prof = cp.apply_along_axis(
reduce_func, arr=pyth_image[1:5, 1:4], axis=1
)
assert_array_almost_equal(prof, expected_prof)
def test_reduce_func_sumofsqrt_linewidth_3():
def reduce_func(x):
return np.sum(x**0.5)
prof = profile_line(
pyth_image,
(1, 2),
(4, 2),
linewidth=3,
order=0,
reduce_func=reduce_func,
mode="constant",
)
expected_prof = cp.apply_along_axis(
reduce_func, arr=pyth_image[1:5, 1:4], axis=1
)
assert_array_almost_equal(prof, expected_prof)
def test_oob_coordinates():
offset = 2
idx = pyth_image.shape[0] + offset
prof = profile_line(
pyth_image,
(-offset, 2),
(idx, 2),
linewidth=1,
order=0,
reduce_func=None,
mode="constant",
)
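# The sampled line runs from row -offset to row idx inclusive, so with
# mode="constant" the profile gains `offset` zeros before the in-image values
# and `offset + 1` zeros after them, matching the vstack below.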
expected_prof = cp.vstack(
[
cp.zeros((offset, 1)),
pyth_image[:, 2, cp.newaxis],
cp.zeros((offset + 1, 1)),
]
)
assert_array_almost_equal(prof, expected_prof)
def test_bool_array_input():
shape = (200, 200)
center_x, center_y = (140, 150)
radius = 20
x, y = cp.meshgrid(cp.arange(shape[1]), cp.arange(shape[0]))
mask = (y - center_y) ** 2 + (x - center_x) ** 2 < radius**2
src = (center_y, center_x)
phi = 4 * np.pi / 9.0
dy = 31 * np.cos(phi)
dx = 31 * np.sin(phi)
dst = (center_y + dy, center_x + dx)
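# The profile starts at the disk centre, so its first `radius` samples fall
# inside the mask; boolean input should therefore give the same profile as
# the uint8 copy.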
profile_u8 = profile_line(mask.astype(cp.uint8), src, dst, mode="reflect")
assert int(cp.all(profile_u8[:radius] == 1))
profile_b = profile_line(mask, src, dst, mode="constant")
assert int(cp.all(profile_b[:radius] == 1))
assert int(cp.all(profile_b == profile_u8))
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/tests/test_blur_effect.py
|
import cupy as cp
import pytest
from cupy.testing import assert_array_equal
from skimage.data import astronaut
from cucim.skimage.color import rgb2gray
from cucim.skimage.filters import gaussian
from cucim.skimage.measure import blur_effect
def test_blur_effect():
"""Test that the blur metric increases with more blurring."""
image = cp.array(astronaut())
B0 = blur_effect(image, channel_axis=-1)
B1 = blur_effect(gaussian(image, sigma=1, channel_axis=-1), channel_axis=-1)
B2 = blur_effect(gaussian(image, sigma=4, channel_axis=-1), channel_axis=-1)
assert 0 <= B0 < 1
assert B0 < B1 < B2
def test_blur_effect_h_size():
"""Test that the blur metric decreases with increasing size of the
re-blurring filter.
"""
image = cp.array(astronaut())
B0 = blur_effect(image, h_size=3, channel_axis=-1)
B1 = blur_effect(image, channel_axis=-1) # default h_size is 11
B2 = blur_effect(image, h_size=30, channel_axis=-1)
assert 0 <= B0 < 1
assert B0 > B1 > B2
def test_blur_effect_channel_axis():
"""Test that passing an RGB image is equivalent to passing its grayscale
version.
"""
image = cp.array(astronaut())
B0 = blur_effect(image, channel_axis=-1)
B1 = blur_effect(rgb2gray(image))
B0_arr = blur_effect(image, channel_axis=-1, reduce_func=None)
B1_arr = blur_effect(rgb2gray(image), reduce_func=None)
assert 0 <= B0 < 1
assert B0 == B1
assert_array_equal(B0_arr, B1_arr)
def test_blur_effect_3d():
"""Test that the blur metric works on a 3D image."""
data = pytest.importorskip("skimage.data")
if not hasattr(data, "cells3d"):
pytest.skip(
"cells3d data not available in this version of scikit-image"
)
image_3d = cp.array(data.cells3d()[:, 1, :, :]) # grab just the nuclei
B0 = blur_effect(image_3d)
B1 = blur_effect(gaussian(image_3d, sigma=1))
B2 = blur_effect(gaussian(image_3d, sigma=4))
assert 0 <= B0 < 1
assert B0 < B1 < B2
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/tests/test_colocalization.py
|
import cupy as cp
import numpy as np
import pytest
from cucim.skimage.measure import (
intersection_coeff,
manders_coloc_coeff,
manders_overlap_coeff,
pearson_corr_coeff,
)
def test_invalid_input():
# images are not same size
img1 = cp.array([[i + j for j in range(4)] for i in range(4)])
img2 = cp.ones((3, 5, 6))
mask = cp.array([[i <= 1 for i in range(5)] for _ in range(5)])
non_binary_mask = cp.array([[2 for __ in range(4)] for _ in range(4)])
with pytest.raises(ValueError, match=". must have the same dimensions"):
pearson_corr_coeff(img1, img1, mask)
with pytest.raises(ValueError, match=". must have the same dimensions"):
pearson_corr_coeff(img1, img2)
with pytest.raises(ValueError, match=". must have the same dimensions"):
pearson_corr_coeff(img1, img1, mask)
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
pearson_corr_coeff(img1, img1, non_binary_mask)
with pytest.raises(ValueError, match=". must have the same dimensions"):
manders_coloc_coeff(img1, mask)
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
manders_coloc_coeff(img1, non_binary_mask)
with pytest.raises(ValueError, match=". must have the same dimensions"):
manders_coloc_coeff(img1, img1 > 0, mask)
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
manders_coloc_coeff(img1, img1 > 0, non_binary_mask)
with pytest.raises(ValueError, match=". must have the same dimensions"):
manders_overlap_coeff(img1, img1, mask)
with pytest.raises(ValueError, match=". must have the same dimensions"):
manders_overlap_coeff(img1, img2)
with pytest.raises(ValueError, match=". must have the same dimensions"):
manders_overlap_coeff(img1, img1, mask)
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
manders_overlap_coeff(img1, img1, non_binary_mask)
with pytest.raises(ValueError, match=". must have the same dimensions"):
intersection_coeff(img1 > 2, img2 > 1, mask)
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
intersection_coeff(img1, img2)
with pytest.raises(ValueError, match=". must have the same dimensions"):
intersection_coeff(img1 > 2, img1 > 1, mask)
with pytest.raises(ValueError, match=". array is not of dtype boolean"):
intersection_coeff(img1 > 2, img1 > 1, non_binary_mask)
def test_pcc():
# simple example
img1 = cp.array([[i + j for j in range(4)] for i in range(4)])
np.testing.assert_allclose(
pearson_corr_coeff(img1, img1),
(1.0, 0.0),
rtol=1e-12,
)
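# pearson_corr_coeff returns a (correlation coefficient, p-value) tuple, so an
# image compared against itself yields exactly (1.0, 0.0).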
img2 = cp.where(img1 <= 2, 0, img1)
np.testing.assert_allclose(
pearson_corr_coeff(img1, img2),
(0.944911182523068, 3.5667540654536515e-08),
rtol=1e-12,
)
# change background of roi and see if values are same
roi = cp.where(img1 <= 2, 0, 1)
np.testing.assert_allclose(
pearson_corr_coeff(img1, img1, roi),
pearson_corr_coeff(img1, img2, roi),
rtol=1e-12,
)
def test_mcc():
img1 = cp.array([[j for j in range(4)] for i in range(4)])
mask = cp.array([[i <= 1 for j in range(4)] for i in range(4)])
assert manders_coloc_coeff(img1, mask) == 0.5
# test negative values
img_negativeint = cp.where(img1 == 1, -1, img1)
img_negativefloat = img_negativeint / 2.0
with pytest.raises(ValueError):
manders_coloc_coeff(img_negativeint, mask)
with pytest.raises(ValueError):
manders_coloc_coeff(img_negativefloat, mask)
def test_moc():
img1 = cp.ones((4, 4))
img2 = 2 * cp.ones((4, 4))
assert manders_overlap_coeff(img1, img2) == 1
# test negative values
img_negativeint = cp.where(img1 == 1, -1, img1)
img_negativefloat = img_negativeint / 2.0
with pytest.raises(ValueError):
manders_overlap_coeff(img_negativeint, img2)
with pytest.raises(ValueError):
manders_overlap_coeff(img1, img_negativeint)
with pytest.raises(ValueError):
manders_overlap_coeff(img_negativefloat, img2)
with pytest.raises(ValueError):
manders_overlap_coeff(img1, img_negativefloat)
with pytest.raises(ValueError):
manders_overlap_coeff(img_negativefloat, img_negativefloat)
def test_intersection_coefficient():
img1_mask = cp.array([[j <= 1 for j in range(4)] for i in range(4)])
img2_mask = cp.array([[i <= 1 for j in range(4)] for i in range(4)])
img3_mask = cp.array([[1 for j in range(4)] for i in range(4)])
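# intersection_coeff measures the fraction of the first mask overlapping the
# second: img1_mask (8 pixels) meets img2_mask in a 2x2 block, giving
# 4 / 8 = 0.5, and lies entirely inside the all-True img3_mask, giving 1.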
assert intersection_coeff(img1_mask, img2_mask) == 0.5
assert intersection_coeff(img1_mask, img3_mask) == 1
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/tests/test_ccomp.py
|
# Note: These test cases originated in skimage/morphology/tests/test_ccomp.py
import cupy as cp
# import numpy as np
from cupy.testing import assert_array_equal
from cucim.skimage.measure import label
# import pytest
# import cucim.skimage.measure._ccomp as ccomp
BG = 0 # background value
class TestConnectedComponents:
def setup_method(self):
# fmt: off
self.x = cp.array([
[0, 0, 3, 2, 1, 9],
[0, 1, 1, 9, 2, 9],
[0, 0, 1, 9, 9, 9],
[3, 1, 1, 5, 3, 0]])
self.labels = cp.array([
[0, 0, 1, 2, 3, 4],
[0, 5, 5, 4, 2, 4],
[0, 0, 5, 4, 4, 4],
[6, 5, 5, 7, 8, 0]])
# fmt: on
# No background - there is no label 0, instead, labelling starts with 1
# and all labels are incremented by 1.
self.labels_nobg = self.labels + 1
# The 0 at lower right corner is isolated, so it should get a new label
self.labels_nobg[-1, -1] = 10
# We say that background value is 9 (and bg label is 0)
self.labels_bg_9 = self.labels_nobg.copy()
self.labels_bg_9[self.x == 9] = 0
# Then, where there was the label 5, we now expect 4 etc.
# (we assume that the label of value 9 would normally be 5)
self.labels_bg_9[self.labels_bg_9 > 5] -= 1
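# The same relabelling rule on a small 1-D sketch (not one of the fixtures):
# label(cp.array([9, 1, 1, 9, 2]), background=9) -> [0, 1, 1, 0, 2]
# i.e. pixels equal to `background` map to 0 and the remaining components
# are numbered from 1.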
def test_basic(self):
assert_array_equal(label(self.x), self.labels)
# Make sure data wasn't modified
assert self.x[0, 2] == 3
# Check that everything works if there is no background
assert_array_equal(label(self.x, background=99), self.labels_nobg)
# Check that everything works if background value != 0
assert_array_equal(label(self.x, background=9), self.labels_bg_9)
def test_random(self):
x = (cp.random.rand(20, 30) * 5).astype(int)
labels = label(x)
n = int(labels.max())
for i in range(n):
values = x[labels == i]
assert cp.all(values == values[0])
def test_diag(self):
# fmt: off
x = cp.array([[0, 0, 1],
[0, 1, 0],
[1, 0, 0]])
assert_array_equal(label(x), x)
# fmt: on
def test_4_vs_8(self):
# fmt: off
x = cp.array([[0, 1],
[1, 0]], dtype=int)
assert_array_equal(label(x, connectivity=1),
[[0, 1],
[2, 0]])
assert_array_equal(label(x, connectivity=2),
[[0, 1],
[1, 0]])
# fmt: on
def test_background(self):
# fmt: off
x = cp.array([[1, 0, 0],
[1, 1, 5],
[0, 0, 0]])
assert_array_equal(label(x), [[1, 0, 0],
[1, 1, 2],
[0, 0, 0]])
assert_array_equal(label(x, background=0),
[[1, 0, 0],
[1, 1, 2],
[0, 0, 0]])
# fmt: on
def test_background_two_regions(self):
# fmt: off
x = cp.array([[0, 0, 6],
[0, 0, 6],
[5, 5, 5]])
res = label(x, background=0)
assert_array_equal(res,
[[0, 0, 1],
[0, 0, 1],
[2, 2, 2]])
# fmt: on
def test_background_one_region_center(self):
# fmt: off
x = cp.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
assert_array_equal(label(x, connectivity=1, background=0),
[[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
# fmt: on
def test_return_num(self):
# fmt: off
x = cp.array([[1, 0, 6],
[0, 0, 6],
[5, 5, 5]])
# fmt: on
assert_array_equal(label(x, return_num=True)[1], 3)
assert_array_equal(label(x, background=-1, return_num=True)[1], 4)
class TestConnectedComponents3d:
def setup_method(self):
self.x = cp.zeros((3, 4, 5), int)
# fmt: off
self.x[0] = cp.array([[0, 3, 2, 1, 9],
[0, 1, 9, 2, 9],
[0, 1, 9, 9, 9],
[3, 1, 5, 3, 0]])
self.x[1] = cp.array([[3, 3, 2, 1, 9],
[0, 3, 9, 2, 1],
[0, 3, 3, 1, 1],
[3, 1, 3, 3, 0]])
self.x[2] = cp.array([[3, 3, 8, 8, 0],
[2, 3, 9, 8, 8],
[2, 3, 0, 8, 0],
[2, 1, 0, 0, 0]])
self.labels = cp.zeros((3, 4, 5), int)
self.labels[0] = cp.array([[0, 1, 2, 3, 4],
[0, 5, 4, 2, 4],
[0, 5, 4, 4, 4],
[1, 5, 6, 1, 0]])
self.labels[1] = cp.array([[1, 1, 2, 3, 4],
[0, 1, 4, 2, 3],
[0, 1, 1, 3, 3],
[1, 5, 1, 1, 0]])
self.labels[2] = cp.array([[1, 1, 7, 7, 0],
[8, 1, 4, 7, 7],
[8, 1, 0, 7, 0],
[8, 5, 0, 0, 0]])
# fmt: on
def test_basic(self):
labels = label(self.x)
assert_array_equal(labels, self.labels)
assert self.x[0, 0, 2] == 2, "Data was modified!"
def test_random(self):
x = (cp.random.rand(20, 30) * 5).astype(int)
labels = label(x)
n = int(labels.max())
for i in range(n):
values = x[labels == i]
assert cp.all(values == values[0])
def test_diag(self):
x = cp.zeros((3, 3, 3), int)
x[0, 2, 2] = 1
x[1, 1, 1] = 1
x[2, 0, 0] = 1
assert_array_equal(label(x), x)
def test_4_vs_8(self):
x = cp.zeros((2, 2, 2), int)
x[0, 1, 1] = 1
x[1, 0, 0] = 1
label4 = x.copy()
label4[1, 0, 0] = 2
assert_array_equal(label(x, connectivity=1), label4)
assert_array_equal(label(x, connectivity=3), x)
def test_connectivity_1_vs_2(self):
x = cp.zeros((2, 2, 2), int)
x[0, 1, 1] = 1
x[1, 0, 0] = 1
label1 = x.copy()
label1[1, 0, 0] = 2
assert_array_equal(label(x, connectivity=1), label1)
assert_array_equal(label(x, connectivity=3), x)
def test_background(self):
x = cp.zeros((2, 3, 3), int)
# fmt: off
x[0] = cp.array([[1, 0, 0],
[1, 0, 0],
[0, 0, 0]])
x[1] = cp.array([[0, 0, 0],
[0, 1, 5],
[0, 0, 0]])
lnb = x.copy()
lnb[0] = cp.array([[1, 2, 2],
[1, 2, 2],
[2, 2, 2]])
lnb[1] = cp.array([[2, 2, 2],
[2, 1, 3],
[2, 2, 2]])
lb = x.copy()
lb[0] = cp.array([[1, BG, BG], # noqa
[1, BG, BG], # noqa
[BG, BG, BG]])
lb[1] = cp.array([[BG, BG, BG],
[BG, 1, 2], # noqa
[BG, BG, BG]])
# fmt: on
assert_array_equal(label(x), lb)
assert_array_equal(label(x, background=-1), lnb)
def test_background_two_regions(self):
x = cp.zeros((2, 3, 3), int)
# fmt: off
x[0] = cp.array([[0, 0, 6],
[0, 0, 6],
[5, 5, 5]])
x[1] = cp.array([[6, 6, 0],
[5, 0, 0],
[0, 0, 0]])
lb = x.copy()
lb[0] = cp.array([[BG, BG, 1],
[BG, BG, 1],
[2, 2, 2]]) # noqa
lb[1] = cp.array([[1, 1, BG], # noqa
[2, BG, BG], # noqa
[BG, BG, BG]])
# fmt: on
res = label(x, background=0)
assert_array_equal(res, lb)
def test_background_one_region_center(self):
x = cp.zeros((3, 3, 3), int)
x[1, 1, 1] = 1
lb = cp.ones_like(x) * BG
lb[1, 1, 1] = 1
assert_array_equal(label(x, connectivity=1, background=0), lb)
def test_return_num(self):
# fmt: off
x = cp.array([[1, 0, 6],
[0, 0, 6],
[5, 5, 5]])
# fmt: on
assert_array_equal(label(x, return_num=True)[1], 3)
assert_array_equal(label(x, background=-1, return_num=True)[1], 4)
def test_1D(self):
x = cp.array((0, 1, 2, 2, 1, 1, 0, 0))
xlen = len(x)
y = cp.array((0, 1, 2, 2, 3, 3, 0, 0))
reshapes = (
(xlen,),
(1, xlen),
(xlen, 1),
(1, xlen, 1),
(xlen, 1, 1),
(1, 1, xlen),
)
for reshape in reshapes:
x2 = x.reshape(reshape)
labelled = label(x2)
assert_array_equal(y, labelled.flatten())
# CuPy Backend: unlike scikit-image, the CUDA implementation is nD
# def test_nd(self):
# x = cp.ones((1, 2, 3, 4))
# with testing.raises(NotImplementedError):
# label(x)
# @pytest.mark.skip("ccomp not yet implemented")
# class TestSupport:
# def test_reshape(self):
# shapes_in = ((3, 1, 2), (1, 4, 5), (3, 1, 1), (2, 1), (1,))
# for shape in shapes_in:
# shape = np.array(shape)
# numones = sum(shape == 1)
# inp = np.random.random(shape)
# inp = cp.asarray(inp)
# fixed, swaps = ccomp.reshape_array(inp)
# shape2 = fixed.shape
# # now check that all ones are at the beginning
# for i in range(numones):
# assert shape2[i] == 1
# back = ccomp.undo_reshape_array(fixed, swaps)
# # check that the undo works as expected
# assert_array_equal(inp, back)
| 0 |
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure
|
rapidsai_public_repos/cucim/python/cucim/src/cucim/skimage/measure/tests/test_regionprops.py
|
import math
import cupy as cp
import cupyx.scipy.ndimage as ndi
import numpy as np
import pytest
from cupy.testing import assert_array_almost_equal, assert_array_equal
from numpy.testing import assert_almost_equal, assert_equal
from skimage import data, draw
from skimage.segmentation import slic
from cucim.skimage import transform
from cucim.skimage._vendored import pad
from cucim.skimage.measure import (
euler_number,
perimeter,
perimeter_crofton,
regionprops,
regionprops_table,
)
from cucim.skimage.measure._regionprops import ( # noqa
COL_DTYPES,
OBJECT_COLUMNS,
PROPS,
_inertia_eigvals_to_axes_lengths_3D,
_parse_docs,
_props_to_dict,
_require_intensity_image,
)
# fmt: off
SAMPLE = cp.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1]]
)
# fmt: on
INTENSITY_SAMPLE = SAMPLE.copy()
INTENSITY_SAMPLE[1, 9:11] = 2
INTENSITY_FLOAT_SAMPLE = INTENSITY_SAMPLE.copy().astype(cp.float64) / 10.0
SAMPLE_MULTIPLE = cp.eye(10, dtype=np.int32)
SAMPLE_MULTIPLE[3:5, 7:8] = 2
INTENSITY_SAMPLE_MULTIPLE = SAMPLE_MULTIPLE.copy() * 2.0
SAMPLE_3D = cp.zeros((6, 6, 6), dtype=cp.uint8)
SAMPLE_3D[1:3, 1:3, 1:3] = 1
SAMPLE_3D[3, 2, 2] = 1
INTENSITY_SAMPLE_3D = SAMPLE_3D.copy()
def get_moment_function(img, spacing=(1, 1)):
rows, cols = img.shape
Y, X = cp.meshgrid(
cp.linspace(0, rows * spacing[0], rows, endpoint=False),
cp.linspace(0, cols * spacing[1], cols, endpoint=False),
indexing="ij",
)
return lambda p, q: cp.sum(Y**p * X**q * img)
def get_moment3D_function(img, spacing=(1, 1, 1)):
slices, rows, cols = img.shape
Z, Y, X = cp.meshgrid(
cp.linspace(0, slices * spacing[0], slices, endpoint=False),
cp.linspace(0, rows * spacing[1], rows, endpoint=False),
cp.linspace(0, cols * spacing[2], cols, endpoint=False),
indexing="ij",
)
return lambda p, q, r: cp.sum(Z**p * Y**q * X**r * img)
def get_central_moment_function(img, spacing=(1, 1)):
rows, cols = img.shape
Y, X = cp.meshgrid(
cp.linspace(0, rows * spacing[0], rows, endpoint=False),
cp.linspace(0, cols * spacing[1], cols, endpoint=False),
indexing="ij",
)
Mpq = get_moment_function(img, spacing=spacing)
cY = Mpq(1, 0) / Mpq(0, 0)
cX = Mpq(0, 1) / Mpq(0, 0)
return lambda p, q: cp.sum((Y - cY) ** p * (X - cX) ** q * img)
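# These helpers follow the textbook definitions used throughout the tests:
# raw moments      M_pq  = sum_{y,x} y**p * x**q * img[y, x]
# central moments  mu_pq = sum_{y,x} (y - cY)**p * (x - cX)**q * img[y, x]
# with centroid (cY, cX) = (M_10 / M_00, M_01 / M_00), where (y, x) are the
# physical coordinates implied by `spacing`.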
def test_all_props():
region = regionprops(SAMPLE, INTENSITY_SAMPLE)[0]
for prop in PROPS:
try:
# access legacy name via dict
assert_array_almost_equal(
region[prop], getattr(region, PROPS[prop])
)
# skip property access tests for old CamelCase names
# (we intentionally do not provide properties for these)
if prop.lower() == prop:
# access legacy name via attribute
assert_array_almost_equal(
getattr(region, prop), getattr(region, PROPS[prop])
)
except TypeError: # the `slice` property causes this
pass
def test_all_props_3d():
region = regionprops(SAMPLE_3D, INTENSITY_SAMPLE_3D)[0]
for prop in PROPS:
try:
assert_array_almost_equal(
region[prop], getattr(region, PROPS[prop])
)
# skip property access tests for old CamelCase names
# (we intentionally do not provide properties for these)
if prop.lower() == prop:
assert_array_almost_equal(
getattr(region, prop), getattr(region, PROPS[prop])
)
except (NotImplementedError, TypeError):
pass
def test_num_pixels():
num_pixels = regionprops(SAMPLE)[0].num_pixels
assert num_pixels == 72
num_pixels = regionprops(SAMPLE, spacing=(2, 1))[0].num_pixels
assert num_pixels == 72
def test_dtype():
regionprops(cp.zeros((10, 10), dtype=int))
regionprops(cp.zeros((10, 10), dtype=cp.uint))
with pytest.raises(TypeError):
regionprops(cp.zeros((10, 10), dtype=float))
with pytest.raises(TypeError):
regionprops(cp.zeros((10, 10), dtype=cp.float64))
with pytest.raises(TypeError):
regionprops(cp.zeros((10, 10), dtype=bool))
def test_ndim():
regionprops(cp.zeros((10, 10), dtype=int))
regionprops(cp.zeros((10, 10, 1), dtype=int))
regionprops(cp.zeros((10, 10, 10), dtype=int))
regionprops(cp.zeros((1, 1), dtype=int))
regionprops(cp.zeros((1, 1, 1), dtype=int))
with pytest.raises(TypeError):
regionprops(cp.zeros((10, 10, 10, 2), dtype=int))
@pytest.mark.skip("feret_diameter_max not implemented on the GPU")
def test_feret_diameter_max():
# comparator result is based on SAMPLE from manually-inspected computations
comparator_result = 18
test_result = regionprops(SAMPLE)[0].feret_diameter_max
assert cp.abs(test_result - comparator_result) < 1
comparator_result_spacing = 10
test_result_spacing = regionprops(SAMPLE, spacing=[1, 0.1])[
0
].feret_diameter_max # noqa
assert cp.abs(test_result_spacing - comparator_result_spacing) < 1
# square, test that Feret diameter is sqrt(2) * square side
img = cp.zeros((20, 20), dtype=cp.uint8)
img[2:-2, 2:-2] = 1
feret_diameter_max = regionprops(img)[0].feret_diameter_max
assert cp.abs(feret_diameter_max - 16 * math.sqrt(2)) < 1
# Due to marching-squares with a level of .5 the diagonal goes
# from (0, 0.5) to (16, 15.5).
assert cp.abs(feret_diameter_max - np.sqrt(16**2 + (16 - 1) ** 2)) < 1e-6
spacing = (2, 1)
feret_diameter_max = regionprops(img, spacing=spacing)[
0
].feret_diameter_max # noqa
# For anisotropic spacing the shift is applied to the smaller spacing.
assert (
cp.abs(
feret_diameter_max
- cp.sqrt(
(spacing[0] * 16 - (spacing[0] <= spacing[1])) ** 2
+ (spacing[1] * 16 - (spacing[1] < spacing[0])) ** 2
)
)
< 1e-6
)
@pytest.mark.skip("feret_diameter_max not implemented on the GPU")
def test_feret_diameter_max_3d():
img = cp.zeros((20, 20), dtype=cp.uint8)
img[2:-2, 2:-2] = 1
img_3d = cp.dstack((img,) * 3)
feret_diameter_max = regionprops(img_3d)[0].feret_diameter_max
# Due to marching-cubes with a level of .5, 1 (= 2 * 0.5) has to be
# subtracted from two of the three axes. There are three combinations
# (x-1, y-1, z), (x-1, y, z-1), (x, y-1, z-1).
# The option yielding the longest diagonal is the computed
# max_feret_diameter.
assert (
cp.abs(
feret_diameter_max - cp.sqrt((16 - 1) ** 2 + 16**2 + (3 - 1) ** 2)
)
< 1e-6
) # noqa
spacing = (1, 2, 3)
feret_diameter_max = regionprops(img_3d, spacing=spacing)[
0
].feret_diameter_max # noqa
# The longest of the three options is the max_feret_diameter
assert (
cp.abs(
feret_diameter_max
- cp.sqrt(
(spacing[0] * (16 - 1)) ** 2
+ (spacing[1] * (16 - 0)) ** 2
+ (spacing[2] * (3 - 1)) ** 2
)
)
< 1e-6
)
assert (
cp.abs(
feret_diameter_max
- cp.sqrt(
(spacing[0] * (16 - 1)) ** 2
+ (spacing[1] * (16 - 1)) ** 2
+ (spacing[2] * (3 - 0)) ** 2
)
)
> 1e-6
)
assert (
cp.abs(
feret_diameter_max
- cp.sqrt(
(spacing[0] * (16 - 0)) ** 2
+ (spacing[1] * (16 - 1)) ** 2
+ (spacing[2] * (3 - 1)) ** 2
)
)
> 1e-6
)
def test_area():
area = regionprops(SAMPLE)[0].area
assert area == cp.sum(SAMPLE)
spacing = (1, 2)
area = regionprops(SAMPLE, spacing=spacing)[0].area
assert area == cp.sum(SAMPLE * math.prod(spacing))
area = regionprops(SAMPLE_3D)[0].area
assert area == cp.sum(SAMPLE_3D)
spacing = (2, 1, 3)
area = regionprops(SAMPLE_3D, spacing=spacing)[0].area
assert area == cp.sum(SAMPLE_3D * math.prod(spacing))
def test_bbox():
bbox = regionprops(SAMPLE)[0].bbox
assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]))
bbox = regionprops(SAMPLE, spacing=(1, 2))[0].bbox
assert_array_almost_equal(bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1]))
SAMPLE_mod = SAMPLE.copy()
SAMPLE_mod[:, -1] = 0
bbox = regionprops(SAMPLE_mod)[0].bbox
assert_array_almost_equal(
bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1] - 1)
)
bbox = regionprops(SAMPLE_mod, spacing=(3, 2))[0].bbox
assert_array_almost_equal(
bbox, (0, 0, SAMPLE.shape[0], SAMPLE.shape[1] - 1)
)
bbox = regionprops(SAMPLE_3D)[0].bbox
assert_array_almost_equal(bbox, (1, 1, 1, 4, 3, 3))
bbox = regionprops(SAMPLE_3D, spacing=(0.5, 2, 7))[0].bbox
assert_array_almost_equal(bbox, (1, 1, 1, 4, 3, 3))
def test_area_bbox():
padded = pad(SAMPLE, 5, mode="constant")
bbox_area = regionprops(padded)[0].area_bbox
assert_array_almost_equal(bbox_area, SAMPLE.size)
spacing = (0.5, 3)
bbox_area = regionprops(padded, spacing=spacing)[0].area_bbox
assert_array_almost_equal(bbox_area, SAMPLE.size * math.prod(spacing))
def test_moments_central():
mu = regionprops(SAMPLE)[0].moments_central
# determined with OpenCV
assert_almost_equal(mu[2, 0], 436.00000000000045, decimal=4)
# different from OpenCV results, bug in OpenCV
assert_almost_equal(mu[3, 0], -737.333333333333, decimal=3)
assert_almost_equal(mu[1, 1], -87.33333333333303, decimal=3)
assert_almost_equal(mu[2, 1], -127.5555555555593, decimal=3)
assert_almost_equal(mu[0, 2], 1259.7777777777774, decimal=2)
assert_almost_equal(mu[1, 2], 2000.296296296291, decimal=2)
assert_almost_equal(mu[0, 3], -760.0246913580195, decimal=2)
# Verify central moment test functions
centralMpq = get_central_moment_function(SAMPLE, spacing=(1, 1))
assert_almost_equal(centralMpq(2, 0), mu[2, 0], decimal=3)
assert_almost_equal(centralMpq(3, 0), mu[3, 0], decimal=3)
assert_almost_equal(centralMpq(1, 1), mu[1, 1], decimal=3)
assert_almost_equal(centralMpq(2, 1), mu[2, 1], decimal=3)
assert_almost_equal(centralMpq(0, 2), mu[0, 2], decimal=3)
assert_almost_equal(centralMpq(1, 2), mu[1, 2], decimal=3)
assert_almost_equal(centralMpq(0, 3), mu[0, 3], decimal=3)
# Test spacing against verified central moment test function
spacing = (1.8, 0.8)
centralMpq = get_central_moment_function(SAMPLE, spacing=spacing)
mu = regionprops(SAMPLE, spacing=spacing)[0].moments_central
assert_almost_equal(mu[2, 0], centralMpq(2, 0), decimal=3)
assert_almost_equal(mu[3, 0], centralMpq(3, 0), decimal=2)
assert_almost_equal(mu[1, 1], centralMpq(1, 1), decimal=3)
assert_almost_equal(mu[2, 1], centralMpq(2, 1), decimal=2)
assert_almost_equal(mu[0, 2], centralMpq(0, 2), decimal=3)
assert_almost_equal(mu[1, 2], centralMpq(1, 2), decimal=2)
assert_almost_equal(mu[0, 3], centralMpq(0, 3), decimal=2)
def test_centroid():
centroid = regionprops(SAMPLE)[0].centroid
# determined with MATLAB
assert_array_almost_equal(centroid, (5.66666666666666, 9.444444444444444))
# Verify test moment function with spacing=(1, 1)
Mpq = get_moment_function(SAMPLE, spacing=(1, 1))
cY = float(Mpq(1, 0) / Mpq(0, 0))
cX = float(Mpq(0, 1) / Mpq(0, 0))
assert_array_almost_equal((cY, cX), centroid)
spacing = (1.8, 0.8)
# Moment
Mpq = get_moment_function(SAMPLE, spacing=spacing)
cY = float(Mpq(1, 0) / Mpq(0, 0))
cX = float(Mpq(0, 1) / Mpq(0, 0))
centroid = regionprops(SAMPLE, spacing=spacing)[0].centroid
assert_array_almost_equal(centroid, (cY, cX))
def test_centroid_3d():
centroid = regionprops(SAMPLE_3D)[0].centroid
# determined by mean along axis 1 of SAMPLE_3D.nonzero()
assert_array_almost_equal(centroid, (1.66666667, 1.55555556, 1.55555556))
# Verify moment 3D test function
Mpqr = get_moment3D_function(SAMPLE_3D, spacing=(1, 1, 1))
cZ = float(Mpqr(1, 0, 0) / Mpqr(0, 0, 0))
cY = float(Mpqr(0, 1, 0) / Mpqr(0, 0, 0))
cX = float(Mpqr(0, 0, 1) / Mpqr(0, 0, 0))
assert_array_almost_equal((cZ, cY, cX), centroid)
# Test spacing
spacing = (2, 1, 0.8)
Mpqr = get_moment3D_function(SAMPLE_3D, spacing=spacing)
cZ = float(Mpqr(1, 0, 0) / Mpqr(0, 0, 0))
cY = float(Mpqr(0, 1, 0) / Mpqr(0, 0, 0))
cX = float(Mpqr(0, 0, 1) / Mpqr(0, 0, 0))
centroid = regionprops(SAMPLE_3D, spacing=spacing)[0].centroid
assert_array_almost_equal(centroid, (cZ, cY, cX))
def test_area_convex():
area = regionprops(SAMPLE)[0].area_convex
assert area == 125
spacing = (1, 4)
area = regionprops(SAMPLE, spacing=spacing)[0].area_convex
assert area == 125 * np.prod(spacing)
def test_image_convex():
img = regionprops(SAMPLE)[0].image_convex
# fmt: off
ref = cp.array(
[[0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0],
[0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1],
[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
)
# fmt: on
assert_array_equal(img, ref)
def test_coordinates():
sample = cp.zeros((10, 10), dtype=cp.int8)
coords = cp.array([[3, 2], [3, 3], [3, 4]])
sample[coords[:, 0], coords[:, 1]] = 1
prop_coords = regionprops(sample)[0].coords
assert_array_equal(prop_coords, coords)
prop_coords = regionprops(sample, spacing=(0.5, 1.2))[0].coords
assert_array_equal(prop_coords, coords)
def test_coordinates_scaled():
sample = cp.zeros((10, 10), dtype=np.int8)
coords = cp.array([[3, 2], [3, 3], [3, 4]])
sample[coords[:, 0], coords[:, 1]] = 1
spacing = (1, 1)
prop_coords = regionprops(sample, spacing=spacing)[0].coords_scaled
assert_array_equal(prop_coords, coords * cp.array(spacing))
spacing = (1, 0.5)
prop_coords = regionprops(sample, spacing=spacing)[0].coords_scaled
assert_array_equal(prop_coords, coords * cp.array(spacing))
sample = cp.zeros((6, 6, 6), dtype=cp.int8)
coords = cp.array([[1, 1, 1], [1, 2, 1], [1, 3, 1]])
sample[coords[:, 0], coords[:, 1], coords[:, 2]] = 1
prop_coords = regionprops(sample)[0].coords_scaled
assert_array_equal(prop_coords, coords)
spacing = (0.2, 3, 2.3)
prop_coords = regionprops(sample, spacing=spacing)[0].coords_scaled
assert_array_equal(prop_coords, coords * cp.array(spacing))
def test_slice():
padded = pad(SAMPLE, ((2, 4), (5, 2)), mode="constant")
nrow, ncol = SAMPLE.shape
result = regionprops(padded)[0].slice
expected = (slice(2, 2 + nrow), slice(5, 5 + ncol))
assert_array_equal(result, expected)
spacing = (2, 0.2)
result = regionprops(padded, spacing=spacing)[0].slice
assert_equal(result, expected)
def test_eccentricity():
eps = regionprops(SAMPLE)[0].eccentricity
assert_almost_equal(eps, 0.814629313427)
eps = regionprops(SAMPLE, spacing=(1.5, 1.5))[0].eccentricity
assert_almost_equal(eps, 0.814629313427)
img = cp.zeros((5, 5), dtype=int)
img[2, 2] = 1
eps = regionprops(img)[0].eccentricity
assert_almost_equal(eps, 0)
eps = regionprops(img, spacing=(3, 3))[0].eccentricity
assert_almost_equal(eps, 0)
def test_equivalent_diameter_area():
diameter = regionprops(SAMPLE)[0].equivalent_diameter_area
# determined with MATLAB
assert_almost_equal(diameter, 9.57461472963)
spacing = (1, 3)
diameter = regionprops(SAMPLE, spacing=spacing)[0].equivalent_diameter_area
equivalent_area = cp.pi * (diameter / 2.0) ** 2
assert_almost_equal(equivalent_area, SAMPLE.sum() * math.prod(spacing))
def test_euler_number():
for spacing in [(1, 1), (2.1, 0.9)]:
en = regionprops(SAMPLE, spacing=spacing)[0].euler_number
assert en == 0
SAMPLE_mod = SAMPLE.copy()
SAMPLE_mod[7, -3] = 0
en = regionprops(SAMPLE_mod, spacing=spacing)[0].euler_number
assert en == -1
en = euler_number(SAMPLE, 1)
assert en == 2
en = euler_number(SAMPLE_mod, 1)
assert en == 1
en = euler_number(SAMPLE_3D, 1)
assert en == 1
en = euler_number(SAMPLE_3D, 3)
assert en == 1
# for convex body, Euler number is 1
SAMPLE_3D_2 = cp.zeros((100, 100, 100))
SAMPLE_3D_2[40:60, 40:60, 40:60] = 1
en = euler_number(SAMPLE_3D_2, 3)
assert en == 1
SAMPLE_3D_2[45:55, 45:55, 45:55] = 0
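# Carving an enclosed cavity raises the 3-D Euler number by one
# (objects - tunnels + cavities: 1 - 0 + 1 = 2).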
en = euler_number(SAMPLE_3D_2, 3)
assert en == 2
def test_extent():
extent = regionprops(SAMPLE)[0].extent
assert_almost_equal(extent, 0.4)
extent = regionprops(SAMPLE, spacing=(5, 0.2))[0].extent
assert_almost_equal(extent, 0.4)
def test_moments_hu():
hu = regionprops(SAMPLE)[0].moments_hu
# fmt: off
ref = cp.array([
3.27117627e-01,
2.63869194e-02,
2.35390060e-02,
1.23151193e-03,
1.38882330e-06,
-2.72586158e-05,
-6.48350653e-06
])
# fmt: on
# bug in OpenCV caused in Central Moments calculation?
assert_array_almost_equal(hu, ref)
with pytest.raises(NotImplementedError):
regionprops(SAMPLE, spacing=(2, 1))[0].moments_hu
def test_image():
img = regionprops(SAMPLE)[0].image
assert_array_equal(img, SAMPLE)
img = regionprops(SAMPLE_3D)[0].image
assert_array_equal(img, SAMPLE_3D[1:4, 1:3, 1:3])
def test_label():
label = regionprops(SAMPLE)[0].label
assert_array_equal(label, 1)
label = regionprops(SAMPLE_3D)[0].label
assert_array_equal(label, 1)
def test_area_filled():
area = regionprops(SAMPLE)[0].area_filled
assert area == cp.sum(SAMPLE)
spacing = (2, 1.2)
area = regionprops(SAMPLE, spacing=spacing)[0].area_filled
assert area == cp.sum(SAMPLE) * math.prod(spacing)
SAMPLE_mod = SAMPLE.copy()
SAMPLE_mod[7, -3] = 0
area = regionprops(SAMPLE_mod)[0].area_filled
assert area == cp.sum(SAMPLE)
area = regionprops(SAMPLE_mod, spacing=spacing)[0].area_filled
assert area == cp.sum(SAMPLE) * math.prod(spacing)
def test_image_filled():
img = regionprops(SAMPLE)[0].image_filled
assert_array_equal(img, SAMPLE)
img = regionprops(SAMPLE, spacing=(1, 4))[0].image_filled
assert_array_equal(img, SAMPLE)
def test_axis_major_length():
length = regionprops(SAMPLE)[0].axis_major_length
# MATLAB has different interpretation of ellipse than found in literature,
# here implemented as found in literature
target_length = 16.7924234999
assert_almost_equal(length, target_length, decimal=4)
length = regionprops(SAMPLE, spacing=(2, 2))[0].axis_major_length
assert_almost_equal(length, 2 * target_length, decimal=4)
from skimage.draw import ellipse
img = cp.zeros((20, 24), dtype=cp.uint8)
rr, cc = ellipse(11, 11, 7, 9, rotation=np.deg2rad(45))
img[rr, cc] = 1
target_length = regionprops(img, spacing=(1, 1))[0].axis_major_length
length_wo_spacing = regionprops(img[::2], spacing=(1, 1))[
0
].axis_minor_length
assert abs(length_wo_spacing - target_length) > 0.1
length = regionprops(img[:, ::2], spacing=(1, 2))[0].axis_major_length
assert_almost_equal(length, target_length, decimal=0)
def test_intensity_max():
intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
0
].intensity_max
assert_almost_equal(intensity, 2)
def test_intensity_mean():
intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
0
].intensity_mean
assert_almost_equal(intensity, 1.02777777777777)
def test_intensity_min():
intensity = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
0
].intensity_min
assert_almost_equal(intensity, 1)
def test_axis_minor_length():
length = regionprops(SAMPLE)[0].axis_minor_length
# MATLAB has different interpretation of ellipse than found in literature,
# here implemented as found in literature
target_length = 9.739302807263
assert_almost_equal(length, target_length, decimal=5)
length = regionprops(SAMPLE, spacing=(1.5, 1.5))[0].axis_minor_length
assert_almost_equal(length, 1.5 * target_length, decimal=5)
from skimage.draw import ellipse
img = cp.zeros((10, 12), dtype=np.uint8)
rr, cc = ellipse(5, 6, 3, 5, rotation=np.deg2rad(30))
img[rr, cc] = 1
target_length = regionprops(img, spacing=(1, 1))[0].axis_minor_length
length_wo_spacing = regionprops(img[::2], spacing=(1, 1))[
0
].axis_minor_length
assert abs(length_wo_spacing - target_length) > 0.1
length = regionprops(img[::2], spacing=(2, 1))[0].axis_minor_length
assert_almost_equal(length, target_length, decimal=1)
def test_moments():
m = regionprops(SAMPLE)[0].moments
# determined with OpenCV
assert_almost_equal(m[0, 0], 72.0)
assert_almost_equal(m[0, 1], 680.0)
assert_almost_equal(m[0, 2], 7682.0)
assert_almost_equal(m[0, 3], 95588.0)
assert_almost_equal(m[1, 0], 408.0)
assert_almost_equal(m[1, 1], 3766.0)
assert_almost_equal(m[1, 2], 43882.0)
assert_almost_equal(m[2, 0], 2748.0)
assert_almost_equal(m[2, 1], 24836.0)
assert_almost_equal(m[3, 0], 19776.0)
# Verify moment test function
Mpq = get_moment_function(SAMPLE, spacing=(1, 1))
assert_almost_equal(Mpq(0, 0), m[0, 0])
assert_almost_equal(Mpq(0, 1), m[0, 1])
assert_almost_equal(Mpq(0, 2), m[0, 2])
assert_almost_equal(Mpq(0, 3), m[0, 3])
assert_almost_equal(Mpq(1, 0), m[1, 0])
assert_almost_equal(Mpq(1, 1), m[1, 1])
assert_almost_equal(Mpq(1, 2), m[1, 2])
assert_almost_equal(Mpq(2, 0), m[2, 0])
assert_almost_equal(Mpq(2, 1), m[2, 1])
assert_almost_equal(Mpq(3, 0), m[3, 0])
# Test moment on spacing
spacing = (2, 0.3)
m = regionprops(SAMPLE, spacing=spacing)[0].moments
Mpq = get_moment_function(SAMPLE, spacing=spacing)
assert_almost_equal(m[0, 0], Mpq(0, 0), decimal=3)
assert_almost_equal(m[0, 1], Mpq(0, 1), decimal=3)
assert_almost_equal(m[0, 2], Mpq(0, 2), decimal=3)
assert_almost_equal(m[0, 3], Mpq(0, 3), decimal=3)
assert_almost_equal(m[1, 0], Mpq(1, 0), decimal=3)
assert_almost_equal(m[1, 1], Mpq(1, 1), decimal=3)
assert_almost_equal(m[1, 2], Mpq(1, 2), decimal=3)
assert_almost_equal(m[2, 0], Mpq(2, 0), decimal=3)
assert_almost_equal(m[2, 1], Mpq(2, 1), decimal=2)
assert_almost_equal(m[3, 0], Mpq(3, 0), decimal=3)
def test_moments_normalized():
nu = regionprops(SAMPLE)[0].moments_normalized
# determined with OpenCV
assert_almost_equal(nu[0, 2], 0.24301268861454037)
assert_almost_equal(nu[0, 3], -0.017278118992041805)
assert_almost_equal(nu[1, 1], -0.016846707818929982)
assert_almost_equal(nu[1, 2], 0.045473992910668816)
assert_almost_equal(nu[2, 0], 0.08410493827160502)
assert_almost_equal(nu[2, 1], -0.002899800614433943)
spacing = (3, 3)
nu = regionprops(SAMPLE, spacing=spacing)[0].moments_normalized
# Normalized moments are scale invariant.
assert_almost_equal(nu[0, 2], 0.24301268861454037)
assert_almost_equal(nu[0, 3], -0.017278118992041805)
assert_almost_equal(nu[1, 1], -0.016846707818929982)
assert_almost_equal(nu[1, 2], 0.045473992910668816)
assert_almost_equal(nu[2, 0], 0.08410493827160502)
assert_almost_equal(nu[2, 1], -0.002899800614433943)
def test_orientation():
orient = regionprops(SAMPLE)[0].orientation
# determined with MATLAB
target_orient = -1.4663278802756865
assert_almost_equal(orient, target_orient)
orient = regionprops(SAMPLE, spacing=(2, 2))[0].orientation
assert_almost_equal(orient, target_orient)
# test diagonal regions
diag = cp.eye(10, dtype=int)
orient_diag = regionprops(diag)[0].orientation
assert_almost_equal(orient_diag, -math.pi / 4)
orient_diag = regionprops(diag, spacing=(1, 2))[0].orientation
assert_almost_equal(orient_diag, np.arccos(0.5 / math.sqrt(1 + 0.5**2)))
orient_diag = regionprops(cp.flipud(diag))[0].orientation
assert_almost_equal(orient_diag, math.pi / 4)
orient_diag = regionprops(cp.flipud(diag), spacing=(1, 2))[0].orientation
assert_almost_equal(orient_diag, -np.arccos(0.5 / math.sqrt(1 + 0.5**2)))
orient_diag = regionprops(cp.fliplr(diag))[0].orientation
assert_almost_equal(orient_diag, math.pi / 4)
orient_diag = regionprops(cp.fliplr(diag), spacing=(1, 2))[0].orientation
assert_almost_equal(orient_diag, -np.arccos(0.5 / math.sqrt(1 + 0.5**2)))
orient_diag = regionprops(cp.fliplr(cp.flipud(diag)))[0].orientation
assert_almost_equal(orient_diag, -math.pi / 4)
orient_diag = regionprops(cp.fliplr(cp.flipud(diag)), spacing=(1, 2))[
0
].orientation
assert_almost_equal(orient_diag, np.arccos(0.5 / math.sqrt(1 + 0.5**2)))
def test_perimeter():
per = regionprops(SAMPLE)[0].perimeter
target_per = 55.2487373415
assert_almost_equal(per, target_per)
per = regionprops(SAMPLE, spacing=(2, 2))[0].perimeter
assert_almost_equal(per, 2 * target_per)
per = perimeter(SAMPLE.astype(float), neighborhood=8)
assert_almost_equal(per, 46.8284271247)
with pytest.raises(NotImplementedError):
per = regionprops(SAMPLE, spacing=(2, 1))[0].perimeter
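# Uniform spacing simply scales the perimeter estimate, while anisotropic
# spacing is not supported and raises NotImplementedError (checked above).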
def test_perimeter_crofton():
per = regionprops(SAMPLE)[0].perimeter_crofton
target_per_crof = 61.0800637973
assert_almost_equal(per, target_per_crof)
per = regionprops(SAMPLE, spacing=(2, 2))[0].perimeter_crofton
assert_almost_equal(per, 2 * target_per_crof)
per = perimeter_crofton(SAMPLE.astype("double"), directions=2)
assert_almost_equal(per, 64.4026493985)
with pytest.raises(NotImplementedError):
per = regionprops(SAMPLE, spacing=(2, 1))[0].perimeter_crofton
def test_solidity():
solidity = regionprops(SAMPLE)[0].solidity
target_solidity = 0.576
assert_almost_equal(solidity, target_solidity)
solidity = regionprops(SAMPLE, spacing=(3, 9))[0].solidity
assert_almost_equal(solidity, target_solidity)
def test_moments_weighted_central():
wmu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
0
].moments_weighted_central
# fmt: off
ref = cp.array(
[[7.4000000000e+01, 3.7303493627e-14, 1.2602837838e+03,
-7.6561796932e+02],
[-2.1316282073e-13, -8.7837837838e+01, 2.1571526662e+03,
-4.2385971907e+03],
[4.7837837838e+02, -1.4801314828e+02, 6.6989799420e+03,
-9.9501164076e+03],
[-7.5943608473e+02, -1.2714707125e+03, 1.5304076361e+04,
-3.3156729271e+04]])
# fmt: on
np.set_printoptions(precision=10)
assert_array_almost_equal(wmu, ref)
# Verify test function
centralMpq = get_central_moment_function(INTENSITY_SAMPLE, spacing=(1, 1))
assert_almost_equal(centralMpq(0, 0), ref[0, 0])
assert_almost_equal(centralMpq(0, 1), ref[0, 1])
assert_almost_equal(centralMpq(0, 2), ref[0, 2])
assert_almost_equal(centralMpq(0, 3), ref[0, 3])
assert_almost_equal(centralMpq(1, 0), ref[1, 0])
assert_almost_equal(centralMpq(1, 1), ref[1, 1])
assert_almost_equal(centralMpq(1, 2), ref[1, 2])
assert_almost_equal(centralMpq(1, 3), ref[1, 3])
assert_almost_equal(centralMpq(2, 0), ref[2, 0])
assert_almost_equal(centralMpq(2, 1), ref[2, 1])
assert_almost_equal(centralMpq(2, 2), ref[2, 2])
assert_almost_equal(centralMpq(2, 3), ref[2, 3])
assert_almost_equal(centralMpq(3, 0), ref[3, 0])
assert_almost_equal(centralMpq(3, 1), ref[3, 1])
assert_almost_equal(centralMpq(3, 2), ref[3, 2])
assert_almost_equal(centralMpq(3, 3), ref[3, 3])
# Test spacing
spacing = (3.2, 1.2)
wmu = regionprops(
SAMPLE, intensity_image=INTENSITY_SAMPLE, spacing=spacing
)[0].moments_weighted_central
centralMpq = get_central_moment_function(INTENSITY_SAMPLE, spacing=spacing)
assert_almost_equal(wmu[0, 0], centralMpq(0, 0))
assert_almost_equal(wmu[0, 1], centralMpq(0, 1))
assert_almost_equal(wmu[0, 2], centralMpq(0, 2))
assert_almost_equal(wmu[0, 3], centralMpq(0, 3))
assert_almost_equal(wmu[1, 0], centralMpq(1, 0))
assert_almost_equal(wmu[1, 1], centralMpq(1, 1))
assert_almost_equal(wmu[1, 2], centralMpq(1, 2))
assert_almost_equal(wmu[1, 3], centralMpq(1, 3))
assert_almost_equal(wmu[2, 0], centralMpq(2, 0))
assert_almost_equal(wmu[2, 1], centralMpq(2, 1))
assert_almost_equal(wmu[2, 2], centralMpq(2, 2))
assert_almost_equal(wmu[2, 3], centralMpq(2, 3))
assert_almost_equal(wmu[3, 0], centralMpq(3, 0))
assert_almost_equal(wmu[3, 1], centralMpq(3, 1))
assert_almost_equal(wmu[3, 2], centralMpq(3, 2))
assert_almost_equal(wmu[3, 3], centralMpq(3, 3))
def test_centroid_weighted():
centroid = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
0
].centroid_weighted
target_centroid = (5.540540540540, 9.445945945945)
centroid = tuple(float(c) for c in centroid)
assert_array_almost_equal(centroid, target_centroid)
# Verify test function
Mpq = get_moment_function(INTENSITY_SAMPLE, spacing=(1, 1))
cY = float(Mpq(0, 1) / Mpq(0, 0))
cX = float(Mpq(1, 0) / Mpq(0, 0))
assert_almost_equal((cX, cY), centroid)
# Test spacing
spacing = (2, 2)
Mpq = get_moment_function(INTENSITY_SAMPLE, spacing=spacing)
cY = float(Mpq(0, 1) / Mpq(0, 0))
cX = float(Mpq(1, 0) / Mpq(0, 0))
centroid = regionprops(
SAMPLE, intensity_image=INTENSITY_SAMPLE, spacing=spacing
)[0].centroid_weighted
centroid = tuple(float(c) for c in centroid)
assert_almost_equal(centroid, (cX, cY))
assert_almost_equal(centroid, tuple(2 * c for c in target_centroid))
spacing = (1.3, 0.7)
Mpq = get_moment_function(INTENSITY_SAMPLE, spacing=spacing)
cY = float(Mpq(0, 1) / Mpq(0, 0))
cX = float(Mpq(1, 0) / Mpq(0, 0))
centroid = regionprops(
SAMPLE, intensity_image=INTENSITY_SAMPLE, spacing=spacing
)[0].centroid_weighted
centroid = tuple(float(c) for c in centroid)
assert_almost_equal(centroid, (cX, cY))
def test_moments_weighted_hu():
whu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
0
].moments_weighted_hu
# fmt: off
ref = cp.array([
3.1750587329e-01,
2.1417517159e-02,
2.3609322038e-02,
1.2565683360e-03,
8.3014209421e-07,
-3.5073773473e-05,
-6.7936409056e-06
])
# fmt: on
assert_array_almost_equal(whu, ref)
with pytest.raises(NotImplementedError):
regionprops(SAMPLE, spacing=(2, 1))[0].moments_weighted_hu
def test_moments_weighted():
wm = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
0
].moments_weighted
# fmt: off
ref = cp.array(
[[7.4000000e+01, 6.9900000e+02, 7.8630000e+03, 9.7317000e+04],
[4.1000000e+02, 3.7850000e+03, 4.4063000e+04, 5.7256700e+05],
[2.7500000e+03, 2.4855000e+04, 2.9347700e+05, 3.9007170e+06],
[1.9778000e+04, 1.7500100e+05, 2.0810510e+06, 2.8078871e+07]]
)
# fmt: on
assert_array_almost_equal(wm, ref)
# Verify test function
Mpq = get_moment_function(INTENSITY_SAMPLE, spacing=(1, 1))
assert_almost_equal(Mpq(0, 0), ref[0, 0])
assert_almost_equal(Mpq(0, 1), ref[0, 1])
assert_almost_equal(Mpq(0, 2), ref[0, 2])
assert_almost_equal(Mpq(0, 3), ref[0, 3])
assert_almost_equal(Mpq(1, 0), ref[1, 0])
assert_almost_equal(Mpq(1, 1), ref[1, 1])
assert_almost_equal(Mpq(1, 2), ref[1, 2])
assert_almost_equal(Mpq(1, 3), ref[1, 3])
assert_almost_equal(Mpq(2, 0), ref[2, 0])
assert_almost_equal(Mpq(2, 1), ref[2, 1])
assert_almost_equal(Mpq(2, 2), ref[2, 2])
assert_almost_equal(Mpq(2, 3), ref[2, 3])
assert_almost_equal(Mpq(3, 0), ref[3, 0])
assert_almost_equal(Mpq(3, 1), ref[3, 1])
assert_almost_equal(Mpq(3, 2), ref[3, 2])
assert_almost_equal(Mpq(3, 3), ref[3, 3])
# Test spacing
spacing = (3.2, 1.2)
wmu = regionprops(
SAMPLE, intensity_image=INTENSITY_SAMPLE, spacing=spacing
)[0].moments_weighted
Mpq = get_moment_function(INTENSITY_SAMPLE, spacing=spacing)
assert_almost_equal(wmu[0, 0], Mpq(0, 0))
assert_almost_equal(wmu[0, 1], Mpq(0, 1))
assert_almost_equal(wmu[0, 2], Mpq(0, 2))
assert_almost_equal(wmu[0, 3], Mpq(0, 3))
assert_almost_equal(wmu[1, 0], Mpq(1, 0))
assert_almost_equal(wmu[1, 1], Mpq(1, 1))
assert_almost_equal(wmu[1, 2], Mpq(1, 2))
assert_almost_equal(wmu[1, 3], Mpq(1, 3))
assert_almost_equal(wmu[2, 0], Mpq(2, 0))
assert_almost_equal(wmu[2, 1], Mpq(2, 1))
assert_almost_equal(wmu[2, 2], Mpq(2, 2))
assert_almost_equal(wmu[2, 3], Mpq(2, 3))
assert_almost_equal(wmu[3, 0], Mpq(3, 0))
assert_almost_equal(wmu[3, 1], Mpq(3, 1))
assert_almost_equal(wmu[3, 2], Mpq(3, 2))
assert_almost_equal(wmu[3, 3], Mpq(3, 3), decimal=6)
def test_moments_weighted_normalized():
wnu = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[
0
].moments_weighted_normalized
# fmt: off
ref = np.array(
[[np.nan, np.nan, 0.2301467830, -0.0162529732], # noqa
[np.nan, -0.0160405109, 0.0457932622, np.nan], # noqa
[0.0873590903, -0.0031421072, np.nan, np.nan], # noqa
[-0.0161217406, np.nan, np.nan, np.nan]] # noqa
)
# fmt: on
assert_array_almost_equal(wnu, ref)
spacing = (3, 3)
wnu = regionprops(
SAMPLE, intensity_image=INTENSITY_SAMPLE, spacing=spacing
)[0].moments_weighted_normalized
# Normalized moments are scale invariant
assert_almost_equal(wnu[0, 2], 0.2301467830)
assert_almost_equal(wnu[0, 3], -0.0162529732)
assert_almost_equal(wnu[1, 1], -0.0160405109)
assert_almost_equal(wnu[1, 2], 0.0457932622)
assert_almost_equal(wnu[2, 0], 0.0873590903)
assert_almost_equal(wnu[2, 1], -0.0031421072)
assert_almost_equal(wnu[3, 0], -0.0161217406)
def test_label_sequence():
a = cp.empty((2, 2), dtype=int)
a[:, :] = 2
ps = regionprops(a)
assert len(ps) == 1
assert ps[0].label == 2
def test_pure_background():
a = cp.zeros((2, 2), dtype=int)
ps = regionprops(a)
assert len(ps) == 0
def test_invalid():
ps = regionprops(SAMPLE)
def get_intensity_image():
ps[0].image_intensity
with pytest.raises(AttributeError):
get_intensity_image()
def test_invalid_size():
wrong_intensity_sample = cp.array([[1], [1]])
with pytest.raises(ValueError):
regionprops(SAMPLE, wrong_intensity_sample)
def test_equals():
arr = cp.zeros((100, 100), dtype=int)
arr[0:25, 0:25] = 1
arr[50:99, 50:99] = 2
regions = regionprops(arr)
r1 = regions[0]
regions = regionprops(arr)
r2 = regions[0]
r3 = regions[1]
assert_equal(r1 == r2, True, "Same regionprops are not equal")
assert_equal(r1 != r3, True, "Different regionprops are equal")
def test_iterate_all_props():
region = regionprops(SAMPLE)[0]
p0 = {p: region[p] for p in region}
region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[0]
p1 = {p: region[p] for p in region}
assert len(p0) < len(p1)
def test_cache():
SAMPLE_mod = SAMPLE.copy()
region = regionprops(SAMPLE_mod)[0]
f0 = region.image_filled
region._label_image[:10] = 1
f1 = region.image_filled
# Changed underlying image, but cache keeps result the same
assert_array_equal(f0, f1)
# Now invalidate cache
region._cache_active = False
f1 = region.image_filled
assert cp.any(f0 != f1)
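# Properties are computed lazily and memoized on the RegionProperties object,
# so editing the underlying label image does not change an already-computed
# result until the cache is bypassed via `_cache_active = False`.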
def test_docstrings_and_props():
def foo():
"""foo"""
has_docstrings = bool(foo.__doc__)
region = regionprops(SAMPLE)[0]
docs = _parse_docs()
props = [m for m in dir(region) if not m.startswith("_")]
nr_docs_parsed = len(docs)
nr_props = len(props)
if has_docstrings:
assert_equal(nr_docs_parsed, nr_props)
ds = docs["moments_weighted_normalized"]
assert "iteration" not in ds
assert len(ds.split("\n")) > 3
else:
assert_equal(nr_docs_parsed, 0)
def test_props_to_dict():
regions = regionprops(SAMPLE)
out = _props_to_dict(regions)
assert out == {
"label": cp.array([1]),
"bbox-0": cp.array([0]),
"bbox-1": cp.array([0]),
"bbox-2": cp.array([10]),
"bbox-3": cp.array([18]),
}
regions = regionprops(SAMPLE)
out = _props_to_dict(
regions, properties=("label", "area", "bbox"), separator="+"
)
assert out == {
"label": cp.array([1]),
"area": cp.array([72]),
"bbox+0": cp.array([0]),
"bbox+1": cp.array([0]),
"bbox+2": cp.array([10]),
"bbox+3": cp.array([18]),
}
regions = regionprops(SAMPLE_MULTIPLE)
out = _props_to_dict(regions, properties=("coords",))
coords = np.empty(2, object)
coords[0] = cp.stack((cp.arange(10),) * 2, axis=-1)
coords[1] = cp.array([[3, 7], [4, 7]])
assert out["coords"].shape == coords.shape
assert_array_equal(out["coords"][0], coords[0])
assert_array_equal(out["coords"][1], coords[1])
def test_regionprops_table():
out = regionprops_table(SAMPLE)
assert out == {
"label": cp.array([1]),
"bbox-0": cp.array([0]),
"bbox-1": cp.array([0]),
"bbox-2": cp.array([10]),
"bbox-3": cp.array([18]),
}
out = regionprops_table(
SAMPLE, properties=("label", "area", "bbox"), separator="+"
)
assert out == {
"label": cp.array([1]),
"area": cp.array([72]),
"bbox+0": cp.array([0]),
"bbox+1": cp.array([0]),
"bbox+2": cp.array([10]),
"bbox+3": cp.array([18]),
}
out = regionprops_table(SAMPLE_MULTIPLE, properties=("coords",))
coords = np.empty(2, object)
coords[0] = cp.stack((cp.arange(10),) * 2, axis=-1)
coords[1] = cp.array([[3, 7], [4, 7]])
assert out["coords"].shape == coords.shape
assert_array_equal(out["coords"][0], coords[0])
assert_array_equal(out["coords"][1], coords[1])
def test_regionprops_table_deprecated_vector_property():
out = regionprops_table(SAMPLE, properties=("local_centroid",))
for key in out.keys():
# the key keeps the deprecated name rather than the new name (centroid_local)
assert key.startswith("local_centroid")
def test_regionprops_table_deprecated_scalar_property():
out = regionprops_table(SAMPLE, properties=("bbox_area",))
assert list(out.keys()) == ["bbox_area"]
def test_regionprops_table_equal_to_original():
regions = regionprops(SAMPLE, INTENSITY_FLOAT_SAMPLE)
out_table = regionprops_table(
SAMPLE, INTENSITY_FLOAT_SAMPLE, properties=COL_DTYPES.keys()
)
for prop, dtype in COL_DTYPES.items():
for i, reg in enumerate(regions):
rp = reg[prop]
if (
cp.isscalar(rp)
or (isinstance(rp, cp.ndarray) and rp.ndim == 0)
or prop in OBJECT_COLUMNS
or dtype is np.object_
):
assert_array_equal(rp, out_table[prop][i])
else:
shape = rp.shape if isinstance(rp, cp.ndarray) else (len(rp),)
for ind in np.ndindex(shape):
modified_prop = "-".join(map(str, (prop,) + ind))
loc = ind if len(ind) > 1 else ind[0]
assert_array_equal(rp[loc], out_table[modified_prop][i])
def test_regionprops_table_no_regions():
out = regionprops_table(
cp.zeros((2, 2), dtype=int),
properties=("label", "area", "bbox"),
separator="+",
)
assert len(out) == 6
assert len(out["label"]) == 0
assert len(out["area"]) == 0
assert len(out["bbox+0"]) == 0
assert len(out["bbox+1"]) == 0
assert len(out["bbox+2"]) == 0
assert len(out["bbox+3"]) == 0
def test_column_dtypes_complete():
assert set(COL_DTYPES.keys()).union(OBJECT_COLUMNS) == set(PROPS.values())
def test_column_dtypes_correct():
msg = "mismatch with expected type,"
region = regionprops(SAMPLE, intensity_image=INTENSITY_SAMPLE)[0]
for col in COL_DTYPES:
r = region[col]
if col in OBJECT_COLUMNS:
assert COL_DTYPES[col] == object
continue
# TODO: grlee77: check desired types for returned.
# e.g. currently inertia_tensor_eigvals returns a list of 0-dim
# arrays
if isinstance(r, (tuple, list)):
r0 = r[0]
if isinstance(r0, cp.ndarray) and r0.ndim == 0:
r0 = r0.item()
t = type(r0)
elif cp.isscalar(r):
t = type(r)
else:
t = type(r.ravel()[0].item())
if cp.issubdtype(t, cp.floating):
assert (
COL_DTYPES[col] == float
), f"{col} dtype {t} {msg} {COL_DTYPES[col]}"
elif cp.issubdtype(t, cp.integer):
assert (
COL_DTYPES[col] == int
), f"{col} dtype {t} {msg} {COL_DTYPES[col]}"
else:
assert False, f"{col} dtype {t} {msg} {COL_DTYPES[col]}"
def pixelcount(regionmask):
"""a short test for an extra property"""
return cp.sum(regionmask)
def intensity_median(regionmask, image_intensity):
return cp.median(image_intensity[regionmask])
def too_many_args(regionmask, image_intensity, superfluous):
return 1
def too_few_args():
return 1
def test_extra_properties():
region = regionprops(SAMPLE, extra_properties=(pixelcount,))[0]
assert region.pixelcount == cp.sum(SAMPLE == 1)
def test_extra_properties_intensity():
region = regionprops(
SAMPLE,
intensity_image=INTENSITY_SAMPLE,
extra_properties=(intensity_median,),
)[0]
assert region.intensity_median == cp.median(INTENSITY_SAMPLE[SAMPLE == 1])
@pytest.mark.parametrize("intensity_prop", _require_intensity_image)
def test_intensity_image_required(intensity_prop):
region = regionprops(SAMPLE)[0]
with pytest.raises(AttributeError) as e:
getattr(region, intensity_prop)
expected_error = (
f"Attribute '{intensity_prop}' unavailable when `intensity_image` has "
f"not been specified."
)
assert expected_error == str(e.value)
def test_extra_properties_no_intensity_provided():
with pytest.raises(AttributeError):
region = regionprops(SAMPLE, extra_properties=(intensity_median,))[0]
_ = region.intensity_median
def test_extra_properties_nr_args():
with pytest.raises(AttributeError):
region = regionprops(SAMPLE, extra_properties=(too_few_args,))[0]
_ = region.too_few_args
with pytest.raises(AttributeError):
region = regionprops(SAMPLE, extra_properties=(too_many_args,))[0]
_ = region.too_many_args
def test_extra_properties_mixed():
# mixed properties, with and without intensity
region = regionprops(
SAMPLE,
intensity_image=INTENSITY_SAMPLE,
extra_properties=(intensity_median, pixelcount),
)[0]
assert region.intensity_median == cp.median(INTENSITY_SAMPLE[SAMPLE == 1])
assert region.pixelcount == cp.sum(SAMPLE == 1)
def test_extra_properties_table():
out = regionprops_table(
SAMPLE_MULTIPLE,
intensity_image=INTENSITY_SAMPLE_MULTIPLE,
properties=("label",),
extra_properties=(intensity_median, pixelcount),
)
assert_array_almost_equal(out["intensity_median"], np.array([2.0, 4.0]))
assert_array_equal(out["pixelcount"], np.array([10, 2]))
def test_multichannel():
"""Test that computing multichannel properties works."""
astro = data.astronaut()[::4, ::4]
labels = slic(astro.astype(float), start_label=1)
astro = cp.asarray(astro)
astro_green = astro[..., 1]
labels = cp.asarray(labels)
segment_idx = int(cp.max(labels) // 2)
region = regionprops(
labels,
astro_green,
extra_properties=[intensity_median],
)[segment_idx]
region_multi = regionprops(
labels,
astro,
extra_properties=[intensity_median],
)[segment_idx]
for prop in list(PROPS.keys()) + ["intensity_median"]:
p = region[prop]
p_multi = region_multi[prop]
if isinstance(p, (list, tuple)):
p = tuple([cp.asnumpy(p_) for p_ in p])
p = np.stack(p)
if isinstance(p_multi, (list, tuple)):
p_multi = tuple([cp.asnumpy(p_) for p_ in p_multi])
p_multi = np.stack(p_multi)
if np.shape(p) == np.shape(p_multi):
# property does not depend on multiple channels
assert_array_equal(p, p_multi)
else:
# property uses multiple channels, returns props stacked along
# final axis
assert_array_equal(p, p_multi[..., 1])
def test_3d_ellipsoid_axis_lengths():
"""Verify that estimated axis lengths are correct.
Uses an ellipsoid at an arbitrary position and orientation.
"""
# generate a centered ellipsoid with non-uniform half-lengths (radii)
half_lengths = (20, 10, 50)
e = draw.ellipsoid(*half_lengths).astype(int)
# Pad by asymmetric amounts so the ellipse isn't centered. Also, pad enough
# that the rotated ellipse will still be within the original volume.
e = np.pad(e, pad_width=[(30, 18), (30, 12), (40, 20)], mode="constant")
e = cp.array(e)
# apply rotations to the ellipsoid
R = transform.EuclideanTransform(rotation=[0.2, 0.3, 0.4], dimensionality=3)
e = ndi.affine_transform(e, R.params)
# Compute regionprops
rp = regionprops(e)[0]
# estimate principal axis lengths via the inertia tensor eigenvalues
evs = rp.inertia_tensor_eigvals
axis_lengths = _inertia_eigvals_to_axes_lengths_3D(evs)
expected_lengths = sorted([2 * h for h in half_lengths], reverse=True)
for ax_len_expected, ax_len in zip(expected_lengths, axis_lengths):
# verify accuracy to within 1%
assert abs(ax_len - ax_len_expected) < 0.01 * ax_len_expected
# verify that the axis length regionprops also agree
assert abs(rp.axis_major_length - axis_lengths[0]) < 1e-7
assert abs(rp.axis_minor_length - axis_lengths[-1]) < 1e-7
| 0 |