# -*- coding: utf-8 -*-
"""
TODO:
- [ ] Replace internal padded slice with kwarray.padded_slice
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import ubelt as ub
import numpy as np
import kwarray
from distutils.version import LooseVersion
try:
import torch
import torch.nn.functional as F
TORCH_GRID_SAMPLE_HAS_ALIGN = LooseVersion(torch.__version__) >= LooseVersion('1.3.0')
except Exception:
torch = None
F = None
TORCH_GRID_SAMPLE_HAS_ALIGN = None
def _coordinate_grid(dims, align_corners=False):
"""
    Creates a homogeneous coordinate system.
Args:
dims (Tuple[int*]): height / width or depth / height / width
align_corners (bool):
            if True, returns a grid where the left and right corners are
            assigned the extreme values and intermediate values are
            interpolated.
Returns:
Tensor[shape=(3, *DIMS)]
References:
https://github.com/ClementPinard/SfmLearner-Pytorch/blob/master/inverse_warp.py
Example:
>>> # xdoctest: +IGNORE_WHITESPACE
>>> # xdoctest: +REQUIRES(module:torch)
>>> _coordinate_grid((2, 2))
tensor([[[0., 1.],
[0., 1.]],
[[0., 0.],
[1., 1.]],
[[1., 1.],
[1., 1.]]])
>>> _coordinate_grid((2, 2, 2))
>>> _coordinate_grid((2, 2), align_corners=True)
tensor([[[0., 2.],
[0., 2.]],
[[0., 0.],
[2., 2.]],
[[1., 1.],
[1., 1.]]])
"""
if align_corners:
def _corner_grid(d):
return torch.linspace(0, d, d)
_grid_fn = _corner_grid
else:
def _disc_grid(d):
return torch.arange(0, d)
_grid_fn = _disc_grid
if len(dims) == 2:
h, w = dims
h_range = _grid_fn(h).view(h, 1).expand(h, w).float() # [H, W]
w_range = _grid_fn(w).view(1, w).expand(h, w).float() # [H, W]
ones = torch.ones(h, w)
pixel_coords = torch.stack((w_range, h_range, ones), dim=0) # [3, H, W]
elif len(dims) == 3:
d, h, w = dims
d_range = _grid_fn(d).view(d, 1, 1).expand(d, h, w).float() # [D, H, W]
h_range = _grid_fn(h).view(1, h, 1).expand(d, h, w).float() # [D, H, W]
w_range = _grid_fn(w).view(1, 1, w).expand(d, h, w).float() # [D, H, W]
ones = torch.ones(d, h, w)
pixel_coords = torch.stack((w_range, h_range, d_range, ones), dim=0) # [4, D, H, W]
else:
raise NotImplementedError('Can only work with 2d and 3d dims')
return pixel_coords
def warp_image(inputs, mat, **kw):
raise AssertionError('do not use')
# import kwarray
# _impl = kwarray.ArrayAPI.coerce(inputs)
# inputs = kwarray.atleast_nd(inputs, 3)
# tensor = inputs.transpose(2, 0, 1)
# return warp_tensor(tensor, mat, **kw)
def warp_tensor(inputs, mat, output_dims, mode='bilinear',
padding_mode='zeros', isinv=False, ishomog=None,
align_corners=False, new_mode=False):
r"""
A pytorch implementation of warp affine that works similarly to
cv2.warpAffine / cv2.warpPerspective.
It is possible to use 3x3 transforms to warp 2D image data.
It is also possible to use 4x4 transforms to warp 3D volumetric data.
Args:
inputs (Tensor[..., *DIMS]): tensor to warp.
Up to 3 (determined by output_dims) of the trailing space-time
dimensions are warped. Best practice is to use inputs with the
shape in [B, C, *DIMS].
mat (Tensor):
either a 3x3 / 4x4 single transformation matrix to apply to all
inputs or Bx3x3 or Bx4x4 tensor that specifies a transformation
matrix for each batch item.
output_dims (Tuple[int*]):
The output space-time dimensions. This can either be in the form
(W,), (H, W), or (D, H, W).
mode (str):
Can be bilinear or nearest.
See `torch.nn.functional.grid_sample`
padding_mode (str):
Can be zeros, border, or reflection.
See `torch.nn.functional.grid_sample`.
isinv (bool, default=False):
Set to true if `mat` is the inverse transform
ishomog (bool, default=None):
Set to True if the matrix is non-affine
align_corners (bool, default=False):
Note the default of False does not work correctly with grid_sample
            in torch <= 1.2, but using align_corners=True isn't typically what
you want either. We will be stuck with buggy functionality until
torch 1.3 is released.
However, using align_corners=0 does seem to reasonably correspond
with opencv behavior.
Notes:
Also, it may be possible to speed up the code with `F.affine_grid`
        KNOWN ISSUE: There appears to be some difference from cv2.warpAffine
        when rotation or shear are non-zero. The cause is unclear; it may
        just be floating point error.
TODO:
- [ ] FIXME: see example in Mask.scale where this algo breaks when
the matrix is `2x3`
        - [ ] Make this algo work when the matrix is 2x2
References:
https://discuss.pytorch.org/t/affine-transformation-matrix-paramters-conversion/19522
https://github.com/pytorch/pytorch/issues/15386
Example:
>>> # Create a relatively simple affine matrix
>>> # xdoctest: +REQUIRES(module:torch)
>>> import skimage
>>> mat = torch.FloatTensor(skimage.transform.AffineTransform(
>>> translation=[1, -1], scale=[.532, 2],
>>> rotation=0, shear=0,
>>> ).params)
>>> # Create inputs and an output dimension
>>> input_shape = [1, 1, 4, 5]
>>> inputs = torch.arange(int(np.prod(input_shape))).reshape(*input_shape).float()
>>> output_dims = (11, 7)
>>> # Warp with our code
>>> result1 = warp_tensor(inputs, mat, output_dims=output_dims, align_corners=0)
>>> print('result1 =\n{}'.format(ub.repr2(result1.cpu().numpy()[0, 0], precision=2)))
>>> # Warp with opencv
>>> import cv2
>>> cv2_M = mat.cpu().numpy()[0:2]
>>> src = inputs[0, 0].cpu().numpy()
>>> dsize = tuple(output_dims[::-1])
>>> result2 = cv2.warpAffine(src, cv2_M, dsize=dsize, flags=cv2.INTER_LINEAR)
>>> print('result2 =\n{}'.format(ub.repr2(result2, precision=2)))
>>> # Ensure the results are the same (up to floating point errors)
>>> assert np.all(np.isclose(result1[0, 0].cpu().numpy(), result2, atol=1e-2, rtol=1e-2))
Example:
>>> # Create a relatively simple affine matrix
>>> # xdoctest: +REQUIRES(module:torch)
>>> import skimage
>>> mat = torch.FloatTensor(skimage.transform.AffineTransform(
>>> rotation=0.01, shear=0.1).params)
>>> # Create inputs and an output dimension
>>> input_shape = [1, 1, 4, 5]
>>> inputs = torch.arange(int(np.prod(input_shape))).reshape(*input_shape).float()
>>> output_dims = (11, 7)
>>> # Warp with our code
>>> result1 = warp_tensor(inputs, mat, output_dims=output_dims)
>>> print('result1 =\n{}'.format(ub.repr2(result1.cpu().numpy()[0, 0], precision=2, supress_small=True)))
>>> print('result1.shape = {}'.format(result1.shape))
>>> # Warp with opencv
>>> import cv2
>>> cv2_M = mat.cpu().numpy()[0:2]
>>> src = inputs[0, 0].cpu().numpy()
>>> dsize = tuple(output_dims[::-1])
>>> result2 = cv2.warpAffine(src, cv2_M, dsize=dsize, flags=cv2.INTER_LINEAR)
>>> print('result2 =\n{}'.format(ub.repr2(result2, precision=2)))
>>> print('result2.shape = {}'.format(result2.shape))
>>> # Ensure the results are the same (up to floating point errors)
>>> # NOTE: The floating point errors seem to be significant for rotation / shear
>>> assert np.all(np.isclose(result1[0, 0].cpu().numpy(), result2, atol=1, rtol=1e-2))
Example:
>>> # Create a random affine matrix
>>> # xdoctest: +REQUIRES(module:torch)
>>> import skimage
>>> rng = np.random.RandomState(0)
>>> mat = torch.FloatTensor(skimage.transform.AffineTransform(
>>> translation=rng.randn(2), scale=1 + rng.randn(2),
>>> rotation=rng.randn() / 10., shear=rng.randn() / 10.,
>>> ).params)
>>> # Create inputs and an output dimension
>>> input_shape = [1, 1, 5, 7]
>>> inputs = torch.arange(int(np.prod(input_shape))).reshape(*input_shape).float()
>>> output_dims = (3, 11)
>>> # Warp with our code
>>> result1 = warp_tensor(inputs, mat, output_dims=output_dims, align_corners=0)
>>> print('result1 =\n{}'.format(ub.repr2(result1.cpu().numpy()[0, 0], precision=2)))
>>> # Warp with opencv
>>> import cv2
>>> cv2_M = mat.cpu().numpy()[0:2]
>>> src = inputs[0, 0].cpu().numpy()
>>> dsize = tuple(output_dims[::-1])
>>> result2 = cv2.warpAffine(src, cv2_M, dsize=dsize, flags=cv2.INTER_LINEAR)
>>> print('result2 =\n{}'.format(ub.repr2(result2, precision=2)))
>>> # Ensure the results are the same (up to floating point errors)
>>> # NOTE: The errors seem to be significant for rotation / shear
>>> assert np.all(np.isclose(result1[0, 0].cpu().numpy(), result2, atol=1, rtol=1e-2))
Example:
>>> # Test 3D warping with identity
>>> # xdoctest: +REQUIRES(module:torch)
>>> mat = torch.eye(4)
>>> input_dims = [2, 3, 3]
>>> output_dims = (2, 3, 3)
>>> input_shape = [1, 1] + input_dims
>>> inputs = torch.arange(int(np.prod(input_shape))).reshape(*input_shape).float()
>>> result = warp_tensor(inputs, mat, output_dims=output_dims)
>>> print('result =\n{}'.format(ub.repr2(result.cpu().numpy()[0, 0], precision=2)))
>>> assert torch.all(inputs == result)
Example:
>>> # Test 3D warping with scaling
>>> # xdoctest: +REQUIRES(module:torch)
>>> mat = torch.FloatTensor([
>>> [0.8, 0, 0, 0],
>>> [ 0, 1.0, 0, 0],
>>> [ 0, 0, 1.2, 0],
>>> [ 0, 0, 0, 1],
>>> ])
>>> input_dims = [2, 3, 3]
>>> output_dims = (2, 3, 3)
>>> input_shape = [1, 1] + input_dims
>>> inputs = torch.arange(int(np.prod(input_shape))).reshape(*input_shape).float()
>>> result = warp_tensor(inputs, mat, output_dims=output_dims, align_corners=0)
>>> print('result =\n{}'.format(ub.repr2(result.cpu().numpy()[0, 0], precision=2)))
result =
np.array([[[ 0. , 1.25, 1. ],
[ 3. , 4.25, 2.5 ],
[ 6. , 7.25, 4. ]],
...
[[ 7.5 , 8.75, 4.75],
[10.5 , 11.75, 6.25],
[13.5 , 14.75, 7.75]]], dtype=np.float32)
Example:
>>> # xdoctest: +REQUIRES(module:torch)
>>> mat = torch.eye(3)
>>> input_dims = [5, 7]
>>> output_dims = (11, 7)
>>> for n_prefix_dims in [0, 1, 2, 3, 4, 5]:
>>> input_shape = [2] * n_prefix_dims + input_dims
>>> inputs = torch.arange(int(np.prod(input_shape))).reshape(*input_shape).float()
>>> result = warp_tensor(inputs, mat, output_dims=output_dims)
>>> #print('result =\n{}'.format(ub.repr2(result.cpu().numpy(), precision=2)))
>>> print(result.shape)
Example:
>>> # xdoctest: +REQUIRES(module:torch)
>>> mat = torch.eye(4)
>>> input_dims = [5, 5, 5]
>>> output_dims = (6, 6, 6)
>>> for n_prefix_dims in [0, 1, 2, 3, 4, 5]:
>>> input_shape = [2] * n_prefix_dims + input_dims
>>> inputs = torch.arange(int(np.prod(input_shape))).reshape(*input_shape).float()
>>> result = warp_tensor(inputs, mat, output_dims=output_dims)
>>> #print('result =\n{}'.format(ub.repr2(result.cpu().numpy(), precision=2)))
>>> print(result.shape)
Ignore:
import xdev
globals().update(xdev.get_func_kwargs(warp_tensor))
>>> # xdoctest: +REQUIRES(module:torch)
>>> import cv2
>>> inputs = torch.arange(9).view(1, 1, 3, 3).float() + 2
>>> input_dims = inputs.shape[2:]
>>> #output_dims = (6, 6)
>>> def fmt(a):
>>> return ub.repr2(a.numpy(), precision=2)
>>> s = 2.5
>>> output_dims = tuple(np.round((np.array(input_dims) * s)).astype(int).tolist())
>>> mat = torch.FloatTensor([[s, 0, 0], [0, s, 0], [0, 0, 1]])
>>> inv = mat.inverse()
>>> warp_tensor(inputs, mat, output_dims)
>>> print('## INPUTS')
>>> print(fmt(inputs))
>>> print('\nalign_corners=True')
>>> print('----')
>>> print('## warp_tensor, align_corners=True')
>>> print(fmt(warp_tensor(inputs, inv, output_dims, isinv=True, align_corners=True)))
>>> print('## interpolate, align_corners=True')
>>> print(fmt(F.interpolate(inputs, output_dims, mode='bilinear', align_corners=True)))
>>> print('\nalign_corners=False')
>>> print('----')
>>> print('## warp_tensor, align_corners=False, new_mode=False')
>>> print(fmt(warp_tensor(inputs, inv, output_dims, isinv=True, align_corners=False)))
>>> print('## warp_tensor, align_corners=False, new_mode=True')
>>> print(fmt(warp_tensor(inputs, inv, output_dims, isinv=True, align_corners=False, new_mode=True)))
>>> print('## interpolate, align_corners=False')
>>> print(fmt(F.interpolate(inputs, output_dims, mode='bilinear', align_corners=False)))
>>> print('## interpolate (scale), align_corners=False')
>>> print(ub.repr2(F.interpolate(inputs, scale_factor=s, mode='bilinear', align_corners=False).numpy(), precision=2))
>>> cv2_M = mat.cpu().numpy()[0:2]
>>> src = inputs[0, 0].cpu().numpy()
>>> dsize = tuple(output_dims[::-1])
>>> print('\nOpen CV warp Result')
>>> result2 = (cv2.warpAffine(src, cv2_M, dsize=dsize, flags=cv2.INTER_LINEAR))
>>> print('result2 =\n{}'.format(ub.repr2(result2, precision=2)))
"""
if mode == 'linear':
mode = 'bilinear'
output_dims = tuple(map(int, output_dims))
# Determine the number of space-time dimensions
ndims = len(output_dims)
# https://discuss.pytorch.org/t/affine-transformation-matrix-paramters-conversion/19522
input_dims = inputs.shape[-ndims:]
prefix_dims = inputs.shape[:-ndims]
# Normalize the inputs so they are in 4D or 5D standard form
# I.e. either [B, C, H, W] or [B, C, D, H, W]
# We need exactly two non-spacetime (prefix) dims
if len(prefix_dims) < 2:
# Create a dummy batch / channel dimension
_part1 = [1] * (2 - len(prefix_dims))
_part2 = [-1] * len(inputs.shape)
_input_expander = _part1 + _part2
inputs_ = inputs.expand(*_input_expander)
elif len(prefix_dims) > 2:
fake_b = np.prod(prefix_dims[:-1])
fake_c = prefix_dims[-1]
        # Consolidate leading dimensions into the batch dim
inputs_ = inputs.view(fake_b, fake_c, *input_dims)
else:
inputs_ = inputs
device = inputs.device
input_size = torch.Tensor(np.array(input_dims[::-1]))[None, :, None]
input_size = input_size.to(device) # [1, ndims, 1]
if len(mat.shape) not in [2, 3]:
raise ValueError('Invalid mat shape')
    if mat.shape[-1] not in [3, 4] or mat.shape[-2] not in [2, 3, 4]:
# if tuple(mat.shape) != (2, 2):
raise ValueError(
'mat must have shape: '
# '(..., 2, 2) or '
'(..., 2, 3) or (..., 3, 3)'
' or (..., 3, 4) or (..., 4, 4)'
)
# Ensure that mat is a 3x3 matrix, and check if it is affine or projective
if mat.shape[-2] != mat.shape[-1]:
_homog_row = [0] * (mat.shape[-1] - 1) + [1]
homog_row = torch.Tensor(_homog_row).to(mat.device)
homog_row = homog_row.expand_as(mat[..., 0:1, :])
mat = torch.cat([homog_row, mat], dim=len(mat.shape) - 2)
ishomog = False
if ishomog is None:
ishomog = False # set to true for non-affine
if mat.shape[-2] == 3:
if not torch.all(mat[-2] != torch.Tensor([0, 0, 1])):
ishomog = True
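    # Map output coordinates back into the input frame using the inverse transform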
inv = mat if isinv else mat.inverse()
if len(inv.shape) == 2:
inv = inv[None, :]
if inv.device != device:
inv = inv.to(device)
# Construct a homogenous coordinate system in the output frame where the
# input is aligned with the top left corner.
# X = ndims + 1 if ishomog else ndims
X = ndims + 1
if not TORCH_GRID_SAMPLE_HAS_ALIGN:
import warnings
warnings.warn('cannot use new mode in warp_tensor when torch < 1.3')
new_mode = False
# NOTE: grid_sample in torch<1.3 does not support align_corners=False correctly
unwarped_coords = _coordinate_grid(output_dims, align_corners=align_corners) # [X, *DIMS]
unwarped_coords = unwarped_coords.to(device)
unwarped_coords_ = unwarped_coords.view(1, X, -1) # [1, X, prod(DIMS)]
warped_coords = inv.matmul(unwarped_coords_)
if ishomog:
        # If we had a projective transform, unhomogenize the coordinates
warped_coords = warped_coords[:, 0:ndims] / warped_coords[:, ndims]
else:
# For affine we can simply discard the homogenous component
warped_coords = warped_coords[:, 0:ndims]
    # Normalize the warped coordinates that align with the input to [-1, +1]
    # Anything outside of the input range is mapped outside of [-1, +1]
    if align_corners:
        grid_coords = warped_coords * (2.0 / (input_size))  # normalize to [0, 2]
        grid_coords -= 1.0  # shift to [-1, +1]
    else:
        grid_coords = warped_coords * (2.0 / (input_size - 1.0))  # normalize to [0, 2]
        grid_coords -= 1.0  # shift to [-1, +1]
if new_mode:
        # HACK: For whatever reason the -1,+1 extremes don't point to the
        # extreme pixels, but applying this squish factor seems to help.
# The idea seems to be that if the input dims are D x D the
# ((D - 1) / D)-th value is what points to the middle of the bottom
# right input pixel and not (+1, +1).
# Need to figure out what's going on in a more principled way.
input_dims_ = torch.FloatTensor(list(input_dims))
squish = ((input_dims_ - 1.0) / (input_dims_))
grid_coords = grid_coords * squish[None, :, None]
if False:
# Debug output coords
print('### unwarped')
print(unwarped_coords[0:2])
print('### warped')
print(warped_coords.view(2, *output_dims))
print('### grid')
print(grid_coords.view(2, *output_dims))
F.grid_sample(inputs_, torch.FloatTensor(
[[[[-1.0, -1.0]]]]), mode='bilinear', align_corners=False)
F.grid_sample(inputs_, torch.FloatTensor(
[[[[-2 / 3, -2 / 3]]]]), mode='bilinear', align_corners=False)
F.grid_sample(inputs_, torch.FloatTensor(
[[[[0.0, 0.0]]]]), mode='bilinear', align_corners=False)
F.grid_sample(inputs_, torch.FloatTensor(
[[[[2 / 3, 2 / 3]]]]), mode='bilinear', align_corners=False)
F.grid_sample(inputs_, torch.FloatTensor(
[[[[1.0, 1.0]]]]), mode='bilinear', align_corners=False)
F.grid_sample(inputs_[:, :, 0:2, 0:2], torch.FloatTensor(
[[[[-1 / 2, -1 / 2]]]]), mode='bilinear', align_corners=False)
inputs_ = torch.arange(16).view(1, 1, 4, 4).float() + 1
F.grid_sample(inputs_, torch.FloatTensor(
[[[[-3 / 4, -3 / 4]]]]), mode='bilinear', align_corners=False)
for f in np.linspace(0.5, 1.0, 10):
print('f = {!r}'.format(f))
print(F.grid_sample(inputs_, torch.FloatTensor(
[[[[f, f]]]]), mode='bilinear', align_corners=False))
    # The warped coordinate [-1, -1] references the left-top pixel of the
    # input; analogously [+1, +1] references the right-bottom pixel of the
    # input.
    # Note: [-1, -1] refers to the center of the first pixel, not its edge.
# See:
# https://github.com/pytorch/pytorch/issues/20785
# https://github.com/pytorch/pytorch/pull/23923
# https://github.com/pytorch/pytorch/pull/24929
# https://user-images.githubusercontent.com/9757500/58150486-c5315900-7c34-11e9-9466-24f2bd431fa4.png
# # Note: Was unable to quite figure out how to use F.affine_grid
# gride_shape = torch.Size((B, C,) + tuple(output_dims))
# grid = F.affine_grid(inv[None, 0:2], gride_shape)
# outputs = F.grid_sample(inputs, grid)
# return outputs
# Reshape to dimensions compatible with grid_sample
grid_coords = grid_coords.transpose(1, 2) # swap space/coord dims
_reshaper = [1] + list(output_dims) + [ndims]
grid_coords = grid_coords.reshape(*_reshaper) # Unpack dims
_expander = [inputs_.shape[0]] + list(output_dims) + [ndims]
grid_coords = grid_coords.expand(*_expander)
# grid_coords = grid_coords.to(device)
# TODO: pass align_corners when supported in torch 1.3
# Note: enabling this breaks tests and backwards compat, so
# verify there are no problems before enabling this.
if new_mode and TORCH_GRID_SAMPLE_HAS_ALIGN:
# the new grid sample allows you to set align_corners, but I don't
# remember if the previous logic depends on the old behavior.
outputs_ = F.grid_sample(inputs_, grid_coords, mode=mode,
padding_mode=padding_mode,
align_corners=bool(align_corners))
else:
# The old grid sample always had align_corners=True
outputs_ = F.grid_sample(inputs_, grid_coords, mode=mode,
padding_mode=padding_mode,
align_corners=True)
# Unpack outputs to match original input shape
final_dims = list(prefix_dims) + list(output_dims)
outputs = outputs_.view(*final_dims)
return outputs
def subpixel_align(dst, src, index, interp_axes=None):
"""
Returns an aligned version of the source tensor and destination index.
Used as the backend to implement other subpixel functions like:
subpixel_accum, subpixel_maximum.
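
    A minimal example of the alignment contract (this mirrors the 1D case
    shown in subpixel_accum, assuming numpy inputs):

    Example:
        >>> dst = np.zeros(5)
        >>> src = np.ones(2)
        >>> index = [slice(1.5, 3.5)]
        >>> aligned_src, aligned_index = subpixel_align(dst, src, index)
        >>> print(aligned_src.tolist())
        [0.5, 1.0, 0.5]
        >>> print(int(aligned_index[0].start), int(aligned_index[0].stop))
        1 4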
"""
if interp_axes is None:
# Assume spatial dimensions are trailing
interp_axes = len(dst.shape) + np.arange(-min(2, len(index)), 0)
raw_subpixel_starts = np.array([0 if sl.start is None else sl.start
for sl in index])
raw_subpixel_stops = np.array([dst.shape[i] if sl.stop is None else sl.stop
for i, sl in enumerate(index)])
raw_extent = raw_subpixel_stops - raw_subpixel_starts
if not ub.iterable(src):
# Broadcast scalars
impl = kwarray.ArrayAPI.impl(dst)
shape = tuple(raw_extent.astype(int).tolist())
src = impl.full(shape, dtype=dst.dtype, fill_value=src)
if not np.all(np.isclose(src.shape, raw_extent, atol=0.3)):
raise ValueError(
'Got src.shape = {}, but the raw slice extent was {}'.format(
tuple(src.shape), tuple(raw_extent)))
if True:
# check that all non interp slices are integral
noninterp_axes = np.where(~kwarray.boolmask(interp_axes, len(dst.shape)))[0]
for i in noninterp_axes:
assert raw_subpixel_starts[i] % 1 == 0
assert raw_subpixel_stops[i] % 1 == 0
# Clip off any out of bounds
subpixel_st, extra_padding = _rectify_slice(
dst.shape, raw_subpixel_starts, raw_subpixel_stops)
subpixel_starts = np.array([a[0] for a in subpixel_st])
subpixel_stops = np.array([a[1] for a in subpixel_st])
subpixel_pad_left = np.array([a[0] for a in extra_padding])
# subpixel_pad_right = np.array([a[1] for a in extra_padding])
# Any fractional start dimension will be a positive translate
translation = np.zeros_like(subpixel_starts, dtype=float)
translation += (subpixel_starts % 1)
# Any value that is cutoff on the left is a negative translate
translation -= subpixel_pad_left
# Construct the slice in dst that will correspond to the aligned src
aligned_index = tuple([
slice(s, t) for s, t in zip(np.floor(subpixel_starts).astype(int),
np.ceil(subpixel_stops).astype(int))])
# Align the source coordinates with the destination coordinates
output_shape = [sl.stop - sl.start for sl in aligned_index]
translation_ = [translation[i] for i in interp_axes]
aligned_src = subpixel_translate(src, translation_,
output_shape=output_shape,
interp_axes=interp_axes)
return aligned_src, aligned_index
def subpixel_set(dst, src, index, interp_axes=None):
"""
    Set the values from the source array into the destination array at a
    particular subpixel index.
Args:
        dst (ArrayLike): destination array
src (ArrayLike): source array containing values to add
index (Tuple[slice]): subpixel slice into dst that corresponds with src
interp_axes (tuple): specify which axes should be spatially interpolated
TODO:
- [ ]: allow index to be a sequence indices
Example:
>>> import kwimage
>>> dst = np.zeros(5) + .1
>>> src = np.ones(2)
>>> index = [slice(1.5, 3.5)]
>>> kwimage.util_warp.subpixel_set(dst, src, index)
>>> print(ub.repr2(dst, precision=2, with_dtype=0))
np.array([0.1, 0.5, 1. , 0.5, 0.1])
"""
aligned_src, aligned_index = subpixel_align(dst, src, index, interp_axes)
# accumulate the newly aligned source array
try:
dst[aligned_index] = aligned_src
except RuntimeError:
try:
print('dst.shape = {!r}'.format(dst.shape))
print('dst.dtype = {!r}'.format(dst.dtype))
print('dst.device = {!r}'.format(dst.device))
print('aligned_src.shape = {!r}'.format(aligned_src.shape))
print('aligned_src.dtype = {!r}'.format(aligned_src.dtype))
print('aligned_src.device = {!r}'.format(aligned_src.device))
print('src.shape = {!r}'.format(src.shape))
print('src.dtype = {!r}'.format(src.dtype))
print('src.device = {!r}'.format(src.device))
except Exception:
print('unexpected numpy')
raise
return dst
def subpixel_accum(dst, src, index, interp_axes=None):
"""
Add the source values array into the destination array at a particular
subpixel index.
Args:
dst (ArrayLike): destination accumulation array
src (ArrayLike): source array containing values to add
index (Tuple[slice]): subpixel slice into dst that corresponds with src
interp_axes (tuple): specify which axes should be spatially interpolated
Notes:
Inputs:
+---+---+---+---+---+ dst.shape = (5,)
+---+---+ src.shape = (2,)
|=======| index = 1.5:3.5
Subpixel shift the source by -0.5.
When the index is non-integral, pad the aligned src with an extra value
to ensure all dst pixels that would be influenced by the smaller
subpixel shape are influenced by the aligned src. Note that we are not
scaling.
+---+---+---+ aligned_src.shape = (3,)
|===========| aligned_index = 1:4
Example:
>>> dst = np.zeros(5)
>>> src = np.ones(2)
>>> index = [slice(1.5, 3.5)]
>>> subpixel_accum(dst, src, index)
>>> print(ub.repr2(dst, precision=2, with_dtype=0))
np.array([0. , 0.5, 1. , 0.5, 0. ])
Example:
>>> dst = np.zeros((6, 6))
>>> src = np.ones((3, 3))
>>> index = (slice(1.5, 4.5), slice(1, 4))
>>> subpixel_accum(dst, src, index)
>>> print(ub.repr2(dst, precision=2, with_dtype=0))
np.array([[0. , 0. , 0. , 0. , 0. , 0. ],
[0. , 0.5, 0.5, 0.5, 0. , 0. ],
[0. , 1. , 1. , 1. , 0. , 0. ],
[0. , 1. , 1. , 1. , 0. , 0. ],
[0. , 0.5, 0.5, 0.5, 0. , 0. ],
[0. , 0. , 0. , 0. , 0. , 0. ]])
>>> # xdoctest: +REQUIRES(module:torch)
>>> dst = torch.zeros((1, 3, 6, 6))
>>> src = torch.ones((1, 3, 3, 3))
>>> index = (slice(None), slice(None), slice(1.5, 4.5), slice(1.25, 4.25))
>>> subpixel_accum(dst, src, index)
>>> print(ub.repr2(dst.numpy()[0, 0], precision=2, with_dtype=0))
np.array([[0. , 0. , 0. , 0. , 0. , 0. ],
[0. , 0.38, 0.5 , 0.5 , 0.12, 0. ],
[0. , 0.75, 1. , 1. , 0.25, 0. ],
[0. , 0.75, 1. , 1. , 0.25, 0. ],
[0. , 0.38, 0.5 , 0.5 , 0.12, 0. ],
[0. , 0. , 0. , 0. , 0. , 0. ]])
Doctest:
>>> # TODO: move to a unit test file
>>> subpixel_accum(np.zeros(5), np.ones(2), [slice(1.5, 3.5)]).tolist()
[0.0, 0.5, 1.0, 0.5, 0.0]
>>> subpixel_accum(np.zeros(5), np.ones(2), [slice(0, 2)]).tolist()
[1.0, 1.0, 0.0, 0.0, 0.0]
>>> subpixel_accum(np.zeros(5), np.ones(3), [slice(.5, 3.5)]).tolist()
[0.5, 1.0, 1.0, 0.5, 0.0]
>>> subpixel_accum(np.zeros(5), np.ones(3), [slice(-1, 2)]).tolist()
[1.0, 1.0, 0.0, 0.0, 0.0]
>>> subpixel_accum(np.zeros(5), np.ones(3), [slice(-1.5, 1.5)]).tolist()
[1.0, 0.5, 0.0, 0.0, 0.0]
>>> subpixel_accum(np.zeros(5), np.ones(3), [slice(10, 13)]).tolist()
[0.0, 0.0, 0.0, 0.0, 0.0]
>>> subpixel_accum(np.zeros(5), np.ones(3), [slice(3.25, 6.25)]).tolist()
[0.0, 0.0, 0.0, 0.75, 1.0]
>>> subpixel_accum(np.zeros(5), np.ones(3), [slice(4.9, 7.9)]).tolist()
[0.0, 0.0, 0.0, 0.0, 0.099...]
>>> subpixel_accum(np.zeros(5), np.ones(9), [slice(-1.5, 7.5)]).tolist()
[1.0, 1.0, 1.0, 1.0, 1.0]
>>> subpixel_accum(np.zeros(5), np.ones(9), [slice(2.625, 11.625)]).tolist()
[0.0, 0.0, 0.375, 1.0, 1.0]
>>> subpixel_accum(np.zeros(5), 1, [slice(2.625, 11.625)]).tolist()
[0.0, 0.0, 0.375, 1.0, 1.0]
"""
aligned_src, aligned_index = subpixel_align(dst, src, index, interp_axes)
# accumulate the newly aligned source array
try:
dst[aligned_index] += aligned_src
except RuntimeError:
try:
print('dst.shape = {!r}'.format(dst.shape))
print('dst.dtype = {!r}'.format(dst.dtype))
print('dst.device = {!r}'.format(dst.device))
print('aligned_src.shape = {!r}'.format(aligned_src.shape))
print('aligned_src.dtype = {!r}'.format(aligned_src.dtype))
print('aligned_src.device = {!r}'.format(aligned_src.device))
print('src.shape = {!r}'.format(src.shape))
print('src.dtype = {!r}'.format(src.dtype))
print('src.device = {!r}'.format(src.device))
except Exception:
print('unexpected numpy')
raise
return dst
def subpixel_maximum(dst, src, index, interp_axes=None):
"""
    Take the elementwise max of the source values and the destination array
    at a particular subpixel index. Modifies the destination array.
Args:
dst (ArrayLike): destination array to index into
src (ArrayLike): source array that agrees with the index
index (Tuple[slice]): subpixel slice into dst that corresponds with src
interp_axes (tuple): specify which axes should be spatially interpolated
Example:
>>> dst = np.array([0, 1.0, 1.0, 1.0, 0])
>>> src = np.array([2.0, 2.0])
>>> index = [slice(1.6, 3.6)]
>>> subpixel_maximum(dst, src, index)
>>> print(ub.repr2(dst, precision=2, with_dtype=0))
np.array([0. , 1. , 2. , 1.2, 0. ])
Example:
>>> # xdoctest: +REQUIRES(module:torch)
>>> dst = torch.zeros((1, 3, 5, 5)) + .5
>>> src = torch.ones((1, 3, 3, 3))
>>> index = (slice(None), slice(None), slice(1.4, 4.4), slice(1.25, 4.25))
>>> subpixel_maximum(dst, src, index)
>>> print(ub.repr2(dst.numpy()[0, 0], precision=2, with_dtype=0))
np.array([[0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],
[0.5 , 0.5 , 0.6 , 0.6 , 0.5 ],
[0.5 , 0.75, 1. , 1. , 0.5 ],
[0.5 , 0.75, 1. , 1. , 0.5 ],
[0.5 , 0.5 , 0.5 , 0.5 , 0.5 ]])
"""
aligned_src, aligned_index = subpixel_align(dst, src, index, interp_axes)
impl = kwarray.ArrayAPI.impl(dst)
impl.maximum(dst[aligned_index], aligned_src, out=dst[aligned_index])
return dst
def subpixel_minimum(dst, src, index, interp_axes=None):
"""
    Take the elementwise min of the source values and the destination array
    at a particular subpixel index. Modifies the destination array.
Args:
dst (ArrayLike): destination array to index into
src (ArrayLike): source array that agrees with the index
index (Tuple[slice]): subpixel slice into dst that corresponds with src
interp_axes (tuple): specify which axes should be spatially interpolated
Example:
>>> dst = np.array([0, 1.0, 1.0, 1.0, 0])
>>> src = np.array([2.0, 2.0])
>>> index = [slice(1.6, 3.6)]
>>> subpixel_minimum(dst, src, index)
>>> print(ub.repr2(dst, precision=2, with_dtype=0))
np.array([0. , 0.8, 1. , 1. , 0. ])
Example:
>>> # xdoctest: +REQUIRES(module:torch)
>>> dst = torch.zeros((1, 3, 5, 5)) + .5
>>> src = torch.ones((1, 3, 3, 3))
>>> index = (slice(None), slice(None), slice(1.4, 4.4), slice(1.25, 4.25))
>>> subpixel_minimum(dst, src, index)
>>> print(ub.repr2(dst.numpy()[0, 0], precision=2, with_dtype=0))
np.array([[0.5 , 0.5 , 0.5 , 0.5 , 0.5 ],
[0.5 , 0.45, 0.5 , 0.5 , 0.15],
[0.5 , 0.5 , 0.5 , 0.5 , 0.25],
[0.5 , 0.5 , 0.5 , 0.5 , 0.25],
[0.5 , 0.3 , 0.4 , 0.4 , 0.1 ]])
"""
aligned_src, aligned_index = subpixel_align(dst, src, index, interp_axes)
impl = kwarray.ArrayAPI.impl(dst)
impl.minimum(dst[aligned_index], aligned_src, out=dst[aligned_index])
return dst
def subpixel_slice(inputs, index):
"""
Take a subpixel slice from a larger image. The returned output is
left-aligned with the requested slice.
Args:
inputs (ArrayLike): data
index (Tuple[slice]): a slice to subpixel accuracy
Example:
>>> # xdoctest: +REQUIRES(module:torch)
>>> import kwimage
>>> import torch
>>> # say we have a (576, 576) input space
>>> # and a (9, 9) output space downsampled by 64x
>>> ospc_feats = np.tile(np.arange(9 * 9).reshape(1, 9, 9), (1024, 1, 1))
>>> inputs = torch.from_numpy(ospc_feats)
>>> # We detected a box in the input space
>>> ispc_bbox = kwimage.Boxes([[64, 65, 100, 120]], 'ltrb')
>>> # Get coordinates in the output space
>>> ospc_bbox = ispc_bbox.scale(1 / 64)
>>> tl_x, tl_y, br_x, br_y = ospc_bbox.data[0]
>>> # Convert the box to a slice
>>> index = [slice(None), slice(tl_y, br_y), slice(tl_x, br_x)]
        >>> # Note: I'm not 100% sure this works right with non-integral slices
>>> outputs = kwimage.subpixel_slice(inputs, index)
Example:
>>> inputs = np.arange(5 * 5 * 3).reshape(5, 5, 3)
>>> index = [slice(0, 3), slice(0, 3)]
>>> outputs = subpixel_slice(inputs, index)
>>> index = [slice(0.5, 3.5), slice(-0.5, 2.5)]
>>> outputs = subpixel_slice(inputs, index)
>>> inputs = np.arange(5 * 5).reshape(1, 5, 5).astype(float)
>>> index = [slice(None), slice(3, 6), slice(3, 6)]
>>> outputs = subpixel_slice(inputs, index)
>>> print(outputs)
[[[18. 19. 0.]
[23. 24. 0.]
[ 0. 0. 0.]]]
>>> index = [slice(None), slice(3.5, 6.5), slice(2.5, 5.5)]
>>> outputs = subpixel_slice(inputs, index)
>>> print(outputs)
[[[20. 21. 10.75]
[11.25 11.75 6. ]
[ 0. 0. 0. ]]]
"""
subpixel_starts = np.array(
[0 if sl.start is None else sl.start for sl in index])
subpixel_stops = np.array(
[inputs.shape[i] if sl.stop is None else sl.stop
for i, sl in enumerate(index)])
is_fractional = ((subpixel_starts % 1) + (subpixel_stops % 1)) > 0
if not np.any(is_fractional):
# If none of the slices are fractional just do the simple thing
int_index = [slice(int(s), int(t)) for s, t in
zip(subpixel_starts, subpixel_stops)]
outputs, _ = _padded_slice(inputs, int_index)
else:
interp_axes = np.where(is_fractional)[0]
shift = -subpixel_starts[interp_axes]
output_shape = subpixel_stops - subpixel_starts
if np.any(output_shape % 1 > 0):
output_shape = np.ceil(output_shape)
# raise ValueError('the slice length must be integral')
output_shape = output_shape.astype(int)
outputs = subpixel_translate(inputs, shift, interp_axes=interp_axes,
output_shape=output_shape)
return outputs
def subpixel_translate(inputs, shift, interp_axes=None, output_shape=None):
"""
Translates an image by a subpixel shift value using bilinear interpolation
Args:
inputs (ArrayLike): data to translate
shift (Sequence):
amount to translate each dimension specified by `interp_axes`.
Note: if inputs contains more than one "image" then all "images" are
translated by the same amount. This function contains no mechanism
for translating each image differently. Note that by default
this is a y,x shift for 2 dimensions.
interp_axes (Sequence, default=None):
axes to perform interpolation on, if not specified the final
`n` axes are interpolated, where `n=len(shift)`
output_shape (tuple, default=None):
            if specified the output is returned with this shape, otherwise
            the shape of the input is used
Notes:
This function powers most other functions in this file.
Speedups here can go a long way.
Example:
>>> inputs = np.arange(5) + 1
>>> print(inputs.tolist())
[1, 2, 3, 4, 5]
>>> outputs = subpixel_translate(inputs, 1.5)
>>> print(outputs.tolist())
[0.0, 0.5, 1.5, 2.5, 3.5]
Example:
>>> # xdoctest: +REQUIRES(module:torch)
>>> inputs = torch.arange(9).view(1, 1, 3, 3).float()
>>> print(inputs.long())
tensor([[[[0, 1, 2],
[3, 4, 5],
[6, 7, 8]]]])
>>> outputs = subpixel_translate(inputs, (-.4, .5), output_shape=(1, 1, 2, 5))
>>> print(outputs)
tensor([[[[0.6000, 1.7000, 2.7000, 1.6000, 0.0000],
[2.1000, 4.7000, 5.7000, 3.1000, 0.0000]]]])
Ignore:
>>> inputs = np.arange(5)
>>> shift = -.6
>>> interp_axes = None
>>> subpixel_translate(inputs, -.6)
>>> subpixel_translate(inputs[None, None, None, :], -.6)
>>> inputs = np.arange(25).reshape(5, 5)
>>> shift = (-1.6, 2.3)
>>> interp_axes = (0, 1)
>>> subpixel_translate(inputs, shift, interp_axes, output_shape=(9, 9))
>>> subpixel_translate(inputs, shift, interp_axes, output_shape=(3, 4))
"""
impl = kwarray.ArrayAPI.impl(inputs)
if output_shape is None:
output_shape = inputs.shape
if interp_axes is None:
shift = _ensure_arraylike(shift)
interp_axes = np.arange(-len(shift), 0)
else:
interp_axes = _ensure_arraylike(interp_axes)
shift = _ensure_arraylike(shift, len(interp_axes))
ndims = len(inputs.shape) # number of inputs dimensions
interp_order = len(interp_axes) # number of interpolated dimensions
output_dims = [output_shape[i] for i in interp_axes]
# The negative shift defines the new start coordinate
start = -shift
# Sample values (using padded slice to deal with borders)
# border_mode = 'zeros'
# if border_mode == 'zeros':
# padkw = dict(pad_mode='constant', constant_value=0)
# if border_mode == 'edge':
# padkw = dict(pad_mode='edge')
padkw = {}
if np.all(start % 1 == 0):
# short circuit common simple cases where no interpolation is needed
relevant_slice = [slice(None)] * ndims
for i, x, d in zip(interp_axes, map(int, start), output_dims):
relevant_slice[i] = slice(x, x + d)
subpxl_vals, _ = _padded_slice(inputs, relevant_slice, **padkw)
elif interp_order == 1:
i, = interp_axes
width, = output_dims
x, = start
# Get quantized pixel locations near subpixel pts
x0 = int(np.floor(x))
x1 = x0 + 1
# Find linear weights
wa = (x1 - x)
wb = (x - x0)
        # Create a (potentially negative) slice containing the relevant area
relevant_slice = [slice(None)] * ndims
relevant_slice[i] = slice(x0, x1 + width)
relevant, _ = _padded_slice(inputs, relevant_slice, **padkw)
if impl.dtype_kind(relevant) != 'f':
relevant = impl.astype(relevant, 'float32')
# Take subslices of the relevant area
sl_a = [slice(None)] * ndims
sl_b = [slice(None)] * ndims
# Sample values (using padded slice to deal with borders)
sl_a[i] = slice(0, width)
sl_b[i] = slice(1, width + 1)
Ia = relevant[tuple(sl_a)]
Ib = relevant[tuple(sl_b)]
# Perform the linear interpolation
subpxl_vals = (wa * Ia) + (wb * Ib)
elif interp_order == 2:
j, i = interp_axes
height, width = output_dims
y, x = start
# Get quantized pixel locations near subpixel pts
start0 = kwarray.ArrayAPI.ifloor(start)
start1 = start0 + 1
alpha = start1 - start
beta = start - start0
# Find bilinear weights
wa = alpha[1] * alpha[0]
wb = alpha[1] * beta[0]
wc = beta[1] * alpha[0]
wd = beta[1] * beta[0]
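        # e.g. for start = (-0.4, 0.5): alpha = (0.4, 0.5), beta = (0.6, 0.5),
        # so (wa, wb, wc, wd) = (0.2, 0.3, 0.2, 0.3), which sums to 1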
        # Create a (potentially negative) slice containing the relevant area
relevant_slice = [slice(None)] * ndims
y0, x0 = start0
y1, x1 = start1
relevant_slice[j] = slice(y0, y1 + height)
relevant_slice[i] = slice(x0, x1 + width)
relevant, _ = _padded_slice(inputs, relevant_slice, **padkw)
if impl.dtype_kind(relevant) != 'f':
relevant = impl.astype(relevant, 'float32')
# Take subslices of the relevant area
sl_a = [slice(None)] * ndims
sl_b = [slice(None)] * ndims
sl_c = [slice(None)] * ndims
sl_d = [slice(None)] * ndims
# Sample values (using padded slice to deal with borders)
sl_a[j] = slice(0, height)
sl_a[i] = slice(0, width)
sl_b[j] = slice(1, height + 1)
sl_b[i] = slice(0, width)
sl_c[j] = slice(0, height)
sl_c[i] = slice(1, width + 1)
sl_d[j] = slice(1, height + 1)
sl_d[i] = slice(1, width + 1)
Ia = relevant[tuple(sl_a)]
Ib = relevant[tuple(sl_b)]
Ic = relevant[tuple(sl_c)]
Id = relevant[tuple(sl_d)]
# Perform the bilinear interpolation
subpxl_vals = (wa * Ia) + (wb * Ib) + (wc * Ic) + (wd * Id)
else:
raise NotImplementedError('trilinear interpolation is not implemented')
return subpxl_vals
def _padded_slice(data, in_slice, ndim=None, pad_slice=None,
pad_mode='constant', **padkw):
"""
Allows slices with out-of-bound coordinates. Any out of bounds coordinate
will be sampled via padding.
Note:
        Negative slices have a different meaning here than they usually do.
Normally, they indicate a wrap-around or a reversed stride, but here
they index into out-of-bounds space (which depends on the pad mode).
For example a slice of -2:1 literally samples two pixels to the left of
the data and one pixel from the data, so you get two padded values and
one data value.
Args:
data (Sliceable[T]): data to slice into. Any channels must be the last dimension.
in_slice (Tuple[slice, ...]): slice for each dimensions
ndim (int): number of spatial dimensions
pad_slice (List[int|Tuple]): additional padding of the slice
Returns:
Tuple[Sliceable, List] :
data_sliced: subregion of the input data (possibly with padding,
depending on if the original slice went out of bounds)
st_dims : a list indicating the low and high space-time coordinate
values of the returned data slice.
Example:
>>> data = np.arange(5)
>>> in_slice = [slice(-2, 7)]
>>> data_sliced, st_dims = _padded_slice(data, in_slice)
>>> print(ub.repr2(data_sliced, with_dtype=False))
>>> print(st_dims)
np.array([0, 0, 0, 1, 2, 3, 4, 0, 0])
[(-2, 7)]
>>> data_sliced, st_dims = _padded_slice(data, in_slice, pad_slice=(3, 3))
>>> print(ub.repr2(data_sliced, with_dtype=False))
>>> print(st_dims)
np.array([0, 0, 0, 0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 0, 0])
[(-5, 10)]
>>> data_sliced, st_dims = _padded_slice(data, slice(3, 4), pad_slice=[(1, 0)])
>>> print(ub.repr2(data_sliced, with_dtype=False))
>>> print(st_dims)
np.array([2, 3])
[(2, 4)]
"""
# TODO: use kwarray instead
if isinstance(in_slice, slice):
in_slice = [in_slice]
ndim = len(in_slice)
data_dims = data.shape[:ndim]
low_dims = [sl.start for sl in in_slice]
high_dims = [sl.stop for sl in in_slice]
data_slice, extra_padding = _rectify_slice(data_dims, low_dims, high_dims,
pad_slice=pad_slice)
in_slice_clipped = tuple(slice(*d) for d in data_slice)
# Get the parts of the image that are in bounds
data_clipped = data[in_slice_clipped]
# Add any padding that is needed to behave like negative dims exist
if sum(map(sum, extra_padding)) == 0:
# The slice was completely in bounds
data_sliced = data_clipped
else:
if len(data.shape) != len(extra_padding):
extra_padding = extra_padding + [(0, 0)]
impl = kwarray.ArrayAPI.impl(data_clipped)
data_sliced = impl.pad(data_clipped, extra_padding, mode=pad_mode,
**padkw)
st_dims = data_slice[0:ndim]
pad_dims = extra_padding[0:ndim]
st_dims = [(s - pad[0], t + pad[1])
for (s, t), pad in zip(st_dims, pad_dims)]
return data_sliced, st_dims
def _ensure_arraylike(data, n=None):
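    """
    Coerce scalar or sequence `data` to a numpy array; scalars are broadcast
    to length `n` when it is given.

    Example:
        >>> _ensure_arraylike(3, n=2).tolist()
        [3, 3]
    """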
if not ub.iterable(data):
if n is None:
return np.array([data])
else:
return np.array([data] * n)
else:
if n is None or len(data) == n:
return np.array(data)
elif len(data) == 1:
            return np.repeat(data, n, axis=0)
import numpy as np
import pytest
import unittest
from sdia_python.lab2.box_window import BoxWindow, UnitBoxWindow
def test_raise_type_error_when_something_is_called():
with pytest.raises(TypeError):
# call_something_that_raises_TypeError()
raise TypeError()
# checks the str function for the box_window
@pytest.mark.parametrize(
"bounds, expected",
[
(np.array([[2.5, 2.5]]), "BoxWindow: [2.5, 2.5]"),
(np.array([[0, 5], [0, 5]]), "BoxWindow: [0, 5] x [0, 5]"),
(
np.array([[0, 5], [-1.45, 3.14], [-10, 10]]),
"BoxWindow: [0, 5] x [-1.45, 3.14] x [-10, 10]",
),
],
)
def test_box_string_representation(bounds, expected):
assert str(BoxWindow(bounds)) == expected
# checks if the indicator function is well defined for dimension=2
@pytest.fixture
def box_2d_05():
return BoxWindow(np.array([[0, 5], [0, 5]]))
@pytest.mark.parametrize(
"point, expected",
[
(np.array([0, 0]), True),
(np.array([2.5, 2.5]), True),
(np.array([-1, 5]), False),
(np.array([10, 3]), False),
],
)
def test_indicator_function_box_2d(box_2d_05, point, expected):
is_in = box_2d_05.indicator_function(point)
assert is_in == expected
# ================================
# ==== WRITE YOUR TESTS BELOW ====
# ================================
# checks if the dimension of the window box is correct (d,2).
@pytest.mark.parametrize(
"bounds, expected",
[
(np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), (3, 2)),
(np.array([[2.5, 2.5]]), (1, 2)),
],
)
def test_init(bounds, expected):
c = BoxWindow(bounds)
assert c.bounds.shape == expected
def test_bad_init():
with pytest.raises(ValueError):
        BoxWindow(np.array([[0, 5], [-1.45, 3.14], [10, -10]]))
# checks the evaluation of the length of each bound is correct.
@pytest.mark.parametrize(
"bounds, expected",
[
(np.array([[0, 5], [0, 5]]), np.array([5, 5])),
(np.array([[2.5, 2.5]]), np.array([0])),
(np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), np.array([5, 4.59, 20])),
],
)
def test_length(bounds, expected):
c = BoxWindow(bounds)
assert np.all(c.length() == expected)
# checks if the len of the box window is correct.
@pytest.mark.parametrize(
"bounds, expected",
[
(np.array([[0, 5], [0, 5]]), 10),
(np.array([[2.5, 2.5]]), 0),
(np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), 29.59),
],
)
def test_len(bounds, expected):
c = BoxWindow(bounds)
assert c.__len__() == expected
# checks if for the box_2d, the points are in the box window
@pytest.fixture
def box_2d_05():
return BoxWindow(np.array([[0, 5], [0, 5]]))
@pytest.mark.parametrize(
"point, expected",
[
(np.array([1, 1]), True),
(np.array([2.5, 2.5]), True),
(np.array([-1, 5]), False),
(np.array([10, 3]), False),
],
)
def test_contains(box_2d_05, point, expected):
is_in = box_2d_05.__contains__(point)
assert is_in == expected
# error test
def test_bad_contains(box_2d_05):
with pytest.raises(ValueError):
box_2d_05.__contains__(np.array([1, 1, 1]))
# checks if the dimension of the box window is correct
@pytest.mark.parametrize(
"bounds, expected",
[
(np.array([[0, 5], [0, 5]]), 2),
(np.array([[2.5, 2.5]]), 1),
(np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), 3),
],
)
def test_dimension(bounds, expected):
c = BoxWindow(bounds)
assert c.dimension() == expected
# checks if the evaluation of the volume of the box is correct
@pytest.mark.parametrize(
"bounds, expected",
[
(np.array([[0, 5], [0, 5]]), 25),
(np.array([[2.5, 2.5]]), 0),
(np.array([[0, 5], [-1.45, 3.14], [-10, 10]]), 459),
],
)
def test_volume(bounds, expected):
c = BoxWindow(bounds)
assert c.volume() == expected
# checks if the indicator function returns 1 if the point is in the box, 0 otherwise
@pytest.fixture
def box_2d_05():
return BoxWindow(np.array([[0, 5], [0, 5]]))
@pytest.mark.parametrize(
"point, expected",
[
(np.array([1, 1]), 1),
(np.array([2.5, 2.5]), 1),
(np.array([-1, 5]), 0),
(np.array([10, 3]), 0),
],
)
def test_indicator_function(box_2d_05, point, expected):
is_in = box_2d_05.indicator_function(point)
assert is_in == expected
# checks if the multiple indicator function returns 1 if all the points are in the box, 0 otherwise
@pytest.fixture
def box_2d_05():
return BoxWindow(np.array([[0, 5], [0, 5]]))
@pytest.mark.parametrize(
"point, expected",
[
(np.array([[1, 1], [2, 0.5]]), 1),
(np.array([2.5, 2.5]), 1),
(np.array([[-1, 5], [33, 9], [0, 0]]), 0),
(np.array([[10, 3], [1, 1]]), 0),
],
)
def test_multiple_indicator_function(box_2d_05, point, expected):
is_in = box_2d_05.multiple_indicator_function(point)
assert is_in == expected
# checks if the point taken randomly is in the box
@pytest.mark.parametrize(
"bounds, expected",
[
        (np.array([[0, 5], [0, 5]]),
"""
This module contains classes used for generating different types of analysis plots
Scatter:
This class contains a variety of scatter plot types, e.g. parity (predicted vs. true) plots
Error:
This class contains plotting methods used to better quantify the model errors and uncertainty quantification.
Histogram:
This class contains methods for constructing histograms of data distributions and visualization of model residuals.
Line:
This class contains methods for making line plots, e.g. for constructing learning curves of model performance vs.
amount of data or number of features.
"""
import warnings
import math
import os
import pandas as pd
import numpy as np
from collections.abc import Iterable
from math import log, ceil
from scipy.stats import gaussian_kde, norm
import scipy.stats as stats
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from mastml.metrics import Metrics
from mastml.error_analysis import ErrorUtils
import matplotlib
from matplotlib import pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure, figaspect
from matplotlib.font_manager import FontProperties
from mpl_toolkits.axes_grid1.inset_locator import mark_inset
from mpl_toolkits.axes_grid1.inset_locator import zoomed_inset_axes
from mpl_toolkits.axes_grid1 import make_axes_locatable
matplotlib.rc('font', size=18, family='sans-serif')  # use a larger global font size
matplotlib.rc('figure', autolayout=True) # turn on autolayout
warnings.filterwarnings(action="ignore")
# adding dpi as a constant global so it can be changed later
DPI = 250
class Scatter():
"""
Class to generate scatter plots, such as parity plots showing true vs. predicted data values
Args:
None
Methods:
plot_predicted_vs_true: method to plot a parity plot
Args:
y_true: (pd.Series), series of true y data
y_pred: (pd.Series), series of predicted y data
savepath: (str), string denoting the save path for the figure image
data_type: (str), string denoting the data type (e.g. train, test, leaveout)
x_label: (str), string denoting the true and predicted property name
metrics_list: (list), list of strings of metric names to evaluate and include on the figure
show_figure: (bool), whether or not to show the figure output (e.g. when using Jupyter notebook)
Returns:
None
plot_best_worst_split: method to find the best and worst split in an evaluation set and plot them together
Args:
savepath: (str), string denoting the save path for the figure image
data_type: (str), string denoting the data type (e.g. train, test, leaveout)
x_label: (str), string denoting the true and predicted property name
metrics_list: (list), list of strings of metric names to evaluate and include on the figure
show_figure: (bool), whether or not to show the figure output (e.g. when using Jupyter notebook)
Returns:
None
plot_best_worst_per_point: method to find all of the best and worst data points from an evaluation set and plot them together
Args:
savepath: (str), string denoting the save path for the figure image
data_type: (str), string denoting the data type (e.g. train, test, leaveout)
x_label: (str), string denoting the true and predicted property name
metrics_list: (list), list of strings of metric names to evaluate and include on the figure
show_figure: (bool), whether or not to show the figure output (e.g. when using Jupyter notebook)
Returns:
None
plot_predicted_vs_true_bars: method to plot the average predicted value of each data point from an evaluation set with error bars denoting the standard deviation in predicted values
Args:
savepath: (str), string denoting the save path for the figure image
data_type: (str), string denoting the data type (e.g. train, test, leaveout)
x_label: (str), string denoting the true and predicted property name
metrics_list: (list), list of strings of metric names to evaluate and include on the figure
show_figure: (bool), whether or not to show the figure output (e.g. when using Jupyter notebook)
Returns:
None
plot_metric_vs_group: method to plot the metric value for each group during e.g. a LeaveOneGroupOut data split
Args:
savepath: (str), string denoting the save path for the figure image
data_type: (str), string denoting the data type (e.g. train, test, leaveout)
show_figure: (bool), whether or not to show the figure output (e.g. when using Jupyter notebook)
Returns:
None
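
    Example:
        A hypothetical usage sketch (assumes pandas Series `y_true`/`y_pred`
        and a writable `savepath`; the argument values are illustrative only)::

            Scatter.plot_predicted_vs_true(
                y_true=y_true, y_pred=y_pred, savepath='.',
                data_type='test', x_label='property',
                metrics_list=['r2_score', 'mean_absolute_error'],
                show_figure=False)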
"""
@classmethod
def plot_predicted_vs_true(cls, y_true, y_pred, savepath, data_type, x_label, metrics_list=None, show_figure=False):
# Make the dataframe/array 1D if it isn't
y_true = check_dimensions(y_true)
y_pred = check_dimensions(y_pred)
# Set image aspect ratio:
fig, ax = make_fig_ax()
# gather max and min
maxx = max(np.nanmax(y_true), np.nanmax(y_pred))
minn = min(np.nanmin(y_true), np.nanmin(y_pred))
#maxx = max(y_true)
#minn = min(y_true)
#maxx = round(float(maxx), rounder(maxx - minn))
#minn = round(float(minn), rounder(maxx - minn))
_set_tick_labels(ax, maxx, minn)
ax.scatter(y_true, y_pred, c='b', edgecolor='darkblue', zorder=2, s=100, alpha=0.7)
        # draw the dashed diagonal parity line (y = x)
ax.plot([minn, maxx], [minn, maxx], 'k--', lw=2, zorder=1)
ax.set_xlabel('True ' + x_label, fontsize=14)
ax.set_ylabel('Predicted ' + x_label, fontsize=14)
if metrics_list is None:
# Use some default metric set
metrics_list = ['r2_score', 'mean_absolute_error', 'root_mean_squared_error', 'rmse_over_stdev']
stats_dict = Metrics(metrics_list=metrics_list).evaluate(y_true=y_true, y_pred=y_pred)
plot_stats(fig, stats_dict, x_align=0.65, y_align=0.90, fontsize=12)
fig.savefig(os.path.join(savepath, 'parity_plot_'+str(data_type) + '.png'), dpi=DPI, bbox_inches='tight')
        if show_figure:
plt.show()
else:
plt.close()
return
@classmethod
def plot_best_worst_split(cls, savepath, data_type, x_label, metrics_list, show_figure=False):
dirs = os.listdir(savepath)
splitdirs = [d for d in dirs if 'split_' in d and '.png' not in d]
stats_files_dict = dict()
for splitdir in splitdirs:
stats_files_dict[splitdir] = pd.read_excel(os.path.join(os.path.join(savepath, splitdir), data_type + '_stats_summary.xlsx'), engine='openpyxl').to_dict('records')[0]
# Find best/worst splits based on RMSE value
rmse_best = 10**20
rmse_worst = 0
for split, stats_dict in stats_files_dict.items():
if stats_dict['root_mean_squared_error'] < rmse_best:
best_split = split
rmse_best = stats_dict['root_mean_squared_error']
if stats_dict['root_mean_squared_error'] > rmse_worst:
worst_split = split
rmse_worst = stats_dict['root_mean_squared_error']
if data_type == 'test':
y_true_best = pd.read_excel(os.path.join(os.path.join(savepath, best_split), 'y_test.xlsx'), engine='openpyxl')
y_pred_best = pd.read_excel(os.path.join(os.path.join(savepath, best_split), 'y_pred.xlsx'), engine='openpyxl')
y_true_worst = pd.read_excel(os.path.join(os.path.join(savepath, worst_split), 'y_test.xlsx'), engine='openpyxl')
y_pred_worst = pd.read_excel(os.path.join(os.path.join(savepath, worst_split), 'y_pred.xlsx'), engine='openpyxl')
elif data_type == 'train':
y_true_best = pd.read_excel(os.path.join(os.path.join(savepath, best_split), 'y_train.xlsx'), engine='openpyxl')
y_pred_best = pd.read_excel(os.path.join(os.path.join(savepath, best_split), 'y_pred_train.xlsx'), engine='openpyxl')
y_true_worst = pd.read_excel(os.path.join(os.path.join(savepath, worst_split), 'y_train.xlsx'), engine='openpyxl')
y_pred_worst = pd.read_excel(os.path.join(os.path.join(savepath, worst_split), 'y_pred_train.xlsx'), engine='openpyxl')
# Make the dataframe/array 1D if it isn't
y_true_best = check_dimensions(y_true_best)
y_pred_best = check_dimensions(y_pred_best)
y_true_worst = check_dimensions(y_true_worst)
y_pred_worst = check_dimensions(y_pred_worst)
# Set image aspect ratio:
fig, ax = make_fig_ax()
# gather max and min
maxx = max(np.nanmax(y_true_best), np.nanmax(y_pred_best), np.nanmax(y_true_worst), np.nanmax(y_pred_worst))
minn = min(np.nanmin(y_true_best), np.nanmin(y_pred_best), np.nanmin(y_true_worst), np.nanmin(y_pred_worst))
#maxx = round(float(max1), rounder(max1 - min1))
#minn = round(float(min1), rounder(max1 - min1))
_set_tick_labels(ax, maxx, minn)
ax.scatter(y_true_best, y_pred_best, c='b', edgecolor='darkblue', zorder=2, s=100, alpha=0.7, label='Best split')
ax.scatter(y_true_worst, y_pred_worst, c='r', edgecolor='darkred', zorder=2, s=100, alpha=0.7, label='Worst split')
ax.legend(loc='best')
        # draw the dashed diagonal parity line (y = x)
ax.plot([minn, maxx], [minn, maxx], 'k--', lw=2, zorder=1)
ax.set_xlabel('True ' + x_label, fontsize=14)
ax.set_ylabel('Predicted ' + x_label, fontsize=14)
stats_dict_best = Metrics(metrics_list=metrics_list).evaluate(y_true=y_true_best, y_pred=y_pred_best)
stats_dict_worst = Metrics(metrics_list=metrics_list).evaluate(y_true=y_true_worst, y_pred=y_pred_worst)
plot_stats(fig, stats_dict_best, x_align=0.65, y_align=0.90, font_dict={'fontsize': 12, 'color': 'blue'})
plot_stats(fig, stats_dict_worst, x_align=0.65, y_align=0.50, font_dict={'fontsize': 12, 'color': 'red'})
# Save data to excel file and image
fig.savefig(os.path.join(savepath, 'parity_plot_best_worst_split_'+str(data_type)+'.png'), dpi=DPI, bbox_inches='tight')
        if show_figure:
plt.show()
else:
plt.close()
return
#TODO: this method runs into issues when the y_true data have multiple instances where the y data have the same value, leading to size mismatch errors
@classmethod
def plot_best_worst_per_point(cls, savepath, data_type, x_label, metrics_list, show_figure=False):
# Get lists of all ytrue and ypred for each split
dirs = os.listdir(savepath)
splitdirs = [d for d in dirs if 'split_' in d and '.png' not in d]
y_true_list = list()
y_pred_list = list()
for splitdir in splitdirs:
y_true_list.append(pd.read_excel(os.path.join(os.path.join(savepath, splitdir), 'y_'+str(data_type)+'.xlsx'), engine='openpyxl'))
if data_type == 'test':
y_pred_list.append(pd.read_excel(os.path.join(os.path.join(savepath, splitdir), 'y_pred.xlsx'), engine='openpyxl'))
elif data_type == 'train':
y_pred_list.append(pd.read_excel(os.path.join(os.path.join(savepath, splitdir), 'y_pred_train.xlsx'), engine='openpyxl'))
all_y_true = list()
all_y_pred = list()
all_abs_residuals = list()
for yt, y_pred in zip(y_true_list, y_pred_list):
yt = np.array(check_dimensions(yt))
y_pred = np.array(check_dimensions(y_pred))
abs_residuals = abs(yt-y_pred)
all_y_true.append(yt)
all_y_pred.append(y_pred)
all_abs_residuals.append(abs_residuals)
all_y_true_flat = np.array([item for sublist in all_y_true for item in sublist])
all_y_pred_flat = np.array([item for sublist in all_y_pred for item in sublist])
all_residuals_flat = np.array([item for sublist in all_abs_residuals for item in sublist])
# TODO: this is the source of the issue, as y_true_unique can be smaller than y_true. A better way?
y_true_unique = np.unique(all_y_true_flat)
bests = list()
worsts = list()
for yt in y_true_unique:
best = min(abs(all_y_pred_flat[np.where(all_y_true_flat == yt)] - all_y_true_flat[np.where(all_y_true_flat == yt)]))
worst = max(abs(all_y_pred_flat[np.where(all_y_true_flat == yt)] - all_y_true_flat[np.where(all_y_true_flat == yt)]))
bests.append(all_y_pred_flat[np.where(all_residuals_flat == best)])
worsts.append(all_y_pred_flat[np.where(all_residuals_flat == worst)])
bests = np.array([item for sublist in bests for item in sublist])
worsts = np.array([item for sublist in worsts for item in sublist])
stats_dict_best = Metrics(metrics_list=metrics_list).evaluate(y_true=y_true_unique, y_pred=bests)
stats_dict_worst = Metrics(metrics_list=metrics_list).evaluate(y_true=y_true_unique, y_pred=worsts)
fig, ax = make_fig_ax(x_align=0.65)
# gather max and min
maxx = max([max(y_true_unique), max(bests), max(worsts)])
minn = min([min(y_true_unique), min(bests), min(worsts)])
        # draw the dashed diagonal parity line (y = x)
ax.plot([minn, maxx], [minn, maxx], 'k--', lw=2, zorder=1)
# set axis labels
ax.set_xlabel('True '+x_label, fontsize=16)
ax.set_ylabel('Predicted '+x_label, fontsize=16)
# set tick labels
#maxx = round(float(max1), rounder(max1-min1))
#minn = round(float(min1), rounder(max1-min1))
_set_tick_labels(ax, maxx, minn)
ax.scatter(y_true_unique, bests, c='b', alpha=0.7, label='best all points', edgecolor='darkblue', zorder=2, s=100)
ax.scatter(y_true_unique, worsts, c='r', alpha=0.7, label='worst all points', edgecolor='darkred', zorder=2, s=70)
ax.legend(loc='best', fontsize=12)
#plot_stats(fig, avg_stats, x_align=x_align, y_align=0.51, fontsize=10)
plot_stats(fig, stats_dict_best, x_align=0.65, y_align=0.90, font_dict={'fontsize': 10, 'color': 'b'})
plot_stats(fig, stats_dict_worst, x_align=0.65, y_align=0.50, font_dict={'fontsize': 10, 'color': 'r'})
# Save data to excel file and image
fig.savefig(os.path.join(savepath, 'parity_plot_best_worst_eachpoint_'+str(data_type)+'.png'), dpi=DPI, bbox_inches='tight')
if show_figure == True:
plt.show()
else:
plt.close()
return
@classmethod
def plot_predicted_vs_true_bars(cls, savepath, x_label, data_type, metrics_list, show_figure=False):
# Get lists of all ytrue and ypred for each split
dirs = os.listdir(savepath)
splitdirs = [d for d in dirs if 'split_' in d and '.png' not in d]
y_true_list = list()
y_pred_list = list()
for splitdir in splitdirs:
y_true_list.append(pd.read_excel(os.path.join(os.path.join(savepath, splitdir), 'y_'+str(data_type)+'.xlsx'), engine='openpyxl'))
if data_type == 'test':
y_pred_list.append(pd.read_excel(os.path.join(os.path.join(savepath, splitdir), 'y_pred.xlsx'), engine='openpyxl'))
elif data_type == 'train':
y_pred_list.append(pd.read_excel(os.path.join(os.path.join(savepath, splitdir), 'y_pred_train.xlsx'), engine='openpyxl'))
elif data_type == 'leaveout':
y_pred_list.append(pd.read_excel(os.path.join(os.path.join(savepath, splitdir), 'y_pred_leaveout.xlsx'), engine='openpyxl'))
all_y_true = list()
all_y_pred = list()
for yt, y_pred in zip(y_true_list, y_pred_list):
yt = np.array(check_dimensions(yt))
y_pred = np.array(check_dimensions(y_pred))
all_y_true.append(yt)
all_y_pred.append(y_pred)
df_all = pd.DataFrame({'all_y_true': np.array([item for sublist in all_y_true for item in sublist]),
'all_y_pred': np.array([item for sublist in all_y_pred for item in sublist])})
df_all_grouped = df_all.groupby(df_all['all_y_true'], sort=False)
df_avg = df_all_grouped.mean()
df_std = df_all_grouped.std()
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
# gather max and min
maxx = max(np.nanmax(df_avg.index.values.tolist()), np.nanmax(df_avg['all_y_pred']))
minn = min(np.nanmin(df_avg.index.values.tolist()), np.nanmin(df_avg['all_y_pred']))
        # draw dashed parity line (y = x)
ax.plot([minn, maxx], [minn, maxx], 'k--', lw=2, zorder=1)
# set axis labels
ax.set_xlabel('True ' + x_label, fontsize=16)
ax.set_ylabel('Predicted ' + x_label, fontsize=16)
# set tick labels
_set_tick_labels(ax, maxx, minn)
ax.errorbar(df_avg.index.values.tolist(), df_avg['all_y_pred'], yerr=df_std['all_y_pred'], fmt='o',
markerfacecolor='blue', markeredgecolor='black',
markersize=10, alpha=0.7, capsize=3)
stats_files_dict = dict()
for splitdir in splitdirs:
stats_files_dict[splitdir] = pd.read_excel(os.path.join(os.path.join(savepath, splitdir), data_type + '_stats_summary.xlsx'), engine='openpyxl').to_dict('records')[0]
metrics_list = list(stats_files_dict[splitdir].keys())
avg_stats = dict()
for metric in metrics_list:
stats = list()
for splitdir in splitdirs:
stats.append(stats_files_dict[splitdir][metric])
avg_stats[metric] = (np.mean(stats), np.std(stats))
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.90)
fig.savefig(os.path.join(savepath, 'parity_plot_allsplits_average_'+str(data_type)+'.png'), dpi=DPI, bbox_inches='tight')
df = pd.DataFrame({'y true': df_avg.index.values.tolist(),
'average predicted values': df_avg['all_y_pred'],
'error bar values': df_std['all_y_pred']})
df.to_excel(os.path.join(savepath, 'parity_plot_allsplits_average_'+str(data_type)+'.xlsx'))
df_stats = pd.DataFrame().from_dict(avg_stats)
df_stats.to_excel(os.path.join(savepath, str(data_type)+'_average_stdev_stats_summary.xlsx'), index=False)
if show_figure == True:
plt.show()
else:
plt.close()
return
@classmethod
def plot_metric_vs_group(cls, savepath, data_type, show_figure):
dirs = os.listdir(savepath)
splitdirs = [d for d in dirs if 'split_' in d and '.png' not in d]
stats_files_dict = dict()
groups = list()
for splitdir in splitdirs:
with open(os.path.join(os.path.join(savepath, splitdir), 'test_group.txt'), 'r') as f:
                group = f.readlines()[0].strip()
groups.append(group)
stats_files_dict[group] = pd.read_excel(os.path.join(os.path.join(savepath, splitdir), data_type + '_stats_summary.xlsx'), engine='openpyxl').to_dict('records')[0]
metrics_list = list(stats_files_dict[group].keys())
for metric in metrics_list:
stats = list()
for group in groups:
stats.append(stats_files_dict[group][metric])
avg_stats = {metric: (np.mean(stats), np.std(stats))}
# make fig and ax, use x_align when placing text so things don't overlap
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
# do the actual plotting
ax.scatter(groups, stats, c='blue', alpha=0.7, edgecolor='darkblue', zorder=2, s=100)
# set axis labels
ax.set_xlabel('Group', fontsize=14)
ax.set_ylabel(metric, fontsize=14)
ax.set_xticklabels(labels=groups, fontsize=14)
plot_stats(fig, avg_stats, x_align=x_align, y_align=0.90)
fig.savefig(os.path.join(savepath, str(metric)+'_value_per_group_'+str(data_type)+'.png'), dpi=DPI, bbox_inches='tight')
if show_figure == True:
plt.show()
else:
plt.close()
return
class Error():
"""
Class to make plots related to model error assessment and uncertainty quantification
Args:
None
Methods:
plot_normalized_error: Method to plot the normalized residual errors of a model prediction
Args:
residuals: (pd.Series), series containing the true errors (model residuals)
savepath: (str), string denoting the save path to save the figure to
data_type: (str), string denoting the data type, e.g. train, test, leftout
model_errors: (pd.Series), series containing the predicted model errors (optional, default None)
show_figure: (bool), whether or not the generated figure is output to the notebook screen (default False)
Returns:
None
plot_cumulative_normalized_error: Method to plot the cumulative normalized residual errors of a model prediction
Args:
residuals: (pd.Series), series containing the true errors (model residuals)
savepath: (str), string denoting the save path to save the figure to
data_type: (str), string denoting the data type, e.g. train, test, leftout
model_errors: (pd.Series), series containing the predicted model errors (optional, default None)
show_figure: (bool), whether or not the generated figure is output to the notebook screen (default False)
Returns:
None
plot_rstat: Method for plotting the r-statistic distribution (true divided by predicted error)
Args:
savepath: (str), string denoting the save path to save the figure to
data_type: (str), string denoting the data type, e.g. train, test, leftout
residuals: (pd.Series), series containing the true errors (model residuals)
model_errors: (pd.Series), series containing the predicted model errors
show_figure: (bool), whether or not the generated figure is output to the notebook screen (default False)
is_calibrated: (bool), whether or not the model errors have been recalibrated (default False)
Returns:
None
plot_rstat_uncal_cal_overlay: Method for plotting the r-statistic distribution for two cases together: the as-obtained uncalibrated model errors and calibrated errors
Args:
savepath: (str), string denoting the save path to save the figure to
data_type: (str), string denoting the data type, e.g. train, test, leftout
residuals: (pd.Series), series containing the true errors (model residuals)
model_errors: (pd.Series), series containing the predicted model errors
model_errors_cal: (pd.Series), series containing the calibrated predicted model errors
show_figure: (bool), whether or not the generated figure is output to the notebook screen (default False)
Returns:
None
plot_real_vs_predicted_error: Sometimes called the RvE plot, or residual vs. error plot, this method plots the binned RMS residuals as a function of the binned model errors
Args:
savepath: (str), string denoting the save path to save the figure to
model: (mastml.models object), a MAST-ML model object, e.g. SklearnModel or EnsembleModel
data_type: (str), string denoting the data type, e.g. train, test, leftout
model_errors: (pd.Series), series containing the predicted model errors
residuals: (pd.Series), series containing the true errors (model residuals)
dataset_stdev: (float), the standard deviation of the training dataset
show_figure: (bool), whether or not the generated figure is output to the notebook screen (default False)
is_calibrated: (bool), whether or not the model errors have been recalibrated (default False)
well_sampled_fraction: (float), number denoting whether a bin qualifies as well-sampled or not. Default to 0.025 (2.5% of total samples). Only affects visuals, not fitting
Returns:
None
plot_real_vs_predicted_error_uncal_cal_overlay: Method for making the residual vs. error plot for two cases together: using the as-obtained uncalibrated model errors and calibrated errors
Args:
savepath: (str), string denoting the save path to save the figure to
model: (mastml.models object), a MAST-ML model object, e.g. SklearnModel or EnsembleModel
data_type: (str), string denoting the data type, e.g. train, test, leftout
model_errors: (pd.Series), series containing the predicted model errors
model_errors_cal: (pd.Series), series containing the calibrated predicted model errors
residuals: (pd.Series), series containing the true errors (model residuals)
dataset_stdev: (float), the standard deviation of the training dataset
show_figure: (bool), whether or not the generated figure is output to the notebook screen (default False)
well_sampled_fraction: (float), number denoting whether a bin qualifies as well-sampled or not. Default to 0.025 (2.5% of total samples). Only affects visuals, not fitting
Returns:
None
"""
@classmethod
def plot_normalized_error(cls, residuals, savepath, data_type, model_errors=None, show_figure=False):
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
mu = 0
sigma = 1
residuals[residuals == 0.0] = 10**-6
normalized_residuals = residuals / np.std(residuals)
density_residuals = gaussian_kde(normalized_residuals)
x = np.linspace(mu - 5 * sigma, mu + 5 * sigma, residuals.shape[0])
ax.plot(x, norm.pdf(x, mu, sigma), linewidth=4, color='blue', label="Analytical Gaussian")
ax.plot(x, density_residuals(x), linewidth=4, color='green', label="Model Residuals")
maxx = 5
minn = -5
if model_errors is not None:
model_errors[model_errors == 0.0] = 0.0001
rstat = residuals / model_errors
density_errors = gaussian_kde(rstat)
maxy = max(max(density_residuals(x)), max(norm.pdf(x, mu, sigma)), max(density_errors(x)))
            miny = min(min(density_residuals(x)), min(norm.pdf(x, mu, sigma)), min(density_errors(x)))
ax.plot(x, density_errors(x), linewidth=4, color='purple', label="Model Errors")
# Save data to csv file
data_dict = {"Plotted x values": x, "model_errors": model_errors,
# "analytical gaussian (plotted y blue values)": norm.pdf(x, mu, sigma),
"residuals": residuals,
"model normalized residuals (plotted y green values)": density_residuals(x),
"model errors (plotted y purple values)": density_errors(x)}
pd.DataFrame(data_dict).to_excel(os.path.join(savepath, 'normalized_error_data_'+str(data_type)+'.xlsx'))
else:
# Save data to csv file
data_dict = {"x values": x,
# "analytical gaussian": norm.pdf(x, mu, sigma),
"model normalized residuals (plotted y green values)": density_residuals(x)}
pd.DataFrame(data_dict).to_excel(os.path.join(savepath, 'normalized_error_data_'+str(data_type)+'.xlsx'))
maxy = max(max(density_residuals(x)), max(norm.pdf(x, mu, sigma)))
miny = min(min(density_residuals(x)), min(norm.pdf(x, mu, sigma)))
ax.legend(loc=0, fontsize=12, frameon=False)
ax.set_xlabel(r"$\mathrm{x}/\mathit{\sigma}$", fontsize=18)
ax.set_ylabel("Probability density", fontsize=18)
_set_tick_labels_different(ax, maxx, minn, maxy, miny)
fig.savefig(os.path.join(savepath, 'normalized_errors_'+str(data_type)+'.png'), dpi=DPI, bbox_inches='tight')
if show_figure is True:
plt.show()
else:
plt.close()
return
@classmethod
def plot_cumulative_normalized_error(cls, residuals, savepath, data_type, model_errors=None, show_figure=False):
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
analytic_gau = np.random.normal(0, 1, 10000)
analytic_gau = abs(analytic_gau)
        n_analytic = np.arange(1, len(analytic_gau) + 1) / float(len(analytic_gau))
X_analytic = np.sort(analytic_gau)
residuals[residuals == 0.0] = 10 ** -6
normalized_residuals = abs((residuals) / np.std(residuals))
        n_residuals = np.arange(1, len(normalized_residuals) + 1) / float(len(normalized_residuals))
X_residuals = np.sort(normalized_residuals) # r"$\mathrm{Predicted \/ Value}, \mathit{eV}$"
ax.set_xlabel(r"$\mathrm{x}/\mathit{\sigma}$", fontsize=18)
ax.set_ylabel("Fraction", fontsize=18)
ax.step(X_residuals, n_residuals, linewidth=3, color='green', label="Model Residuals")
ax.step(X_analytic, n_analytic, linewidth=3, color='blue', label="Analytical Gaussian")
ax.set_xlim([0, 5])
if model_errors is not None:
model_errors[model_errors == 0.0] = 0.0001
rstat = abs((residuals) / model_errors)
            n_errors = np.arange(1, len(rstat) + 1) / float(len(rstat))
X_errors = np.sort(rstat)
ax.step(X_errors, n_errors, linewidth=3, color='purple', label="Model Errors")
# Save data to csv file
data_dict = { # "Analytical Gaussian values": analytic_gau,
# "Analytical Gaussian (sorted, blue data)": X_analytic,
"residuals": residuals,
"normalized residuals": normalized_residuals,
"Model Residuals (sorted, green data)": X_residuals,
"Model error values (r value: (ytrue-ypred)/(model error avg))": rstat,
"Model errors (sorted, purple values)": X_errors}
# Save this way to avoid issue with different array sizes in data_dict
df = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in data_dict.items()]))
df.to_excel(os.path.join(savepath, 'cumulative_normalized_errors_'+str(data_type)+'.xlsx'), index=False)
else:
# Save data to csv file
data_dict = { # "x analytical": X_analytic,
# "analytical gaussian": n_analytic,
"Model Residuals (sorted, green data)": X_residuals,
"model residuals": n_residuals}
# Save this way to avoid issue with different array sizes in data_dict
df = pd.DataFrame(dict([(k, pd.Series(v)) for k, v in data_dict.items()]))
df.to_excel(os.path.join(savepath, 'cumulative_normalized_errors_'+str(data_type)+'.xlsx'), index=False)
ax.legend(loc=0, fontsize=14, frameon=False)
xlabels = np.linspace(2, 3, 3)
ylabels = np.linspace(0.9, 1, 2)
axin = zoomed_inset_axes(ax, 2.5, loc=7)
axin.step(X_residuals, n_residuals, linewidth=3, color='green', label="Model Residuals")
axin.step(X_analytic, n_analytic, linewidth=3, color='blue', label="Analytical Gaussian")
if model_errors is not None:
axin.step(X_errors, n_errors, linewidth=3, color='purple', label="Model Errors")
axin.set_xticklabels(xlabels, fontsize=8, rotation=90)
axin.set_yticklabels(ylabels, fontsize=8)
axin.set_xlim([2, 3])
axin.set_ylim([0.9, 1])
maxx = 5
minn = 0
maxy = 1.1
miny = 0
_set_tick_labels_different(ax, maxx, minn, maxy, miny)
mark_inset(ax, axin, loc1=1, loc2=2)
fig.savefig(os.path.join(savepath, 'cumulative_normalized_errors_'+str(data_type)+'.png'), dpi=DPI, bbox_inches='tight')
if show_figure is True:
plt.show()
else:
plt.close()
return
@classmethod
def plot_rstat(cls, savepath, data_type, residuals, model_errors, show_figure=False, is_calibrated=False):
# Eliminate model errors with value 0, so that the ratios can be calculated
zero_indices = []
for i in range(0, len(model_errors)):
if model_errors[i] == 0:
zero_indices.append(i)
residuals = np.delete(residuals, zero_indices)
model_errors = np.delete(model_errors, zero_indices)
# make data for gaussian plot
gaussian_x = np.linspace(-5, 5, 1000)
# create plot
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
ax.set_xlabel('residuals / model error estimates')
ax.set_ylabel('relative counts')
ax.hist(residuals/model_errors, bins=30, color='blue', edgecolor='black', density=True)
ax.plot(gaussian_x, stats.norm.pdf(gaussian_x, 0, 1), label='Gaussian mu: 0 std: 1', color='orange')
ax.text(0.05, 0.9, 'mean = %.3f' % (np.mean(residuals / model_errors)), transform=ax.transAxes)
ax.text(0.05, 0.85, 'std = %.3f' % (np.std(residuals / model_errors)), transform=ax.transAxes)
if is_calibrated == False:
calibrate = 'uncalibrated'
if is_calibrated == True:
calibrate = 'calibrated'
fig.savefig(os.path.join(savepath, 'rstat_histogram_'+str(data_type)+'_'+calibrate+'.png'), dpi=DPI, bbox_inches='tight')
if show_figure is True:
plt.show()
else:
plt.close()
return
@classmethod
def plot_rstat_uncal_cal_overlay(cls, savepath, data_type, residuals, model_errors, model_errors_cal,
show_figure=False):
# Eliminate model errors with value 0, so that the ratios can be calculated
zero_indices = []
for i in range(0, len(model_errors)):
if model_errors[i] == 0:
zero_indices.append(i)
residuals = np.delete(residuals, zero_indices)
model_errors = np.delete(model_errors, zero_indices)
model_errors_cal = np.delete(model_errors_cal, zero_indices)
# make data for gaussian plot
gaussian_x = np.linspace(-5, 5, 1000)
# create plot
x_align = 0.64
fig, ax = make_fig_ax(x_align=x_align)
ax.set_xlabel('residuals / model error estimates')
ax.set_ylabel('relative counts')
ax.hist(residuals/model_errors, bins=30, color='gray', edgecolor='black', density=True, alpha=0.4)
ax.hist(residuals/model_errors_cal, bins=30, color='blue', edgecolor='black', density=True, alpha=0.4)
ax.plot(gaussian_x, stats.norm.pdf(gaussian_x, 0, 1), label='Gaussian mu: 0 std: 1', color='orange')
ax.text(0.05, 0.9, 'mean = %.3f' % (np.mean(residuals / model_errors)), transform=ax.transAxes, fontdict={'fontsize': 10, 'color': 'gray'})
ax.text(0.05, 0.85, 'std = %.3f' % (np.std(residuals / model_errors)), transform=ax.transAxes, fontdict={'fontsize': 10, 'color': 'gray'})
ax.text(0.05, 0.8, 'mean = %.3f' % (np.mean(residuals / model_errors_cal)), transform=ax.transAxes, fontdict={'fontsize': 10, 'color': 'blue'})
ax.text(0.05, 0.75, 'std = %.3f' % (np.std(residuals / model_errors_cal)), transform=ax.transAxes, fontdict={'fontsize': 10, 'color': 'blue'})
fig.savefig(os.path.join(savepath, 'rstat_histogram_'+str(data_type)+'_uncal_cal_overlay.png'), dpi=DPI, bbox_inches='tight')
if show_figure is True:
plt.show()
else:
plt.close()
return
@classmethod
def plot_real_vs_predicted_error(cls, savepath, model, data_type, model_errors, residuals, dataset_stdev,
show_figure=False, is_calibrated=False, well_sampled_fraction=0.025):
bin_values, rms_residual_values, num_values_per_bin, number_of_bins, ms_residual_values, var_sq_residual_values = ErrorUtils()._parse_error_data(model_errors=model_errors,
residuals=residuals,
dataset_stdev=dataset_stdev)
model_name = model.model.__class__.__name__
if model_name == 'RandomForestRegressor':
model_type = 'RF'
elif model_name == 'GradientBoostingRegressor':
model_type = 'GBR'
elif model_name == 'ExtraTreesRegressor':
model_type = 'ET'
elif model_name == 'GaussianProcessRegressor':
model_type = 'GPR'
elif model_name == 'BaggingRegressor':
model_type = 'BR'
elif model_name == 'AdaBoostRegressor':
model_type = 'ABR'
if data_type not in ['train', 'test', 'leaveout']:
            print('Error: data_type must be one of "train", "test" or "leaveout"')
exit()
# Make RF error plot
fig, ax = make_fig_ax(aspect_ratio=0.5, x_align=0.65)
linear = LinearRegression(fit_intercept=True)
# Fit just blue circle data
# Find nan entries
nans = np.argwhere(np.isnan(rms_residual_values)).tolist()
# use nans (which are indices) to delete relevant parts of bin_values and
# rms_residual_values as they can't be used to fit anyway
bin_values_copy = np.empty_like(bin_values)
bin_values_copy[:] = bin_values
rms_residual_values_copy = np.empty_like(rms_residual_values)
rms_residual_values_copy[:] = rms_residual_values
bin_values_copy = np.delete(bin_values_copy, nans)
rms_residual_values_copy = np.delete(rms_residual_values_copy, nans)
num_values_per_bin_copy = np.array(num_values_per_bin)[np.array(num_values_per_bin) != 0]
# Only examine the bins that are well-sampled, i.e. have number of data points in them above a given threshold
well_sampled_number = round(well_sampled_fraction*np.sum(num_values_per_bin_copy))
rms_residual_values_wellsampled = rms_residual_values_copy[np.where(num_values_per_bin_copy > well_sampled_number)]
bin_values_wellsampled = bin_values_copy[np.where(num_values_per_bin_copy > well_sampled_number)]
num_values_per_bin_wellsampled = num_values_per_bin_copy[np.where(num_values_per_bin_copy > well_sampled_number)]
rms_residual_values_poorlysampled = rms_residual_values_copy[np.where(num_values_per_bin_copy <= well_sampled_number)]
bin_values_poorlysampled = bin_values_copy[np.where(num_values_per_bin_copy <= well_sampled_number)]
num_values_per_bin_poorlysampled = num_values_per_bin_copy[np.where(num_values_per_bin_copy <= well_sampled_number)]
yerr = list()
for i, j, k in zip(var_sq_residual_values, num_values_per_bin, rms_residual_values):
if j > 1:
yerr.append(np.sqrt(i) / (2 * np.sqrt(j) * k))
else:
yerr.append(1)
yerr = np.array(yerr)
yerr_wellsampled = yerr[np.where(num_values_per_bin > well_sampled_number)[0]]
yerr_poorlysampled = yerr[np.where(num_values_per_bin <= well_sampled_number)[0]]
ax.scatter(bin_values_wellsampled, rms_residual_values_wellsampled, s=80, color='blue', alpha=0.7)
ax.scatter(bin_values_poorlysampled, rms_residual_values_poorlysampled, s=80, color='blue', alpha=0.7)
ax.errorbar(bin_values_wellsampled, rms_residual_values_wellsampled, yerr=yerr_wellsampled, ecolor='blue', capsize=2, linewidth=0, elinewidth=1)
ax.errorbar(bin_values_poorlysampled, rms_residual_values_poorlysampled, yerr=yerr_poorlysampled, ecolor='blue', capsize=2, linewidth=0, elinewidth=1)
ax.set_xlabel(str(model_type) + ' model errors / dataset stdev', fontsize=12)
ax.set_ylabel('RMS Absolute residuals\n / dataset stdev', fontsize=12)
ax.tick_params(labelsize=10)
if not rms_residual_values_copy.size:
print("---WARNING: ALL ERRORS TOO LARGE FOR PLOTTING---")
exit()
else:
# Fit the line to all data, including the poorly sampled data, and weight data points by number of samples per bin
linear.fit(np.array(bin_values_copy).reshape(-1, 1), rms_residual_values_copy,
sample_weight=num_values_per_bin_copy)
yfit = linear.predict(np.array(bin_values_copy).reshape(-1, 1))
ax.plot(bin_values_copy, yfit, 'k--', linewidth=2)
r2 = r2_score(rms_residual_values_copy, yfit, sample_weight=num_values_per_bin_copy)
            slope = linear.coef_[0]
intercept = linear.intercept_
divider = make_axes_locatable(ax)
axbarx = divider.append_axes("top", 1.2, pad=0.12, sharex=ax)
axbarx.bar(x=bin_values, height=num_values_per_bin, width=bin_values[1]-bin_values[0], color='blue', edgecolor='black', alpha=0.7)
axbarx.tick_params(labelsize=10, axis='y')
axbarx.tick_params(labelsize=0, axis='x')
axbarx.set_ylabel('Counts', fontsize=12)
total_samples = sum(num_values_per_bin)
axbarx.text(0.95, round(0.67 * max(num_values_per_bin)), 'Total counts = ' + str(total_samples), fontsize=12)
xmax = max(max(bin_values_copy) + 0.05, 1.6)
ymax = max(1.3, max(rms_residual_values))
ax.set_ylim(bottom=0, top=ymax)
axbarx.set_ylim(bottom=0, top=round(max(num_values_per_bin) + 0.1*max(num_values_per_bin)))
ax.set_xlim(left=0, right=xmax)
ax.text(0.02, 0.9*ymax, 'R$^2$ = %3.2f ' % r2, fontdict={'fontsize': 10, 'color': 'k'})
ax.text(0.02, 0.8*ymax, 'slope = %3.2f ' % slope, fontdict={'fontsize': 10, 'color': 'k'})
ax.text(0.02, 0.7*ymax, 'intercept = %3.2f ' % intercept, fontdict={'fontsize': 10, 'color': 'k'})
# Plot y = x line as reference point
maxx = max(xmax, ymax)
        ax.plot([0, maxx], [0, maxx], '--', lw=2, zorder=1, color='gray', alpha=0.5)
if is_calibrated == False:
calibrate = 'uncalibrated'
if is_calibrated == True:
calibrate = 'calibrated'
fig.savefig(os.path.join(savepath, str(model_type) + '_residuals_vs_modelerror_' + str(data_type) + '_' + calibrate + '.png'),
dpi=300, bbox_inches='tight')
if show_figure is True:
plt.show()
else:
plt.close()
return
@classmethod
def plot_real_vs_predicted_error_uncal_cal_overlay(cls, savepath, model, data_type, model_errors, model_errors_cal,
residuals, dataset_stdev, show_figure=False,
well_sampled_fraction=0.025):
bin_values_uncal, rms_residual_values_uncal, num_values_per_bin_uncal, number_of_bins_uncal, ms_residual_values_uncal, var_sq_residual_values_uncal = ErrorUtils()._parse_error_data(model_errors=model_errors,
residuals=residuals,
dataset_stdev=dataset_stdev)
bin_values_cal, rms_residual_values_cal, num_values_per_bin_cal, number_of_bins_cal, ms_residual_values_cal, var_sq_residual_values_cal = ErrorUtils()._parse_error_data(model_errors=model_errors_cal,
residuals=residuals,
dataset_stdev=dataset_stdev)
model_name = model.model.__class__.__name__
if model_name == 'RandomForestRegressor':
model_type = 'RF'
elif model_name == 'GradientBoostingRegressor':
model_type = 'GBR'
elif model_name == 'ExtraTreesRegressor':
model_type = 'ET'
elif model_name == 'GaussianProcessRegressor':
model_type = 'GPR'
elif model_name == 'BaggingRegressor':
model_type = 'BR'
elif model_name == 'AdaBoostRegressor':
model_type = 'ABR'
if data_type not in ['train', 'test', 'leaveout']:
            print('Error: data_type must be one of "train", "test" or "leaveout"')
exit()
# Make RF error plot
fig, ax = make_fig_ax(aspect_ratio=0.5, x_align=0.65)
linear_uncal = LinearRegression(fit_intercept=True)
linear_cal = LinearRegression(fit_intercept=True)
# Only examine the bins that are well-sampled, i.e. have number of data points in them above a given threshold
well_sampled_number_uncal = round(well_sampled_fraction*np.sum(num_values_per_bin_uncal))
rms_residual_values_wellsampled_uncal = rms_residual_values_uncal[np.where(num_values_per_bin_uncal > well_sampled_number_uncal)[0]]
bin_values_wellsampled_uncal = bin_values_uncal[np.where(num_values_per_bin_uncal > well_sampled_number_uncal)[0]]
        num_values_per_bin_wellsampled_uncal = num_values_per_bin_uncal[np.where(num_values_per_bin_uncal > well_sampled_number_uncal)[0]]
"""
Module file for handling of downscaling input regridded (possibly bias-corrected)
forcing fields. Each input forcing product will loop through and determine if
downscaling is needed, based off options specified by the user.
"""
import math
import os
import time
import numpy as np
from netCDF4 import Dataset
from core import err_handler
def run_downscaling(input_forcings, config_options, geo_meta_wrf_hydro, mpi_config):
"""
Top level module function that will downscale forcing variables
for this particular input forcing product.
:param geo_meta_wrf_hydro:
:param mpi_config:
:param input_forcings:
:param config_options:
:return:
"""
# Dictionary mapping to temperature downscaling.
downscale_temperature = {
0: no_downscale,
1: simple_lapse,
2: param_lapse
}
downscale_temperature[input_forcings.t2dDownscaleOpt](input_forcings, config_options,
geo_meta_wrf_hydro, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary mapping to pressure downscaling.
downscale_pressure = {
0: no_downscale,
1: pressure_down_classic
}
downscale_pressure[input_forcings.psfcDownscaleOpt](input_forcings, config_options,
geo_meta_wrf_hydro, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary mapping to shortwave radiation downscaling
downscale_sw = {
0: no_downscale,
1: ncar_topo_adj
}
downscale_sw[input_forcings.swDowscaleOpt](input_forcings, config_options, geo_meta_wrf_hydro, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary mapping to specific humidity downscaling
downscale_q2 = {
0: no_downscale,
1: q2_down_classic
}
downscale_q2[input_forcings.q2dDownscaleOpt](input_forcings, config_options, geo_meta_wrf_hydro, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
# Dictionary mapping to precipitation downscaling.
downscale_precip = {
0: no_downscale,
1: nwm_monthly_PRISM_downscale
#1: precip_mtn_mapper
}
downscale_precip[input_forcings.precipDownscaleOpt](input_forcings, config_options, geo_meta_wrf_hydro, mpi_config)
err_handler.check_program_status(config_options, mpi_config)
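def _example_custom_downscale(input_forcings, config_options, geo_meta_wrf_hydro, mpi_config):
    """
    Hedged sketch only (never registered or called): a new downscaling routine
    just needs the same four-argument signature used by the dispatch
    dictionaries in run_downscaling, and should modify
    input_forcings.final_forcings in place. It would then be exposed by adding
    a new integer key to the relevant dictionary in run_downscaling, e.g.
    downscale_temperature[3] = _example_custom_downscale. The fixed 5.0 K/km
    lapse rate below is an illustrative assumption, and NDV masking is omitted
    for brevity.
    """
    elev_diff = input_forcings.height - geo_meta_wrf_hydro.height
    input_forcings.final_forcings[4, :, :] += (5.0 / 1000.0) * elev_diff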
def no_downscale(input_forcings, ConfigOptions, GeoMetaWrfHydro, MpiConfig):
"""
Generic function for passing states through without any
downscaling.
:param input_forcings:
:param ConfigOptions:
:return:
"""
input_forcings.final_forcings = input_forcings.final_forcings
def simple_lapse(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
Function that applies a single lapse rate adjustment to modeled
2-meter temperature by taking the difference of the native
input elevation and the WRF-hydro elevation.
    :param input_forcings:
:param ConfigOptions:
:param GeoMetaWrfHydro:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Applying simple lapse rate to temperature downscaling"
err_handler.log_msg(ConfigOptions, MpiConfig)
# Calculate the elevation difference.
elevDiff = input_forcings.height - GeoMetaWrfHydro.height
# Assign existing, un-downscaled temperatures to a temporary placeholder, which
# will be used for specific humidity downscaling.
if input_forcings.q2dDownscaleOpt > 0:
input_forcings.t2dTmp[:,:] = input_forcings.final_forcings[4,:,:]
# Apply single lapse rate value to the input 2-meter
# temperature values.
try:
indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform NDV search on input forcings"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
input_forcings.final_forcings[4,:,:] = input_forcings.final_forcings[4,:,:] + \
(6.49/1000.0)*elevDiff
except:
ConfigOptions.errMsg = "Unable to apply lapse rate to input 2-meter temperatures."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
input_forcings.final_forcings[indNdv] = ConfigOptions.globalNdv
# Reset for memory efficiency
indNdv = None
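# Worked example for simple_lapse (assumed numbers): if the source grid cell
# sits at 1500 m and the WRF-Hydro cell at 1000 m, elevDiff = +500 m, so a
# 280.000 K input becomes 280.000 + (6.49/1000)*500 = 283.245 K on the lower,
# and therefore warmer, output cell.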
def param_lapse(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
    Function that applies an a priori lapse rate adjustment to modeled
2-meter temperature by taking the difference of the native
input elevation and the WRF-hydro elevation. It's assumed this lapse
rate grid has already been regridded to the final output WRF-Hydro
grid.
    :param input_forcings:
:param ConfigOptions:
:param GeoMetaWrfHydro:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Applying aprior lapse rate grid to temperature downscaling"
err_handler.log_msg(ConfigOptions, MpiConfig)
# Calculate the elevation difference.
elevDiff = input_forcings.height - GeoMetaWrfHydro.height
if input_forcings.lapseGrid is None:
#if not np.any(input_forcings.lapseGrid):
# We have not read in our lapse rate file. Read it in, do extensive checks,
# scatter the lapse rate grid out to individual processors, then apply the
# lapse rate to the 2-meter temperature grid.
if MpiConfig.rank == 0:
while (True):
# First ensure we have a parameter directory
if input_forcings.paramDir == "NONE":
ConfigOptions.errMsg = "User has specified spatial temperature lapse rate " \
"downscaling while no downscaling parameter directory " \
"exists."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Compose the path to the lapse rate grid file.
lapsePath = input_forcings.paramDir + "/lapse_param.nc"
if not os.path.isfile(lapsePath):
ConfigOptions.errMsg = "Expected lapse rate parameter file: " + \
lapsePath + " does not exist."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Open the lapse rate file. Check for the expected variable, along with
# the dimension size to make sure everything matches up.
try:
idTmp = Dataset(lapsePath,'r')
except:
ConfigOptions.errMsg = "Unable to open parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if not 'lapse' in idTmp.variables.keys():
ConfigOptions.errMsg = "Expected 'lapse' variable not located in parameter " \
"file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
try:
lapseTmp = idTmp.variables['lapse'][:,:]
except:
ConfigOptions.errMsg = "Unable to extracte 'lapse' variable from parameter: " \
"file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Check dimensions to ensure they match up to the output grid.
if lapseTmp.shape[1] != GeoMetaWrfHydro.nx_global:
ConfigOptions.errMsg = "X-Dimension size mismatch between output grid and lapse " \
"rate from parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if lapseTmp.shape[0] != GeoMetaWrfHydro.ny_global:
ConfigOptions.errMsg = "Y-Dimension size mismatch between output grid and lapse " \
"rate from parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Perform a quick search to ensure we don't have radical values.
indTmp = np.where(lapseTmp < -10.0)
if len(indTmp[0]) > 0:
ConfigOptions.errMsg = "Found anomolous negative values in the lapse rate grid from " \
"parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
indTmp = np.where(lapseTmp > 100.0)
if len(indTmp[0]) > 0:
ConfigOptions.errMsg = "Found excessively high values in the lapse rate grid from " \
"parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Close the parameter lapse rate file.
try:
idTmp.close()
except:
ConfigOptions.errMsg = "Unable to close parameter file: " + lapsePath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
break
else:
lapseTmp = None
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Scatter the lapse rate grid to the other processors.
input_forcings.lapseGrid = MpiConfig.scatter_array(GeoMetaWrfHydro,lapseTmp,ConfigOptions)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Apply the local lapse rate grid to our local slab of 2-meter temperature data.
temperature_grid_tmp = input_forcings.final_forcings[4, :, :]
try:
indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform NDV search on input " + \
input_forcings.productName + " regridded forcings."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
indValid = np.where(temperature_grid_tmp != ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform search for valid values on input " + \
input_forcings.productName + " regridded temperature forcings."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
temperature_grid_tmp[indValid] = temperature_grid_tmp[indValid] + \
((input_forcings.lapseGrid[indValid]/1000.0) * elevDiff[indValid])
except:
ConfigOptions.errMsg = "Unable to apply spatial lapse rate values to input " + \
input_forcings.productName + " regridded temperature forcings."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
input_forcings.final_forcings[4,:,:] = temperature_grid_tmp
input_forcings.final_forcings[indNdv] = ConfigOptions.globalNdv
# Reset for memory efficiency
indTmp = None
indNdv = None
indValid = None
elevDiff = None
temperature_grid_tmp = None
def pressure_down_classic(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
Generic function to downscale surface pressure to the WRF-Hydro domain.
:param input_forcings:
:param ConfigOptions:
:param GeoMetaWrfHydro:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Performing topographic adjustment to surface pressure."
err_handler.log_msg(ConfigOptions, MpiConfig)
# Calculate the elevation difference.
elevDiff = input_forcings.height - GeoMetaWrfHydro.height
# Assign existing, un-downscaled pressure values to a temporary placeholder, which
# will be used for specific humidity downscaling.
if input_forcings.q2dDownscaleOpt > 0:
input_forcings.psfcTmp[:, :] = input_forcings.final_forcings[6, :, :]
try:
indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform NDV search on input forcings"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
try:
input_forcings.final_forcings[6,:,:] = input_forcings.final_forcings[6,:,:] +\
(input_forcings.final_forcings[6,:,:]*elevDiff*9.8)/\
(input_forcings.final_forcings[4,:,:]*287.05)
except:
ConfigOptions.errMsg = "Unable to downscale surface pressure to input forcings."
err_handler.log_critical(ConfigOptions, MpiConfig)
return
input_forcings.final_forcings[indNdv] = ConfigOptions.globalNdv
# Reset for memory efficiency
indNdv = None
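# Worked example for pressure_down_classic (assumed numbers): with
# P = 90000 Pa, T = 285 K, and elevDiff = +200 m, the hypsometric correction
# P*elevDiff*9.8/(T*287.05) adds roughly 2156 Pa, i.e. pressure increases as
# we move down to the lower WRF-Hydro cell.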
def q2_down_classic(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
NCAR function for downscaling 2-meter specific humidity using already downscaled
2-meter temperature, unadjusted surface pressure, and downscaled surface
pressure.
:param input_forcings:
:param ConfigOptions:
:param GeoMetaWrfHydro:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Performing topographic adjustment to specific humidity."
err_handler.log_msg(ConfigOptions, MpiConfig)
# Establish where we have missing values.
try:
indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
except:
ConfigOptions.errMsg = "Unable to perform NDV search on input forcings"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
# First calculate relative humidity given original surface pressure and 2-meter
# temperature
try:
relHum = rel_hum(input_forcings,ConfigOptions)
except:
ConfigOptions.errMsg = "Unable to perform topographic downscaling of incoming " \
"specific humidity to relative humidity"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
# Downscale 2-meter specific humidity
try:
q2Tmp = mixhum_ptrh(input_forcings,relHum,2,ConfigOptions)
except:
ConfigOptions.errMsg = "Unable to perform topographic downscaling of " \
"incoming specific humidity"
err_handler.log_critical(ConfigOptions, MpiConfig)
return
input_forcings.final_forcings[5,:,:] = q2Tmp
input_forcings.final_forcings[indNdv] = ConfigOptions.globalNdv
q2Tmp = None
indNdv = None
def nwm_monthly_PRISM_downscale(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
NCAR/OWP function for downscaling precipitation using monthly PRISM climatology in a
mountain-mapper like fashion.
:param input_forcings:
:param ConfigOptions:
:param GeoMetaWrfHydro:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Performing NWM Monthly PRISM Mountain Mapper " \
"Downscaling of Precipitation"
err_handler.log_msg(ConfigOptions, MpiConfig)
# Establish whether or not we need to read in new PRISM monthly climatology:
# 1.) This is the first output timestep, and no grids have been initialized.
# 2.) We have switched months from the last timestep. In this case, we need
# to re-initialize the grids for the current month.
initialize_flag = False
if input_forcings.nwmPRISM_denGrid is None and input_forcings.nwmPRISM_numGrid is None:
# We are on situation 1 - This is the first output step.
initialize_flag = True
# print('WE NEED TO READ IN PRISM GRIDS')
if ConfigOptions.current_output_date.month != ConfigOptions.prev_output_date.month:
# We are on situation #2 - The month has changed so we need to reinitialize the
# PRISM grids.
initialize_flag = True
# print('MONTH CHANGE.... NEED TO READ IN NEW PRISM GRIDS.')
if initialize_flag is True:
while (True):
# First reset the local PRISM grids to be safe.
input_forcings.nwmPRISM_numGrid = None
input_forcings.nwmPRISM_denGrid = None
# Compose paths to the expected files.
numeratorPath = input_forcings.paramDir + "/PRISM_Precip_Clim_" + \
ConfigOptions.current_output_date.strftime('%h') + '_NWM_Mtn_Mapper_Numer.nc'
denominatorPath = input_forcings.paramDir + "/PRISM_Precip_Clim_" + \
ConfigOptions.current_output_date.strftime('%h') + '_NWM_Mtn_Mapper_Denom.nc'
#print(numeratorPath)
#print(denominatorPath)
# Make sure files exist.
if not os.path.isfile(numeratorPath):
ConfigOptions.errMsg = "Expected parameter file: " + numeratorPath + \
" for mountain mapper downscaling of precipitation not found."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if not os.path.isfile(denominatorPath):
ConfigOptions.errMsg = "Expected parameter file: " + denominatorPath + \
" for mountain mapper downscaling of precipitation not found."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if MpiConfig.rank == 0:
# Open the NetCDF parameter files. Check to make sure expected dimension
# sizes are in place, along with variable names, etc.
try:
idNum = Dataset(numeratorPath,'r')
except:
ConfigOptions.errMsg = "Unable to open parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
try:
idDenom = Dataset(denominatorPath,'r')
except:
ConfigOptions.errMsg = "Unable to open parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Check to make sure expected names, dimension sizes are present.
if 'x' not in idNum.variables.keys():
ConfigOptions.errMsg = "Expected 'x' variable not found in parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if 'x' not in idDenom.variables.keys():
ConfigOptions.errMsg = "Expected 'x' variable not found in parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if 'y' not in idNum.variables.keys():
ConfigOptions.errMsg = "Expected 'y' variable not found in parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if 'y' not in idDenom.variables.keys():
ConfigOptions.errMsg = "Expected 'y' variable not found in parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if 'Data' not in idNum.variables.keys():
ConfigOptions.errMsg = "Expected 'Data' variable not found in parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if 'Data' not in idDenom.variables.keys():
ConfigOptions.errMsg = "Expected 'Data' variable not found in parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if idNum.variables['Data'].shape[0] != GeoMetaWrfHydro.ny_global:
ConfigOptions.errMsg = "Input Y dimension for: " + numeratorPath + \
" does not match the output WRF-Hydro Y dimension size."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if idDenom.variables['Data'].shape[0] != GeoMetaWrfHydro.ny_global:
ConfigOptions.errMsg = "Input Y dimension for: " + denominatorPath + \
" does not match the output WRF-Hydro Y dimension size."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if idNum.variables['Data'].shape[1] != GeoMetaWrfHydro.nx_global:
ConfigOptions.errMsg = "Input X dimension for: " + numeratorPath + \
" does not match the output WRF-Hydro X dimension size."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
if idDenom.variables['Data'].shape[1] != GeoMetaWrfHydro.nx_global:
ConfigOptions.errMsg = "Input X dimension for: " + denominatorPath + \
" does not match the output WRF-Hydro X dimension size."
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Read in the PRISM grid on the output grid. Then scatter the array out to the processors.
try:
numDataTmp = idNum.variables['Data'][:,:]
except:
ConfigOptions.errMsg = "Unable to extract 'Data' from parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
try:
denDataTmp = idDenom.variables['Data'][:,:]
except:
ConfigOptions.errMsg = "Unable to extract 'Data' from parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
# Close the parameter files.
try:
idNum.close()
except:
ConfigOptions.errMsg = "Unable to close parameter file: " + numeratorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
try:
idDenom.close()
except:
ConfigOptions.errMsg = "Unable to close parameter file: " + denominatorPath
err_handler.log_critical(ConfigOptions, MpiConfig)
break
else:
numDataTmp = None
denDataTmp = None
break
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Scatter the array out to the local processors
input_forcings.nwmPRISM_numGrid = MpiConfig.scatter_array(GeoMetaWrfHydro, numDataTmp, ConfigOptions)
err_handler.check_program_status(ConfigOptions, MpiConfig)
input_forcings.nwmPRISM_denGrid = MpiConfig.scatter_array(GeoMetaWrfHydro, denDataTmp, ConfigOptions)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Create temporary grids from the local slabs of params/precip forcings.
localRainRate = input_forcings.final_forcings[3,:,:]
numLocal = input_forcings.nwmPRISM_numGrid[:,:]
denLocal = input_forcings.nwmPRISM_denGrid[:,:]
# Establish index of where we have valid data.
try:
indValid = np.where((localRainRate > 0.0) & (denLocal > 0.0) & (numLocal > 0.0))
except:
ConfigOptions.errMsg = "Unable to run numpy search for valid values on precip and " \
"param grid in mountain mapper downscaling"
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Convert precipitation rate, which is mm/s to mm, which is needed to run the PRISM downscaling.
try:
localRainRate[indValid] = localRainRate[indValid]*3600.0
except:
ConfigOptions.errMsg = "Unable to convert temporary precip rate from mm/s to mm."
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
try:
localRainRate[indValid] = localRainRate[indValid] * numLocal[indValid]
except:
ConfigOptions.errMsg = "Unable to multiply precip by numerator in mountain mapper downscaling"
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
try:
localRainRate[indValid] = localRainRate[indValid] / denLocal[indValid]
except:
ConfigOptions.errMsg = "Unable to divide precip by denominator in mountain mapper downscaling"
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
# Convert local precip back to a rate (mm/s)
try:
localRainRate[indValid] = localRainRate[indValid]/3600.0
except:
ConfigOptions.errMsg = "Unable to convert temporary precip rate from mm to mm/s."
err_handler.log_critical(ConfigOptions, MpiConfig)
err_handler.check_program_status(ConfigOptions, MpiConfig)
input_forcings.final_forcings[3, :, :] = localRainRate
# Reset variables for memory efficiency
idDenom = None
idNum = None
localRainRate = None
numLocal = None
denLocal = None
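def _mountain_mapper_demo():
    """
    Hedged, self-contained sketch (not called anywhere) of the mountain-mapper
    arithmetic used above: convert a rate (mm/s) to an hourly depth (mm),
    scale by the PRISM numerator/denominator climatology ratio, then convert
    back to a rate. All values below are toy numbers.
    """
    rate = np.array([0.5e-3, 1.0e-3])   # precipitation rate in mm/s
    num = np.array([120.0, 80.0])       # PRISM numerator climatology
    den = np.array([100.0, 100.0])      # PRISM denominator climatology
    depth = rate * 3600.0               # mm accumulated over one hour
    depth = depth * num / den           # apply the climatology ratio
    return depth / 3600.0               # back to mm/s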
def ncar_topo_adj(input_forcings,ConfigOptions,GeoMetaWrfHydro,MpiConfig):
"""
Topographic adjustment of incoming shortwave radiation fluxes,
given input parameters.
:param input_forcings:
:param ConfigOptions:
:return:
"""
if MpiConfig.rank == 0:
ConfigOptions.statusMsg = "Performing topographic adjustment to incoming " \
"shortwave radiation flux."
err_handler.log_msg(ConfigOptions, MpiConfig)
# Establish where we have missing values.
try:
        indNdv = np.where(input_forcings.final_forcings == ConfigOptions.globalNdv)
import pandas as pd
import numpy as np
from sklearn.metrics import *
from sklearn.utils.multiclass import type_of_target
from sklearn.utils import check_consistent_length, column_or_1d, assert_all_finite
from sklearn.utils.extmath import stable_cumsum
def calc_uncertainty_regection_curve(errors, uncertainty, group_by_uncertainty=True):
n_objects = errors.shape[0]
if group_by_uncertainty:
data = pd.DataFrame(dict(
errors=errors,
uncertainty=uncertainty
))
mean_errors = data.groupby("uncertainty").mean()
mean_errors.rename(columns={"errors": "mean_errors"}, inplace=True)
data = data.join(mean_errors, "uncertainty")
data.drop("errors", axis=1, inplace=True)
uncertainty_order = data["uncertainty"].argsort()
errors = data["mean_errors"][uncertainty_order]
else:
uncertainty_order = uncertainty.argsort()
errors = errors[uncertainty_order]
error_rates = np.zeros(n_objects + 1)
error_rates[:-1] = np.cumsum(errors)[::-1] / n_objects
return error_rates
assert np.allclose(
calc_uncertainty_regection_curve(np.array([2, 1]), np.array([1, 0])).mean(),
2 / 3
)
assert np.allclose(
calc_uncertainty_regection_curve(np.arange(5), np.array([0, 0, 2, 1, 1])).mean(),
0.8
)
debug_errors = np.random.rand(10)
assert np.allclose(
calc_uncertainty_regection_curve(debug_errors, np.zeros_like(debug_errors)).mean(),
debug_errors.mean() / 2
)
def calc_aucs(errors, uncertainty):
uncertainty_rejection_curve = calc_uncertainty_regection_curve(errors, uncertainty)
uncertainty_rejection_auc = uncertainty_rejection_curve.mean()
random_rejection_auc = uncertainty_rejection_curve[0] / 2
ideal_rejection_auc = calc_uncertainty_regection_curve(errors, errors).mean()
rejection_ratio = (uncertainty_rejection_auc - random_rejection_auc) / (
ideal_rejection_auc - random_rejection_auc) * 100.0
return rejection_ratio, uncertainty_rejection_auc
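# Hedged usage sketch for calc_aucs on toy data: a measure that ranks errors
# perfectly yields a rejection ratio of 100, while a constant (uninformative)
# measure yields 0.
_demo_errs = np.array([0.1, 0.5, 2.0, 4.0])
assert np.allclose(calc_aucs(_demo_errs, _demo_errs)[0], 100.0)
assert np.allclose(calc_aucs(_demo_errs, np.zeros_like(_demo_errs))[0], 0.0)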
def prr_classification(labels, probs, measure, rev: bool):
if rev:
measure = -measure
preds = np.argmax(probs, axis=1)
errors = (labels != preds).astype("float32")
return calc_aucs(errors, measure)
def prr_regression(targets, preds, measure):
preds = np.squeeze(preds)
# Compute MSE errors
errors = (preds - targets) ** 2
return calc_aucs(errors, measure)
def ood_detect(domain_labels, in_measure, out_measure, mode='ROC', pos_label=1):
scores = np.concatenate((in_measure, out_measure), axis=0)
scores = np.asarray(scores)
if pos_label != 1:
scores *= -1.0
if mode == 'PR':
precision, recall, thresholds = precision_recall_curve(domain_labels, scores)
aupr = auc(recall, precision)
return aupr
elif mode == 'ROC':
roc_auc = roc_auc_score(domain_labels, scores)
return roc_auc
def nll_regression(target, mu, var, epsilon=1e-8, raw=False):
nll = (target - mu) ** 2 / (2.0 * var + epsilon) + np.log(var + epsilon) / 2.0 + np.log(2 * np.pi) / 2.0
if raw:
return nll
return np.mean(nll)
def nll_class(target, probs, epsilon=1e-10):
log_p = -np.log(probs + epsilon)
return target * log_p[:, 1] + (1 - target) * log_p[:, 0]
def ens_nll_regression(target, preds, epsilon=1e-8, raw=False):
mu = preds[:, :, 0]
var = preds[:, :, 1]
nll = (target - mu) ** 2 / (2.0 * var + epsilon) + np.log(var + epsilon) / 2.0 + np.log(2 * np.pi) / 2.0
proba = np.exp(-1 * nll)
if raw:
return -1 * np.log(np.mean(proba, axis=0))
return np.mean(-1 * np.log(np.mean(proba, axis=0)))
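def ens_nll_regression_stable(target, preds, epsilon=1e-8, raw=False):
    """
    Hedged alternative sketch (assumed helper, not used elsewhere): the same
    ensemble NLL as ens_nll_regression, but averaged in log space with
    logsumexp so that exp(-nll) cannot underflow for confident, badly wrong
    ensemble members.
    """
    from scipy.special import logsumexp
    mu = preds[:, :, 0]
    var = preds[:, :, 1]
    nll = (target - mu) ** 2 / (2.0 * var + epsilon) + np.log(var + epsilon) / 2.0 + np.log(2 * np.pi) / 2.0
    # -log(mean(exp(-nll), axis=0)) == -(logsumexp(-nll, axis=0) - log(n_members))
    out = -(logsumexp(-nll, axis=0) - np.log(nll.shape[0]))
    if raw:
        return out
    return np.mean(out)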
def calc_rmse(preds, target, raw=False):
if raw:
return (preds - target) ** 2
return np.sqrt(np.mean((preds - target) ** 2))
def ens_rmse(target, preds, epsilon=1e-8, raw=False):
means = preds[:, :, 0] # mean values predicted by all models
avg_mean = np.mean(means, axis=0) # average predicted mean value
if raw:
return calc_rmse(avg_mean, target, raw=True)
return calc_rmse(avg_mean, target)
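def ens_total_uncertainty(preds):
    """
    Hedged sketch (assumed helper, not in the original file): the usual
    decomposition of ensemble predictive variance for preds shaped
    (n_members, n_points, 2) with columns (mean, variance) -- aleatoric
    (average member variance) plus epistemic (variance of member means).
    """
    means = preds[:, :, 0]
    variances = preds[:, :, 1]
    aleatoric = np.mean(variances, axis=0)
    epistemic = np.var(means, axis=0)
    return aleatoric + epistemic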
def _check_pos_label_consistency(pos_label, y_true):
# ensure binary classification if pos_label is not specified
# classes.dtype.kind in ('O', 'U', 'S') is required to avoid
# triggering a FutureWarning by calling np.array_equal(a, b)
# when elements in the two arrays are not comparable.
classes = np.unique(y_true)
if (pos_label is None and (
classes.dtype.kind in 'OUS' or
not (np.array_equal(classes, [0, 1]) or
np.array_equal(classes, [-1, 1]) or
np.array_equal(classes, [0]) or
np.array_equal(classes, [-1]) or
np.array_equal(classes, [1])))):
classes_repr = ", ".join(repr(c) for c in classes)
raise ValueError(
f"y_true takes value in {{{classes_repr}}} and pos_label is not "
f"specified: either make y_true take value in {{0, 1}} or "
f"{{-1, 1}} or pass pos_label explicitly."
)
elif pos_label is None:
pos_label = 1.0
return pos_label
def _binary_clf_curve_ret(y_true, y_score, pos_label=None, sample_weight=None):
# Check to make sure y_true is valid
y_type = type_of_target(y_true)
if not (y_type == "binary" or
(y_type == "multiclass" and pos_label is not None)):
raise ValueError("{0} format is not supported".format(y_type))
check_consistent_length(y_true, y_score, sample_weight)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
assert_all_finite(y_true)
assert_all_finite(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
pos_label = _check_pos_label_consistency(pos_label, y_true)
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# distinct_value_indices = np.where(np.diff(y_score))[0]
# threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = stable_cumsum(y_true * weight) # [threshold_idxs]
if sample_weight is not None:
# express fps as a cumsum to ensure fps is increasing even in
# the presence of floating point errors
fps = stable_cumsum((1 - y_true) * weight) # [threshold_idxs]
else:
fps = stable_cumsum((1 - y_true)) # [threshold_idxs]
return fps, tps, y_score # [threshold_idxs]
def _precision_recall_curve_retention(y_true, probas_pred, *, pos_label=None,
sample_weight=None):
fps, tps, thresholds = _binary_clf_curve_ret(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
precision[np.isnan(precision)] = 0
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(-1, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def _acceptable_error(errors, threshold):
return np.asarray(errors <= threshold, dtype=np.float32)
def _calc_fbeta_regection_curve(errors, uncertainty, threshold, beta=1.0, group_by_uncertainty=True, eps=1e-10):
ae = _acceptable_error(errors, threshold)
pr, rec, _ = _precision_recall_curve_retention(ae, -uncertainty)
pr = np.asarray(pr)
    rec = np.asarray(rec)
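    # Hedged completion: the source is truncated at this point. The standard
    # F-beta combination below (with eps guarding zero denominators) is an
    # assumption about the intended remainder, not recovered original code.
    f_scores = (1 + beta ** 2) * pr * rec / (beta ** 2 * pr + rec + eps)
    return f_scores, pr, rec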
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 5 11:56:32 2020
Some functions to do Logistic Regression and Neural Network Learning
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize
plt.rc('text',usetex=True)
plt.rc('font',family='Times New Roman')
def sigmoid(z):
'''sigmoid function, output between 0-1'''
sigmoid = 1/(1+np.exp(-z))
return sigmoid
def costfuncReg(theta,x,y,lam):
'''Compute cost for Logistic Regression'''
m = len(y)
theta = theta.reshape(-1,1)
J=np.zeros([len(theta),1])
z = x@theta
J = -1/m*([email protected](sigmoid(z))+(1-y)[email protected](1-sigmoid(z)))+lam/2/m*theta.T[:,1:]@theta[1:,:]
return J
def gridReg(theta,x,y,lam):
'''Compute gradient for Logistic Regression'''
m = len(y)
theta = theta.reshape(-1,1)
grid=np.zeros([len(theta),1])
z = x@theta
grid[0,:] = 1/m*(x.T@(sigmoid(z)-y))[0,:]
grid[1:,:] = 1/m*(x.T@(sigmoid(z)-y))[1:,:] + lam/m*theta[1:,:]
return grid.flatten()
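# Worked sanity check for costfuncReg (toy reasoning only): with theta = 0,
# every prediction is sigmoid(0) = 0.5, so the cost is -log(0.5) ~= 0.693 for
# any x, any y in {0, 1}, and any lam (the regularization term vanishes) -- a
# quick way to verify the implementation before optimizing.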
def onevsall(x, y, labels, lam):
'''trains multiple logistic regression classifiers and returns all the classifiers in a matrix all_theta'''
    all_theta = np.zeros([labels, x.shape[1]])
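    # Hedged completion: the source is truncated at this point. A conventional
    # one-vs-all loop with scipy's minimize is sketched below; class labels are
    # assumed to run 0..labels-1, x is assumed to already carry its bias
    # column, and the TNC solver with 100 iterations is an illustrative
    # choice, not recovered original code.
    for c in range(labels):
        yc = (y == c).astype(float).reshape(-1, 1)
        res = minimize(lambda t: np.squeeze(costfuncReg(t, x, yc, lam)),
                       np.zeros(x.shape[1]),
                       jac=lambda t: gridReg(t, x, yc, lam),
                       method='TNC', options={'maxiter': 100})
        all_theta[c, :] = res.x
    return all_theta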
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
test for util operations for unary operations with tagged data

:remark: see `test_util`
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
"""
__author__="<NAME>, <EMAIL>"
import esys.escriptcore.utestselect as unittest
import numpy
from esys.escript import *
from test_util_base import Test_util_base
haveLapack = hasFeature('lapack')
class Test_util_unary_with_tagged_data(Test_util_base):
"""
test for unary operations. only tagged data are tested.
"""
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log10_taggedData_rank0(self):
arg=Data(55.1652630602,self.functionspace)
arg.setTaggedValue(1,10.5828519405)
res=log10(arg)
ref=Data(1.74166569349,self.functionspace)
ref.setTaggedValue(1,1.02460272017)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log10_taggedData_rank1(self):
arg=Data(numpy.array([72.282406932431243, 35.729324148665768]),self.functionspace)
arg.setTaggedValue(1,numpy.array([23.804409029158563, 82.472345107833661]))
res=log10(arg)
ref=Data(numpy.array([1.8590326057050119, 1.5530248012211607]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.3766574041024322, 1.916308343937587]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log10_taggedData_rank2(self):
arg=Data(numpy.array([[51.453194171526192, 13.570707659190413, 60.521549437302234, 31.599538694833306,
14.891175253445139], [98.173449179193497, 5.0087678798438278, 52.481382734493792, 29.128158011918146, 98.064075237764598],
[36.407550507350827, 89.884167676960288, 39.308304837547745, 75.538185852569995, 33.601340111371606], [63.889377928887228,
4.6186118848356488, 69.136277385337451, 2.6710200091532696, 63.918258275478514]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[3.7796743359003022, 13.180984629177244, 59.461828020927761, 20.406986279454642,
72.171015597628937], [1.9587215571356407, 17.280986015551932, 84.05693756007831, 45.547880329201142, 32.870521541704392],
[26.737813837799116, 37.531132826532321, 51.180765330858762, 78.2056706013324, 52.489986316281318], [63.141114740929247,
23.684128984789403, 81.88613234855724, 36.918777925154153, 19.245705222936365]]))
res=log10(arg)
ref=Data(numpy.array([[1.7114123405757837, 1.1326024950044853, 1.7819100380467305, 1.4996807426262928,
1.1729289748471519], [1.9919940495580575, 0.69973090574844821, 1.7200052689371468, 1.464313021855508, 1.991509937212202],
[1.5611914606717532, 1.9536832012319965, 1.5944843153649138, 1.8781665504719267, 1.5263565985615033], [1.8054286595616771,
0.66451146877260103, 1.8397059914266514, 0.4266771414217112, 1.8056249322687863]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.57745438179108843, 1.1199478535847189, 1.7742382564867842, 1.309778872484638,
1.8583628167754329], [0.29197270307270162, 1.2375685187503027, 1.9245735633402872, 1.6584681709302851, 1.516806594890489],
[1.4271258951849872, 1.5743916732030367, 1.7091067758161855, 1.8932382443844189, 1.7200764595510651], [1.8003122449866822,
1.3744574176029094, 1.9132103589710923, 1.5672473166998622, 1.2843338296326507]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log10_taggedData_rank3(self):
arg=Data(numpy.array([[[90.690037899671296, 23.785411821546496], [91.533710749831201, 35.110847145935921]],
[[31.790987742717761, 80.73086172262245], [76.879402834581697, 85.744103669605451]], [[8.1269631331611549, 52.871037837294452],
[9.1059277056430368, 62.81826904111756]], [[91.343888112038101, 42.790045009836057], [98.923113540366373, 76.508459842843422]],
[[75.511345908209677, 53.05029465716877], [21.756665086458423, 84.34627478690912]], [[20.881744462723443, 21.498514780242811],
[94.039541086706947, 63.040386118170531]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[49.039591873498914, 47.203472254904376], [13.39618662616329, 80.407751980277737]],
[[94.740716086709412, 71.680553195598819], [22.063626045323556, 78.003840393051846]], [[32.738459746566498,
34.410070237534583], [90.599810283330726, 38.951280733947229]], [[97.583590849819274, 21.088714492740912], [60.799357198434329,
9.2997265651169414]], [[92.165943907973187, 12.913360305242884], [89.764291870306224, 11.704176719145334]],
[[33.563051881776232, 10.411945777444954], [23.411376390403166, 48.768282109713994]]]))
res=log10(arg)
ref=Data(numpy.array([[[1.9575595833906394, 1.3763106752125882], [1.9615810688900812, 1.5454413081428469]],
[[1.50230402160926, 1.9070395881750126], [1.885810001264896, 1.9332042647871273]], [[0.90992828964073624, 1.7232178355155006],
[0.95932419800764912, 1.7980859652326844]], [[1.9606794936916605, 1.6313427433082643], [1.9952977770394453,
1.8837094594664194]], [[1.8780122111672353, 1.7246878004452211], [1.3375923264330749, 1.9260659064246666]],
[[1.3197667768440355, 1.3324084578214161], [1.9733105010994656, 1.7996188645144506]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.6905468471583012, 1.6739739462120611], [1.1269811891924277, 1.9052979203898317]],
[[1.9765366629687919, 1.855401348340298], [1.3436768880708625, 1.8921159850039784]], [[1.5150582432096382, 1.5366855590999351],
[1.9571272882619251, 1.59052174202249]], [[1.9893767950999692, 1.3240501072063162], [1.7838989877160136, 0.96847017941343561]],
[[1.964570475228363, 1.1110392687676565], [1.9531036091864897, 1.0683408704968049]], [[1.5258614442649203, 1.0175318977566594],
[1.3694269472799274, 1.6881374575925516]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log10_taggedData_rank4(self):
arg=Data(numpy.array([[[[90.916803177883821, 54.525338275396493, 13.364424356447465, 51.314413977784419],
[80.231561588327807, 82.162456273997748, 8.3566662162590539, 94.730819165987], [77.473176646131762, 20.972609574388059,
74.896335161378119, 88.10387415596874]], [[11.595133878605829, 29.493110939671826, 31.490679142790391, 39.161104201178077],
[32.61379799879397, 58.334287443171277, 88.246072805422187, 35.746693154753984], [88.031724708015759, 42.086782575753446,
76.30576562684233, 41.664454917294485]]], [[[73.805732338880929, 29.722157924518495, 11.979308129040467, 56.678829139293917],
[6.1110346378486105, 61.420099159473246, 24.460988572874975, 9.9837108208795708], [29.304214355701266, 69.239538294798919,
43.726703031386528, 96.453481611027584]], [[83.748022272324235, 32.953465755838039, 34.11675054427031, 16.642877884588994],
[64.574790966313543, 42.938611636354324, 46.810954363884647, 91.97971646326387], [96.485547539718311, 42.855584051837369,
73.227470310618529, 73.565844556183777]]], [[[88.201355962594207, 41.836289548798113, 69.197678273827108, 31.32522051118902],
[44.933739003053383, 82.304262181531868, 46.662125485783939, 25.216812874514684], [37.715123702749331, 0.33654002188789439,
77.616411403471773, 19.152072401340583]], [[92.715182555824981, 51.479018971675195, 58.389552448640487, 11.079825716836668],
[66.120381536086015, 54.696122559623113, 74.602124135737157, 46.764404847359458], [92.441508878592927, 49.13843332363826,
84.277334637691311, 61.375020008040991]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[62.928329908936867, 78.142232594489769, 23.694870846158736, 77.379623356172573],
[11.343774294284144, 5.0141456599208922, 65.791042346980248, 72.904521530203226], [46.165649854154751, 46.031503262450066,
64.12361229840144, 51.813579296266198]], [[22.679300826571335, 62.977332064943198, 13.771125130940399, 59.844651806488763],
[14.177003870203592, 30.872939480711043, 76.89257820864357, 21.359624412764553], [64.357528521726167, 45.754541308463061,
86.917154454162898, 62.525134832715636]]], [[[75.962390497323355, 70.145694672660483, 76.932538896196164, 61.719435975622567],
[77.812655042655194, 15.285729007526603, 50.390239206343267, 62.704163646191077], [49.67778501460851, 46.415926037609047,
56.588556029575471, 27.934863117344474]], [[76.060984285811514, 81.295553924710816, 69.918265989518105, 83.458206572989525],
[63.469111974419398, 69.954750106734039, 31.380947651740421, 19.198733624736676], [64.480248540295207, 13.727409292553201,
31.845984674993723, 65.803516596296177]]], [[[99.122756107881074, 86.808131124216828, 1.4321294301423275, 8.3438957972984138],
[34.503440819741336, 35.67099265092223, 48.831668912254365, 14.139212054299726], [98.020513665211695, 25.954655701381547,
1.3758192696653002, 95.607029783574006]], [[49.7055983523964, 12.62977930442664, 26.742962982817151, 83.708869974268808],
[40.504846807543508, 68.747127993174473, 99.752608339104768, 95.244092191429729], [53.238233591188212, 34.920347644790411,
10.5293904374393, 9.2580418923770118]]]]))
res=log10(arg)
ref=Data(numpy.array([[[[1.958644156632638, 1.7365983686236373, 1.1259502572038609, 1.7102393735278381],
[1.9043452451825991, 1.9146734141614923, 0.92203305583094985, 1.9764912927969005], [1.8891513637839643, 1.3216524721079508,
1.8744605672571355, 1.9449950058872605]], [[1.0642757674116168, 1.4697205844431256, 1.4981820269365049, 1.5928549288337999],
[1.5134013769682526, 1.7659238973023306, 1.9456953871829765, 1.5532358724009305], [1.9446392105900805, 1.6241457263418644,
1.8825573542762462, 1.6197657044633726]]], [[[1.8680900938889471, 1.4730803374132002, 1.0784317358816056, 1.7534208700934903],
[0.78611474534376435, 1.7883105132528869, 1.3884740047472364, 0.99929199314012029], [1.4669300824636733, 1.8403541632373015,
1.640746733021623, 1.9843179086742269]], [[1.9229745598717729, 1.5179010966078323, 1.5329676600131747, 1.2212284266018456],
[1.8100630090021725, 1.6328479979852315, 1.670347495447966, 1.9636920664836188], [1.9844622657395339, 1.6320074182054687,
1.8646740314178081, 1.8666762251564302]]], [[[1.9454752618046733, 1.6215531600863007, 1.8400915232171524, 1.4958941368555099],
[1.652572558384511, 1.9154223260262504, 1.6689645172636371, 1.4016901956994581], [1.5765155365073076, -0.47296328136118848,
1.8899535592530265, 1.2822157748461316]], [[1.9671508577724528, 1.7116302620231869, 1.766335146430758, 1.0445329290917416],
[1.8203353509269165, 1.7379565400327668, 1.8727511932541194, 1.6699154116173478], [1.9658670256463047, 1.6914213057918996,
1.9257107921182273, 1.787991646709524]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.7988462059099257, 1.8928858152534429, 1.3746543458291129, 1.8886266114703387],
[1.0547575768197655, 0.70019694597262339, 1.8181667671078665, 1.8627544640480471], [1.6643189533177833, 1.6630551579903574,
1.8070179796377215, 1.7144435945053167]], [[1.355629661691661, 1.7991842584914508, 1.1389694245123259, 1.7770253444715181],
[1.1515844579186774, 1.4895779815869408, 1.8858844230448231, 1.329593611797627], [1.808599357832434, 1.660434205884993,
1.9391054996884767, 1.7960546369405848]]], [[[1.8805986233038012, 1.8460010205206747, 1.886110065083187, 1.7904219485998918],
[1.8910502341136941, 1.1842861559437698, 1.7023464199899885, 1.7972963795632348], [1.6961622236947298, 1.6666670194238626,
1.7527286121825749, 1.4461465476300437]], [[1.8811619412061271, 1.9100667945637591, 1.8445906490239146, 1.9214690479550947],
[1.8025624219423253, 1.8448172095369506, 1.4966660544104895, 1.2832725829781746], [1.8094267027962698, 1.1375885826765326,
1.5030546817757668, 1.8182491033032764]]], [[[1.9961733690924155, 1.9385604064782116, 0.15598226954758712,
0.92136887176784188], [1.5378624067983235, 1.5523151950355665, 1.6887015673518488, 1.1504252079046915], [1.9913169740527836,
1.4142152721675985, 0.1385613878659461, 1.9804898262088639]], [[1.6964053061705062, 1.1013957616632397, 1.4272095231877091,
1.9227714792212125], [1.6075069939758382, 1.8372545596256498, 1.9989242604737723, 1.9788380467284004], [1.7262236374426407,
1.5430785586149354, 1.0224032299396042, 0.96651914162145247]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_wherePositive_taggedData_rank0(self):
arg=Data(-77.2124777804,self.functionspace)
arg.setTaggedValue(1,-76.5223591123)
res=wherePositive(arg)
ref=Data(0.0,self.functionspace)
ref.setTaggedValue(1,0.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_wherePositive_taggedData_rank1(self):
arg=Data(numpy.array([95.507404522977254, 85.699228977736311]),self.functionspace)
arg.setTaggedValue(1,numpy.array([22.570768490261898, -91.124851922506281]))
res=wherePositive(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0, 0.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_wherePositive_taggedData_rank2(self):
arg=Data(numpy.array([[-76.258485344070493, -13.031569421642786, 56.331314317015853, 76.297860126066155,
88.130561052243763], [-6.1668191468469757, 54.331037054715466, -81.929096086646751, 11.266746882647325, 48.963064080280049],
[60.302120288359191, -98.222376211103324, 24.902263686516406, 76.321693298041907, -15.612529577369273], [-89.36373926383007,
-99.797095905565556, 55.669412249479365, 73.050408854136265, 58.641360635396893]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[84.521756116539905, -21.50523951337766, -71.758529828844189, -31.116527593639944,
73.480533241007663], [61.356497085608538, 87.01679183964643, 73.718163356212273, 65.825276106677222, -67.838266379557695],
[94.900878893099161, 6.6085152597015195, -16.431260589637816, -60.728182658412621, -20.252278299611689], [16.99540909074102,
-92.327824606679144, -84.194337061595093, -99.086577441520987, 1.4609814172980435]]))
res=wherePositive(arg)
ref=Data(numpy.array([[0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 1.0,
1.0, 1.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.0, 0.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0], [1.0,
0.0, 0.0, 0.0, 1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_wherePositive_taggedData_rank3(self):
arg=Data(numpy.array([[[80.508473585546227, -73.527464319061067], [-63.562066924370612, -27.980541518495002]],
[[-41.335157425780203, -43.662467900732139], [19.157012696460683, -20.184083339906508]], [[35.870058595838856,
72.278036946039947], [75.339493834805268, -9.1707737241088836]], [[-68.38683588297539, -47.88605412318423],
[-20.399875642984753, -29.241844531878812]], [[-67.76044429517556, 55.107326245665774], [59.476906111528308,
65.132080499441145]], [[39.011636203343926, 68.793212772548998], [-5.2117301620619116,
-37.964739068093408]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-43.212061592928961, 66.640597663557344], [-43.214739911125989, 71.228530019395464]],
[[86.72455453389685, 0.070521918497504998], [18.131949004592585, 67.642647170226724]], [[-20.485683043230935,
-76.185964145658346], [1.5005108312435596, 24.688848573063282]], [[86.368146458112335, 12.287053770624041],
[65.053528607732602, -40.176824870036555]], [[67.412368199122028, 93.02485737256805], [2.3354688446274565,
-77.333138418682523]], [[68.799317717343797, 50.656492146642165], [-11.239017823949453, -0.61920809407223487]]]))
res=wherePositive(arg)
ref=Data(numpy.array([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [1.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[0.0, 0.0], [0.0,
0.0]], [[0.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [0.0, 0.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.0, 1.0], [0.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[0.0, 0.0], [1.0, 1.0]], [[1.0,
1.0], [1.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[1.0, 1.0], [0.0, 0.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_wherePositive_taggedData_rank4(self):
arg=Data(numpy.array([[[[88.580956068056565, 14.00279382125251, 99.108011223826338, 45.511681652958828],
[82.854668978990787, -71.320284703817521, -47.231688078520051, -87.449857804724985], [93.257616694753921, 52.060412772871643,
18.537844019590111, 53.497901549669848]], [[-46.91724689666372, -75.312992998386903, 24.379946633037505, 3.6136809288494618],
[55.419075241203274, 64.655875544211057, 5.6929661553654682, -80.668713367017716], [9.6958327067133041, -69.61412534721569,
-39.445219790469352, 87.567956888590658]]], [[[-73.321457711307843, 10.82305253374048, -40.400284930212905,
-92.490874982007981], [-79.425928971727643, -75.222388693443605, 89.503284861115134, 83.502961391303643], [-88.448173270777147,
-50.193426055655976, -70.923108466792598, -25.976738197547292]], [[-8.5488119421924864, -1.9838167877165915,
-56.838230691876412, -35.192343099118673], [-14.387471763442306, -65.661449017261418, 75.22011478664038, -84.87320516882086],
[98.450531686197365, -81.019483890591289, -94.982842703436916, -49.156850403858819]]], [[[75.118284154717031,
-51.311615796136792, -89.182477325683962, 55.44041573353897], [-80.77776129565197, -34.097004704596088, 75.361574051712552,
-16.248241864062635], [84.169901667127789, 74.398090927221261, 86.145123497406473, 88.071402053067715]], [[93.648624647556488,
1.6348597078223719, 6.0244027607641755, 19.383999786570769], [-41.76041050584827, 10.228798270020405, -47.174639917060254,
-7.0250102695790275], [-48.870699185907625, -19.365332607009293, 51.663276846691986,
-68.319951789687167]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[43.072392507789715, 25.44897255914222, -92.365292140693199, -72.28679201432702],
[-35.283625661883562, 51.080784351734991, 94.294048609912153, -48.875639845246745], [54.267354274548921, -77.595839033594572,
13.255608540993677, -79.509755448949591]], [[76.273739615561396, -51.362336359893511, -85.195354636261797,
-4.4124416020654849], [-97.854643345235729, -10.568395289402361, -79.904773298276851, -37.753686446232606],
[64.250602682004057, -79.115735111510105, -32.503923559859047, 90.214123166503839]]], [[[39.304515721103343, 85.49840367909664,
60.966173887100808, 4.4734960523447711], [53.114809276601221, -14.423789459082229, -13.61152991089152, -96.486812903270419],
[-52.378017052068572, -0.16685024940963444, 2.2217407671002007, 2.7128133952607953]], [[58.208300545381121, -23.46308457904766,
-67.68416120310016, -35.150913017323049], [-18.407699905877124, 1.6451869874854879, -1.401899624666143, -87.412868064712512],
[-65.336170807327917, 68.755684784091613, 85.913136752325443, 27.997231935596872]]], [[[-66.686788600040472,
6.9245385685220668, -75.689596750307246, -73.922470171071836], [-56.830071118701973, -87.957208168819264, 15.670539647819766,
-25.0926801353923], [-9.3946841261667942, 81.217979881426032, 31.881116652908219, -94.330057102451676]], [[-13.101408221863963,
5.3815053309403993, -42.53780805955558, -33.796637768394746], [72.590706488145599, -33.171908847280093, 38.102432612245622,
-71.169285857339815], [-54.513514454446252, -15.087111212827736, 68.23763859582499, -73.63388136632733]]]]))
res=wherePositive(arg)
ref=Data(numpy.array([[[[1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]], [[0.0, 0.0, 1.0, 1.0], [1.0,
1.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0]]], [[[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0,
0.0], [0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0]]], [[[1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0]], [[1.0,
1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0], [1.0, 0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0,
0.0], [0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]], [[1.0,
0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0]]], [[[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 1.0,
0.0]], [[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 1.0, 0.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNegative_taggedData_rank0(self):
arg=Data(-19.2850342868,self.functionspace)
arg.setTaggedValue(1,-31.7600922133)
res=whereNegative(arg)
ref=Data(1.0,self.functionspace)
ref.setTaggedValue(1,1.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNegative_taggedData_rank1(self):
arg=Data(numpy.array([-69.991852168228164, -51.135726516141467]),self.functionspace)
arg.setTaggedValue(1,numpy.array([64.371955068626278, 56.155825493201263]))
res=whereNegative(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.0, 0.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNegative_taggedData_rank2(self):
arg=Data(numpy.array([[48.844728607913282, -72.785354714899881, 53.3081372120038, 18.65599332913655,
-46.488345451249288], [-82.483069621758148, -33.022373579278181, -62.408982644197899, -30.801150776046654,
-3.1747181449523367], [68.051986644816708, -10.324492516248156, -35.538799676186628, -76.221649010357453, -10.365176815811154],
[12.925649512488647, -69.48406607854993, -14.171821915240514, 66.552057082826508, -98.385243996883332]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[63.457797271160132, 61.751200439630537, 75.390084871615102, -50.818227552321105,
64.870767673506009], [-37.606099936006679, -75.587774814583128, -30.927974509536099, -95.537626302784801, 85.46249790652584],
[-78.74170463945444, -96.246956122658901, 26.26558952019225, -96.296602166810459, 28.778665120929929], [13.299637195309444,
63.658102616485678, 86.796794951252622, 49.68308177081957, -86.280121323311391]]))
res=whereNegative(arg)
ref=Data(numpy.array([[0.0, 1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0,
0.0, 1.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.0, 0.0, 0.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 1.0, 0.0], [0.0,
0.0, 0.0, 0.0, 1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNegative_taggedData_rank3(self):
arg=Data(numpy.array([[[69.353620509386616, -74.080076314847517], [-30.249703014852415, -95.672525613399003]],
[[-8.5039415761560377, 60.253313051648774], [-13.801342152251323, 40.764779434191979]], [[-36.581197219625516,
75.047667541458054], [-77.793778451165309, -72.594277712095419]], [[-72.619314204148793, -14.806208252588647],
[87.915581023315411, 95.105365322376201]], [[15.147306304672597, 14.666885700887903], [81.180471023319853,
85.165436080616928]], [[43.823915191016482, -49.98290658400564], [-72.588576349996117,
96.137982642309737]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-1.2728722356096398, -60.492198430412984], [24.24667632089907, 27.091663987424013]],
[[-32.679381620480711, -97.47865445886741], [-56.075348674988426, 38.715773862053993]], [[16.009087713355214,
-68.066576558113326], [25.559656695696759, -9.5774290533191078]], [[-52.544021441893761, 47.869839568114628],
[-72.606586250159438, 18.849506685859737]], [[-73.113930006549779, 4.602906873284013], [-56.38605187693679,
-27.367675802071062]], [[70.16996004059547, 60.366327688828079], [15.101213546349101, 72.59226569598178]]]))
res=whereNegative(arg)
ref=Data(numpy.array([[[0.0, 1.0], [1.0, 1.0]], [[1.0, 0.0], [1.0, 0.0]], [[1.0, 0.0], [1.0, 1.0]], [[1.0, 1.0], [0.0,
0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 1.0], [1.0, 0.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.0, 1.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[0.0, 1.0], [0.0, 1.0]], [[1.0,
0.0], [1.0, 0.0]], [[1.0, 0.0], [1.0, 1.0]], [[0.0, 0.0], [0.0, 0.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNegative_taggedData_rank4(self):
arg=Data(numpy.array([[[[78.567215316552364, 73.624001898494328, 6.6089006903017093, -16.066074308908668],
[4.6493463096686014, 90.440517625816341, -39.347037075742385, -33.297969929859519], [64.699949994741132, 29.115653652245214,
37.822018084896769, 40.128249957713678]], [[-55.450069727212096, 81.466719888892953, -14.393705514447504, 50.041870644315622],
[-26.112384871019117, 91.0126228352174, -89.730765921875076, 49.059538764459973], [38.483838119837088, -96.69931018125024,
20.572376725250095, -19.453405707808002]]], [[[13.464674311866403, -63.957052627899927, 27.769891567982711,
-33.550495063440906], [43.131655313012601, 4.7880717355257048, 11.709216606284343, -73.375330948322741], [24.471638138818889,
-70.587099709547374, 42.297065361106633, -34.039431318624949]], [[21.857294994809905, -19.704682667449276, -86.108666845333829,
-75.436492450552578], [87.94303965840291, 97.530458057774098, 25.97064557505557, -36.945527429857819], [90.911480668328323,
6.1671903724853223, 25.709485934911285, -21.355346056419705]]], [[[67.782998886788846, 70.917380141486149, 13.823579458254926,
18.402548374224679], [-15.060657679519679, 82.09963819729029, -31.92180428664399, -97.536671374116139], [37.669076925828392,
-75.58771930699335, 45.895049803831114, 35.48815045691137]], [[12.714019161158106, -57.944653564941675, 62.430334339808155,
-66.857496337271897], [-6.4429554795663648, -8.3994486590568158, -80.192945429058966, -93.438462560326158],
[34.875330751872951, 69.588212740586386, -70.503265404744013, 35.080768936736405]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[89.523458495532452, 6.8140581760945906, 52.278561982811112, 1.9103504285077975],
[-31.826956501731047, 58.297165172424911, 29.759024667767875, 0.92087730333993534], [28.957424646854918, 94.135111344573943,
-32.433995320712384, 67.081234380844705]], [[11.644557903097066, 56.050511369559786, -11.185754045196305, -94.014631510042364],
[-89.421458369162281, -27.806019206551923, 42.132488895560329, 37.020232240255524], [43.230885088291984, -83.189373937963836,
-74.094138681022528, -14.531760465098415]]], [[[-26.981360981714403, 24.064730821609444, -21.105581216059704,
-97.174757209589899], [33.300290491855606, 10.01590267931401, 51.489118545402135, -96.912506834915362], [47.653206939723475,
64.688747326811637, 94.943693671280016, 47.03846492475401]], [[-35.473632387755515, 72.503085095886973, 4.845984081191105,
64.852159504672017], [-19.964052254250646, 84.483169362896547, 73.78740822181058, 45.240727131786315], [-13.995221221821026,
-34.521569172453638, 98.500596615631622, 66.324330733855049]]], [[[-16.964585323232882, 26.406760086703088, 20.25984200782429,
-62.287754490513514], [-2.4701333556092777, -77.61548111631889, 86.671403323307715, 50.284535309177016], [-39.214050892482689,
-36.902295671557624, 26.750130444414737, 91.76317471624742]], [[50.615056318343221, -90.898178535525375, 94.958720223937036,
-93.80724680506188], [4.8266070012118405, 10.075720310299204, 42.099211642413536, 10.006938668548315], [55.032904164362009,
11.263981513981918, -63.130755368899848, 81.657868184177858]]]]))
res=whereNegative(arg)
ref=Data(numpy.array([[[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], [[1.0, 0.0, 1.0, 0.0], [1.0,
0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]]], [[[0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0]], [[0.0, 1.0, 1.0,
1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]]], [[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0]], [[0.0,
1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0,
1.0], [1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0]]], [[[1.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0]], [[1.0,
0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0]]], [[[1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0,
0.0]], [[0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonNegative_taggedData_rank0(self):
arg=Data(-78.1544615646,self.functionspace)
arg.setTaggedValue(1,-78.4151851666)
res=whereNonNegative(arg)
ref=Data(0.0,self.functionspace)
ref.setTaggedValue(1,0.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonNegative_taggedData_rank1(self):
arg=Data(numpy.array([17.602013218893518, 29.547786128150307]),self.functionspace)
arg.setTaggedValue(1,numpy.array([0.26203337714731845, 57.479799350895149]))
res=whereNonNegative(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0, 1.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonNegative_taggedData_rank2(self):
arg=Data(numpy.array([[-0.38327384036398371, -34.645139490102878, 65.429233454558641, 95.235253228502785,
29.097950082051085], [-43.855663256862009, 55.686325731330783, -67.824366444401477, 16.702344987904212, 22.218041380401374],
[78.969508595512451, -60.305312026473089, -59.523292190062982, 74.808651981782504, 79.872897022513683], [63.606277951467064,
-76.462470884188775, -72.691576180524351, -49.079190521880697, 45.394053081951711]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-55.552381227989599, -54.271211889675719, -12.582403003252466, 8.5917236679262601,
-22.455892824367908], [-16.776071759898258, -19.553013266124879, -21.813530512599172, 9.6881948240011582, 98.11892272389133],
[-4.3416722922198403, 38.725023582219706, 1.4757500981863529, -39.708613920267013, -80.375084634623164], [-99.616497105650254,
-57.007203450971453, 22.87724914844766, 83.97051539516184, 37.346070066579273]]))
res=whereNonNegative(arg)
ref=Data(numpy.array([[0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0, 1.0], [1.0, 0.0, 0.0,
0.0, 1.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 0.0, 0.0], [0.0,
0.0, 1.0, 1.0, 1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonNegative_taggedData_rank3(self):
arg=Data(numpy.array([[[-13.892573110308774, 43.478804863362512], [-36.040457512639264, -51.981819940572649]],
[[-62.169201833468925, 57.986370401331953], [-4.1036429741114802, 48.023435568940073]], [[-80.645707040180412,
-10.141695439237907], [-4.8056301100563417, 48.75486098147897]], [[-91.963242822660888, 88.059478204857612],
[43.320911501208769, -22.30145015628095]], [[-86.786948436003428, 31.120205822215894], [34.433146395475489,
87.18740518030657]], [[-9.4220225035139435, -20.184163123649284], [-19.921535324926339,
25.857031424846014]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[41.114617505053531, 77.617269604848303], [-73.085362575419381, -7.1084361894678381]],
[[43.213365294039818, 70.52545134609511], [99.082934876352368, -17.971939602273878]], [[33.829613730905436,
30.404114402478598], [-57.246747638382956, 34.541916089376258]], [[42.317171529871842, -54.768491746554183],
[-23.879054879709557, -50.383761075240805]], [[-57.28165027876075, -45.225575620770144], [-31.710104697280144,
-75.917892701858989]], [[19.07744929226061, -71.495870306203571], [-10.602129940209977, 68.760350259599107]]]))
res=whereNonNegative(arg)
ref=Data(numpy.array([[[0.0, 1.0], [0.0, 0.0]], [[0.0, 1.0], [0.0, 1.0]], [[0.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [1.0,
0.0]], [[0.0, 1.0], [1.0, 1.0]], [[0.0, 0.0], [0.0, 1.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.0, 1.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[1.0, 1.0], [0.0, 1.0]], [[1.0,
0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[1.0, 0.0], [0.0, 1.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonNegative_taggedData_rank4(self):
arg=Data(numpy.array([[[[31.064058622394185, -23.221839301023678, 68.763472697005085, 59.65832614821349],
[-40.149807242440751, 55.03145271535027, -64.4959578656334, -2.1518498706666946], [55.404104858000437, 49.396652662387567,
-49.624666695838648, -62.848812119970042]], [[-91.389556659029211, 72.332633311747458, -52.32546838722876,
-38.634885477981307], [-71.603348675002792, -2.4791274164933697, 24.130510966659983, -5.0112124293864042],
[-25.546226826848041, 27.236363562768304, -61.886267845384936, -60.880197873757623]]], [[[27.658384194987519,
84.510658257152215, -58.927887590834139, 49.108023674331434], [-70.267015166041148, -79.457401602686048, 56.127202277200126,
25.839278403805395], [26.875154742009613, 1.4487959347651866, -88.070139902975072, -38.510490284412093]],
[[-47.959868897350646, -5.7819206338316036, -56.800386597248306, -55.237835036677076], [28.516757929967042, 90.778719180512979,
15.739130130788766, -74.040382579111679], [-71.111296754698344, 12.107778742152561, -79.104526891456999,
-41.005699875297388]]], [[[18.567303332583634, -73.676314650547354, -97.749794073567415, 59.159591299795522],
[29.97705193558275, 90.413624368349787, 24.306766472883965, -69.797371947362393], [-39.289453466471549, -40.65110745107021,
-59.028684721855718, -20.270569577803272]], [[83.125187150431088, 27.563776588259614, -9.5176832989115212, -90.5959013499223],
[-69.524801083902418, -31.838028445081164, 14.626332705121882, 38.303372039757761], [-9.2077846008452156, -84.558594581494532,
-15.169395910120073, 39.522809676227837]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-69.876802801564537, -5.9375511403534773, 37.812297768315801, 17.556587568915376],
[53.958312857432333, -19.376205062615014, -22.920706818379031, 68.341061958106707], [49.048432585399325, -25.760395137889574,
-82.684148984451667, 70.938532287692567]], [[79.604355123625282, 28.576654650134685, 50.589177240580227, 31.904425649228699],
[49.30738620973429, 22.581941862820869, 70.638562695750181, 65.557713647175206], [58.121816082877899, -69.657739078881974,
57.259334058428038, 37.031029061370617]]], [[[-57.067719073541355, -83.590170196214359, -87.889020994197423,
77.03678490031848], [23.421242774091994, -64.962452658702915, 43.744442519067377, 67.095949407897251], [-9.97401351270058,
-84.15959986185959, 46.118179052522692, -52.922820472672541]], [[52.186352698780212, -8.0958035404479176, -33.331663389002927,
-76.607138954123229], [-20.87488584894281, -63.126524249384097, 8.0428232453640902, 52.19000132579842], [-60.91173907515013,
18.081845081324616, -44.231668576405255, -37.550260961693603]]], [[[-27.309398591668639, -5.5219138202315321,
-87.956648017701525, 10.89423659338236], [32.139714674893639, -17.347998935818666, -41.884445570079933, -22.512510804223936],
[45.623599790055323, -34.102558427374177, 87.032277901218464, -25.231126136650801]], [[-82.687583433642246, 10.301272646701861,
-80.557394277641677, 58.389873199971959], [61.375478497215084, 78.589623746356949, -90.675956160020263, -73.180287451090507],
[-60.580572035442451, 60.154646880978504, 59.209979266176958, 79.32948990654927]]]]))
res=whereNonNegative(arg)
ref=Data(numpy.array([[[[1.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0, 0.0], [0.0,
0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 0.0]]], [[[1.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0]], [[0.0, 0.0, 0.0,
0.0], [1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 0.0, 0.0]]], [[[1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[1.0,
1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.0, 0.0, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0]], [[1.0, 1.0, 1.0,
1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 1.0, 1.0]]], [[[0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.0]], [[1.0,
0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0,
0.0]], [[0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonPositive_taggedData_rank0(self):
arg=Data(59.5300640359,self.functionspace)
arg.setTaggedValue(1,-2.15432794908)
res=whereNonPositive(arg)
ref=Data(0.0,self.functionspace)
ref.setTaggedValue(1,1.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonPositive_taggedData_rank1(self):
arg=Data(numpy.array([-35.653771579383431, -57.809515571795679]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-58.726261256725685, -4.9867937639187971]))
res=whereNonPositive(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0, 1.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonPositive_taggedData_rank2(self):
arg=Data(numpy.array([[66.312754795349093, 82.034605718677227, -46.821806782974143, -87.117091329676626,
79.995435394657591], [23.928044089256971, 37.587150540719591, 8.7201565013642579, 16.527262198522521, 43.468010592942164],
[-58.095116913299293, 29.439827568578721, -0.091616442994578051, -54.761434852877166, -11.808816784702444],
[-69.299763869285897, -13.113050785108982, -5.1976088703165289, 21.099974177713761, 6.0733045244008679]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[73.896405389024125, -46.001844583629413, 98.809723817761267, 30.537291415620416,
40.574672076255666], [55.468937066548705, 11.801387698915406, 20.974309113460365, 77.660614559427415, 28.161039265035498],
[70.623954948137481, -37.457034114261312, -40.898398662139201, 26.109057449542121, 10.398162551919015], [-63.730141883353532,
62.137449485782696, -90.96748737577029, -20.908383264888286, -70.74195335323418]]))
res=whereNonPositive(arg)
ref=Data(numpy.array([[0.0, 0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0,
0.0, 0.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0], [1.0,
0.0, 1.0, 1.0, 1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonPositive_taggedData_rank3(self):
arg=Data(numpy.array([[[25.436500051631739, -52.748846289150244], [41.085054249733929, 37.28010897113333]],
[[99.576478076717791, -9.9679696571838718], [-61.344641894951302, -3.7437435600148774]], [[-50.906089775590772,
64.943203676394404], [42.050588774194182, 63.118383844777753]], [[-35.778055648047726, -63.920957612224157],
[15.37985889218254, -68.424348417967053]], [[-56.55550570286416, 42.304324718922885], [70.622324649491162,
-12.596055870540511]], [[34.100758417960179, 8.1628573265152085], [-32.962482469141108,
81.284708270077232]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-43.479659251216304, 54.528183443358017], [-57.05484443799638, -63.991441300924265]],
[[-26.9579882337963, 26.355076456251851], [-46.233015073952679, 36.53708746210657]], [[-24.786797130371284,
18.160754379725191], [34.747212955275302, 87.480335155520635]], [[28.625134809911913, -60.971379031499382],
[-88.66690636480152, -2.9400365936678128]], [[-51.567858740292259, 4.1984826727889129], [-31.243781268304645,
-95.989957539907223]], [[-17.551752211418361, -80.268436137583237], [75.208631120335241, 46.121751987400842]]]))
res=whereNonPositive(arg)
ref=Data(numpy.array([[[0.0, 1.0], [0.0, 0.0]], [[0.0, 1.0], [1.0, 1.0]], [[1.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [0.0,
1.0]], [[1.0, 0.0], [0.0, 1.0]], [[0.0, 0.0], [1.0, 0.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.0, 0.0], [1.0, 1.0]], [[1.0, 0.0], [1.0, 0.0]], [[1.0, 0.0], [0.0, 0.0]], [[0.0,
1.0], [1.0, 1.0]], [[1.0, 0.0], [1.0, 1.0]], [[1.0, 1.0], [0.0, 0.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonPositive_taggedData_rank4(self):
arg=Data(numpy.array([[[[-97.589886785634022, 75.704136798826028, 72.570441980077703, 41.142807006068949],
[52.186886198585341, -81.13583733115226, 19.645361321685243, 25.969196500252664], [90.98310621636935, 91.207424784752419,
32.52582221573229, -46.376838969725199]], [[-24.243208827988809, 20.526435158270147, 46.439239753806106, -33.65061411585431],
[-50.041229640131604, -49.061380582231109, -39.874744210873516, -36.550968632191804], [32.736481368599613, -75.040028568765322,
46.201877528037613, 66.414419319197265]]], [[[2.5037656480006802, -29.171488642086899, 37.367016013632622,
-70.265534203155468], [-5.0615104556033259, -75.110477824991989, 7.1287630039422965, -17.239036014080057],
[-28.096242015816813, -7.528715826985021, -85.497229220965764, 22.546758335502105]], [[74.817640632876163, 40.267037402027995,
10.981140051252439, -15.739056364934243], [-11.202000266050078, 76.223681897029763, -17.41622944432541, -3.2765461050635594],
[-25.659541213077148, 80.506749270081087, -1.000794733449311, 98.399202561993803]]], [[[-46.153348025882913,
64.301872580934884, 67.551433419371364, 86.776352830516998], [28.558361132430576, 78.958726721940224, -35.553376040555037,
-17.553520543738372], [11.165619248232318, -97.969411066483929, 50.903682207966739, -10.289318584097984]],
[[22.570215658384171, 75.89748134306177, -89.388639375388706, -54.6472406344094], [-33.869164257400811, 38.645420950299723,
-3.8841219051897156, 28.668559253094486], [-82.413441576756185, -78.700513819287238, -75.50816982500163,
-52.061106946967861]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[36.219375399209952, -17.824472494521174, 82.058503767107737, -71.379484930391683],
[13.550225541993569, -67.628807348691709, 20.055125227804993, -29.234664858312229], [-18.292530127300381, -39.897170713739548,
-39.328945564903783, 2.857031958593808]], [[49.335977298056065, 80.508916415103982, -18.493351689982831, 51.621759203569923],
[33.973864190922626, -7.4031669533306257, 11.727041061390153, 64.639807865470686], [42.573651614588044, 41.852110298728377,
-64.065734918246676, -6.9916640699874506]]], [[[41.50002565593519, 87.840585919409989, 2.0581894170644546,
-3.9639341014441811], [8.7028087633685089, 69.156286173220167, -83.153991217442822, 14.406484280025737], [-34.420051853304614,
94.578630567732802, -48.230261021352902, 53.242310400679315]], [[-84.442282958911122, -99.822594295799561, -39.959520090517287,
-90.546856339981431], [20.518433145652864, -98.471982254610907, 22.178227167774111, 71.388198500404911], [69.127077441526353,
43.428513943743894, -71.615864538073225, 20.113448559972809]]], [[[89.953272044597895, 43.16167804611743, 53.919371581222919,
1.3311125255161187], [-95.465237294020739, 67.804004576510494, -14.742900384283658, -27.263059934517742], [69.754390418730139,
79.35923926098971, -51.386888599874567, 51.913251831821356]], [[16.947530613873013, -39.040428548927153, -46.681825859807603,
-77.418328228167098], [62.579502644870047, 54.635165987247035, 10.68424789801503, 66.321201110893043], [78.476241287880896,
-29.449312093617081, -59.013155676678885, 6.6196016328634357]]]]))
res=whereNonPositive(arg)
ref=Data(numpy.array([[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0, 1.0], [1.0,
1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0]]], [[[0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 0.0]], [[0.0, 0.0, 0.0,
1.0], [1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0]]], [[[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0]], [[0.0,
0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.0, 1.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 0.0]], [[0.0, 0.0, 1.0,
0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]]], [[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 1.0, 0.0]], [[1.0,
1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0,
0.0]], [[0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereZero_taggedData_rank0(self):
arg=Data(-60.3073206028,self.functionspace)
arg.setTaggedValue(1,-63.6162748199)
res=whereZero(arg)
ref=Data(0.0,self.functionspace)
ref.setTaggedValue(1,0.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereZero_taggedData_rank1(self):
arg=Data(numpy.array([57.478569368864356, 25.206882696210428]),self.functionspace)
arg.setTaggedValue(1,numpy.array([26.231910549413783, -27.085991237832573]))
res=whereZero(arg)
ref=Data(numpy.array([0.0, 0.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.0, 0.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereZero_taggedData_rank2(self):
arg=Data(numpy.array([[23.125617746775646, -41.981698421134659, 88.634083806667888, 85.022668924224405,
35.388273276293091], [-85.767444923711466, 47.859030088870099, -69.395187041220851, 35.5734575739055, 24.859215630808464],
[45.04844052116951, -95.695008500839691, -94.766369979921919, -29.956871512177429, -11.074586010585591], [-20.148312524898017,
-79.433644676490502, -19.87738780106119, 58.95117313559922, 50.971789815159298]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-81.469429807666714, 11.07859262128126, 33.849563441706891, 14.848734531164027,
-42.320200232359248], [37.962288693888382, -43.238362945841821, -64.391556397361285, -7.8460700293939283, -20.730397433363208],
[97.393519560018603, -94.167885954290782, 94.002103086540188, 51.422088904276251, 63.729022355064359], [95.391379833296668,
62.703543794846581, -70.921738135430985, 70.232187871319354, 86.806722655888649]]))
res=whereZero(arg)
ref=Data(numpy.array([[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0,
0.0, 0.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0,
0.0, 0.0, 0.0, 0.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereZero_taggedData_rank3(self):
arg=Data(numpy.array([[[3.4027245130060919, -61.09284878347885], [-57.399713635858582, -71.643573213948272]],
[[-11.523234389426221, -44.578343090400388], [83.731526181974061, -65.104529547136394]], [[-12.081015577295332,
-4.172878078793758], [-89.292376152335635, -25.743293720522729]], [[53.199172477134539, 60.186513433215026],
[67.767250940092424, 89.139480682860551]], [[17.059488076066813, 37.190593835102874], [-19.912772973905007,
-65.497513416201755]], [[-25.360470411847373, 43.142714268731936], [21.199116504341944,
37.635585100309612]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-11.26010310706846, 34.128222291421537], [-68.434470407451215, 70.638428466633115]],
[[-77.544908385112606, 29.23464957312018], [-3.0407974101420763, -31.841776259248377]], [[-4.4176903627463133,
37.991200497907613], [-9.6402073259949077, -9.0856737835734833]], [[26.730099650557975, -65.247161722597966],
[-46.62552821590311, -56.733831760674391]], [[-36.874008752740004, -2.7797064670085092], [-64.175546396086474,
-99.28541091199989]], [[-5.5337745528672997, -45.378676661048623], [-90.349005740211496, 97.078047761501324]]]))
res=whereZero(arg)
ref=Data(numpy.array([[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0,
0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0,
0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereZero_taggedData_rank4(self):
arg=Data(numpy.array([[[[93.831260447485874, 7.7407513574382989, 78.739371854572369, -68.363475865430772],
[72.824139460523099, 69.006620095233416, -67.000181405109601, 95.691658959713436], [-46.580816493258205, -19.62723037000471,
7.1964131702848562, -94.003505042058904]], [[85.06939733259054, -12.729485711394545, -95.590773804649672, 66.129455034149544],
[52.5485700301343, 1.8469556361458359, 59.238187186745563, 0.89938435519005111], [50.763202555174757, 38.515989700998915,
14.509412952688436, 19.098524401100889]]], [[[15.441947994447844, -87.547935450574357, -15.093661946970599,
-34.577822306130841], [47.655788884739167, -13.593073478163831, 73.901883902793401, 50.766658802389429], [93.106292386838589,
-26.449736171409441, -32.523468497737113, -36.825111629796645]], [[-71.149835259772914, -77.966052917274098,
-40.594142361637765, -93.497294871292127], [-37.049924286179639, -49.307577406565684, 68.805856372840026, -83.077598973248371],
[-35.950769604858124, 53.444154742123146, -29.736934427716307, -0.43882835811794507]]], [[[-28.804291026424494,
36.420207954120713, 44.975880956788671, -18.160405554758484], [-15.015205668084675, -36.844405430803782, -55.648827533689385,
-63.666847070332658], [-38.323848308813055, -86.094993931559884, -47.504890814498715, 75.386260617980327]],
[[-49.43361721674431, -48.467520039782322, -13.393183500735859, 33.478259837688171], [-46.591630982573548, -15.732761279461855,
55.398884354877111, 42.656388373806152], [20.973563827725044, -83.810921836893868, 37.036944354976555,
95.055268401462797]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-72.44920515300953, -5.1325597489963855, -70.544555413458653, -21.868457284041369],
[28.059209379223262, -97.012837923502218, 17.364646849616733, 69.883388876193692], [-88.768250111578212, -49.421061117246538,
33.314147890655022, -43.967461259845294]], [[35.724082924424522, 21.20468417199848, 5.277992247636206, 77.828098329437609],
[83.375548593581215, 72.566063535932898, -39.58180677719443, 85.382749592078113], [72.093057622870248, -28.938840377791905,
-22.476983470220873, -96.655105739800831]]], [[[-95.917035925462301, -0.3656737198951987, 78.260689518762831,
-26.841902628320639], [69.925254995666364, 62.325571622342864, 47.660763937485541, 32.260139442261902], [-2.2726094824157173,
8.5002090466555558, -41.642153397299793, 33.220453104115677]], [[22.661303145423545, -52.489538131795044, -89.151747983141831,
18.242363722336137], [-25.178052459037687, -20.34523575497515, 25.391874579437612, -58.809820165710214], [-60.790728856888791,
37.195293760072531, -41.479538487050348, -21.114525244725101]]], [[[-49.820373222887085, -49.810943103539486,
-24.582970051099622, -22.599787936123761], [76.777056975485948, -58.811863993488878, 77.842740611399165, 18.640966616664173],
[-19.158614872609775, -72.976807090542167, -86.531194215051471, 48.429555859657114]], [[85.258816970664725,
-15.780961333046449, 49.948813051783191, 53.155720106784031], [-85.9905021073629, -0.23998617994342908, 82.190464755424955,
63.007615196139739], [-23.037986153437245, -37.536769208240784, 75.375056084992167, -10.052811879961808]]]]))
res=whereZero(arg)
ref=Data(numpy.array([[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0,
0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0,
0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0,
0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0,
0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0,
0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0,
0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonZero_taggedData_rank0(self):
arg=Data(-1.9710533403,self.functionspace)
arg.setTaggedValue(1,99.5842297151)
res=whereNonZero(arg)
ref=Data(1.0,self.functionspace)
ref.setTaggedValue(1,1.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonZero_taggedData_rank1(self):
arg=Data(numpy.array([-59.49925191560812, 86.042241301467669]),self.functionspace)
arg.setTaggedValue(1,numpy.array([42.554874917129013, -64.6377412204602]))
res=whereNonZero(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0, 1.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonZero_taggedData_rank2(self):
arg=Data(numpy.array([[-6.8281814038072071, -59.600553597455487, -83.253618903284348, -55.555714436842266,
-58.64125306605785], [-54.669636874026729, -40.668963536281467, 48.151126090125331, -82.810118365697718, -2.0625309958108886],
[-50.145961443418784, 33.196540210116666, 18.939660902081542, -35.312472223501246, 45.025825447182569], [-91.787750952036063,
-19.219817615082405, 30.739339723723532, 46.808225524785058, -53.347997111730059]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-6.1766328777260071, 12.353983207929858, 87.643004443775652, 0.1054506713352481,
-49.898039891751097], [-6.464367133652658, -99.376428379787214, 81.830552974282909, 3.3503835694606181, 99.424767953367194],
[-23.667088793561319, 65.145198516233364, -10.00780177932495, -69.125279379621645, -70.19911439214637], [-56.661848212973823,
-78.777508065420989, -44.257423096893753, 23.610690037963238, 52.764627315898679]]))
res=whereNonZero(arg)
ref=Data(numpy.array([[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0,
1.0, 1.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0,
1.0, 1.0, 1.0, 1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonZero_taggedData_rank3(self):
arg=Data(numpy.array([[[-77.400629617637264, -7.8585991909028223], [79.37731461196492, -78.311221968888802]],
[[-2.2871031341482109, 61.613282578413418], [14.239083629293276, -86.60455578579527]], [[32.648402678335373,
67.120663891666482], [-16.40250641841989, -45.441109646024543]], [[-88.814372300408252, 96.863741115845073],
[-53.568931159701449, 61.772732453745817]], [[12.78118059732283, 61.665805717605537], [81.736967250564334,
-79.838957222371846]], [[-45.746992316765287, -50.359908369594095], [67.084057007701773,
-77.367125763337725]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[20.916835852467159, 98.359772976470907], [-65.641953951612976, 48.35339386982011]],
[[72.394336553366969, 0.15850039108870817], [-37.64849616557904, -37.7540766591151]], [[4.7508355327704663,
31.008319102711397], [-54.917295021552114, -24.534459964429843]], [[19.525286161344553, 63.669539108570319],
[-1.0431050089863732, -17.966268638209357]], [[-79.076564771286044, -45.063188127277719], [-57.520467509927364,
-69.399848959156472]], [[74.966631181955592, -21.675113256460349], [47.3018877491821, -95.419016191439553]]]))
res=whereNonZero(arg)
ref=Data(numpy.array([[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0,
1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0,
1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonZero_taggedData_rank4(self):
arg=Data(numpy.array([[[[-56.530041249956064, -75.595773656477377, 57.930726925789344, -35.064711718518922],
[89.927579325782204, -71.292052004298981, 74.426753570770302, 9.9254833972331795], [66.426699960830888, -79.419436482993362,
70.191990839107291, 25.664966939851837]], [[91.588972225813166, 75.24780363003768, 22.708376398136238, 79.829437424982274],
[78.426467246737957, 34.311496583740819, -45.051218540773853, 21.126634987063937], [0.93545608150542137, -64.07783608516722,
50.0880392185164, 3.0383401979216416]]], [[[33.521385232890651, 65.495547288295967, -36.26432133678432, 43.817943477527393],
[25.622359237391734, 46.879767530213655, 44.264770652047645, -82.611848465548164], [8.6931324018650855, 98.705476157468638,
-69.064996470241397, -82.140570519506227]], [[50.341676972542217, -57.113225217844878, 23.496128915773994, -84.4500434098574],
[-42.826308284507533, -40.068614099685277, -64.107129980786979, -64.752370052337284], [-68.77258294388686, -65.093744454055411,
-2.0441147238691144, 38.710454571834248]]], [[[23.964849311323277, -9.8504539937835318, -24.01385095241659,
-65.609734597116542], [-34.482955634964824, -55.357317162052141, -29.891659032054235, -59.722077669432629],
[24.465604000129801, -3.1488488615906647, 90.185493485946637, -9.9469471059439201]], [[47.887647758738581, -18.650373829652906,
-88.084195156027434, -50.216336238949744], [84.137883656735909, -12.149905093038768, 96.23351445652429, -70.450849093320912],
[79.18622708472455, 93.448904090150648, 15.570836279018437, -91.458357264821544]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-12.894979773801623, 48.187305922131372, -22.228691774958477, 71.32250925591336],
[88.198575928561297, -82.879750265641363, 6.0047803940490638, -75.104784095705114], [95.651210249923281, 99.227840476768279,
38.201888712638237, -85.338045871397298]], [[61.890764559936457, -99.084864794308373, 19.908026187604563, 76.48683075285868],
[15.244819962785968, 81.134443755015496, 91.695315009752335, 53.656684202280047], [-13.910795126783682, 62.546356367686997,
57.939671348548501, -16.711735701291104]]], [[[-17.647299335238117, 75.459048312325109, -41.034997451013353,
22.776483937861556], [-97.792781150657731, -89.213116628864611, -36.170629374287323, 76.170885998109583], [51.302094246614928,
73.764119678021643, 2.9931334740095537, -6.7949120092559525]], [[-81.233259812949598, 86.178813783813297, -0.82062800096618105,
95.276937599720668], [25.56603608019212, -69.150407154520252, -97.002071851697821, -38.857149391397485], [86.964544699076953,
-44.217066849378782, -92.21466310897317, -30.366338991012668]]], [[[66.127428481144136, 84.702864167161209, 53.320435341942385,
34.341339969042622], [75.475890485661608, 6.568751254456501, -32.661380753798539, 73.048056732159722], [8.3932556720025104,
86.553622630163773, -96.143353218643952, -12.061654127884765]], [[53.325736920559024, 24.80213757617615, -70.674103395487791,
-11.797716418097565], [-39.44141732563584, -42.670437444648911, 79.49977026651581, 79.898615913406843], [-32.436244300917423,
63.389192944364225, 48.691557489453828, 91.496017284059604]]]]))
res=whereNonZero(arg)
ref=Data(numpy.array([[[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0, 1.0], [1.0,
1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0,
1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0,
1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0,
1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0,
1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0,
1.0]], [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
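# The trigonometric tests below all follow the same pattern: sin is applied
# elementwise to both the default and the tagged value of each rank-0..4
# argument, and the result is compared against precomputed references
# (presumably numpy.sin of the same arguments) within a relative tolerance
# of self.RES_TOL measured in the Lsup (sup) norm.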
def test_sin_taggedData_rank0(self):
arg=Data(-24.7105931613,self.functionspace)
arg.setTaggedValue(1,-37.638505349)
res=sin(arg)
ref=Data(0.40972088744,self.functionspace)
ref.setTaggedValue(1,0.0605693981609)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sin_taggedData_rank1(self):
arg=Data(numpy.array([19.860974255803598, 48.899013130941427]),self.functionspace)
arg.setTaggedValue(1,numpy.array([14.319017737469665, -59.326252904429587]))
res=sin(arg)
ref=Data(numpy.array([0.84758534887649317, -0.97919776342443343]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.98351066065067827, -0.3560220138624291]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sin_taggedData_rank2(self):
arg=Data(numpy.array([[37.890743553866486, -52.175642782800914, 61.917008025503975, -8.538416676807941,
-94.304749798245496], [-17.787570828089727, -19.048274463511873, -8.2634570563295142, -56.253500812466228, 87.627404284894396],
[-14.454217499387354, 73.713310630128319, -52.818033941567855, 90.807246316901796, 59.632923220807299], [2.3430650859352511,
56.726750975618302, -69.98474018040875, -30.128841460819984, 0.11683572211893534]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-60.059131288860598, -78.931098378024842, -99.522738887570867, -9.6007074071729619,
-66.250286193785655], [15.651568400631106, 57.654505938017678, -21.858524591969015, -92.849176312405305, -45.214082756051297],
[-85.045751900057368, 10.170104148330267, 85.540180625403167, 34.743740334373229, 27.680023474288177], [72.313181060961483,
-93.451973592336017, 68.715544032783157, -57.013152797460179, 69.395677045629242]]))
res=sin(arg)
ref=Data(numpy.array([[0.19046098975424755, -0.94296657311066345, -0.7924680880494267, -0.77477635663664268,
-0.056939378452443026], [0.87332421967504115, -0.1974132538348578, -0.91732979816211846, 0.29089958624583467,
-0.33083665313437571], [-0.95015908369497537, -0.99349574918962724, -0.55556403598677151, 0.29450799309098907,
0.057305786038470398], [0.71632946014175625, 0.17714342493014262, -0.76413661669322097, 0.96002319680218495,
0.11657009080686483]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.36056248350542847, 0.38137387117559401, 0.84588700623357704, 0.17502331279847,
0.2733177892111176], [0.056364979209719938, 0.89384032979663164, -0.13223553506078178, 0.98521137495670197,
-0.94308411592712293], [0.22091275938263169, -0.67821155091384655, -0.65726119089937152, -0.18514670365491534,
0.5599375367095778], [-0.056519892938693105, 0.7144278132655969, -0.38895255454538685, -0.44796245325739548,
0.2769693862212248]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sin_taggedData_rank3(self):
arg=Data(numpy.array([[[-55.804968616645148, 58.560992915206214], [42.652192703056301, -91.42882116095241]],
[[39.310441995226739, 33.870993341596233], [80.401033234710297, 73.000828209637689]], [[5.7360515152169285,
82.330874482727353], [-75.426134234758621, 7.5453684113771118]], [[-19.895965390103115, -88.950469683568315],
[31.355932404642459, 36.487846363447858]], [[67.666456279782437, 58.020389340319895], [-37.89476101041673,
-42.399630457776482]], [[-23.830782444196501, 6.0849055767691738], [91.294861085921525,
-52.847710860098182]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-18.125779887526264, -95.420123122001257], [-8.1568796731757516,
32.219735537825017]], [[26.245851241680057, 96.102520961925848], [14.02431043315147, -9.9572364002324321]],
[[17.181359346351925, 47.963801042849468], [-95.527667200507665, -64.204019349910141]], [[-98.658267090216341,
-7.0939733146426107], [-41.783037015039959, -46.517744129299096]], [[-57.202627940362859, 79.223818560607498],
[-70.017222005175654, 23.987327490175844]], [[71.375583584624991, 89.788775552486129], [98.882752617270086,
21.455679838723768]]]))
res=sin(arg)
ref=Data(numpy.array([[[0.67701499649890673, 0.90409941974537544], [-0.97117328078000487, 0.31706594876811195]],
[[0.99917861697072197, 0.63385392022976472], [-0.95812352836612924, -0.67738144661254696]], [[-0.52024157266400017,
0.60476080407034305], [-0.027906925031102141, 0.95275570243286156]], [[-0.8656310935760867, -0.83375573809919057],
[-0.059958148294456545, -0.93606199112953326]], [[-0.99254113222478446, 0.99509611012034227], [-0.19440335819459723,
0.99992954803650558]], [[0.96408030914008547, -0.19698305893325982], [-0.18755672118757361,
-0.53064744896026339]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.66221880833880609, -0.92166239454618104], [-0.95447604942411934,
0.72000463780845769]], [[0.89707725485516432, 0.95995773196345946], [0.9936384606300962, 0.50765297031738676]],
[[-0.99526034460150747, -0.74458387237142676], [-0.95798362538691173, -0.98033782538997782]], [[0.95477283132130442,
-0.72483027934968591], [0.80893159374577939, -0.56970402250150143]], [[-0.6083342836862955, -0.63189999754289639],
[-0.78468240482370322, -0.9108809171944825]], [[0.77140583862359613, 0.96806942866170043], [-0.99700488005111876,
0.51024422262880564]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sin_taggedData_rank4(self):
arg=Data(numpy.array([[[[95.057014858860498, -50.023546676067944, -19.520550887074563, -14.915416158905259],
[-72.276262048791182, -57.778697211921127, -10.589425732964969, 25.789691603835237], [71.432158308304565, 83.59773672148529,
32.062292384526415, -22.527648292677569]], [[-39.074624167039062, 92.303231204531414, -1.4192851682411742,
-68.883629510294497], [46.825962360174429, 85.58307574133471, 83.176640836526758, -93.888543574320394], [59.195891546840528,
-43.884372920271829, 46.885208516026694, -24.330067940056807]]], [[[-85.391419005371418, -52.993590690134319, 41.1653245235631,
70.963880995127738], [-6.8099927112965162, 14.755258748362692, -77.400445539133742, -3.6276152651411877], [-88.775588041032492,
89.080955577757038, 0.97522108268380236, 11.904044693769748]], [[28.114564123404421, 17.406751514150216, -90.824155259332073,
66.590378374684491], [-23.314357242078572, 66.459924224373196, 1.190010463508969, -19.129294185825657], [12.759163310131783,
94.16098679455763, -80.470912052594556, -87.769040453881502]]], [[[-68.103901459227245, 96.524362598603318,
-3.2834594710336376, -25.520289808877067], [56.393892750276962, 17.548302326605253, 15.721717465663659, 76.20380788007958],
[-65.13810360798314, -4.9406764890286041, 65.373960553505867, -11.670204391287569]], [[54.171569268655503, 53.359368061868707,
-46.012260984758143, -78.151318891542985], [20.615711960999178, 40.160242458140658, -80.640118059335776, -94.602105820605374],
[58.356391780158305, -78.074396086921837, 69.50163735189372, -68.547938015025153]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[1.279534719127895, 77.967895548901566, 56.093855457217416, 55.241022797731574],
[-99.18622666243968, -10.886097986772711, 44.708474069781573, -26.616286906326849], [-92.350922530980355, 54.858168650538886,
51.906834850649233, 77.865437338097109]], [[24.233171382130436, -49.713594059906626, 75.407909417958365, 86.691179170294532],
[96.871157363423322, 23.148017134014822, -29.421912304159292, -58.976308318504977], [-5.4545343262298189, -89.036199846063681,
-83.742519983327739, 35.372319522991887]]], [[[-95.511576008994936, -83.10806319229718, 63.825192259702391, 92.80910275607684],
[44.426319323500707, 88.815074429332554, -18.021325932633019, -69.917789857742505], [-84.319087816871672, -30.317627038327316,
-38.345827346198959, -81.91833965828738]], [[11.186751110650022, -54.257619696250828, 84.729270493118236, -8.0244377640246114],
[77.805655721275429, -14.229050163525699, 32.671007471850089, -96.882778316793122], [-56.456065533953058, -25.01675593935984,
65.68053871510449, -14.266571167222295]]], [[[-39.965547886942353, 19.317802794261738, 80.566440631464729, 43.745566353754214],
[28.366421988006579, 68.970448204763755, -64.475182800936267, 20.331319130101249], [-87.117125888478327, 91.992851667866603,
30.281916963531046, -39.27414258485895]], [[93.364522015716602, 78.717156004724472, 61.222491284093536, 86.104631528043967],
[29.395392816847448, 16.532063410538484, 10.319065205651995, 10.917748038478663], [-92.263775558488874, 50.00911791017316,
-6.6661922286034354, -51.536766809586055]]]]))
res=sin(arg)
ref=Data(numpy.array([[[[0.72375966950305115, 0.23958247784190015, -0.62176555507714748, -0.71214373694684574],
[0.019629755360536964, -0.94249864392330207, 0.9186492389762253, 0.61070482468498899], [0.73418989352805997,
0.9408956015682427, 0.60228932671485913, 0.51113062555018463]], [[-0.98099259974089892, -0.93096397739276848,
-0.98854412433116323, 0.22934906491349744], [0.29353958469160507, -0.68897514951619387, 0.99714636265035372,
0.35155913660386867], [0.47447618324952018, 0.097767803237166412, 0.23642150489763264, 0.71921602354647907]]],
[[[0.53829894600069828, -0.4018025366804851, -0.31894868102661073, 0.96159352277239407], [-0.50277617521839357,
0.8149857013480003, -0.90837056606621547, 0.46711279434815001], [-0.7249719991634016, 0.89850915924785046, 0.82782593247756842,
-0.61495265106171171]], [[0.15909090210144514, -0.99182021016560207, -0.27830764816239967, -0.57853598042401821],
[0.96950642440138313, -0.46751584043254807, 0.92837285606475217, -0.27610409657055596], [0.19160059401890014,
-0.086683885904533534, 0.93579131574343599, 0.19430985516438759]]], [[[0.84743598014352139, 0.76110153119788515,
0.14139142291111614, -0.37791993461054291], [-0.15415780695875661, -0.96389276338977681, -0.013753764053628931,
0.72123543134514545], [-0.74152608990617153, 0.97405537665333219, 0.56421749260196419, 0.78093804490852481]],
[[-0.69217200421492153, 0.04768895464089825, -0.89642177035913229, -0.37879826270669459], [0.98097790580385125,
0.62915222544178329, 0.86305700540000696, -0.34695851228481017], [0.97206368588434766, -0.44879839287225581,
0.37704058161193998, 0.53718857715535373]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.9578823534926072, 0.54124816795457964, -0.43929370911716231,
-0.96557502749159507], [0.97455749834004068, 0.99401345246169626, 0.66401590677623057, -0.99619607619697281],
[0.94731148656605035, -0.99284417199371577, 0.99751195316042607, 0.62441237853622522]], [[-0.78305944951663353,
0.52429620121273146, 0.0096855803621312825, -0.95610712166060408], [0.49533020111560083, -0.9155481075478985,
0.91177208072521287, -0.65482579444370448], [0.73702027966623906, -0.8779702346494217, -0.88218472628424938,
-0.72758863915572014]]], [[[-0.95324448144023388, -0.9896294977803074, 0.83785349282853971, -0.99128491043904499],
[0.42957507779222781, 0.75159719546211767, 0.73673567820434016, -0.71927034677937474], [-0.48285656690859402,
0.89043473057109679, -0.60256841133763539, -0.23472014974367561]], [[-0.98178130166608535, 0.75163971078732728,
0.093593967784617274, -0.98550749523114423], [0.66996424044290459, -0.99578170573160452, 0.95057449576530817,
-0.48520180467023327], [0.092469940703161432, 0.11572541384732027, 0.2887366377307638, -0.99163895037731464]]],
[[[-0.7676438791646546, 0.45132255753166978, -0.89789686094785226, -0.23452586033429529], [-0.091958006320412053,
-0.14408689254970225, -0.99737060586631121, 0.99603916939064607], [0.74960719408299126, -0.77496816002780011,
-0.9061156382123059, -0.99999103487825647]], [[-0.77281036418314564, -0.17641158915267149, -0.99925644477650222,
-0.95848189929893357], [-0.90056129662048501, -0.73393688041745886, -0.77976304128985197, -0.99697306576558797],
[0.91583747858958031, -0.25356559568768045, -0.37371120994166129, -0.95548059670784435]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
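# cos: identical structure to the sin tests above, with cos applied
# elementwise to the default and tagged values of each argument.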
def test_cos_taggedData_rank0(self):
arg=Data(52.3923651613,self.functionspace)
arg.setTaggedValue(1,92.6499316384)
res=cos(arg)
ref=Data(-0.527866301451,self.functionspace)
ref.setTaggedValue(1,-0.0270483432209)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cos_taggedData_rank1(self):
arg=Data(numpy.array([-74.897126998165533, 76.673400450800756]),self.functionspace)
arg.setTaggedValue(1,numpy.array([99.065445380314515, -86.748306948983256]))
res=cos(arg)
ref=Data(numpy.array([0.87705625402072684, 0.29133259267605394]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.10508243263067833, 0.34712991573165969]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cos_taggedData_rank2(self):
arg=Data(numpy.array([[40.593544538866865, -8.8839015039393558, -49.468879573084436, -24.708042838510465,
20.413703995745891], [-79.108713409558405, -68.647136982462371, -80.858963259372672, -43.373193372132903, -19.507573187625411],
[64.214585816318845, -78.826300537435486, 57.661889712775803, 95.493641862455291, -48.386749127960769], [67.334847000926004,
-34.70671409523483, -36.873199353443709, 3.6386929918643176, 35.181153901083945]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-30.787970015064928, 82.074060959202797, 25.479756845345577, 10.895119259966464,
63.74412167304564], [-60.035262414428935, 54.332578347635263, 18.293985264200202, -9.7571535510820695, -70.419305661969503],
[-66.629926110044835, -43.57208065884415, 57.437026616340574, 20.73240225691022, -80.496461940478952], [19.883318148806438,
-98.729450313914597, 73.172600335425471, -53.114967436072469, 41.781624603862156]]))
res=cos(arg)
ref=Data(numpy.array([[-0.96961115090719441, -0.85725773398423355, 0.69913962613000602, 0.91116305149951837,
0.0066482036132297587], [-0.84249563945044104, 0.8925167529418252, 0.68043098049226469, 0.82016110495020733,
0.79120632545455827], [0.18695697167168232, -0.95924330257900359, 0.44177331900046657, 0.3192463084776716,
-0.3030933270774539], [-0.20786820316301155, -0.98889106925546555, 0.67788641598410604, -0.87896904245554386,
-0.81176118995632829]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.80922972543055438, 0.92389756193357364, 0.9403918665642913, -0.10028616065108438,
0.6119531025181365], [-0.9410747774886985, -0.60143262098807782, 0.8495995978399381, -0.94526988131298229,
0.26338463011163266], [-0.79214526943475394, 0.91703450582859369, 0.63068646834096875, -0.30701028605243086,
0.3763461069696134], [0.5115898554852758, -0.22867682408200724, -0.60902205590663616, -0.95763905321643927,
-0.5890447354610614]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cos_taggedData_rank3(self):
arg=Data(numpy.array([[[-36.309518950317376, 0.93081070250144649], [31.019427711604664, -74.09863093545404]],
[[-38.496677203305893, -85.824133574935331], [95.203836891504238, 22.838846451350705]], [[60.75609230931488,
6.003670139700219], [-31.49567872236139, -63.206983059929222]], [[-9.6812822737183666, 0.078728886948780996],
[66.900652835446493, -94.869473621923703]], [[-6.6770163744116076, 22.876520146740972], [-55.737787303088737,
6.2425399184533319]], [[-81.429470177177521, -81.6116687923749], [-97.082967034525325,
-67.37269287178016]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-76.097111647315714, 55.656786197463788], [11.140883121429439, 54.147885791873421]],
[[-24.180524234728694, -45.703945118544723], [42.10979532559989, -22.79785029244421]], [[67.322737034238003,
18.304852118006011], [7.015704936158869, -94.401853589660817]], [[35.279952362677818, -7.8217175297602637],
[-81.23891082515344, 54.069639284286751]], [[4.2395499199061106, -11.974337349322099], [-77.095389819359994,
26.711493864407473]], [[-66.565935528207518, 41.011773246282445], [-62.114425668075299, -64.456999774045073]]]))
res=cos(arg)
ref=Data(numpy.array([[[0.18021343448473101, 0.59718391060744369], [0.92241876797360978, 0.26789121482669265]],
[[0.69845114643777229, -0.53934947535935607], [0.57674535988171327, -0.66171093596184249]], [[-0.48377631503826568,
0.96118931155712628], [0.99682147963778267, 0.93046000296873854]], [[-0.967282744906935, 0.99690248160545425],
[-0.60007048402362761, 0.81289300751647198]], [[0.92344574856902162, -0.63300189293768494], [0.68886045878833047,
0.99917408990060674]], [[0.96843093014337922, 0.99756913767256539], [-0.95342403093885042,
-0.17070899404470352]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.76555810931252977, 0.62794880060935965], [0.14479801574845574,
-0.73792168383216183]], [[0.57987832415218665, -0.15028015665535041], [-0.29714910041836201, -0.69188358205701828]],
[[-0.21969811854411034, 0.85528080687409014], [0.74349177468513394, 0.98815406589512933]], [[-0.75019910814961466,
0.032258506831785543], [0.90368477270236081, -0.78841749829099372]], [[-0.45541567307928488, 0.82980773284067688],
[-0.12603373471688831, -0.0079562249516561077]], [[-0.82955287573817371, -0.9854033904934385], [0.75349957000523238,
-0.054323621236985108]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cos_taggedData_rank4(self):
arg=Data(numpy.array([[[[37.610264246462435, -85.560668463311075, 92.790982866326573, -21.753674410293172],
[-23.164181244709354, 64.496397223384463, 69.277186049494105, 6.3927475799028457], [67.583896168477764, 36.111360823700437,
30.266912701944563, -54.963319263159384]], [[-58.145969583496672, -97.225443498755453, -56.934313916342269,
35.421162068171839], [65.866615855863898, -57.072034755161027, -95.488754117534285, 81.149953518095799], [-18.30949886526929,
-89.680457620572071, -73.87886392983259, 81.259366551703209]]], [[[1.8677491996480029, 36.828382975770609, -80.40672114911041,
-49.292595896369647], [-37.983864569797767, 35.583525872048824, -42.451157688857613, 33.755615612774108], [32.674252940671579,
90.058275023987306, -96.26155980692819, -90.500098763836021]], [[90.079955965660446, -70.687430685137031, -51.111371179982747,
-74.109677346578138], [-32.896920002886091, 62.26499948195692, -59.833741060334056, 11.794198300820895], [43.437452546746755,
97.455115222231768, 87.354131572829402, 3.2818247457694412]]], [[[78.306182680183269, -64.892175839143391, -55.104588214315342,
-96.744717049677931], [-38.337933398739985, -72.796076467431135, 60.379171901212146, -81.927733276050247], [63.885059436029167,
-31.980639093805863, -57.261994523508044, 17.357515328643643]], [[77.429908518363192, 9.5882415367278355, 72.484182388500756,
63.089077313098954], [84.07047179403375, -21.092477779767819, 41.614178023999727, -98.204118862286279], [-71.275012546567766,
78.730240012789466, -11.573247145900382, 33.098945113087012]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-40.533710954365063, -21.161469670738327, -69.120302774135837, -14.38267699844107],
[-91.158843533364944, -85.491074434980831, 87.152587959079909, -33.044835488961624], [-68.672525163755367, -1.8217843916724235,
-33.6594071845862, -94.719797609019921]], [[5.7039466733317994, 69.091962753216535, 42.000508648719546, 63.142145355765422],
[79.524244986771464, 62.133683756888729, -63.061242691543782, 51.048740976244147], [-88.653022332832293, -81.214225577010723,
35.550248226917518, 76.160743630564809]]], [[[-63.524226576744191, -56.896009941669014, 63.19522201987138, 66.388629592533931],
[-56.646135485855687, 8.9406032636504307, 89.111063185551444, 12.201705041404125], [64.844923341968638, 93.705153189621086,
42.451679671109446, 55.611996897559266]], [[-50.4500969589295, -56.48304920853591, -43.397487648713806, 24.970484957830536],
[10.247946263340424, 53.164372653170489, 20.567237785266812, 9.4104989925598801], [-56.157152366568909, 42.552761052044843,
56.078516299029076, 18.940543407164128]]], [[[-33.632224346804193, -69.405810068119834, 44.225943185591831,
95.213025790079087], [-38.509288601106675, -62.938695142627999, 82.460256045254965, -40.372955049612315], [71.091785922673608,
-67.332900637102753, 99.968681344820283, 87.597127665814384]], [[-15.352405373769315, 13.607690117097107, -27.035258608117374,
-88.065123343235953], [46.351984421658017, 40.175457656434133, 90.498104230403385, -29.926375524616702], [89.955509906700911,
75.738059235642481, 92.170833583735543, 28.373336853066405]]]]))
res=cos(arg)
ref=Data(numpy.array([[[[0.99605564800414159, -0.74003978479165422, 0.11375282452021319, -0.97193527337579688],
[-0.38735756213150352, -0.09361056558100582, 0.98688284383678593, 0.99400405568827155], [0.039643724768732305,
-0.016953880290015939, 0.40938737891293392, -0.014551661058647967]], [[-0.026502388661538694, -0.98659374258249288,
0.92655557103089836, -0.6496724166496719], [-0.99429911442146879, 0.86614127304686683, 0.32387445454567942,
0.86207036562540851], [0.85767934528605649, -0.14455871961558475, 0.051413892338749011, 0.91225420946081004]]],
[[[-0.29260780746660703, 0.64426928163414932, 0.29180026978390428, 0.56291609637951678], [0.95973114604298926,
-0.51823819807950822, 0.039646471705651949, -0.69533990552752001], [0.3074098932007972, -0.49938120217558235,
-0.42863722843891311, -0.82188268941215192]], [[-0.51804635050563663, -0.0015959786891496938, 0.66306628166652848,
0.27851730967463495], [0.089682205138485488, 0.84359470875913611, -0.98972429938704287, 0.71639675290673688],
[0.85520784080680545, -0.99783970980023062, 0.81938284847117593, -0.9901835826774219]]], [[[-0.9728315762339087,
-0.47020799835491661, 0.12637793045273601, -0.79930700568503443], [0.80279896236009785, -0.85799367135344373,
-0.77193843948759455, 0.96981526124126383], [0.49478722014669613, 0.84474246657231211, 0.75618918968201176,
0.078674345855532332]], [[-0.4447440791197817, -0.98666955220849251, -0.97422171127622192, 0.96709984785995873],
[-0.7299620834657633, -0.62265062060091214, -0.71548829876243725, -0.68605277637160145], [-0.55567766942870023,
-0.98192413256230948, 0.54607588753058456, -0.11198684728536197]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.9532464104225401, -0.67511267180012213, 0.99998614310380007,
-0.24305111882757385], [-0.99861396263192814, -0.78501698113748208, 0.68804387880332429, -0.058079923319158462],
[0.90367832948274707, -0.24836118666524837, -0.62308777688597838, 0.89065254831191276]], [[0.8368796543112903,
0.99973376956186366, -0.39951907575386714, 0.95224436545888613], [-0.55333911666953939, 0.76602026469360152,
0.97380536659847572, 0.70861811863632929], [0.77224567755776974, 0.89284034215998742, -0.54640627400327102,
0.72309766743732995]]], [[[0.76973301984773468, 0.94028075556243462, 0.93470472120159609, -0.91504217164360968],
[0.99525378084999272, -0.8850594108800458, 0.4117079619272091, 0.93424308242332088], [-0.42799571611881249,
0.85635538858959426, 0.040168038134317727, 0.59247321999590674]], [[0.98300708870591891, 0.99784787493267124,
0.83381726388896149, 0.9868653057824498], [-0.67990129583749026, -0.97069204733006775, -0.14635792168284548,
-0.9998980572655366], [0.92433186130108524, 0.14079090200582819, 0.89149968128133894, 0.99586349366046734]]],
[[[-0.60159909873717321, 0.9580229239875917, 0.97046484832780555, 0.56921448509653549], [0.68937039860779081,
0.99429781336097167, 0.71172390998364865, -0.89258527400759391], [-0.39489284771819805, -0.20977165731745406,
0.84603990208711677, 0.93324045680470225]], [[-0.93745242426842468, 0.5050818721371243, -0.32567073631884641,
0.99495120989406094], [-0.71658296354855922, -0.78676444613630847, -0.82074485588505508, 0.081155964310960879],
[-0.40786936285205416, 0.94280949490341759, -0.48481330596382211, -0.99510320754671833]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
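# tan: some reference entries below are large in magnitude (arguments close
# to odd multiples of pi/2), e.g. -51.37 or 53.44; the check remains
# meaningful because the tolerance is relative to Lsup(ref).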
def test_tan_taggedData_rank0(self):
arg=Data(81.2949649872,self.functionspace)
arg.setTaggedValue(1,12.3613553191)
res=tan(arg)
ref=Data(-0.406904128478,self.functionspace)
ref.setTaggedValue(1,-0.207936773642)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tan_taggedData_rank1(self):
arg=Data(numpy.array([-95.851047486395899, -87.804262570020512]),self.functionspace)
arg.setTaggedValue(1,numpy.array([35.849126399037175, 13.927401673303507]))
res=tan(arg)
ref=Data(numpy.array([30.785362859177258, 0.16171984883073159]),self.functionspace)
ref.setTaggedValue(1,numpy.array([3.4882512502950971, 4.6971057126849036]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tan_taggedData_rank2(self):
arg=Data(numpy.array([[-6.2123974990845596, 78.448990575809376, -99.326922204393099, 34.466953331401896,
75.081637288912191], [43.152743095243096, -23.515299958653429, -45.139362755422809, 37.555578473020233, 83.674961808589416],
[-20.329004079626117, -89.68698187313413, 63.797873117120815, -97.852830452441481, -20.352637389791738], [73.846890877324569,
34.650839207487195, 41.531549740281122, 42.117481567836307, -14.893287864506703]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[32.462416722884939, -48.149844207496905, -56.126695807148465, 60.69249745488338,
-88.519356123058287], [-77.612992699512802, -82.951630694447118, 41.156303211277162, 76.793150351335555, -72.585387811636906],
[91.644542807801855, 51.947395321575783, -56.950928573202873, 18.806113794978117, -2.4791441058101356], [13.946361780608797,
-89.75004089425245, -7.8270132480697612, -75.115117512127185, -17.025098103456472]]))
res=tan(arg)
ref=Data(numpy.array([[0.07090628276813575, -0.091076341049522724, 2.6032644546506951, -0.090814285504178624,
-0.32760519036327246], [-1.0924542795462242, -21.423000533021405, -2.2775209371153795, -0.14452724439561776,
-2.2228009589039441], [-10.916660613554196, 6.5460789983613727, 1.4468186297845727, -0.49976325011584999, -14.745229039106976],
[-51.371685347992141, 0.093591861362473022, 0.82675810400212302, 3.3025618500686895, 1.0603382585364733]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.7292248126420149, -1.6500818693323478, 0.44893985316262996, 1.5650975398719362,
-0.61967622915013898], [1.3320242181202921, -3.2261605736834746, 0.32651185794691795, 5.6272845624900736, -0.3411363783892129],
[0.59719690743737996, -8.962491127283629, -0.42546071042084166, -0.043469475465786209, 0.78003579386887978],
[5.1771917698562682, 4.5869701687908737, -37.07146436191114, 0.29092035884983614, -3.8573430010424579]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tan_taggedData_rank3(self):
arg=Data(numpy.array([[[-31.363348163296223, -63.428729589813429], [-96.507312106299665, 31.782542686119456]],
[[-34.681083479682911, 94.077182149079221], [15.294827923512429, -2.7318887141934312]], [[40.882434195822839,
-64.18724581426693], [11.572828840371855, 90.497867255100772]], [[-28.641061694503762, -87.171931801575766],
[-11.020127312733962, -30.967479102653854]], [[-7.3186224758889296, -50.786266134306011], [27.048134010190211,
2.5490774473318112]], [[61.949209649501199, 85.202746332031268], [-96.092429938290053,
-18.405433004645062]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-56.051822909334504, 24.794825821519993], [-0.67715445796852691,
83.973745178864789]], [[-67.137455945524025, -23.605845271936829], [-50.37296104694218, 61.52765962606091]],
[[89.358776831997943, -14.338540458285891], [-29.266662560421409, -3.8651288601611213]], [[34.695285195870127,
90.166487346465175], [-10.666151183063903, 29.826418400042996]], [[35.897311083718819, -5.6908691923045893],
[81.788877267557382, -28.486351983044273]], [[-10.244239444046215, 61.596990071263548], [9.0672269007921784,
5.6119735254265208]]]))
res=tan(arg)
ref=Data(numpy.array([[[0.052626876950351224, -0.67956116470517647], [1.2147515603304766, 0.38397534066424899]],
[[-0.12419702009054982, -0.17227194869809437], [-0.43836392791494461, 0.43427926215657442]], [[0.041753938290720764,
-4.5704240024751872], [-1.5355049455338108, -0.69642068661122147]], [[-0.38410347124596128, 1.0146352438416113],
[40.719994162243026, 0.48114165299842049]], [[-1.6859462819034383, -0.57360288493474976], [-2.7861599010747122,
-0.67320458364148295]], [[-1.2161965333027973, 0.39911670622190515], [3.5598363671486943,
0.47582704782624802]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.54221273929241065, -0.35139316545369953], [-0.80396583079821793,
-1.1366271548317615]], [[-2.3211791986595185, 22.764215950312941], [-0.10789436233823439, -3.6616056049070829]],
[[5.6030778046056575, 4.8985896068181933], [-1.5314392133091446, -0.88334381751181845]], [[0.13864424980142789,
-1.3683712672276451], [-2.9249993656191324, 53.435949643775317]], [[4.2516444503647923, 0.6729153019338332],
[0.10788392698491613, -0.2152531375981952]], [[-1.0705573488747255, -2.8639517816691598], [-0.37360951726234692,
-0.79422844785382163]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tan_taggedData_rank4(self):
arg=Data(numpy.array([[[[-97.656975158803732, 86.62989131986555, -77.554231481640841, 3.8669983308735141],
[-46.038298726138535, 40.067291705031494, -98.270364197569336, -94.6969458479483], [87.514637704319199, -92.077863733525263,
-3.2419643949078676, -63.397042107941658]], [[-78.795717243110204, -15.279620056917338, 80.460981482647753,
58.412764427127541], [78.818186435655377, 37.682189940374087, 44.849206889045604, 44.848625721891608], [-35.5574347957388,
-12.199741839763533, 97.654780429841566, -80.88961765682032]]], [[[-22.652246910060853, -67.38886650009394,
-23.477307930487527, -10.646755410960651], [28.156205936499333, 61.620904094908099, -68.627816293502761, 67.122379266164245],
[-98.867312915684863, -67.45476737752476, -25.299310914293784, 37.934440895232058]], [[49.380808935348227, -39.408417085726846,
-20.541815407391638, 83.649278193509474], [-87.485520624890597, 58.270434291856475, 94.943963892353963, -72.626165639298705],
[76.160653617847743, -56.169631642306463, -99.798183422398054, -90.426229350215536]]], [[[88.117152264551066,
52.841153774969399, 66.022106559130634, 40.622509829181638], [-4.2428278873716465, -39.006278992668378, 25.439473540797223,
61.543987328178218], [-15.166974840542792, -90.535188054136981, 84.651661558032657, -16.693664047828548]],
[[74.767862181117096, -78.437587118526309, -79.957730051825251, -82.440913401255031], [13.979534083611483, 37.832449897143647,
-41.090148981970032, -8.6289016011243689], [41.053492503522762, 8.9183240359808309, -66.578380761411694,
66.20182099550621]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-65.426437737609433, -42.513342752929837, -61.194113194155307, -64.800952933449537],
[-99.624865579613584, -3.2191662753780008, -52.527343047996354, -63.282633267519969], [39.453397429286866, -27.130144978241091,
-56.965824807007913, 74.720174006700404]], [[-8.9359752895985025, -12.273022000155564, 79.215464783067716, 54.215640736250037],
[-10.545731014079962, 39.462823926104164, 40.564010266889511, 92.013499250506641], [20.430731908343816, -93.73852236295572,
38.616446665550825, 16.303457902544551]]], [[[-8.6131335963080886, 91.666532748276779, 67.718239380865299,
-24.067799387278825], [2.6235650197201892, 84.44637358608324, 55.396761917366206, 30.029168851594335], [3.7411742822091298,
7.2609312887790338, -64.032694520213113, 71.454721297104697]], [[63.195820999024335, 10.691306316136078, 70.801143686977696,
34.146817443572871], [-81.881989121896652, 39.550920228819763, 4.1371918628010178, 93.50590280236878], [97.927125099631667,
64.442557162542499, 81.251825689023093, -72.766391730318389]]], [[[-99.988258264364134, 6.3957796419498578,
-83.783069257657814, -42.282613168159912], [36.226072219705117, 53.514293085963089, 55.242986060328917, 30.924108560738716],
[48.213793181692523, 33.583178486196687, -17.776417123041654, -79.70940504468291]], [[-26.186562848112317, 35.053508602494276,
-74.15973561349287, -8.998608974101586], [72.625731679153461, -45.763185712759238, 96.157419130429048, 82.60958774405006],
[28.466122898236932, 13.661431663776554, -64.305636252969435, 78.126668760850237]]]]))
res=tan(arg)
ref=Data(numpy.array([[[[-0.27417913318913278, -4.1566210200104123, 1.5091093500762029, 0.88667753844569219],
[1.8966516810504024, -0.97631203132464983, -1.2121104814243526, -0.48202716829016767], [-0.48300153499471354,
-1.4644610945344889, -0.10071016976344437, -0.63420186225059838]], [[-0.26163711886038371, 0.45661727740971819,
-2.7363785710173922, -3.3111423421076758], [0.2857904741045893, -0.016923518088534008, 1.1779198979973127, 1.1765333132126685],
[-1.5571186702709128, 0.38398982640921775, 0.27182083255769302, 1.0128687939892587]]], [[[-0.77786631973260933,
-6.3841435669149291, -11.786941762312257, -2.749590141226586], [-0.11868049092962783, -2.6579585972745292, 0.52982517743967028,
2.2281208753465154], [-10.738431613965821, -11.146506047949773, -0.16812749423430426, 0.23977164577867416]],
[[-1.2212418866506991, 7.1735230424816061, 8.192421452643833, -2.3846593272264083], [0.51943402007653894, -6.5734237237147752,
0.83578647210017276, -0.38732785222465582], [0.95508720601143937, 0.39829550561729016, 0.89993916898678605,
0.80859125720920677]]], [[[0.15375262833206235, -0.63522917927167522, 0.048699277776481524, -0.22172457509128218],
[-1.9707778487603462, -3.7049205779162753, 0.3167282752575496, -3.4396185005903077], [0.60077400519944801, 0.64237874378765658,
-0.17303671645831173, -1.5094893398587974]], [[-0.72966855935529684, 0.10258684244697755, -6.4899256428743657,
-0.94950861550679122], [6.2912235198952393, 0.13413392183447914, -0.25475027110751947, 1.0211790862248971],
[0.21605885016652787, -0.55471233347047222, -0.69140625670162514, 0.23243020992993024]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.60899603533502777, 9.7851675904442956, -14.915637546865332, 2.3764544607985711],
[1.2760657540936704, -0.077729601321507483, 1.2090043292697605, -0.48401767690643016], [-5.3886094617159346, 2.200117216306007,
-0.44316694059504153, -0.80544075052469299]], [[0.53185115511734737, 0.30206325695557501, 0.80148938350550525,
1.047442535039711], [-2.0709860162340474, -5.1191444088641491, -0.28397859569936534, 1.2792587767882238], [-96.338809394137982,
0.55838397029774967, 1.3060273034426464, 0.67754301675915884]]], [[[1.0539199784527833, 0.62743135353607671,
-5.6890981603187942, 1.8052864811134648], [-0.56994582002442928, -0.39550809900908157, -2.2459675092850895,
-5.3721562872445512], [0.68352279680128936, 1.4837179824840887, -2.5785739645551788, -1.033580665481149]],
[[0.38093975110296807, 3.1845224145186783, -8.6338809194109842, -0.43546576800373976], [-0.20331407344276023,
-3.4643985918583193, 1.5424352550195062, -0.91653700413563366], [0.59637912459294451, -25.044476766299347,
-0.45811677917613758, -0.55904458203375451]]], [[[0.60311480417088692, 0.11307256431662512, 1.7033508050940664,
-7.7156847407432672], [-10.196871172372907, 0.1076307213120567, -3.6831649770066708, -0.53572561021033649],
[1.9166401734031362, -1.4728710216778025, 1.8407229850668281, -2.3572817040493903]], [[-1.758855002901736, 0.54110631448591939,
2.8976606059699086, 0.45399233058581712], [0.38682887197477689, 4.6895764808665765, -2.8373957569747743, 1.3357904861442966],
[0.19417566417476489, 1.9409854797509967, -10.275523383878257, -0.43837851346057438]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
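# asin: arguments are restricted to the principal domain [-1, 1], so the
# elementwise results lie in [-pi/2, pi/2].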
def test_asin_taggedData_rank0(self):
arg=Data(0.989657679547,self.functionspace)
arg.setTaggedValue(1,-0.473489993439)
res=asin(arg)
ref=Data(1.42685052415,self.functionspace)
ref.setTaggedValue(1,-0.49324888058)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asin_taggedData_rank1(self):
arg=Data(numpy.array([-0.60614764750793948, -0.76269603799552499]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.074431252427650141, 0.46633079704255254]))
res=asin(arg)
ref=Data(numpy.array([-0.65120801803032125, -0.86747147905375954]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.074500149323603215, 0.48513840344368775]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asin_taggedData_rank2(self):
arg=Data(numpy.array([[-0.38725529068582309, -0.035002566048380368, 0.29215359547572062, -0.74047728151511116,
0.86375088045015236], [-0.32624879922924277, 0.2147493204278017, 0.69593358057302734, -0.67043661608803573,
-0.36870988653065229], [0.47510384739864087, -0.76918433519434637, -0.34537195722060143, 0.011183356922037646,
-0.11541581215659935], [0.27260362912724068, 0.61437603398068741, -0.25663483045732682, -0.041872856264680069,
-0.91618191033723884]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.41390194006847647, 0.47906669770750199, -0.65223346998753673, -0.15143835662945238,
-0.33784007340735844], [-0.70843138277904028, -0.54438594011729147, 0.034207507853760921, -0.32612382903828285,
0.70186023759432148], [0.46194569999679191, 0.20862219804974647, 0.43258225592841981, 0.89408040795536747,
-0.21133552002226552], [0.47668525800656281, -0.98272511924427908, 0.12392756171121211, -0.74247467351461138,
-0.70926856758101375]]))
res=asin(arg)
ref=Data(numpy.array([[-0.39765272784734212, -0.035009717396994572, 0.29647790574666982, -0.8337802351353637,
1.0426662706606511], [-0.33233251116675155, 0.21643515475826403, 0.76971914249277162, -0.73479708883031969,
-0.37762073948486119], [0.49508204578845194, -0.87756375238241691, -0.35263510222938133, 0.011183590047521475,
-0.1156735991133048], [0.27609811766175707, 0.66159490472202953, -0.25953880511997846, -0.04188510212750262,
-1.1584472025690309]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.42673623857902859, 0.49959114862575316, -0.71052717518368491, -0.1520232503062508,
-0.3446210935366435], [-0.78727319189839995, -0.57565690165345518, 0.034214182708838151, -0.33220031031626085,
0.77800568719018426], [0.4801877492089463, 0.21016594677352965, 0.44735491475086192, 1.1063738876172791, -0.21294114010121065],
[0.49688012059430242, -1.3846520382811431, 0.12424698835488085, -0.83675707078112471, -0.78846008349356012]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asin_taggedData_rank3(self):
arg=Data(numpy.array([[[-0.27078670807121452, -0.6521353392582292], [-0.98935156671000035, 0.71924091352997421]],
[[-0.596001253659459, 0.61191364730020958], [0.66651667689067051, 0.90368733535070822]], [[0.73399023574476341,
0.70767255658646566], [-0.13936321871982982, -0.62987342971179183]], [[-0.4948738057441634, 0.49128944743073721],
[-0.88331247766967902, -0.86621949805759135]], [[0.14384806329256028, 0.095935576215879115], [-0.69504930628562311,
-0.41646976607569797]], [[-0.39226444326380583, 0.2452138366202059], [-0.1518719139549316,
0.93521490540250141]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.22465455750635011, -0.64081370087964307], [0.6605109979083339,
-0.54615750594210355]], [[0.56019147828484561, -0.67363253611107687], [-0.26666555499697531, 0.5822337693194235]],
[[0.96028563934974787, -0.53648605635740176], [0.016439994710971462, 0.30498182045115629]], [[-0.27256730376760929,
0.25969816117226885], [-0.64645590181357815, -0.097098197456417124]], [[0.090976011478984375, 0.61837879336933055],
[-0.69346965532985538, -0.27865603672245576]], [[-0.18099305537471033, -0.21217824413547803], [-0.24741889558471541,
-0.10075712603260512]]]))
res=asin(arg)
ref=Data(numpy.array([[[-0.27421017834129852, -0.71039772542571444], [-1.4247320578861771, 0.80270911325546157]],
[[-0.63851198939279763, 0.65847783965152196], [0.72952644193099725, 1.1283041433740428]], [[0.82417870001472116,
0.78619861108497946], [-0.13981832890670637, -0.68139024128778247]], [[-0.51768960196670843, 0.51356956389655972],
[-1.0828819297210799, -1.0475858703421546]], [[0.1443488307389203, 0.096083348390609224], [-0.76848847834177259,
-0.42955884388247545]], [[-0.4030920504517207, 0.2477402572487051], [-0.15246188107240444,
1.2088655180324399]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-0.22658851752026385, -0.69555772267891613], [0.72149914702634521,
-0.57777028882908332]], [[0.59461693452585995, -0.73911287993369601], [-0.26993164239658068, 0.62147348780168055]],
[[1.2880241486013853, -0.56626768327750376], [0.016440735350003028, 0.30991934023703022]], [[-0.27606036257642469,
0.26270962685432236], [-0.7029299940282141, -0.097251423034157833]], [[0.091101977070680226, 0.66667810639918901],
[-0.76629367807938997, -0.28239443270507703]], [[-0.1819960893458433, -0.21380341867513972], [-0.25001541559304535,
-0.1009283906282767]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asin_taggedData_rank4(self):
arg=Data(numpy.array([[[[0.17667641262684564, -0.20528415608852835, -0.86377896580752189, -0.5808064056077008],
[-0.53575131452410674, -0.94973341635526076, -0.94934619991031122, -0.36012502776308042], [0.30866069414823993,
-0.22625586744343318, 0.89172233386738275, 0.041379170795829534]], [[-0.74767149169546465, 0.50300329169060554,
-0.77484404377979366, -0.8352964117129088], [-0.30013599718442951, -0.10886586640435891, 0.2730681338127674,
-0.1615077727930434], [-0.41658644426176838, 0.87939475296716063, -0.86922788571063614, -0.69906009036185479]]],
[[[0.12016629383182509, -0.70534585776132253, -0.87839274516803956, 0.2480882290553843], [0.31790318495990477,
0.03461698975366434, -0.027498912345320314, -0.66291073124981814], [0.50626012778834562, 0.77210638834559853,
0.32650848757464579, 0.9203611453694176]], [[-0.47360390489237758, 0.85676222423230053, -0.015787865739628981,
-0.37070260942360855], [-0.72961058537924894, -0.14494860353517136, -0.52932600855417877, 0.88281683601885486],
[-0.6909459206541444, 0.69160226630289623, -0.54290210856405896, 0.34524619417902236]]], [[[0.36542406478716893,
-0.96793946499057182, 0.38442480479198515, -0.57435150997595197], [0.12545758270235607, 0.96934407264412958,
-0.24044762690293819, -0.340989156148089], [-0.44460870104468952, 0.88803936519219207, 0.55053765965557822,
-0.5546454459624317]], [[0.83841967081685675, 0.50019773286874503, 0.22967486065013221, 0.56354640088505747],
[-0.65856710498790327, -0.90765207375799539, -0.58747426169848049, -0.53180638195873375], [-0.83276666060509819,
-0.68616293259457728, -0.17418580748690327, -0.62859194512462024]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-0.45006916215173465, -0.92518399158900699, -0.04783796036863075,
-0.19626369399103549], [0.09169070885520969, -0.87029170445041015, -0.0045652336502262081, -0.32329394607483464],
[0.55866492213457741, -0.22397258908984941, 0.052303570015150402, -0.7530677681629222]], [[0.54997830254931301,
0.35346830466133805, -0.40403225037158907, -0.04482659680963097], [-0.11974859579133346, 0.94132610730205091,
-0.66324538714156511, -0.22519453019335234], [-0.70728958122137842, 0.92386346743545333, -0.6458871899721651,
-0.4609281078649099]]], [[[0.12930111020374491, -0.98520462255645291, -0.40152660242615856, 0.010632521180307775],
[0.031150770980220788, 0.55887463503362822, -0.54295081925718014, 0.050402433196199858], [0.013662223054701439,
0.40718009215619322, -0.71883819418052053, -0.96296533562944775]], [[-0.1927124007905795, -0.68855068933515107,
0.6316010981181337, -0.041869003739051891], [-0.046326825303942165, 0.92598846850093763, -0.72356399590221465,
0.25634295052044487], [0.22051227192098355, 0.90166643600746443, -0.06859211365531892, -0.045149621190890721]]],
[[[0.39410312014393267, -0.044987876742481614, 0.8979457783429603, 0.72166466731038081], [-0.83729280796152195,
-0.97851172484586679, 0.17112070485897912, 0.2107396926071694], [-0.22810606176169324, -0.92777597337878248,
0.58581791765258862, -0.57511066270834021]], [[-0.30425553857922449, 0.63784070657640024, 0.76802042170834328,
0.56358714499635787], [0.76594910306777875, 0.85231338535685475, -0.94636186542722056, 0.77240089163366621],
[0.31982221170560687, -0.32750948889637299, -0.034744253720429996, -0.50257821297680039]]]]))
res=asin(arg)
ref=Data(numpy.array([[[[0.17760871448276636, -0.20675403804646908, -1.0427220071893906, -0.61971896050972197],
[-0.56539728309730153, -1.2523832523933589, -1.2511486870629975, -0.36840190993020011], [0.31378465192018662,
-0.2282321452737586, 1.1011365828156379, 0.041390988386389355]], [[-0.84454869639849361, 0.52707016378896132,
-0.8864683510196445, -0.98867169122207821], [-0.30483522099816734, -0.10908206377238625, 0.27658094036046738,
-0.16221829359885875], [-0.42968718537947675, 1.0745894259046815, -1.0536384870903868, -0.7740822073469662]]],
[[[0.12045738832689482, -0.78291093220064634, -1.072488853750152, 0.25070628836034903], [0.32351712207393363,
0.034623907281987255, -0.027502379259771104, -0.72469982402898492], [0.53084255928695823, 0.88214908246269863,
0.33260724413419246, 1.1690029652625629]], [[-0.4933782124574862, 1.0289582745627337, -0.015788521685103963,
-0.3797654160853966], [-0.8177523437209665, -0.14546102725991955, -0.55780595912809805, 1.0818256899500005],
[-0.76279673253995905, 0.76370505307109449, -0.57388899520112124, 0.35250109635513183]]], [[[0.37408833395292318,
-1.3168934796373459, 0.39458466710266937, -0.61181174217240464], [0.12578904459759122, 1.3225470507143868,
-0.24282698097084726, -0.34796891551730025], [-0.46073736173792446, 1.0930630422580974, 0.58300815127146821,
-0.58793680336412102]], [[0.99437717008195303, 0.52382711290120032, 0.23174359978487752, 0.59867256987095474],
[-0.71891304682557911, -1.137655796129406, -0.6279341788586954, -0.56073216019431915], [-0.98408642456901807,
-0.75620114331540811, -0.17507887513122425, -0.67974142884518618]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.46684278730023371, -1.1815201100750479, -0.047856225143968707,
-0.19754604320899144], [0.091819674168062873, -1.0557942602491508, -0.0045652495079866228, -0.32920830092822206],
[0.59277522179959163, -0.22588871606051758, 0.052327446914480526, -0.85271238565039864]], [[0.58233825824309027,
0.36127616478278501, -0.41592063812310764, -0.044841623005844253], [-0.12003665216187458, 1.2265382433727618,
-0.72514689608315608, -0.22714269023447864], [-0.78565671511328383, 1.1780552671570363, -0.70218481885046757,
-0.47904074383403694]]], [[[0.12966414087424233, -1.3985638911620022, -0.41318311251144624, 0.010632721525817797],
[0.031155811146442221, 0.59302809425903769, -0.57394699929636273, 0.050423798063665655], [0.013662648113827933,
0.41936448841432711, -0.8021296351981142, -1.2977925042003347]], [[-0.19392561536532552, -0.75948862127879602,
0.68361662963146053, -0.041881246220352109], [-0.046343412238644396, 1.1836453452376237, -0.80895172981266905,
0.25923682302888701], [0.22233963948715654, 1.1236078001339596, -0.06864601410630812, -0.045164974774920652]]],
[[[0.40509178676205976, -0.045003065808640254, 1.1150794983658745, 0.80620405929955008], [-0.99231290343135414,
-1.3631154327855153, 0.17196703968158128, 0.2123315842232695], [-0.23013201668885772, -1.1884076378143387, 0.62588891338210939,
-0.6127394135964227]], [[-0.30915681905645181, 0.69169133505319369, 0.87574435958173658, 0.59872189267169562],
[0.87251631631408044, 1.0203925151294579, -1.2417837981042867, 0.88261263771725229], [0.32554183754839172,
-0.33366648106510866, -0.034751247851029093, -0.52657840683397406]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
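# The acos tests below follow the same tagged-data pattern as the asin tests
# above: build a Data object carrying a default value plus one tagged value
# (tag 1), apply the function, and compare against numpy-precomputed
# references with the relative check Lsup(res-ref)<=self.RES_TOL*Lsup(ref).
# Sanity check for the rank-0 case (assumes plain numpy is available):
#   >>> import numpy
#   >>> numpy.arccos(-0.291587772644)   # ~1.86668265, matching ref below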
def test_acos_taggedData_rank0(self):
arg=Data(-0.291587772644,self.functionspace)
arg.setTaggedValue(1,0.0219832082111)
res=acos(arg)
ref=Data(1.86668265093,self.functionspace)
ref.setTaggedValue(1,1.54881134759)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acos_taggedData_rank1(self):
arg=Data(numpy.array([0.12826451971761799, 0.33491528109011126]),self.functionspace)
arg.setTaggedValue(1,numpy.array([0.22031555171954387, 0.13047651014043127]))
res=acos(arg)
ref=Data(numpy.array([1.4421774808126324, 1.2292810095841651]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.3486583675462354, 1.4399467435143751]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acos_taggedData_rank2(self):
arg=Data(numpy.array([[-0.41152758157568659, 0.50648377704044156, 0.004765718148047382, -0.19506763365502267,
0.27315738992922611], [0.33882351517959264, -0.029041529921664644, 0.28655425515791988, -0.58345582451173839,
-0.95744850153173044], [0.32067870881780935, -0.59696574288858684, 0.80001515403024226, 0.20766175365526407,
-0.92537824876724606], [-0.62608849117550203, 0.49584899163835461, -0.91012458136569108, -0.28964137555570646,
-0.019645711019819267]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.30161380606367949, 0.47771896537569658, -0.4069706892444982, 0.36563120333238408,
0.34635703099859194], [0.77283471581548135, 0.39085192259836066, -0.91464145652686901, -0.5785113219336232,
-0.55834836602534121], [-0.89226447891919258, -0.005420603335181351, -0.23047363216965455, -0.31627282725938288,
0.24708083627782318], [-0.36674157132228213, 0.84284509604242497, -0.55735470644132779, 0.28132905259474161,
-0.54756243112029979]]))
res=acos(arg)
ref=Data(numpy.array([[1.9949258434411383, 1.0396944084450979, 1.566030590606778, 1.7671227348451009,
1.2941226028750279], [1.2251301602101246, 1.5998419405884083, 1.2801680039289014, 2.1937737283320473, 2.8488240011834036],
[1.2443503750305041, 2.2105099844323077, 0.64347585165092913, 1.3616123305271102, 2.7528286077860806], [2.2473230240603672,
1.0519841271516832, 2.7143809675072594, 1.8646484580550975, 1.5904433017575812]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.2644114932860036, 1.0727399259992247, 1.9899315573166447, 1.196485454770198,
1.2171113576324784], [0.68750036295992678, 1.1692393692469043, 2.7254164558807252, 2.1876987449669811, 2.1631899258148524],
[2.6731322180353185, 1.5762169566759732, 1.8033607174166493, 1.8925943802877971, 1.3211298029884102], [1.9463004481873596,
0.56824804065117174, 2.1619926599703043, 1.2856175077339616, 2.1502446915093585]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acos_taggedData_rank3(self):
arg=Data(numpy.array([[[-0.0018967734613412057, 0.59300273345453158], [-0.81381641137830263, 0.11977023446026669]],
[[-0.037555176051128791, 0.021140469012053797], [0.19984949833653998, -0.11442177154206457]], [[0.34269780843852971,
-0.29360044358101034], [0.53220357878081814, -0.36059448682460116]], [[-0.78355112918456538, -0.083700661501167062],
[0.32350069086320987, -0.38066114197452472]], [[-0.97705826933358608, 0.40808141278379839], [-0.42109501987534292,
0.38704401675830336]], [[-0.38925229608360801, -0.94561123734124619], [0.38734879630697328,
-0.84315118092500463]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[0.81502930234987203, 0.0050248424447604245], [-0.39601071676781818,
0.78146079161187321]], [[-0.40269237311187456, 0.59630968314412591], [0.43146464177692678, 0.28202596494731069]],
[[-0.88345581362782388, -0.40846927031376268], [-0.65907710124008867, -0.51535297922329859]], [[0.3791891533095284,
-0.1878703433573069], [-0.550755371929043, 0.00053164521361281913]], [[-0.67703943464546334, -0.71143279160208883],
[0.1862121811026225, -0.84162834520638197]], [[-0.77834727203948528, -0.40520983099715968], [-0.82342473982088238,
0.50868059221882711]]]))
res=acos(arg)
ref=Data(numpy.array([[[1.5726931013935923, 0.93601340923207366], [2.5214858775304476, 1.4507378791003851]],
[[1.6083603363688714, 1.5496542827850082], [1.3695920087141873, 1.6854692556999198]], [[1.2210092255140392,
1.8687874345506976], [1.009595077794389, 1.9397015089412193]], [[2.4711571166083619, 1.654595029389675], [1.2413695403820464,
1.9613074868225726]], [[2.9269767557006974, 1.1504447893795458], [2.0054485856592787, 1.1733727421774625]],
[[1.970616056541908, 2.8102648289377887], [1.1730421776900686, 2.5739136323477916]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.61801656176645547, 1.5657714632044866], [1.9779646207779984, 0.67379274631597552]],
[[1.9852526825543613, 0.93190017823864835], [1.1246806413386898, 1.2848911859657939]], [[2.6539841054134725,
1.9915727464827593], [2.2903872908337721, 2.1122158133671496]], [[1.1818764764700775, 1.7597897566840754], [2.1540652930004773,
1.5702646815562391]], [[2.3145286806652781, 2.3623312612316218], [1.3834908482456931, 2.5710876379804022]],
[[2.4628254031317334, 1.9880046639617615], [2.5382167551116179, 1.0371447242973137]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acos_taggedData_rank4(self):
arg=Data(numpy.array([[[[0.17419378414794906, 0.20877755349477622, -0.9560863974612771, -0.14865727072192525],
[-0.17733095703209234, -0.075638384984418994, -0.95407857695419507, 0.6554653414860343], [-0.61341371635489927,
-0.52092549325400384, 0.56479500388669734, 0.6212897490792797]], [[0.62874023072485619, 0.23625020456583901,
-0.16020531855055364, 0.55591840537558723], [-0.79872077551867249, 0.04557168403173395, 0.3613949288270315,
-0.95572639016521221], [0.79815227797346733, 0.17234806171331019, 0.26868955082890489, 0.98424717569008591]]],
[[[-0.54543833436541767, 0.13415007696775616, 0.13534722231297658, -0.72252620207541485], [0.72328095946739612,
-0.13862718470167823, 0.044552606563023067, -0.69905432254503375], [-0.085225036679073551, 0.98164131149719402,
0.76464095979981717, -0.86551052241781756]], [[0.62239954920343643, -0.70181949407386546, -0.8168814556558559,
-0.13547995725989515], [0.82528036114158154, 0.87633827050160984, -0.080143289194266831, 0.83348769953227908],
[0.32539673255685431, 0.93803075579628348, -0.27823622244460522, -0.39447875543393573]]], [[[-0.59356695628956446,
0.42068072139579571, -0.2966816048473806, -0.7687956041808558], [-0.71878760019674037, 0.97463077881539117,
-0.31728131209986821, -0.73484475981245589], [-0.12727988625513686, -0.94231248176633886, -0.77734468240253696,
-0.90444340163288839]], [[-0.18269489759149571, 0.29439735776753762, -0.27185087948144659, -0.62048852806225785],
[-0.98053876278297158, -0.26218270695695034, -0.68261964802157793, -0.019759295107996899], [-0.089430250213055507,
0.74948889896974036, -0.75852786166420638, 0.35370400678788205]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-0.1619684522755932, -0.10284993380175211, 0.25821458903539396,
-0.82364767575665576], [-0.36584913630709737, -0.72676741961971969, -0.97076126343096136, 0.91364000367151532],
[-0.77168739033605294, 0.39113009993279912, 0.48535347425417696, -0.52215067436842233]], [[-0.84535167107707998,
-0.87641054599151036, -0.4118154689589868, -0.72256509448603845], [-0.81670007180109772, -0.19694267578855462,
-0.27574844635108997, 0.95030776906209558], [0.76565743374572093, 0.46912480629116415, 0.50374256986893307,
0.093162354426354455]]], [[[-0.73310899789237172, 0.2875723021943497, -0.81245888640465547, 0.58799309157190205],
[-0.90714187533421775, -0.073098886394030282, 0.67818356310455719, 0.26455797708589723], [-0.66652933651616353,
-0.23032652509234064, 0.22892282028568234, 0.44787613065694942]], [[-0.44317080986172785, 0.31925123670093725,
-0.18287302844160769, 0.89497686805988264], [-0.30669668515287685, -0.75162807334334503, 0.61189143325364581,
0.053990654942066563], [-0.11090521653913066, -0.24677109143944231, -0.70304586767346078, 0.54736846949774476]]],
[[[-0.59873211647357749, -0.31774376245292935, 0.43070766306328645, -0.016079803275450555], [0.5261614333552953,
0.6797542937501353, -0.68473148542300333, -0.41052525916288962], [0.68024025081334183, -0.40577339681915869,
0.4087295894868801, 0.61958252572202288]], [[-0.72011815326102413, -0.10455144248350179, 0.75334594465757676,
-0.56149938171962099], [0.16913646946275085, -0.45034525250556334, -0.36739372725079256, -0.041181300029745849],
[-0.64556849960251772, -0.040795848776974841, 0.85515343427832669, 0.6096018411341606]]]]))
res=acos(arg)
ref=Data(numpy.array([[[[1.3957093511629617, 1.360471526581287, 2.8441407383080222, 1.7200066442732735],
[1.7490700866368647, 1.6465070213809823, 2.8373649283858922, 0.85599770843850642], [2.2311721630105854, 2.1187311401011293,
0.97061150484655345, 0.90040872753615897]], [[0.89086421992331588, 1.3322913334783058, 1.7316949814428857, 0.9813289023048628],
[2.495962524203899, 1.5252088542888533, 1.2010328242314052, 2.8429148515570239], [0.64657435379029982, 1.3975834206961624,
1.2987640322677911, 0.1777321440297753]]], [[[2.1477083304186579, 1.4362405897686086, 1.4350324256625075, 2.3782457505851391],
[0.76225455373160755, 1.7098714069908214, 1.5262289680482763, 2.3448704680068988], [1.6561248714124477, 0.19191216072142248,
0.70031230156130786, 2.616965031785782]], [[0.89899161244489156, 2.3487448168313412, 2.5267798820772609, 1.7066941968405251],
[0.60009784920496412, 0.50258924678280592, 1.6510256575906948, 0.5854062256837147], [1.2393650626053583, 0.3538928296356465,
1.8527536586887519, 1.9762968634509979]]], [[[2.2062801487665875, 1.136600790119608, 1.8720122511782569, 2.4477519846112199],
[2.3728531862116133, 0.22573089755729248, 1.8936576221249246, 2.3962340910120021], [1.6984224020679184, 2.8002691893334375,
2.4612300776159772, 2.7008694873643462]], [[1.7545231138284718, 1.2719714573571184, 1.8461121498570447, 2.2401618278670212],
[2.9439834930659066, 1.8360796668105717, 2.3221377460874395, 1.5905569078982662], [1.6603462151776323, 0.72350662179783332,
2.4318473355004873, 1.2092681825027514]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.7334814462425481, 1.673828455510769, 1.3096226655494099, 2.5386097574192203],
[1.9453413548263998, 2.3844003298346768, 2.8991783717530111, 0.41864650952806914], [2.4522863458860922, 1.1689371321766686,
1.0640289187834875, 2.1201670767225766]], [[2.5780194523086384, 2.6391534710101032, 1.9952417407117879, 2.3783020085117652],
[2.526465489342848, 1.7690348671872353, 1.8501645764491155, 0.31657329697684766], [0.69873356468677461, 1.0824968212721804,
1.0428705855020852, 1.4774986804685288]]], [[[2.3936783941171464, 1.279105226241779, 2.5191536430590338, 0.94222086861618171],
[2.7072381573266595, 1.6439604702314694, 0.82550822412311597, 1.3030507874849933], [2.3003397504688494, 1.8032095431448985,
1.3398253531931346, 1.1064078436784477]], [[2.0299290447557947, 1.2458570544086836, 1.7547042970540918, 0.46241697896834122],
[1.8825168384735331, 2.4213232683150521, 0.91234657301380784, 1.5167794070081579], [1.6819301661171466, 1.8201432078142425,
2.3504678570451745, 0.99157974394862303]]], [[[2.2127135213118359, 1.8941453082545316, 1.125519574857567, 1.5868768230835029],
[1.0167160158190489, 0.82336874945488769, 2.3250315257063354, 1.9938263516346553], [0.82270597341644369, 1.9886211901601889,
1.149734692650326, 0.90258559607088251]], [[2.3747689167871977, 1.6755391877344084, 0.71766104149909937, 2.1669930068530197],
[1.4008428772515369, 2.0379483121019257, 1.9470015447043691, 1.6119892756074885], [2.2725637754510268, 1.6116035001525102,
0.54494956853218945, 0.91523810991933396]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
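# atan is defined on all of R, so the atan tests below use arguments well
# outside [-1,1]; large magnitudes drive the references toward +/-pi/2
# (e.g. numpy.arctan(-14.7254534244) ~ -1.50299081, matching the rank-0 ref).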
def test_atan_taggedData_rank0(self):
arg=Data(-14.7254534244,self.functionspace)
arg.setTaggedValue(1,-30.821724654)
res=atan(arg)
ref=Data(-1.50299080856,self.functionspace)
ref.setTaggedValue(1,-1.53836305618)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atan_taggedData_rank1(self):
arg=Data(numpy.array([79.645526314923757, 31.251712111300833]),self.functionspace)
arg.setTaggedValue(1,numpy.array([17.802002898972049, -11.766456436013172]))
res=atan(arg)
ref=Data(numpy.array([1.5582413534867612, 1.5388089940682592]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.5146818449820139, -1.4860127074426057]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atan_taggedData_rank2(self):
arg=Data(numpy.array([[-51.014136430165856, -10.355356517133202, 28.251309295456821, 13.133655639607198,
-4.4836594247888968], [67.588862677322936, -35.526211499854085, 57.141721412265554, 3.5594116807501166, 24.644697631626315],
[-59.211703959261456, -72.046541293224493, -68.738506813922058, 36.183945043854038, 40.813283481240802], [-84.224967550292789,
2.382009962300117, -97.667999664168718, -17.641085801592737, -0.094387632167098445]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[71.722254489170183, -77.163739464663479, -62.935449615094207, 71.421618282216969,
-92.086755719577582], [86.1279234487221, -75.927119138293932, -53.426711624869739, 78.243007256108257, -93.228905473462987],
[51.146224478610861, 18.135315836848292, 25.055391871257115, -63.299860388548915, 43.155037911250076], [82.260960323640546,
-59.034950659315768, 43.419415178367842, -96.721347394404589, 69.939661568517351]]))
res=atan(arg)
ref=Data(numpy.array([[-1.5511964273421315, -1.4745264598757113, 1.5354145091343874, 1.494802690678932,
-1.3513557426844944], [1.5560020692406058, -1.542655526646358, 1.553297765207124, 1.2969112580541855, 1.5302418945935552],
[-1.5539093791983924, -1.5569173012079685, -1.5562494664061093, 1.5431667932509703, 1.5462994014411897], [-1.5589239207380323,
1.1733268586785741, -1.5605579164505456, -1.5141710727583415, -0.094108820426104287]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.5568545563458982, -1.5578375973438927, -1.55490836758775, 1.5567958786717648,
-1.5599374287193295], [1.5591862121193236, -1.5576265635211364, -1.5520812830291499, 1.5580163277677324, -1.5600704510968921],
[1.5512470322905925, 1.5157110808099072, 1.5309059297790453, -1.5549998176728896, 1.5476282073492109], [1.5586404907725899,
-1.5538588285718626, 1.5477692261183333, -1.5604577159532746, 1.5564992621352502]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atan_taggedData_rank3(self):
arg=Data(numpy.array([[[-51.439521199073958, -42.19343667612452], [-57.017857592051982, 42.357194430644654]],
[[-94.668069377568997, -97.267849956585323], [95.97751393208145, 72.083118529080309]], [[13.931243087317895,
48.314113039294057], [16.667854995256334, 7.2354641537036457]], [[-35.24080770006239, 6.1151782689095882], [7.2809362943481659,
-47.744411262592919]], [[-20.3055738368638, 38.892625438958902], [13.233199681154034, -53.915986490531978]],
[[-94.858405410928896, 99.82234948258116], [48.503882060032311, 30.917187302988793]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[91.878551047153621, -29.510183345226608], [-83.224925312144791, 83.845227550590039]],
[[88.225493436926286, 11.532409624377109], [2.5458204819547916, -60.296569325231573]], [[54.038333175852074,
-4.1384279748112789], [23.760565206323776, 21.555672180440922]], [[-33.836072987446173, -77.826802360528504],
[42.386301929980959, -28.853499934767314]], [[-92.851208362115315, 51.187485401450829], [-17.065728694813885,
53.278176745218417]], [[-99.442133011258861, -62.662157473770883], [-58.469826126068639, 17.824111168192687]]]))
res=atan(arg)
ref=Data(numpy.array([[[-1.5513584698690774, -1.5471003942988308], [-1.5532597596608479, 1.5471919713913882]],
[[-1.5602334958935613, -1.5605157996684915], [1.5603775966626152, 1.5569243429573056]], [[1.4991381283805512,
1.5501013957909033], [1.5108724340083037, 1.433458236465541]], [[-1.5424277452541746, 1.4087034902352189], [1.4343053177252822,
-1.5498545289386529]], [[-1.5215885210143809, 1.5450901753003594], [1.4953721636022326, -1.5522510785229557]],
[[-1.5602546889363347, 1.5607788652232746], [1.5501823406963127, 1.5384631301370586]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.559912823466391, -1.5369226810631742], [-1.5587812739091436, 1.5588701549951265]],
[[1.5594622198376147, 1.4843005338925794], [1.1965116310113704, -1.554213155607542]], [[1.5522930567093209,
-1.3337034661793217], [1.5287346084657718, 1.5244380631272727]], [[-1.5412506699602671, -1.5579479899433375],
[1.547208174948046, -1.5361523528598342]], [[-1.5600268240968616, 1.5512627864671464], [-1.5122662848001094,
1.5520291193633591]], [[-1.5607405660888007, -1.5548390862365009], [-1.5536951554558593, 1.5147513012507214]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atan_taggedData_rank4(self):
arg=Data(numpy.array([[[[-60.786516054960018, -97.443926672116945, -59.730786105649635, -60.552672183608202],
[0.067178496327429116, 76.097886671263439, -10.611506241697001, 62.520570069561273], [98.734673799525353, 84.863778955168158,
38.252515267672806, -5.1350082880344985]], [[77.589460335333513, 84.577328211722374, 67.229876917205758, -27.884169005126182],
[-55.428762309397705, -42.909045438315019, 5.4478832792561178, 98.719071343277363], [50.041890457414098, -61.217337389541605,
-31.625752675276402, 68.601618873426759]]], [[[-25.801223779222397, -29.445798766093745, -44.798995576077047,
-13.060782989795968], [-24.868508822871931, 54.456897228862431, -69.005823306352426, -38.013099364990332], [52.520288283056431,
75.090539051454812, 88.11571157196164, 29.940468771848515]], [[-69.126425092990985, 78.527356119425946, -53.114343044690806,
83.848543031621091], [-33.685382028364685, 20.306687112361828, 32.650151957007466, -81.059070647169236], [83.469680236467951,
97.861914954668038, -43.032363052419306, 45.804898833159399]]], [[[72.4991519048626, -92.751679859513942, 26.266552717005069,
26.556036707565454], [-65.853696462593632, 49.53468229171304, -4.1588537709789364, -98.080606358862553], [-75.680355306374167,
28.449553815517618, -86.195746809070101, -79.665759321116923]], [[-3.0262101017673757, -86.441575573369178,
-58.423640738004678, 16.659518568604952], [61.02394512053786, -82.308850756533687, -63.48331127418183, 77.928338187268736],
[13.711361913844101, -40.319664743955784, -20.625042794109746, -40.404672515921902]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[34.440676365049114, -8.928215565852156, 82.549857592202045, -23.402816990850781],
[81.444943705518057, 24.836590977242707, 12.529228281117838, -30.233298854898962], [-27.744182872849407, 52.425228768719734,
61.267860951786759, -33.849170571356012]], [[-31.944123108883531, -77.558579126251928, -86.569996998801585,
-22.877811832681274], [-68.796333830065777, 38.3887156141069, 55.836989989650135, -88.366850454930685], [30.300995863327671,
4.9007881921832848, -27.507754163421041, 76.045942075681637]]], [[[64.072551296634714, 74.888616072380785, -33.075348532233932,
-32.445757899913886], [42.326950917626533, 85.004838459602013, -52.797797801406674, 61.710543752406068], [95.848068180331182,
-60.15064788445914, 16.670945317241291, -65.666197079729741]], [[76.675531866034845, -46.804170258023724, -79.050686910748411,
20.250281649863666], [44.623682968710199, -23.035506964204686, -54.724272963446111, 40.878243061765005], [53.368726075681678,
-99.662799699787371, -0.39118677248215761, 29.872163016238545]]], [[[23.227160380259122, -40.824445575015567,
9.9914772245777641, -70.767759555201451], [43.089621482280933, -84.683624327833257, -72.192063858769018, 55.872440414467803],
[-58.127478581268498, 34.87567327109511, 10.580861885981534, -58.73857948325918]], [[77.723769033663018, -18.880641278910758,
-80.850165897272433, -58.112001436928253], [53.595081958851097, 5.6247875895140282, -29.124774133157288, -98.409076696537625],
[46.599025926989782, -63.970578322466842, -22.107674051037463, 45.27636423071533]]]]))
res=atan(arg)
ref=Data(numpy.array([[[[-1.554346793780754, -1.5605343748051601, -1.5540561053483741, -1.5542832800750914],
[0.06707771134963493, 1.5576561137349634, -1.4768364848555173, 1.5548029547769511], [1.5606685188986997, 1.5590132818747466,
1.5446602078043219, -1.3784619450341296]], [[1.5579086919105407, 1.5589733781788726, 1.5559230843172502, -1.5349490465472442],
[-1.5527571087994305, -1.5474954351984298, 1.3892596545420326, 1.560666918317682], [1.5508157282893953, -1.5544625378689159,
-1.5391870576196998, 1.5562204436506224]]], [[[-1.53205786478674, -1.5368486716248135, -1.5484781040920352,
-1.4943803282798005], [-1.5306064810252038, 1.5524352435915465, -1.5563058104960985, -1.5444956717247109], [1.5517583661301895,
1.5574798570707853, 1.5594481000782767, 1.5374091272945045]], [[-1.5563310879232202, 1.5580625993455297, -1.5519712446798413,
1.5588706265268419], [-1.5411185754107688, 1.5215912143863453, 1.540178169450183, -1.5584602703189787], [1.5588165018096578,
1.5605782026465265, -1.547562184377139, 1.5489680683305043]]], [[[1.5570039366370194, -1.5600152686245654, 1.5327434733828007,
1.5331578889109807], [-1.5556123173747718, 1.550611193130053, -1.3348250771073642, -1.5606009845295099], [-1.5575836270993995,
1.5356608560533649, -1.559195346750025, -1.5582445417846158]], [[-1.2516463284150974, -1.5592283355033394, -1.5536816391795429,
1.5108425198654833], [1.5544107833647154, -1.5586475628061378, -1.5550454580714337, 1.5579647286965086], [1.4979930117462867,
-1.5459996168270638, -1.5223495175888366, -1.5460517655438717]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.541769047752825, -1.4592567339075215, 1.5586830280198429, -1.5280924056594469],
[1.5585187106853997, 1.5305548882051436, 1.4911517816513042, -1.5377322682685537], [-1.5347683314855143, 1.5517238541043337,
1.5544760048742774, -1.5412620956961551]], [[-1.539501884061409, -1.5579035610577179, -1.5592454944007521, -1.527113658719671],
[-1.5562616921013832, 1.544752894838392, 1.5528889663387913, -1.5594803490192568], [1.5378060847306976, 1.3695107286356141,
-1.5344589427920325, 1.5576471391021702]]], [[[1.5551902865231184, 1.5574439559923015, -1.5405715366363946,
-1.5399854101404904], [1.5471751118445947, 1.5590328331973433, -1.5518584072127872, 1.5545930587248358], [1.5603635268687615,
-1.5541729333474901, 1.5108835156383955, -1.5555689685427048]], [[1.5577550959903859, -1.5494339594471369, -1.5581468900546853,
1.5214543797077005], [1.5483904520035858, -1.5274123226048211, -1.5525249334605411, 1.5463383139491345], [1.5520609537393462,
-1.5607628293839917, -0.37288575485185699, 1.5373328409638529]]], [[[1.5277698519717662, -1.5463060966366591,
1.4710432191222749, -1.5566665386668717], [1.5475930473251533, -1.5589882171355174, -1.5569452745436581, 1.552900325927786],
[-1.5535944562222297, 1.5421308993211618, 1.4765659635152497, -1.5537733852560058]], [[1.5579309596200162, -1.5178814663571647,
-1.5584283990382004, -1.5535898757003221], [1.552140063159527, 1.3948501935545574, -1.5364747794967917, -1.5606350122506203],
[1.5493399440355367, -1.5551654135988773, -1.5255939766631132, 1.5487133380455611]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
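# sinh grows exponentially (sinh(x) ~ sign(x)*exp(|x|)/2 for large |x|), so
# the relative form of the tolerance check matters here: absolute deviations
# are allowed to scale with Lsup(ref)
# (e.g. numpy.sinh(1.99198759591) ~ 3.59683227, matching the rank-0 ref).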
def test_sinh_taggedData_rank0(self):
arg=Data(1.99198759591,self.functionspace)
arg.setTaggedValue(1,2.30052765476)
res=sinh(arg)
ref=Data(3.59683227352,self.functionspace)
ref.setTaggedValue(1,4.93962040641)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sinh_taggedData_rank1(self):
arg=Data(numpy.array([3.7121592231366236, -4.5205237974298704]),self.functionspace)
arg.setTaggedValue(1,numpy.array([0.30788026299313653, 2.5568342424834434]))
res=sinh(arg)
ref=Data(numpy.array([20.45884479360328, -45.93641525360411]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.31276737629464152, 6.408690275759203]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sinh_taggedData_rank2(self):
arg=Data(numpy.array([[-4.8144899441545528, 0.78267245526398277, -3.9902900754326529, 3.5628490203251033,
2.1183518970045636], [-2.5070220439451694, 2.22072356217594, -2.3873817066715231, -3.8412251092257801, -4.7972608158325922],
[-3.1935509834404807, -4.3564582503990001, -2.9952608734330868, 2.2478968274432152, -3.5759483501851563], [-1.3426146737839293,
4.3069146426030791, -4.4619207942108039, -0.17652673360272075, -3.3503263854852361]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-1.0103477084057824, -1.4588465915190465, 2.8809504601572611, 2.3288760653961047,
2.2303693881343687], [1.0183167542036395, 1.93447626412806, 3.7104269993832428, 4.3335802861769874, 4.3921380450277265],
[-1.3774340986519782, 0.92228142402738822, -2.5941241432249171, -3.3237064911008796, 4.6346105466494585], [1.0666499877056488,
-0.89076238705677468, 4.5302531678878104, 4.5060336794272828, 2.0384873019197407]]))
res=sinh(arg)
ref=Data(numpy.array([[-61.637901615656197, 0.86506368613794438, -27.026038642285407, 17.617581367615905,
4.0985943037096586], [-6.093415206821609, 4.5527324006135492, -5.3965433394599067, -23.280520435716621, -60.584890612216782],
[-12.166901315420557, -38.983816886070187, -9.9702753219255733, 4.6810906645722357, -17.850249501178922], [-1.7839401011571518,
37.098846158833695, -43.321126637656903, -0.17744497495638895, -14.238482567850086]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-1.1912317449878338, -2.0342457843344053, 8.8875657756020434, 5.0844955627700941,
4.5979066282350889], [1.2036641503921339, 3.3879593528934069, 20.423393864250595, 38.101777842909016, 40.400321013863852],
[-1.8562452941244159, 1.0587053970421214, -6.6550738613616067, -13.863522278520332, 51.489045675912898], [1.2807348782797063,
-1.0133220385263433, 46.385634798441771, 45.275432746886217, 3.7743794349039121]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sinh_taggedData_rank3(self):
arg=Data(numpy.array([[[-1.6699752087038435, -0.2455134886023238], [-2.1687245044555992, -1.5692632008931549]],
[[4.5726976745572561, -2.7279776977250503], [-2.2847357293147286, 2.6373859093927754]], [[-0.73793065293643068,
0.38302358868877739], [-3.6068322119333249, -2.7337954547102572]], [[-0.45348176104555638, -2.6490340460279951],
[-4.3886113896165462, 1.8804425676698937]], [[-2.8375230103816538, 1.111346967580932], [4.1139208151096955,
-0.49572589111884202]], [[-2.3299326190666081, -0.62551236912490715], [1.3632704432597551,
-2.2917130664840233]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-2.0562165096337282, 0.35936888959956814], [-3.6372329621674284,
-2.4786029292995906]], [[0.65206578693234096, 4.2616217865668187], [1.5056871818097299, -4.3212094251499877]],
[[4.4704679225004966, -4.3597404599188021], [-0.87623360805733697, -4.4308201122339383]], [[-0.9558721806683721,
0.017485009222809822], [-0.5985209090958632, -3.281153953325652]], [[2.6733102681464143, 0.58838863251234308],
[-3.4875728007909155, -2.8625086273204059]], [[-1.538276838516841, -4.4373864115838035], [-3.3984080066326641,
-2.2757928405839403]]]))
res=sinh(arg)
ref=Data(numpy.array([[[-2.5618921853515806, -0.24798739704939532], [-4.3163983334752745, -2.2974547188559029]],
[[48.39728795997064, -7.6182796518722613], [-4.8606444823509278, 6.9525358015487786]], [[-0.80675026447437204,
0.39245790554232302], [-18.410999897933262, -7.6631103274115127]], [[-0.4691851238549285, -7.0348268024411853],
[-40.258050303704749, 3.2019416421082258]], [[-8.5074259309283864, 1.3546664880209558], [30.584901942778352,
-0.51628045436067105]], [[-5.0899733579461612, -0.66710809345606392], [1.8265668432775661,
-4.8953877347643511]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-3.8442018204801496, 0.36715416750958968], [-18.98012788845724,
-5.9203664863683336]], [[0.69926678187260138, 35.455407944451679], [2.1426925432268189, -37.633167256872468]],
[[43.693083396997459, -39.11202224549578], [-0.99274414975503578, -41.99418680140694]], [[-1.1082307083479541,
0.017485900172098688], [-0.63490086709424287, -13.284436370815889]], [[7.2094126025166956, 0.62293131907508703],
[-16.337945591940521, -8.7241305581397839]], [[-2.2209042584132921, -42.27091867492458], [-14.941504453786477,
-4.816459660325112]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sinh_taggedData_rank4(self):
arg=Data(numpy.array([[[[-3.261368733617156, -1.7097421056887061, 2.4770674176220533, 2.6184686144257991],
[4.2685570141508649, 0.26471561412312994, 1.8677744659878321, 1.2974082786792565], [-3.1644599226939727, -0.98342184660922349,
-1.5818484011953915, 1.7453818193010404]], [[-3.1409423348480905, -1.503135136465966, -4.1317701664086295,
-2.6487331291018479], [-2.9344044174187998, -3.9794616897516244, -1.0636935004548986, 2.4681705131671574], [1.7248384820876259,
-0.61623840444111444, -3.182398118986538, 1.8412600667009382]]], [[[-1.2967881315097731, -3.2351451577555101,
4.2939167700693446, 4.8638089405008476], [2.9825316536154167, 2.5020908411933496, -4.341334514919474, -4.8611901904936659],
[2.5682832345839106, 3.1020556388946119, 4.0616444500999389, 0.58287106482411755]], [[-0.92122010658250453, 3.6178485248091157,
4.1247030396785007, -1.7719429245102813], [-4.2954747345288151, -2.1268789753154813, -2.0943498166080774, -3.5016915119012504],
[4.0796282018855194, -0.70150021612442082, 3.9968340640276256, -2.8002455053291451]]], [[[2.7768169172372295,
3.5354672331727777, -4.9833519421308301, -2.7281177342220486], [-3.6388062639788132, -0.24005900416176473, -3.9369129849748861,
-2.7620876149297002], [3.7327362822865435, 1.1752019535122198, -4.968172850646587, 0.45185628083386664]],
[[0.78465593645643406, -4.8933212754874553, -3.887154034060468, -2.5322616232125039], [-1.0742963974782302, 3.3289574703031288,
1.1899668301054973, -2.9336209382110057], [-2.4653189902719017, 1.4461862760456876, 2.7436930808359143,
4.5571819794183686]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[2.1332145838014913, 1.2177971547397419, -1.9450668430632225, -3.8915509651360161],
[0.34984554597693851, -3.5724187380684516, -0.36417739374160707, -1.4056767503527468], [3.6886009926451795,
-2.3524969636932003, -4.5102454679585238, 4.3770785828462468]], [[2.4748540247021777, -3.1040806961176317, 4.3318809570689503,
4.7424354863417442], [4.4482502719559776, -3.5813275761400054, 1.6790719518347696, 3.3826258086123921], [0.41668209832613634,
3.5649415125773416, 1.6284709294696516, 0.46697670447318185]]], [[[2.4215260841968869, 4.4326478509045142, -4.4189346955673434,
-3.159517367887986], [0.77984444249549512, 0.52106214096843129, 2.2959783381073375, 2.8620825221995148], [-0.83928142017985685,
2.3961087684562008, -1.5912632794131518, -2.6327788717179725]], [[3.9275031010752279, 0.31923128079452301, -2.2762341712323044,
2.3868873445961158], [4.1180987252889008, 0.58883415948539231, -3.5287883781020488, 2.7441939267749778], [4.0184160311880355,
-4.6753857407642982, -0.90603684893730918, 1.3999807565920435]]], [[[3.9197722873955865, 0.8568499678692918,
3.7086105728287162, -0.21167328998860668], [-1.8846910022439625, 4.0109184386500658, -3.7641561030274531,
-0.47027664746013542], [-1.5388852245439333, 2.3534855378811139, 4.2231499372068146, -3.2804922592029619]],
[[-0.05624724006019477, -2.8226259374957232, 2.7214574181168203, -1.3648584776772541], [1.458452045981061, 4.3818228672288857,
-1.779611092507988, -4.0543356587838], [-1.3735137487486937, 4.7330680902546227, 2.3550068942781071, -3.9075745779532935]]]]))
res=sinh(arg)
ref=Data(numpy.array([[[[-13.023440270699259, -2.6733116654343072, 5.9111539019062862, 6.8208952782641425],
[35.7022516817728, 0.26781809379022642, 3.1597027400448123, 1.6932800751791159], [-11.816858393542429, -1.1497800913986027,
-2.329171645463961, 2.7767549168418548]], [[-11.541203334322933, -2.1366650567825722, -31.136015622546896,
-7.0326889417647314], [-9.3785671704044926, -26.734768755711737, -1.2759364890266949, 5.858048933103599], [2.7167063739913448,
-0.65598843191103351, -12.031501841254393, 3.072929944227814]]], [[[-1.6920608694436352, -12.685352743257289,
36.619585226109336, 64.754431865133057], [9.8435290579275119, 6.0630393584281839, -38.398477943694964, -64.585057821227878],
[6.4833728384402045, 11.099336246706669, 29.026252346926242, 0.61644023180207441]], [[-1.0571603791351021, 18.615241481692724,
30.916635699137355, -2.856134354291088], [-36.67670297233353, -4.1347179890900154, -3.998504686318193, -16.570684097163284],
[29.553285005086391, -0.7604675809797572, 27.20359770161199, -8.1939446899377977]]], [[[8.0027790235537211, 17.140948545266145,
-72.977985228256998, -7.6193557153388518], [-19.010054278242894, -0.24237135670329799, -25.620303855164558,
-7.8848508059247102], [20.884691434092481, 1.4650201726547696, -71.878506579845961, 0.46739024222723913]],
[[0.86768804011917233, -66.69419356195327, -24.375691040284792, -6.2512252509030963], [-1.2931970487315634, 13.936699944900679,
1.4913704109166104, -9.3711804841265494], [-5.841126728603502, 2.0057102769093333, 7.7399766017332157,
47.652005601622172]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[4.161752367783798, 1.541926632612777, -3.4255608402399025, -24.483195400361897],
[0.3570257223960186, -17.787257314136472, -0.37228079161732119, -1.9165423642645187], [19.981928064707756, -5.2083266003894222,
-45.466571941208194, 39.796285782641952]], [[5.8978987635397626, -11.121926851445268, 38.037063069187369, 57.352263445220188],
[42.732776058437857, -17.946679307528704, 2.5870158831439984, 14.707018154821265], [0.42884487385080838, 17.654543955312313,
2.4499235521097709, 0.48413477435416613]]], [[[5.5871242630376541, 42.071033145542899, -41.497881296562845,
-11.758388262639029], [0.86132781108094103, 0.54496286536137339, 4.9167436772901087, 8.7203896119493791],
[-0.94134111336199311, 5.4446471750107008, -2.3531397442848156, -6.9202492513096638]], [[25.380166933590768,
0.32468104508626694, -4.8186311121891992, 5.3938307354249373], [30.71301849893301, 0.62345627965547068, -17.026653430917108,
7.7438863291025068], [27.797482713465271, -53.632308557373527, -1.0351868092899348, 1.9042601111874786]]],
[[[25.184561827402003, 0.96561560781209588, 20.386285498772896, -0.21325752577179305], [-3.2162218030768943,
27.589713049238945, -21.552054166525547, -0.48780375093616124], [-2.2223864880448345, 5.21357200785015, 34.116735473841551,
-13.275624175244463]], [[-0.05627690347203064, -8.3807569501370018, 7.5683418170849297, -1.8298760541851917],
[2.0333516065142763, 39.985598858890903, -2.8794234783627348, -28.814753045485492], [-1.8479935964675063, 56.817448854328696,
5.2216543312111838, -24.87899171972731]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
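# cosh is even and bounded below by 1, so every reference value in the cosh
# tests is positive regardless of the sign of the argument
# (e.g. numpy.cosh(-3.33964228765) ~ 14.12224191, matching the rank-0 ref).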
def test_cosh_taggedData_rank0(self):
arg=Data(-3.33964228765,self.functionspace)
arg.setTaggedValue(1,4.45561542511)
res=cosh(arg)
ref=Data(14.1222419084,self.functionspace)
ref.setTaggedValue(1,43.0603707159)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cosh_taggedData_rank1(self):
arg=Data(numpy.array([-0.13877438581352841, -0.98259382848166865]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-4.3574558223327253, -1.677224337266833]))
res=cosh(arg)
ref=Data(numpy.array([1.0096446284726606, 1.5228576115054628]),self.functionspace)
ref.setTaggedValue(1,numpy.array([39.035549183605326, 2.7687878265704762]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cosh_taggedData_rank2(self):
arg=Data(numpy.array([[3.6339325575992536, 3.6003092610162302, 1.9584137399530954, 1.0694664510664698,
-0.37917195236293022], [2.8785993252032664, -3.0900484176532297, 3.1752833068489892, -3.5111549864703031, 4.255322840622437],
[2.0391630012792916, -4.3327016874450539, -2.6982102068084535, -3.0441546421582411, 4.214997939810214], [4.805038197601009,
-3.2837195225291618, -2.531898818520947, -2.9915602053560839, 0.95310199737158996]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.23915076781097699, 3.7715468041843607, -4.3893406000801383, 3.9409596096136763,
-4.4591106172531809], [-0.79570459044341924, -4.9855183758363273, -2.1377195897080261, -1.2985129868699143,
2.5687675010839595], [-0.72076603643646031, -1.2171966834491519, -0.70239230129778285, -4.1634325077059406,
0.14043642198635808], [-2.1962284423656131, -0.68365662414911732, -1.1633302866935447, 4.5948451769437177,
-0.4700634110282973]]))
res=cosh(arg)
ref=Data(numpy.array([[18.943914275341193, 18.318434937398699, 3.6145783248028835, 1.6285079976630699,
1.0727510814321364], [8.9227760436425374, 11.011820915712233, 11.987690862915786, 16.758392401505439, 35.246877846320565],
[3.9071562261311463, 38.081436864226781, 7.460224902554307, 10.5199571391222, 33.854399380223668], [61.066172085048954,
13.356147253406316, 6.3284367588194961, 9.983470747464299, 1.4896430948431345]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.0287330987029193, 21.735116828944143, 40.299835565663813, 25.743698163829791,
43.211097722447775], [1.3336324938705348, 73.143109653419216, 4.2990004786149294, 1.9683908914628332, 6.5631803398985999],
[1.2711935565261079, 1.8368823113377528, 1.2569873598703747, 32.153690276380928, 1.0098774121609404], [4.5511305484919777,
1.2429382704244676, 1.756509148044366, 49.491457436632956, 1.1125291456069766]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cosh_taggedData_rank3(self):
arg=Data(numpy.array([[[2.2594094214565343, -3.3290383685707656], [4.5122363492198989, 2.3616061614668551]],
[[-0.78315440920111534, -4.9445078035481647], [-2.5870583922278079, 0.54336796085071803]], [[-2.2564960296248815,
-1.0922215776887101], [1.7220877741473926, -1.2680128955602719]], [[-4.7968660202901106, 1.8820956661735808],
[-0.55071124481399369, -2.392255525132474]], [[-1.9561751623976353, 3.6639866209171164], [1.6308692498233102,
1.3156981063393305]], [[0.93858824856890788, -0.37594139126048809], [3.0401317577773099,
3.3062633851984806]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[4.5082410257474734, 2.7799840957038571], [2.9998965498752392, 0.8376503607327983]],
[[-3.0742571047141567, -2.6501688991516961], [4.8047153237939639, 3.6547840727268941]], [[1.4892709697169861,
3.1091178911469477], [0.84905211092721178, -4.0113192232287229]], [[0.57916069108996293, -3.8200016353254016],
[-4.3902497028351064, 1.1074094810753987]], [[1.0867920869099219, -4.9751281118846391], [4.6498844446615237,
-4.6057390252989627]], [[4.2083455246221426, -3.299687693396459], [-4.8051197517458437, -3.7372411259947755]]]))
res=cosh(arg)
ref=Data(numpy.array([[[4.8409217004581384, 13.973657885406432], [45.568176435498991, 5.3511222948482491]],
[[1.3226633378307002, 70.204425514379224], [6.6829296515419099, 1.1512924649961667]], [[4.8271429171721767,
1.6581796951312628], [2.8874462830250218, 1.9175870101400594]], [[60.569229009714817, 3.3597619421386007], [1.1555129136535931,
5.5147803140711771]], [[3.606811684222702, 19.521103835073294], [2.6520362629520506, 1.9978195651943991]],
[[1.4737748024949511, 1.0715021754303207], [10.477913216116429, 13.659820812115026]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[45.386523884584705, 8.090402041223733], [10.066625699236537, 1.371828409565327]],
[[10.84001412211161, 7.1135343667655935], [61.046461244057511, 19.342519863210843]], [[2.3296994698881761, 11.222957817551869],
[1.3826252214777885, 27.61888954682524]], [[1.1724542565995428, 22.813105331832602], [40.336477634532706, 1.6784609004147599]],
[[1.6510224629867831, 72.387139007404855], [52.291231814415738, 50.03344721314393]], [[33.630031580706699, 13.57053373731201],
[61.071151819778152, 21.002913453619939]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cosh_taggedData_rank4(self):
arg=Data(numpy.array([[[[-4.2974496482443385, -4.5141752496351151, 1.279224746016018, 3.6686464638653025],
[1.5642465306660078, -3.6550169225724973, -4.783271042866363, 2.3639071234236022], [-4.8837760852087531, -0.92880494563806426,
1.1661108769933435, -2.869160403619524]], [[-3.4776906208283864, -0.40395614262695556, -1.5446529839825116,
-2.7579904120989451], [1.8349405477146661, -3.8378201393977585, 1.3433045012858305, 4.3472141505175408], [0.7181640331745669,
-0.41398944472294108, 4.6769407318346552, -4.8778258871553088]]], [[[-2.4849883440153242, 2.1345431108307942,
-0.09067629606352412, -4.3150106990500738], [4.3118061338163951, 2.4975944032177999, 4.175128675721389, -0.7758441537477232],
[-3.9713262603505806, -3.3159913471560118, -0.18233452053653032, -0.66293502941204352]], [[4.9788464217729853,
0.65529004311657868, 3.3675428303822663, -0.079349711831890879], [-1.8723533081093855, -3.0300430916460952,
0.51874134723845078, 4.5086381057403386], [2.9026067240258424, -3.6918617768589703, -4.0661575918193105,
-3.5278224263153781]]], [[[0.0015231207730606044, -3.4671950612278115, -4.7151737408852421, 1.3355453672089359],
[-3.611921752531714, 1.3179876222783404, 2.4802830245059315, 3.4904987636760065], [1.6663642593810604, 3.9457517334304164,
0.61643625583120443, 2.7089869134481699]], [[1.2885016388929635, -1.4828486104919558, 3.4244574147497708, -2.1274761147070054],
[-4.9722256226904884, 2.1224822895404829, -3.3802787629574036, 4.6747109104337454], [3.8750910639279113, 4.8191435251217936,
2.4467943595773542, -0.015571286302071385]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[0.43479655428703889, -4.453019927481642, -0.57836799850371357, -2.1495952675388672],
[-4.5343815245793015, 1.5310770099215967, 2.8984213237035359, 3.0780399132153011], [4.5122783435803022, 4.4843493252518059,
1.3586522171022919, -2.3081057748737344]], [[-1.1824987809769425, 2.4884096785953993, -4.5317815425489592,
0.67363953752730943], [-1.9843232485967732, -2.0085682378296066, -4.1607211508996613, -1.0505768606305352],
[-4.4766448951046929, -1.0969734828880795, 2.5602234150431045, 1.6978621896143666]]], [[[4.3644036146205014,
1.5620129800850844, 4.5439913548426514, 2.3331518079278046], [-3.8887166881366895, 0.0069936039297040509, -0.20591644090240813,
4.4383248956821379], [-0.87172659086694004, 0.82903722202787833, 4.1864529892369333, -2.8022144333005317]],
[[4.5115179717289795, -3.6483711729962587, 0.81876939622194733, 1.689971164217746], [-0.33458354116486344, 3.5336789816595413,
-3.3644747866075297, -4.6902428060034644], [4.2804200141624467, -0.56490089533574483, 3.3489598815617807,
-0.009823071629792679]]], [[[4.5748546036038853, -4.5647170547892832, -4.4595799485564624, -3.8442023386090449],
[-2.5666480472200193, -3.9269313339727696, 2.6734901478379864, 0.8905586703061914], [3.0050076872803686, 4.9517454070040685,
-4.3279279631812981, 0.42460718079153104]], [[4.3245850003831627, 4.445343421639647, 3.892492263981266, -2.5689296346773061],
[2.0304529199152448, -0.93601555140498416, 4.4870935497851416, 1.9197064347913448], [-0.7708198383595688, 3.9685457595406746,
-3.4935088288480665, 3.1260055792475629]]]]))
res=cosh(arg)
ref=Data(numpy.array([[[[36.762837994027265, 45.65659302281535, 1.9360527151951246, 19.612161951845756],
[2.494159154100938, 19.347018267235629, 59.751475973278325, 5.3632322912433219], [66.0681102505865, 1.4632538079216522,
1.7605312973779623, 8.8394814942999496]], [[16.207862700025089, 1.0827058308402404, 2.4498655073561295, 7.9157713718384866],
[3.2121923357012863, 23.222853177989286, 2.0463331206435584, 38.637931391546402], [1.2691557688218085, 1.0869245433467289,
53.72509313313838, 65.676204192935117]]], [[[6.0421534482658936, 4.2857410419310309, 1.0041139129568435, 37.413895144702927],
[37.294234611722473, 6.1177528566639348, 32.53179123057128, 1.3163701272671944], [26.536848580189528, 13.79299490344593,
1.0166690434382355, 1.2279079665662052]], [[72.656772164263117, 1.2224961982676807, 14.520819069300805, 1.0031498405789798],
[3.328672363997796, 10.37321902129923, 1.1375906033763266, 45.404545168785155], [9.1382310638755566, 20.072198262374123,
29.174768845073917, 17.039554903078329]]], [[[1.0000011599486689, 16.038965793559022, 55.818581090736359, 2.032541800778572],
[18.532080317128734, 2.001784597279733, 6.0141820470909675, 16.416394943974005], [2.7409109645024752, 25.867268111398666,
1.1960900211712384, 7.5403307581926216]], [[1.9515154764366831, 2.3162337362313838, 15.369273332817901, 4.2563965180112602],
[72.17736078389747, 4.2357887581890381, 14.706498851241967, 53.605449988802455], [24.103919322318855, 61.933518473582737,
5.8189143906114449, 1.0001212349281225]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.0960225692535404, 42.948782682946323, 1.171969429063197, 4.3489579207932341],
[46.588305110529134, 2.4197279528705185, 9.1002935322558294, 10.880922619136969], [45.570089621262504, 44.315276977830948,
2.07397627472012, 5.0774044710830113]], [[1.7845141072786201, 6.0625759919859421, 46.467361591383025, 1.2356061898862878],
[3.7057982822633435, 3.7934099749489825, 32.066670403577341, 1.6045180010307729], [43.975251828592121, 1.6644838293860444,
6.5079975829609102, 2.8226660571902742]]], [[[39.307615406934822, 2.4890619012612207, 47.038065761129225, 5.2036881533245731],
[24.434315230816242, 1.0000244553476394, 1.021275808528771, 42.322434694633777], [1.4046322236615734, 1.363790576586579,
32.902111174866675, 8.2708891230076151]], [[45.535460921925825, 19.219040991386912, 1.3543407276740012, 2.8019246386266583],
[1.0564971895349591, 17.139468791631344, 14.476442601019187, 54.444398838889946], [36.142312618675867, 1.1638449492852287,
14.254111851189526, 1.0000482467560752]]], [[[48.512120216524281, 48.022915117799073, 43.23137737168971, 23.371402546329659],
[6.5494471264698602, 25.385352322226726, 7.2797329080762765, 1.4234584308282261], [10.117954823599094, 70.714328957337898,
37.900142492657835, 1.0915081670348838]], [[37.773699375110674, 42.620437709879383, 24.526665923553246, 6.5642321138931639],
[3.8744058806768513, 1.4709944873492109, 44.437024098405821, 3.4828031871171841], [1.3120856335155002, 26.463217744961234,
16.465792043660247, 11.413343128307044]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
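# tanh saturates in (-1,1): for |arg| greater than about 3 the references are
# already within ~1e-2 of +/-1, so these tests exercise the tolerance check
# near saturation (e.g. numpy.tanh(-2.6745098131) ~ -0.99053935).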
def test_tanh_taggedData_rank0(self):
arg=Data(-2.6745098131,self.functionspace)
arg.setTaggedValue(1,2.24999429988)
res=tanh(arg)
ref=Data(-0.990539348174,self.functionspace)
ref.setTaggedValue(1,0.978025866982)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tanh_taggedData_rank1(self):
arg=Data(numpy.array([4.0429210388498369, -4.553079746235924]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-4.0380504188786954, 4.5601616689616158]))
res=tanh(arg)
ref=Data(numpy.array([0.99938445513124963, -0.99977806439502981]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.99937843153347994, 0.99978118535812588]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tanh_taggedData_rank2(self):
arg=Data(numpy.array([[0.94929715636828504, -1.1252080104850992, 3.0775450372379431, 0.39392537807042416,
4.6611413956493752], [-2.2781170875126575, 0.39864122049943695, 1.4807319239075429, 0.31907060217879479, 2.7340061400084306],
[4.8281201559380023, 0.33278204427698199, -0.03019536420573754, 0.85159999608142201, -2.8485402708762573],
[-4.0655536970027297, -2.3543586836588961, -3.0951504374238881, 3.9262504055167966, 4.7776251671833698]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-2.7905907538531682, -1.9238988637436236, 2.4967949227313904, -0.98038029629172208,
-0.53276288893683077], [3.1990612919316312, 4.2720182948609757, -3.544755273211091, -0.63684518647695842, -4.212099692504844],
[4.4910646440702422, 3.6890101388015637, -4.5732677642340178, -3.0827917856459921, -4.8189743425755394], [1.5007530732396921,
-1.3393262317293884, 3.1997937399572649, -0.11051581408317634, 0.6210324333144257]]))
res=tanh(arg)
ref=Data(numpy.array([[0.73946469369799694, -0.80937282834822533, 0.99576369263661357, 0.37473932010645611,
0.99982119683898318], [-0.97921520913008431, 0.37878573799455306, 0.90160502733343728, 0.30866631305972148,
0.99159623814157904], [0.99987195863139711, 0.32101834788909706, -0.030186190576096662, 0.69190442064171198,
-0.99331095049135554], [-0.99941168874938979, -0.98212846610382587, -0.9959099611394906, 0.99922274519998722,
0.99985835320805616]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-0.99249206904458975, -0.95823735370164276, 0.98652879793628578, -0.75323048485240829,
-0.48749022343925108], [0.9966761738303791, 0.99961067033016027, -0.99833377855485317, -0.56274764873125716,
-0.99956111435622053], [0.99974876146582037, 0.99875110702314973, -0.99978684584692601, -0.99580782119916667,
-0.99986959514911722], [0.90528424625476578, -0.87151032140220963, 0.99668103125140428, -0.11006806410778014,
0.55184645934107424]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tanh_taggedData_rank3(self):
arg=Data(numpy.array([[[3.9319419321492326, -4.6773949429983581], [-1.6957074074546696, -0.3229077445540991]],
[[-1.8266060427348019, -1.730201856159189], [1.5347509785248175, 1.5800451406727483]], [[-3.9746856156268384,
-3.378147662479086], [-4.700768864980577, -2.8259083525191455]], [[-4.7178549411356228, 2.3147937121451543],
[-2.5287575330841805, -4.9759550410080289]], [[-1.9889442091701359, 2.0763858673555893], [-0.89324314818143424,
4.8435662338741032]], [[1.0855719509476556, 1.3980856479384425], [-3.3411855727218343,
1.1884422150953995]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[0.18007476897031971, 2.6390160533098843], [0.66061662665767606, 3.6870086302202214]],
[[1.7676633929476484, -3.1629355019247907], [-2.2255792588360457, 2.7793800480694166]], [[3.7616232526774755,
-3.5469102256069442], [3.3179169097905596, 3.7942072824756714]], [[-3.781196309444411, -1.8379556904559688],
[-3.6304445504488481, 0.19089611625277758]], [[1.6588646948326788, -0.82644991727768691], [-1.3992808319709527,
1.1558232933098402]], [[-0.16027203890771524, -1.1043264438901468], [-4.4435817783439955, 0.75121698416917582]]]))
res=tanh(arg)
ref=Data(numpy.array([[[0.99923153918711738, -0.99982691525909118], [-0.93487029410812394, -0.31213374827028922]],
[[-0.94949297086745355, -0.9390797830900246], [0.91123410731563181, 0.91860894220900358]], [[-0.99929448101174156,
-0.99767564105310336], [-0.99983481973131216, -0.99300230526074806]], [[-0.99984036849946445, 0.98067105796147169],
[-0.98735772705782232, -0.99990473142376457]], [[-0.96323809712209596, 0.96904507128497563], [-0.71299189138499997,
0.99987585339128593]], [[0.79525625932489785, 0.88493715428778319], [-0.99749752737187658,
0.83009511356467591]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.17815326499048401, 0.98984688185870251], [0.5787736291230885,
0.99874610080415804]], [[0.943352763565379, -0.99642757853354436], [-0.97693891859499782, 0.99232248276775581]],
[[0.99891983292877162, -0.9983409384133336], [0.99737847288418935, 0.99898794619179809]], [[-0.99896127879588104,
-0.95059853090699908], [-0.99859601991351088, 0.1886105895864382]], [[0.93006410027832387, -0.67856517259541438],
[-0.88519609925054854, 0.81967404386980236]], [[-0.15891368471195128, -0.80204771283378307], [-0.99972373624583699,
0.63587442674619299]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tanh_taggedData_rank4(self):
arg=Data(numpy.array([[[[-4.3904786729676273, -1.3677399701000903, 1.2492768693436567, 1.6695091701042077],
[-1.1627795517358663, 0.35660855309170714, -0.46129260277138862, -1.7611848739159894], [-3.7810577291663838,
-2.0467897156040902, 1.1706919709615651, 3.7103119132055848]], [[-1.2077170353024047, -3.0220575428815022, 0.54424950738834799,
-4.0974212221222857], [1.0870071905494978, -3.9509226719566701, 4.9383274385239844, -3.462350798012598], [1.106586239998987,
-0.73756837828936561, -1.1154183434553619, -1.010742353319559]]], [[[4.2742091858943052, -4.4776532415095307,
0.78417239720070508, -2.9515811360954225], [0.26505756362717126, 0.20435778865898513, 4.0715597053665036, -0.5982678836059776],
[2.3077118446636131, -0.87190481734862324, -3.4819139618408279, 3.2206093747921081]], [[-2.1956521025267159,
2.8685733283260415, -0.16719147893383024, 2.4042635129004015], [0.87519664955802856, -3.7014687212979061, 2.638378779605917,
-0.83596449534645867], [2.3208311754025477, 3.8824298417061609, 2.4604804345754143, 3.5948332657865727]]],
[[[-0.26749703618618348, -1.2826804689293789, -3.264731588047407, -1.7231794009474299], [1.3682684482114569,
-2.0116005895625753, -3.1642117538144801, -3.5928937676969785], [-4.1224336582508689, 3.7918402890951022, 2.1360683781019247,
-4.5145324695032318]], [[3.676700537089971, -1.3877142255602957, 2.3563702753938749, 4.6093296257172032],
[-0.079417586476702162, -1.4360928554366206, 2.4988989289519861, -3.5116673714054611], [3.720863735327919, 0.38544618147401621,
1.7572959880964749, 3.4491478912053424]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-2.7769145726094999, 0.11945281953911202, -1.4067552037790296, 1.5589683509896632],
[-2.7510965901641535, -1.0370837248525575, -0.28874003212379407, -0.98700887192113651], [3.1057519393572068,
-1.4003943846167175, 0.70059038875906499, -0.36200211282703698]], [[4.6955874639656408, -0.66345419856759413,
1.7641256430203081, -1.9073904288166155], [4.4353606592905734, -0.36559209725971975, -4.9392722083184717, 4.9226038002143397],
[3.836648384845299, 2.6929475146669537, 2.4329110513061476, 2.8445249965550436]]], [[[-0.89043276203920829, 4.3113456509537027,
1.778020452151754, 0.12369555701304868], [4.7844077059707608, 1.4489307402634841, -3.6503174739201869, -1.99283222133817],
[-4.7433953583709343, 4.6096860898229437, 0.18591799454264546, 3.3160695299285372]], [[-4.624734823596115, 2.2410297338758411,
-0.83950933296629859, -2.4824381853162203], [2.0752791842153231, -3.8967447836233173, 3.083827109819147, -2.2637401638383436],
[0.55648965417526242, 4.7482343482895963, 3.3904464820695814, -2.9209716988816012]]], [[[-1.9149730632634174,
-4.2971583535961386, -0.26052672348999018, -0.71626825945409145], [-1.1898289287067843, 4.7073960353737476,
-1.1931947279614228, -1.5559762703348001], [2.1894866324076894, 1.391085552226893, 1.9956929272477284, -1.7691043597388703]],
[[-3.0697918472745589, -3.5894138807767617, 0.38379736482434179, -0.72520622611928953], [-2.2696826972861492,
-2.7804417698429775, -0.16059887652981075, 1.1741874888349084], [4.9025685475518319, 0.76347964182875927, 3.5862084201560478,
-0.34438423874314417]]]]))
res=tanh(arg)
ref=Data(numpy.array([[[[-0.99969278532403616, -0.87817610848912808, 0.84808073894312919, 0.93148676159885946],
[-0.8219436543648142, 0.3422232651605423, -0.4311371323644399, -0.94263519675608121], [-0.99896099101301683,
-0.96718843060907411, 0.8244938727134159, 0.99880316564106586]], [[-0.83599336463688867, -0.99526766658497667,
0.49619794188256428, -0.99944800485077057], [0.79578320492270638, -0.9992601537435607, 0.99989728568405345,
-0.99803553852335436], [0.80285236632034096, -0.6276737849707098, -0.80596936061049773, -0.7660688874136774]]],
[[[0.99961237222589605, -0.99974193223298102, 0.65509503516962486, -0.99455328260935616], [0.25901992634275139,
0.20155971686626986, 0.99941871123847936, -0.53581588511280043], [0.98039804226660943, -0.70234062733546598,
-0.99811084524004323, 0.99681615203384977]], [[-0.97553386909203443, 0.99357281253397534, -0.16565086856274386,
0.98381234887456548], [0.70400480322945558, -0.99878182267865834, 0.98983399879723288, -0.68366567252371069],
[0.98090083462602384, 0.99915158173096452, 0.98552133873443648, 0.99849245594704916]]], [[[-0.26129428984398761,
-0.85719744491289385, -0.99708467717848859, -0.93824473321643498], [0.87829697173019017, -0.96483807098709573,
-0.99643666929946806, -0.99848660126662192], [-0.99947493196034642, 0.99898314622584328, 0.97248007751598686,
-0.99976027967480008]], [[0.99871999858964711, -0.88266698117705134, 0.98219958347946346, 0.99980167654271335],
[-0.07925104034615385, -0.892908449932727, 0.98658498639005543, -0.99821988625575575], [0.99882814391514707,
0.36742779628281924, 0.94220023134945963, 0.99798302738464895]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.99228467821875421, 0.11888788724678255, -0.88680309929422041,
0.91525316568888726], [-0.99187748567038492, -0.77673384310464333, -0.28097472652808358, -0.7560840082512007],
[0.99599559753350175, -0.88543686564725355, 0.6047423865273871, -0.3469763293343231]], [[0.99983309924459796,
-0.58065757519364325, 0.9429620059078242, -0.95686570094178047], [0.99971915694545332, -0.35013016421316723,
-0.99989747957363229, 0.99989400444715759], [0.99907026835111246, 0.99088029778882336, 0.98470685939018232,
0.99325719931769052]]], [[[-0.71160741539570893, 0.99964011430124544, 0.94448185426893583, 0.12306852263067239],
[0.99986026155085483, 0.89548124193836365, -0.99865069097308279, -0.96351765578941984], [-0.99984831724864787,
0.99980181786870692, 0.18380508703408843, 0.99736878182221311]], [[-0.99980769321539753, 0.97763278263585041,
-0.685549085155853, -0.98613910571125796], [0.96897754473209763, -0.9991755175591378, 0.99581647460748857,
-0.97861537415463118], [0.5053682585729965, 0.99984977804083497, 0.99773205331379355, -0.99421041813147026]]],
[[[-0.95750111372509072, -0.9996297583526631, -0.25478812887481112, -0.61459245431351639], [-0.83052580516350172,
0.99983699446428242, -0.83156705197266667, -0.91476618401113408], [0.97523406991146411, 0.88340949152279369,
0.9637220149745247, -0.94351117775814486]], [[-0.99569763330906691, -0.99847603966904497, 0.36600071186854327,
-0.62012380533737677], [-0.97886535761949167, -0.99233870586149786, -0.15923225196341351, 0.82560995086101829],
[0.99988967114712812, 0.64312197086910849, 0.99846624581148857, -0.33138582492983976]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
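# The asinh cases below follow the same tagged-data pattern; the references
# correspond to numpy.arcsinh. A minimal standalone sketch, assuming plain
# numpy and an illustrative 1e-8 relative tolerance (self.RES_TOL itself is
# defined elsewhere in this suite), using values from the rank1 case below:
#
#   import numpy
#   arg = numpy.array([-50.957589662824198, 43.941100766909756])
#   ref = numpy.array([-4.6242371551287169, 4.4761267522983275])
#   assert numpy.max(numpy.abs(numpy.arcsinh(arg) - ref)) \
#          <= 1e-8 * numpy.max(numpy.abs(ref))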
def test_asinh_taggedData_rank0(self):
arg=Data(46.3645811357,self.functionspace)
arg.setTaggedValue(1,98.4380067047)
res=asinh(arg)
ref=Data(4.52979928711,self.functionspace)
ref.setTaggedValue(1,5.28259995573)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank1(self):
arg=Data(numpy.array([-50.957589662824198, 43.941100766909756]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-31.858501623280745, 39.107585495989866]))
res=asinh(arg)
ref=Data(numpy.array([-4.6242371551287169, 4.4761267522983275]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-4.1546976770753421, 4.3596270535740214]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank2(self):
arg=Data(numpy.array([[-20.169170810618326, -4.4530711308543118, -5.3065110218440452, -8.4088220772265316,
-56.444316808490115], [-33.229801569473778, -44.603828873814734, 39.260385275691903, -60.813530866399979, -67.011560484373405],
[63.34900773972393, 13.17996875841969, -84.621298599133738, -27.161422270695113, 78.248898320973581], [-98.098038498193404,
95.682616010306447, -58.113208847615525, -79.134026237356125, -29.391569621781727]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[17.836298714213882, -77.588227218715232, -61.26367562584587, 19.375142389965802,
89.334409995076243], [2.9065687663115227, 51.893692489828197, 11.895367000745495, -8.1024096735480953, 71.448735058484459],
[-50.921060735037948, 40.334991542461438, -11.902046289316189, 56.33007303532878, -27.166995246623955], [-82.821608578095123,
-91.599639663887103, 86.585921151704355, 48.186701674446084, -3.9531724905915979]]))
res=asinh(arg)
ref=Data(numpy.array([[-3.6979164153723203, -2.1991164930555258, -2.3708439269598305, -2.8259456306618453,
-4.7264802495600335], [-4.1968206188493351, -4.4910925210264905, 4.3635253359028381, -4.8010270839542413, -4.898067997438317],
[4.8418687142690242, 3.2732814095310392, -5.1313680826244967, -3.9952835475913395, 5.0530827588070446], [-5.2791405298139438,
5.2542111175109474, -4.7556141841594481, -5.0643300499069621, -4.0741443379636699]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[3.5751677238153401, -5.0446044112554631, -4.8084008830850919, 3.6578034038105751,
5.1855652488628774], [1.7884793917224828, 4.642437253245169, 3.1710583006908988, -2.7890952467104695, 4.9621763496592148],
[-4.6235201866096052, 4.3905201792510207, -3.17161767446549, 4.724454508591605, -3.9954885674968175], [-5.109872625312927,
-5.2106043127238761, 5.1543177540685887, 4.5683379150373344, -2.0832921903606496]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank3(self):
arg=Data(numpy.array([[[-53.045516481019646, -34.703398617100873], [58.744179081028165, 45.73939652168292]],
[[88.640179862797964, -15.929739850377061], [20.336500323486419, -26.009231077461465]], [[67.483452353018436,
-83.415215077694313], [-43.73819066557256, 0.34649147770160482]], [[94.466567030758256, 57.78821000816265],
[-93.07193931131404, -65.081452487206121]], [[-54.611456218841695, 17.51214150630156], [5.6853926345566492,
38.237862836031212]], [[1.5782708895186488, -79.609362925181571], [47.883885039412519,
99.778654373519828]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-81.045113203624751, 0.65914527304526871], [32.93544022845623, 58.747988317145939]],
[[-12.311575835767854, -70.143366604591705], [-1.6980341384122681, -27.212534038212041]], [[55.458512265543362,
-94.003044543229095], [-62.792580806533628, -27.521709794921676]], [[41.596851570120577, 44.487697223450283],
[2.7831853943152538, -67.591557346139922]], [[47.14957401263112, -88.752613111599388], [91.038711972236257,
18.784281872602193]], [[66.890360146771712, -3.1392983005148949], [-98.753784215323947, -58.363920786326858]]]))
res=asinh(arg)
ref=Data(numpy.array([[[-4.6643863622180026, -4.2401923259759915], [4.7664116866516526, 4.5162266515773712]],
[[5.17776424960676, -3.4623187168170242], [3.7061684380230009, -3.9519680523364986]], [[4.9050844901356578,
-5.1170138356918358], [-4.4714994864581099, 0.33990819970999336]], [[5.24142117780635, 4.7500068099460622],
[-5.2265487748261839, -4.868845799503112]], [[-4.6934746764153612, 3.5568558212420287], [2.4386934192694749,
4.3371443181086304]], [[1.2374103926768709, -5.0703183356413444], [4.5620352151292289,
5.2961265670636051]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-5.0881911926886616, 0.61887608041426068], [4.1879268588038681, 4.7664765196432715]],
[[-3.2053324091778697, -4.9437392306615724], [-1.2998232670923699, -3.997162286806859]], [[4.7088636688405723,
-5.2365026413753837], [-4.8330475059782119, -4.008452214765553]], [[4.4213161140266797, 4.4884861572133286],
[1.7475584270504236, -4.9066849811247062]], [[4.5465845923807562, -5.1790317883473698], [5.2044621654621297,
3.6268753961564348]], [[4.8962579135667461, -1.8615996019862997], [-5.2858025387415069, -4.7599184690473226]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank4(self):
arg=Data(numpy.array([[[[32.142378157394234, -7.8682084560468724, -32.972614582663724, 50.856847074540553],
[72.329877464044415, 6.0619145811457571, 71.261710531993657, 70.2065904388474], [61.147646057937493, -26.137436099401938,
48.323697144662191, 29.857105568663485]], [[81.14862167131389, -28.070075464932472, 54.484029947945999, 53.274297598689998],
[51.817829777738496, 55.524654561168546, 31.893469267783274, 98.108247444728335], [25.185957882420567, 56.589702849849886,
29.257428051768414, -49.316002216427599]]], [[[91.093502909783012, 30.593790782804035, -52.906781759597266,
37.807168034506248], [91.33559788100942, 46.479645801342286, 45.285940387630603, 17.009006113589351], [98.990499666054916,
20.732810397625983, -52.586859007443024, -97.39008994479434]], [[60.855541035297279, 43.563415593268758, -10.416755000859922,
19.761378421237396], [45.545393669751689, 34.038254695973365, 61.458790464133983, -93.805588539667809], [70.373745615324566,
-69.821983987919253, -17.526059272214738, 99.463265178516878]]], [[[42.375759778528959, -71.513498720101126,
43.403494376930126, 11.702516371887256], [-68.02507709473943, -82.804863052600837, 17.935644233624799, -1.5560052642727271],
[1.3086438337024902, 19.0159623777798, -43.415467037283427, -1.6840694232704436]], [[-76.523723879344232, 36.460220047753864,
74.414529475659975, -40.585507061813097], [61.18925351487826, 60.973990669294437, -56.486512227103702, -91.992194442103738],
[-50.821095523487195, -8.7683370172323407, 99.212906160042508, -49.787947715823513]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[5.2104342432591579, -30.037610349220742, 89.76520642245714, 84.346276912645891],
[-55.935949781763505, 3.6554505577462351, -69.732922792584205, -85.696618441581251], [34.087801962805088, -57.540358433913227,
-66.932756076465267, -61.739307728871459]], [[-38.745454217109639, 47.2458765604907, -48.024451295756876, 98.938828051951276],
[-18.113719915986181, 30.600562603733465, 62.13859672089356, 79.646004829972981], [62.93949402434626, 85.362178604516401,
-79.088554588305286, -30.549957095115914]]], [[[-21.024971919379709, -46.9921546943443, -77.839828653838069,
30.940535083915421], [70.790958255553022, -44.895476702573319, -36.648852352895432, 12.929335827946304], [-6.9478133681051872,
-62.232340492245108, -42.755472485742985, -56.420558326951522]], [[-32.044278205615356, 79.157959500980951,
-76.393704765628769, -52.443645665174962], [16.265823630600167, -55.034754577520985, -47.645861374723552, -89.04121484500331],
[94.688526939952055, -16.61551229436607, -99.980912127854069, -47.709640655711503]]], [[[2.1087843309750127,
-46.754097185308829, -43.01720776980433, 85.276796349298849], [-4.6606838290411474, -81.34895135365592, -85.417222857880887,
-96.332056825957508], [-79.83581002747087, 21.866282224322433, 68.064610754277766, -47.003477247839534]],
[[-62.743770898030562, 72.147582177197421, 69.296613493157508, 28.171166780459345], [75.529397553659948, -35.058371858520204,
-28.47809790522318, -75.017021702145499], [-37.177757115795629, 38.676084888663922, -63.72524923587919, 1.7494417076027844]]]]))
res=asinh(arg)
ref=Data(numpy.array([[[[4.1635644265624778, -2.7599915358235867, -4.1890544070007278, 4.6222585911216543],
[4.9744322502971672, 2.5019077838324715, 4.9595555706500258, 4.9446400848058811], [4.806505402870445, -3.9568815433193461,
4.5711762859685958, 4.0898503596713338]], [[5.0894674517671321, -4.0281683959008951, 4.6911390175212615, 4.6686892479006827],
[4.6409745686395487, 4.7100554093212761, 4.1557941279942145, 5.2792445878073861], [3.9198276751840293, 4.7290522782152147,
4.0695726016881411, -4.5914985748389192]]], [[[5.2050637903432921, 4.1142112460980922, -4.6617680124018888,
4.3258207506496307], [5.2077177590877843, 4.5322773741285998, 4.5062656785445956, 3.5277531738452392], [5.2881965746419173,
3.7254457616153438, -4.6557038316413522, -5.2718979969231512]], [[4.8017175551692963, 4.4674965942390115, -3.0388587872128419,
3.6775161997905594], [4.5119771682426055, 4.2208479181377072, 4.811580237341091, -5.234400023643297], [4.9470179186672389,
-4.9391473745425918, -3.5576489640531608, 5.2929608319555426]]], [[[4.4398628679964025, -4.96308228583203, 4.4638198136319165,
3.1547715938291749], [-4.9130776200696218, -5.1096704317305806, 3.5807134591217551, -1.225433176641918], [1.0837103112235609,
3.6391165749315824, -4.4640955479804072, -1.2927150793063693]], [[-5.0307906787819405, 4.2895569942507352, 5.0028435354728717,
-4.3967099542129784], [4.8071855230902134, 4.8036618081252804, -4.7272274114351864, -5.214880451554639], [-4.6215554958906369,
-2.8675302639627946, 5.2904406860829596, -4.6010209599440053]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[2.3528942953432184, -4.0958744228371096, 5.1903756488916493, 5.1281129886870076],
[-4.7174343552973603, 2.0075716195155695, -4.937871144778371, -5.1439945873654223], [4.22230186808053, -4.7457092677460411,
-4.8968914542943383, -4.8161335672436891]], [[-4.3503271096734606, 4.5486245418147186, -4.5649658426227173,
5.2876744794258697], [-3.5905779156353059, 4.1144324497476124, 4.8225792420650633, 5.0707784647637046], [4.8353841364967165,
5.1400846170845353, -5.0637553150269641, -4.112778220940899]]], [[[-3.7394231192664891, -4.5432410388930284,
-5.0478416752898454, 4.1254753641580617], [4.9529283477765169, -4.4976082378834796, -4.2947153739876791, 3.2541381655755188],
[-2.6367133653925436, -4.8240865342451933, -4.448781110277845, -4.726059308101533]], [[-4.1605091983900415, 5.0646324196519528,
-5.0290903098032347, -4.6529772433272774], [3.4831569454011433, -4.7011945976992884, -4.5570530599693662, -5.1822780622147535],
[5.2437679039808112, -3.5043882377854798, -5.2981514782154209, -4.5583904818282788]]], [[[1.4912530895747718,
-4.5381634201485017, -4.4548824695960123, 5.1390839500525125], [-2.2436246899234331, -5.0919328992003212, -5.1407291969411935,
-5.2609752679328521], [-5.0731585525994474, 3.7786154625242512, 4.9136585515533389, -4.5434819009655412]],
[[-4.8322699827017939, 4.9719089789160158, 4.9315952755712287, 4.0317610464510798], [5.0177129533122331, -4.2503649608354834,
-4.0425905960208626, -5.0109066456916977], [-4.3090386599181487, 4.3485357163877962, -4.8477895988422537,
1.3256207429919689]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
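# The acosh cases below keep every argument >= 1, so numpy.arccosh is
# well-defined on all of them. A minimal sketch, assuming plain numpy and an
# illustrative absolute tolerance, using the rank0 case below:
#
#   import numpy
#   assert abs(numpy.arccosh(49.9810509193) - 4.60469104168) <= 1e-9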
def test_acosh_taggedData_rank0(self):
arg=Data(49.9810509193,self.functionspace)
arg.setTaggedValue(1,71.3408711101)
res=acosh(arg)
ref=Data(4.60469104168,self.functionspace)
ref.setTaggedValue(1,4.96056744693)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acosh_taggedData_rank1(self):
arg=Data(numpy.array([75.872128581964489, 31.270745005346555]),self.functionspace)
arg.setTaggedValue(1,numpy.array([91.194940269901991, 60.292904573535402]))
res=acosh(arg)
ref=Data(numpy.array([5.0221531537701187, 4.1355744181179075]),self.functionspace)
ref.setTaggedValue(1,numpy.array([5.2061165345882037, 4.7922928301529595]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acosh_taggedData_rank2(self):
arg=Data(numpy.array([[13.716727126294922, 18.582048298979366, 7.5020529608606203, 37.240476559713919,
47.923636526032062], [23.137297999502238, 93.601586495900719, 44.214564115710346, 36.167402243946711, 46.702642863490553],
[23.270622841679405, 9.2774257115223389, 59.291871515770787, 33.506154158989204, 38.271499005024928], [46.757553911983621,
6.8257457794847447, 22.981256925823288, 86.170385026518829, 23.420848755718815]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[15.948822919888016, 2.6658485927005215, 60.224282793904251, 44.876404405068655,
34.120337847111642], [62.222746267715351, 21.245738679003445, 45.817023654907636, 40.859047475717304, 58.128988691848726],
[10.190092458920921, 48.417808389183413, 42.896938034834868, 70.93329041076818, 8.3231821063895897], [26.439411367064803,
15.072763430534389, 72.467415365655967, 32.34764058755561, 40.90238765596505]]))
res=acosh(arg)
ref=Data(numpy.array([[3.3104318336497132, 3.6146183386321131, 2.7038519866369914, 4.3103631168831464,
4.5626471481294111], [3.834125802828078, 5.2321659777026861, 4.4820735137429342, 4.2811142287291988, 4.5368332971619001],
[3.839876941498396, 2.9178139584744245, 4.7755482825351914, 4.2046535493993629, 4.3376819221817646], [4.5380086345560136,
2.6084392106579743, 3.8273524505590331, 5.1494400678077143, 3.8463177083491402]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[3.4615479170038235, 1.6364755314613311, 4.7911538849173159, 4.4969351619322433,
4.2228259954588951], [4.8238032312056154, 3.7487492070394848, 4.5176837838527817, 4.4031256752655068, 4.7557376697168952],
[3.0121467628386225, 4.5729082091092721, 4.4518117430828532, 4.9548373538850878, 2.8085633470967837], [3.9676451082613573,
3.4049343192673835, 4.9762365895508731, 4.1694492579956304, 4.4041861546123844]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acosh_taggedData_rank3(self):
arg=Data(numpy.array([[[96.020179154808503, 91.79778167929949], [95.326949143229541, 45.421316747623791]],
[[30.65219771657458, 74.770295168847696], [77.989358990586055, 11.574100860239977]], [[92.626717442077236, 3.1700861207519435],
[81.107542243865836, 58.693576539606504]], [[19.827981381026582, 98.929766771654783], [93.210281085417222,
17.872534507474096]], [[15.212656462365901, 45.839114797078196], [67.995696601337741, 21.57180672061461]],
[[88.431893439575802, 86.459272754032739], [93.400261681763538, 3.5041690372595453]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[25.302663405742905, 90.965425641453351], [31.884491697764958, 35.880019812296034]],
[[87.641297339838275, 20.377144107642813], [80.276026842238238, 90.841319587541577]], [[14.097491687234964,
99.790641727293078], [14.181879052710332, 69.957347613100836]], [[81.947655870784715, 62.082411711815226], [8.6837333697858377,
15.244370873759896]], [[61.74742196011465, 29.437516030577598], [54.649929929545088, 40.35589353447758]], [[94.022187035702345,
83.335572962817793], [87.379860935581533, 36.951175898939482]]]))
res=acosh(arg)
ref=Data(numpy.array([[[5.2576784330296311, 5.212705644723707], [5.2504322211627388, 4.5090075081620471]],
[[4.1155853549672061, 5.0075231423119817], [5.0496784696098871, 3.1400456206904903]], [[5.2216956660972746,
1.8210457157023874], [5.0888851315296018, 4.7654048945882801]], [[3.6796048228214242, 5.2875318075733615], [5.2279764321968374,
3.5756287593263258]], [[3.4141929052963755, 4.5181659425846679], [4.9125375214849907, 3.7639967265035321]],
[[5.1753479006258116, 5.1527872028336335], [5.2300126684264212, 1.9260894616398991]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[3.9236661269309159, 5.2035964629627127], [4.1550209149744246, 4.2731335198543867]],
[[5.1663669487865587, 3.7069584430952047], [5.0785794154217756, 5.202231127665808]], [[3.3378837578237039, 5.2961964832588198],
[3.3438668793761139, 4.9409818305305739]], [[5.0991906511154443, 4.8215450339501285], [2.8512667866370323,
3.4162799784592983]], [[4.8161338295407301, 4.0751284773769649], [4.694011395692308, 4.3907310833319526]],
[[5.2366496860798568, 5.11598668493777], [5.1633792680707424, 4.3025615032480333]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acosh_taggedData_rank4(self):
arg=Data(numpy.array([[[[46.496494307641363, 74.917094330056727, 39.893774938569727, 23.744943878587605],
[1.9737426076200388, 56.13429325455833, 31.478338229941585, 76.686853948479268], [43.543067819658987, 81.289887895435285,
32.113423511300105, 5.213549323262523]], [[26.577313488763004, 82.755886663842674, 6.4828955638004224, 81.780421145322038],
[84.79256558820957, 69.233222959376874, 73.836164807553629, 87.692408248293873], [37.136000517418708, 90.288377224446137,
62.614392713419683, 88.339987656018039]]], [[[61.202863958945962, 31.566286842895735, 7.1708278242804298, 98.950695215124099],
[87.222678883207024, 86.95839324301987, 17.656917302211554, 54.991339984079993], [92.159416624775972, 31.425747720223157,
47.207404840689208, 79.815101091507159]], [[13.75432234393317, 36.005105956151937, 80.930354510392675, 17.903169928485063],
[37.209969721856766, 68.392829385096988, 68.225744945843331, 25.131306602144075], [57.726340455843392, 45.183440336464102,
96.487976002311996, 74.482543907471182]]], [[[97.032639801911586, 59.169720141290711, 65.544382023430359, 27.350556781956005],
[85.48226011720655, 8.7268878117714603, 49.450968175354753, 75.078362059466997], [47.954002472987767, 16.036826907987312,
99.975563170888265, 78.829796914932373]], [[39.21420494818117, 42.262998162260104, 73.751675519611155, 51.828252577302301],
[60.148666432515796, 37.443825584849876, 97.665835616597235, 78.975812123743339], [6.9121385596705096, 34.189572613115473,
27.703168010672275, 50.045255814521546]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[76.45018571903698, 24.717910838106601, 78.98873320779046, 62.765160850734503],
[61.239518935391644, 70.008902728343131, 78.300714796360708, 34.932147232071522], [37.022585726034904, 58.180738950315579,
27.287564890515544, 54.517546763840656]], [[15.093623698826033, 30.443962452205266, 89.802715985190773, 77.992879086297037],
[37.816659621995385, 64.854538050781173, 81.326022233556586, 1.9993032471653205], [38.637332121131173, 32.158640232053635,
71.342867154253426, 5.2704550021018708]]], [[[60.437096462714948, 49.090407043277665, 78.184244621340568, 60.917754368590664],
[42.949146499752899, 31.471629405983144, 36.886647249542328, 40.010907031786985], [9.581053748614563, 32.843241151168968,
75.216103702188008, 68.09522545374368]], [[82.504776175599545, 57.609847555036787, 95.669336674553534, 78.017033779006482],
[40.298523228110923, 14.938993210257649, 31.561252137958434, 28.44443377692734], [24.326622031518038, 61.769365476509179,
50.466775790330708, 40.289781067050903]]], [[[13.88323115651615, 6.714972583508235, 97.860470433016005, 75.032728358835342],
[11.04088136407165, 77.052563320907453, 97.427382444573666, 33.006120873883368], [1.7554298156787875, 51.058303561715107,
29.46416973203182, 94.334872484467382]], [[3.5895347426782043, 40.561254020265949, 67.84874109154778, 93.690445556218563],
[25.256475539837954, 56.511124744109935, 3.5800990775641948, 63.00192152079822], [42.748122023741885, 80.763225726336117,
74.43049456512324, 31.553184442256732]]]]))
res=acosh(arg)
ref=Data(numpy.array([[[[4.5324084412759778, 5.0094847279060319, 4.3792103554802608, 3.8600731017873722],
[1.3016637290797095, 4.7208147464351278, 4.1421944195445226, 5.0328349651237305], [4.4667658109698571, 5.0911309744336259,
4.1621787939300265, 2.3350809370517589]], [[3.9728510376447228, 5.109005824071593, 2.5563122244559442, 5.0971476629476182],
[5.1333202761679582, 4.9305758676636238, 4.9949499704360436, 5.1669499996633848], [4.3075527147709547, 5.196125251025741,
4.8300785753238706, 5.1743080104110604]]], [[[4.8072744174301363, 4.1449858695233406, 2.658270592315148, 5.2877433445020525],
[5.1615786939465957, 5.1585438835194486, 3.5634719544667166, 4.7002402176468125], [5.2166376113963704, 4.1405214877183933,
4.5475857430671622, 5.0728206578448036]], [[3.3131761972681315, 4.2766150387385435, 5.0866979727721846, 3.5773440805534538],
[4.3095433003567933, 4.9183617154061148, 4.9159154531733078, 3.916865458190002], [4.7487857259228701, 4.50375535715325,
5.2625387259978202, 5.0036669013457349]]], [[[5.2681680417753087, 4.7734856928164495, 4.8758164840313665, 4.0015496982806926],
[5.1414218364571465, 2.8562570863627581, 4.5940265687942565, 5.01163522143636], [4.5632807172725283, 3.4670614367497063,
5.29804795523276, 5.0603980061109874]], [[4.3620236184007943, 4.4369191414800992, 4.9938049295476628, 4.6409895153027874],
[4.7898973432892191, 4.3158106467268684, 5.2746727815426553, 5.0622487258630153], [2.6211521330965311, 4.2248539437647477,
4.014368049013294, 4.6059750587364245]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[5.0297437651741488, 3.9002658632157274, 5.0624123337133149, 4.8324838701438377],
[4.8078732275406084, 4.9417185850198733, 5.0536631335958884, 4.2463497717069103], [4.3044928897720416, 4.756627671189591,
3.9992423655604172, 4.6915856649381364]], [[3.4063203831311251, 4.1087650332057839, 5.1907313988379462, 5.0497236078916279],
[4.325722057994339, 4.8652346239002213, 5.0915754213748823, 1.3165555330279051], [4.3471986353008933, 4.1635865170827602,
4.9605954282607705, 2.3461398613499078]]], [[[4.7946818261908994, 4.5867070641055383, 5.0521744319245858, 4.8026044713453375],
[4.4530284006885319, 4.1419811640723054, 4.3008130131764508, 4.3821430718817362], [2.9502001551652342, 4.184661309810318,
5.0134683412122536, 4.9140003614485606]], [[5.1059666365457144, 4.7467653633473841, 5.2540177010556652, 5.0500332892131663],
[4.3893080251791545, 3.3959998775424971, 4.1448262804214515, 4.0407905386517378], [3.8842957690757829, 4.8164891884239784,
4.6143642220081729, 4.3890909997782792]]], [[[3.3225293134567675, 2.5918958799586354, 5.276663767488472, 5.0110271683900773],
[3.0926948705710546, 5.0375928992743964, 5.2722281468793524, 4.1896106435456426], [1.1625825203718756, 4.6260194561242551,
4.0760340229896617, 5.2399700114788725]], [[1.9511766454658923, 4.3958084665836674, 4.910373700880009, 5.2331149143207876],
[3.9218376124513488, 4.7275064044247292, 1.9484357218467823, 4.8362494165490313], [4.4483356113795223, 5.0846305854377469,
5.0029677813756557, 4.1445704990230352]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
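# The atanh cases below keep every argument strictly inside (-1, 1), the
# domain of numpy.arctanh. A minimal sketch, assuming plain numpy and an
# illustrative absolute tolerance, using the rank0 case below:
#
#   import numpy
#   assert abs(numpy.arctanh(-0.320619038958) - (-0.332336921208)) <= 1e-9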
def test_atanh_taggedData_rank0(self):
arg=Data(-0.320619038958,self.functionspace)
arg.setTaggedValue(1,0.869122682798)
res=atanh(arg)
ref=Data(-0.332336921208,self.functionspace)
ref.setTaggedValue(1,1.32948203584)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank1(self):
arg=Data(numpy.array([-0.49724785679895611, 0.76485832136382981]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.71695223330373481, 0.98907589120670503]))
res=atanh(arg)
ref=Data(numpy.array([-0.5456433240595332, 1.0078187373348622]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.90134518516976136, 2.6022266354573262]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank2(self):
arg=Data(numpy.array([[-0.41875373709407426, -0.031282543674564844, -0.37120980277072957, -0.33787277754808165,
0.9056835178923357], [0.10920509246927712, -0.9517935928864919, -0.38928920166887748, 0.51987390317679982,
-0.38673372014824514], [0.84666821394639546, 0.70139465198953088, 0.65524269199234908, -0.76892126906681368,
0.53641715611532659], [0.8319590120911895, 0.54197223487670665, 0.96505599773867456, 0.18013767879594189,
-0.23629819004673036]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.36214053363992749, -0.664498666560283, -0.18821662111337301, -0.16146935463873657,
0.6826053287306455], [0.94446504653387175, -0.33438894541106456, -0.024588916748005452, -0.85997299749442313,
0.7590303783132617], [-0.17183976558739666, -0.58358085652249014, 0.31083502908173499, 0.85373153758284226,
-0.75382778617691071], [0.02157269345526025, -0.2087677756939843, -0.3645241397483423, 0.076955395055613884,
0.49258045667332828]]))
res=atanh(arg)
ref=Data(numpy.array([[-0.44617979391481238, -0.031292754010403323, -0.38982552275887766, -0.35168921785961199,
1.5029700335665168], [0.10964234311011919, -1.8505060400721478, -0.41096200383131098, 0.57616694042294059,
-0.40795359483226379], [1.2442671095703073, 0.8700403910046729, 0.78443110215353462, -1.017683354133686, 0.59911167917750008],
[1.1944666231886989, 0.60694387161398944, 2.0147645883194851, 0.18212498120425324, -0.24084972556636608]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.37934733814448002, -0.80082666251032752, -0.19048764725210868, -0.1628950195664004,
0.8339763214069672], [1.7778647815902611, -0.34776162471495142, -0.024593874154403211, -1.2932409850373054,
0.99392357017656985], [-0.17356179326165472, -0.66787580856210826, 0.32146948112818524, 1.2697561085057214,
-0.98176231871677033], [0.021576040897969627, -0.21188262231678223, -0.38209346340171296, 0.077107850497316832,
0.53946179405081751]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank3(self):
arg=Data(numpy.array([[[0.10445331614917319, 0.2730814888903883], [-0.60232593544217883, 0.96715501656915182]],
[[-0.17016809723013615, -0.089807528529218916], [0.23654377024927897, 0.83272135685004955]], [[0.016551420278897,
-0.38236850351537788], [-2.0657074242591555e-05, -0.40819212706994223]], [[-0.3729914622085253, 0.62722527860088206],
[0.82747007179222232, 0.25145176276119274]], [[-0.73980019966402311, 0.96693217416513644], [0.90586640577652378,
0.21899534641151908]], [[0.19566248084568705, 0.47149584732702499], [-0.48621869468657664,
-0.79464808240093432]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.12685419941171083, 0.86382397828941637], [0.55687203880422764,
-0.43398285832464556]], [[-0.063925399703885222, 0.92085617372342865], [0.55098040379713153, 0.39890555903637726]],
[[0.58521949389478389, -0.47732531221219043], [-0.52649872740584502, -0.05400171295475209]], [[-0.20264962628344207,
0.89825210951105694], [0.42220448045958414, -0.56965978536278339]], [[-0.1625462217423036, -0.45516235003933736],
[-0.81533961227730445, -0.34509219866705487]], [[-0.13641943291083147, -0.020985590801264165], [0.89028940365120146,
-0.60385493016714553]]]))
res=atanh(arg)
ref=Data(numpy.array([[[0.10483570163371808, 0.2801906138917637], [-0.69678941288133878, 2.0462723010157]],
[[-0.1718397684110442, -0.090050148021531648], [0.24110984512183212, 1.1969481388179688]], [[0.01655293194521994,
-0.40283080913531905], [-2.0657074245538708e-05, -0.43343996902678245]], [[-0.39189351498418051, 0.73682864354852906],
[1.1800585483131474, 0.25696195979910064]], [[-0.95003787955311769, 2.0428347777140869], [1.5039885009533656,
0.22260060180307562]], [[0.19821835350061251, 0.51199204142601529], [-0.5310963428117158,
-1.0839195243854924]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-0.12754129113092447, 1.3082186161867242], [0.628287702104393,
-0.46479335197476535]], [[-0.064012689955305346, 1.5946297532263998], [0.61978799882113045, 0.42234670222024745]],
[[0.67036433965710951, -0.51951462581386376], [-0.58528868912562182, -0.054054297989210139]], [[-0.20549411284441793,
1.4630953537864373], [0.45037168207904948, -0.64701904247591646]], [[-0.16400091263674657, -0.49119244692959763],
[-1.1427547236990911, -0.35986168723884399]], [[-0.13727527703665104, -0.020988672265444559], [1.4233196317293295,
-0.69919242983311702]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank4(self):
arg=Data(numpy.array([[[[-0.47883527804935155, 0.31661906413420882, -0.5872020411014347, 0.43673454125507827],
[0.88136900358406756, 0.94422263261031114, 0.2891277683997282, 0.33076405922761665], [-0.21998818991754054,
0.18048183712899557, -0.25041587475362848, -0.25214869329219225]], [[-0.84430089168638789, 0.76091050674122229,
-0.020508996055177531, 0.29404167593808239], [0.68934167553083725, -0.95776231509069376, -0.88145154914219881,
0.77744088285490931], [0.78737460506857015, -0.4719906631589994, -0.76786773432549871, 0.56997395187593303]]],
[[[-0.78365368219581333, 0.94733757009719777, 0.34808028723294027, -0.036308308933188926], [0.44280850580686337,
-0.46404387448339723, -0.14428462443261891, 0.46397654196715088], [-0.72398981623771386, -0.71386141901887123,
0.11403708557516423, -0.53026185484339128]], [[0.68236920590850803, -0.24036690933114169, 0.24096702828664163,
0.98040177083805702], [0.094472694677199653, 0.6400010790928965, -0.047874513368018401, 0.52630540675289095],
[-0.22171125448934548, 0.70527518354726904, -0.47501571993423464, -0.72243237090033174]]], [[[0.0082387877639482632,
0.68679756821273141, -0.25873096009920205, 0.20529892260052329], [-0.19243878258846681, 0.05455697263932402,
0.16135364518328221, -0.24061386219544423], [-0.34484162854240008, -0.67668499260554094, 0.40134472749143524,
0.50389744806479242]], [[0.20426635215593136, 0.3870127342101326, -0.75330067354803076, 0.71517180319260576],
[-0.59636722535596642, 0.16902679259625208, 0.60854487911843269, 0.6080127077236932], [0.21358767561056036,
0.43831246429727311, 0.87978778079261621, 0.86710279990576811]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-0.55014926999716374, -0.59690189470907706, -0.67176566257807679,
0.60736951817123375], [0.8190301281645469, -0.55001881887120574, -0.6964571059396939, -0.16068048124821255],
[0.23872416972055821, -0.44608459772892295, 0.59084321623347447, 0.42861986752160219]], [[0.80353157195060887,
0.012109217448764475, -0.59731241459393281, 0.93750011998591587], [0.3427576485459467, -0.72154456903205721,
-0.81933208907246291, 0.80205913114622973], [0.035634874594440369, -0.97323376998325939, 0.67274012944959649,
-0.34467983303558469]]], [[[-0.10479839282841275, -0.64423003763584374, -0.85763880950837013, 0.55412818085007887],
[0.24284448011249649, -0.67346626588488845, -0.5046529367889272, -0.36718085565034209], [0.055975396650420395,
0.47512512598591927, -0.090812732728584811, -0.94658819925651927]], [[0.28524615600658376, -0.29676570986200579,
-0.51954918876546308, 0.48518566577271982], [-0.86630407866681014, 0.5790670951452479, 0.42401975127206515,
-0.92355038400324141], [-0.61578149445748631, -0.15303976680596276, 0.18150698656005404, -0.84959247418575401]]],
[[[0.64224812282493415, 0.16779589620113322, -0.32146937769322403, -0.2795229445390589], [0.41598563468316607,
0.3894916483189903, -0.44123474668060192, 0.6000180351672102], [0.38205455307082259, -0.54806836352622523,
0.0088431283841204911, 0.88616403016408252]], [[0.064301128460620083, 0.2913061121724092, -0.17972507421612294,
-0.49258468089632418], [0.77347160470842447, -0.032497425496647359, -0.58649359459086758, 0.23987952531047596],
[-0.33256196883461886, 0.77464344490024484, -0.64400203692561186, -0.10111214352067044]]]]))
res=atanh(arg)
ref=Data(numpy.array([[[[-0.52147196294207276, 0.32788498573535435, -0.67338487643976419, 0.46818855662601327],
[1.3818685896095428, 1.7756246583188797, 0.29761420443632619, 0.34368593107252376], [-0.22364369829230918, 0.1824807046839074,
-0.25585646084994412, -0.25770606941434526]], [[-1.2359651061495662, 0.99837417850627852, -0.020511872271671897,
0.30298473846351426], [0.84670025891519385, -1.9181222970821421, -1.3822385567527231, 1.0388685705919063], [1.0644856064871606,
-0.51262849467258542, -1.0151110548081639, 0.64748426157864258]]], [[[-1.0547693615321863, 1.8051580368562086,
0.36325771777197419, -0.036324276565210908], [0.47571893391859066, -0.50245268032031365, -0.1452985614521865,
0.50236687302329996], [-0.91597939071280587, -0.89501400057946223, 0.11453530898741597, -0.59050937260928005]],
[[0.83353431857678084, -0.24516348409085417, 0.24580049937095722, 2.3078079021972875], [0.094755268528087466,
0.75817557241805289, -0.047911139382633972, 0.58502126565893153], [-0.22545511450504421, 0.87771983963299549,
-0.51652780344410243, -0.91271406398924038]]], [[[0.0082389741813193099, 0.84186867590027514, -0.26474784084557118,
0.20825842151635696], [-0.19486851141497685, 0.054611198454615352, 0.16277621481062421, -0.24542559638535763],
[-0.35957727095671943, -0.82297340064389157, 0.42525082339072229, 0.55451632638526749]], [[0.20718065376792391,
0.40828171962949339, -0.98054254816788611, 0.89769117554517319], [-0.68749017583866245, 0.17066466375457767,
0.70660718620863483, 0.70576246545920529], [0.21692751606572119, 0.47014004829152839, 1.3748277463307201,
1.3212836529188918]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.61859534593457599, -0.68832037278553204, -0.81395391660559058,
0.70474268120208172], [1.1538640889775169, -0.61840829443535583, -0.86038718410040294, -0.16208513353159817],
[0.24342074184310508, -0.47980146702787174, 0.67896053116777877, 0.45820470516013301]], [[1.1085001296138581,
0.012109809371730973, -0.68895835800676963, 1.7169945930949277], [0.35721394930077227, -0.9108595789817534,
-1.1547820644444977, 1.104358453933548], [0.035649969677798599, -2.1501439822914454, 0.8157318952041559,
-0.35939365030578813]]], [[[-0.10518459752972166, -0.76537157126945188, -1.2843470399300845, 0.62431925114105657],
[0.24779464619772806, -0.81705954941654779, -0.55552944455890785, -0.38516072985708483], [0.056033968346053203,
0.51666909868942745, -0.091063618089528731, -1.7979008842415258]], [[0.29338366539773703, -0.30596920988869425,
-0.57572206515026991, 0.52974437563961629], [-1.3180736740413761, 0.66105802771914668, 0.45258279622299263,
-1.6126480031695198], [-0.71818130025412152, -0.15425163308315157, 0.18354057447044783, -1.2546860803299611]]],
[[[0.76199086156420348, 0.16939783695662922, -0.33328497538680557, -0.28716450911655816], [0.44282777426673942,
0.41120063253722738, -0.47376302178526142, 0.69317536098519239], [0.40246314878330663, -0.61561614700266343,
0.0088433589085275318, 1.4037709403877687]], [[0.064389969528247493, 0.2999928969476674, -0.18169857147454585,
-0.53946737159589209], [1.0289119803083038, -0.032508872740294195, -0.67230428425667288, 0.24464627841275868],
[-0.34570605594566106, 1.0318354841886035, -0.76498190274334987, -0.10145885170923308]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
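# The exp cases below reuse the same tagged-data pattern. A minimal sketch,
# assuming plain numpy and an illustrative absolute tolerance, using the
# rank0 case below:
#
#   import numpy
#   assert abs(numpy.exp(-0.749952155816) - 0.472389153274) <= 1e-9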
def test_exp_taggedData_rank0(self):
arg=Data(-0.749952155816,self.functionspace)
arg.setTaggedValue(1,1.87435313957)
res=exp(arg)
ref=Data(0.472389153274,self.functionspace)
ref.setTaggedValue(1,6.51660242443)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_exp_taggedData_rank1(self):
arg=Data(numpy.array([3.7583213879690298, -4.0248042968760078]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-4.0326117793437213, 2.0857030228564621]))
res=exp(arg)
ref=Data(numpy.array([42.876392709074644, 0.017866920423330365]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.017727967895924832, 8.050249001830192]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_exp_taggedData_rank2(self):
arg=Data(numpy.array([[3.766157828602319, -4.5823946696429996, 0.66617764913528088, 3.1886036213038711,
3.2584512608104532], [-2.7859216252555252, -1.4135218651939963, 1.3112977143029916, 4.7018302511834644, 1.7097977950670256],
[1.4508523744480133, 0.55253078019373714, -2.6877047949953683, -2.6846750320431956, 0.10904970548395898], [-1.8038527766057699,
0.13601142120047616, -3.0528315745434984, -2.9504614920251693, 4.9405838296608291]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.77557025230999699, -3.8554442789337271, -0.91546431588342969, -4.5959540113452011,
-3.2190686796674086], [1.4523968377221514, 3.5863184621410138, -0.095093254500025814, -1.1206737181718895,
-0.035904973775130244], [1.2975960427889675, -0.79138326811261184, 4.8737751190258649, 2.4737950526565182, 1.4934451722788324],
[2.1259309043066335, 2.3309264909898095, 4.874579950755157, 3.0897776496287364, -3.5758573980747324]]))
res=exp(arg)
ref=Data(numpy.array([[43.213710976177346, 0.010230368587152355, 1.9467817978129194, 24.254535269112523,
26.009224428474024], [0.061672224151586852, 0.24328495576601736, 3.7109863889101957, 110.14858765860721, 5.5278436073622528],
[4.2667498341569088, 1.737645055572572, 0.068036918977638014, 0.068243367300953484, 1.1152177814033668], [0.16466325379708055,
1.1456949787321744, 0.047225013744888564, 0.052315557164201748, 139.85187540287308]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[2.1718302653500525, 0.021164198413568151, 0.40033070302660334, 0.010092587743486195,
0.039992286551843337], [4.2733447640782849, 36.10092405638504, 0.90928813530390384, 0.32606004802823046, 0.96473196396405092],
[3.6604864293964274, 0.45321744024910665, 130.81382367479199, 11.867398903728049, 4.4524084401639898], [8.3806954834885605,
10.287468365848504, 130.91914916968017, 21.972191907545891, 0.027991415696668204]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_exp_taggedData_rank3(self):
arg=Data(numpy.array([[[-3.6030801562417594, -4.5294423627776723], [-1.557160197615659, -0.64085226193734268]],
[[3.1534384970888407, -1.4188623741518338], [-0.32228628176517304, 0.50951392267301809]], [[-1.3476041299826225,
-3.32599590280795], [2.0029626527776827, 0.17501479898352912]], [[0.027034969552835797, 3.5645457550243353],
[4.1592609451144007, -2.4301462095345872]], [[-1.4609157602736733, -2.6511851511926929], [2.600171679270459,
-0.70192657249718238]], [[-1.7778158632064134, -2.1404423164026731], [3.9519788369582631,
-4.5989900094571379]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.13931485388617304, -0.92433138388086356], [1.2828540748799355e-05,
-4.8469702777681896]], [[-4.4080333086228478, -1.1631495098987896], [-2.2966600443214649, 0.064924432335386584]],
[[2.916822038268009, 0.87499562050241675], [2.7965207298154517, 1.7350460169549091]], [[-1.4466065967927477,
-2.5710156312892343], [-4.0840019104915717, 3.2422451755687174]], [[-4.3762443959870501, 1.2266604944000514],
[-4.8494607331215622, -4.418190947910178]], [[-0.10058151287573036, 1.8710638004256079], [-3.2551884826265995,
-2.0233995710584862]]]))
res=exp(arg)
ref=Data(numpy.array([[[0.027239690595102155, 0.010786689455490304], [0.21073366423825682, 0.5268432242253307]],
[[23.416443683529156, 0.24198915348209477], [0.72449075214705883, 1.6644819313514767]], [[0.25986211125282049,
0.035936711454248524], [7.4109797665781239, 1.1912638459759448]], [[1.0274037299843257, 35.32340428052094],
[64.024187721822358, 0.088023961699000022]], [[0.232023699324167, 0.070567530289168845], [13.466049678150117,
0.49562951719433684]], [[0.16900687858407906, 0.11760281386238031], [52.038240209413715,
0.010061993132243331]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.86995407691125259, 0.39679663502748053], [1.000012828623035,
0.0078521313242256845]], [[0.012179107337097306, 0.31250040622116754], [0.10059426364925389, 1.0670783847158629]],
[[18.482457486951546, 2.3988647881215508], [16.387530820420302, 5.6691886766023503]], [[0.235367631750873,
0.076457852989352454], [0.0168399386970619, 25.59111383914971]], [[0.012572487351205171, 3.4098233771610715],
[0.0078326002724350228, 0.012056022541700125]], [[0.90431139638593339, 6.4952023243694939], [0.038573549534221326,
0.13220525908750566]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_exp_taggedData_rank4(self):
arg=Data(numpy.array([[[[-2.5238222687744738, -4.7456197569138041, 2.4673965431652034, -1.244247551702399],
[3.9651636170231548, 4.0776910888432969, 0.33317305369197836, 3.6969964378853799], [3.9522407667631043, -1.0872449164824829,
1.6801184274785852, -2.8666709250314359]], [[4.2594562258901689, -4.889578615758472, -3.1308615369041681, 2.1392519037970645],
[0.17641976501634815, -1.5746448211534414, -0.84524587352898095, 2.9243780446190479], [1.2842983675630162, 2.7926593166299227,
-0.070666242007418667, -1.4855591873635543]]], [[[1.5763070240757084, -4.7399252089965671, -4.4330538755629174,
3.681196791537392], [-2.8156600221094874, -1.2778689800443024, 1.4019765504207253, 0.73514942949937634], [-1.1656020759259276,
-3.7433522348482926, 1.4569591575424701, -0.53394250890947514]], [[2.3786113981284966, -0.87615865560504158,
-0.41129136755905193, -4.7966371337088347], [-1.5388053274330717, 4.1842415750395165, -0.82823766333376536,
2.0074764920300883], [3.5655062419593779, -1.0807913303813055, 0.14992361631707851, 1.5118919445282142]]],
[[[-3.3648564449762963, -3.078810214450729, 2.1446259920400266, 3.0442472934728944], [2.4303560614080606, 4.5022657497612339,
-3.6084630275613758, -3.8599028815954508], [0.39985165592526428, -1.0962344443252938, -4.0342766535351613,
3.1574594571937133]], [[2.9382085600344032, -2.1075141636809769, -4.4790587859448125, 3.8831338833937394],
[-0.88934483455337077, 3.2530676768122309, 1.1912487104888179, 1.5913330617778207], [2.5683829199460373, -4.8954076890133447,
4.5839373708486839, 3.8595928573139471]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[4.2648314236496532, 2.8364333726771429, -3.3447905621878982, -2.4414228825719086],
[-4.0502262415385548, 4.1458941943236809, -2.6143141305910502, 4.2118330275753131], [1.6496044974526356, -2.7777414297140846,
-3.9663660028469661, 0.045758605819602671]], [[0.58717311654154702, -2.7605066566946723, 4.603529134451751,
-3.9524884858165077], [-4.6202718129902269, -0.85933524206832246, 4.3247077827126894, 0.33279730693200626],
[-3.2409615827119742, -3.8063217097324467, 0.39137752345011467, -3.5665802016338057]]], [[[4.8607266697872795,
-4.969367327992023, -0.98962871483763237, -1.7804206030760605], [1.3076035022144179, 3.1626720342784047, -2.1039601570953614,
-2.4314893070462995], [4.668111404954228, -4.7790518839956428, 4.6299993091851555, -1.8861309215463971]], [[-1.960385423872796,
-2.2347760247542316, 0.41911304951970063, -1.8633640886195613], [-0.73179718793669579, 2.9290999688245485, -1.9420212454055177,
-0.38085093142676385], [-4.7675775804253231, -2.0447338138388274, -0.49968774313595965, -1.1945768914876465]]],
[[[-3.29590409812511, -3.0251482714096891, -3.3217228493760187, -3.7250915734222811], [2.257355212448271, 4.8692559789410161,
2.8160941264843631, 0.47498556914010859], [-4.8594977762105271, -3.6563196957128827, -1.0925704610547982,
0.88273087888402202]], [[2.9052807354938324, -0.41588815824257974, -4.0638914834502842, -4.1298152285808962],
[2.188339245387195, 1.1100247395792611, -0.48625551670779821, 3.2114719610297282], [4.7983149598282502, -0.5953446380306362,
4.6452284950547842, 2.2952676397007332]]]]))
res=exp(arg)
ref=Data(numpy.array([[[[0.080152655505830658, 0.0086896748505372011, 11.791707653037269, 0.28815765031031876],
[52.728896028898035, 59.009065747752359, 1.3953887547525055, 40.326000631888107], [52.051872360771483, 0.33714407542267766,
5.3661914380144706, 0.056887996095747358]], [[70.771489285571263, 0.0075245925517613936, 0.043680148977066881,
8.4930816096222337], [1.1929387075019384, 0.2070810904436986, 0.4294517544102977, 18.622639996115122], [3.6121326784506551,
16.324373808965724, 0.93177282686446505, 0.22637571883571311]]], [[[4.8370596417137266, 0.0087392997820314143,
0.011878159805614893, 39.693870945779999], [0.059865193346532772, 0.27863043525547398, 4.0632232005962257, 2.0857936483198323],
[0.31173491743100945, 0.023674607118095742, 4.2928856716344859, 0.58628895778987566]], [[10.789909567861674,
0.41637929986988848, 0.66279378685620638, 0.0082574691747315451], [0.21463736969557601, 65.643696214852767,
0.43681842951266009, 7.4445073500822074], [35.357348247592512, 0.3393268996880992, 1.1617455009391346, 4.5353032245954843]]],
[[[0.034566978025192006, 0.04601397085108954, 8.5388470432411587, 20.994222765561766], [11.362927259672304, 90.221318825839518,
0.027093456777850244, 0.021070045712901889], [1.4916034107005398, 0.33412689121892897, 0.017698477615323543,
23.510789823294036]], [[18.881990043978675, 0.12153971907203348, 0.011344085344041271, 48.576208955970465],
[0.41092488836655877, 25.869577820936623, 3.2911883839679281, 4.910290287640346], [13.044713030863051, 0.007480858738198164,
97.899101410590205, 47.446030131719859]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[71.152924262038169, 17.054828714195565, 0.035267600753315483, 0.087036919996353851],
[0.01741843342050033, 63.174086562400987, 0.073217989457584703, 67.480119432792065], [5.2049208605842301, 0.062178784059445436,
0.018942143909195415, 1.0468216837923499]], [[1.7988959516791232, 0.063259709283642204, 99.836029425246196,
0.019206846291659703], [0.0098501183071684206, 0.4234434761873957, 75.543434536535472, 1.3948645404410878],
[0.039126253875001821, 0.022229796402444147, 1.4790167715431195, 0.028252305852203917]]], [[[129.11799417067959,
0.0069475421600283986, 0.37171467754869364, 0.16856723245965202], [3.697302509270763, 23.633661588632254, 0.12197244052600395,
0.087905816293231603], [106.49642376925129, 0.0084039631233603397, 102.51399329228067, 0.15165744973159559]],
[[0.14080414118396287, 0.10701609612551928, 1.520612249558166, 0.15514981373760095], [0.48104368685579674, 18.71078262832426,
0.14341378217821199, 0.68327973756414706], [0.0085009481008868799, 0.12941463377750104, 0.60672008264714627,
0.30283205790085627]]], [[[0.037034547042194353, 0.048550622067129998, 0.036090599494215864, 0.024110892408788401],
[9.5577774186815603, 130.22399145161907, 16.711450221512699, 1.6079909926256291], [0.0077543773461377754, 0.025827390686055354,
0.33535337208917088, 2.417492579588719]], [[18.270372074237216, 0.65975405441116064, 0.017182025280169905,
0.016085850753487244], [8.9203862336155364, 3.0344334641142736, 0.6149246637123601, 24.815586931080908], [121.30583999301004,
0.55137250918361824, 104.08714646666205, 9.9270925408500208]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sqrt_taggedData_rank0(self):
arg=Data(66.1187077838,self.functionspace)
arg.setTaggedValue(1,79.3562796516)
res=sqrt(arg)
ref=Data(8.1313410815,self.functionspace)
ref.setTaggedValue(1,8.90821416736)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
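# A minimal plain-numpy sketch of the relative-tolerance check that every
# generated test in this class applies (assumption: Lsup is the supremum
# norm over all data points):
# numpy.max(numpy.abs(res - ref)) <= self.RES_TOL * numpy.max(numpy.abs(ref))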
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sqrt_taggedData_rank1(self):
arg=Data(numpy.array([26.950944903716938, 22.036031901881039]),self.functionspace)
arg.setTaggedValue(1,numpy.array([79.659180592587674, 98.693397510457103]))
res=sqrt(arg)
ref=Data(numpy.array([5.191429947877265, 4.6942552020401527]),self.functionspace)
ref.setTaggedValue(1,numpy.array([8.9251991906392583, 9.9344550686213839]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sqrt_taggedData_rank2(self):
arg=Data(numpy.array([[83.203409742087729, 20.459876166893444, 41.094599447534264, 43.257432617853716,
84.805361343651796], [80.986686844056138, 49.344401691041469, 57.262902954007956, 32.781817377127261, 20.98870933921323],
[41.864732471259813, 72.898239703170674, 97.169858294017487, 72.127581542658106, 69.84393540957619], [7.2057070639609844,
12.014479889224537, 12.730936911149628, 79.860562402939749, 72.136801812195543]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[81.711392673450959, 87.088569596996351, 67.659203023768697, 62.902795439904644,
36.572517618275612], [54.411575073623894, 80.455555320083377, 9.8079978674290373, 54.140158144982514, 36.141067802738704],
[32.839293835627899, 69.388569353812997, 63.803860822764101, 51.68690733801651, 54.337516161355843], [82.133256868736865,
57.914763460609464, 6.2550368291751957, 50.321505083945027, 50.092735399229653]]))
res=sqrt(arg)
ref=Data(numpy.array([[9.1215903077307594, 4.5232594626987126, 6.4105069571395257, 6.5770382861781878,
9.2089826443343785], [8.9992603498318751, 7.0245570458955964, 7.5672255783746767, 5.7255407934209375, 4.5813436172386401],
[6.470296165652683, 8.5380465976223547, 9.8574772783921496, 8.4927958613555585, 8.3572684179447165], [2.6843448109289136,
3.4661909770271655, 3.568043849387172, 8.9364737118697857, 8.4933386728774423]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[9.0394354178483383, 9.3321256740892835, 8.2255214438823696, 7.9311282576884761,
6.0475216095749182], [7.3764202072295131, 8.969702075324653, 3.1317723204966605, 7.3579996021325327, 6.0117441564606446],
[5.7305578991602468, 8.3299801532664528, 7.9877318947723888, 7.1893607044031747, 7.3713985214039166], [9.0627400309584552,
7.6101749953998734, 2.5010071629595938, 7.0937652261648063, 7.0776221571393352]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sqrt_taggedData_rank3(self):
arg=Data(numpy.array([[[24.58711525153694, 26.513450628905794], [25.894046225897309, 76.099882908832683]],
[[96.86666740650108, 98.675082396336464], [53.846636874764542, 14.27238078898271]], [[97.63850940329813, 90.151928905789063],
[71.648695201571115, 74.209156956430576]], [[14.632460270663838, 46.13289266526926], [49.330643833957971, 72.03527701414572]],
[[47.999222087494871, 33.838367468886382], [75.127786968398865, 4.3599320763477758]], [[46.943202068363867,
80.275429008214473], [82.397086218544985, 62.859283550169593]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[28.930635932458532, 33.388573112772498], [16.88000592657939, 68.038470757620274]],
[[54.548353448446811, 81.081327110376535], [1.6631949481682415, 75.743741576440513]], [[28.384086636568604,
37.299784516494626], [4.5145476012025636, 68.176274025525743]], [[41.635212589459222, 64.391054127502827], [30.992011628576464,
19.448152032617305]], [[3.2357282019044789, 10.803298318703028], [99.439413034365714, 70.801579823071009]],
[[38.608472990519523, 38.359870069009858], [89.941281924017275, 47.089103130495459]]]))
res=sqrt(arg)
ref=Data(numpy.array([[[4.9585396289166574, 5.1491213453273552], [5.0886192848254339, 8.7235246837979812]],
[[9.8420865372389947, 9.9335332282293418], [7.338026769831556, 3.7778804625057569]], [[9.8812200361745877, 9.494836960463779],
[8.4645552276283897, 8.614473690042276]], [[3.8252398971389803, 6.7921198947949426], [7.0235777089712599, 8.487359837673063]],
[[6.9281470890487649, 5.8170755082675676], [8.667628681963647, 2.0880450369538908]], [[6.8515109332441311, 8.9596556300013273],
[9.0772840772196268, 7.9283846747095712]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[5.3787206594559764, 5.7782846168021615], [4.1085284380881912, 8.2485435537202729]],
[[7.3856857128127791, 9.0045170392629359], [1.2896491570067581, 8.7030880482987474]], [[5.3276717838628729,
6.1073549525547168], [2.1247464792776016, 8.2568925162875733]], [[6.4525353613489962, 8.0244036617995995], [5.5670469396778453,
4.4100058993857711]], [[1.7988129980363381, 3.2868371299325174], [9.9719312590072402, 8.4143674642287287]],
[[6.2135716774267218, 6.1935345376456779], [9.4837377612425193, 6.8621500370143069]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sqrt_taggedData_rank4(self):
arg=Data(numpy.array([[[[42.370128850225534, 64.183097608646705, 84.366078659801104, 16.515641051464272],
[73.785291697946164, 87.312657572269515, 69.428277252879781, 92.656721478953287], [69.698296458864249, 23.393048019403704,
88.109689395116419, 42.032468891247021]], [[2.6375163201099578, 59.041525354643206, 20.356627995608768, 11.897333150828965],
[37.925080098983869, 59.075116440382075, 56.144969467546872, 64.519772619227496], [20.071418547844651, 38.634724148514344,
80.718254953798279, 50.41857305264454]]], [[[59.576738600481768, 23.930830924167143, 18.360753569890573, 20.231150076534181],
[98.25922486474947, 68.178149570093638, 13.776804530518866, 51.945871290407467], [35.14832429151236, 67.415812532502684,
12.757339586205628, 44.063833075360989]], [[6.7443440383699587, 84.841224148387312, 29.790278716866581, 78.659203162433926],
[62.669492083963888, 49.131163942783786, 57.031266775264292, 23.536235325724508], [66.812957309291249, 93.23023188694566,
72.378130120625073, 76.741950163200173]]], [[[54.340441189118657, 38.923007006981855, 8.4805132822780038, 81.769308743472948],
[10.431711506617603, 89.283700349417444, 11.054894136992893, 69.501130141011274], [59.878072146242665, 16.353174235971739,
33.911661927785339, 43.933788186658099]], [[95.155663331834987, 40.374769085669357, 76.504062733443291, 24.269622789956216],
[19.066641354097424, 16.216531435944937, 2.9090938643160769, 36.193423650217412], [85.035802964632353, 33.758549347144886,
22.607096658902456, 59.29059546236266]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[45.461195758665305, 48.232713963030115, 20.574896465244645, 77.042123378126419],
[29.408402377656021, 89.644199991043934, 37.626983926343691, 54.665500847799954], [55.279852765647405, 9.8770030737031362,
84.692626914039323, 74.569785022600129]], [[86.480614059580262, 39.44670516230191, 33.913192480299656, 91.064507717285494],
[82.514056419443719, 79.033061042943729, 21.205161548235484, 63.447931595442171], [70.771892050057431, 85.179849589202817,
2.2976273736104913, 54.100290092830974]]], [[[44.920675505591753, 79.571083257406727, 70.036069047171736, 89.378823845420271],
[36.90881183703241, 11.022173065677999, 70.62518800399917, 26.439709298221533], [94.630299128155968, 22.253654455466215,
73.180687011483016, 62.69738947991646]], [[25.809044438683465, 63.915976605926396, 7.8198717354648863, 86.721732417437238],
[75.044351249076755, 84.229123933895082, 21.901578729424983, 6.8349416985481879], [37.271119471339006, 64.583431723199155,
27.275670855550583, 49.995981801746105]]], [[[0.17259469145601952, 9.252449088483786, 21.995295729482201, 34.634327605762955],
[36.564437853430213, 36.464875685420154, 23.468662561443722, 63.709619389652808], [77.299728799249792, 35.131812814366654,
52.447111388935163, 59.193503430356337]], [[28.172021648921582, 6.9193925888337438, 33.521086135909869, 32.466778283077545],
[80.781854588010745, 60.326859582936031, 60.965229409096004, 2.4499363952717865], [91.552398032123989, 20.523937743647728,
27.819367237334685, 53.113877776584353]]]]))
res=sqrt(arg)
ref=Data(numpy.array([[[[6.5092341216325549, 8.011435427477819, 9.1851009063483406, 4.063944026615558],
[8.5898365349956549, 9.3441242271423981, 8.3323632453752143, 9.6258361444060174], [8.3485505603586212, 4.8366360230436714,
9.3866761633240774, 6.4832452437993595]], [[1.624043201429678, 7.6838483427670026, 4.5118319999318199, 3.4492510999967756],
[6.1583341983838347, 7.6860338563125046, 7.4929946928812692, 8.0324200972824809], [4.4801136757725972, 6.2156837233336084,
8.9843338625519849, 7.1006037104350881]]], [[[7.7185969321167285, 4.8919148525058302, 4.284944990299242, 4.4979050764254884],
[9.9125791227485021, 8.25700608998768, 3.7117118059621581, 7.2073484229921521], [5.9286022207188402, 8.2107132779377139,
3.5717418140461423, 6.638059435961762]], [[2.5969874929175072, 9.2109296028352805, 5.4580471523124992, 8.8690023769550272],
[7.9164065133091723, 7.009362591761378, 7.5519048441611272, 4.8514158063110306], [8.1739193358688862, 9.6555803495670656,
8.5075337272693243, 8.7602482934674963]]], [[[7.3715969225886635, 6.2388305800832464, 2.912132085307602, 9.0426383729237418],
[3.2298160174563511, 9.4490052571377827, 3.3248900939719634, 8.3367337813445417], [7.7380922808042722, 4.0439058144288849,
5.8233720409901117, 6.6282567984846592]], [[9.7547764367941809, 6.3541143431377876, 8.7466600901969027, 4.9264208904595446],
[4.3665365398788802, 4.026975469995433, 1.7056065971718322, 6.0160970446143409], [9.2214859412478827, 5.8102107833662009,
4.754692067726622, 7.7000386662901024]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[6.7424918063476582, 6.944977607093497, 4.5359559593590237, 8.777364261446964],
[5.4229514452607832, 9.4680621032523824, 6.1340837886634452, 7.3936121650922395], [7.4350422168033052, 3.1427699683087109,
9.2028597139171548, 8.6353798424041628]], [[9.2994953658561634, 6.280661204228573, 5.8235034541330579, 9.5427725382765729],
[9.0837248097596905, 8.8900540517447766, 4.6049062474968459, 7.9654209929822395], [8.4126031672757176, 9.2292930167593461,
1.5157926552172269, 7.3552899394130602]]], [[[6.7022888258856579, 8.9202625105658591, 8.368755525594695, 9.4540374362184689],
[6.0752622854517488, 3.3199658229683626, 8.4038793425417033, 5.1419557853234732], [9.7278106030162803, 4.7173779216283078,
8.5545711179160246, 7.9181683159627552]], [[5.0802602727304693, 7.9947468131221262, 2.7964033570758149, 9.3124503981195641],
[8.6628142799598766, 9.17764261310578, 4.6799122565946663, 2.6143721423217827], [6.1050077372054989, 8.0363817556907513,
5.2226114976657589, 7.0707836766334538]]], [[[0.41544517262331926, 3.0417838661686312, 4.6899142560906375, 5.8850936785885537],
[6.0468535498579934, 6.0386153781657725, 4.8444465691597554, 7.9818305788617696], [8.79202643303862, 5.9272095301555394,
7.2420377925646839, 7.6937314373687578]], [[5.3077322510580336, 2.6304738335200644, 5.7897397295482866, 5.6979626431802401],
[8.9878726397302025, 7.7670367311437394, 7.8080233996252861, 1.5652272663328437], [9.5683017318709176, 4.5303352793858123,
5.2744068137881328, 7.2879268503864907]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log_taggedData_rank0(self):
arg=Data(36.4809356241,self.functionspace)
arg.setTaggedValue(1,80.0302228229)
res=log(arg)
ref=Data(3.59678981247,self.functionspace)
ref.setTaggedValue(1,4.38240434862)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log_taggedData_rank1(self):
arg=Data(numpy.array([46.59425457123556, 68.172569815688405]),self.functionspace)
arg.setTaggedValue(1,numpy.array([65.427006436370618, 86.065200529495556]))
res=log(arg)
ref=Data(numpy.array([3.8414772410677034, 4.2220422818284451]),self.functionspace)
ref.setTaggedValue(1,numpy.array([4.1809351156780332, 4.455105154698046]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log_taggedData_rank2(self):
arg=Data(numpy.array([[39.075877450675698, 97.653545876008806, 47.988417825860637, 53.932052577242985,
60.380431949040442], [93.350133391903867, 38.347348519622287, 60.437021565597611, 2.0425923742169343, 77.266159584750397],
[47.678248212978616, 91.262336639629311, 11.671719403634887, 49.71988614105117, 77.648453231227109], [49.229327831457574,
82.102378053363054, 49.729354379527422, 35.684271737364277, 43.531040542575127]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[53.427459984069628, 13.93063057392779, 1.342075620281171, 69.536438982499547,
11.21650951883837], [1.9134484258021096, 36.756806175549656, 52.373018075302362, 61.699751656220478, 12.632621993377159],
[45.322161925956458, 91.126460370251877, 6.6306875352383692, 83.584252908685002, 78.274345299988568], [46.494546030822839,
91.712856654908848, 31.115030158406128, 82.946055505457963, 98.493589852718884]]))
res=log(arg)
ref=Data(numpy.array([[3.6655053316069397, 4.5814259687577241, 3.8709596864969273, 3.9877249686516278,
4.1006650780961058], [4.5363572989305121, 3.6466853864301547, 4.101601856997652, 0.71421977278415627, 4.3472560794647173],
[3.8644752815825361, 4.5137381793751707, 2.4571687708042789, 3.9064049766368951, 4.352191629643908], [3.8964895400400414,
4.4079669813660951, 3.9065953901273409, 3.5747100241263499, 3.7734742593119375]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[3.9783248457056453, 2.6340900540986794, 0.29421738590469837, 4.2418509170789935,
2.4173867571169083], [0.64890707271250014, 3.6043234105668351, 3.9583915364819653, 4.1222799058825039, 2.5362825152421582],
[3.8137961385956465, 4.5122482162114013, 1.8917084994605029, 4.4258551400119392, 4.3602199030764179], [3.8393350160546169,
4.5186625728689567, 3.4376909872768944, 4.4181904628289717, 4.5899914684230305]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log_taggedData_rank3(self):
arg=Data(numpy.array([[[21.699512716247472, 1.5282092621897898], [70.128490130107167, 99.241953232244768]],
[[78.501878729278019, 4.9314024598766171], [28.820316178767477, 1.3050055568589718]], [[77.79872786306737, 49.006304087499473],
[6.0837158518704957, 61.062626053957885]], [[54.00367327313819, 52.290800103947532], [28.218888543563132, 55.69049695930471]],
[[70.540276579110611, 10.438534795340397], [41.668397105605507, 16.209971253412206]], [[47.267330103680038,
54.403296082231499], [38.961522648812213, 76.555371480099396]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[51.938496576912861, 83.925735699153535], [1.6169120969879085, 6.9041394256440052]],
[[88.150381953863842, 98.403414431124375], [61.500668372426269, 88.227022389639899]], [[97.467337646036611,
69.782208819109613], [29.536218217059314, 56.802080857103853]], [[61.217789432766921, 4.925020459063683], [6.0472249368221469,
12.935582213769482]], [[62.146044807251613, 55.471641138480706], [81.292189922751476, 63.742035945518467]],
[[4.0955522013947023, 30.661458256155598], [17.597603579662788, 9.30923552928299]]]))
res=log(arg)
ref=Data(numpy.array([[[3.0772898048218194, 0.42409663306219014], [4.2503291327306743, 4.5975608405348396]],
[[4.3631225573590209, 1.5956234222193697], [3.3610805613220047, 0.26620729889507122]], [[4.354125079676205,
3.8919489446822295], [1.8056156691430751, 4.1118999941440908]], [[3.9890520678274091, 3.9568204493632884], [3.3399915602331363,
4.0198095212339595]], [[4.2561838456835561, 2.345504227331265], [3.7297429783063549, 2.7856265623599672]],
[[3.8558193613754197, 3.996424741773315], [3.6625745603723487, 4.3380142892056002]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[3.9500602604347153, 4.4299323089778522], [0.48051821732876043, 1.9321211484850926]],
[[4.4790442418335141, 4.5890755029594903], [4.1190480425977469, 4.4799132923888827]], [[4.5795173233770798, 4.245379089300493],
[3.3856172464655256, 4.039572959864234]], [[4.1144378242858997, 1.5943284287400294], [1.7995994786846001, 2.5599818253532565]],
[[4.1294871764666654, 4.0158719196846624], [4.3980499470284302, 4.1548442498897815]], [[1.4099015559293309, 3.423006434483387],
[2.8677627325270256, 2.2310069750585604]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log_taggedData_rank4(self):
arg=Data(numpy.array([[[[31.101305125560753, 98.170407256844626, 89.495246884407379, 99.98908850739052],
[2.4049273930600767, 36.880499975611961, 16.353617842398464, 58.781248047924151], [72.016717419675047, 88.390811768491815,
58.246083798359486, 66.583340970007129]], [[20.465803140117011, 24.0036287870054, 88.198587567358629, 48.083853917268023],
[32.183002238023384, 52.361813109969468, 91.165656746391832, 24.096203766223894], [34.219537794626028, 27.966364691635807,
48.391457442108091, 21.127135955159684]]], [[[92.173901880553899, 84.090306966759115, 68.663513749469857, 28.930103696589871],
[76.193472611006214, 23.122185422458525, 52.911657222861116, 25.431379093778077], [48.1123157662987, 63.084679424916168,
88.227262797374976, 25.223940757757774]], [[77.883594744394102, 4.766542541594764, 67.914391833582812, 44.354444036844214],
[43.449846921835778, 24.389274109879604, 52.005422364971146, 90.067558807201308], [16.219814551493748, 93.953208312531657,
89.304393662856739, 57.450106876298889]]], [[[83.17121819234076, 3.0876023794315675, 13.178062484577275, 32.720549483090331],
[28.923086723062763, 48.413131454470019, 19.329222518853427, 49.531895870836308], [61.750663719317927, 25.293283597003178,
14.112470421243229, 93.044089674063756]], [[64.150504828677711, 10.388481675167892, 60.095156109859765, 94.542246846329334],
[14.28716150453554, 22.491472839959545, 37.072742949787475, 56.544755150434312], [47.953455399965009, 6.8295119322974971,
81.092486719769227, 88.957569057433133]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[3.3654654280794065, 31.448597851056814, 25.912141884195076, 97.838879435572466],
[61.609361018170169, 23.411945067093868, 77.425261409500308, 49.641325811406979], [1.5957777077323345, 94.738957291578956,
26.540677293555149, 14.644250966443629]], [[94.393421059841273, 44.913143788307792, 48.37457688577048, 24.936916903319691],
[27.135697963907795, 7.1178102866629755, 82.53709971518164, 27.34969678954003], [84.534447098035358, 70.271477764969092,
91.498492500534539, 98.035238969369871]]], [[[23.350304037279159, 73.165148854851068, 20.805692503306975, 70.60969981889366],
[76.636921854602633, 99.536056139752134, 36.05432002833745, 68.437860465731404], [97.395060152216843, 83.220325374523995,
84.216569453631237, 41.096858608280385]], [[7.3882123001998607, 0.26399485183249632, 58.100940249092623, 11.539242598340968],
[83.194333821216958, 65.589029104330493, 18.19752451210422, 29.394627656416741], [53.733171964816421, 57.534725114847042,
65.718206728755177, 8.3980661515725163]]], [[[22.27274452496971, 27.206143810199485, 94.515538865049535, 5.403807179524474],
[69.179637950013159, 84.701966892623375, 79.155529928216239, 94.704415753621575], [11.066488989252838, 7.6878341916531605,
69.879561161866874, 42.170520250872158]], [[69.119995601366483, 97.630776657501471, 97.832335448609641, 60.475862682733805],
[43.772437615973402, 26.522975367447277, 67.711453522387615, 83.932152243212755], [72.510806999393893, 10.554199212907934,
45.076886402670027, 87.39127569037376]]]]))
res=log(arg)
ref=Data(numpy.array([[[[3.4372497837567777, 4.58670481818156, 4.4941855164337712, 4.6050610651085302],
[0.87751971310618737, 3.6076829554189502, 2.7944491476300444, 4.073822893292145], [4.2768982784493446, 4.4817680249710827,
4.0646768592562585, 4.1984544106717419]], [[3.0187553537439253, 3.1782050183770179, 4.4795909489089949, 3.8729464433637721],
[3.4714384323655101, 3.9581775682108784, 4.5126782553964633, 3.1820543080153221], [3.5327967614945712, 3.3310025270698476,
3.8793232990147835, 3.0505582784353522]]], [[[4.5236770306297025, 4.4318913042931216, 4.2292179627974305, 3.3648827033780897],
[4.3332757977505798, 3.1407925645297952, 3.9686236779689277, 3.2359838089418784], [3.8735381893751741, 4.1444779417295763,
4.4799160172616155, 3.227793573594179]], [[4.3552153369168716, 1.5616212080338248, 4.2182479684170557, 3.7922129074975159],
[3.771607328617411, 3.1941434499923838, 3.9513479893944825, 4.5005600421442526], [2.7862336152984626, 4.5427968744252256,
4.4920506878239275, 4.0509168646289595]]], [[[4.4209013527895706, 1.1273948607343995, 2.5785535140631213, 3.4880033050865178],
[3.3646401247575981, 3.8797710879671699, 2.9616180709345326, 3.9026168230902107], [4.1231047239413003, 3.2305388900157679,
2.6470588335475065, 4.5330734633284591]], [[4.1612319605346206, 2.340697661147535, 4.0959292411193209, 4.5490467911745931],
[2.6593613371192286, 3.1131362524867643, 3.6128820082150246, 4.0350324511420927], [3.8702308612975078, 1.9212532116292804,
4.3955903146568103, 4.4881595038860205]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.2135662679688048, 3.4483543987203631, 3.2547116574713217, 4.5833220382836135],
[4.1208138235747116, 3.1532463650693905, 4.3493131021467581, 3.9048236685092923], [0.46736120845767137, 4.5511252914352394,
3.2786785483054603, 2.6840478329453119]], [[4.5474713785601448, 3.8047304866159513, 3.8789744047643144, 3.2163493120536915],
[3.3008501290901413, 1.9626001341145185, 4.4132478857761619, 3.3087054425008704], [4.4371591092581104, 4.2523659947961194,
4.5163224967417603, 4.5853269953568034]]], [[[3.1506100048986219, 4.2927191990513212, 3.0352266273078725, 4.2571675262643494],
[4.3390789691634684, 4.6005199517870539, 3.5850266909019495, 4.2259261870761291], [4.578775492239302, 4.4214916133587838,
4.4333916887485154, 3.7159316856924915]], [[1.9998857977389439, -1.3318256766604444, 4.062181847015947, 2.445753626199946],
[4.4211792423976677, 4.1834084427007436, 2.9012855690133099, 3.3808119248720274], [3.9840305382637378, 4.0523886805608198,
4.1853760062592782, 2.1280014592936731]]], [[[3.1033637123365332, 3.303442823170931, 4.5487642534165529, 1.6871037383963188],
[4.2367065700465449, 4.4391388232611524, 4.3714146503200348, 4.5507606279710613], [2.4039215319374505, 2.0396391042727164,
4.2467732053644385, 3.7417214046724601]], [[4.2358440608581605, 4.5811927783170869, 4.5832551507041686, 4.1022443215077606],
[3.779004341129665, 3.278011352367117, 4.2152553461462992, 4.4300087610783878], [4.2837356128125892, 2.3565238103888038,
3.8083696185225007, 4.4703954572028213]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sign_taggedData_rank0(self):
arg=Data(-27.6238811345,self.functionspace)
arg.setTaggedValue(1,-26.6188411821)
res=sign(arg)
ref=Data(-1.0,self.functionspace)
ref.setTaggedValue(1,-1.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sign_taggedData_rank1(self):
arg=Data(numpy.array([65.988765188781628, 43.633425826032123]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-43.177017978677057, 18.498142369576271]))
res=sign(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-1.0, 1.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sign_taggedData_rank2(self):
arg=Data(numpy.array([[-78.167710802301741, -55.033204362154265, 37.991255141785302, 50.222020893267029,
95.714640431707807], [-81.178579764161256, 21.860275678254681, 91.336894263942668, 37.932598260023099, -87.965842239718057],
[63.15669717506313, 80.395599270502714, 58.958976516236106, -19.250836112072108, -48.102635913480874], [-98.409552362349558,
29.763756955023496, -70.007046431425664, 16.56379790064571, -41.607232959589481]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[74.159820400094588, -19.916385054691645, 15.40425479416308, -45.475971811633698,
-27.911268074280457], [-29.786605106833306, -82.877167864945875, -84.972063657379977, -96.028192775161585,
-91.132164399088751], [58.162335820187224, -36.266848654009443, -72.489996854551606, 7.6308625158186771, 25.612300558077663],
[93.912536630409363, 0.74947614971907228, 11.966503685953754, -88.781531775281678, -95.942997369506429]]))
res=sign(arg)
ref=Data(numpy.array([[-1.0, -1.0, 1.0, 1.0, 1.0], [-1.0, 1.0, 1.0, 1.0, -1.0], [1.0, 1.0, 1.0, -1.0, -1.0], [-1.0, 1.0,
-1.0, 1.0, -1.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.0, -1.0, 1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0], [1.0, -1.0, -1.0, 1.0,
1.0], [1.0, 1.0, 1.0, -1.0, -1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sign_taggedData_rank3(self):
arg=Data(numpy.array([[[40.659064171526694, -30.296135196623325], [65.135534658139875, -3.189213180563371]],
[[-16.553351525146027, -61.720967726757102], [76.972713494862035, 99.275215333914559]], [[47.475762989245681,
-97.393738249661268], [29.171397306032645, -63.642498879346746]], [[-65.809714821242551, 25.104527515218038],
[-25.908107285024215, -16.761112108721733]], [[-91.771675890562236, -30.217560827961364], [57.01823721886862,
13.089158046532233]], [[-9.2038411577464814, -51.536713875708799], [24.738016649301201,
-43.097223742291945]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[75.518086628986168, 5.7702731728101782], [-42.11765774528169, -9.9313715009520962]],
[[-33.56415502329628, 12.290219019430509], [-71.076843320533584, -48.936347244622212]], [[-29.458297241970527,
-0.79873463559410141], [56.480807815607648, 77.468899350274796]], [[-15.657215310960197, -97.911217676078493],
[97.233823754667782, 28.179624489186494]], [[-38.154815907369802, -2.8953583985458664], [-94.411611022922287,
-89.520621976287586]], [[20.02986172489021, 45.555499658943972], [-92.298172881010984, 50.848484074958037]]]))
res=sign(arg)
ref=Data(numpy.array([[[1.0, -1.0], [1.0, -1.0]], [[-1.0, -1.0], [1.0, 1.0]], [[1.0, -1.0], [1.0, -1.0]], [[-1.0, 1.0],
[-1.0, -1.0]], [[-1.0, -1.0], [1.0, 1.0]], [[-1.0, -1.0], [1.0, -1.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.0, 1.0], [-1.0, -1.0]], [[-1.0, 1.0], [-1.0, -1.0]], [[-1.0, -1.0], [1.0, 1.0]],
[[-1.0, -1.0], [1.0, 1.0]], [[-1.0, -1.0], [-1.0, -1.0]], [[1.0, 1.0], [-1.0, 1.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sign_taggedData_rank4(self):
arg=Data(numpy.array([[[[48.404617181522724, 27.642908048648465, -32.063735049783858, -25.287287320564673],
[35.529509315351362, 20.52958828294021, 50.132503108908566, 21.961444483043181], [-30.536862467175467, -73.366944239336476,
-1.9104283260391099, -40.116305423450392]], [[-3.2592728125080583, -66.978916413183839, 51.039430417752982,
34.318446063538232], [-10.778480760564307, 34.381886410487425, -96.662586157020499, 23.49291212424454], [33.936269866253809,
6.0438240456580417, 53.991390973572862, 34.843592016698238]]], [[[-52.275180577618798, 15.674362624980304, 46.042809742277655,
38.412209266363305], [66.461375379834692, 45.821627659544617, 58.528372762759147, -77.609658246727861], [-91.311967332091655,
62.061963370741864, -42.381631148565965, -19.376230129856737]], [[-82.817924353598301, -94.396836339797801,
-80.332788125711602, -53.122903800926544], [58.309151553617909, -63.690512047675661, 12.750432515234706, 88.616992933489428],
[-76.463210395801909, -88.55862414809792, -53.122216991054394, 94.306145635218115]]], [[[31.191484321029691,
33.483202066627882, -68.553556516172563, -30.761725450809905], [39.954033622863392, 31.391308803793095, 7.0924416508365056,
82.108147705338354], [28.677362945828122, 76.875499532889648, -98.899773427430574, 63.640543048776806]], [[48.003219667446018,
32.816178561644875, -47.97394425834738, 64.03620964542236], [22.449712578557794, 72.880134481879196, -66.599797223033192,
-95.855372244240456], [-5.2195963768147777, 53.688991692833952, -4.6935389526849463,
-20.020330663766899]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-63.279749648503802, -91.258116713624531, -34.456755905701655, -68.15939250069377],
[-42.719644685839995, -50.608567888041001, 86.926567471745585, 85.713364201437258], [-28.138127931660478, -48.833678656434088,
92.324131209205206, 46.337966629590767]], [[42.90425842608937, 64.851867297440293, -44.286214357511192, -83.350387738656664],
[81.716073201887639, -81.864272503868293, 92.45506396473931, 57.706464829259602], [-91.437108351660243, 53.053916668830283,
71.118625640502444, -27.194756979681543]]], [[[48.738615887994285, -2.8909177216855966, -26.101711802319798,
12.384670566250364], [-59.707938829568683, 7.1712734206349751, 13.096740235902374, 13.95432941544199], [67.638350545270868,
-69.038896120399571, -52.761020748111505, -34.828120061695998]], [[-40.401312312884819, -58.575266259290814,
-5.6760646716001304, 92.205219596258189], [87.355330242760971, 40.300165196433568, -55.950410136680517, 33.57412513030539],
[-99.413320460986569, 85.272736206140081, -8.649704146529686, -72.352005495304866]]], [[[76.119465279689791,
42.566334567806138, -50.386490732119427, 71.20528114907242], [61.744996594644761, 22.082948637093295, 78.339113397478116,
-49.481789958643674], [-96.910012358949714, 21.340439990309633, 92.448839100352387, -11.980830731257086]],
[[48.862626595701954, 89.576908309497242, -24.930909752705006, -56.400828022332483], [70.708156511024811, -52.976842818709493,
96.644726353542865, 68.041659790587545], [96.085623722167952, 49.460250235353953, 48.149498918497216, 57.854757467958734]]]]))
res=sign(arg)
ref=Data(numpy.array([[[[1.0, 1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0]], [[-1.0, -1.0, 1.0, 1.0],
[-1.0, 1.0, -1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[-1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, -1.0], [-1.0, 1.0, -1.0, -1.0]],
[[-1.0, -1.0, -1.0, -1.0], [1.0, -1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, 1.0]]], [[[1.0, 1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, -1.0, 1.0]], [[1.0, 1.0, -1.0, 1.0], [1.0, 1.0, -1.0, -1.0], [-1.0, 1.0, -1.0, -1.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, 1.0, 1.0], [-1.0, -1.0, 1.0, 1.0]], [[1.0,
1.0, -1.0, -1.0], [1.0, -1.0, 1.0, 1.0], [-1.0, 1.0, 1.0, -1.0]]], [[[1.0, -1.0, -1.0, 1.0], [-1.0, 1.0, 1.0, 1.0], [1.0, -1.0,
-1.0, -1.0]], [[-1.0, -1.0, -1.0, 1.0], [1.0, 1.0, -1.0, 1.0], [-1.0, 1.0, -1.0, -1.0]]], [[[1.0, 1.0, -1.0, 1.0], [1.0, 1.0,
1.0, -1.0], [-1.0, 1.0, 1.0, -1.0]], [[1.0, 1.0, -1.0, -1.0], [1.0, -1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank0(self):
arg=Data(-14.3673757927,self.functionspace)
arg.setTaggedValue(1,-91.0616949648)
res=abs(arg)
ref=Data(14.3673757927,self.functionspace)
ref.setTaggedValue(1,91.0616949648)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank1(self):
arg=Data(numpy.array([-81.821732775420642, -68.22226512766818]),self.functionspace)
arg.setTaggedValue(1,numpy.array([21.333617426834195, 10.209481057564346]))
res=abs(arg)
ref=Data(numpy.array([81.821732775420642, 68.22226512766818]),self.functionspace)
ref.setTaggedValue(1,numpy.array([21.333617426834195, 10.209481057564346]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank2(self):
arg=Data(numpy.array([[-35.703961827623615, 94.271132011685381, -77.391701661321079, -48.396751261576078,
-89.628632351273765], [49.30062196572834, -45.716685546575796, -91.97360399287524, -46.086717554689407, 94.50160817876062],
[23.260490557882292, -46.121623208221905, 64.433592032582311, 18.144341652350775, -44.21085548471779], [-61.083601852216219,
85.575046878129143, 52.75009956117529, 97.008285145570085, 56.751065315172809]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-51.972473313741155, -55.497035445328713, 62.267323877673789, 18.670956133573171,
13.711215778429931], [-48.087336536814519, -76.786375607585896, 37.410127192208563, 87.684260266087875, -26.905076717599144],
[16.189496788130981, 87.750429072332139, -36.767204229576599, -71.524650024934132, 81.291275301664541], [7.3770859265969762,
-24.93630589052367, 61.708601944027265, 89.294133020898926, -2.7788897536858315]]))
res=abs(arg)
ref=Data(numpy.array([[35.703961827623615, 94.271132011685381, 77.391701661321079, 48.396751261576078,
89.628632351273765], [49.30062196572834, 45.716685546575796, 91.97360399287524, 46.086717554689407, 94.50160817876062],
[23.260490557882292, 46.121623208221905, 64.433592032582311, 18.144341652350775, 44.21085548471779], [61.083601852216219,
85.575046878129143, 52.75009956117529, 97.008285145570085, 56.751065315172809]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[51.972473313741155, 55.497035445328713, 62.267323877673789, 18.670956133573171,
13.711215778429931], [48.087336536814519, 76.786375607585896, 37.410127192208563, 87.684260266087875, 26.905076717599144],
[16.189496788130981, 87.750429072332139, 36.767204229576599, 71.524650024934132, 81.291275301664541], [7.3770859265969762,
24.93630589052367, 61.708601944027265, 89.294133020898926, 2.7788897536858315]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank3(self):
arg=Data(numpy.array([[[-73.879162728531952, 53.891922757125315], [-30.709972491596574, -49.27453562582631]],
[[99.200427899109769, -0.10455889631015225], [24.929977391825204, -25.196431617614095]], [[99.69470286180362,
49.629118870818502], [-18.286571682827372, -99.882333404908422]], [[94.596602624460871, -48.944752738316531],
[-86.357256849018469, 94.554119229106021]], [[37.481086962966259, 84.979891468391372], [64.015940250013614,
-48.600306234165757]], [[-1.3540803820464049, 43.87503589064076], [24.242456069744136,
86.552268702416399]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[37.413937980923265, -0.28411691206147793], [33.044360612769196, -66.03355173059704]],
[[37.536911848971073, 0.023208526118992268], [-26.527789666862972, -33.2402194708271]], [[55.327425103705878,
-84.395066891225468], [45.013227563401301, -95.875031468356525]], [[64.760193848108571, -73.302359966808424],
[54.095816937340203, 37.527678340113113]], [[-76.71381733348575, -39.352383403035063], [80.080299993848996,
0.010359221408108965]], [[-96.050890564474372, -42.823985894886071], [3.4476034725966258, -36.523928707662435]]]))
res=abs(arg)
ref=Data(numpy.array([[[73.879162728531952, 53.891922757125315], [30.709972491596574, 49.27453562582631]],
[[99.200427899109769, 0.10455889631015225], [24.929977391825204, 25.196431617614095]], [[99.69470286180362,
49.629118870818502], [18.286571682827372, 99.882333404908422]], [[94.596602624460871, 48.944752738316531], [86.357256849018469,
94.554119229106021]], [[37.481086962966259, 84.979891468391372], [64.015940250013614, 48.600306234165757]],
[[1.3540803820464049, 43.87503589064076], [24.242456069744136, 86.552268702416399]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[37.413937980923265, 0.28411691206147793], [33.044360612769196, 66.03355173059704]],
[[37.536911848971073, 0.023208526118992268], [26.527789666862972, 33.2402194708271]], [[55.327425103705878,
84.395066891225468], [45.013227563401301, 95.875031468356525]], [[64.760193848108571, 73.302359966808424],
[54.095816937340203, 37.527678340113113]], [[76.71381733348575, 39.352383403035063], [80.080299993848996,
0.010359221408108965]], [[96.050890564474372, 42.823985894886071], [3.4476034725966258, 36.523928707662435]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#!/usr/bin/env python
######################################################
## Edits ROMS masks using a GUI
## Nov 2014
## <EMAIL>
######################################################
import os
import wx
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg as FigureCanvas
from matplotlib.backends.backend_wxagg import NavigationToolbar2WxAgg as Navbar
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.path import Path
import scipy.io as sp
import netCDF4 as nc
from mpl_toolkits.basemap import Basemap
# TO-DO LIST: ====================================================
# - improve point selection based on find_lower_left_node
# - create better icons for mask/unmask area
# - resolve untoggle/toggle between mask/unmask functions
# - add support for other models (POM, SWAN, WW3)
# - move matplotlib toolbar to the lower part
# - add a wx.TaskBarIcon to show up on the unity launcher
# - display local depth of the pixel we are looking at
# - transform mask/unmask and mask_area and unmask_area in the same
# function, and figure out how to decide whether to mask or unmask
# ================================================================
# NICE TIP TO DEBUG THIS PROGRAM: ================================
# - comment out app.MainLoop at the last line of this script
# - ipython --gui=wx
# - run pyeditmask.py
# - trigger the events and check out the objects in the shell
# ================================================================
global currentDirectory
currentDirectory = os.getcwd()
PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))
DEFAULT_VMIN = 0
DEFAULT_VMAX=1.5
DEFAULT_CMAP = plt.cm.BrBG
DEFAULT_DEPTH_FOR_LAND = -50
# ROMS related objects ---------------------------------------------
class RomsGrid(object):
"""
Stores and manipulates netcdf ROMS grid file information
"""
def __init__(self,filename):
self.filename = filename
self.ncfile = nc.Dataset(filename, mode='r+')
self.lonr = self.ncfile.variables['lon_rho'][:]
self.latr = self.ncfile.variables['lat_rho'][:]
self.lonu = self.ncfile.variables['lon_u'][:]
self.latu = self.ncfile.variables['lat_u'][:]
self.lonv = self.ncfile.variables['lon_v'][:]
self.latv = self.ncfile.variables['lat_v'][:]
self.lonvert = self.ncfile.variables['lon_vert'][:]
self.latvert = self.ncfile.variables['lat_vert'][:]
self.h = self.ncfile.variables['h'][:]
self.maskr = self.ncfile.variables['mask_rho'][:]
self.masku = self.ncfile.variables['mask_u'][:]
self.maskv = self.ncfile.variables['mask_v'][:]
def uvp_mask(rfield):
Mp, Lp = rfield.shape
M = Mp - 1
L = Lp - 1
vfield = rfield[0:M,:] * rfield[1:Mp,:]
ufield = rfield[:,0:L] * rfield[:,1:Lp]
pfield = ufield[0:M,:] * ufield[1:Mp,:]
return ufield, vfield, pfield
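# Note (added for clarity, not in the original source): on an Arakawa C-grid
# a u/v/psi point is water only when both neighbouring rho points are water,
# which is what the elementwise products above encode. For example,
# rmask = [[1, 1, 0], [1, 1, 1]] yields ufield [[1, 0], [1, 1]],
# vfield [[1, 1, 0]] and pfield [[1, 0]].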
# -------------------------------------------------------------------
class App(wx.App):
def OnInit(self):
self.frame = Interface("PyEditMask 0.1.0", size=(1024,800))
self.frame.Show()
return True
class Interface(wx.Frame):
def __init__(self, title=wx.EmptyString, pos=wx.DefaultPosition,
size=wx.DefaultSize, style=wx.DEFAULT_FRAME_STYLE,
*args, **kwargs):
wx.Frame.__init__(self, None, -1, "PyEditMask 0.1.0", pos=pos,
size=size, style=style, *args, **kwargs)
# Initializing toolbar
self.toolbar = MainToolBar(self)
# BASIC LAYOUT OF THE NESTED SIZERS ======================
panel1 = wx.Panel(self, wx.ID_ANY, style=wx.SUNKEN_BORDER)
mplpanel = wx.Panel(self, wx.ID_ANY, style=wx.SUNKEN_BORDER)
mplpanel.SetBackgroundColour("WHITE")
# BOX 1 is the main sizer
box1 = wx.BoxSizer(wx.HORIZONTAL)
box1.Add(panel1, 1, wx.EXPAND)
box1.Add(mplpanel, 15, wx.EXPAND)
# BOX 2 is the inner sizer of the left big control panel
box2 = wx.BoxSizer(wx.VERTICAL)
# BOX 3 is the sizer of the right big parent panel(panel1), the one that will
# serve as base for two child panels which will hold
# the two matplotlib canvas's
box3 = wx.BoxSizer(wx.VERTICAL)
# panel 1 content ========================================
main_label = wx.StaticText(panel1, label=" ")
box2.Add(main_label, proportion=0, flag=wx.CENTER)
# set_land = wx.Button(panel1, label="Set Land", style=wx.ID_CANCEL)
# box2.Add(set_land, proportion=0, flag=wx.CENTER)
# set_land.Bind(wx.EVT_BUTTON, self.onSetLand)
# set_water = wx.Button(panel1, label="Set Water", style=wx.ID_CANCEL)
# box2.Add(set_water, proportion=0, flag=wx.CENTER)
# set_water.Bind(wx.EVT_BUTTON, self.onSetWater)
# mplpanel content ========================================
self.mplpanel = SimpleMPLCanvas(mplpanel)
box3.Add(self.mplpanel.canvas, 1, flag=wx.CENTER)
# FINAL LAYOUT CONFIGURATIONS ============================
self.SetAutoLayout(True)
panel1.SetSizer(box2)
# panel2.SetSizer(box4)
mplpanel.SetSizer(box3)
self.SetSizer(box1)
self.InitMenu()
self.Layout()
self.Centre()
# self.ShowModal()
def InitMenu(self):
menubar = wx.MenuBar()
fileMenu = wx.Menu()
fileMenu.Append(wx.ID_OPEN, u'&Open ROMS grid file')
fileMenu.Append(wx.ID_OPEN, u'&Open coastline file')
fileMenu.Append(wx.ID_OPEN, u'&Open bathymetry file')
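# NOTE: the three Open entries above share wx.ID_OPEN, so EVT_MENU bindings
# resolved by id cannot tell them apart; distinct ids (e.g. wx.NewId())
# would be needed for the coastline/bathymetry handlers to fire reliably.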
fileMenu.Append(wx.ID_SAVE, '&Save grid')
fileMenu.AppendSeparator()
qmi = wx.MenuItem(fileMenu, wx.ID_EXIT, '&Quit\tCtrl+W')
opf = wx.MenuItem(fileMenu, wx.ID_OPEN, '&Open\tCtrl+O')
opc = wx.MenuItem(fileMenu, wx.ID_OPEN, '&Open\tCtrl+O+C')
opb = wx.MenuItem(fileMenu, wx.ID_OPEN, '&Open\tCtrl+O+B')
svf = wx.MenuItem(fileMenu, wx.ID_SAVE, '&Save\tCtrl+S')
fileMenu.AppendItem(qmi)
# fileMenu.AppendItem(svf)
self.Bind(wx.EVT_MENU, self.OnQuit, qmi)
self.Bind(wx.EVT_MENU, self.toolbar.OnLoadGrid, opf)
self.Bind(wx.EVT_MENU, self.toolbar.OnLoadCoastline, opc)
self.Bind(wx.EVT_MENU, self.toolbar.OnLoadBathymetry, opb)
self.Bind(wx.EVT_MENU, self.toolbar.OnSaveGrid, svf)
menubar.Append(fileMenu, u'&PyEditMask')
self.SetMenuBar(menubar)
def OnQuit(self, e):
"""Fecha o programa"""
self.Close()
self.Destroy()
def OnCloseWindow(self, e):
self.Destroy()
class SimpleMPLCanvas(object):
"""docstring for SimpleMPLCanvas"""
def __init__(self, parent):
super(SimpleMPLCanvas, self).__init__()
self.parent = parent
self.plot_properties()
self.make_navbar()
def make_navbar(self):
self.navbar = Navbar(self.canvas)
self.navbar.SetPosition(wx.Point(0,0)) # this is not working !!
def plot_properties(self):
# Create matplotlib figure
self.fig = Figure(facecolor='w', figsize=(12,8))
self.canvas = FigureCanvas(self.parent, -1, self.fig)
self.ax = self.fig.add_subplot(111)
# tit = self.ax1.set_title("ROMS mask_rho", fontsize=12, fontweight='bold')
# tit.set_position([0.9, 1.05])
class MainToolBar(object):
def __init__(self, parent):
self.currentDirectory = os.getcwd()
self.parent = parent
self.toolbar = parent.CreateToolBar(style=1, winid=1,
name="Toolbar")
self.tools_params ={
'load_grid': (load_bitmap('grid.png'), u"Load grid",
"Load ocean_grd.nc ROMS grid netcdf file"),
'load_coastline': (load_bitmap('coast.png'), u"Load coastline",
"Load *.mat coastline file [lon / lat poligons]"),
'load_bathymetry': (load_bitmap('bathy.png'), u"Load bathy",
"Load ocean_grd.nc ROMS bathy netcdf file"),
'save_grid': (load_bitmap('save.png'), u"Apply and save",
"Save changes to ocean_grd.nc ROMS grid netcdf file"),
'set_land': (load_bitmap('land.png'), u"Set land",
"Set grid point to land"),
'set_land_area': (load_bitmap('land_area.png'), u"Set land area",
"Set poligon area to land"),
'set_water': (load_bitmap('water.png'), u"Set water",
"Set grid point to water"),
'set_water_area': (load_bitmap('water_area.png'), u"Set water area",
"Set poligon area to water"),
'settings': (load_bitmap('settings.png'), u"PyEditMask settings",
"PyEditMask configurations"),
'quit': (load_bitmap('exit.png'), u"Quit",
"Quit PyEditMask"),
}
self.createTool(self.toolbar, self.tools_params['load_grid'],
self.OnLoadGrid)
self.createTool(self.toolbar, self.tools_params['load_coastline'],
self.OnLoadCoastline)
self.createTool(self.toolbar, self.tools_params['load_bathymetry'],
self.OnLoadBathymetry)
self.createTool(self.toolbar, self.tools_params['save_grid'],
self.OnSaveGrid)
self.toolbar.AddSeparator()
self.mask_tool = self.createTool(self.toolbar, self.tools_params['set_land'],
self.OnSetLand, isToggle=True)
self.mask_area_tool = self.createTool(self.toolbar,
self.tools_params['set_land_area'],
self.OnSetLandArea, isToggle=True)
self.unmask_tool = self.createTool(self.toolbar, self.tools_params['set_water'],
self.OnSetWater, isToggle=True)
self.unmask_area_tool = self.createTool(self.toolbar,
self.tools_params['set_water_area'],
self.OnSetWaterArea, isToggle=True)
self.toolbar.AddSeparator()
self.createTool(self.toolbar, self.tools_params['settings'],
self.OnSettings)
self.createTool(self.toolbar, self.tools_params['quit'],
self.parent.OnQuit)
self.toolbar.Realize()
def createTool(self, parent, params, evt, isToggle=False):
tool = parent.AddTool(wx.NewId(), bitmap=params[0], shortHelpString=params[1],
longHelpString=params[2], isToggle=isToggle)
self.parent.Bind(wx.EVT_TOOL, evt, id=tool.GetId())
return tool
def OnLoadGrid(self, evt):
openFileDialog = wx.FileDialog(self.parent, "Open grid netcdf file [*.nc]",
"/static/hindcast/roms", " ",
"netcdf files (*.nc)|*.nc",
wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
return # the user changed idea...
filename = openFileDialog.GetPath()
grd = RomsGrid(filename)
mplpanel = app.frame.mplpanel
ax = mplpanel.ax
self.pcolor = ax.pcolormesh(grd.lonvert, grd.latvert, grd.maskr,
vmin=DEFAULT_VMIN, vmax=DEFAULT_VMAX,
cmap=DEFAULT_CMAP)
ax.plot(grd.lonvert, grd.latvert, 'k', alpha=0.2)
ax.plot(grd.lonvert.transpose(), grd.latvert.transpose(), 'k', alpha=0.2)
ax.set_xlim([grd.lonvert.min(), grd.lonvert.max()])
ax.set_ylim([grd.latvert.min(), grd.latvert.max()])
ax.set_aspect('equal')
mplpanel.canvas.draw()
self.grd = grd
self.grd.hmin = grd.ncfile.variables['h'][:].min()
def OnLoadCoastline(self, evt):
mplpanel = app.frame.mplpanel
ax = mplpanel.ax
try:
m = Basemap( resolution='f', projection='cyl',
llcrnrlon=self.grd.lonvert.min(), urcrnrlon=self.grd.lonvert.max(),
llcrnrlat=self.grd.latvert.min(), urcrnrlat=self.grd.latvert.max() )
coasts = m.drawcoastlines(zorder=100, linewidth=0.0)
coasts_paths = coasts.get_paths()
for ipoly in xrange(len(coasts_paths)):
r = coasts_paths[ipoly]
# Convert into lon/lat vertices
polygon_vertices = [ (vertex[0],vertex[1]) for (vertex,code) in
r.iter_segments(simplify=False) ]
px = [polygon_vertices[i][0] for i in xrange(len(polygon_vertices))]
py = [polygon_vertices[i][1] for i in xrange(len(polygon_vertices))]
ax.plot(px, py, '-', linewidth=1.5, color='lightgray')
except AttributeError: # just in case a grid was not loaded before
ax.set_xlim([np.nanmin(self.grd.lonvert), np.nanmax(self.grd.lonvert)])
ax.set_ylim([np.nanmin(self.grd.latvert), np.nanmax(self.grd.latvert)])
import numpy as np
import matplotlib.pyplot as plt
def gaussian(x, mu, sig):
return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.)))
c = 342 # speed of sound
lx = 342/2 # length in meters
t = 2 # time in seconds
# TIME
Fs_t = 2000 # samples/second; the time sampling depends on the spatial sampling
# SPACE
Fs_x = 2 # samples/meter
num_div_x = int(lx*Fs_x) # divisions of all the space
# Simulation steps in Time
num_div_t = int(Fs_t*t)
delta_t = t / num_div_t
t_axis = np.arange(0, t, delta_t)
# number of divisions in x axis
delta_x = lx / num_div_x
x_axis = np.arange(0, lx, delta_x)
# force signal
t_values = np.arange(0, num_div_t, 1)
x_values = np.arange(0, num_div_x, 1)
x_n = np.zeros([num_div_t, num_div_x])
k_x = 40
# x_n[:, 0] = np.cos((np.pi * k_x / num_div_x) * x_values)
k_t = 1 / ((2 * lx) / (k_x*c))
A = 100
# pos_x = int(num_div_x/2)
pos_x = 0
# x_n[:, pos_x] = A * np.sin((2*np.pi * k_t / Fs_t) * t_values)
offset = 30
x_n[:, pos_x] = A*gaussian(t_values, 38 + offset, 9) - A*gaussian(t_values, 74 + offset, 9)
# x_n[:, pos_x + 100] = gaussian(t_values, 5, 1) - gaussian(t_values, 10, 1)
# plt.figure()
# plt.imshow(x_n, cmap='hot')
plt.figure()
plt.plot(x_n[:, pos_x])
print("num_div_t %i " % num_div_t)
print("num_div_x %i " % num_div_x)
print("delta t: %f" % delta_t)
print("CFL Condition %f" % (delta_x/((3**0.5)*c)))
# Init Simulation time-stepping scheme----
p_n_minus1 = np.zeros(shape=[num_div_x, 1])
p_n = np.zeros(shape=[num_div_x, 1])
p_n_plus1 = np.zeros(shape=[num_div_x, 1])
k_matrix = np.zeros(shape=[num_div_x, num_div_x])
fdtd_kernel_2 = np.array([1, -2, 1])
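# A minimal sketch, assuming the standard 1-D leapfrog update
# p_{n+1} = 2*p_n - p_{n-1} + lam2*laplacian(p_n) driven by the [1, -2, 1]
# kernel (an illustration, not the original continuation of this script):
lam2 = (c * delta_t / delta_x) ** 2  # squared Courant number, < 1 when stable
for n in range(1, num_div_t - 1):
    lap = np.convolve(p_n[:, 0], fdtd_kernel_2, mode='same')  # discrete Laplacian
    p_n_plus1[:, 0] = 2.0 * p_n[:, 0] - p_n_minus1[:, 0] + lam2 * lap + x_n[n, :]
    p_n_minus1[:, 0] = p_n[:, 0]
    p_n[:, 0] = p_n_plus1[:, 0]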
#!/usr/bin/env python
"""
Program to play a given wav audio file and
print scaled amplitude values to a data file in real time.
<NAME>, 8/14/16
see License file for license details
Tested on Python version 2.7.12, Ubuntu 16.04.1 LTS
"""
import sys
import wave
import logging as log
import numpy
import pyaudio
try:
import mute_alsa #not essential, just to mute some error messages
except:
pass
log.basicConfig(format="%(levelname)s: %(message)s")
## file names
FILENAME = "chopin.wav" #audio file
DATA_FILENAME = "light_amp.dat" #file to write amp data to
#number of audio samples to process at each time
CHUNKSIZE = 1024
#scaling parameters
OFFSET = 3000
GAIN = 12
#filter parameter
FILTER_WEIGHT = 0.5
#constants related to amplitude
MAX_AMP = 65535
MIN_AMP = 1
HALF_AMP = 32768
def filter(ar, a_old):
""" scales, filters, and clamps raw amplitude values """
a = (ar-OFFSET)*GAIN #scaling
a = FILTER_WEIGHT*a_old + (1-FILTER_WEIGHT)*a #filter
a = max(min(int(a), MAX_AMP), MIN_AMP) #clamp
a_old = a #last value
return a, a_old
def write_to_file(a):
""" write amplitude to data file """
try:
f = open(DATA_FILENAME, 'w')
f.write(str(a))
f.close()
except:
log.error("Data file I/O error.")
sys.exit(2)
# open wav file
try:
wf = wave.open(FILENAME, 'rb')
except:
log.error("Unable to open wav file: %s ... Quitting!" % FILENAME)
sys.exit(1)
# open and set up audio stream
p = pyaudio.PyAudio()
stream = p.open(format =
p.get_format_from_width(wf.getsampwidth()),
channels = wf.getnchannels(),
rate = wf.getframerate(),
output = True)
# read the first chunk
data = wf.readframes(CHUNKSIZE)
amp_old = HALF_AMP
# keep reading the file and writing the amp from it to file
while data != '':
stream.write(data) #play the sound
npdata = numpy.fromstring(data, dtype=numpy.int16) #read it as array
raw_amp = int(numpy.max(npdata))
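# A plausible loop continuation, reconstructed from the helpers defined
# above (an assumption, not the original source):
amp, amp_old = filter(raw_amp, amp_old)  # scale, smooth, and clamp
write_to_file(amp)                       # publish the amplitude value
data = wf.readframes(CHUNKSIZE)          # advance to the next chunk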
"""
Test core functionality of normaliser objects
"""
import numpy
import sys
import unittest
sys.path.append("..")
from nPYc.utilities.normalisation._nullNormaliser import NullNormaliser
from nPYc.utilities.normalisation._totalAreaNormaliser import TotalAreaNormaliser
from nPYc.utilities.normalisation._probabilisticQuotientNormaliser import ProbabilisticQuotientNormaliser
class test_utilities_normalisation(unittest.TestCase):
"""
Test class for the normalisation objects. Contains tests covering the basic functionality of individual objects
and their interaction and usage inside the nPYc Dataset objects.
"""
def setUp(self):
# Simulate some data
self.noSamp = numpy.random.randint(5, high=50, size=None)
self.noFeat = numpy.random.randint(60, high=200, size=None)
self.X = numpy.random.randn(self.noSamp, self.noFeat)
# Object test
def test_nullNormaliser(self):
"""
Check that the NullNormaliser works
"""
# Check if output data = input data (its not supposed to do anything)
numpy.testing.assert_array_equal(self.X, NullNormaliser().normalise(self.X), err_msg="Null Normaliser not working as expected")
self.assertEqual(1, NullNormaliser().normalisation_coefficients)
def test_nullNormaliser_eq_(self):
"""
Check that the NullNormaliser equality testing works
"""
with self.subTest():
norm = NullNormaliser()
norm2 = NullNormaliser()
self.assertEqual(norm, norm2)
pqn = ProbabilisticQuotientNormaliser()
tanorm = TotalAreaNormaliser(keepMagnitude=False)
tanorm2 = TotalAreaNormaliser(keepMagnitude=True)
notEqualList = [1, True, 'str', 1.1, list(), dict(), tanorm, tanorm2, pqn]
norm = NullNormaliser()
for comparison in notEqualList:
with self.subTest(msg=comparison):
self.assertNotEqual(norm, comparison)
class test_utilities_totalAreaNormaliser(unittest.TestCase):
def setUp(self):
# Simulate some data
self.noSamp = numpy.random.randint(5, high=50, size=None)
self.noFeat = numpy.random.randint(60, high=200, size=None)
self.X = numpy.random.randn(self.noSamp, self.noFeat)
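# For context, a minimal numpy sketch (not the nPYc implementation) of the
# total-area idea behind TotalAreaNormaliser: scale each sample (row) so
# its feature intensities sum to a constant target area.
X = numpy.abs(numpy.random.randn(4, 6))
target = 100.0  # hypothetical target area
Xn = X / X.sum(axis=1, keepdims=True) * target
assert numpy.allclose(Xn.sum(axis=1), target)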
from warnings import catch_warnings
from warnings import filterwarnings
from warnings import warn
import numpy as np
from numpy import empty
from numpy import vstack
from pydmd import DMD
from pydmd import DMDBase
from sklearn.base import BaseEstimator
from sklearn.metrics import r2_score
from sklearn.pipeline import Pipeline
from sklearn.utils.validation import check_is_fitted
from .common import validate_input
from .observables import Identity
from .observables import TimeDelay
from .regression import BaseRegressor
from .regression import DMDc
from .regression import DMDRegressor
class Koopman(BaseEstimator):
"""
Discrete-Time Koopman class.
Parameters
----------
observables: observables object, optional \
(default :class:`pykoopman.observables.Identity`)
Map(s) to apply to raw measurement data before estimating the
Koopman operator.
Must extend :class:`pykoopman.observables.BaseObservables`.
The default option, :class:`pykoopman.observables.Identity` leaves
the input untouched.
regressor: regressor object, optional (default ``DMD``)
The regressor used to learn the Koopman operator from the observables.
``regressor`` can either extend the
:class:`pykoopman.regression.BaseRegressor`, or ``pydmd.DMDBase``.
In the latter case, the pydmd object must have both a ``fit``
and a ``predict`` method.
quiet: boolean, optional (default False)
Whether or not warnings should be silenced during fitting.
Attributes
----------
model: sklearn.pipeline.Pipeline
Internal representation of the forward model.
Applies the observables and the regressor.
n_input_features_: int
Number of input features before computing observables.
n_output_features_: int
Number of output features after computing observables.
n_control_features_: int
Number of control features used as input to the system.
time: dictionary
Time vector properties.
"""
def __init__(self, observables=None, regressor=None, quiet=False):
if observables is None:
observables = Identity()
if regressor is None:
regressor = DMD(svd_rank=2)
if isinstance(regressor, DMDBase):
regressor = DMDRegressor(regressor)
elif not isinstance(regressor, (BaseRegressor)):
raise TypeError("Regressor must be from valid class")
self.observables = observables
self.regressor = regressor
self.quiet = quiet
def fit(self, x, u=None, dt=1):
"""
Fit the Koopman model by learning an approximate Koopman operator.
Parameters
----------
x: numpy.ndarray, shape (n_samples, n_features)
Measurement data to be fit. Each row should correspond to an example
and each column a feature. It is assumed that examples are
equi-spaced in time (i.e. a uniform timestep is assumed).
u: numpy.ndarray, shape (n_samples, n_control_features)
Control/actuation/external parameter data. Each row should correspond
to one sample and each column a control variable or feature.
The control variable may be amplitude of an actuator or an external,
time-varying parameter. It is assumed that samples are equi-spaced
in time (i.e. a uniform timestep is assumed) and correspond to the
samples in x.
dt: float, (default=1)
Time step between samples
Returns
-------
self: returns a fit ``Koopman`` instance
"""
x = validate_input(x)
if u is None:
self.n_control_features_ = 0
elif not isinstance(self.regressor, DMDc):
raise ValueError(
"Control input u was passed, but self.regressor is not DMDc"
)
steps = [
("observables", self.observables),
("regressor", self.regressor),
]
self.model = Pipeline(steps)
action = "ignore" if self.quiet else "default"
with catch_warnings():
filterwarnings(action, category=UserWarning)
if u is None:
self.model.fit(x)
else:
self.model.fit(x, u)
self.n_input_features_ = self.model.steps[0][1].n_input_features_
self.n_output_features_ = self.model.steps[0][1].n_output_features_
if hasattr(self.model.steps[1][1], "n_control_features_"):
self.n_control_features_ = self.model.steps[1][1].n_control_features_
self.time = dict(
[
("tstart", 0),
("tend", dt * (self.model.steps[1][1].n_samples_ - 1)),
("dt", dt),
]
)
return self
def predict(self, x, u=None):
"""
Predict the state one timestep in the future.
Parameters
----------
x: numpy.ndarray, shape (n_samples, n_input_features)
Current state.
u: numpy.ndarray, shape (n_samples, n_control_features), \
optional (default None)
Time series of external actuation/control.
Returns
-------
y: numpy.ndarray, shape (n_samples, n_input_features)
Predicted state one timestep in the future.
"""
check_is_fitted(self, "n_output_features_")
return self.observables.inverse(self._step(x, u))
def simulate(self, x0, u=None, n_steps=1):
"""
Simulate an initial state forward in time with the learned Koopman
model.
Parameters
----------
x0: numpy.ndarray, shape (n_input_features,) or \
(n_consumed_samples + 1, n_input_features)
Initial state from which to simulate.
If using :code:`TimeDelay` observables, ``x0`` should contain
enough examples to compute all required time delays,
i.e. ``n_consumed_samples + 1``.
u: numpy.ndarray, shape (n_samples, n_control_features), \
optional (default None)
Time series of external actuation/control.
n_steps: int, optional (default 1)
Number of forward steps to be simulated.
Returns
-------
y: numpy.ndarray, shape (n_steps, n_input_features)
Simulated states.
Note that ``y[0, :]`` is one timestep ahead of ``x0``.
"""
check_is_fitted(self, "n_output_features_")
# Could have an option to only return the end state and not all
# intermediate states to save memory.
y = empty((n_steps, self.n_input_features_), dtype=self.koopman_matrix.dtype)
if u is None:
y[0] = self.predict(x0)
else:
y[0] = self.predict(x0, u[0])
if isinstance(self.observables, TimeDelay):
n_consumed_samples = self.observables.n_consumed_samples
for k in range(n_consumed_samples):
y[k + 1] = self.predict(vstack((x0[k + 1 :], y[: k + 1])))
for k in range(n_consumed_samples, n_steps - 1):
y[k + 1] = self.predict(y[k - n_consumed_samples : k + 1])
else:
if u is None:
for k in range(n_steps - 1):
y[k + 1] = self.predict(y[k])
else:
for k in range(n_steps - 1):
y[k + 1] = self.predict(y[k], u[k + 1])
return y
def score(self, x, y=None, cast_as_real=True, metric=r2_score, **metric_kws):
"""
Score the model prediction for the next timestep.
Parameters
----------
x: numpy.ndarray, shape (n_samples, n_input_features)
State measurements.
Each row should correspond to the system state at some point
in time.
If ``y`` is not passed, then it is assumed that the examples are
equi-spaced in time and are given in sequential order.
If ``y`` is passed, then this assumption need not hold.
y: numpy.ndarray, shape (n_samples, n_input_features), optional \
(default None)
State measurements one timestep in the future.
Each row of this array should give the corresponding row in x advanced
forward in time by one timestep.
If None, the rows of ``x`` are used to construct ``y``.
cast_as_real: bool, optional (default True)
Whether to take the real part of predictions when computing the score.
Many Scikit-learn metrics do not support complex numbers.
metric: callable, optional (default ``r2_score``)
The metric function used to score the model predictions.
metric_kws: dict, optional
Optional parameters to pass to the metric function.
Returns
-------
score: float
Metric function value for the model predictions at the next timestep.
"""
check_is_fitted(self, "n_output_features_")
x = validate_input(x)
if isinstance(self.observables, TimeDelay):
n_consumed_samples = self.observables.n_consumed_samples
# The user may pass a y that is too long; drop the first n_consumed_samples entries
if y is not None and len(y) == len(x):
warn(
f"The first {n_consumed_samples} entries of y were ignored because "
"TimeDelay obesrvables were used."
)
y = y[n_consumed_samples:]
else:
n_consumed_samples = 0
if y is None:
if cast_as_real:
return metric(
x[n_consumed_samples + 1 :].real,
self.predict(x[:-1]).real,
**metric_kws,
)
else:
return metric(
x[n_consumed_samples + 1 :], self.predict(x[:-1]), **metric_kws
)
else:
if cast_as_real:
return metric(y.real, self.predict(x).real, **metric_kws)
else:
return metric(y, self.predict(x), **metric_kws)
def get_feature_names(self, input_features=None):
"""
Get the names of the individual features constituting the observables.
Parameters
----------
input_features: list of string, length n_input_features, \
optional (default None)
String names for input features, if available. By default,
the names "x0", "x1", ... ,"xn_input_features" are used.
Returns
-------
output_feature_names: list of string, length n_output_features
Output feature names.
"""
check_is_fitted(self, "n_input_features_")
return self.observables.get_feature_names(input_features=input_features)
def _step(self, x, u=None):
"""
Map x one timestep forward in the space of observables.
Parameters
----------
x: numpy.ndarray, shape (n_samples, n_input_features)
State vectors to be stepped forward.
u: numpy.ndarray, shape (n_samples, n_control_features), \
optional (default None)
Time series of external actuation/control.
Returns
-------
X': numpy.ndarray, shape (n_samples, self.n_output_features_)
Observables one timestep after x.
"""
check_is_fitted(self, "n_output_features_")
if u is None or self.n_control_features_ == 0:
if self.n_control_features_ > 0:
# TODO: replace with u = 0 as default
raise TypeError(
"Model was fit using control variables, so u is required"
)
elif u is not None:
warn(
"Control variables u were ignored because control variables were"
" not used when the model was fit"
)
return self.model.predict(X=x)
else:
if not isinstance(self.regressor, DMDc):
raise ValueError(
"Control input u was passed, but self.regressor is not DMDc"
)
return self.model.predict(X=x, u=u)
@property
def koopman_matrix(self):
"""
The Koopman matrix K satisfying g(X') = g(X) * K
where g denotes the observables map and X' denotes x advanced
one timestep.
"""
check_is_fitted(self, "n_output_features_")
return self.model.steps[-1][1].coef_
@property
def state_transition_matrix(self):
"""
The state transition matrix A satisfies x' = Ax + Bu.
# TODO: consider whether we want to match sklearn and have A and B satisfy
# x' = xA + uB instead
"""
check_is_fitted(self, "model")
if not isinstance(self.regressor, DMDc):
raise ValueError(
"self.regressor is not DMDc, so object has no "
"state_transition_matrix"
)
return self.model.steps[-1][1].state_matrix_
@property
def control_matrix(self):
"""
The control matrix (or vector) B satisfies x' = Ax + Bu.
"""
check_is_fitted(self, "model")
if not isinstance(self.regressor, DMDc):
raise ValueError(
"self.regressor is not DMDc, so object has no control_matrix"
)
return self.model.steps[-1][1].control_matrix_
@property
def projection_matrix(self):
"""
The projection matrix into the low-dimensional subspace.
"""
check_is_fitted(self, "model")
if not isinstance(self.regressor, DMDc):
raise ValueError(
"self.regressor is not DMDc, so object has no projection_matrix"
)
return self.model.steps[-1][1].projection_matrix_
@property
def projection_matrix_output(self):
"""
The output projection matrix into the low-dimensional subspace.
"""
check_is_fitted(self, "model")
if not isinstance(self.regressor, DMDc):
raise ValueError(
"self.regressor is not DMDc, so object has no "
"projection_matrix_output"
)
return self.model.steps[-1][1].projection_matrix_output_
@property
def modes(self):
"""
Koopman modes
"""
check_is_fitted(self, "model")
return self.model.steps[-1][1].modes_
@property
def eigenvalues(self):
"""
Discrete-time Koopman eigenvalues obtained from spectral decomposition
of the Koopman matrix
"""
check_is_fitted(self, "model")
return self.model.steps[-1][1].eigenvalues_
@property
def frequencies(self):
"""
Oscillation frequencies of Koopman modes/eigenvectors
"""
check_is_fitted(self, "model")
dt = self.time["dt"]
return np.imag(np.log(self.eigenvalues) / dt) / (2 * np.pi)
# return self.model.steps[-1][1].frequencies_
@property
def eigenvalues_continuous(self):
"""
Continuous-time Koopman eigenvalues obtained from spectral decomposition
of the Koopman matrix
"""
check_is_fitted(self, "model")
dt = self.time["dt"]
return np.log(self.eigenvalues) / dt
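# ---------------------------------------------------------------------------
# Minimal usage sketch for the Koopman class above (illustrative only; the
# toy data and variable names are assumptions, not part of the library):
#
# import numpy as np
# t = np.linspace(0, 10, 500)
# x = np.column_stack([np.sin(t), np.cos(t)]) # (n_samples, n_features)
# model = Koopman().fit(x, dt=t[1] - t[0]) # default Identity observables + DMD
# x_next = model.predict(x) # one-step-ahead prediction
# traj = model.simulate(x[0], n_steps=50) # roll the model forward 50 steps
# print(model.eigenvalues, model.frequencies)
# ---------------------------------------------------------------------------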
import io
import numpy as np
import torch
from PIL import Image
from pkg_resources import resource_filename
from torchvision import transforms
from . import data_loader
from . import u2net
class BackgroundRemover:
"""
Creates a u2-net based background remover object.
"""
def __init__(self, small: bool = False):
"""
Keyword arguments:
:param small: True to use smaller model
"""
big_model_path = resource_filename('bgrm', 'resources/u2net.pth')
small_model_path = resource_filename('bgrm', 'resources/u2netp.pth')
self.model_path = small_model_path if small else big_model_path
self.net = u2net.U2NETP() if small else u2net.U2NET()
try:
if torch.cuda.is_available():
self.net.load_state_dict(torch.load(self.model_path))
self.net.to(torch.device("cuda"))
else:
self.net.load_state_dict(
torch.load(
self.model_path,
map_location="cpu",
)
)
except FileNotFoundError:
raise FileNotFoundError(
"Make sure models are stored in resources."
)
self.net.eval()
@staticmethod
def norm_pred(d):
ma = torch.max(d)
mi = torch.min(d)
dn = (d - mi) / (ma - mi)
return dn
@staticmethod
def preprocess(image):
label_3 = np.zeros(image.shape)
label = np.zeros(label_3.shape[0:2])
if 3 == len(label_3.shape):
label = label_3[:, :, 0]
elif 2 == len(label_3.shape):
label = label_3
if 3 == len(image.shape) and 2 == len(label.shape):
label = label[:, :, np.newaxis]
elif 2 == len(image.shape) and 2 == len(label.shape):
image = image[:, :, np.newaxis]
label = label[:, :, np.newaxis]
transform = transforms.Compose(
[data_loader.RescaleT(320), data_loader.ToTensorLab(flag=0)]
)
sample = transform({"imidx": np.array([0]), "image": image, "label": label})
return sample
def predict(self, item):
sample = self.preprocess(item)
with torch.no_grad():
if torch.cuda.is_available():
inputs_test = torch.cuda.FloatTensor(
sample["image"].unsqueeze(0).cuda().float()
)
else:
inputs_test = torch.FloatTensor(sample["image"].unsqueeze(0).float())
d1, d2, d3, d4, d5, d6, d7 = self.net(inputs_test)
pred = d1[:, 0, :, :]
prediction = self.norm_pred(pred).squeeze()
predict_np = prediction.cpu().detach().numpy()
img = Image.fromarray(predict_np * 255).convert("RGB")
del d1, d2, d3, d4, d5, d6, d7, pred, prediction, predict_np, inputs_test, sample
return img
@staticmethod
def naive_cutout(img, mask):
empty = Image.new("RGBA", img.size, "white")
cutout = Image.composite(img, empty, mask.resize(img.size, Image.LANCZOS))
return cutout
def remove(self, data):
img = Image.open(io.BytesIO(data)).convert("RGB")
mask = self.predict(np.array(img))
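# The record is truncated here in the source dump; a plausible continuation
# using the helpers defined above (a sketch, not the original author's code):
cutout = self.naive_cutout(img, mask)
return cutout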
import numpy as np
import pandas as pd
import ast
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
#------------------------------------------------------------------------------
N = len(pd.read_csv("../../data/case_1_spatial.csv"))
step_count = len(pd.read_csv("../../data/case_1_temporal.csv"))
print(N, step_count)
#------------------------------------------------------------------------------
e = np.empty(shape=(step_count, N), dtype=float)
f = np.empty(shape=(step_count, N), dtype=float)
t = np.empty(shape=(step_count, N), dtype=float)
tr = np.empty(shape=(step_count, N), dtype=float)
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D # noqa: F401 unused import
import numpy as np
#np.set_printoptions(threshold=np.nan)
import os, shutil
import glob
import platform
from utils import read, compute_voxelgrid_and_sceneflow, generate_numpy, PlyFile, create_dir
from config import Config
##################################################################################
system = platform.system()
cfg = Config(system=system)
print("Config init from ", cfg.dataset_path)
##################################################################################
# generate = input("Generate numpy files? ")
# if generate == "Y" or generate == "y":
# generate = True
# elif generate == "F" or generate == "f":
# generate = False
#
# print(generate)
generate = True
if generate:
if cfg.dataset == 'FlyingThings3D':
data_folder_split = "TRAIN"
letter = "A"
data_folders = []
elif cfg.dataset == 'Driving':
focallength = "15mm_focallength"
direction = "scene_forwards"
speed = "slow"
cam = "left"
## Get the "number" folders under the "letter" folder set above
## Since it's the same distribution for every feature (disparity, optical_flow, ...)
## we can just check one
for number in os.listdir(os.path.join(cfg.dataset_path, "frames_cleanpass", data_folder_split, letter)):
data_folders.append(os.path.join(data_folder_split, letter, number))
max_n_frames = -1
n_frames = 0
for data_folder in data_folders:
print(data_folder)
## Get PATHS of the COLOR frames
color_frame_paths = []
if cfg.dataset == 'FlyingThings3D':
for data_folder in data_folders:
files = glob.glob(os.path.join(cfg.dataset_path, "frames_cleanpass", data_folder, "left", "*"))
elif cfg.dataset == 'Driving':
files = glob.glob(os.path.join(cfg.dataset_path, "frames_cleanpass", focallength, direction, speed, cam, "*"))
for color_frame_path in sorted(files):
if n_frames == max_n_frames:
break
color_frame_paths.append(color_frame_path.replace('\\', '/'))
n_frames += 1
## Get PATHS of the OPTICAL FLOW frames
n_frames = 0
of_frame_paths = []
if cfg.dataset == 'FlyingThings3D':
files = glob.glob(os.path.join(cfg.dataset_path, "optical_flow", data_folder, cfg.into, "left", "*"))
elif cfg.dataset == 'Driving':
files = glob.glob(os.path.join(cfg.dataset_path, "optical_flow", "15mm_focallength", "scene_forwards",
"slow", cfg.into, "left", "*"))
for of_frame_path in sorted(files):
if n_frames == max_n_frames:
break
of_frame_paths.append(of_frame_path.replace('\\', '/'))
n_frames += 1
## Get PATHS of the DISPARITY frames
n_frames = 0
disp_frame_paths = []
if cfg.dataset == 'FlyingThings3D':
files = glob.glob(os.path.join(cfg.dataset_path, "disparity", data_folder, "left", "*"))
elif cfg.dataset == 'Driving':
files = glob.glob(os.path.join(cfg.dataset_path, "disparity", "15mm_focallength", "scene_forwards",
"slow", "left", "*"))
for disp_frame_path in sorted(files):
if n_frames == max_n_frames:
break
disp_frame_paths.append(disp_frame_path.replace('\\', '/'))
n_frames += 1
# Get PATHS of the DISPARITY CHANGE frames
n_frames = 0
dispChange_frame_paths = []
if cfg.dataset == 'FlyingThings3D':
files = glob.glob(os.path.join(cfg.dataset_path, "disparity_change", data_folder, cfg.into, "left", "*"))
elif cfg.dataset == 'Driving':
files = glob.glob(os.path.join(cfg.dataset_path, "disparity_change", "15mm_focallength", "scene_forwards",
"slow", cfg.into,"left", "*"))
for dispChange_frame_path in sorted(files):
if n_frames == max_n_frames:
break
dispChange_frame_paths.append(dispChange_frame_path.replace('\\', '/'))
n_frames += 1
assert len(color_frame_paths) == len(of_frame_paths) == len(disp_frame_paths) == len(dispChange_frame_paths)
# print(len(color_frame_paths), len(of_frame_paths), len(disp_frame_paths), len(dispChange_frame_paths))
if len(color_frame_paths) == 0:
raise Exception("No files were loaded!")
##################################################################################
# Get the color frames
color_frames = []
for color_frame_path in color_frame_paths:
print(color_frame_path)
color_frames.append(read(color_frame_path))
# Get the optical flow frames
of_frames = []
for of_frame_path in of_frame_paths:
of_frame = read(of_frame_path)
of_frames.append(of_frame)
# Get the depth frames
disp_frames = []
depth_frames = []
for disp_frame_path in disp_frame_paths:
disp_frame = read(disp_frame_path)
disp_frames.append(disp_frame)
depth_frame = cfg.baseline * cfg.fx / disp_frame
depth_frames.append(depth_frame)
# Get the depth change frames
dispChange_frames = []
depthChange_frames = []
for i, dispChange_frame_path in enumerate(dispChange_frame_paths):
dispChange_frame = read(dispChange_frame_path)
dispChange_frames.append(dispChange_frame)
depthChange_frame = ((cfg.baseline * cfg.fx / (disp_frames[i] + dispChange_frame)) - (cfg.baseline * cfg.fx / disp_frames[i]))
depthChange_frames.append(depthChange_frame)
assert len(color_frames) == len(of_frames) == len(depth_frames) == len(depthChange_frames)
if len(color_frames) == 0:
raise Exception("Could not read files!")
# plt.imshow(disp_frames[1])
# plt.colorbar()
# plt.show()
# plt.imshow(dispChange_frames[1])
# plt.colorbar()
# plt.show()
# plt.imshow(depthChange_frames[1])
# plt.colorbar()
# plt.scatter(325, 270)
# plt.show()
#
# print(depthChange_frames[1][400][400])
# print(depthChange_frames[1][280][320])
##################################################################################
##################################################################################
## Generate numpy arrays from raw data and store it
vg_dir = os.path.join("data", cfg.dataset, str(cfg.n_voxels), "voxelgrids")
sf_vg_dir = os.path.join("data", cfg.dataset, str(cfg.n_voxels), "sceneflows_vg")
create_dir(vg_dir)
create_dir(sf_vg_dir)
for i in range(len(disp_frame_paths)):
print(i, "- Processing", disp_frame_paths[i])
# Generate point cloud ( [colors, coordinates, sceneflow] ) as panda's DataFrame
pointcloud_data = compute_voxelgrid_and_sceneflow(color_frames[i], of_frames[i],
depth_frames[i], depthChange_frames[i],
compute_sceneflow=True, plot=False)
## Store as numpy array
generate_numpy(vg_dir, sf_vg_dir, color_frame_paths[i], pointcloud_data)
else:
### Read numpy files and work with them ###
## First, get the voxelgrids and sceneflow paths
dataset = cfg.dataset
vg_dir = os.path.join(cfg.val_dir, "voxelgrids")
sf_vg_dir = os.path.join(cfg.val_dir, "sceneflows_vg")
voxelgrid_paths = []
for path in sorted(glob.glob(vg_dir + "/*")):
voxelgrid_paths.append(path.replace('\\', '/'))
sceneflow_vg_paths = []
for path in sorted(glob.glob(sf_vg_dir + "/*")):
sceneflow_vg_paths.append(path.replace('\\', '/'))
########################################################
## Read one voxelgrid
voxelgrid = np.load(voxelgrid_paths[0])
voxel_vertices = np.argwhere(voxelgrid)
## Read the corresponding sceneflow voxelgrid groundtruth
sceneflow = np.load(sceneflow_vg_paths[0])
sf_vg_vertices = sceneflow[np.where(voxelgrid == 1)]
# -*- coding: utf-8 -*-
import os
import timeit
from contextlib import contextmanager
import numpy as np
from scipy.io import wavfile
from scipy import linalg, fftpack, signal
import librosa
from librosa import feature as acoustic_feature
from path import FSDD_PATH
def read_audio_files():
"""
Return
------
sample_rate : int
outputs : dictionary (mapping name -> audio_raw_data)
"""
all_files = [os.path.join(FSDD_PATH, i)
for i in os.listdir(FSDD_PATH) if '.wav' == i[-4:]]
assert len(all_files) > 0, "Cannot find .wav file at path: %s" % FSDD_PATH
outputs = {}
sample_rate = []
print('======== Reading Audio Files ========')
print('Found: %d audio files' % len(all_files))
for i, path in enumerate(all_files):
name = os.path.basename(path).replace('.wav', '')
rate, data = wavfile.read(path)
# store results
sample_rate.append(rate)
outputs[name] = data
# logging
if (i + 1) % 500 == 0:
print("Loaded %d files ..." % len(outputs))
assert len(set(sample_rate)) == 1, "Found multiple sample rate: %s" % str(set(sample_rate))
return sample_rate[0], outputs
def extract_acoustic_features(data, sample_rate=8000,
n_fft=512, hop_length=0.005, win_length=0.025,
n_mels=40, n_mfcc=20, fmin=64.0, fmax=None):
"""
data : array (n_samples,)
sample_rate : int
n_fft : int
hop_length : float (in seconds)
win_length : float (in seconds)
"""
get_pow_spec = True
get_mel_spec = True
get_mfcc = True
# ====== check arguments ====== #
data = pre_emphasis(data)
win_length = int(win_length * sample_rate)
hop_length = int(hop_length * sample_rate)
if fmax is None:
fmax = sample_rate // 2
results = []
# ====== extract features ====== #
s = librosa.stft(data.astype('float32'),
n_fft=n_fft, hop_length=hop_length, win_length=win_length)
mag_spec = np.abs(s)
if get_pow_spec:
pow_spec = librosa.amplitude_to_db(mag_spec)
results.append(pow_spec)
if get_mel_spec or get_mfcc:
mel_spec = acoustic_feature.melspectrogram(sr=sample_rate, S=mag_spec,
n_mels=n_mels, fmin=fmin, fmax=fmax)
results.append(mel_spec)
if get_mfcc:
mfcc = acoustic_feature.mfcc(sr=sample_rate, S=mel_spec, n_mfcc=n_mfcc)
mfcc = rastafilt(mfcc.T).T
results.append(mfcc)
# ====== return results ====== #
# normalizing features
results = [cmvn(i) for i in results]
# all the features are shape [feat_dim, time_dim]
# deep networks require the order [time_dim, feat_dim]
# so we transpose everything
return tuple([i.astype('float32').T for i in results])
# ===========================================================================
# Others
# ===========================================================================
def one_hot(y, nb_classes=None, dtype='float32'):
'''Convert class vector (integers from 0 to nb_classes)
to binary class matrix, for use with categorical_crossentropy
Note
----
if any class index in y is smaller than 0, then all of its one-hot
values is 0.
'''
if 'int' not in str(y.dtype):
y = y.astype('int32')
if nb_classes is None:
nb_classes = np.max(y) + 1
else:
nb_classes = int(nb_classes)
return np.eye(nb_classes, dtype=dtype)[y]
def cmvn(frames):
m = np.mean(frames, axis=1, keepdims=True)
s = np.std(frames, axis=1, keepdims=True)
frames = frames - m
frames = frames / s
return frames
def pre_emphasis(s, coeff=0.97):
"""Pre-emphasis of an audio signal.
Parameters
----------
s: np.ndarray
the input vector of signal to pre emphasize
coeff: float (0, 1)
coefficience that defines the pre-emphasis filter.
"""
if s.ndim == 1:
return np.append(s[0], s[1:] - coeff * s[:-1])
else:
return s - np.c_[s[:, :1], s[:, :-1]] * coeff
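# Quick illustration (values computed by hand, default coeff=0.97):
# pre_emphasis(np.array([1., 2., 3.])) -> [1., 2 - 0.97*1, 3 - 0.97*2] = [1., 1.03, 1.06]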
def stack_frames(X, frame_length, step_length=1,
keep_length=True, make_contigous=True):
"""
Parameters
----------
X: numpy.ndarray
2D arrray
frame_length: int
number of frames will be stacked into 1 sample.
step_length: {int, None}
number of shift frame, if None, its value equal to
`frame_length // 2`
keep_length: bool
if True, padding zeros to begin and end of `X` to
make the output array has the same length as original
array.
make_contigous: bool
if True, use `numpy.ascontiguousarray` to ensure input `X`
is contiguous.
Example
-------
>>> X = [[ 0 1]
... [ 2 3]
... [ 4 5]
... [ 6 7]
... [ 8 9]
... [10 11]
... [12 13]
... [14 15]
... [16 17]
... [18 19]]
>>> frame_length = 5
>>> step_length = 2
>>> stack_frames(X, frame_length, step_length)
>>> [[ 0 1 2 3 4 5 6 7 8 9]
... [ 4 5 6 7 8 9 10 11 12 13]
... [ 8 9 10 11 12 13 14 15 16 17]]
"""
if keep_length:
if step_length != 1:
raise ValueError("`keepdims` is only supported when `step_length` = 1.")
add_frames = (int(np.ceil(frame_length / 2)) - 1) * 2 + \
(1 if frame_length % 2 == 0 else 0)
right = add_frames // 2
left = add_frames - right
X = np.pad(X,
pad_width=((left, right),) + ((0, 0),) * (X.ndim - 1),
mode='constant')
# ====== check input ====== #
assert X.ndim == 2, "Only support 2D matrix for stacking frames."
if not X.flags['C_CONTIGUOUS']:
if make_contigous:
X = np.ascontiguousarray(X)
else:
raise ValueError('Input buffer must be contiguous.')
# ====== stacking ====== #
frame_length = int(frame_length)
if step_length is None:
step_length = frame_length // 2
shape = (1 + (X.shape[0] - frame_length) // step_length,
frame_length * X.shape[1])
strides = (X.strides[0] * step_length, X.strides[1])
return np.lib.stride_tricks.as_strided(X, shape=shape, strides=strides)
def rastafilt(x):
""" Based on rastafile.m by <NAME>
rows of x = critical bands, cols of x = frame
same for y but after filtering
default filter is single pole at 0.94
The filter is applied on frequency axis
Parameters
----------
x: [t, f]
time x frequency
"""
x = x.T # lazy style to reuse the code from [f, t] libraries
ndim, nobs = x.shape
numer = np.arange(-2, 3)
# careful with division here (float point suggested by <NAME>)
numer = -numer / np.sum(numer * numer)
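# The record is truncated here in the source dump. The canonical RASTA filter
# continues by applying a band-pass IIR filter along time for each band;
# this is a sketch based on the standard rastamat implementation, not the
# original author's code:
denom = np.array([1.0, -0.94])
y = np.zeros_like(x)
for i in range(ndim):
y[i] = signal.lfilter(numer, denom, x[i])
return y.T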
# encoding:utf-8
import numpy as np
import codecs
import re
import os
import pandas as pd
import collections
from operator import itemgetter
from itertools import chain
from functools import reduce
import random
import gensim.models.word2vec as word2vec
import platform
if platform.system() == 'Windows':
import pickle
else:
import cPickle as pickle
def build_pre_train_emb_matrix(emb_vec_file, vocab_file):
embedding = []
temp_emb = dict()
question_vec_file = codecs.open(emb_vec_file, 'r', 'utf-8')
for index, line in enumerate(question_vec_file.readlines()):
if index != 0:
key = line.strip().split(' ')[0]
vector = list(line.strip().split(' ')[1:])
temp_emb[key] = vector
question_vec_file.close()
vocab_word = list(chain.from_iterable(pd.DataFrame(
pd.read_csv(vocab_file, encoding='utf_8_sig'), columns=['word']).values))
count = 0
for i, item in enumerate(vocab_word):
if item in temp_emb:
vec = temp_emb[item]
embedding.append(vec)
count += 1
else:
print("new word.", item, "idx ", i, " initialized as <unk>")
vec = temp_emb['\0']
embedding.append(vec)
del temp_emb
return np.array(embedding, np.float32)
def get_keyword_num_log_tfidf(sentence_length, input_type="question"):
# for q: keyword_num = min(10 * ln(x), x)
# for a: keyword_num = min(x[logx] / lg(2x), x)
"""
:param sentence_length: numpy.int32, sequence_length of inputs
:param input_type: question or answer
:return: the same shape and type with sentence_length,
the keyword_num for attention_layer and representation_layer
"""
sentence_length = np.array(sentence_length, np.int32)
if input_type == "question":
keyword_num = np.minimum(
np.int32(10 * np.log(sentence_length)), sentence_length)
elif input_type == "answer":
keyword_num = np.minimum(
np.int32(sentence_length * np.int32(np.log10(sentence_length)) / np.log10(2.0 * sentence_length)),
sentence_length)
else:
keyword_num = sentence_length
if 0 in keyword_num:
keyword_num = np.where(keyword_num > 0, keyword_num, np.ones_like(sentence_length))
return keyword_num
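# Quick sanity check (illustrative): for a 20-word question,
# get_keyword_num_log_tfidf(np.array([20]), "question")
# -> min(int(10 * ln(20)), 20) = min(29, 20) = 20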
class TrainingDatasetLoader(object):
def __init__(self, num_answer_for_per_question, batch_size, train_file):
# self.num_answer_for_per_question = num_answer_for_per_question
self.question_batch_size = batch_size * num_answer_for_per_question // 8
self.train_file = train_file
self.question = []
self.question_length = []
self.answer = []
self.answer_length = []
self.pointer = 0
self.create_batches()
def create_batches(self):
question_input = codecs.open(self.train_file, 'r', 'utf-8')
question_data = question_input.readlines()
for index_q, line_q in enumerate(question_data):
line_str = line_q.strip().split('\t')
line_int = [int(x) for x in line_str[0].split()]
self.question.append(line_int)
self.question_length.append(len(line_int))
for i in range(1, 6):
answer_i_str = line_str[i].split()
answer_i_int = [int(x) for x in answer_i_str]
self.answer.append(answer_i_int)
self.answer_length.append(len(answer_i_int))
del question_data
question_input.close()
self.num_batch = len(self.question_length) // self.question_batch_size
self.question = self.question[: self.num_batch * self.question_batch_size]
self.question_length = self.question_length[: self.num_batch * self.question_batch_size]
self.answer = self.answer[: self.num_batch * 5 * self.question_batch_size]
self.answer_length = self.answer_length[: self.num_batch * 5 * self.question_batch_size]
self.point = list(range(self.num_batch))
random.shuffle(self.point)
def next_batch(self):
question_batch_length_before_tile = self.question_length[
self.point[self.pointer] * self.question_batch_size: (self.point[self.pointer] + 1) * self.question_batch_size]
max_question_batch_length = max(question_batch_length_before_tile)
question_batch_before_tile = self.question[
self.point[self.pointer] * self.question_batch_size: (self.point[self.pointer] + 1) * self.question_batch_size]
for idx_q, q in enumerate(question_batch_before_tile):
if len(q) < max_question_batch_length:
question_batch_before_tile[idx_q] = q + [0] * (max_question_batch_length - len(q))
question_batch = []
question_batch_length = []
for question_before_tile in question_batch_before_tile:
for _ in range(8):
question_batch.append(question_before_tile)
for question_length_before_tile in question_batch_length_before_tile:
for _ in range(8):
question_batch_length.append(question_length_before_tile)
del question_batch_before_tile
del question_batch_length_before_tile
answer_batch_length_before_tile = self.answer_length[
self.point[self.pointer] * 5 * self.question_batch_size: (self.point[self.pointer] + 1) * 5 * self.question_batch_size]
max_answer_batch_length = max(answer_batch_length_before_tile)
answer_batch_before_tile = self.answer[self.point[self.pointer] * 5 * self.question_batch_size: (self.point[self.pointer] + 1) * 5 * self.question_batch_size]
for idx_a, a in enumerate(answer_batch_before_tile):
if len(a) < max_answer_batch_length:
answer_batch_before_tile[idx_a] = a + [0] * (max_answer_batch_length - len(a))
answer_batch = []
answer_batch_length = []
for i in range(self.question_batch_size):
for j in range(4):
answer_batch.append(answer_batch_before_tile[(5*i+4)])
answer_batch_length.append(answer_batch_length_before_tile[(5*i+4)])
answer_batch.append(answer_batch_before_tile[(5*i+j)])
answer_batch_length.append(answer_batch_length_before_tile[(5*i+j)])
question_keyword_num = get_keyword_num_log_tfidf(question_batch_length, "question")
answer_keyword_num = get_keyword_num_log_tfidf(answer_batch_length, "answer")
self.pointer = (self.pointer + 1) % self.num_batch
if self.pointer == 0:
random.shuffle(self.point)
print('data shuffle. point: ', self.point[:5])
return np.array(question_batch, np.int32), np.array(question_batch_length, np.int32)
"""Utility functions for computing feature association statistics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import sys; sys.path.append('../..')
import src.msc.utils as utils
np.warnings.filterwarnings('ignore')
def log_odds(word, outcome_variable, dataset, config):
"""Computes log-odds between a word and each level of a categorical outcome.
Log-odds = log pi - log (1 - pi)
Where pi = the probability of `word` occurring in examples belonging to
level i of the `outcome_variable`.
Args:
word: string, the word we are interested in.
outcome_variable: string, the name of the outcome variable we are
interested in. This MUST be a categorical variable.
dataset: src.data.dataset.Dataset object, the dataset we are computing over.
config: a configuration object.
Returns:
out: dict(string => float), a mapping from categorical level names
to the log-odds between `word` and that particular level.
"""
# Get a binary matrix where the rows are bag-of-words representations of each
# input sequence and the columns are individual words.
all_word_occurences = dataset.np_data[config.train_suffix][
dataset.input_varname()].toarray()
# Pull out the column that corresponds to the `word` of interest.
# This is a 0/1 vector where a 1 at index i means the word occurred in example
# i.
selected_word_occurances = all_word_occurences[:, dataset.features[word]]
out = {}
one_hots = dataset.np_data[config.train_suffix][outcome_variable].toarray()
for level_name, level_id in dataset.class_to_id_map[outcome_variable].items():
# Get a 0/1 vector where a 1 at index i means example i belongs to the
# current class.
level_mask = one_hots[:, level_id]
# Get probability (num within-class occurances / total occurances).
prob_occurrence = np.sum(selected_word_occurances *
level_mask) / np.sum(selected_word_occurances)
# If the word doesn't occur in the data at all, then we would be dividing
# by zero below. Instead of keeping the nan we say that the word
# isn't informative at all (i.e. that it has a probability of 0.5).
if np.isnan(prob_occurrence):
prob_occurrence = 0.5
if not prob_occurrence:
prob_occurrence += 1e-3
elif prob_occurrence == 1:
prob_occurrence -= 1e-3
out[level_name] = math.log(prob_occurrence) - math.log(1 - prob_occurrence)
return out
def cramers_v(feature, text, targets, possible_labels):
"""Computes the association strength between a word and a categorical outcome.
See: https://en.wikipedia.org/wiki/Cram%C3%A9r%27s_V
Args:
feature: string, the word which is to be computed for.
text: list(list(string)), the provided corpus.
targets: list(string), labels for each sequence in `text`.
possible_labels: list(string), the set of possible elements in `labels`.
Returns:
V: float, the Cramér's V association statistic for a single feature, given
some text, target info (Y) and possible_labels (possible values for Y).
"""
num_rows = 2
num_cols = len(possible_labels)
obs = np.zeros((num_rows, num_cols))
for description, target in zip(text, targets):
if feature in description:
obs[1, possible_labels.index(target)] += 1
else:
obs[0, possible_labels.index(target)] += 1
row_totals = np.sum(obs, axis=1)
col_totals = np.sum(obs, axis=0)
n = np.sum(obs)
expected = np.outer(row_totals, col_totals) / n
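# The record is truncated here in the source dump; the standard Cramér's V
# computation would finish roughly as follows (a sketch, not the original
# author's code):
chisq = np.sum((obs - expected) ** 2 / expected)
v = math.sqrt(chisq / (n * (min(num_rows, num_cols) - 1)))
return v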
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os.path
import numpy as np
import abel
from scipy.linalg import inv
from scipy import dot
###############################################################################
#
# Dasch two-point, three_point, and onion-peeling deconvolution
# as described in Applied Optics 31, 1146 (1992), page 1147-8 sect. B & C.
# https://www.osapublishing.org/ao/abstract.cfm?uri=ao-31-8-1146
# see also discussion in PR #155 https://github.com/PyAbel/PyAbel/pull/155
#
# 2016-03-25 <NAME> - one line Abel transform
# 2016-03-24 <NAME> - Python code framework
# 2015-12-29 <NAME> - original three_point code and
# highlighting the Dasch paper,see issue #61
# https://github.com/PyAbel/PyAbel/issues/61
#
###############################################################################
_dasch_parameter_docstring = \
"""dasch_method deconvolution
C. J. Dasch Applied Optics 31, 1146 (1992).
http://dx.doi.org/10.1364/AO.31.001146
Parameters
----------
IM : 1D or 2D numpy array
right-side half-image (or quadrant)
basis_dir: str
path to the directory for saving / loading
the "dasch_method" operator matrix.
If None, the operator matrix will not be saved to disk.
dr : float
sampling size (=1 for pixel images), used for Jacobian scaling.
The resulting inverse transform is simply scaled by 1/dr.
direction: str
only the `direction="inverse"` transform is currently implemented
Returns
-------
inv_IM: 1D or 2D numpy array
the "dasch_method" inverse Abel transformed half-image
"""
def two_point_transform(IM, basis_dir='.', dr=1, direction="inverse"):
return _dasch_transform(IM, basis_dir=basis_dir, dr=dr,
direction=direction, method="two_point")
def three_point_transform(IM, basis_dir='.', dr=1, direction="inverse"):
return _dasch_transform(IM, basis_dir=basis_dir, dr=dr,
direction=direction, method="three_point")
def onion_peeling_transform(IM, basis_dir='.', dr=1, direction="inverse"):
return _dasch_transform(IM, basis_dir=basis_dir, dr=dr,
direction=direction, method="onion_peeling")
two_point_transform.__doc__ =\
_dasch_parameter_docstring.replace("dasch_method", "two-point")
three_point_transform.__doc__ =\
_dasch_parameter_docstring.replace("dasch_method", "three-point")
onion_peeling_transform.__doc__ =\
_dasch_parameter_docstring.replace("dasch_method", "onion-peeling")
def _dasch_transform(IM, basis_dir='.', dr=1, direction="inverse",
method="three_point"):
if direction != 'inverse':
raise ValueError('Forward "two_point" transform not implemented')
# make sure that the data has 2D shape
IM = np.atleast_2d(IM)
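# The record is truncated here in the source dump. In Dasch's formulation the
# inverse Abel transform reduces to a matrix product with a precomputed
# operator matrix D for the chosen method, roughly (a sketch with a
# hypothetical helper, not the original PyAbel code):
# D = load_or_generate_operator(method, IM.shape[1], basis_dir) # hypothetical
# return IM.dot(D.T) / dr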
import matplotlib.patches as patches
from scipy import signal
import numpy as np
from . import fcc5
def SegmentacionPaisaje(x, resiz, s, fs, ax1, en):
featuresAudio = []
[u, v] = s.shape
band_1=1/u
band_2=1
p = 1
for j in range(0,p):
mfband = s[np.int(np.round(band_1*u)):np.int(np.round(band_2*u)),:]
mfband = signal.medfilt2d(mfband, 5)
selband = mfband
D = np.std(mfband,1)
L = 10 # moving-average filter
coefs = np.ones(L)/float(L)
D = np.convolve(D, coefs, mode='same')
D = np.convolve(D, coefs, mode='same')
Y = -D+np.max(D)
locsY = signal.find_peaks_cwt(Y,np.arange(1,20))
peaksY = Y[locsY]
locsD = signal.find_peaks_cwt(D,np.arange(1,20))
peaksD = D[locsD]
indx = []
for x in range(0,len(peaksY)):
indloc = np.where(locsD<locsY[x])[0]
if len(indloc) == 0:
firstpeak = np.max(D);
else:
firstpeak = peaksD[indloc[-1]]
indloc = np.where(locsD>locsY[x])[0]
if len(indloc) == 0:
secondpeak = np.max(D)
else:
secondpeak = peaksD[indloc[0]]
if firstpeak<secondpeak:
if (D[locsY[x]]-np.min(D))>(0.6*(firstpeak-np.min(D))):
indx.append(x)
else:
if (D[locsY[x]]-np.min(D))>(0.6*(secondpeak-np.min(D))):
indx.append(x)
peaksY = np.delete(peaksY, indx)
if len(peaksY) == 0:
puntos = D>np.mean(D)
dpuntos = np.diff(puntos, axis=0)
else:
thres = []
indices = np.where((peaksY>np.mean(peaksY)))[0]
peaksY = np.delete(peaksY, indices)
peaksY.sort()
num_peaks = len(peaksY)
for r in range(0,num_peaks):
thres.append(Y<peaksY[r])
thres = np.array(thres)
for r in range(1,thres.shape[0]):
dthres = np.diff(thres[r,:], axis=0)
bandthres = np.where(abs(dthres))[0]
if dthres[bandthres[0]] == -1: # if the first derivative is -1, prepend the start index
bandthres = np.concatenate(([1],bandthres))
if dthres[bandthres[-1]] == 1: # if the last derivative is 1, append the final band index
bandthres = np.concatenate((bandthres, [len(D)]))
for g in range(0,np.int(np.array(bandthres).shape[0]/2)):
if sum((thres[r,bandthres[2*g]:bandthres[2*g+1]]) & (thres[r-1,bandthres[2*g]:bandthres[2*g+1]]))>0: # there are points from the band above
thres[r,bandthres[2*g]:bandthres[2*g+1]] = thres[r-1,bandthres[2*g]:bandthres[2*g+1]] # the band above takes precedence
puntos = np.matrix(thres[-1,:]).T
dpuntos = np.diff(puntos, axis=0)
indband = np.where(np.abs(dpuntos))[0] # serves as MATLAB's find()
if dpuntos[indband[0]] == -1: # if the first derivative is -1, prepend the start index
indband = np.concatenate(([1],indband))
if dpuntos[indband[-1]] == 1: # if the last derivative is 1, append the final band index
indband = np.concatenate((indband, [len(D)]))
ind = []
for h in range(0,np.int(np.size(indband,axis=0)/2)):
if indband[2*h] == indband[2*h+1]:
ind.append(2*h)
ind.append(2*h+1)
indband = np.delete(indband, ind)
for g in range(0,np.int(np.size(indband,axis=0)/2)):
fband = mfband[indband[2*g]:indband[2*g+1],:]
D = np.sum(fband, axis=0)
L = 40 # moving-average filter
coefs = np.ones(L)/float(L)
D = np.convolve(D, coefs, mode='same')
from collections import OrderedDict
import periodictable as pdtb
import lmfit as lm
r_e = pdtb.constants.electron_radius * 1e10 # classical electron radius, in A
N_A = pdtb.constants.avogadro_number # Avogadro number, unitless
k_B = 1.38065e-23 # Boltzman constant, in J/K
import numpy as np
import scipy.stats as stat
import fit_ref as mfit
import flu_geometry_routines as gm
def penetrate(beta, delta, alpha, k0):
alpha[alpha == np.inf] = 0
alpha = alpha.astype(complex)
beta_top, beta_bot = beta
delta_top, delta_bot = delta
alpha_c = np.sqrt(2 * (delta_bot - delta_top))
trans = 4 * np.abs(alpha / (alpha + np.sqrt(alpha ** 2 - alpha_c ** 2))) ** 2
penetration_coeff = 2 * k0 * np.imag(np.sqrt(alpha ** 2 - alpha_c ** 2 + beta_bot * 2j))
return 1/penetration_coeff, trans
def update_flu_parameters(p, *args):
assert type(p) is OrderedDict
# *args have to be at least fitting parameters
flu_par = args[0]
# update the fitting parameter whatsoever
p['hisc'] = flu_par['hisc'].value # scale factor for the top phase, unitless.
p['losc'] = flu_par['losc'].value # scale factor for the bottom phase, unitless.
p['bg'] = flu_par['bg'].value # background intensity, unitless.
p['tC'] = flu_par['upbk'].value # ion concentration of the top phase, in M.
p['bC'] = flu_par['lobk'].value # ion concentration of the bottom phase, in M.
p['sC'] = flu_par['surd'].value # ion surface number density, in A^-2.
p['qoff'] = flu_par['qoff'].value # q off set for the data
p['doff'] = flu_par['soff'].value * 1e7 # det range offset for the measurement
p['l2off'] = flu_par['loff'].value * 1e7 # l2 offset for the measurement
p['curv'] = flu_par['curv'].value * 1e10 # the curvature of the interface, in A.
if p['curv'] == 0: p['curv'] = 10000 * 1e10
if len(args) == 1:
return p
# if the *args is tuple (flu_par, sys_par, flu_elements), do the following
try:
sys_par = args[1]
flu_elements = args[2]
except IndexError:
print("update_flu_parameters takes 3 extra arguments!")
# parameterize beam profile
width = sys_par['width'] * 1e7 # width or FWHM of the beam, in A
beam_profile = sys_par['beam']
steps = 500
if beam_profile == 'Uniform':
beam_size = width
weights = np.ones(steps + 1)
elif beam_profile == 'Gaussian':
stdev = width / 2.355 # FWHM of the beam, i.e. 2.355 sigma
beam_size = 2 * (3 * stdev) # keep the beam up to +/-3 standard deviation, or 99.73% of intensity.
rays = np.linspace(-beam_size / 2, beam_size / 2, steps + 1)
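# The record is truncated here in the source dump; the natural continuation
# weights each ray by the Gaussian profile (a sketch, not the original code):
weights = stat.norm(0, stdev).pdf(rays)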
import sys
import numpy as np
import h5py
import cv2
from tqdm import tqdm
from scipy import ndimage # needed for the 3D connected-components fix below
####
# list of utility functions
# 0. I/O util
# 1. binary pred -> instance seg
# 2. instance seg + pred heatmap -> instance score
# 3. instance seg -> bbox
# 4. instance seg + gt seg + instance score -> sorted match result
# 0. I/O
def seg2im(seg): # seg -> 3-channel image
if seg.max()>255:
return np.stack([seg//65536, seg//256, seg%256],axis=2).astype(np.uint8)
else:
return seg.astype(np.uint8)
def im2seg(im): # image -> seg
if im.ndim==2:
return im
else:
return im[:,:,0].astype(np.uint32)*65536+im[:,:,1].astype(np.uint32)*256+im[:,:,2].astype(np.uint32)
def heatmap_by_channel(im, channel=-1): # image to heatmap
if channel != -1:
heatmap = im[channel]
else:
heatmap = im.mean(axis=0)
return heatmap
def readh5(path, vol=''):
# do the first key
fid = h5py.File(path, 'r')
if vol == '':
if sys.version[0]=='3':
vol = list(fid)[0]
else: # python 2
vol = fid.keys()[0]
return np.array(fid[vol]).squeeze()
# 1. binary pred -> instance seg
def seg_bbox2d(seg,do_count=False, uid=None):
sz = seg.shape
assert len(sz)==2
if uid is None:
uid = np.unique(seg)
uid = uid[uid>0]
um = uid.max()
out = np.zeros((1+int(um),5+do_count),dtype=np.uint32)
out[:,0] = np.arange(out.shape[0])
out[:,1] = sz[0]
out[:,3] = sz[1]
# for each row
rids = np.where((seg>0).sum(axis=1)>0)[0]
for rid in rids:
sid = np.unique(seg[rid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,1] = np.minimum(out[sid,1],rid)
out[sid,2] = np.maximum(out[sid,2],rid)
cids = np.where((seg>0).sum(axis=0)>0)[0]
for cid in cids:
sid = np.unique(seg[:,cid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,3] = np.minimum(out[sid,3],cid)
out[sid,4] = np.maximum(out[sid,4],cid)
if do_count:
ui,uc = np.unique(seg,return_counts=True)
out[ui,-1]=uc
return out[uid]
def getSegType(mid):
m_type = np.uint64
if mid<2**8:
m_type = np.uint8
elif mid<2**16:
m_type = np.uint16
elif mid<2**32:
m_type = np.uint32
return m_type
def label_chunk(get_chunk, numC, rr=1, m_type=np.uint64):
# need to implement get_chunk function
# INPUT: chunk_id
# OUTPUT: 3d chunk
# label chunks or slices
sz = get_chunk(0).shape
numD = len(sz)
mid = 0
seg = [None]*numC
for zi in range(numC):
print('%d/%d [%d], '%(zi,numC,mid)),
sys.stdout.flush()
# as split as possible
_, seg_c = cv2.connectedComponents((get_chunk(zi) > 0).astype(np.uint8), connectivity=4)
seg_c = seg_c.astype(m_type)
if numD==2:
seg_c = seg_c[np.newaxis]
if zi == 0: # first seg, relabel seg index
print('_%d_'%0)
slice_b = seg_c[-1]
seg[zi] = seg_c[:,::rr,::rr] # save a low-res one
mid += seg[zi].max()
rlA = np.arange(mid+1,dtype=m_type)
else: # link to previous slice
slice_t = seg_c[0]
# cv2.connectedComponents only handles 2D images; use scipy.ndimage.label to
# link components across the two stacked slices
slices, _ = ndimage.label(np.stack([slice_b > 0, slice_t > 0], axis=0))
slices = slices.astype(m_type)
# create mapping for seg cur
lc = np.unique(seg_c);lc=lc[lc>0]
rl_c = np.zeros(int(lc.max())+1, dtype=int)
# merge curr seg
# for 1 pre seg id -> slices id -> cur seg ids
l0_p = np.unique(slice_b*(slices[0]>0))
bbs = seg_bbox2d(slice_b, uid=l0_p)[:,1:]
print('_%d_'%len(l0_p))
for i,l in enumerate(l0_p):
bb = bbs[i]
sid = np.unique(slices[0,bb[0]:bb[1]+1,bb[2]:bb[3]+1]*(slice_b[bb[0]:bb[1]+1,bb[2]:bb[3]+1]==l))
sid = sid[sid>0]
# multiple ids
if len(sid)==1:
cid = np.unique(slice_t*(slices[1]==sid))
else:
cid = np.unique(slice_t*np.in1d(slices[1].reshape(-1),sid).reshape(sz[-2:]))
rl_c[cid[cid>0]] = l
# new id
new_num = np.where(rl_c==0)[0][1:] # except the first one
new_id = np.arange(mid+1,mid+1+len(new_num),dtype=m_type)
rl_c[new_num] = new_id
slice_b = rl_c[seg_c[-1]] # save a high-res
seg[zi] = rl_c[seg_c[:,::rr,::rr]]
mid += len(new_num)
# update global id
rlA = np.hstack([rlA,new_id])
# merge prev seg
# for 1 cur seg id -> slices id -> prev seg ids
l1_c = np.unique(slice_t*(slices[1]>0))
for l in l1_c:
sid = np.unique(slices[1]*(slice_t==l))
sid = sid[sid>0]
pid = np.unique(slice_b*np.in1d(slices[0].reshape(-1),sid).reshape(sz[-2:]))
pid = pid[pid>0]
# get all previous m-to-1 labels
pid_p = np.where(np.in1d(rlA,rlA[pid]))[0]
if len(pid_p)>1:
rlA[pid_p] = pid.max()
# memory reduction: each seg
m2_type = getSegType(seg[zi].max())
seg[zi] = seg[zi].astype(m2_type)
# memory reduction: final output
m2_type = getSegType(rlA.max())
rlA = rlA.astype(m2_type)
print('output type:',m2_type)
return rlA[np.vstack(seg)]
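# Usage sketch (illustrative): label a stack of 2D binary masks chunk by
# chunk, keeping a 4x in-plane downsampled result:
# seg = label_chunk(lambda z: masks[z], numC=len(masks), rr=4)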
# 2. heatmap + seg -> detection score
def heatmap_to_score(seg, heatmap, channel=-1, do_avg=True):
# 3D vol version
if heatmap.ndim>seg.ndim:
heatmap = heatmap_by_channel(heatmap, channel)
seg_id, seg_count = np.unique(seg, return_counts=True)
seg_view = seg.ravel()
seg_len = int(seg_id.max())+1
# relabel bincount(minlen = max_len) with ids
score = np.bincount(seg_view.astype(int), weights=heatmap.ravel(), minlength=seg_len)[seg_id.astype(int)]
if do_avg:
score = score/seg_count
if score.max()>1: # assume 0-255
score = score/255.
return seg_id, score, seg_count
def heatmap_to_score_tile(seg_tiles, heatmap_tiles, max_id=-1, channel=-1):
if max_id == -1:# rough estimate of the largest seg id
max_id = max(100, 2*im2seg(cv2.imread(seg_tiles[-1])).max())
count = np.zeros((max_id+1,2)) # num_voxel, sum_score
for z in range(len(seg_tiles)):
# 3D vol version
seg = im2seg(cv2.imread(seg_tiles[z]))
heatmap = cv2.imread(heatmap_tiles[z])
t_id, t_score, t_count = heatmap_to_score(seg, heatmap, channel=channel, do_avg=False)
# in case of wrong max_id input: grow the accumulator and double max_id
if t_id[-1]>max_id:
count = np.vstack([count,np.zeros((max_id+1,2))])
max_id *= 2
count[t_id,0] += t_count
count[t_id,1] += t_score
pred_id = np.where(count[:,0]>0)[0]
score = count[pred_id,1]/count[pred_id,0]
if score.max()>1: # assume 0-255
score = score/255.
out = np.vstack([pred_id, score]).T
return out
# 3. instance seg -> bbox
def seg_bbox3d(seg,do_count=False, uid=None):
"""returns bounding box of segments"""
sz = seg.shape
assert len(sz)==3
if uid is None:
uid = np.unique(seg)
uid = uid[uid>0]
um = int(uid.max())
out = np.zeros((1+um,7+do_count),dtype=np.uint32)
out[:,0] = np.arange(out.shape[0])
out[:,1], out[:,3], out[:,5] = sz[0], sz[1], sz[2]
# for each slice
zids = np.where((seg>0).sum(axis=1).sum(axis=1)>0)[0]
for zid in tqdm(zids):
sid = np.unique(seg[zid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,1] = np.minimum(out[sid,1],zid)
out[sid,2] = np.maximum(out[sid,2],zid)
# for each row
rids = np.where((seg>0).sum(axis=0).sum(axis=1)>0)[0]
for rid in rids:
sid = np.unique(seg[:,rid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,3] = np.minimum(out[sid,3],rid)
out[sid,4] = np.maximum(out[sid,4],rid)
# for each col
cids = np.where((seg>0).sum(axis=0).sum(axis=0)>0)[0]
for cid in cids:
sid = np.unique(seg[:,:,cid])
sid = sid[(sid>0)*(sid<=um)]
out[sid,5] = np.minimum(out[sid,5],cid)
out[sid,6] = np.maximum(out[sid,6],cid)
if do_count:
ui,uc = np.unique(seg,return_counts=True)
out[ui[ui<=um],-1]=uc[ui<=um]
return out[uid]
def seg_bbox3d_tile(seg_tiles, do_count=False, max_id=-1):
"""returns bounding box of segments"""
if max_id == -1:
max_id = max(100, 2*im2seg(cv2.imread(seg_tiles[-1])).max())
sz = cv2.imread(seg_tiles[0]).shape
out = np.zeros((max_id+1, 7+do_count),dtype=np.uint32)
# pylint: disable=E1101
import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset
from sklearn.metrics import average_precision_score, confusion_matrix
import numpy as np
from physionet import PhysioNet, get_data_min_max, variable_time_collate_fn2
from sklearn import model_selection
from sklearn import metrics
from sklearn.metrics import precision_score, recall_score, f1_score
from person_activity import PersonActivity
def one_hot(y_):
# Function to encode output labels from number indexes
# e.g.: [[5], [0], [3]] --> [[0, 0, 0, 0, 0, 1], [1, 0, 0, 0, 0, 0], [0, 0, 0, 1, 0, 0]]
y_ = y_.reshape(len(y_))
y_ = [int(x) for x in y_]
n_values = np.max(y_) + 1
return np.eye(n_values)[np.array(y_, dtype=np.int32)]
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
def log_normal_pdf(x, mean, logvar, mask):
const = torch.from_numpy(np.array([2. * np.pi])).float().to(x.device)
const = torch.log(const)
return -.5 * (const + logvar + (x - mean) ** 2. / torch.exp(logvar)) * mask
def normal_kl(mu1, lv1, mu2, lv2):
v1 = torch.exp(lv1)
v2 = torch.exp(lv2)
lstd1 = lv1 / 2.
lstd2 = lv2 / 2.
kl = lstd2 - lstd1 + ((v1 + (mu1 - mu2) ** 2.) / (2. * v2)) - .5
return kl
def mean_squared_error(orig, pred, mask):
error = (orig - pred) ** 2
error = error * mask
return error.sum() / mask.sum()
def normalize_masked_data(data, mask, att_min, att_max):
# we don't want to divide by zero
att_max[att_max == 0.] = 1.
if (att_max != 0.).all():
data_norm = (data - att_min) / att_max
else:
raise Exception("Zero!")
if torch.isnan(data_norm).any():
raise Exception("nans!")
# set masked out elements back to zero
data_norm[mask == 0] = 0
return data_norm, att_min, att_max
def evaluate(dim, rec, dec, test_loader, args, num_sample=10, device="cuda"):
mse, test_n = 0.0, 0.0
with torch.no_grad():
for test_batch in test_loader:
test_batch = test_batch.to(device)
observed_data, observed_mask, observed_tp = (
test_batch[:, :, :dim],
test_batch[:, :, dim: 2 * dim],
test_batch[:, :, -1],
)
if args.sample_tp and args.sample_tp < 1:
subsampled_data, subsampled_tp, subsampled_mask = subsample_timepoints(
observed_data.clone(), observed_tp.clone(), observed_mask.clone(), args.sample_tp)
else:
subsampled_data, subsampled_tp, subsampled_mask = \
observed_data, observed_tp, observed_mask
out = rec(torch.cat((subsampled_data, subsampled_mask), 2), subsampled_tp)
qz0_mean, qz0_logvar = (
out[:, :, : args.latent_dim],
out[:, :, args.latent_dim:],
)
epsilon = torch.randn(
num_sample, qz0_mean.shape[0], qz0_mean.shape[1], qz0_mean.shape[2]
).to(device)
z0 = epsilon * torch.exp(0.5 * qz0_logvar) + qz0_mean
z0 = z0.view(-1, qz0_mean.shape[1], qz0_mean.shape[2])
batch, seqlen = observed_tp.size()
time_steps = (
observed_tp[None, :, :].repeat(num_sample, 1, 1).view(-1, seqlen)
)
pred_x = dec(z0, time_steps)
pred_x = pred_x.view(num_sample, -1, pred_x.shape[1], pred_x.shape[2])
pred_x = pred_x.mean(0)
mse += mean_squared_error(observed_data, pred_x, observed_mask) * batch
test_n += batch
return mse / test_n
def compute_losses(dim, dec_train_batch, qz0_mean, qz0_logvar, pred_x, args, device):
observed_data, observed_mask \
= dec_train_batch[:, :, :dim], dec_train_batch[:, :, dim:2*dim]
noise_std = args.std
noise_std_ = torch.zeros(pred_x.size()).to(device) + noise_std
noise_logvar = 2. * torch.log(noise_std_).to(device)
logpx = log_normal_pdf(observed_data, pred_x, noise_logvar,
observed_mask).sum(-1).sum(-1)
pz0_mean = pz0_logvar = torch.zeros(qz0_mean.size()).to(device)
analytic_kl = normal_kl(qz0_mean, qz0_logvar,
pz0_mean, pz0_logvar).sum(-1).sum(-1)
if args.norm:
logpx /= observed_mask.sum(-1).sum(-1)
analytic_kl /= observed_mask.sum(-1).sum(-1)
return logpx, analytic_kl
def evaluate_classifier(model, test_loader, dec=None, args=None, classifier=None,
dim=41, device='cuda', reconst=False, num_sample=1, dataset='P12'):
pred = []
true = []
test_loss = 0
for test_batch, label in test_loader:
test_batch, label = test_batch.to(device), label.to(device)
batch_len = test_batch.shape[0]
observed_data, observed_mask, observed_tp \
= test_batch[:, :, :dim], test_batch[:, :, dim:2*dim], test_batch[:, :, -1]
with torch.no_grad():
out = model(
torch.cat((observed_data, observed_mask), 2), observed_tp)
if reconst:
qz0_mean, qz0_logvar = out[:, :,
:args.latent_dim], out[:, :, args.latent_dim:]
epsilon = torch.randn(
num_sample, qz0_mean.shape[0], qz0_mean.shape[1], qz0_mean.shape[2]).to(device)
z0 = epsilon * torch.exp(.5 * qz0_logvar) + qz0_mean
z0 = z0.view(-1, qz0_mean.shape[1], qz0_mean.shape[2])
if args.classify_pertp:
pred_x = dec(z0, observed_tp[None, :, :].repeat(
num_sample, 1, 1).view(-1, observed_tp.shape[1]))
out = classifier(pred_x)
else:
out = classifier(z0)
if args.classify_pertp:
N = label.size(-1)
out = out.view(-1, N)
label = label.view(-1, N)
_, label = label.max(-1)
test_loss += nn.CrossEntropyLoss()(out, label.long()).item() * batch_len * 50.
else:
label = label.unsqueeze(0).repeat_interleave(
num_sample, 0).view(-1)
test_loss += nn.CrossEntropyLoss()(out, label).item() * batch_len * num_sample
pred.append(out.cpu().numpy())
true.append(label.cpu().numpy())
pred = np.concatenate(pred, 0)
true = np.concatenate(true, 0)
acc = np.mean(pred.argmax(1) == true)
if dataset == 'P12' or dataset == 'P19' or dataset == 'eICU':
auc = metrics.roc_auc_score(true, pred[:, 1]) if not args.classify_pertp else 0.
aupr = average_precision_score(true, pred[:, 1]) if not args.classify_pertp else 0.
return test_loss / pred.shape[0], acc, auc, aupr, None, None, None
elif dataset == 'PAM':
auc = metrics.roc_auc_score(one_hot(true), pred) if not args.classify_pertp else 0.
aupr = average_precision_score(one_hot(true), pred) if not args.classify_pertp else 0.
precision = precision_score(true, pred.argmax(1), average='macro', ) if not args.classify_pertp else 0.
recall = recall_score(true, pred.argmax(1), average='macro', ) if not args.classify_pertp else 0.
F1 = 2 * (precision * recall) / (precision + recall) if not args.classify_pertp else 0.
return test_loss/pred.shape[0], acc, auc, aupr, precision, recall, F1
def random_sample(idx_0, idx_1, batch_size):
"""
Returns a balanced sample by randomly sampling without replacement.
:param idx_0: indices of negative samples
:param idx_1: indices of positive samples
:param batch_size: batch size
:return: indices of balanced batch of negative and positive samples
"""
idx0_batch = np.random.choice(idx_0, size=int(batch_size / 2), replace=False)
idx1_batch = np.random.choice(idx_1, size=int(batch_size / 2), replace=False)
idx = np.concatenate([idx0_batch, idx1_batch], axis=0)
return idx
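# Example (illustrative): random_sample(np.arange(100), np.arange(100, 130), 32)
# returns 16 indices drawn from the negatives followed by 16 from the positives.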
def preprocess_P19(PT_dict, arr_outcomes, labels_ts):
total = []
for i, patient in enumerate(PT_dict):
length = patient['length']
record_id = patient['id']
tt = torch.squeeze(torch.tensor(patient['time'][:length]), 1)
vals = torch.tensor(patient['arr'][:length, :], dtype=torch.float32)
m = np.zeros(shape=patient['arr'][:length, :].shape)
m[patient['arr'][:length, :].nonzero()] = 1
mask = torch.tensor(m, dtype=torch.float32)
outcome = torch.tensor(arr_outcomes[i][0], dtype=torch.float32)
total.append((record_id, tt, vals, mask, outcome))
'''
# calculate and save P19 statistics - age, gender, density scores (can be used for all algorithms)
idx_under_65 = []
idx_over_65 = []
idx_male = []
idx_female = []
for i in range(len(PT_dict)):
if total[i][0] == PT_dict[i]['id']:
age, gender, _, _, _, _ = PT_dict[i]['extended_static']
if age > 0:
if age < 65:
idx_under_65.append(i)
else:
idx_over_65.append(i)
if gender == 0:
idx_female.append(i)
if gender == 1:
idx_male.append(i)
np.save('P19_idx_under_65.npy', np.array(idx_under_65), allow_pickle=True)
np.save('P19_idx_over_65.npy', np.array(idx_over_65), allow_pickle=True)
np.save('P19_idx_male.npy', np.array(idx_male), allow_pickle=True)
np.save('P19_idx_female.npy', np.array(idx_female), allow_pickle=True)
# save density scores
X_features = np.array([d['arr'] for d in PT_dict])
counts = np.count_nonzero(X_features, axis=(0, 1))
ascending_indices = np.argsort(counts)
density_scores = counts / (X_features.shape[0] * 60)
res = [[ind, density_scores[ind], labels_ts[:-1][ind]] for ind in ascending_indices]
np.save('P19_density_scores.npy', res, allow_pickle=True)
'''
return total
def preprocess_eICU(PT_dict, arr_outcomes, labels_ts):
total = []
for i, patient in enumerate(PT_dict):
record_id = str(i)
tt = torch.squeeze(torch.tensor(patient['time']), 1)
vals = torch.tensor(patient['arr'], dtype=torch.float32)
m = np.zeros(shape=patient['arr'].shape)
m[patient['arr'].nonzero()] = 1
mask = torch.tensor(m, dtype=torch.float32)
outcome = torch.tensor(arr_outcomes[i], dtype=torch.float32)
total.append((record_id, tt, vals, mask, outcome))
'''
# calculate and save P19 statistics - gender, density scores (can be used for all algorithms)
idx_male = []
idx_female = []
for i in range(len(PT_dict)):
if total[i][0] == str(i):
vec = PT_dict[i]['extended_static']
if vec[-3] > 0:
idx_female.append(i)
if vec[-4] > 0:
idx_male.append(i)
print('\nOnly 1.329/36.443 samples have gender data available.\n')
np.save('eICU_idx_male.npy', np.array(idx_male), allow_pickle=True)
np.save('eICU_idx_female.npy', np.array(idx_female), allow_pickle=True)
# save density scores
X_features = np.array([d['arr'] for d in PT_dict])
counts = np.count_nonzero(X_features, axis=(0, 1))
ascending_indices = np.argsort(counts)
density_scores = counts / (X_features.shape[0] * 300)
res = [[ind, density_scores[ind], labels_ts[ind]] for ind in ascending_indices]
np.save('eICU_density_scores.npy', res, allow_pickle=True)
'''
return total
def preprocess_PAM(PT_dict, arr_outcomes):
length = 600
total = []
for i, patient in enumerate(PT_dict):
record_id = str(i)
tt = torch.tensor(list(range(length)))
vals = torch.tensor(patient, dtype=torch.float32)
m = np.zeros(shape=patient.shape)
m[patient.nonzero()] = 1
mask = torch.tensor(m, dtype=torch.float32)
outcome = torch.tensor(arr_outcomes[i][0], dtype=torch.float32)
total.append((record_id, tt, vals, mask, outcome))
return total
def random_sample_8(ytrain, B, replace=False):
""" Returns a balanced sample of tensors by randomly sampling without replacement. """
idx0_batch = np.random.choice(np.where(ytrain == 0)[0], size=int(B / 8), replace=replace)
idx1_batch = np.random.choice(np.where(ytrain == 1)[0], size=int(B / 8), replace=replace)
idx2_batch = np.random.choice(np.where(ytrain == 2)[0], size=int(B / 8), replace=replace)
idx3_batch = np.random.choice(np.where(ytrain == 3)[0], size=int(B / 8), replace=replace)
idx4_batch = np.random.choice(np.where(ytrain == 4)[0], size=int(B / 8), replace=replace)
idx5_batch = np.random.choice(np.where(ytrain == 5)[0], size=int(B / 8), replace=replace)
idx6_batch = np.random.choice(np.where(ytrain == 6)[0], size=int(B / 8), replace=replace)
idx7_batch = np.random.choice(np.where(ytrain == 7)[0], size=int(B / 8), replace=replace)
idx = np.concatenate([idx0_batch, idx1_batch, idx2_batch, idx3_batch, idx4_batch, idx5_batch, idx6_batch, idx7_batch], axis=0)
return idx
def get_data(args, dataset, device, q, upsampling_batch, split_type, feature_removal_level, missing_ratio, flag=1,
reverse=False, predictive_label='mortality'):
if dataset == 'P12':
train_dataset_obj_1 = PhysioNet('data/physionet', train=True,
quantization=q,
download=True, n_samples=12000,
device=device, set_letter='a')
train_dataset_obj_2 = PhysioNet('data/physionet', train=True,
quantization=q,
download=True, n_samples=12000,
device=device, set_letter='b')
train_dataset_obj_3 = PhysioNet('data/physionet', train=True,
quantization=q,
download=True, n_samples=12000,
device=device, set_letter='c')
dataset_1 = train_dataset_obj_1[:len(train_dataset_obj_1)]
dataset_2 = train_dataset_obj_2[:len(train_dataset_obj_2)]
dataset_3 = train_dataset_obj_3[:len(train_dataset_obj_3)]
total_dataset = dataset_1 + dataset_2 + dataset_3
if predictive_label == 'LoS':
los_outcomes = np.load('../saved/LoS_y1_out.npy', allow_pickle=True)
for i, tpl in enumerate(total_dataset):
a, b, c, d, _ = tpl
los_label = los_outcomes[i][0]
los_label = torch.tensor(los_label, dtype=torch.float32)
total_dataset[i] = (a, b, c, d, los_label)
'''
# calculate and save statistics
idx_under_65 = []
idx_over_65 = []
idx_male = []
idx_female = []
P_list = np.load('P_list.npy', allow_pickle=True)
for i in range(len(P_list)):
if total_dataset[i][0] == P_list[i]['id']:
age, gender, _, _, _ = P_list[i]['static']
if age > 0:
if age < 65:
idx_under_65.append(i)
else:
idx_over_65.append(i)
if gender == 0:
idx_female.append(i)
if gender == 1:
idx_male.append(i)
np.save('mtand_idx_under_65.npy', np.array(idx_under_65), allow_pickle=True)
np.save('mtand_idx_over_65.npy', np.array(idx_over_65), allow_pickle=True)
np.save('mtand_idx_male.npy', np.array(idx_male), allow_pickle=True)
np.save('mtand_idx_female.npy', np.array(idx_female), allow_pickle=True)
'''
elif dataset == 'P19':
PT_dict = np.load('../../../P19data/processed_data/PT_dict_list_6.npy', allow_pickle=True)
labels_ts = np.load('../../../P19data/processed_data/labels_ts.npy', allow_pickle=True)
labels_demogr = np.load('../../../P19data/processed_data/labels_demogr.npy', allow_pickle=True)
arr_outcomes = np.load('../../../P19data/processed_data/arr_outcomes_6.npy', allow_pickle=True)
total_dataset = preprocess_P19(PT_dict, arr_outcomes, labels_ts)
elif dataset == 'eICU':
PT_dict = np.load('../../../eICUdata/processed_data/PTdict_list.npy', allow_pickle=True)
labels_ts = np.load('../../../eICUdata/processed_data/eICU_ts_vars.npy', allow_pickle=True)
labels_demogr = np.load('../../../eICUdata/processed_data/eICU_static_vars.npy', allow_pickle=True)
arr_outcomes = np.load('../../../eICUdata/processed_data/arr_outcomes.npy', allow_pickle=True)
total_dataset = preprocess_eICU(PT_dict, arr_outcomes, labels_ts)
elif dataset == 'PAM':
PT_dict = np.load('../../../PAMdata/processed_data/PTdict_list.npy', allow_pickle=True)
arr_outcomes = np.load('../../../PAMdata/processed_data/arr_outcomes.npy', allow_pickle=True)
total_dataset = preprocess_PAM(PT_dict, arr_outcomes)
print('len(total_dataset):', len(total_dataset))
if split_type == 'random':
# Shuffle and split
train_data, test_data = model_selection.train_test_split(total_dataset, train_size=0.9, # 80% train, 10% validation, 10% test
shuffle=True)
elif split_type == 'age' or split_type == 'gender':
if dataset == 'P12':
prefix = 'mtand'
elif dataset == 'P19':
prefix = 'P19'
elif dataset == 'eICU': # possible only with split_type == 'gender'
prefix = 'eICU'
if split_type == 'age':
if dataset == 'eICU':
print('\nCombination of eICU dataset and age split is not possible.\n')
if reverse == False:
idx_train = np.load('%s_idx_under_65.npy' % prefix, allow_pickle=True)
idx_vt = np.load('%s_idx_over_65.npy' % prefix, allow_pickle=True)
else:
idx_train = np.load('%s_idx_over_65.npy' % prefix, allow_pickle=True)
idx_vt = np.load('%s_idx_under_65.npy' % prefix, allow_pickle=True)
elif split_type == 'gender':
if reverse == False:
idx_train = np.load('%s_idx_male.npy' % prefix, allow_pickle=True)
idx_vt = np.load('%s_idx_female.npy' % prefix, allow_pickle=True)
else:
idx_train = np.load('%s_idx_female.npy' % prefix, allow_pickle=True)
idx_vt = np.load('%s_idx_male.npy' % prefix, allow_pickle=True)
np.random.shuffle(idx_train)
np.random.shuffle(idx_vt)
train_data = [total_dataset[i] for i in idx_train]
test_data = [total_dataset[i] for i in idx_vt]
record_id, tt, vals, mask, labels = train_data[0]
input_dim = vals.size(-1)
data_min, data_max = get_data_min_max(total_dataset, device)
batch_size = 128
if flag:
if args.classif:
if split_type == 'random':
train_data, val_data = model_selection.train_test_split(train_data, train_size=0.8889, shuffle=True) # 80% train, 10% validation, 10% test
elif split_type == 'age' or split_type == 'gender':
val_data, test_data = model_selection.train_test_split(test_data, train_size=0.5, shuffle=False)
if dataset == 'P12':
num_all_features = 36
elif dataset == 'P19':
num_all_features = 34
elif dataset == 'eICU':
num_all_features = 14
elif dataset == 'PAM':
num_all_features = 17
num_missing_features = round(missing_ratio * num_all_features)
if feature_removal_level == 'sample':
for i, tpl in enumerate(val_data):
idx = np.random.choice(num_all_features, num_missing_features, replace=False)
_, _, values, _, _ = tpl
tpl = list(tpl)
values[:, idx] = torch.zeros(values.shape[0], num_missing_features)
tpl[2] = values
val_data[i] = tuple(tpl)
for i, tpl in enumerate(test_data):
idx = np.random.choice(num_all_features, num_missing_features, replace=False)
_, _, values, _, _ = tpl
tpl = list(tpl)
values[:, idx] = torch.zeros(values.shape[0], num_missing_features)
tpl[2] = values
test_data[i] = tuple(tpl)
elif feature_removal_level == 'set':
if dataset == 'P12':
dict_params = train_dataset_obj_1.params_dict
density_scores_names = np.load('../saved/IG_density_scores_P12.npy', allow_pickle=True)[:, 1]
idx = [dict_params[name] for name in density_scores_names[:num_missing_features]]
elif dataset == 'P19':
labels_ts = np.load('../../../P19data/processed_data/labels_ts.npy', allow_pickle=True)
dict_params = {label: i for i, label in enumerate(labels_ts[:-1])}
density_scores_names = np.load('../saved/IG_density_scores_P19.npy', allow_pickle=True)[:, 1]
idx = [dict_params[name] for name in density_scores_names[:num_missing_features]]
elif dataset == 'eICU':
labels_ts = np.load('../../../eICUdata/processed_data/eICU_ts_vars.npy', allow_pickle=True)
dict_params = {label: i for i, label in enumerate(labels_ts)}
density_scores_names = np.load('../saved/IG_density_scores_eICU.npy', allow_pickle=True)[:, 1]
idx = [dict_params[name] for name in density_scores_names[:num_missing_features]]
elif dataset == 'PAM':
density_scores_indices = np.load('../saved/IG_density_scores_PAM.npy', allow_pickle=True)[:, 0]
idx = list(map(int, density_scores_indices[:num_missing_features]))
for i, tpl in enumerate(val_data):
_, _, values, _, _ = tpl
tpl = list(tpl)
values[:, idx] = torch.zeros(values.shape[0], num_missing_features)
tpl[2] = values
val_data[i] = tuple(tpl)
for i, tpl in enumerate(test_data):
_, _, values, _, _ = tpl
tpl = list(tpl)
values[:, idx] = torch.zeros(values.shape[0], num_missing_features)
tpl[2] = values
test_data[i] = tuple(tpl)
if upsampling_batch:
                train_data_upsampled = []
true_labels = np.array(list(map(lambda x: float(x[7]), np.array(train_data)[:, 4])))
if dataset == 'P12' or dataset == 'P19' or dataset == 'eICU': # 2 classes
idx_0 = np.where(true_labels == 0)[0]
idx_1 = np.where(true_labels == 1)[0]
for _ in range(len(true_labels) // batch_size):
indices = random_sample(idx_0, idx_1, batch_size)
for i in indices:
                            train_data_upsampled.append(train_data[i])
                elif dataset == 'PAM':  # 8 classes
                    for _ in range(len(true_labels) // batch_size):
                        indices = random_sample_8(true_labels, batch_size)
                        for i in indices:
                            train_data_upsampled.append(train_data[i])
                train_data = train_data_upsampled
test_data_combined = variable_time_collate_fn(test_data, device, classify=args.classif, data_min=data_min, data_max=data_max)
train_data_combined = variable_time_collate_fn(train_data, device, classify=args.classif, data_min=data_min, data_max=data_max)
val_data_combined = variable_time_collate_fn(
val_data, device, classify=args.classif, data_min=data_min, data_max=data_max)
print(train_data_combined[1].sum(
), val_data_combined[1].sum(), test_data_combined[1].sum())
print(train_data_combined[0].size(), train_data_combined[1].size(),
val_data_combined[0].size(), val_data_combined[1].size(),
test_data_combined[0].size(), test_data_combined[1].size())
train_data_combined = TensorDataset(
train_data_combined[0], train_data_combined[1].long().squeeze())
val_data_combined = TensorDataset(
val_data_combined[0], val_data_combined[1].long().squeeze())
test_data_combined = TensorDataset(
test_data_combined[0], test_data_combined[1].long().squeeze())
else:
train_data_combined = variable_time_collate_fn(
train_data, device, classify=args.classif, data_min=data_min, data_max=data_max)
train_dataloader = DataLoader(
train_data_combined, batch_size=batch_size, shuffle=False)
test_dataloader = DataLoader(
test_data_combined, batch_size=batch_size, shuffle=False)
else:
train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=False,
collate_fn=lambda batch: variable_time_collate_fn2(batch, args, device, data_type="train",
data_min=data_min, data_max=data_max))
test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False,
collate_fn=lambda batch: variable_time_collate_fn2(batch, args, device, data_type="test",
data_min=data_min, data_max=data_max))
data_objects = {"dataset_obj": {},
"train_dataloader": train_dataloader,
"test_dataloader": test_dataloader,
"input_dim": input_dim,
"n_train_batches": len(train_dataloader),
"n_test_batches": len(test_dataloader),
"attr": {}, # optional
"classif_per_tp": False, # optional
"n_labels": 1} # optional
if args.classif:
val_dataloader = DataLoader(
val_data_combined, batch_size=batch_size, shuffle=False)
data_objects["val_dataloader"] = val_dataloader
return data_objects
def variable_time_collate_fn(batch, device=torch.device("cpu"), classify=False, activity=False,
data_min=None, data_max=None):
"""
Expects a batch of time series data in the form of (record_id, tt, vals, mask, labels) where
- record_id is a patient id
- tt is a 1-dimensional tensor containing T time values of observations.
- vals is a (T, D) tensor containing observed values for D variables.
- mask is a (T, D) tensor containing 1 where values were observed and 0 otherwise.
- labels is a list of labels for the current patient, if labels are available. Otherwise None.
    Returns:
      combined_data: (B, maxlen, 2*D + 1) tensor concatenating the observed
        values, the observation mask and the normalized time stamps.
      combined_labels: (B, N) label tensor, returned only when classify=True.
    """
D = batch[0][2].shape[1]
# number of labels
N = batch[0][-1].shape[1] if activity else 1
len_tt = [ex[1].size(0) for ex in batch]
maxlen = np.max(len_tt)
enc_combined_tt = torch.zeros([len(batch), maxlen]).to(device)
enc_combined_vals = torch.zeros([len(batch), maxlen, D]).to(device)
enc_combined_mask = torch.zeros([len(batch), maxlen, D]).to(device)
if classify:
if activity:
combined_labels = torch.zeros([len(batch), maxlen, N]).to(device)
else:
combined_labels = torch.zeros([len(batch), N]).to(device)
for b, (record_id, tt, vals, mask, labels) in enumerate(batch):
currlen = tt.size(0)
enc_combined_tt[b, :currlen] = tt.to(device)
enc_combined_vals[b, :currlen] = vals.to(device)
enc_combined_mask[b, :currlen] = mask.to(device)
if classify:
if activity:
combined_labels[b, :currlen] = labels.to(device)
else:
if labels is not None:
combined_labels[b] = labels.to(device)
if not activity:
enc_combined_vals, _, _ = normalize_masked_data(enc_combined_vals, enc_combined_mask,
att_min=data_min, att_max=data_max)
if torch.max(enc_combined_tt) != 0.:
enc_combined_tt = enc_combined_tt / torch.max(enc_combined_tt)
combined_data = torch.cat(
(enc_combined_vals, enc_combined_mask, enc_combined_tt.unsqueeze(-1)), 2)
if classify:
return combined_data, combined_labels
else:
return combined_data
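# Added usage sketch (assumes normalize_masked_data from this repo is in scope,
# as it already is for variable_time_collate_fn above): collate two toy
# patients with different numbers of observations.
def _demo_variable_time_collate_fn():
    D = 3
    batch = [
        ('p1', torch.rand(5), torch.rand(5, D), torch.ones(5, D), torch.tensor([1.])),
        ('p2', torch.rand(2), torch.rand(2, D), torch.ones(2, D), torch.tensor([0.])),
    ]
    data, labels = variable_time_collate_fn(
        batch, torch.device('cpu'), classify=True,
        data_min=torch.zeros(D), data_max=torch.ones(D))
    # data stacks values, mask and normalized time: (B, maxlen, 2*D + 1).
    assert data.shape == (2, 5, 2 * D + 1)
    return data, labels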
def get_activity_data(args, device):
n_samples = min(10000, args.n)
dataset_obj = PersonActivity('data/PersonActivity',
download=True, n_samples=n_samples, device=device)
print(dataset_obj)
train_data, test_data = model_selection.train_test_split(dataset_obj, train_size=0.8,
random_state=42, shuffle=True)
record_id, tt, vals, mask, labels = train_data[0]
input_dim = vals.size(-1)
batch_size = min(min(len(dataset_obj), args.batch_size), args.n)
test_data_combined = variable_time_collate_fn(test_data, device, classify=args.classif,
activity=True)
train_data, val_data = model_selection.train_test_split(train_data, train_size=0.8,
random_state=11, shuffle=True)
train_data_combined = variable_time_collate_fn(
train_data, device, classify=args.classif, activity=True)
val_data_combined = variable_time_collate_fn(
val_data, device, classify=args.classif, activity=True)
print(train_data_combined[1].sum(
), val_data_combined[1].sum(), test_data_combined[1].sum())
print(train_data_combined[0].size(), train_data_combined[1].size(),
val_data_combined[0].size(), val_data_combined[1].size(),
test_data_combined[0].size(), test_data_combined[1].size())
train_data_combined = TensorDataset(
train_data_combined[0], train_data_combined[1].long())
val_data_combined = TensorDataset(
val_data_combined[0], val_data_combined[1].long())
test_data_combined = TensorDataset(
test_data_combined[0], test_data_combined[1].long())
train_dataloader = DataLoader(
train_data_combined, batch_size=batch_size, shuffle=False)
test_dataloader = DataLoader(
test_data_combined, batch_size=batch_size, shuffle=False)
val_dataloader = DataLoader(
val_data_combined, batch_size=batch_size, shuffle=False)
data_objects = {"train_dataloader": train_dataloader,
"test_dataloader": test_dataloader,
"val_dataloader": val_dataloader,
"input_dim": input_dim,
"n_train_batches": len(train_dataloader),
"n_test_batches": len(test_dataloader),
"classif_per_tp": False, # optional
"n_labels": 1} # optional
return data_objects
def irregularly_sampled_data_gen(n=10, length=20, seed=0):
np.random.seed(seed)
obs_values, ground_truth, obs_times = [], [], []
for i in range(n):
t1 = np.sort(np.random.uniform(low=0.0, high=1.0, size=length))
t2 = np.sort(np.random.uniform(low=0.0, high=1.0, size=length))
t3 = np.sort(np.random.uniform(low=0.0, high=1.0, size=length))
a = 10 * np.random.randn()
b = 10 * np.random.rand()
f1 = .8 * np.sin(20*(t1+a) + np.sin(20*(t1+a))) + \
0.01 * np.random.randn()
f2 = -.5 * np.sin(20*(t2+a + 20) + np.sin(20*(t2+a + 20))
) + 0.01 * np.random.randn()
f3 = np.sin(12*(t3+b)) + 0.01 * np.random.randn()
obs_times.append(np.stack((t1, t2, t3), axis=0))
obs_values.append(np.stack((f1, f2, f3), axis=0))
t = np.linspace(0, 1, 100)
fg1 = .8 * np.sin(20*(t+a) + np.sin(20*(t+a)))
fg2 = -.5 * np.sin(20*(t+a + 20) + np.sin(20*(t+a + 20)))
fg3 = np.sin(12*(t+b))
ground_truth.append(np.stack((fg1, fg2, fg3), axis=0))
return obs_values, ground_truth, obs_times
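# Added shape check for the synthetic generator above: n samples, 3 channels,
# `length` irregular observation times, dense ground truth on a 100-point grid.
def _demo_irregularly_sampled_data_gen():
    obs_values, ground_truth, obs_times = irregularly_sampled_data_gen(n=2, length=20)
    assert np.stack(obs_values).shape == (2, 3, 20)
    assert np.stack(obs_times).shape == (2, 3, 20)
    assert np.stack(ground_truth).shape == (2, 3, 100)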
def sine_wave_data_gen(args, seed=0):
np.random.seed(seed)
obs_values, ground_truth, obs_times = [], [], []
for _ in range(args.n):
t = np.sort(np.random.choice(np.linspace(
0, 1., 101), size=args.length, replace=True))
b = 10 * np.random.rand()
f = np.sin(12*(t+b)) + 0.1 * np.random.randn()
obs_times.append(t)
obs_values.append(f)
tc = np.linspace(0, 1, 100)
fg = np.sin(12*(tc + b))
ground_truth.append(fg)
obs_values = np.array(obs_values)
obs_times = np.array(obs_times)
ground_truth = np.array(ground_truth)
print(obs_values.shape, obs_times.shape, ground_truth.shape)
mask = np.ones_like(obs_values)
    combined_data = np.concatenate((np.expand_dims(obs_values, axis=2), np.expand_dims(
        mask, axis=2), np.expand_dims(obs_times, axis=2)), axis=2)
import os
import argparse
import random
import glob
import itertools
import time
import pandas as pd
import numpy as np
import soundfile as sf
import pyloudnorm as pyln
import warnings
from tqdm import tqdm
from codetiming import Timer
import multiprocessing
from loky import get_reusable_executor
# Global parameters
EPS = 1e-10 # secures log and division
MAX_AMP = 0.9 # max amplitude in sources and mixture
RATE = 24000 # JVS has all the sources at 24KHz
MIN_LOUDNESS = -33 # loudness randomized between this min and max
MAX_LOUDNESS = -25
n_cpu = multiprocessing.cpu_count()
random.seed(673) # sum([ord(ch) for ch in 'jvsmix'])
def main(args):
if args.multiproc:
create_jvsmix_metadata_multiproc(args.jvs_dir, args.jvs_md_file,
args.md_dir, args.jsut_name_file,
args.n_src)
else:
create_jvsmix_metadata(args.jvs_dir, args.jvs_md_file, args.md_dir,
args.jsut_name_file, args.n_src)
@Timer(name='decorator')
def create_jvsmix_metadata(jvs_dir, jvs_md_file, md_dir, jsut_name_file, n_src):
dataset = f'jvs{n_src}mix'
os.makedirs(os.path.join(md_dir, dataset), exist_ok=True)
jvs_md = pd.read_csv(jvs_md_file, engine='python')
jsut_name = pd.read_csv(jsut_name_file, header=None, engine='python')
jsut_name = jsut_name.set_index(0).to_dict()[1]
# Generate speaker combinations
speaker_combs = make_combs(jvs_md, n_src)
# Create and save empty dataframes as CSVs
mixture_md_cols = ['mixture_id']
mixture_info_cols = ['mixture_id']
for i in range(n_src):
mixture_md_cols.append(f'source_{i + 1}_path')
mixture_md_cols.append(f'source_{i + 1}_gain')
mixture_info_cols.append(f'speaker_{i + 1}_id')
mixture_info_cols.append(f'speaker_{i + 1}_gender')
mixture_md = pd.DataFrame(columns=mixture_md_cols)
mixture_info = pd.DataFrame(columns=mixture_info_cols)
# Create metadata for each subset with all speakers
for subset in ['non-parallel', 'parallel']:
print('--- Subset:', subset)
clip_counter = 0
save_path = os.path.join(md_dir, dataset, dataset+'_'+subset)
mixture_md.to_csv(save_path+'.csv', index=None)
mixture_info.to_csv(save_path+'_info.csv', index=None)
for comb in tqdm(speaker_combs, total=len(speaker_combs)):
sources_info, sources_list_max = read_sources(
comb, jvs_md, jvs_dir,
jsut_name, n_src, subset)
# Compute original loudness and normalized sources
loudness, _, sources_list_norm = set_loudness(sources_list_max)
# Create mixture
mixtures = mix(sources_list_norm)
# Check for clipping and renormalize if needed
renormalize_loudness, did_clip = check_for_clipping(
mixtures, sources_list_norm)
# Keep track of number of clippings done
# clip_counter += sum([int(i) for i in did_clip])
# Compute gain
gains_list = compute_gain(loudness, renormalize_loudness)
# Add all the info to dataframe
mix_md_df, mix_info_df = get_dfs(sources_info, gains_list, n_src)
# Save interim results to file
mix_md_df.to_csv(save_path+'.csv',
index=None, header=None, mode='a')
mix_info_df.to_csv(save_path+'_info.csv',
index=None, header=None, mode='a')
# print(f'Among {len(mixture_md)} mixtures, {clip_counter} clipped.')
@Timer(name='decorator')
def create_jvsmix_metadata_multiproc(jvs_dir, jvs_md_file, md_dir,
jsut_name_file, n_src):
def do_multiproc(arg_list):
comb = arg_list[0]
jvs_md = arg_list[1]
jvs_dir = arg_list[2]
jsut_name = arg_list[3]
n_src = arg_list[4]
subset = arg_list[5]
save_path = arg_list[6]
save_id = arg_list[7]
sources_info, sources_list_max = read_sources(comb, jvs_md, jvs_dir,
jsut_name, n_src, subset)
# Compute original loudness and normalized sources
loudness, _, sources_list_norm = set_loudness(sources_list_max)
# Create mixture
mixtures = mix(sources_list_norm)
# Check for clipping and renormalize if needed
renormalize_loudness, did_clip = check_for_clipping(
mixtures, sources_list_norm)
# Keep track of number of clippings done
# clip_counter += sum([int(i) for i in did_clip])
# Compute gain
gains_list = compute_gain(loudness, renormalize_loudness)
# Add all the info to dataframe
tmp_mix_md_df, tmp_mix_info_df = get_dfs(sources_info, gains_list, n_src)
# Save interim results to file
tmp_mix_md_df.to_csv(
os.path.join(save_path, f'{os.getpid()}_{save_id}.csv'),
index=None, header=None)
tmp_mix_info_df.to_csv(
os.path.join(save_path, f'{os.getpid()}_{save_id}_info.csv'),
index=None, header=None)
dataset = f'jvs{n_src}mix'
os.makedirs(os.path.join(md_dir, dataset), exist_ok=True)
jvs_md = pd.read_csv(jvs_md_file, engine='python')
jsut_name = pd.read_csv(jsut_name_file, header=None, engine='python')
jsut_name = jsut_name.set_index(0).to_dict()[1]
# Generate speaker combinations
speaker_combs = make_combs(jvs_md, n_src)
# Create and save empty dataframes as CSVs
mixture_md_cols = ['mixture_id']
mixture_info_cols = ['mixture_id']
for i in range(n_src):
mixture_md_cols.append(f'source_{i + 1}_path')
mixture_md_cols.append(f'source_{i + 1}_gain')
mixture_info_cols.append(f'speaker_{i + 1}_id')
mixture_info_cols.append(f'speaker_{i + 1}_gender')
mixture_md = pd.DataFrame(columns=mixture_md_cols)
mixture_info = pd.DataFrame(columns=mixture_info_cols)
# Create 'tmp' directory for multiprocess results
os.makedirs(os.path.join(md_dir, dataset, 'tmp'), exist_ok=True)
# Create metadata for each subset with all speakers
for subset in ['non-parallel', 'parallel']:
print('--- Subset:', subset)
clip_counter = 0
save_path = os.path.join(md_dir, dataset, dataset+'_'+subset)
mixture_md.to_csv(save_path+'.csv', index=None)
mixture_info.to_csv(save_path+'_info.csv', index=None)
subargs = [jvs_md, jvs_dir, jsut_name, n_src, subset,
os.path.join(os.path.split(save_path)[0], 'tmp')]
arg_list = []
for i, comb in enumerate(speaker_combs):
arg_list.append([comb]+subargs+[i])
executor = get_reusable_executor(max_workers=int(n_cpu*0.75), timeout=5)
with tqdm(total=len(arg_list)) as pbar:
for i, _ in enumerate(executor.map(do_multiproc, arg_list)):
pbar.update()
time.sleep(1)
# Consolidate results from multiple processes saved inside 'tmp'
print('Consolidating results...')
tmp_csvs = glob.glob(os.path.join(
os.path.split(save_path)[0], 'tmp', '*.csv'))
for tc in tqdm(tmp_csvs, total=len(tmp_csvs)):
tmp_df = pd.read_csv(tc, header=None)
if 'info' in tc:
tmp_df.to_csv(save_path+'_info.csv',
index=None, header=None, mode='a')
else:
tmp_df.to_csv(save_path+'.csv',
index=None, header=None, mode='a')
os.system('rm -rf ' + os.path.join(os.path.split(save_path)[0], 'tmp'))
def make_combs(jvs_md, n_src):
speakers = list(set(jvs_md['speaker_id']))
    # Because the number of speakers is small, we enumerate all possible
    # speaker pairs instead of randomly sampling them
speaker_combs = list(itertools.combinations(speakers, n_src))
return speaker_combs
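# Added illustrative check: with 4 speakers and n_src=2, make_combs enumerates
# all C(4, 2) = 6 unordered speaker pairs (the frame below is a toy stand-in).
def _demo_make_combs():
    toy_md = pd.DataFrame({'speaker_id': ['jvs001', 'jvs002', 'jvs003', 'jvs004']})
    combs = make_combs(toy_md, n_src=2)
    assert len(combs) == 6
    return combs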
def read_sources(comb, jvs_md, jvs_dir, jsut_name, n_src, subset):
subdfs = [jvs_md[ (jvs_md['speaker_id']==comb[i]) &
(jvs_md['subset']==subset) ] for i in range(n_src)]
# Get sources info
speaker_id_list = comb
gender_list = [subdfs[i].iloc[0]['gender'] for i in range(n_src)]
# Each speaker has multiple utterances
length_list = [list(subdfs[i]['length']) for i in range(n_src)]
lengths = list(itertools.product(*length_list))
path_list = [list(subdfs[i]['path']) for i in range(n_src)]
paths = list(itertools.product(*path_list))
# Randomly select 50 mixtures for each speaker combination
indices_to_keep = random.choices(list(range(len(paths))), k=50)
lengths = [lengths[i] for i in indices_to_keep]
paths = [paths[i] for i in indices_to_keep]
# Generate mixture ids
mixture_ids = []
max_lengths = []
for pc, leng in zip(paths, lengths):
mixture_ids.append('-'.join([speaker_id_list[i] + '_' +
jsut_name[os.path.split(pc[i])[1].replace('.wav','')]
for i in range(n_src)]))
max_lengths.append(max(leng))
assert(len(lengths) == len(paths) ==
len(mixture_ids) == len(max_lengths))
# print(f'\tTotal {len(mixture_ids)} combinations generated.')
# Read the source and compute info
sources_list = []
for j in range(len(paths)):
sltmp = []
# Randomly pad the smaller audio to the start, end or equally
padway = random.randint(0, 2)
for i in range(n_src):
s, _ = sf.read(os.path.join(jvs_dir, paths[j][i]), dtype='float32')
padlen = max_lengths[j] - len(s)
if padway == 0:
# pad start
sltmp.append(np.pad(s, (padlen, 0), mode='constant'))
elif padway == 1:
# pad equally
sltmp.append(
np.pad(s, (padlen//2, padlen-(padlen//2)), mode='constant'))
elif padway == 2:
# pad end
sltmp.append(np.pad(s, (0, padlen), mode='constant'))
sources_list.append(sltmp)
sources_info = {'mixture_ids': mixture_ids,
'speaker_id_list': speaker_id_list,
'gender_list': gender_list,
'paths': paths}
return sources_info, sources_list
def set_loudness(sources_list):
loudness_list = []
meter = pyln.Meter(RATE)
target_loudness_list = []
sources_list_norm = []
for srcs in sources_list:
src_list_norm = []
trg_loudness_list = []
loudness = []
for i in range(len(srcs)):
# Initialize loudness
loudness.append(meter.integrated_loudness(srcs[i]))
# Pick a random loudness
target_loudness = random.uniform(MIN_LOUDNESS, MAX_LOUDNESS)
# Normalize source to target loudness
with warnings.catch_warnings():
warnings.simplefilter('ignore')
src = pyln.normalize.loudness(srcs[i], loudness[i],
target_loudness)
if np.max(np.abs(src)) >= 1:
src = srcs[i] * MAX_AMP / np.max(np.abs(srcs[i]))
target_loudness = meter.integrated_loudness(src)
# Save tmp results
src_list_norm.append(src)
trg_loudness_list.append(target_loudness)
# Save final results
sources_list_norm.append(src_list_norm)
target_loudness_list.append(trg_loudness_list)
loudness_list.append(loudness)
return loudness_list, target_loudness_list, sources_list_norm
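# Added sanity check for set_loudness: a moderate pure tone should be pushed
# into the [MIN_LOUDNESS, MAX_LOUDNESS] window (unless the MAX_AMP clipping
# fallback above kicks in, which a 0.5-amplitude tone does not trigger).
def _demo_set_loudness():
    t = np.arange(RATE) / RATE
    tone = (0.5 * np.sin(2 * np.pi * 440.0 * t)).astype('float32')
    loudness, targets, normed = set_loudness([[tone]])
    assert MIN_LOUDNESS <= targets[0][0] <= MAX_LOUDNESS
    return loudness, targets, normed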
def mix(source_list_norm):
mixtures = []
for srcs in source_list_norm:
        mixture_max = np.zeros_like(srcs[0])
import scipy.ndimage as scnd
import scipy.optimize as sio
import numpy as np
import numba
import warnings
import stemtool as st
@numba.jit
def fit_nbed_disks(corr_image, disk_size, positions, diff_spots):
warnings.filterwarnings("ignore")
positions = np.asarray(positions, dtype=np.float64)
diff_spots = np.asarray(diff_spots, dtype=np.float64)
fitted_disk_list = np.zeros_like(positions)
disk_locations = np.zeros_like(positions)
for ii in range(int(np.shape(positions)[0])):
posx = positions[ii, 0]
posy = positions[ii, 1]
par = st.util.fit_gaussian2D_mask(corr_image, posx, posy, disk_size)
fitted_disk_list[ii, 0] = par[0]
fitted_disk_list[ii, 1] = par[1]
disk_locations = np.copy(fitted_disk_list)
disk_locations[:, 1] = 0 - disk_locations[:, 1]
center = disk_locations[
np.logical_and((diff_spots[:, 0] == 0), (diff_spots[:, 1] == 0)), :
]
cx = center[0, 0]
cy = center[0, 1]
disk_locations[:, 0:2] = disk_locations[:, 0:2] - np.asarray(
(cx, cy), dtype=np.float64
)
lcbed, _, _, _ = np.linalg.lstsq(diff_spots, disk_locations, rcond=None)
cy = (-1) * cy
return fitted_disk_list, np.asarray((cx, cy), dtype=np.float64), lcbed
def sobel_filter(image, med_filter=50):
ls_image, _ = st.util.sobel(st.util.image_logarizer(image))
ls_image[ls_image > (med_filter * np.median(ls_image))] = med_filter * np.median(
ls_image
)
ls_image[ls_image < (np.median(ls_image) / med_filter)] = (
np.median(ls_image) / med_filter
)
return ls_image
@numba.jit
def strain_and_disk(data4D, disk_size, pixel_list_xy, disk_list, ROI=1, med_factor=50):
warnings.filterwarnings("ignore")
if np.size(ROI) < 2:
ROI = np.ones((data4D.shape[2], data4D.shape[3]), dtype=bool)
# Calculate needed values
scan_size = np.asarray(data4D.shape)[2:4]
sy, sx = np.mgrid[0 : scan_size[0], 0 : scan_size[1]]
scan_positions = (np.asarray((np.ravel(sy), np.ravel(sx)))).astype(int)
cbed_size = np.asarray(data4D.shape)[0:2]
yy, xx = np.mgrid[0 : cbed_size[0], 0 : cbed_size[1]]
center_disk = (
st.util.make_circle(cbed_size, cbed_size[1] / 2, cbed_size[0] / 2, disk_size)
).astype(np.float64)
i_matrix = (np.eye(2)).astype(np.float64)
sobel_center_disk, _ = st.util.sobel(center_disk)
# Initialize matrices
e_xx = np.zeros(scan_size, dtype=np.float64)
e_xy = np.zeros(scan_size, dtype=np.float64)
e_th = np.zeros(scan_size, dtype=np.float64)
e_yy = np.zeros(scan_size, dtype=np.float64)
disk_x = np.zeros(scan_size, dtype=np.float64)
disk_y = np.zeros(scan_size, dtype=np.float64)
COM_x = np.zeros(scan_size, dtype=np.float64)
COM_y = np.zeros(scan_size, dtype=np.float64)
# Calculate for mean CBED if no reference
mean_cbed = np.mean(data4D, axis=(-1, -2), dtype=np.float64)
mean_ls_cbed, _ = st.util.sobel(st.util.image_logarizer(mean_cbed))
mean_ls_cbed[
mean_ls_cbed > med_factor * np.median(mean_ls_cbed)
] = med_factor * np.median(mean_ls_cbed)
mean_ls_cbed[mean_ls_cbed < np.median(mean_ls_cbed) / med_factor] = (
np.median(mean_ls_cbed) / med_factor
)
mean_lsc = st.util.cross_corr_unpadded(mean_ls_cbed, sobel_center_disk)
_, mean_center, mean_axes = fit_nbed_disks(
mean_lsc, disk_size, pixel_list_xy, disk_list
)
axes_lengths = ((mean_axes[:, 0] ** 2) + (mean_axes[:, 1] ** 2)) ** 0.5
beam_r = axes_lengths[1]
inverse_axes = np.linalg.inv(mean_axes)
for pp in range(np.size(sy)):
ii = scan_positions[0, pp]
jj = scan_positions[1, pp]
pattern = data4D[:, :, ii, jj]
pattern_ls, _ = st.util.sobel(st.util.image_logarizer(pattern))
pattern_ls[pattern_ls > med_factor * np.median(pattern_ls)] = np.median(
pattern_ls
)
pattern_lsc = st.util.cross_corr_unpadded(pattern_ls, sobel_center_disk)
_, pattern_center, pattern_axes = fit_nbed_disks(
pattern_lsc, disk_size, pixel_list_xy, disk_list
)
pcirc = (
(((yy - pattern_center[1]) ** 2) + ((xx - pattern_center[0]) ** 2)) ** 0.5
) <= beam_r
pattern_x = np.sum(pattern[pcirc] * xx[pcirc]) / np.sum(pattern[pcirc])
pattern_y = np.sum(pattern[pcirc] * yy[pcirc]) / np.sum(pattern[pcirc])
t_pattern = np.matmul(pattern_axes, inverse_axes)
s_pattern = t_pattern - i_matrix
e_xx[ii, jj] = -s_pattern[0, 0]
e_xy[ii, jj] = -(s_pattern[0, 1] + s_pattern[1, 0])
e_th[ii, jj] = -(s_pattern[0, 1] - s_pattern[1, 0])
e_yy[ii, jj] = -s_pattern[1, 1]
disk_x[ii, jj] = pattern_center[0] - mean_center[0]
disk_y[ii, jj] = pattern_center[1] - mean_center[1]
COM_x[ii, jj] = pattern_x - mean_center[0]
COM_y[ii, jj] = pattern_y - mean_center[1]
return e_xx, e_xy, e_th, e_yy, disk_x, disk_y, COM_x, COM_y
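# Added worked example of the strain decomposition used in strain_and_disk:
# the per-pattern lattice axes are compared with the mean axes through
# T = A_pattern @ A_mean^{-1}, and the infinitesimal strain is S = T - I
# (with the sign flip applied to e_xx/e_yy above).
def _demo_strain_decomposition():
    mean_axes = np.eye(2)
    pattern_axes = np.array([[1.01, 0.0], [0.0, 0.99]])  # +1% along x, -1% along y
    s_pattern = np.matmul(pattern_axes, np.linalg.inv(mean_axes)) - np.eye(2)
    e_xx = -s_pattern[0, 0]  # -0.01
    e_yy = -s_pattern[1, 1]  # +0.01
    return e_xx, e_yy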
@numba.jit
def dpc_central_disk(data4D, disk_size, position, ROI=1, med_val=20):
"""
DPC routine on only the central disk
Parameters
----------
data4D: ndarray
The 4 dimensional dataset that will be analyzed
The first two dimensions are the Fourier space
diffraction dimensions and the last two dimensions
are the real space scanning dimensions
disk_size: float
Size of the central disk
position: ndarray
X and Y positions
This is the initial guess that will be refined
ROI: ndarray
The region of interest for the scanning region
that will be analyzed. If no ROI is given then
the entire scanned area will be analyzed
med_val: float
Sometimes some pixels are either too bright in
        the diffraction patterns due to stray muons or
are zero due to dead detector pixels. This removes
the effect of such pixels before Sobel filtering
Returns
-------
p_cen: ndarray
P positions of the central disk
q_cen: ndarray
Q positions of the central disk
p_com: ndarray
P positions of the center of mass
of the central disk
q_com: ndarray
Q positions of the center of mass
of the central disk
Notes
-----
    This is for when we want to perform DPC without bothering
    with the higher-order disks. Within the chosen ROI of the 4D
    dataset, the central disk is fitted at each scan position; a
    disk centered on the edge-fitted center is then constructed,
    and the COM inside that disk is computed as well.
:Authors:
<NAME> <<EMAIL>>
"""
warnings.filterwarnings("ignore")
if np.size(ROI) < 2:
ROI = np.ones((data4D.shape[2], data4D.shape[3]), dtype=bool)
yy, xx = np.mgrid[0 : data4D.shape[2], 0 : data4D.shape[3]]
data4D_ROI = data4D[:, :, yy[ROI], xx[ROI]]
pp, qq = np.mgrid[0 : data4D.shape[0], 0 : data4D.shape[1]]
no_points = np.sum(ROI)
fitted_pos = np.zeros((2, no_points), dtype=np.float64)
fitted_com = np.zeros((2, no_points), dtype=np.float64)
pos_p = position[0]
pos_q = position[1]
corr_disk = st.util.make_circle(
np.asarray(data4D.shape[0:2]), pos_p, pos_q, disk_size
)
sobel_corr_disk, _ = st.util.sobel(corr_disk)
p_cen = np.zeros((data4D.shape[2], data4D.shape[3]), dtype=np.float64)
q_cen = np.zeros((data4D.shape[2], data4D.shape[3]), dtype=np.float64)
p_com = np.zeros((data4D.shape[2], data4D.shape[3]), dtype=np.float64)
q_com = np.zeros((data4D.shape[2], data4D.shape[3]), dtype=np.float64)
for ii in numba.prange(int(no_points)):
cbed_image = data4D_ROI[:, :, ii]
slm_image, _ = st.util.sobel(
scnd.gaussian_filter(st.util.image_logarizer(cbed_image), 3)
)
slm_image[slm_image > med_val * np.median(slm_image)] = med_val * np.median(
slm_image
)
slm_image[slm_image < np.median(slm_image) / med_val] = (
np.median(slm_image) / med_val
)
corr_image = st.util.cross_corr(slm_image, sobel_corr_disk, hybridizer=0.25)
fitted_disk_list = st.util.fit_gaussian2D_mask(
corr_image, pos_p, pos_q, disk_size
)
fitted_center = fitted_disk_list[0:2] + (
np.asarray((pos_p, pos_q))
- 0.5 * (np.flip(np.asarray(np.shape(cbed_image))))
)
fitted_pos[0:2, ii] = fitted_center
fitted_circle = st.util.make_circle(
np.asarray(cbed_image.shape), fitted_center[0], fitted_center[1], disk_size
)
fitted_circle = fitted_circle.astype(bool)
        image_sum = np.sum(cbed_image[fitted_circle])
import os
from utils.common_utils import *
from skimage.restoration import denoise_nl_means
from skimage.metrics import structural_similarity
from skimage.metrics import peak_signal_noise_ratio as compare_psnr
def get_noisy_image(img_np, sigma):
"""Adds Gaussian noise to an image.
Args:
img_np: image, np.array with values from 0 to 1
sigma: std of the noise
"""
img_noisy_np = np.clip(img_np + np.random.normal(scale=sigma, size=img_np.shape), 0, 1).astype(np.float32)
img_noisy_pil = np_to_pil(img_noisy_np)
return img_noisy_pil, img_noisy_np
def non_local_means(noisy_np_img, sigma, fast_mode=True):
""" get a numpy noisy image
returns a denoised numpy image using Non-Local-Means
"""
sigma = sigma / 255.
h = 0.6 * sigma if fast_mode else 0.8 * sigma
patch_kw = dict(h=h, # Cut-off distance, a higher h results in a smoother image
sigma=sigma, # sigma provided
fast_mode=fast_mode, # If True, a fast version is used. If False, the original version is used.
patch_size=5, # 5x5 patches (Size of patches used for denoising.)
patch_distance=6, # 13x13 search area
multichannel=False)
denoised_img = []
n_channels = noisy_np_img.shape[0]
for c in range(n_channels):
denoise_fast = denoise_nl_means(noisy_np_img[c, :, :], **patch_kw)
denoised_img += [denoise_fast]
return np.array(denoised_img, dtype=np.float32)
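# Added usage sketch for non_local_means: denoise a single-channel image with
# values in [0, 1] and channel-first (C, H, W) layout, as the function expects.
def _demo_non_local_means():
    rng = np.random.default_rng(0)
    clean = np.zeros((1, 64, 64), dtype=np.float32)
    clean[:, 16:48, 16:48] = 1.0
    noisy = np.clip(clean + 0.1 * rng.standard_normal(clean.shape), 0, 1)
    denoised = non_local_means(noisy.astype(np.float32), sigma=25)
    assert denoised.shape == noisy.shape
    return denoised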
def compare_ssim(a, b):
if a.shape[0] == 3:
a = np.mean(a, axis=0)
b = np.mean(b, axis=0)
elif a.shape[2] == 3:
a = np.mean(a, axis=2)
b = np.mean(b, axis=2)
else:
a,b = a[0], b[0]
return structural_similarity(a,b)
import math
import cv2
# ----------
# PSNR
# ----------
def calculate_psnr(img1, img2, border=0):
# img1 and img2 have range [0, 255]
img1 = np.squeeze(img1)
img2 = np.squeeze(img2)
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
h, w = img1.shape[:2]
img1 = img1[border:h-border, border:w-border]
img2 = img2[border:h-border, border:w-border]
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
mse = np.mean((img1 - img2)**2)
if mse == 0:
return float('inf')
return 20 * math.log10(255.0 / math.sqrt(mse))
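# Added worked example: a uniform +10 offset on a [0, 255] image gives
# MSE = 100, so PSNR = 20 * log10(255 / 10) ~ 28.13 dB.
def _demo_calculate_psnr():
    img1 = np.full((8, 8), 100.0)
    img2 = np.full((8, 8), 110.0)
    return calculate_psnr(img1, img2)  # ~28.13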
# ----------
# SSIM
# ----------
def calculate_ssim(img1, img2, border=0):
'''calculate SSIM
the same outputs as MATLAB's
img1, img2: [0, 255]
'''
img1 = np.squeeze(img1)
img2 = np.squeeze(img2)
if not img1.shape == img2.shape:
raise ValueError('Input images must have the same dimensions.')
h, w = img1.shape[:2]
img1 = img1[border:h-border, border:w-border]
img2 = img2[border:h-border, border:w-border]
if img1.ndim == 2:
return ssim(img1, img2)
elif img1.ndim == 3:
if img1.shape[0] == 3:
ssims = []
for i in range(3):
ssims.append(ssim(img1[i], img2[i]))
return np.array(ssims).mean()
elif img1.shape[0] == 1:
return ssim(np.squeeze(img1), np.squeeze(img2))
else:
raise ValueError('Wrong input image dimensions.')
def ssim(img1, img2):
C1 = (0.01 * 255)**2
C2 = (0.03 * 255)**2
img1 = img1.astype(np.float64)
img2 = img2.astype(np.float64)
kernel = cv2.getGaussianKernel(11, 1.5)
window = np.outer(kernel, kernel.transpose())
mu1 = cv2.filter2D(img1, -1, window)[5:-5, 5:-5] # valid
mu2 = cv2.filter2D(img2, -1, window)[5:-5, 5:-5]
mu1_sq = mu1**2
mu2_sq = mu2**2
mu1_mu2 = mu1 * mu2
sigma1_sq = cv2.filter2D(img1**2, -1, window)[5:-5, 5:-5] - mu1_sq
sigma2_sq = cv2.filter2D(img2**2, -1, window)[5:-5, 5:-5] - mu2_sq
sigma12 = cv2.filter2D(img1 * img2, -1, window)[5:-5, 5:-5] - mu1_mu2
ssim_map = ((2 * mu1_mu2 + C1) * (2 * sigma12 + C2)) / ((mu1_sq + mu2_sq + C1) *
(sigma1_sq + sigma2_sq + C2))
return ssim_map.mean()
from .PerceptualSimilarity import PerceptualLoss
def get_lpips(device="cuda"):
return PerceptualLoss(model='net-lin', net='alex', use_gpu=(device == 'cuda'))
def calculate_lpips(img1_, img2_, LPIPS= None, device="cuda", color= "BGR"):
if img1_.shape[0] < 3:
make_color = lambda x: cv2.cvtColor(x, cv2.COLOR_GRAY2BGR)
img1 = make_color(img1_[0].astype(np.uint8))
img2 = make_color(img2_.astype(np.uint8))
else:
img1 = img1_.transpose([1,2,0]).astype(np.uint8)
img2 = img2_.transpose([1,2,0]).astype(np.uint8)
if LPIPS is None:
LPIPS = PerceptualLoss(model='net-lin', net='alex', use_gpu=(device == 'cuda'))
if color == "BGR":
img1 = cv2.cvtColor(img1, cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(img2, cv2.COLOR_BGR2RGB)
img1 = torch.tensor(img1.transpose([2,0,1])) / 255.0
img2 = torch.tensor(img2.transpose([2,0,1])) / 255.0
if device == "cuda":
img1 = img1.cuda()
img2 = img2.cuda()
return LPIPS(img1, img2, normalize=True).item()
if __name__ == "__main__":
import numpy as np
    img1 = np.random.rand(255, 255)
# This file is part of pyfesom
#
################################################################################
#
# Original matlab/python code by <NAME>, <NAME> and <NAME>.
#
# Contributers: <NAME>, <NAME>
#
# Modifications:
#
################################################################################
import numpy as np
import math as mt
import matplotlib as mpl
def scalar_r2g(al, be, ga, rlon, rlat):
'''
Converts rotated coordinates to geographical coordinates.
Parameters
----------
al : float
alpha Euler angle
be : float
beta Euler angle
ga : float
gamma Euler angle
rlon : array
1d array of longitudes in rotated coordinates
rlat : array
1d araay of latitudes in rotated coordinates
Returns
-------
lon : array
1d array of longitudes in geographical coordinates
lat : array
1d array of latitudes in geographical coordinates
'''
rad=mt.pi/180
al=al*rad
be=be*rad
ga=ga*rad
rotate_matrix=np.zeros(shape=(3,3))
rotate_matrix[0,0]=np.cos(ga)*np.cos(al)-np.sin(ga)*np.cos(be)*np.sin(al)
rotate_matrix[0,1]=np.cos(ga)*np.sin(al)+np.sin(ga)*np.cos(be)*np.cos(al)
rotate_matrix[0,2]=np.sin(ga)*np.sin(be)
rotate_matrix[1,0]=-np.sin(ga)*np.cos(al)-np.cos(ga)*np.cos(be)*np.sin(al)
rotate_matrix[1,1]=-np.sin(ga)*np.sin(al)+np.cos(ga)*np.cos(be)*np.cos(al)
rotate_matrix[1,2]=np.cos(ga)*np.sin(be)
rotate_matrix[2,0]=np.sin(be)*np.sin(al)
rotate_matrix[2,1]=-np.sin(be)*np.cos(al)
rotate_matrix[2,2]=np.cos(be)
rotate_matrix=np.linalg.pinv(rotate_matrix)
rlat=rlat*rad
rlon=rlon*rad
#Rotated Cartesian coordinates:
xr=np.cos(rlat)*np.cos(rlon)
yr=np.cos(rlat)*np.sin(rlon)
zr=np.sin(rlat)
#Geographical Cartesian coordinates:
xg=rotate_matrix[0,0]*xr + rotate_matrix[0,1]*yr + rotate_matrix[0,2]*zr
yg=rotate_matrix[1,0]*xr + rotate_matrix[1,1]*yr + rotate_matrix[1,2]*zr
zg=rotate_matrix[2,0]*xr + rotate_matrix[2,1]*yr + rotate_matrix[2,2]*zr
#Geographical coordinates:
lat = np.arcsin(zg)
lon= np.arctan2(yg, xg)
a = np.where((np.abs(xg)+np.abs(yg))==0)
if a: lon[a]=0
lat = lat/rad
lon = lon/rad
return (lon,lat)
def scalar_g2r(al, be, ga, lon, lat):
'''
Converts geographical coordinates to rotated coordinates.
Parameters
----------
al : float
alpha Euler angle
be : float
beta Euler angle
ga : float
gamma Euler angle
lon : array
1d array of longitudes in geographical coordinates
lat : array
1d array of latitudes in geographical coordinates
Returns
-------
rlon : array
1d array of longitudes in rotated coordinates
rlat : array
1d araay of latitudes in rotated coordinates
'''
rad=mt.pi/180
al=al*rad
be=be*rad
ga=ga*rad
rotate_matrix=np.zeros(shape=(3,3))
rotate_matrix[0,0]=np.cos(ga)*np.cos(al)-np.sin(ga)*np.cos(be)*np.sin(al)
    rotate_matrix[0,1]=np.cos(ga)*np.sin(al)+np.sin(ga)*np.cos(be)*np.cos(al)
    rotate_matrix[0,2]=np.sin(ga)*np.sin(be)
    rotate_matrix[1,0]=-np.sin(ga)*np.cos(al)-np.cos(ga)*np.cos(be)*np.sin(al)
    rotate_matrix[1,1]=-np.sin(ga)*np.sin(al)+np.cos(ga)*np.cos(be)*np.cos(al)
    rotate_matrix[1,2]=np.cos(ga)*np.sin(be)
    rotate_matrix[2,0]=np.sin(be)*np.sin(al)
    rotate_matrix[2,1]=-np.sin(be)*np.cos(al)
    rotate_matrix[2,2]=np.cos(be)
    lat=lat*rad
    lon=lon*rad
    # geographical Cartesian coordinates:
    xr=np.cos(lat)*np.cos(lon)
    yr=np.cos(lat)*np.sin(lon)
    zr=np.sin(lat)
    # rotated Cartesian coordinates:
    xg=rotate_matrix[0,0]*xr + rotate_matrix[0,1]*yr + rotate_matrix[0,2]*zr
    yg=rotate_matrix[1,0]*xr + rotate_matrix[1,1]*yr + rotate_matrix[1,2]*zr
    zg=rotate_matrix[2,0]*xr + rotate_matrix[2,1]*yr + rotate_matrix[2,2]*zr
# rotated coordinates:
rlat=np.arcsin(zg)
rlon=np.arctan2(yg, xg)
a = np.where((np.abs(xg)+np.abs(yg))==0)
    if a: rlon[a]=0
rlat = rlat/rad
rlon = rlon/rad
return (rlon, rlat)
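# Added consistency check: rotating geographic points with scalar_g2r and back
# with scalar_r2g (same Euler angles) must reproduce the inputs.
def _demo_rotation_roundtrip(al=50., be=15., ga=-90.):
    lon = np.array([0., 30., -60.])
    lat = np.array([10., 45., -20.])
    rlon, rlat = scalar_g2r(al, be, ga, lon, lat)
    lon2, lat2 = scalar_r2g(al, be, ga, rlon, rlat)
    assert np.allclose(lon, lon2) and np.allclose(lat, lat2)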
def vec_rotate_r2g(al, be, ga, lon, lat, urot, vrot, flag):
'''
Rotate vectors from rotated coordinates to geographical coordinates.
Parameters
----------
al : float
alpha Euler angle
be : float
beta Euler angle
ga : float
gamma Euler angle
lon : array
1d array of longitudes in rotated or geographical coordinates (see flag parameter)
lat : array
1d array of latitudes in rotated or geographical coordinates (see flag parameter)
urot : array
1d array of u component of the vector in rotated coordinates
vrot : array
1d array of v component of the vector in rotated coordinates
flag : 1 or 0
flag=1 - lon,lat are in geographical coordinate
flag=0 - lon,lat are in rotated coordinate
Returns
-------
u : array
1d array of u component of the vector in geographical coordinates
v : array
1d array of v component of the vector in geographical coordinates
'''
# first get another coordinate
if (flag==1):
(rlon,rlat)=scalar_g2r(al, be, ga, lon, lat)
else:
rlon=lon
rlat=lat
(lon,lat)=scalar_r2g(al, be, ga, rlon, rlat)
# then proceed...
rad=mt.pi/180
al=al*rad
be=be*rad
ga=ga*rad
rotate_matrix=np.zeros(shape=(3,3))
rotate_matrix[0,0]=np.cos(ga)*np.cos(al)-np.sin(ga)*np.cos(be)*np.sin(al)
rotate_matrix[0,1]=np.cos(ga)*np.sin(al)+np.sin(ga)*np.cos(be)*np.cos(al)
rotate_matrix[0,2]=np.sin(ga)*np.sin(be)
rotate_matrix[1,0]=-np.sin(ga)*np.cos(al)-np.cos(ga)*np.cos(be)*np.sin(al)
rotate_matrix[1,1]=-np.sin(ga)*np.sin(al)+np.cos(ga)*np.cos(be)*np.cos(al)
rotate_matrix[1,2]=np.cos(ga)*np.sin(be)
rotate_matrix[2,0]=np.sin(be)*np.sin(al)
rotate_matrix[2,1]=-np.sin(be)*np.cos(al)
rotate_matrix[2,2]=np.cos(be)
rotate_matrix=np.linalg.pinv(rotate_matrix)
rlat=rlat*rad
rlon=rlon*rad
lat=lat*rad
lon=lon*rad
# vector in rotated Cartesian
txg=-vrot*np.sin(rlat)*np.cos(rlon)-urot*np.sin(rlon)
tyg=-vrot*np.sin(rlat)*np.sin(rlon)+urot*np.cos(rlon)
tzg=vrot*np.cos(rlat)
# vector in geo Cartesian
txr=rotate_matrix[0,0]*txg + rotate_matrix[0,1]*tyg + rotate_matrix[0,2]*tzg
tyr=rotate_matrix[1,0]*txg + rotate_matrix[1,1]*tyg + rotate_matrix[1,2]*tzg
tzr=rotate_matrix[2,0]*txg + rotate_matrix[2,1]*tyg + rotate_matrix[2,2]*tzg
# vector in geo coordinate
v=-np.sin(lat)*np.cos(lon)*txr - np.sin(lat)*np.sin(lon)*tyr + np.cos(lat)*tzr
    u=-np.sin(lon)*txr + np.cos(lon)*tyr
    return (u, v)
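# Added sanity check: with zero Euler angles the rotated and geographic frames
# coincide, so vec_rotate_r2g must return the input vector unchanged.
def _demo_vec_rotate_identity():
    lon, lat = np.array([10.]), np.array([20.])
    u, v = vec_rotate_r2g(0., 0., 0., lon, lat, np.array([1.]), np.array([0.]), flag=1)
    assert np.allclose(u, 1.) and np.allclose(v, 0., atol=1e-12)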
import roslib
import sys
import rospy
import cv2
import math
import imutils
import statistics
import numpy as np
from std_msgs.msg import String
from sensor_msgs.msg import Image
from std_msgs.msg import Float64MultiArray, Float64
from cv_bridge import CvBridge, CvBridgeError
from scipy.spatial import distance as dist
class image_converter:
# Defines publisher and subscriber
def __init__(self):
# initialize the node named image_processing
rospy.init_node('image_processing', anonymous=True)
# initialize a publisher to send images from camera1 to a topic named image_topic1
self.image_pub1 = rospy.Publisher("image_topic1", Image, queue_size=1)
self.image_pub2 = rospy.Publisher("image_topic2", Image, queue_size=1)
        # Initialize a publisher to send the joints' angular positions to a topic called joints_pos
self.joints_pub=rospy.Publisher("joints_pos",Float64MultiArray,queue_size=10)
#initialize a publisher for the robot end effector
self.vision_end_effector_pub=rospy.Publisher("vision_end_effector",Float64MultiArray,queue_size=10)
self.fk_end_effector_pub = rospy.Publisher("fk_end_effector", Float64MultiArray, queue_size=10)
self.actual_target_trajectory_pub = rospy.Publisher("actual_target_trajectory", Float64MultiArray,queue_size=10)
self.vision_target_trajectory_pub = rospy.Publisher("vision_target_trajectory", Float64MultiArray,queue_size=10)
#initialize a publisher for the four angles
self.robot_joint1_pub = rospy.Publisher("/robot/joint1_position_controller/command", Float64, queue_size=10)
self.robot_joint2_pub = rospy.Publisher("/robot/joint2_position_controller/command", Float64, queue_size=10)
self.robot_joint3_pub = rospy.Publisher("/robot/joint3_position_controller/command", Float64, queue_size=10)
self.robot_joint4_pub = rospy.Publisher("/robot/joint4_position_controller/command", Float64, queue_size=10)
        # Initialize the publishers for the target
self.target_x_pub = rospy.Publisher("/target/x_position_controller/command", Float64, queue_size=10)
self.target_y_pub = rospy.Publisher("/target/y_position_controller/command", Float64, queue_size=10)
self.target_z_pub = rospy.Publisher("/target/z_position_controller/command", Float64, queue_size=10)
        # initialize a subscriber to receive messages from a topic named /robot/camera1/image_raw and use a callback function to receive the data
self.image_sub1 = rospy.Subscriber("/camera1/robot/image_raw", Image, self.callback1)
self.image_sub2 = rospy.Subscriber("/camera2/robot/image_raw", Image, self.callback2)
        # record the start time, used to generate the desired trajectory
self.time_trajectory = rospy.get_time()
#initialize variables
self.red = np.array([0.0, 0.0, 0.0, 0.0], dtype='float64')
self.green = np.array([0.0, 0.0, 0.0, 0.0], dtype='float64')
self.p2m = np.array([0.0], dtype='float64')
self.joint1 = np.array([0.0], dtype='float64')
self.joint2 = np.array([0.0], dtype='float64')
self.joint3 = np.array([0.0], dtype='float64')
self.joint4 = np.array([0.0], dtype='float64')
# initialize errors
self.time_previous_step = np.array([rospy.get_time()], dtype='float64')
self.time_previous_step2 = np.array([rospy.get_time()], dtype='float64')
# initialize error and derivative of error for trajectory tracking
self.error = np.array([0.0, 0.0,0.0], dtype='float64')
self.error_d = np.array([0.0, 0.0,0.0], dtype='float64')
# initialize the bridge between openCV and ROS
self.bridge = CvBridge()
    # Receive data from camera 1, process it, and publish
    def callback1(self, data):
        # Receive the image
try:
self.image1 = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
def callback2(self, data):
        # Receive the image
try:
self.image2 = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
    # Blob detection starts here -------------------------------------------------------
    # Same as in 2_1_joint_estimation.py
def detect_red(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([0, 200, 0])
higher_red1 = np.array([0, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([0, 200, 0])
higher_red2 = np.array([0, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_blue(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([70, 0, 0])
higher_red1 = np.array([255, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([70, 0, 0])
higher_red2 = np.array([255, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_green(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
lower_red1 = np.array([55, 0, 0])
higher_red1 = np.array([100, 255, 255])
red_range1 = cv2.inRange(hsv1, lower_red1, higher_red1)
res_red1 = cv2.bitwise_and(image_gau_blur1, image_gau_blur1, mask=red_range1)
red_s_gray1 = cv2.cvtColor(res_red1, cv2.COLOR_BGR2GRAY)
canny_edge1 = cv2.Canny(red_s_gray1, 30, 70)
contours1, hierarchy1 = cv2.findContours(canny_edge1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x1, y1), radius1 = cv2.minEnclosingCircle(contours1[0])
cy, cz1 = (int(x1), int(y1))
radius1 = int(radius1)
image_gau_blur2 = cv2.GaussianBlur(image2, (1, 1), 0)
hsv2 = cv2.cvtColor(image_gau_blur2, cv2.COLOR_BGR2HSV)
lower_red2 = np.array([55, 0, 0])
higher_red2 = np.array([100, 255, 255])
red_range2 = cv2.inRange(hsv2, lower_red2, higher_red2)
res_red2 = cv2.bitwise_and(image_gau_blur2, image_gau_blur2, mask=red_range2)
red_s_gray2 = cv2.cvtColor(res_red2, cv2.COLOR_BGR2GRAY)
canny_edge2 = cv2.Canny(red_s_gray2, 30, 70)
contours2, hierarchy2 = cv2.findContours(canny_edge2, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
(x2, y2), radius2 = cv2.minEnclosingCircle(contours2[0])
cx, cz2 = (int(x2), int(y2))
radius2 = int(radius2)
return np.array([cx, cy, cz1, cz2])
def detect_yellow(self,image1, image2):
image_gau_blur1 = cv2.GaussianBlur(image1, (1, 1), 0)
hsv1 = cv2.cvtColor(image_gau_blur1, cv2.COLOR_BGR2HSV)
        lower_red1 = np.array([16, 244, 0])
"""
A module for analysis tools dealing with uncertainties or error analysis in
spectra.
"""
import numpy as np
from ..spectra import SpectralRegion
from ..manipulation import extract_region
__all__ = ['snr', 'snr_derived']
def snr(spectrum, region=None):
"""
Calculate the mean S/N of the spectrum based on the flux and uncertainty
in the spectrum. This will be calculated over the regions, if they
are specified.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
        The spectrum object over which the signal-to-noise ratio will be calculated.
region: `~specutils.utils.SpectralRegion` or list of `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the SNR.
Returns
-------
snr : `~astropy.units.Quantity` or list (based on region input)
Signal to noise ratio of the spectrum or within the regions
Notes
-----
The spectrum will need to have the uncertainty defined in order for the SNR
to be calculated. If the goal is instead signal to noise *per pixel*, this
should be computed directly as ``spectrum.flux / spectrum.uncertainty``.
"""
if not hasattr(spectrum, 'uncertainty') or spectrum.uncertainty is None:
raise Exception("Spectrum1D currently requires the uncertainty be defined.")
# No region, therefore whole spectrum.
if region is None:
return _snr_single_region(spectrum)
# Single region
elif isinstance(region, SpectralRegion):
return _snr_single_region(spectrum, region=region)
# List of regions
elif isinstance(region, list):
return [_snr_single_region(spectrum, region=reg)
for reg in region]
def _snr_single_region(spectrum, region=None):
"""
Calculate the mean S/N of the spectrum based on the flux and uncertainty
in the spectrum.
Parameters
----------
spectrum : `~specutils.spectra.spectrum1d.Spectrum1D`
The spectrum object over which the S/N will be calculated.
region: `~specutils.utils.SpectralRegion`
Region within the spectrum to calculate the SNR.
Returns
-------
snr : `~astropy.units.Quantity` or list (based on region input)
Signal to noise ratio of the spectrum or within the regions
Notes
-----
This is a helper function for the above `snr()` method.
"""
if region is not None:
calc_spectrum = extract_region(spectrum, region)
else:
calc_spectrum = spectrum
if hasattr(spectrum, 'mask') and spectrum.mask is not None:
flux = calc_spectrum.flux[~spectrum.mask]
uncertainty = calc_spectrum.uncertainty.quantity[~spectrum.mask]
else:
flux = calc_spectrum.flux
uncertainty = calc_spectrum.uncertainty.quantity
# the axis=-1 will enable this to run on single-dispersion, single-flux
# and single-dispersion, multiple-flux
return np.mean(flux / uncertainty, axis=-1)
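# Example usage (a minimal sketch, not part of the original module; assumes
# astropy units and a Spectrum1D built with StdDevUncertainty):
#
#   import astropy.units as u
#   from astropy.nddata import StdDevUncertainty
#   from specutils import Spectrum1D
#   wave = np.linspace(5000, 5100, 50) * u.AA
#   flux = np.random.normal(1.0, 0.1, 50) * u.Jy
#   spec = Spectrum1D(spectral_axis=wave, flux=flux,
#                     uncertainty=StdDevUncertainty(0.1 * np.ones(50)))
#   snr(spec)  # mean flux/uncertainty over the whole spectrum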
import numpy as np
from matplotlib import pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import LeaveOneOut
from sklearn import linear_model, datasets, metrics
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
import pandas as pd
import seaborn as sn
from sklearn.linear_model import LogisticRegressionCV
from sklearn.svm import SVC
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import KFold
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import make_scorer
class TrainValTestModel:
# scale the data splits and fit the chosen classifier
def __init__(self, X_train, X_val, X_test, y_train, y_val, y_test, model_name, cross_val=False):
# X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=.2, random_state=0)
self.scaler = StandardScaler()
self.scaler.fit(X_train)
self.X_train = self.scaler.transform(X_train)
self.X_val = self.scaler.transform(X_val)
self.X_test = self.scaler.transform(X_test)
self.y_train = y_train
self.y_val = y_val
self.y_test = y_test
self.model_name = model_name
if cross_val:
best_params = self.tuning_hyperparameter()
self.best_params = best_params
self.clf = RandomForestClassifier(bootstrap=best_params['bootstrap'],
max_depth=best_params['max_depth'],
max_features=best_params['max_features'],
min_samples_leaf=best_params['min_samples_leaf'],
min_samples_split=best_params['min_samples_split'],
n_estimators=best_params['n_estimators'])
self.clf.fit(self.X_train, self.y_train)
else:
self.clf = self.get_model(model_name, self.X_train, self.y_train)
self.fpr, self.tpr, self.thrshd_roc = self.get_fpr_tpr()
# build model
def get_model(self, model_name, X_train, y_train):
# logistic regression
if model_name == 'LR':
clf = LogisticRegression(solver='lbfgs')
# random forest
elif model_name == 'RF':
clf = RandomForestClassifier(max_depth=2, random_state=0)
# gradient boosting
elif model_name == 'GB':
clf = GradientBoostingClassifier(random_state=0)
clf.fit(X_train, y_train)
return clf
def tuning_hyperparameter(self):
n_estimators = [int(x) for x in np.linspace(start = 200, stop = 2000, num = 10)]
max_features = ['auto', 'sqrt']
max_depth = [int(x) for x in np.linspace(10, 110, num = 11)]
max_depth.append(None)
min_samples_split = [2, 5, 10]
min_samples_leaf = [1, 2, 4]
bootstrap = [True, False]
# Create the random grid
random_grid = {'n_estimators': n_estimators,
'max_features': max_features,
'max_depth': max_depth,
'min_samples_split': min_samples_split,
'min_samples_leaf': min_samples_leaf,
'bootstrap': bootstrap}
def my_scorer(clf, X, y_true):
y_pred_proba = clf.predict_proba(X)[:, 1]  # positive-class probabilities
y_pred = np.where(y_pred_proba > 0.5, 1, 0)
error = np.sum(np.logical_and(y_pred != y_true, y_pred == 1)) / np.count_nonzero(y_true == 0)
return error
def fp(y_true, y_pred): return confusion_matrix(y_true, y_pred)[0, 1]
# penalize the raw false-positive count (greater_is_better=False makes the search minimize it)
score = make_scorer(fp, greater_is_better=False)
rf = RandomForestClassifier()
rf_random = RandomizedSearchCV(estimator=rf, param_distributions=random_grid, scoring=score, n_iter=100, cv=4, verbose=2, random_state=42, n_jobs=-1)
# Fit the random search model
rf_random.fit(self.X_train, self.y_train)
return rf_random.best_params_
def get_fpr_tpr(self):
prob_on_val = self.clf.predict_proba(self.X_val)[:,1]
fpr, tpr, thrshd_roc = metrics.roc_curve(self.y_val, prob_on_val, pos_label=1)
return fpr, tpr, thrshd_roc
# see the metrics of model
def get_metrics(self, thresh=None):
if thresh is None:
p = 0.5
else:
p = thresh
pred_proba_df = pd.DataFrame(self.clf.predict_proba(self.X_test)[:,1])
y_pred = pred_proba_df.applymap(lambda x: 1 if x>p else 0).to_numpy().reshape((pred_proba_df.shape[0]))
print("%s:\n%s\n" % (self.model_name,
metrics.classification_report(self.y_test, y_pred)))
# get the indices of important features
def get_important_feature(self):
# logistic regression
if self.model_name == 'LR':
importance = self.clf.coef_[0]
# random forest
elif self.model_name == 'RF':
importance = self.clf.feature_importances_
# gradient boosting
elif self.model_name == 'GB':
importance = self.clf.feature_importances_
return importance
# false-positive rate
def test_false_positive(self):
# choose threshold
pred_proba_df = pd.DataFrame(self.clf.predict_proba(self.X_test)[:,1])
threshold_list = [0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55,0.6,0.65,.7,.75,.8,.85,.9, .95,.99]
for i in threshold_list:
print ('\n******** For i = {} ******'.format(i))
y_test_pred = pred_proba_df.applymap(lambda x: 1 if x>i else 0).to_numpy().reshape( (pred_proba_df.shape[0]))
dataset = {'y_Actual': self.y_test,
'y_Predicted': y_test_pred
}
df = pd.DataFrame(dataset, columns=['y_Actual','y_Predicted'])
confusion_matrix = pd.crosstab(df['y_Actual'], df['y_Predicted'], rownames= ['Actual'], colnames=['Predicted'])
plt.show()
sn.heatmap(confusion_matrix, annot=True)
# get the index of false-positive image
def false_positive_index(self, clf, X_test, y_test, threshold):
pred_proba_df = pd.DataFrame(clf.predict_proba(X_test)[:,1])
y_test_pred = pred_proba_df.applymap(lambda x: 1 if x>threshold else 0).to_numpy().reshape( (pred_proba_df.shape[0]))
false_positives = np.logical_and(y_test != y_test_pred, y_test_pred == 1)
return np.arange(len(y_test))[false_positives]
# get the index of false-negative image
def false_negtive_index(self, clf, X_test, y_test, threshold):
pred_proba_df = pd.DataFrame(clf.predict_proba(X_test)[:,1])
y_test_pred = pred_proba_df.applymap(lambda x: 1 if x>threshold else 0).to_numpy().reshape( (pred_proba_df.shape[0]))
false_negatives = np.logical_and(y_test != y_test_pred, y_test_pred == 0)
return np.arange(len(y_test))[false_negatives]
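# Example usage (a sketch; the feature/label arrays below are hypothetical):
# model = TrainValTestModel(X_train, X_val, X_test, y_train, y_val, y_test,
#                           model_name='RF', cross_val=False)
# model.get_metrics(thresh=0.3)       # classification report at threshold 0.3
# model.test_false_positive()         # confusion matrices over a threshold sweep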
class LeaveOneOutModel:
# scale the train/test data and fit the model, tuning a decision threshold by leave-one-out CV
def __init__(self, X_train, X_test, y_train, y_test, model_name):
self.scaler = StandardScaler()
self.scaler.fit(X_train)
self.X_train = self.scaler.transform(X_train)
self.X_test = self.scaler.transform(X_test)
self.y_train = y_train
self.y_test = y_test
self.model_name = model_name
self.bst_thresh, self._y_prob, self.fpr, self.tpr, self.thrshd_roc = self.leave_one_out_cv_v1(self.X_train, self.y_train, self.model_name)
self.clf = self.get_model(model_name, self.X_train, self.y_train)
def leave_one_out_cv_v0(self, X, y, model_name):
# choose threshold
threshold_list = np.arange(0.01, 1, 0.01)
score = np.zeros(threshold_list.shape)
#%%
import sys
import os
os.chdir(os.path.dirname(os.getcwd())) # change the working directory one level up
sys.path.append('/Users/mwinding/repos/maggot_models')
from pymaid_creds import url, name, password, token
import pymaid
rm = pymaid.CatmaidInstance(url, token, name, password)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import pandas as pd
# allows text to be editable in Illustrator
plt.rcParams['pdf.fonttype'] = 42
plt.rcParams['ps.fonttype'] = 42
# font settings
plt.rcParams['font.size'] = 5
plt.rcParams['font.family'] = 'arial'
from src.data import load_metagraph
from src.visualization import CLASS_COLOR_DICT, adjplot
from src.traverse import Cascade, to_transmission_matrix
from src.traverse import TraverseDispatcher
from src.visualization import matrixplot
import connectome_tools.cascade_analysis as casc
import connectome_tools.celltype as ct
import connectome_tools.process_matrix as pm
adj_ad = pm.Promat.pull_adj(type_adj='ad', subgraph='brain')
#%%
# pull sensory annotations and then pull associated skids
order = ['olfactory', 'gustatory-external', 'gustatory-pharyngeal', 'enteric', 'thermo-warm', 'thermo-cold', 'visual', 'noci', 'mechano-Ch', 'mechano-II/III', 'proprio', 'respiratory']
sens = [ct.Celltype(name, ct.Celltype_Analyzer.get_skids_from_meta_annotation(f'mw {name}')) for name in order]
input_skids_list = [x.get_skids() for x in sens]
input_skids = ct.Celltype_Analyzer.get_skids_from_meta_meta_annotation('mw brain sensory modalities')
output_names = pymaid.get_annotated('mw brain outputs').name
output_skids_list = list(map(pymaid.get_skids_by_annotation, pymaid.get_annotated('mw brain outputs').name))
output_skids = [val for sublist in output_skids_list for val in sublist]
#%%
# cascades from each sensory modality
import pickle
p = 0.05
max_hops = 10
n_init = 1000
simultaneous = True
adj=adj_ad
'''
input_hit_hist_list = casc.Cascade_Analyzer.run_cascades_parallel(source_skids_list=input_skids_list, source_names = order, stop_skids=output_skids,
adj=adj_ad, p=p, max_hops=max_hops, n_init=n_init, simultaneous=simultaneous)
pickle.dump(input_hit_hist_list, open('data/cascades/sensory-modality-cascades_1000-n_init.p', 'wb'))
'''
input_hit_hist_list = pickle.load(open('data/cascades/sensory-modality-cascades_1000-n_init.p', 'rb'))
# %%
# plot sensory cascades raw
fig, axs = plt.subplots(len(input_hit_hist_list), 1, figsize=(10, 20))
fig.tight_layout(pad=2.0)
for i, hit_hist in enumerate(input_hit_hist_list):
ax = axs[i]
sns.heatmap(hit_hist.skid_hit_hist, ax=ax)
ax.set_xlabel(hit_hist.get_name())
plt.savefig('cascades/plots/sensory_modality_signals.pdf', format='pdf', bbox_inches='tight')
os.system('say "code executed"')
# %%
# how close are descending neurons to sensory?
# load output types
dVNC = pymaid.get_skids_by_annotation('mw dVNC')
dSEZ = pymaid.get_skids_by_annotation('mw dSEZ')
RGN = pymaid.get_skids_by_annotation('mw RGN')
# generate Cascade_Analyzer objects containing name of pathway and the hit_hist to each output type
# store names as [source, target] pairs so they can be split into a multiindex below
dVNC_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'dVNC'], hit_hist.skid_hit_hist.loc[dVNC, :]) for hit_hist in input_hit_hist_list]
dSEZ_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'dSEZ'], hit_hist.skid_hit_hist.loc[dSEZ, :]) for hit_hist in input_hit_hist_list]
RGN_hits = [casc.Cascade_Analyzer([hit_hist.get_name(), 'RGN'], hit_hist.skid_hit_hist.loc[RGN, :]) for hit_hist in input_hit_hist_list]
# max possible hits that all output neuron types could receive
max_dVNC_hits = len(dVNC_hits[0].skid_hit_hist.index)*n_init
max_dSEZ_hits = len(dSEZ_hits[0].skid_hit_hist.index)*n_init
max_RGN_hits = len(RGN_hits[0].skid_hit_hist.index)*n_init
# organize data so that each sens -> dVNC, dSEZ, RGN is intercalated
sens_output_data = list(zip(dVNC_hits, dSEZ_hits, RGN_hits))
sens_output_data = [x for sublist in sens_output_data for x in sublist]
sens_output_df = pd.DataFrame([x.skid_hit_hist.sum(axis=0) for x in sens_output_data])
# set up multiindex
sens_output_df['source']=[x.get_name()[0] for x in sens_output_data]
sens_output_df['target']=[x.get_name()[1] for x in sens_output_data]
sens_output_df = sens_output_df.set_index(['source', 'target'])
# normalize by max possible input to each output type (num neurons * n_init)
sens_output_df_plot = sens_output_df.copy()
sens_output_df_plot.loc[(slice(None), 'dVNC'), :] = sens_output_df_plot.loc[(slice(None), 'dVNC'), :]/max_dVNC_hits
sens_output_df_plot.loc[(slice(None), 'dSEZ'), :] = sens_output_df_plot.loc[(slice(None), 'dSEZ'), :]/max_dSEZ_hits
sens_output_df_plot.loc[(slice(None), 'RGN'), :] = sens_output_df_plot.loc[(slice(None), 'RGN'), :]/max_RGN_hits
import cmasher as cmr
fig, ax = plt.subplots(1, 1, figsize=(1.5, 2))
fig.tight_layout(pad=3.0)
vmax = 0.35
cmap = cmr.torch
sns.heatmap(sens_output_df_plot, ax = ax, cmap = cmap, vmax=vmax)
ax.set_title('Signal to brain outputs')
ax.set(xlim = (0, 11))
plt.savefig('cascades/plots/sensory_modality_signals_to_output.pdf', format='pdf', bbox_inches='tight')
# determine mean/median hop distance from sens -> output
def counts_to_list(count_list):
expanded_counts = []
for i, count in enumerate(count_list):
expanded = np.repeat(i, count)
expanded_counts.append(expanded)
return([x for sublist in expanded_counts for x in sublist])
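# e.g. counts_to_list([2, 0, 3]) -> [0, 0, 2, 2, 2]: hop i is repeated count[i] times,
# turning a per-hop visit histogram back into a flat list of hop distances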
all_sens_output_dist = []
for row in sens_output_df.iterrows():
list_hits = counts_to_list(row[1])
all_sens_output_dist.append([row[0][0], row[0][1], np.mean(list_hits), np.median(list_hits)])
all_sens_output_dist = pd.DataFrame(all_sens_output_dist, columns = ['source', 'target', 'mean_hop', 'median_hop'])
# %%
# plotting visits by modality to each descending to VNC neuron pair
# supplemental figure
dVNC_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in dVNC_hits]
dVNC_hits_summed = pd.concat(dVNC_hits_summed, axis=1)
dVNC_hits_pairwise = pm.Promat.convert_df_to_pairwise(dVNC_hits_summed)
dSEZ_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in dSEZ_hits]
dSEZ_hits_summed = pd.concat(dSEZ_hits_summed, axis=1)
dSEZ_hits_pairwise = pm.Promat.convert_df_to_pairwise(dSEZ_hits_summed)
RGN_hits_summed = [pd.DataFrame(x.skid_hit_hist.iloc[:, 0:8].sum(axis=1), columns=[x.get_name()[0]]) for x in RGN_hits]
RGN_hits_summed = pd.concat(RGN_hits_summed, axis=1)
RGN_hits_pairwise = pm.Promat.convert_df_to_pairwise(RGN_hits_summed)
fig, axs = plt.subplots(
3, 1, figsize=(8, 8)
)
fig.tight_layout(pad=3.0)
ax = axs[0]
ax.get_xaxis().set_visible(False)
ax.set_title('Signal to Individual VNC Descending Neurons')
sns.heatmap(dVNC_hits_pairwise.T, ax = ax)
ax = axs[1]
ax.get_xaxis().set_visible(False)
ax.set_title('Signal to Individual SEZ Descending Neurons')
sns.heatmap(dSEZ_hits_pairwise.T, ax = ax)
ax = axs[2]
ax.set_xlabel('Individual Ring Gland Neurons')
ax.get_xaxis().set_visible(False)
ax.set_title('Signal to Individual Ring Gland Neurons')
sns.heatmap(RGN_hits_pairwise.T, ax = ax)
plt.savefig('cascades/plots/signal_to_individual_outputs.pdf', format='pdf', bbox_inches='tight')
#%%
# alternative clustermap plot of descending neurons
# supplemental figure plot
vmax = n_init
fig = sns.clustermap(dVNC_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax)
ax = fig.ax_heatmap
ax.set_xlabel('Individual dVNCs')
ax.set_xticks([])
fig.savefig('cascades/plots/signal_to_individual_dVNCs.pdf', format='pdf', bbox_inches='tight')
fig = sns.clustermap(dSEZ_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax)
ax = fig.ax_heatmap
ax.set_xlabel('Individual dSEZs')
ax.set_xticks([])
fig.savefig('cascades/plots/signal_to_individual_dSEZs.pdf', format='pdf', bbox_inches='tight')
fig = sns.clustermap(RGN_hits_pairwise.T, row_cluster = False, figsize = (8, 4), vmax=vmax)
ax = fig.ax_heatmap
ax.set_xlabel('Individual RG neurons')
ax.set_xticks([])
fig.savefig('cascades/plots/signal_to_individual_RGs.pdf', format='pdf', bbox_inches='tight')
# %%
# distribution summary of signal to output neurons
dVNC_dist = (dVNC_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1)
dSEZ_dist = (dSEZ_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1)
RGN_dist = (RGN_hits_pairwise.groupby('pair_id').sum()>=n_init).sum(axis=1)
dist_data = pd.DataFrame(list(zip(dVNC_dist.values, ['dVNC']*len(dVNC_dist))) + list(zip(dSEZ_dist.values, ['dSEZ']*len(dSEZ_dist))) + list(zip(RGN_dist.values, ['RGN']*len(RGN_dist))),
columns = ['combinations', 'type'])
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.stripplot(data = dist_data, y = 'combinations', x='type', s=1, ax=ax)
fig.savefig('cascades/plots/signal_to_outputs_dist.pdf', format='pdf', bbox_inches='tight')
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.histplot(data = dVNC_dist-0.5, ax=ax, bins=len(sens))
fig.savefig('cascades/plots/signal_to_dVNC_dist.pdf', format='pdf', bbox_inches='tight')
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.histplot(data = dSEZ_dist-0.5, ax=ax, bins=len(sens))
fig.savefig('cascades/plots/signal_to_dSEZ_dist.pdf', format='pdf', bbox_inches='tight')
fig, ax = plt.subplots(1,1, figsize=(4,4))
sns.histplot(data = RGN_dist-0.5, ax=ax, bins=len(sens))
fig.savefig('cascades/plots/signal_to_RGN_dist.pdf', format='pdf', bbox_inches='tight')
# %%
# parallel coordinates plots
from pandas.plotting import parallel_coordinates
linewidth = 0.75
alpha = 0.8
very_low_color = '#D7DF23'
low_color = '#C2DD26'
med_color = '#8DC63F'
high_color = '#00A651'
data = dVNC_hits_pairwise.groupby('pair_id').sum()
very_low = (dVNC_dist<=1)
low = (dVNC_dist>1) & (dVNC_dist<4)
med = (dVNC_dist>=4) & (dVNC_dist<8)
high = dVNC_dist>=8
data['type'] = [0]*len(data.index)
data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type'])
data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type'])
data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type'])
data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type'])
data = data.sort_values(by='type')
fig, ax = plt.subplots(1,1, figsize=(4,4))
# colors follow the alphabetical class order produced by sort_values: high, low, med, very_low
parallel_coordinates(data, class_column='type', color = [high_color, low_color, med_color, very_low_color], alpha=alpha, linewidth=linewidth)
fig.savefig('cascades/plots/signal-to-dVNC_parallel-coordinates.pdf', format='pdf', bbox_inches='tight')
data = dSEZ_hits_pairwise.groupby('pair_id').sum()
very_low = (dSEZ_dist<=1)
low = (dSEZ_dist>1) & (dSEZ_dist<4)
med = (dSEZ_dist>=4) & (dSEZ_dist<8)
high = dSEZ_dist>=8
data['type'] = [0]*len(data.index)
data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type'])
data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type'])
data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type'])
data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type'])
data = data.sort_values(by='type')
fig, ax = plt.subplots(1,1, figsize=(4,4))
parallel_coordinates(data, class_column='type', color = [high_color, low_color, med_color, very_low_color], alpha=alpha, linewidth=linewidth)
fig.savefig('cascades/plots/signal-to-dSEZ_parallel-coordinates.pdf', format='pdf', bbox_inches='tight')
data = RGN_hits_pairwise.groupby('pair_id').sum()
very_low = (RGN_dist<=1)
low = (RGN_dist>1) & (RGN_dist<4)
med = (RGN_dist>=4) & (RGN_dist<8)
high = RGN_dist>=8
data['type'] = [0]*len(data.index)
data.loc[high, 'type'] = ['high']*len(data.loc[high, 'type'])
data.loc[med, 'type'] = ['med']*len(data.loc[med, 'type'])
data.loc[low, 'type'] = ['low']*len(data.loc[low, 'type'])
data.loc[very_low, 'type'] = ['very_low']*len(data.loc[very_low, 'type'])
data = data.sort_values(by='type')
fig, ax = plt.subplots(1,1, figsize=(4,4))
parallel_coordinates(data, class_column='type', color = [high_color, low_color, med_color, very_low_color], alpha=alpha, linewidth=linewidth)
fig.savefig('cascades/plots/signal-to-RGN_parallel-coordinates.pdf', format='pdf', bbox_inches='tight')
# %%
# PCA of descending input
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
data = dVNC_hits_pairwise.groupby('pair_id').sum()
data['type'] = ['dVNC']*len(data)
data2 = dSEZ_hits_pairwise.groupby('pair_id').sum()
data2['type'] = ['dSEZ']*len(data2)
data3 = RGN_hits_pairwise.groupby('pair_id').sum()
data3['type'] = ['RGN']*len(data3)
data = pd.concat([data, data2, data3])
x = data.drop(columns='type').values
x = StandardScaler().fit_transform(x)
pca = PCA(n_components=2)
principalComponents = pca.fit_transform(x)
principalDf = pd.DataFrame(data = principalComponents
, columns = ['pc1', 'pc2'], index=data.index)
principalDf['type'] = data['type']
ylim = (-2.25, 2.25)
xlim = (-5, 6)
size = 3
alpha = 0.75
# plot dVNC PCA
plot_data = principalDf[principalDf.type=='dVNC']
low = (dVNC_dist<4)
med = (dVNC_dist>=4) & (dVNC_dist<10)
high = dVNC_dist>=10
plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type'])
plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type'])
plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax)
ax.set(xlim=xlim, ylim=ylim)
fig.savefig('cascades/plots/signal-to-dVNC_PCA.pdf', format='pdf', bbox_inches='tight')
# plot dSEZ PCA
plot_data = principalDf[principalDf.type=='dSEZ']
low = (dSEZ_dist<4)
med = (dSEZ_dist>=4) & (dSEZ_dist<10)
high = dSEZ_dist>=10
plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type'])
plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type'])
plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax)
ax.set(xlim=xlim, ylim=ylim)
fig.savefig('cascades/plots/signal-to-dSEZ_PCA.pdf', format='pdf', bbox_inches='tight')
# plot RGN PCA
plot_data = principalDf[principalDf.type=='RGN']
low = (RGN_dist<4)
med = (RGN_dist>=4) & (RGN_dist<10)
high = RGN_dist>=10
plot_data.loc[high, 'type'] = ['high']*len(plot_data.loc[high, 'type'])
plot_data.loc[med, 'type'] = ['med']*len(plot_data.loc[med, 'type'])
plot_data.loc[low, 'type'] = ['low']*len(plot_data.loc[low, 'type'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.scatterplot(data = plot_data, x='pc1', y='pc2', hue='type', hue_order = ['high', 'med', 'low'], s=size, linewidth=0, alpha=alpha, ax=ax)
ax.set(xlim=xlim, ylim=ylim)
fig.savefig('cascades/plots/signal-to-RGN_PCA.pdf', format='pdf', bbox_inches='tight')
# %%
# bar plot of high, med, low categories for each type of output
integration_data = [['dVNC', 'high', sum(dVNC_dist>=10)],
['dVNC', 'med', sum((dVNC_dist>=4) & (dVNC_dist<10))],
['dVNC', 'low', sum(dVNC_dist<4)],
['dSEZ', 'high', sum(dSEZ_dist>=10)],
['dSEZ', 'med', sum((dSEZ_dist>=4) & (dSEZ_dist<10))],
['dSEZ', 'low', sum(dSEZ_dist<4)],
['RGN', 'high', sum(RGN_dist>=10)],
['RGN', 'med', sum((RGN_dist>=4) & (RGN_dist<10))],
['RGN', 'low', sum(RGN_dist<4)]]
integration_data = pd.DataFrame(integration_data, columns = ['class', 'type', 'count'])
fig, ax = plt.subplots(1,1,figsize=(2,2))
sns.barplot(data = integration_data, x='class', y='count', hue='type', hue_order = ['high', 'med', 'low'], ax=ax)
fig.savefig('cascades/plots/signal-integration-counts_dVNCs.pdf', format='pdf', bbox_inches='tight')
# %%
##########
# **** Note Well: REALLY old code below, deprecated or never used in paper ****
##########
# %%
# num of descendings at each level
# this assumes that thresholding per node is useful; it might not be
threshold = 50
num_dVNC_dsSens = pd.DataFrame(([np.array(dVNC_ORN_hit>threshold).sum(axis = 0),
np.array(dVNC_AN_hit>threshold).sum(axis = 0),
np.array(dVNC_MN_hit>threshold).sum(axis = 0),
np.array(dVNC_A00c_hit>threshold).sum(axis = 0),
np.array(dVNC_vtd_hit>threshold).sum(axis = 0),
np.array(dVNC_thermo_hit>threshold).sum(axis = 0),
np.array(dVNC_photo_hit>threshold).sum(axis = 0)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
num_dSEZ_dsSens = pd.DataFrame(([np.array(dSEZ_ORN_hit>threshold).sum(axis = 0),
np.array(dSEZ_AN_hit>threshold).sum(axis = 0),
np.array(dSEZ_MN_hit>threshold).sum(axis = 0),
np.array(dSEZ_A00c_hit>threshold).sum(axis = 0),
np.array(dSEZ_vtd_hit>threshold).sum(axis = 0),
np.array(dSEZ_thermo_hit>threshold).sum(axis = 0),
np.array(dSEZ_photo_hit>threshold).sum(axis = 0)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
num_RG_dsSens = pd.DataFrame(([np.array(RG_ORN_hit>threshold).sum(axis = 0),
np.array(RG_AN_hit>threshold).sum(axis = 0),
np.array(RG_MN_hit>threshold).sum(axis = 0),
np.array(RG_A00c_hit>threshold).sum(axis = 0),
np.array(RG_vtd_hit>threshold).sum(axis = 0),
np.array(RG_thermo_hit>threshold).sum(axis = 0),
np.array(RG_photo_hit>threshold).sum(axis = 0)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
fig, axs = plt.subplots(
3, 1, figsize=(8, 8)
)
fig.tight_layout(pad=3.0)
vmax = 50
cmap = cmr.heat
ax = axs[0]
ax.set_title('Number of VNC Descending Neurons downstream of Sensory Signal')
sns.heatmap(num_dVNC_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap)
ax.set(xlim = (0, 13))
ax = axs[1]
ax.set_title('Number of SEZ Descending Neurons downstream of Sensory Signal')
sns.heatmap(num_dSEZ_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap)
ax.set(xlim = (0, 13))
ax = axs[2]
ax.set_title('Number of Ring Gland Neurons downstream of Sensory Signal')
sns.heatmap(num_RG_dsSens, ax = ax, vmax = vmax, rasterized=True, cmap = cmap)
ax.set_xlabel('Hops from sensory')
ax.set(xlim = (0, 13))
plt.savefig('cascades/plots/number_outputs_ds_each_sensory_modality.pdf', format='pdf', bbox_inches='tight')
# %%
# Which modalities is each output associated with?
dVNC_hits = pd.DataFrame(([ dVNC_skids,
dVNC_ORN_hit.sum(axis = 1),
dVNC_AN_hit.sum(axis = 1),
dVNC_MN_hit.sum(axis = 1),
dVNC_thermo_hit.sum(axis = 1),
dVNC_photo_hit.sum(axis = 1),
dVNC_A00c_hit.sum(axis = 1),
dVNC_vtd_hit.sum(axis = 1)]),
index = ['dVNC_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd'])
dVNC_hits = dVNC_hits.T
dSEZ_hits = pd.DataFrame(([ dSEZ_skids,
dSEZ_ORN_hit.sum(axis = 1),
dSEZ_AN_hit.sum(axis = 1),
dSEZ_MN_hit.sum(axis = 1),
dSEZ_thermo_hit.sum(axis = 1),
dSEZ_photo_hit.sum(axis = 1),
dSEZ_A00c_hit.sum(axis = 1),
dSEZ_vtd_hit.sum(axis = 1)]),
index = ['dSEZ_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd'])
dSEZ_hits = dSEZ_hits.T
RG_hits = pd.DataFrame(([ RG_skids,
RG_ORN_hit.sum(axis = 1),
RG_AN_hit.sum(axis = 1),
RG_MN_hit.sum(axis = 1),
RG_thermo_hit.sum(axis = 1),
RG_photo_hit.sum(axis = 1),
RG_A00c_hit.sum(axis = 1),
RG_vtd_hit.sum(axis = 1)]),
index = ['RG_skid', 'ORN', 'AN', 'MN', 'thermo', 'photo', 'A00c', 'vtd'])
RG_hits = RG_hits.T
# %%
# sensory characterization of each layer of each sensory modality
import plotly.express as px
from pandas.plotting import parallel_coordinates
# replacement if I want to use this later
#sensory_profiles = [hit_hist.skid_hit_hist.sum(axis=1).values for hit_hist in input_hit_hist_list]
#sensory_profiles = pd.DataFrame(sensory_profiles, index=[hit_hist.get_name() for hit_hist in input_hit_hist_list], columns = input_hit_hist_list[0].skid_hit_hist.index)
sensory_profile = pd.DataFrame(([ORN_hit_hist.sum(axis = 1),
AN_hit_hist.sum(axis = 1),
MN_hit_hist.sum(axis = 1),
A00c_hit_hist.sum(axis = 1),
vtd_hit_hist.sum(axis = 1),
thermo_hit_hist.sum(axis = 1),
photo_hit_hist.sum(axis = 1)]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile0 = pd.DataFrame(([ORN_hit_hist[:, 0],
AN_hit_hist[:, 0],
MN_hit_hist[:, 0],
A00c_hit_hist[:, 0],
vtd_hit_hist[:, 0],
thermo_hit_hist[:, 0],
photo_hit_hist[:, 0]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile1 = pd.DataFrame(([ORN_hit_hist[:, 1],
AN_hit_hist[:, 1],
MN_hit_hist[:, 1],
A00c_hit_hist[:, 1],
vtd_hit_hist[:, 1],
thermo_hit_hist[:, 1],
photo_hit_hist[:, 1]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile2 = pd.DataFrame(([ORN_hit_hist[:, 2],
AN_hit_hist[:, 2],
MN_hit_hist[:, 2],
A00c_hit_hist[:, 2],
vtd_hit_hist[:, 2],
thermo_hit_hist[:, 2],
photo_hit_hist[:, 2]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile3 = pd.DataFrame(([ORN_hit_hist[:, 3],
AN_hit_hist[:, 3],
MN_hit_hist[:, 3],
A00c_hit_hist[:, 3],
vtd_hit_hist[:, 3],
thermo_hit_hist[:, 3],
photo_hit_hist[:, 3]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile4 = pd.DataFrame(([ORN_hit_hist[:, 4],
AN_hit_hist[:, 4],
MN_hit_hist[:, 4],
A00c_hit_hist[:, 4],
vtd_hit_hist[:, 4],
thermo_hit_hist[:, 4],
photo_hit_hist[:, 4]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile5 = pd.DataFrame(([ORN_hit_hist[:, 5],
AN_hit_hist[:, 5],
MN_hit_hist[:, 5],
A00c_hit_hist[:, 5],
vtd_hit_hist[:, 5],
thermo_hit_hist[:, 5],
photo_hit_hist[:, 5]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile6 = pd.DataFrame(([ORN_hit_hist[:, 6],
AN_hit_hist[:, 6],
MN_hit_hist[:, 6],
A00c_hit_hist[:, 6],
vtd_hit_hist[:, 6],
thermo_hit_hist[:, 6],
photo_hit_hist[:, 6]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile7 = pd.DataFrame(([ORN_hit_hist[:, 7],
AN_hit_hist[:, 7],
MN_hit_hist[:, 7],
A00c_hit_hist[:, 7],
vtd_hit_hist[:, 7],
thermo_hit_hist[:, 7],
photo_hit_hist[:, 7]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile8 = pd.DataFrame(([ORN_hit_hist[:, 8],
AN_hit_hist[:, 8],
MN_hit_hist[:, 8],
A00c_hit_hist[:, 8],
vtd_hit_hist[:, 8],
thermo_hit_hist[:, 8],
photo_hit_hist[:, 8]]),
index = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
sensory_profile = sensory_profile.T
sensory_profile0 = sensory_profile0.T
sensory_profile1 = sensory_profile1.T
sensory_profile2 = sensory_profile2.T
sensory_profile3 = sensory_profile3.T
sensory_profile4 = sensory_profile4.T
sensory_profile5 = sensory_profile5.T
sensory_profile6 = sensory_profile6.T
sensory_profile7 = sensory_profile7.T
sensory_profile8 = sensory_profile8.T
#%%
# multisensory elements per layer (apples to apples)
threshold = 25
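# <modality><hop>_indices below: indices of neurons whose visit count at that hop exceeds the threshold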
ORN0_indices = np.where(ORN_hit_hist[:, 0]>threshold)[0]
ORN1_indices = np.where(ORN_hit_hist[:, 1]>threshold)[0]
ORN2_indices = np.where(ORN_hit_hist[:, 2]>threshold)[0]
ORN3_indices = np.where(ORN_hit_hist[:, 3]>threshold)[0]
ORN4_indices = np.where(ORN_hit_hist[:, 4]>threshold)[0]
ORN5_indices = np.where(ORN_hit_hist[:, 5]>threshold)[0]
ORN6_indices = np.where(ORN_hit_hist[:, 6]>threshold)[0]
ORN7_indices = np.where(ORN_hit_hist[:, 7]>threshold)[0]
ORN8_indices = np.where(ORN_hit_hist[:, 8]>threshold)[0]
AN0_indices = np.where(AN_hit_hist[:, 0]>threshold)[0]
AN1_indices = np.where(AN_hit_hist[:, 1]>threshold)[0]
AN2_indices = np.where(AN_hit_hist[:, 2]>threshold)[0]
AN3_indices = np.where(AN_hit_hist[:, 3]>threshold)[0]
AN4_indices = np.where(AN_hit_hist[:, 4]>threshold)[0]
AN5_indices = np.where(AN_hit_hist[:, 5]>threshold)[0]
AN6_indices = np.where(AN_hit_hist[:, 6]>threshold)[0]
AN7_indices = np.where(AN_hit_hist[:, 7]>threshold)[0]
AN8_indices = np.where(AN_hit_hist[:, 8]>threshold)[0]
MN0_indices = np.where(MN_hit_hist[:, 0]>threshold)[0]
MN1_indices = np.where(MN_hit_hist[:, 1]>threshold)[0]
MN2_indices = np.where(MN_hit_hist[:, 2]>threshold)[0]
MN3_indices = np.where(MN_hit_hist[:, 3]>threshold)[0]
MN4_indices = np.where(MN_hit_hist[:, 4]>threshold)[0]
MN5_indices = np.where(MN_hit_hist[:, 5]>threshold)[0]
MN6_indices = np.where(MN_hit_hist[:, 6]>threshold)[0]
MN7_indices = np.where(MN_hit_hist[:, 7]>threshold)[0]
MN8_indices = np.where(MN_hit_hist[:, 8]>threshold)[0]
A00c0_indices = np.where(A00c_hit_hist[:, 0]>threshold)[0]
A00c1_indices = np.where(A00c_hit_hist[:, 1]>threshold)[0]
A00c2_indices = np.where(A00c_hit_hist[:, 2]>threshold)[0]
A00c3_indices = np.where(A00c_hit_hist[:, 3]>threshold)[0]
A00c4_indices = np.where(A00c_hit_hist[:, 4]>threshold)[0]
A00c5_indices = np.where(A00c_hit_hist[:, 5]>threshold)[0]
A00c6_indices = np.where(A00c_hit_hist[:, 6]>threshold)[0]
A00c7_indices = np.where(A00c_hit_hist[:, 7]>threshold)[0]
A00c8_indices = np.where(A00c_hit_hist[:, 8]>threshold)[0]
vtd0_indices = np.where(vtd_hit_hist[:, 0]>threshold)[0]
vtd1_indices = np.where(vtd_hit_hist[:, 1]>threshold)[0]
vtd2_indices = np.where(vtd_hit_hist[:, 2]>threshold)[0]
vtd3_indices = np.where(vtd_hit_hist[:, 3]>threshold)[0]
vtd4_indices = np.where(vtd_hit_hist[:, 4]>threshold)[0]
vtd5_indices = np.where(vtd_hit_hist[:, 5]>threshold)[0]
vtd6_indices = np.where(vtd_hit_hist[:, 6]>threshold)[0]
vtd7_indices = np.where(vtd_hit_hist[:, 7]>threshold)[0]
vtd8_indices = np.where(vtd_hit_hist[:, 8]>threshold)[0]
thermo0_indices = np.where(thermo_hit_hist[:, 0]>threshold)[0]
thermo1_indices = np.where(thermo_hit_hist[:, 1]>threshold)[0]
thermo2_indices = np.where(thermo_hit_hist[:, 2]>threshold)[0]
thermo3_indices = np.where(thermo_hit_hist[:, 3]>threshold)[0]
thermo4_indices = np.where(thermo_hit_hist[:, 4]>threshold)[0]
thermo5_indices = np.where(thermo_hit_hist[:, 5]>threshold)[0]
thermo6_indices = np.where(thermo_hit_hist[:, 6]>threshold)[0]
thermo7_indices = np.where(thermo_hit_hist[:, 7]>threshold)[0]
thermo8_indices = np.where(thermo_hit_hist[:, 8]>threshold)[0]
photo0_indices = np.where(photo_hit_hist[:, 0]>threshold)[0]
photo1_indices = np.where(photo_hit_hist[:, 1]>threshold)[0]
photo2_indices = np.where(photo_hit_hist[:, 2]>threshold)[0]
photo3_indices = np.where(photo_hit_hist[:, 3]>threshold)[0]
photo4_indices = np.where(photo_hit_hist[:, 4]>threshold)[0]
photo5_indices = np.where(photo_hit_hist[:, 5]>threshold)[0]
photo6_indices = np.where(photo_hit_hist[:, 6]>threshold)[0]
photo7_indices = np.where(photo_hit_hist[:, 7]>threshold)[0]
photo8_indices = np.where(photo_hit_hist[:, 8]>threshold)[0]
ORN_profile = pd.DataFrame([np.array(sensory_profile0.iloc[ORN0_indices, :].sum(axis=0)/len(ORN0_indices)),
np.array(sensory_profile1.iloc[ORN1_indices, :].sum(axis=0)/len(ORN1_indices)),
np.array(sensory_profile2.iloc[ORN2_indices, :].sum(axis=0)/len(ORN2_indices)),
np.array(sensory_profile3.iloc[ORN3_indices, :].sum(axis=0)/len(ORN3_indices)),
np.array(sensory_profile4.iloc[ORN4_indices, :].sum(axis=0)/len(ORN4_indices)),
np.array(sensory_profile5.iloc[ORN5_indices, :].sum(axis=0)/len(ORN5_indices)),
np.array(sensory_profile6.iloc[ORN6_indices, :].sum(axis=0)/len(ORN6_indices)),
np.array(sensory_profile7.iloc[ORN7_indices, :].sum(axis=0)/len(ORN7_indices)),
np.array(sensory_profile8.iloc[ORN8_indices, :].sum(axis=0)/len(ORN8_indices))],
index = ['ORN0', 'ORN1', 'ORN2', 'ORN3', 'ORN4', 'ORN5', 'ORN6', 'ORN7', 'ORN8'], columns = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
AN_profile = pd.DataFrame([np.array(sensory_profile0.iloc[AN0_indices, :].sum(axis=0)/len(AN0_indices)),
np.array(sensory_profile1.iloc[AN1_indices, :].sum(axis=0)/len(AN1_indices)),
np.array(sensory_profile2.iloc[AN2_indices, :].sum(axis=0)/len(AN2_indices)),
np.array(sensory_profile3.iloc[AN3_indices, :].sum(axis=0)/len(AN3_indices)),
np.array(sensory_profile4.iloc[AN4_indices, :].sum(axis=0)/len(AN4_indices)),
np.array(sensory_profile5.iloc[AN5_indices, :].sum(axis=0)/len(AN5_indices)),
np.array(sensory_profile6.iloc[AN6_indices, :].sum(axis=0)/len(AN6_indices)),
np.array(sensory_profile7.iloc[AN7_indices, :].sum(axis=0)/len(AN7_indices)),
np.array(sensory_profile8.iloc[AN8_indices, :].sum(axis=0)/len(AN8_indices))],
index = ['AN0', 'AN1', 'AN2', 'AN3', 'AN4', 'AN5', 'AN6', 'AN7', 'AN8'], columns = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
MN_profile = pd.DataFrame([np.array(sensory_profile0.iloc[MN0_indices, :].sum(axis=0)/len(MN0_indices)),
np.array(sensory_profile1.iloc[MN1_indices, :].sum(axis=0)/len(MN1_indices)),
np.array(sensory_profile2.iloc[MN2_indices, :].sum(axis=0)/len(MN2_indices)),
np.array(sensory_profile3.iloc[MN3_indices, :].sum(axis=0)/len(MN3_indices)),
np.array(sensory_profile4.iloc[MN4_indices, :].sum(axis=0)/len(MN4_indices)),
np.array(sensory_profile5.iloc[MN5_indices, :].sum(axis=0)/len(MN5_indices)),
np.array(sensory_profile6.iloc[MN6_indices, :].sum(axis=0)/len(MN6_indices)),
np.array(sensory_profile7.iloc[MN7_indices, :].sum(axis=0)/len(MN7_indices)),
np.array(sensory_profile8.iloc[MN8_indices, :].sum(axis=0)/len(MN8_indices))],
index = ['MN0', 'MN1', 'MN2', 'MN3', 'MN4', 'MN5', 'MN6', 'MN7', 'MN8'], columns = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
A00c_profile = pd.DataFrame([np.array(sensory_profile0.iloc[A00c0_indices, :].sum(axis=0)/len(A00c0_indices)),
np.array(sensory_profile1.iloc[A00c1_indices, :].sum(axis=0)/len(A00c1_indices)),
np.array(sensory_profile2.iloc[A00c2_indices, :].sum(axis=0)/len(A00c2_indices)),
np.array(sensory_profile3.iloc[A00c3_indices, :].sum(axis=0)/len(A00c3_indices)),
np.array(sensory_profile4.iloc[A00c4_indices, :].sum(axis=0)/len(A00c4_indices)),
np.array(sensory_profile5.iloc[A00c5_indices, :].sum(axis=0)/len(A00c5_indices)),
np.array(sensory_profile6.iloc[A00c6_indices, :].sum(axis=0)/len(A00c6_indices)),
np.array(sensory_profile7.iloc[A00c7_indices, :].sum(axis=0)/len(A00c7_indices)),
np.array(sensory_profile8.iloc[A00c8_indices, :].sum(axis=0)/len(A00c8_indices))],
index = ['A00c0', 'A00c1', 'A00c2', 'A00c3', 'A00c4', 'A00c5', 'A00c6', 'A00c7', 'A00c8'], columns = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
vtd_profile = pd.DataFrame([np.array(sensory_profile0.iloc[vtd0_indices, :].sum(axis=0)/len(vtd0_indices)),
np.array(sensory_profile1.iloc[vtd1_indices, :].sum(axis=0)/len(vtd1_indices)),
np.array(sensory_profile2.iloc[vtd2_indices, :].sum(axis=0)/len(vtd2_indices)),
np.array(sensory_profile3.iloc[vtd3_indices, :].sum(axis=0)/len(vtd3_indices)),
np.array(sensory_profile4.iloc[vtd4_indices, :].sum(axis=0)/len(vtd4_indices)),
np.array(sensory_profile5.iloc[vtd5_indices, :].sum(axis=0)/len(vtd5_indices)),
np.array(sensory_profile6.iloc[vtd6_indices, :].sum(axis=0)/len(vtd6_indices)),
np.array(sensory_profile7.iloc[vtd7_indices, :].sum(axis=0)/len(vtd7_indices)),
np.array(sensory_profile8.iloc[vtd8_indices, :].sum(axis=0)/len(vtd8_indices))],
index = ['vtd0', 'vtd1', 'vtd2', 'vtd3', 'vtd4', 'vtd5', 'vtd6', 'vtd7', 'vtd8'], columns = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
thermo_profile = pd.DataFrame([np.array(sensory_profile0.iloc[thermo0_indices, :].sum(axis=0)/len(thermo0_indices)),
np.array(sensory_profile1.iloc[thermo1_indices, :].sum(axis=0)/len(thermo1_indices)),
np.array(sensory_profile2.iloc[thermo2_indices, :].sum(axis=0)/len(thermo2_indices)),
np.array(sensory_profile3.iloc[thermo3_indices, :].sum(axis=0)/len(thermo3_indices)),
np.array(sensory_profile4.iloc[thermo4_indices, :].sum(axis=0)/len(thermo4_indices)),
np.array(sensory_profile5.iloc[thermo5_indices, :].sum(axis=0)/len(thermo5_indices)),
np.array(sensory_profile6.iloc[thermo6_indices, :].sum(axis=0)/len(thermo6_indices)),
np.array(sensory_profile7.iloc[thermo7_indices, :].sum(axis=0)/len(thermo7_indices)),
np.array(sensory_profile8.iloc[thermo8_indices, :].sum(axis=0)/len(thermo8_indices))],
index = ['thermo0', 'thermo1', 'thermo2', 'thermo3', 'thermo4', 'thermo5', 'thermo6', 'thermo7', 'thermo8'], columns = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
photo_profile = pd.DataFrame([np.array(sensory_profile0.iloc[photo0_indices, :].sum(axis=0)/len(photo0_indices)),
np.array(sensory_profile1.iloc[photo1_indices, :].sum(axis=0)/len(photo1_indices)),
np.array(sensory_profile2.iloc[photo2_indices, :].sum(axis=0)/len(photo2_indices)),
np.array(sensory_profile3.iloc[photo3_indices, :].sum(axis=0)/len(photo3_indices)),
np.array(sensory_profile4.iloc[photo4_indices, :].sum(axis=0)/len(photo4_indices)),
np.array(sensory_profile5.iloc[photo5_indices, :].sum(axis=0)/len(photo5_indices)),
np.array(sensory_profile6.iloc[photo6_indices, :].sum(axis=0)/len(photo6_indices)),
np.array(sensory_profile7.iloc[photo7_indices, :].sum(axis=0)/len(photo7_indices)),
np.array(sensory_profile8.iloc[photo8_indices, :].sum(axis=0)/len(photo8_indices))],
index = ['photo0', 'photo1', 'photo2', 'photo3', 'photo4', 'photo5', 'photo6', 'photo7', 'photo8'], columns = ['ORN', 'AN', 'MN', 'A00c', 'vtd', 'thermo', 'photo'])
# %%
# plotting multisensory elements per layer
x_axis_labels = [0,1,2,3,4,5,6]
x_label = 'Hops from Sensory'
fig, axs = plt.subplots(
4, 2, figsize=(5, 8)
)
fig.tight_layout(pad=2.5)
#cbar_ax = axs.add_axes([3, 7, .1, .75])
ax = axs[0, 0]
ax.set_title('Signal from ORN')
ax.set(xticks=[0, 1, 2, 3, 4, 5])
sns.heatmap(ORN_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax = axs[1, 0]
ax.set_title('Signal from AN')
sns.heatmap(AN_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax = axs[2, 0]
ax.set_title('Signal from MN')
sns.heatmap(MN_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax = axs[3, 0]
ax.set_title('Signal from A00c')
sns.heatmap(A00c_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax.set_xlabel(x_label)
ax = axs[0, 1]
ax.set_title('Signal from vtd')
sns.heatmap(vtd_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax = axs[1, 1]
ax.set_title('Signal from thermo')
sns.heatmap(thermo_profile.T.iloc[:,0:7], ax = ax, cbar=False, xticklabels = x_axis_labels, rasterized=True)
ax = axs[2, 1]
ax.set_title('Signal from photo')
sns.heatmap(photo_profile.T.iloc[:,0:7], ax = ax, cbar_ax = axs[3, 1], xticklabels = x_axis_labels, rasterized=True)
ax.set_xlabel(x_label)
ax = axs[3, 1]
ax.set_xlabel('Number of Visits\nfrom Sensory Signal')
#ax.axis("off")
plt.savefig('cascades/plots/sensory_integration_per_hop.pdf', format='pdf', bbox_inches='tight')
# %%
# parallel coordinate plot of different sensory layer integration
fig, axs = plt.subplots(
6, 7, figsize=(30, 30), sharey = True
)
fig.tight_layout(pad=2.5)
threshold = 25
alpha = 0.10
#fig.tight_layout(pad=3.0)
sensory_profile0_parallel = sensory_profile0
sensory_profile0_parallel['class'] = np.zeros(len(sensory_profile0_parallel))
sensory_profile1_parallel = sensory_profile1
sensory_profile1_parallel['class'] = np.zeros(len(sensory_profile1_parallel))
sensory_profile2_parallel = sensory_profile2
sensory_profile2_parallel['class'] = np.zeros(len(sensory_profile2_parallel))
sensory_profile3_parallel = sensory_profile3
sensory_profile3_parallel['class'] = np.zeros(len(sensory_profile3_parallel))
sensory_profile4_parallel = sensory_profile4
sensory_profile4_parallel['class'] = np.zeros(len(sensory_profile4_parallel))
sensory_profile5_parallel = sensory_profile5
sensory_profile5_parallel['class'] = np.zeros(len(sensory_profile5_parallel))
column = 0
color = 'blue'
ax = axs[0, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile0_parallel.iloc[np.where(ORN_hit_hist[:, 0]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[1, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile1_parallel.iloc[np.where(ORN_hit_hist[:, 1]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[2, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile2_parallel.iloc[np.where(ORN_hit_hist[:, 2]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[3, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile3_parallel.iloc[np.where(ORN_hit_hist[:, 3]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[4, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile4_parallel.iloc[np.where(ORN_hit_hist[:, 4]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[5, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile5_parallel.iloc[np.where(ORN_hit_hist[:, 5]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
modality_list = AN_hit_hist
column = 1
color = 'orange'
ax = axs[0, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile0_parallel.iloc[np.where(modality_list[:, 0]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[1, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile1_parallel.iloc[np.where(modality_list[:, 1]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[2, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile2_parallel.iloc[np.where(modality_list[:, 2]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[3, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile3_parallel.iloc[np.where(modality_list[:, 3]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[4, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile4_parallel.iloc[np.where(modality_list[:, 4]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[5, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile5_parallel.iloc[np.where(modality_list[:, 5]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
modality_list = MN_hit_hist
column = 2
color = 'green'
ax = axs[0, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile0_parallel.iloc[np.where(modality_list[:, 0]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[1, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile1_parallel.iloc[np.where(modality_list[:, 1]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[2, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile2_parallel.iloc[np.where(modality_list[:, 2]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
ax = axs[3, column]
ax.set(ylim = (0, 100))
parallel_coordinates(sensory_profile3_parallel.iloc[np.where(modality_list[:, 3]>threshold)[0], :], class_column = 'class', ax = ax, alpha = alpha, color = color)
#!/usr/bin/env python2
# -----------------------------------------------------------------------------
# @author:
# <NAME>, Jun 23rd, 2017
# -----------------------------------------------------------------------------
import graph_util.init_path as init_path
from util import logger
import graph_util.mujoco_parser as mujoco_parser
import numpy as np
_BASE_DIR = init_path.get_base_dir()
def map_output(transfer_env, i_value, added_constant, gnn_option_list):
'''
@brief:
i_value could be the logstd (1, num_action), policy_output/w
(64, num_action), policy_output/b (1, num_action)
'''
assert len(gnn_option_list) == 4
i_value = np.transpose(i_value) # make the num_action to the front
ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
if len(i_value.shape) > 1:
o_value = np.zeros([len(oenv_info['output_list']), i_value.shape[1]])
else:
# the b matrix
o_value = np.zeros([len(oenv_info['output_list'])])
assert len(i_value) == len(ienv_info['output_list'])
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for output_id, output_node_id in enumerate(oenv_info['output_list']):
# get the name of the joint
node_name = oenv_info['tree'][output_node_id]['name']
# is the node already in the input environment?
if node_name in ienv_node_name_list:
if ienv_node_name_list.index(node_name) not in \
ienv_info['output_list']:
logger.warning('Missing joint: {}'.format(node_name))
continue
o_value[output_id] = i_value[
ienv_info['output_list'].index(
ienv_node_name_list.index(node_name)
)
]
else:
# the name format: "@type_@name_@number", e.g.: joint_leg_1
assert len(node_name.split('_')) == 3
# find all the repetitive node and calculate the average
repetitive_struct_node_list = [
ienv_node_name_list.index(name)
for name in ienv_node_name_list
if node_name.split('_')[1] == name.split('_')[1]
]
num_repetitive_nodes = float(len(repetitive_struct_node_list))
assert len(repetitive_struct_node_list) >= 1
for i_node_id in repetitive_struct_node_list:
o_value[output_id] += i_value[
ienv_info['output_list'].index(i_node_id)
] / num_repetitive_nodes
return np.transpose(o_value) + added_constant
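# Example usage (a sketch; the transfer string and option names are illustrative --
# any '<ienv>2<oenv>' pair that mujoco_parser understands would do):
# new_w = map_output('Centipede2CentipedeFour', policy_w, added_constant=0.0,
#                    gnn_option_list=[node_opt, root_opt, output_opt, embed_opt])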
def map_input(transfer_env, i_value, added_constant, gnn_option_list):
assert len(gnn_option_list) == 4
ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_option_list[0],
root_connection_option=gnn_option_list[1],
gnn_output_option=gnn_option_list[2],
gnn_embedding_option=gnn_option_list[3]
)
o_value = np.zeros([oenv_info['debug_info']['ob_size'], i_value.shape[1]])
assert len(i_value) == ienv_info['debug_info']['ob_size']
ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
for output_id, output_node_id in oenv_info['input_dict'].iteritems():
# get the name of the joint
node_name = oenv_info['tree'][output_id]['name']
# is the node already in the input environment?
if node_name in ienv_node_name_list:
o_value[output_node_id] = i_value[
ienv_info['input_dict'][
ienv_node_name_list.index(node_name)
]
]
else:
continue
return o_value
def map_transfer_env_running_mean(ienv, oenv, running_mean_info,
observation_size,
gnn_node_option, root_connection_option,
gnn_output_option, gnn_embedding_option):
# parse the mujoco information
ienv_info = mujoco_parser.parse_mujoco_graph(
ienv,
gnn_node_option=gnn_node_option,
root_connection_option=root_connection_option,
gnn_output_option=gnn_output_option,
gnn_embedding_option=gnn_embedding_option
)
oenv_info = mujoco_parser.parse_mujoco_graph(
oenv,
gnn_node_option=gnn_node_option,
root_connection_option=root_connection_option,
gnn_output_option=gnn_output_option,
gnn_embedding_option=gnn_embedding_option
)
i_running_mean_info = running_mean_info
# start_coeff scales the recorded step count when transferring the running mean
# (1 keeps it unchanged)
start_coeff = 1
o_running_mean_info = {
'step': i_running_mean_info['step'] * start_coeff,
'mean': np.zeros([observation_size]),
'variance': np.zeros([observation_size]),
'square_sum': np.zeros([observation_size])
}
#!/usr/bin/python3
import functools
import multiprocessing
import random
import unittest
import numpy as np
import scipy.special
import helper.basis
import helper.grid
import tests.misc
class Test45SpatAdaptiveUP(tests.misc.CustomTestCase):
@staticmethod
def createDataHermiteHierarchization(p):
n, d, b = 4, 1, 0
bases = [helper.basis.HierarchicalWeaklyFundamentalSpline(p, nu=nu)
for nu in range((p+1)//2)]
grid = helper.grid.RegularSparseBoundary(n, d, b)
X, L, I = grid.generate()
X, L, I = X.flatten(), L.flatten(), I.flatten()
K = np.column_stack((L, I))
f = (lambda X: 0.3 + np.sin(2.3*np.pi*(X-0.2)))
fX = f(X)
return bases, n, X, L, I, K, fX
def hermiteHierarchizationCallback(self, fl, y, l, K, bases):
p = bases[0].p
nodalIl = helper.grid.getNodalIndices(l)
Kl = np.array([self.findLevelIndex(K, l, i) for i in nodalIl])
Xl = helper.grid.getCoordinates(l, nodalIl)
for q in range((p+1)//2):
Yl = np.zeros_like(Xl)
for lp in range(l+1):
hierIlp = helper.grid.getHierarchicalIndices(lp)
for ip in hierIlp:
Yl += (y[self.findLevelIndex(K, lp, ip)] *
bases[q].evaluate(lp, ip, Xl))
self.assertAlmostEqual(Yl, fl[l][Kl,q])
@staticmethod
def findLevelIndex(K, l, i):
lp, ip = helper.grid.convertNodalToHierarchical(l, i)
return (np.where((K == (lp, ip)).all(axis=1))[0][0])
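# e.g. the nodal point (l=2, i=2) coincides with the hierarchical point (l=1, i=1);
# findLevelIndex resolves such aliases before looking the point up in K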
@staticmethod
def dividedDifference(data):
# data in the form
# [(a, f(a), df(a), ...), (b, f(b), df(b), ...), ...]
if len(data) == 1:
return data[0][-1] / scipy.special.factorial(len(data[0]) - 2)
else:
dataLeft = list(data)
if len(dataLeft[-1]) > 2: dataLeft[-1] = dataLeft[-1][:-1]
else: del dataLeft[-1]
dataRight = list(data)
if len(dataRight[0]) > 2: dataRight[0] = dataRight[0][:-1]
else: del dataRight[0]
return ((Test45SpatAdaptiveUP.dividedDifference(dataRight) -
Test45SpatAdaptiveUP.dividedDifference(dataLeft)) /
(data[-1][0] - data[0][0]))
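# e.g. dividedDifference([(0, 1), (1, 3)]) = (3 - 1) / (1 - 0) = 2, the secant slope;
# tuples with extra entries (derivative values) handle repeated abscissas in the Hermite case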
@staticmethod
def hermiteInterpolation1D(xx, data, nu=0):
# data in the form
# [(a, f(a), df(a), ...), (b, f(b), df(b), ...), ...]
yy = np.zeros((len(xx), nu+1))
xProduct = [1] + (nu * [0])
curXData = []
curData = []
for dataPoint in data:
x = dataPoint[0]
curData.append([x])
for k in range(1, len(dataPoint)):
curData[-1].append(dataPoint[k])
coeff = Test45SpatAdaptiveUP.dividedDifference(curData)
for q in range(nu, -1, -1):
yy[:,q] += coeff * xProduct[q]
xProduct[q] = (xProduct[q] * (xx - x) +
q * (xProduct[q-1] if q > 0 else 0))
return yy
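# Example (a sketch): f(x) = x**2 with values and first derivatives at 0 and 1,
# i.e. data = [(0.0, 0.0, 0.0), (1.0, 1.0, 2.0)]; the cubic Hermite interpolant
# reproduces the quadratic exactly, so
# hermiteInterpolation1D([0.5], data, nu=1) -> approximately [[0.25, 1.0]]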
@staticmethod
def hermiteHierarchization1D(u, n, K, bases, testCallback=None):
N = u.shape[0]
p = bases[0].p
y = np.zeros((N,))
fl = np.zeros((n+1, N, (p+1)//2))
k0 = Test45SpatAdaptiveUP.findLevelIndex(K, 0, 0)
k1 = Test45SpatAdaptiveUP.findLevelIndex(K, 0, 1)
for i in range(2):
k = (k0 if i == 0 else k1)
y[k] = u[k]
fl[0][k][0] = u[k]
if p > 1: fl[0][k][1] = (u[k1] - u[k0])
for l in range(1, n+1):
nodalIl = helper.grid.getNodalIndices(l)
Kl = np.array([Test45SpatAdaptiveUP.findLevelIndex(K, l, i)
for i in nodalIl])
Xl = helper.grid.getCoordinates(l, nodalIl)
hierIl = np.array(helper.grid.getHierarchicalIndices(l))
flm1 = np.zeros((len(nodalIl), (p+1)//2))
evenIl = [i for i in nodalIl if i not in hierIl]
flm1[evenIl] = fl[l-1][Kl[evenIl]]
for i in hierIl:
data = [np.hstack((Xl[i-1], flm1[i-1])),
np.hstack((Xl[i+1], flm1[i+1]))]
flm1[i] = Test45SpatAdaptiveUP.hermiteInterpolation1D(
[Xl[i]], data, nu=(p-1)//2)
rl = np.zeros_like(nodalIl, dtype=float)
rl[hierIl] = u[Kl[hierIl]] - flm1[hierIl][:,0]
A = np.zeros((len(hierIl), len(hierIl)))
for i in hierIl: A[:,(i-1)//2] = bases[0].evaluate(l, i, Xl[hierIl])
b = rl[hierIl]
yl = np.linalg.solve(A, b)
y[Kl[hierIl]] = yl
for q in range((p+1)//2):
rl = np.zeros_like(nodalIl, dtype=float)
for i in hierIl: rl += y[Kl[i]] * bases[q].evaluate(l, i, Xl)
for i in nodalIl: fl[l][Kl[i]][q] = flm1[i][q] + rl[i]
if testCallback is not None: testCallback(fl, y, l, K, bases)
return y
@staticmethod
def iterativeRefinement(u, y0, Linv, Lp):
r = u - Linv(y0)
y = np.array(y0)
for m in range(1000):
if np.max(np.abs(r)) < 1e-10: break
y += Lp(r)
r -= Linv(Lp(r))
return y, r
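# Example (a sketch): refine A y = u with a cheap Jacobi preconditioner M ~ A^{-1}
# (converges here because the spectral radius of I - A M is below 1):
# A = np.array([[2.0, 1.0], [1.0, 3.0]])
# M = np.diag(1.0 / np.diag(A))
# y, r = Test45SpatAdaptiveUP.iterativeRefinement(
#     np.array([1.0, 2.0]), np.zeros(2), lambda x: A.dot(x), lambda x: M.dot(x))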
@staticmethod
def getChain(l1, i1, l2, i2, T):
chain = [(np.array(l1), np.array(i1))]
for t in T:
lNext, iNext = chain[-1]
lNext, iNext = np.array(lNext), np.array(iNext)
lNext[t], iNext[t] = l2[t], i2[t]
chain.append((lNext, iNext))
if np.all(chain[-1][0] == l2) and np.all(chain[-1][1] == i2):
return chain
else:
return None
def testLemmaIterativeRefinementEquivalent(self):
# tested in testPropIterativeRefinementSufficient
pass
def testPropIterativeRefinementSufficient(self):
tol = {"rtol" : 1e-3, "atol" : 1e-8}
for p in [1, 3, 5, 7]:
basisLin1D = helper.basis.HierarchicalBSpline(1)
basis1D = helper.basis.HierarchicalBSpline(p)
for d in range(1, 5):
f = tests.misc.getObjectiveFunction(d)
basisLin = (helper.basis.TensorProduct(basisLin1D, d) if d > 1 else
basisLin1D)
basis = (helper.basis.TensorProduct(basis1D, d) if d > 1 else
basis1D)
with self.subTest(p=p, d=d):
X, L, I = tests.misc.generateSpatiallyAdaptiveSparseGrid(
d, 500)
fX = f(X)
A = tests.misc.computeInterpolationMatrix(basis, X, L, I)
aX = np.linalg.solve(A, fX)
ALin = tests.misc.computeInterpolationMatrix(basisLin, X, L, I)
ALinInv = np.linalg.inv(ALin)
Linv = (lambda x: np.dot(A, x))
Lp = (lambda x: np.dot(ALinInv, x))
u = fX
y0 = 2 * np.random.random((X.shape[0],)) - 1
y, r = self.iterativeRefinement(u, y0, Linv, Lp)
if np.max(np.abs(r)) < 1e-10:
self.assertAlmostEqual(y, aX, **tol)
else:
self.assertNotAlmostEqual(y, aX, **tol)
N = X.shape[0]
m = 100
power = np.linalg.matrix_power(np.eye(N) - np.dot(A, ALinInv), m)
powerNormRoot = np.power(np.linalg.norm(power), 1/m)
if powerNormRoot < 1:
self.assertAlmostEqual(y, aX, **tol)
def testLemmaDualityUnidirectionalPrinciple(self):
n, b = 4, 0
hierarchical = True
bases = tests.misc.getExampleHierarchicalBases()
for basisName, d, basis in bases:
f = tests.misc.getObjectiveFunction(d)
modified = ("Modified" in basisName)
if "ClenshawCurtis" in basisName: distribution = "clenshawCurtis"
else: distribution = "uniform"
with self.subTest(basis=basisName, d=d):
#X, L, I = tests.misc.generateSpatiallyAdaptiveSparseGrid(
# d, 500)
grid = (helper.grid.RegularSparse(n, d) if modified else
helper.grid.RegularSparseBoundary(n, d, b))
X, L, I = grid.generate()
if distribution != "uniform":
X = helper.grid.getCoordinates(L, I, distribution=distribution)
fX = f(X)
u = np.array(fX)
K = tests.misc.convertToContinuous(L, I)
T = np.arange(d)
np.random.shuffle(T)
L1D = functools.partial(tests.misc.hierarchize1D,
basis, distribution, hierarchical)
bases1D = (basis.basis1D if d > 1 else [basis])
y = tests.misc.unidirectionalPrinciple(u, K, T, L1D)
TRev = T[::-1]
LInv1D = functools.partial(tests.misc.hierarchize1D,
basis, distribution, hierarchical,
mode="dehierarchize")
u2 = tests.misc.unidirectionalPrinciple(y, K, TRev, LInv1D)
if d == 1: X, L, I = X.flatten(), L.flatten(), I.flatten()
A = tests.misc.computeInterpolationMatrix(basis, X, L, I)
aX = np.linalg.solve(A, fX)
                fX2 = np.dot(A, y)
import scipy, numpy, typing, numbers
from tequila.objective import Objective
from tequila.objective.objective import assign_variable, Variable, format_variable_dictionary, format_variable_list
from .optimizer_base import Optimizer
from ._containers import _EvalContainer, _GradContainer, _HessContainer, _QngContainer
from collections import namedtuple
from tequila.utils.exceptions import TequilaException
from tequila.circuit.noise import NoiseModel
from tequila.tools.qng import get_qng_combos
class TequilaScipyException(TequilaException):
""" """
pass
SciPyReturnType = namedtuple('SciPyReturnType', 'energy angles history scipy_output')
class OptimizerSciPy(Optimizer):
""" """
gradient_free_methods = ['NELDER-MEAD', 'COBYLA', 'POWELL', 'SLSQP']
gradient_based_methods = ['L-BFGS-B', 'BFGS', 'CG', 'TNC']
hessian_based_methods = ["TRUST-KRYLOV", "NEWTON-CG", "DOGLEG", "TRUST-NCG", "TRUST-EXACT", "TRUST-CONSTR"]
@classmethod
def available_methods(cls):
""":return: All tested available methods"""
return cls.gradient_free_methods + cls.gradient_based_methods + cls.hessian_based_methods
def __init__(self, method: str = "L-BFGS-B",
tol: numbers.Real = None,
method_options=None,
method_bounds=None,
method_constraints=None,
silent: bool = True,
**kwargs):
"""
Optimize a circuit to minimize a given objective using scipy
See the Optimizer class for all other parameters to initialize
:param method: The scipy method passed as string
:param use_gradient: do gradient based optimization
:param tol: See scipy documentation for the method you picked
:param method_options: See scipy documentation for the method you picked
:param method_bounds: See scipy documentation for the method you picked
:param method_constraints: See scipy documentation for the method you picked
:param silent: if False the optimizer print out all evaluated energies
:param use_gradient: select if gradients shall be used. Can be done automatically for most methods
"""
super().__init__(**kwargs)
if hasattr(method, "upper"):
self.method = method.upper()
else:
self.method = method
self.tol = tol
self.method_options = method_options
if method_bounds is not None:
method_bounds = {assign_variable(k): v for k, v in method_bounds.items()}
self.method_bounds = method_bounds
self.silent = silent
if method_options is None:
self.method_options = {'maxiter': self.maxiter}
else:
self.method_options = method_options
if 'maxiter' not in method_options:
self.method_options['maxiter'] = self.maxiter
self.method_options['disp'] = not silent
if method_constraints is None:
self.method_constraints = ()
else:
self.method_constraints = method_constraints
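    # Minimal usage sketch (assumes an existing tequila Objective `obj`; the
    # option values below are illustrative, not defaults):
    #   optimizer = OptimizerSciPy(method="COBYLA", tol=1e-4,
    #                              method_options={"maxiter": 200}, silent=False)
    #   result = optimizer(objective=obj, initial_values={"a": 0.1})
    #   result.energy, result.angles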
def __call__(self, objective: Objective,
variables: typing.List[Variable] = None,
initial_values: typing.Dict[Variable, numbers.Real] = None,
gradient: typing.Dict[Variable, Objective] = None,
hessian: typing.Dict[typing.Tuple[Variable, Variable], Objective] = None,
reset_history: bool = True,
*args,
**kwargs) -> SciPyReturnType:
"""
Optimizes with scipy and gives back the optimized angles
Get the optimized energies over the history
:param objective: The tequila Objective to minimize
:param initial_values: initial values for the objective
:param return_scipy_output: chose if the full scipy output shall be returned
:param reset_history: reset the history before optimization starts (has no effect if self.save_history is False)
:return: tuple of optimized energy ,optimized angles and scipy output
"""
infostring = "{:15} : {}\n".format("Method", self.method)
infostring += "{:15} : {} expectationvalues\n".format("Objective", objective.count_expectationvalues())
if gradient is not None:
infostring += "{:15} : {}\n".format("grad instr", gradient)
if hessian is not None:
infostring += "{:15} : {}\n".format("hess_instr", hessian)
if self.save_history and reset_history:
self.reset_history()
active_angles, passive_angles, variables = self.initialize_variables(objective, initial_values, variables)
# Transform the initial value directory into (ordered) arrays
param_keys, param_values = zip(*active_angles.items())
        param_values = numpy.array(param_values)
# This code is a part of XMM: Generate and Analyse (XGA), a module designed for the XMM Cluster Survey (XCS).
# Last modified by <NAME> (<EMAIL>) 25/02/2021, 13:52. Copyright (c) <NAME>
import numpy as np
from astropy.units import Quantity
from ...models.misc import power_law
from ...products.relation import ScalingRelation
# These are from the classic M-T relation paper published by Arnaud, as R-T relations are a byproduct of M-T relations
arnaud_r200 = ScalingRelation(np.array([0.57, 1674]), np.array([0.02, 23]), power_law, Quantity(5, 'keV'),
Quantity(1, 'kpc'), r"T$_{\rm{x}}$", "E(z)R$_{200}$",
relation_author='Arnaud et al.', relation_year='2005',
relation_doi='10.1051/0004-6361:20052856',
relation_name=r'R$_{200}$-Temperature', x_lims=Quantity([1, 12], 'keV'))
arnaud_r500 = ScalingRelation(np.array([0.57, 1104]), np.array([0.02, 13]), power_law, Quantity(5, 'keV'),
Quantity(1, 'kpc'), r"T$_{\rm{x}}$", "E(z)R$_{500}$",
relation_author='Arnaud et al.', relation_year='2005',
relation_doi='10.1051/0004-6361:20052856',
relation_name=r'R$_{500}$-Temperature', x_lims=Quantity([1, 12], 'keV'))
arnaud_r2500 = ScalingRelation(np.array([0.56, 491]), np.array([0.02, 4]), power_law, Quantity(5, 'keV'),
                               Quantity(1, 'kpc'), r"T$_{\rm{x}}$", "E(z)R$_{2500}$",
                               relation_author='Arnaud et al.', relation_year='2005',
                               relation_doi='10.1051/0004-6361:20052856',
                               relation_name=r'R$_{2500}$-Temperature', x_lims=Quantity([1, 12], 'keV'))
# note: the source truncates after the second argument; the remaining arguments
# above are reconstructed from the R200/R500 templates immediately preceding
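# Hand-evaluation sketch of these relations (hypothetical helper; it simply
# restates the fitted power law, e.g. E(z)R500 = 1104 kpc * (T / 5 keV)^0.57):
#   def r500_kpc(temp_kev, ez=1.0):
#       return 1104.0 * (temp_kev / 5.0) ** 0.57 / ez
#   r500_kpc(5.0)   # -> 1104.0 kpc at the 5 keV pivot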
#### Import needed libraries
import sys
import os
import h5py
import importlib
import numpy as np
import torch
import NNtools
import shutil
import time
import scipy.spatial as spat
from scipy.ndimage import affine_transform
import copy
from skimage.color import rgb2gray
from skimage.transform import warp
from skimage.registration import optical_flow_tvl1, optical_flow_ilk
import scipy.ndimage as sim
import targeted_augmentation_objects
torch.autograd.set_detect_anomaly(True)
#### This is the grammar to parse the command line parameters
sys.path.append("src/neural_network_scripts/models")
st=time.time()
#assert len(sys.argv)==3
## Don't change
logfn=sys.argv[2]#MB: the address of the log file
dataset_path=sys.argv[1]#MB: I think a copy of the whole data set is in this path
dataset_name=os.path.split(dataset_path)[1].split(".")[0]#MB: os.path.split returns a (head, tail) pair; the tail has no slash
props=dataset_name.split("_")
NetName=props[-2]
print("NetName: "+NetName)
runname=props[-1]
print("runname: "+runname)
identifier="net/"+NetName+"_"+runname
GetTrain = int(sys.argv[5])#train on previous training set or not
print("GetTrain:"+str(GetTrain))
#### These are the run options
########################################################################
#Run status
deformInput=int(sys.argv[3])#determines whether to generate the deformed frames or not
if deformInput==1:
deformation_trick=True
else:
deformation_trick=False
adiabatic_trick=False
verbose=True
requeue=False
skiptrain=False
if requeue:
    usenet="net"
#Run detail
channel_num=2
##directory handling
reusedirec="data/data_temp/zmdir"
#neural network training parameters
batch_size=1
#determines what augmentation method is used and when that is stopped and replaced by affine transformations
aug_dict={0:"aff_cut",50:"aff"}
num_epochs=int(sys.argv[4])#number of epochs
ep_augoff=30
lr=0.003
patience=8
num_workers=8
print_iou_every=10
if deformation_trick:
num_additional=int(sys.argv[8])#number of deformed frames you are generating
deformation_num_epochs=2
deformation_augdict={0:"aff_cut"}
if adiabatic_trick:
tnum="all"
k_by_func=True
if k_by_func:
def k_func():
k=list(np.linspace(3,25,150).astype(np.int16))
while np.sum(k)<T:
k.append(25)
return np.array(k)
else:
k=6#number or numbers to feed in at once, can be array or int
short_memory=4 #nearby memory to train in
num_random_memory=10 #randomly added memory
num_random_memory_train=5 #randomly added memory from original training data(potential overlap with above)
#dset size will be k*short_memory+num_random_memory+num_random_memory_train except for edge cases
def adiabatic_epoch_func(iters):#function giving epochs for adiabatic trick
min_adia_epochs=7
return max(100//iters,min_adia_epochs)
adiabatic_ep_augoff=2
lr_adia=0.0003
patience_adia=2
########################################################################
####Check the dependencies
dependencies=["W","H","D","C","T","N_neurons"]
h5=h5py.File(dataset_path,"r+")
for dep in dependencies:
if dep not in h5.attrs.keys():
h5.close()
assert False, "Dependency "+dep+" not in attributes"
T=h5.attrs["T"]
if "oldT" in h5.attrs.keys():
origfrNum = h5.attrs["oldT"]
else:
origfrNum = h5.attrs["T"]
C,W,H,D=h5.attrs["C"],h5.attrs["W"],h5.attrs["H"],h5.attrs["D"]#x,y,z ordering
channel_num=min(channel_num,C)
shape=(channel_num,W,H,D)#x,y,z ordering
gridpts=np.moveaxis(np.array(np.meshgrid(np.arange(W),np.arange(H),np.arange(D),indexing="ij")),0,3)
grid=gridpts.reshape(-1,3)
#### Extra parameters for adiabatic_trick
if adiabatic_trick:
if k_by_func:
k=k_func()
elif type(k)==int:
nums=(T//k+1)
k=np.full(nums,k)
assert np.sum(k)>=T
def get_lineup_def():
ts=np.arange(T)
ds=np.min(np.abs(ts[:,None]-np.array(traininds)[None,:]),axis=1)
return np.argsort(ds)
def get_pts(i):
pts,_=NNtools.get_pts_iou(NNtools.pack(h5,identifier,i,gridpts,num_classes,thres=thres))
pts=pts.astype(np.float32)
return pts
def updatemask(i,pts):
key=identifier+"/"+str(i)+"/pts"
if key in h5.keys():
del h5[key]
dset=h5.create_dataset(key,(num_classes,3),dtype="f4")
dset[...]=pts
fr=np.array(h5[str(i)+"/frame"])
mask=NNtools.get_mask(fr[0],pts,num_classes,grid,thres=thres,distthres=distthres).astype(np.int16)
h5[identifier+"/"+str(i)+"/predmask"][...]=mask
#### logging file function, this is the runtime log
def write_log(txt,end="\n"):
with open(logfn,"w") as f:
f.write(txt+end)
logform="Prepare={:04.4f} Train={:04.4f} Predict={:04.4f} GetPoints={:04.4f}"
write_log(logform.format(0.,0.,0.,0.))
#### saving backup function
def repack():# MB: closes current h5 and opens a new one
global h5
h5.close()
NNtools.repack(dataset_path)
h5=h5py.File(dataset_path,"r+")
def save_backup():#MB: closes, saves and opens the file in readable format again
global h5
h5.close()
shutil.copyfile(dataset_path,dataset_path+"_backup")
h5=h5py.File(dataset_path,"r+")
#### Handle run related parameters
write_log(logform.format(0.,0.,0.,0.))
if requeue:
assert identifier in h5.keys(), "requeue requested but identifier not present"
log=h5[identifier].attrs["log"]
else:
if identifier in h5.keys():
del h5[identifier]
h5.create_group(identifier)
log=""
h5[identifier].attrs["log"]=log
if reusedirec is not None:#MB: use the dir of masks and frames for training if it already exists
datadir=reusedirec
else:
datadir=os.path.join("data","data_temp",dataset_name)
#### The computation loop is all in a try except to delete the data
if verbose:
print("Preparing...")
try:
num_classes=h5.attrs["N_neurons"]+1
####Unpack for fast, multiprocessed loading, unless already unpacked
#MB: if not already done, this part saves all the frames and all the masks (which are less than the number of frames.)
DeforemeFrames = int(sys.argv[6])#whether or not add the deformed frames?
if reusedirec is None or not os.path.exists(reusedirec): # TODO: is this correct?
os.mkdir(datadir)
os.mkdir(os.path.join(datadir,"frames"))
os.mkdir(os.path.join(datadir,"highs"))
os.mkdir(os.path.join(datadir,"masks"))
Ntot=0
Nans=0
print("unpacking frames")#MB check
for i in range(T):# I think this unpacks all the segmented/mask frames -MB
write_log(logform.format(min(i/T,0.8),0.,0.,0.))
fr=np.array(h5[str(i)+"/frame"]).astype(np.int16) # TODO: access to dataset with methods?
np.save(os.path.join(datadir,"frames","frame_"+str(i)+".npy"),fr)
Ntot+=1
if str(i)+"/high" in h5.keys():
np.save(os.path.join(datadir,"highs","high_"+str(i)+".npy"),np.array(h5[str(i)+"/high"]).astype(np.int16))
else:
np.save(os.path.join(datadir,"highs","high_"+str(i)+".npy"),np.full((1,W,H),255).astype(np.int16))
if GetTrain == 0:
if str(i)+"/mask" in h5.keys():
np.save(os.path.join(datadir,"masks","mask_"+str(i)+".npy"),np.array(h5[str(i)+"/mask"]).astype(np.int16))
Nans+=1
if GetTrain == 1:
k = 0#index of the dataset you want to copy, usually 0 for the first datasset
NNname = list(h5['net'].keys())#the new network is NNname[0] so use NNname[1] to access previous networks training set
traininds = h5['net'][NNname[k]].attrs['traininds']#train indices are saved as an attribute of the first dataset(run w/O deformation)
for j in traininds:#placing the training frames and masks in the deformation folder
np.save(os.path.join(datadir,"masks","mask_"+str(j)+".npy"),np.array(h5[str(j)+"/mask"]).astype(np.int16))
Nans+=1
if DeforemeFrames == 1:
num_added_frames = int(sys.argv[7])
if num_added_frames==0:
for l in range(origfrNum,T):#to add the deformed frames
np.save(os.path.join(datadir,"masks","mask_"+str(l)+".npy"),np.array(h5[str(l)+"/mask"]).astype(np.int16))
Nans+=1
else:
for l in range(origfrNum,origfrNum+num_added_frames):#to add the deformed frames
np.save(os.path.join(datadir,"masks","mask_"+str(l)+".npy"),np.array(h5[str(l)+"/mask"]).astype(np.int16))
Nans+=1
tnum="all"
vnum=0
assert Nans>0, "At least one mask is needed"
allset=NNtools.TrainDataset(datadir,shape)
''' MB added the following section to get the right total number of cells as categories'''
U =set() #MB added
for i in allset.indlist :
U=U.union(set(np.unique(h5[str(i)+"/mask"])))
num_classes = len(U)#MB added
#### Initialize the network ####
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
log+="device= "+str(device)+"\n"
NetMod = importlib.import_module(NetName)
net=NetMod.Net(n_channels=shape[0],num_classes=num_classes)
if requeue:
net.load_state_dict(NNtools.load_from_h5(h5[identifier+"/"+usenet])) #MB: dictionary of h5[identifier] items where the values are converted to torch tensors.
log+="weight load Successful\n"
if verbose:
print("weight load Successful\n")
net.to(device=device)
n_params=sum([p.numel() for p in net.parameters()])
log+="Total number of parameters:"+str(n_params)+"\n"
allset=NNtools.TrainDataset(datadir,shape)
''' MB: added the following section to get the right total number of cells as categories'''
print("Total number of cells in the segmented frames")
print(num_classes)
print(U)
totnum=len(allset)
print("number of annotated frames: ")
print(totnum)
if True:#GetTrain == 0 :
'''
MB: partition the dataset from scratch to training set and validation set
'''
if GetTrain == 0:
tnum = int(sys.argv[7])
vnum = int(sys.argv[8])
if tnum=="all" or vnum==0:
traindataloader= torch.utils.data.DataLoader(allset, batch_size=batch_size,shuffle=True, num_workers=num_workers,pin_memory=True)
traininds=allset.indlist
tnum=len(allset)
vnum=0
else:
if totnum==(tnum+vnum):
tset,vset=torch.utils.data.random_split(allset,[tnum,vnum])
else:
tset,vset,_=torch.utils.data.random_split(allset,[tnum,vnum,totnum-tnum-vnum])
traindataloader= torch.utils.data.DataLoader(tset, batch_size=batch_size,shuffle=True, num_workers=num_workers,pin_memory=True)
valdataloader=torch.utils.data.DataLoader(vset,batch_size=batch_size,shuffle=True,num_workers=num_workers,pin_memory=True)
traininds=allset.indlist[tset.indices]
h5[identifier].attrs["traininds"]=traininds
if GetTrain == 0:
if vnum>0:
h5[identifier].attrs["Validinds"]=allset.indlist[vset.indices]# MB added
else:
h5[identifier].attrs["Validinds"]=[]
Ut =set() #MB added
for i in traininds :
Ut=Ut.union(set(np.unique(h5[str(i)+"/mask"])))
current_classes = len(Ut)#MB added
print("existing classes in the training set are:")
print(Ut)
#### requeue should match training indices
if requeue and not adiabatic_trick:
assert all(np.array(h5[identifier].attrs["traininds"])==traininds),"traininds not matching"
#### save training indices
log+="Training with: trainset: "+str(tnum)+" valset: "+str(vnum)+"\n"
num_trains=len(traindataloader)
digits=len(str(num_trains))#for pretty print
if vnum>0:
num_vals=len(valdataloader)
digits_v=len(str(num_vals))
#define the iou function (intersection over union)
'''This is a measure of how good the object detection task is performed.
iou = (the intersection of area of neuron i in ground truth and predicted mask)/
(union of i's areain GT and predicted mask)
'''
def get_ious(preds,mask,skip=False):
if skip:
return np.full(num_classes,np.nan)
maskgot=torch.argmax(preds,dim=1)
ioubins=np.zeros(num_classes)
for i in range(num_classes):
thismask=(mask==i)#MB: area of the neuron i in ground truth(GT) mask
if torch.sum(thismask).item()==0:#MB: the case when neuron i is absent in GT
ioubins[i]=np.nan
continue
thismaskgot=(maskgot==i)#MB: area of the neuron i in predicted mask
intersection=torch.sum(thismask&thismaskgot).item()
union=torch.sum(thismask|thismaskgot).item()
ioubins[i]=intersection/union
return ioubins
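    # Sanity sketch for get_ious (shapes illustrative): a one-hot encoding of
    # the ground truth yields IoU 1.0 for present classes and np.nan for
    # classes absent from the mask:
    #   onehot = torch.nn.functional.one_hot(mask, num_classes)   # (B,W,H,D,C)
    #   preds = onehot.permute(0, 4, 1, 2, 3).float()             # (B,C,W,H,D)
    #   get_ious(preds, mask)   # 1.0 where a class occurs in mask, else nan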
#define the criterion function
def selective_ce(pred_raw,target_mask):
existing=torch.unique(target_mask)
with torch.no_grad():
trf=torch.zeros(num_classes).to(device=device,dtype=torch.long)
trf[existing]=torch.arange(0,len(existing)).to(device=device,dtype=torch.long)
mask=trf[target_mask]
mask.requires_grad=False
return torch.nn.CrossEntropyLoss()(pred_raw[:,existing],mask)
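    # Relabelling illustration (values illustrative): if target_mask contains
    # only classes {0, 3, 7}, then existing == [0, 3, 7] and trf maps
    # 0 -> 0, 3 -> 1, 7 -> 2, so the loss runs on pred_raw[:, [0, 3, 7]]
    # against contiguous labels.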
criterion=selective_ce
#send to device and load the weights if requeue
optimizer=torch.optim.Adam(net.parameters(),lr=lr)
scheduler=torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode="min",factor=0.3,patience=patience,min_lr=5e-5)
#copy the log if we are on requeue
h5[identifier].attrs["log"]=log
if requeue:
loss_iou=np.array(h5[identifier]["loss_iou"])
inds=list(loss_iou[:,0])
losses=list(loss_iou[:,1])
iouss=[ious for ious in loss_iou[:,2:]]
else:
losses=[]
iouss=[]
inds=[]
#measure time
log+="Header Time"+str(time.time()-st)+" s \n"
log+="Starting Training\n\n"
#if int(sys.argv[3])==1:
# skiptrain=True
if skiptrain:
print("Skipping Training")
elif adiabatic_trick:
if verbose:
print("Starting Train")
ts=[]
st=time.time()
eplosses=[]
gc=0#global count
#typical neural network training script
for epoch in range(num_epochs):
ts.append(time.time()-st)
if epoch==ep_augoff:
text="augment is now:"+allset.change_augment("aff")
log+=text
if verbose:
print(text)
log+="Epoch: "+str(epoch)+" lr: "+str(optimizer.param_groups[0]['lr'])+"\n"
if verbose:
print("Epoch: "+str(epoch)+" lr: "+str(optimizer.param_groups[0]['lr']))
net.train()
eploss=0
count=0
for i,(fr,mask) in enumerate(traindataloader):
fr = fr.to(device=device, dtype=torch.float32)
mask= mask.to(device=device, dtype=torch.long)
preds=net(fr)
loss=criterion(preds,mask)
optimizer.zero_grad()
loss.backward()
optimizer.step()
eploss+=loss.item()
count+=1
losses.append(loss.item())
ious=get_ious(preds,mask,((gc%print_iou_every)!=0))
iouss.append(ious)
inds.append(0)
if verbose:
print(" train"+str(i+1).zfill(digits)+"/"+str(num_trains)+" loss: "+str(loss.item())+" nanmeaniou: "+str("nan" if np.all(np.isnan(ious)) else np.nanmean(ious)))
log+=" train"+str(i+1).zfill(digits)+"/"+str(num_trains)+" loss: "+str(loss.item())+" nanmeaniou: "+str("nan" if np.all(np.isnan(ious)) else np.nanmean(ious))+"\n"
gc+=1
eploss=eploss/count
if allset.augment=="aff":
scheduler.step(eploss)#step scheduler by epoch loss
log+="Epoch Loss: "+str(eploss)+"\n"+"\n"
eplosses.append(eploss)
#save net in h5
if "net" in h5[identifier].keys():
del h5[identifier]["net"]
h5.create_group(identifier+"/net")
NNtools.save_into_h5(h5[identifier+"/net"],net.state_dict())
#save loss and iou
if "loss_iou" in h5[identifier].keys():
del h5[identifier]["loss_iou"]
dset=h5[identifier].create_dataset("loss_iou",(len(losses),2+len(ious)),dtype="f4",compression="gzip")
dset[...]=np.concatenate((np.array(inds)[:,None],np.array(losses)[:,None],np.array(iouss)),axis=1).astype(np.float32)
log+="Results saved."+"\n"+"\n"
h5[identifier].attrs["log"]=log
#CRITICAL, emergency break
if os.path.exists("STOP"):
break
#save net in h5
if "basenet" in h5[identifier].keys():
del h5[identifier]["basenet"]
h5.create_group(identifier+"/basenet")
NNtools.save_into_h5(h5[identifier+"/basenet"],net.state_dict())
repack()
save_backup()#make h5 backup
#now call lineup of the frames
if "lineup" in h5.keys():
lineup=np.array(h5["lineup"])
else:#if non existing, make the default lineup
lineup=get_lineup_def()
dset=h5.create_dataset("lineup",shape=(T,),dtype="i2")
dset[...]=np.array(lineup).astype(np.int16)
assert all((np.sort(np.array(traininds))==np.sort(lineup[:len(traininds)]))), "The first lineup elements should be traininds"
repack()
save_backup()#make h5 backup
        evalset=NNtools.EvalDataset(datadir,shape)#we need the evaluation set for aligned indices for all frames
        lineup=list(lineup)[len(traininds):]#traininds should be at the beginning
added=[list(traininds)]#traininds are already added
c=0
while True:
# regen optimizers
optimizer=torch.optim.Adam(net.parameters(),lr=lr_adia)
scheduler=torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,mode="min",factor=0.3,patience=patience_adia,min_lr=5e-5)
#add mask
ichilds=[]
breaker=False
#pop k[c] elemets from lineup
k_one=k[c]
for _ in range(k_one):
if len(lineup)==0:
break
ichilds.append(lineup.pop(0))
added.append(ichilds)
if verbose:
print(len(lineup)," left")
#make predictions
net.eval()
for ichild in ichilds:
with torch.no_grad():
fr,mask=evalset[ichild]
fr=fr.unsqueeze(0).to(device=device, dtype=torch.float32)
pred=net(fr)
predmask=torch.argmax(pred[0],dim=0).cpu().detach().numpy().astype(np.int16)
del fr,pred
if identifier+"/"+str(ichild)+"/predmask" in h5.keys():
del h5[identifier+"/"+str(ichild)+"/predmask"]
h5[identifier].create_dataset(str(ichild)+"/predmask",(predmask.shape),dtype="i2",compression="gzip")
h5[identifier][str(ichild)+"/predmask"][...]=predmask
#This re-masks the data
ptschild=get_pts(ichild)
updatemask(ichild,ptschild)
np.save(os.path.join(datadir,"masks","mask_"+str(ichild)+".npy"),np.array(h5[identifier+"/"+str(ichild)+"/predmask"]).astype(np.int16))
#update dataset
allset=NNtools.TrainDataset(datadir,shape)
# break if we are done: case if np.cumsum(k) exactly ends at lT
if len(allset)==T:
break
# training phase
#all available training indices
t_inds_all=[el for els in added for el in els]
#those in memory
t_inds_memory=[el for els in added[-short_memory:] for el in els]#last in memory
t_inds_memory_dict=dict(zip(t_inds_memory,[True for _ in range(len(t_inds_memory))]))
#those not in memory
buff=[]
for t in t_inds_all:
if t not in t_inds_memory_dict.keys():
buff.append(t)
#get epochs
ad_epochs=adiabatic_epoch_func(len(t_inds_memory))
for epoch in range(ad_epochs):
ts.append(time.time()-st)
# random select random memory
t_inds_random_memory=list(np.random.choice(buff,min(num_random_memory,len(buff)),replace=False))
# random select random memory train
t_inds_random_memory_train=list(np.random.choice(traininds,min(num_random_memory_train,len(traininds)),replace=False))
#all tinds
tinds=[*t_inds_memory,*t_inds_random_memory,*t_inds_random_memory_train]
tset=torch.utils.data.Subset(allset,allset.real_ind_to_dset_ind(tinds))
traindataloader= torch.utils.data.DataLoader(tset, batch_size=batch_size,shuffle=True, num_workers=num_workers,pin_memory=True)
num_trains=len(traindataloader)
if epoch==adiabatic_ep_augoff:
text="augment is now:"+allset.change_augment("aff")
log+=text
if verbose:
print(text)
log+="Epoch: "+str(epoch)+" lr: "+str(optimizer.param_groups[0]['lr'])+"\n"
if verbose:
print("Epoch: "+str(epoch)+"/"+str(ad_epochs)+" lr: "+str(optimizer.param_groups[0]['lr']))
net.train()
eploss=0
count=0
for i,(fr,mask) in enumerate(traindataloader):
fr = fr.to(device=device, dtype=torch.float32)
mask= mask.to(device=device, dtype=torch.long)
preds=net(fr)
loss=criterion(preds,mask)
optimizer.zero_grad()
loss.backward()
optimizer.step()
eploss+=loss.item()
count+=1
losses.append(loss.item())
ious=get_ious(preds,mask,((gc%print_iou_every)!=0))
iouss.append(ious)
inds.append(0)
if verbose:
print(" train"+str(i+1).zfill(digits)+"/"+str(num_trains)+" loss: "+str(loss.item())+" nanmeaniou: "+str("nan" if np.all(np.isnan(ious)) else np.nanmean(ious)))
log+=" train"+str(i+1).zfill(digits)+"/"+str(num_trains)+" loss: "+str(loss.item())+" nanmeaniou: "+str("nan" if np.all(np.isnan(ious)) else np.nanmean(ious))+"\n"
gc+=1
eploss=eploss/count
if allset.augment=="aff":
scheduler.step(eploss)
log+="Epoch Loss: "+str(eploss)+"\n"+"\n"
eplosses.append(eploss)
#save net in h5
if "net" in h5[identifier].keys():
del h5[identifier]["net"]
h5.create_group(identifier+"/net")
NNtools.save_into_h5(h5[identifier+"/net"],net.state_dict())
#save loss and iou
if "loss_iou" in h5[identifier].keys():
del h5[identifier]["loss_iou"]
dset=h5[identifier].create_dataset("loss_iou",(len(losses),2+len(ious)),dtype="f4",compression="gzip")
dset[...]=np.concatenate((np.array(inds)[:,None],np.array(losses)[:,None],np.array(iouss)),axis=1).astype(np.float32)
log+="Results saved."+"\n"+"\n"
h5[identifier].attrs["log"]=log
#CRITICAL
if os.path.exists("STOP"):
break
if os.path.exists("STOP"):
break
c+=1
if c%20==0:
repack()
save_backup()#save backup, harvard rc cluster
else:# not defTrick:#usual neural network train
if verbose:
print("Starting Train")
ts=[]
st=time.time()
eplosses=[]
gc=0#global count
#typical neural network training script
if not int(sys.argv[3]):
for epoch in range(num_epochs):
ts.append(time.time()-st)
if epoch in aug_dict.keys():
text="augment is now:"+allset.change_augment(aug_dict[epoch])
log+=text
if verbose:
print(text)
log+="Epoch: "+str(epoch)+" lr: "+str(optimizer.param_groups[0]['lr'])+"\n"
if verbose:
print("Epoch: "+str(epoch)+" lr: "+str(optimizer.param_groups[0]['lr']))
net.train()
eploss=0
count=0
for i,(fr,mask) in enumerate(traindataloader):
fr = fr.to(device=device, dtype=torch.float32)
mask= mask.to(device=device, dtype=torch.long)
preds=net(fr)
loss=criterion(preds,mask)
optimizer.zero_grad()
loss.backward()
optimizer.step()
eploss+=loss.item()
count+=1
losses.append(loss.item())
ious=get_ious(preds,mask,((gc%print_iou_every)!=0))
iouss.append(ious)
inds.append(0)
if verbose:
print(" train"+str(i+1).zfill(digits)+"/"+str(num_trains)+" loss: "+str(loss.item())+" nanmeaniou: "+str("nan" if np.all(np.isnan(ious)) else np.nanmean(ious)))
log+=" train"+str(i+1).zfill(digits)+"/"+str(num_trains)+" loss: "+str(loss.item())+" nanmeaniou: "+str("nan" if np.all(np.isnan(ious)) else np.nanmean(ious))+"\n"
gc+=1
eploss=eploss/count
if allset.augment=="aff":
scheduler.step(eploss)#step scheduler by epoch loss
log+="Epoch Loss: "+str(eploss)+"\n"+"\n"
eplosses.append(eploss)
if vnum>0:
net.eval()
log+="Validation:"+"\n"
eploss=0
count=0
for i,(fr,mask) in enumerate(valdataloader):
fr = fr.to(device=device, dtype=torch.float32)
mask= mask.to(device=device, dtype=torch.long)
with torch.no_grad():
preds=net(fr)
loss=criterion(preds,mask)
losses.append(loss.item())
eploss+=loss.item()
count+=1
ious=get_ious(preds,mask,False)
iouss.append(ious)
inds.append(1)
if verbose:
print(" val"+str(i+1).zfill(digits_v)+"/"+str(num_vals)+" loss: "+str(loss.item())+" nanmeaniou: "+str(np.nanmean(ious)))
log+=" val"+str(i+1).zfill(digits_v)+"/"+str(num_vals)+" loss: "+str(loss.item())+" nanmeaniou: "+str(np.nanmean(ious))+"\n"
eploss=eploss/count
log+="Mean Validation Loss: "+str(eploss)+"\n"+"\n"
eplosses.append(eploss)
#save net in h5
if "net" in h5[identifier].keys():
del h5[identifier]["net"]
h5.create_group(identifier+"/net")
NNtools.save_into_h5(h5[identifier+"/net"],net.state_dict())
#save loss and iou
if "loss_iou" in h5[identifier].keys():
del h5[identifier]["loss_iou"]
dset=h5[identifier].create_dataset("loss_iou",(len(losses),2+len(ious)),dtype="f4",compression="gzip")
dset[...]=np.concatenate((np.array(inds)[:,None],np.array(losses)[:,None],np.array(iouss)),axis=1).astype(np.float32)
log+="Results saved."+"\n"+"\n"
h5[identifier].attrs["log"]=log
#CRITICAL, emergency break
if os.path.exists("STOP"):
break
#log
write_log(logform.format(1.,(epoch+1)/num_epochs,0.,0.))
write_log(logform.format(1.,1.,0.,0.))
ts=np.array(ts).astype(np.float32)
if "ts" in h5[identifier].keys():
del h5[identifier]["ts"]
dset=h5[identifier].create_dataset("ts",ts.shape,dtype="f4")
dset[...]=ts
if verbose:
print("Training Successful\n")
log+="Training Successful\n\n"
'''
MB:
adding the new deformation technique for augmentation
'''
if deformation_trick:
import cv2
def noisy(noise_typ,image):
if noise_typ == "gauss":
row,col,ch= image.shape
mean = 0
var = 0.001
                sigma = var**0.5
                gauss = np.random.normal(mean, sigma, (row, col, ch))
                # assumption: the standard additive-noise ending of this
                # snippet; the source truncates after the gauss line
                return image + gauss
import numpy as np
class DataSampler(object):
"""DataSampler samples the conditional vector and corresponding data for CTGAN."""
def __init__(self, data, output_info, log_frequency):
self._data = data
def is_discrete_column(column_info):
return (len(column_info) == 1
and column_info[0].activation_fn == "softmax")
n_discrete_columns = sum(
[1 for column_info in output_info if is_discrete_column(column_info)])
self._discrete_column_matrix_st = np.zeros(
n_discrete_columns, dtype="int32")
# Store the row id for each category in each discrete column.
# For example _rid_by_cat_cols[a][b] is a list of all rows with the
# a-th discrete column equal value b.
self._rid_by_cat_cols = []
# Compute _rid_by_cat_cols
st = 0
for column_info in output_info:
if is_discrete_column(column_info):
span_info = column_info[0]
ed = st + span_info.dim
rid_by_cat = []
for j in range(span_info.dim):
rid_by_cat.append(np.nonzero(data[:, st + j])[0])
self._rid_by_cat_cols.append(rid_by_cat)
st = ed
else:
st += sum([span_info.dim for span_info in column_info])
assert st == data.shape[1]
# Prepare an interval matrix for efficiently sample conditional vector
max_category = max(
[column_info[0].dim for column_info in output_info
if is_discrete_column(column_info)], default=0)
self._discrete_column_cond_st = np.zeros(n_discrete_columns, dtype='int32')
self._discrete_column_n_category = np.zeros(
n_discrete_columns, dtype='int32')
self._discrete_column_category_prob = np.zeros(
(n_discrete_columns, max_category))
self._n_discrete_columns = n_discrete_columns
self._n_categories = sum(
[column_info[0].dim for column_info in output_info
if is_discrete_column(column_info)])
st = 0
current_id = 0
current_cond_st = 0
for column_info in output_info:
if is_discrete_column(column_info):
span_info = column_info[0]
ed = st + span_info.dim
category_freq = np.sum(data[:, st:ed], axis=0)
if log_frequency:
category_freq = np.log(category_freq + 1)
category_prob = category_freq / np.sum(category_freq)
self._discrete_column_category_prob[current_id, :span_info.dim] = (
category_prob)
self._discrete_column_cond_st[current_id] = current_cond_st
self._discrete_column_n_category[current_id] = span_info.dim
current_cond_st += span_info.dim
current_id += 1
st = ed
else:
st += sum([span_info.dim for span_info in column_info])
def _random_choice_prob_index(self, discrete_column_id):
probs = self._discrete_column_category_prob[discrete_column_id]
r = np.expand_dims(np.random.rand(probs.shape[0]), axis=1)
return (probs.cumsum(axis=1) > r).argmax(axis=1)
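    # The return line above is vectorised inverse-CDF sampling. Scalar sketch
    # of the same idea:
    #   probs = np.array([0.2, 0.5, 0.3])
    #   r = np.random.rand()
    #   cat = int((probs.cumsum() > r).argmax())   # 0, 1 or 2 with probs above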
def sample_condvec(self, batch):
"""Generate the conditional vector for training.
Returns:
cond (batch x #categories):
The conditional vector.
mask (batch x #discrete columns):
A one-hot vector indicating the selected discrete column.
discrete column id (batch):
Integer representation of mask.
category_id_in_col (batch):
Selected category in the selected discrete column.
"""
if self._n_discrete_columns == 0:
return None
discrete_column_id = np.random.choice(
np.arange(self._n_discrete_columns), batch)
cond = np.zeros((batch, self._n_categories), dtype='float32')
mask = np.zeros((batch, self._n_discrete_columns), dtype='float32')
mask[np.arange(batch), discrete_column_id] = 1
category_id_in_col = self._random_choice_prob_index(discrete_column_id)
category_id = (self._discrete_column_cond_st[discrete_column_id]
+ category_id_in_col)
cond[np.arange(batch), category_id] = 1
return cond, mask, discrete_column_id, category_id_in_col
def sample_original_condvec(self, batch):
"""Generate the conditional vector for generation use original frequency."""
if self._n_discrete_columns == 0:
return None
cond = np.zeros((batch, self._n_categories), dtype='float32')
for i in range(batch):
row_idx = np.random.randint(0, len(self._data))
            col_idx = np.random.randint(0, self._n_discrete_columns)
"""
Kernel topic model with Gibbs Sampler
=====================================
Reference: Hennig et al., 2012 & Murphy's MLPP book Ch. 27
"""
import numpy as np
from sklearn.gaussian_process import GaussianProcessRegressor as GPR
from sklearn.gaussian_process.kernels import RBF
from tqdm import tqdm, trange
np.random.seed(1)
# Words
W = np.array([0, 1, 2, 3, 4])
# D := document words
X = np.array([
[0, 0, 1, 2, 2],
[0, 0, 1, 1, 1],
[0, 1, 2, 2, 2],
[4, 4, 4, 4, 4],
[3, 3, 4, 4, 4],
[3, 4, 4, 4, 4]
])
N_D = X.shape[0] # num of docs
N_W = W.shape[0] # num of words
N_K = 2 # num of topics
N_F = 3 # num of features
# Document features
Phi = np.random.randn(N_D, N_F)
# Dirichlet priors
alpha = 1
beta = 1
# k independent GP priors
ls = 1 # length-scale for RBF kernel
tau = 1 # Observation noise variance
kernel = RBF([ls]*N_F)
GPRs = []
for k in range(N_K):
GPR_k = GPR(kernel=kernel, alpha=tau)
GPR_k = GPR_k.fit(Phi, np.zeros(N_D))
GPRs.append(GPR_k)
# -------------------------------------------------------------------------------------
# Laplace bridge
# -------------------------------------------------------------------------------------
def gauss2dir(mu, Sigma):
K = len(mu)
Sigma_diag = np.diag(Sigma)
alpha = 1/Sigma_diag * (1 - 2/K + np.exp(mu)/K**2 * np.sum(np.exp(-mu)))
return alpha
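# Worked value for gauss2dir: with K = 3, mu = np.zeros(3), Sigma = np.eye(3),
# alpha_k = 1 * (1 - 2/3 + (1/9) * 3) = 2/3 for every k:
#   gauss2dir(np.zeros(3), np.eye(3))   # -> array([0.6667, 0.6667, 0.6667])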
def dir2gauss(alpha):
K = len(alpha)
    # the source truncates after this assignment; centring by the mean log is
    # the standard Laplace-bridge inverse (the diagonal Sigma step is lost)
    mu = np.log(alpha) - np.mean(np.log(alpha))
# coding=UTF-8
from manimlib.imports import *
import numpy as np
numbers = [21, 99, 49, 11, 66, 5, 78, 86]
class Sort(Scene):
def construct(self):
        # Display the intro text
        text1 = Text("Merge Sort\n\nA divide-and-conquer algorithm:\nfirst split into many subsequences,\nthen sort each subsequence,\nfinally merge them into one ordered sequence", color=WHITE, font="黑体")
text1.scale(1.5)
text1.move_to(np.array([0.0, 0.0, 0.0]))
self.play(ShowCreation(text1))
self.wait(2)
self.play(Uncreate(text1))
        # Level 1
group1 = VGroup()
for i in range(8):
group1.add(Square(side_length=1))
if i > 0: group1[i].next_to(group1[i-1], RIGHT, 0)
group1.move_to(np.array([0.0, 3.0, 0.0]))
self.play(FadeIn(group1))
        # Numbers
elements = []
for i in range(len(numbers)):
elements.append(Integer(numbers[i]))
elements[i].move_to(np.array([-3.5 + i * 1.0, 3.0, 0.0]))
self.play(ShowCreation(elements[i]))
        # Level 2
arrow1to2_1 = Arrow(start=np.array([-0.5, 2.5, 0.0]), end=np.array([-3.0, 1.5, 0.0]))
arrow1to2_2 = Arrow(start=np.array([0.5, 2.5, 0.0]), end=np.array([3.0, 1.5, 0.0]))
self.play(ShowCreation(arrow1to2_1))
self.play(ShowCreation(arrow1to2_2))
group2_1 = VGroup()
for i in range(4):
group2_1.add(Square(side_length=1))
if i > 0: group2_1[i].next_to(group2_1[i-1], RIGHT, 0)
        group2_1.move_to(np.array([-2.0, 3.0, 0.0]))
# Simply to take quick glance at the Mandelbrodt set project
# (animated plot with varying n_max)
from __future__ import print_function
import math as ma
import matplotlib.animation as manim
import matplotlib.cm as cm
import matplotlib.colors as col
import matplotlib.pyplot as plt
import matplotlib.ticker as tick
import numpy as np
from matplotlib.gridspec import GridSpec
import paramplots as prp
import utils as u
fractal_mode = 'Mandelbrodt'
# fractal_mode = 'Julia'
# julia_C = -0.7 + 0.27015j
julia_C = -0.8j
# julia_C = 1.0 - (1.0+ma.sqrt(5.0))/2.0
# julia_C = -0.7269+0.1889j
N_points = 512 # resolution of the image
fixed_n_max = 6e1
# color_map = cm.jet
temp_jet = cm.jet(np.linspace(0, 1, 256))
# color_map.set_under('black')
# color_map.set_over('white')
temp_black = cm.Greys(np.linspace(1, 1, 1))
colors = np.vstack((temp_black, temp_jet))
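# A hedged sketch of the intended use of `colors`: prepending one black row
# lets non-escaping (in-set) points map to black once wrapped in a colormap:
#   fractal_cmap = col.ListedColormap(colors)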
import logging
from glob import glob
from pathlib import Path
import cv2
import numpy as np
from imgaug import augmenters as iaa
from imgaug.augmentables import BoundingBox, BoundingBoxesOnImage, Keypoint, KeypointsOnImage
from omegaconf.listconfig import ListConfig
from PIL import Image
from pycocotools.coco import COCO
from torch.utils import data
from utils.helper import instantiate_augmenters
from utils.image import draw_umich_gaussian as draw_gaussian
from utils.image import gaussian_radius
from utils.box import get_annotation_with_angle, rotate_bbox
cv2.setNumThreads(0)
log = logging.getLogger(__name__)
class Dataset(data.Dataset):
def __init__(
self, image_folder, annotation_file, input_size=(512, 512),
target_domain_glob=None, num_classes=80, num_keypoints=0,
rotated_boxes=False, mean=(0.40789654, 0.44719302, 0.47026115),
std=(0.28863828, 0.27408164, 0.27809835),
augmentation=None, augment_target_domain=False, max_detections=150,
down_ratio=4):
self.image_folder = Path(image_folder)
self.coco = COCO(annotation_file)
self.images = self.coco.getImgIds()
self.use_rotated_boxes = rotated_boxes
self.max_detections = max_detections
self.down_ratio = down_ratio
self.input_size = input_size
self.mean = np.array(mean, dtype=np.float32).reshape(1, 1, 3)
self.std = np.array(std, dtype=np.float32).reshape(1, 1, 3)
self.augmentation = augmentation
self.num_classes = num_classes
self.num_keypoints = num_keypoints
self.string_id_mapping = {}
self.augment_target_domain = augment_target_domain
self.cat_mapping = {v: i for i,
v in enumerate(range(1, num_classes + 1))}
self.classes = {y: self.coco.cats[x] if x in self.coco.cats else ''
for x, y in self.cat_mapping.items()}
assert len(input_size) == 2
if isinstance(target_domain_glob, str):
self.target_domain_files = glob(target_domain_glob)
elif isinstance(target_domain_glob, (list, ListConfig)):
self.target_domain_files = []
for pattern in target_domain_glob:
self.target_domain_files.extend(glob(pattern))
else:
self.target_domain_files = []
if self.augmentation:
augmentation_methods = instantiate_augmenters(augmentation)
self.augmentation = iaa.Sequential(augmentation_methods)
self.resize = iaa.Resize((self.input_size[0], self.input_size[1]))
self.resize_out = iaa.Resize(
(self.input_size[0] // down_ratio,
self.input_size[1] // down_ratio))
log.info(
f"found {len(self.target_domain_files)} samples for target domain")
super().__init__()
def __len__(self):
return len(self.images)
def __getitem__(self, index):
img_id = self.images[index]
file_name = self.coco.loadImgs(ids=[img_id])[0]['file_name']
img_path = self.image_folder / file_name
ann_ids = self.coco.getAnnIds(imgIds=[img_id])
anns = self.coco.loadAnns(ids=ann_ids)
num_objs = min(len(anns), self.max_detections)
img = np.array(Image.open(img_path).convert("RGB"))
if self.use_rotated_boxes:
ret = self.__get_rotated_coco(img, anns, num_objs)
else:
ret = self.__get_default_coco(img, anns, num_objs)
if isinstance(img_id, str):
mapped_id = self.string_id_mapping.get(
img_id, 1 + len(self.string_id_mapping))
self.string_id_mapping[img_id] = mapped_id
img_id = mapped_id
ret['id'] = img_id
if len(self.target_domain_files):
target_domain_img = np.array(Image.open(
np.random.choice(self.target_domain_files)).convert("RGB"))
if self.augmentation is not None and self.augment_target_domain:
target_domain_img = self.augmentation(image=target_domain_img)
target_domain_img = self.resize(image=target_domain_img)
target_domain_img = np.array(
target_domain_img, dtype=np.float32) / 255.0
target_domain_img = (target_domain_img - self.mean) / self.std
target_domain_img = target_domain_img.transpose(2, 0, 1)
ret['target_domain_input'] = target_domain_img
return ret
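    # Sketch of the inverse of the normalisation applied in __getitem__,
    # useful when visualising batches (hypothetical helper; assumes a CHW
    # float input):
    #   def denormalise(self, inp):
    #       img = inp.transpose(1, 2, 0) * self.std + self.mean
    #       return (img * 255.0).clip(0, 255).astype(np.uint8)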
def __get_default_coco(self, img, anns, num_objs):
boxes = []
if self.num_keypoints > 0:
kpts = []
for k in range(num_objs):
ann = anns[k]
bbox = self._coco_box_to_bbox(ann['bbox'])
boxes.append(BoundingBox(*bbox))
if self.num_keypoints > 0:
if 'keypoints' not in ann:
ann['keypoints'] = np.zeros((3 * self.num_keypoints,))
kpt = [
Keypoint(*x)
for x in np.array(ann['keypoints']).reshape(-1, 3)
[:, : 2]]
kpts.extend(kpt)
bbs = BoundingBoxesOnImage(boxes, shape=img.shape)
if self.num_keypoints > 0:
kpts = KeypointsOnImage(kpts, shape=img.shape)
if self.augmentation is not None:
if self.num_keypoints > 0:
img_aug, bbs_aug, kpts_aug = self.augmentation(
image=img, bounding_boxes=bbs, keypoints=kpts)
else:
img_aug, bbs_aug = self.augmentation(
image=img, bounding_boxes=bbs)
else:
if self.num_keypoints > 0:
kpts_aug = kpts.copy()
img_aug, bbs_aug = np.copy(img), bbs.copy()
if self.num_keypoints > 0:
img_aug, bbs_aug, kpts_aug = self.resize(
image=img_aug, bounding_boxes=bbs_aug, keypoints=kpts_aug)
else:
img_aug, bbs_aug = self.resize(
image=img_aug, bounding_boxes=bbs_aug)
img = (img_aug.astype(np.float32) / 255.)
inp = (img - self.mean) / self.std
inp = inp.transpose(2, 0, 1)
output_h = self.input_size[1] // self.down_ratio
output_w = self.input_size[0] // self.down_ratio
num_classes = self.num_classes
hm = np.zeros((num_classes, output_h, output_w), dtype=np.float32)
wh = np.zeros((self.max_detections, 2), dtype=np.float32)
reg = np.zeros((self.max_detections, 2), dtype=np.float32)
ind = np.zeros((self.max_detections), dtype=np.int64)
reg_mask = np.zeros((self.max_detections), dtype=np.uint8)
        gt_det = np.zeros((self.max_detections, 6), dtype=np.float32)  # x1, y1, x2, y2, score, class
gt_areas = np.zeros((self.max_detections), dtype=np.float32)
if self.num_keypoints > 0:
kp = np.zeros(
(self.max_detections,
self.num_keypoints * 2),
dtype=np.float32)
gt_kp = np.zeros(
(self.max_detections, self.num_keypoints, 2), dtype=np.float32)
kp_reg_mask = np.zeros(
(self.max_detections, self.num_keypoints * 2), dtype=np.uint8)
bbs_aug, kpts_aug = self.resize_out(
bounding_boxes=bbs_aug, keypoints=kpts_aug)
else:
bbs_aug = self.resize_out(bounding_boxes=bbs_aug)
for k in range(num_objs):
ann = anns[k]
bbox_aug = bbs_aug[k].clip_out_of_image((output_w, output_h))
bbox = np.array([bbox_aug.x1, bbox_aug.y1,
bbox_aug.x2, bbox_aug.y2])
cls_id = int(self.cat_mapping[ann['category_id']])
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
radius = gaussian_radius((np.ceil(h), np.ceil(w)))
radius = max(0, int(radius))
ct = np.array(
[(bbox[0] + bbox[2]) / 2, (bbox[1] + bbox[3]) / 2],
dtype=np.float32)
ct_int = ct.astype(np.int32)
draw_gaussian(hm[cls_id], ct_int, radius)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
gt_det[k] = ([ct[0] - w / 2, ct[1] - h / 2,
ct[0] + w / 2, ct[1] + h / 2, 1, cls_id])
if self.num_keypoints > 0:
valid = np.array(ann["keypoints"]).reshape(-1, 3)[:, -1]
for i, p in enumerate(
kpts_aug[k * self.num_keypoints: k * self.num_keypoints + self.num_keypoints]):
kp[k][i * 2] = p.x - ct_int[0]
kp[k][i * 2 + 1] = p.y - ct_int[1]
is_valid = valid[i] == 2 and not p.is_out_of_image(
                        (output_w, output_h))
kp_reg_mask[k, i * 2] = int(is_valid)
kp_reg_mask[k, i * 2 + 1] = int(is_valid)
gt_kp[k][i] = p.x, p.y
if "area" not in ann:
gt_areas[k] = w * h
else:
gt_areas[k] = ann["area"]
del bbs
del bbs_aug
del img_aug
        gt_det = np.array(gt_det, dtype=np.float32)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 6 14:40:17 2020
@author: lukepinkel
"""
import numpy as np
import scipy as sp
def fo_fc_fd(f, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1.0/3.0)
n = len(np.asarray(x))
g, h = np.zeros(n), np.zeros(n)
for i in range(n):
h[i] = eps
g[i] = (f(x+h, *args) - f(x, *args)) / eps
h[i] = 0
return g
def so_fc_fd(f, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1.0/3.0)
n = len(np.asarray(x))
H, hi, hj = np.zeros((n, n)), np.zeros(n), np.zeros(n)
eps2 = eps**2
for i in range(n):
hi[i] = eps
for j in range(i+1):
hj[j] = eps
H[i, j] = (f(x+hi+hj, *args) - f(x+hi, *args) - f(x+hj, *args) + f(x, *args)) / eps2
H[j, i] = H[i, j]
hj[j] = 0
hi[i] = 0
return H
def so_gc_fd(g, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1.0/3.0)
n = len(np.asarray(x))
H, h = np.zeros((n, n)), np.zeros(n)
gx, gxh = np.zeros((n, n)), np.zeros((n, n))
for i in range(n):
h[i] = eps
gx[i] = g(x, *args)
gxh[i] = g(x+h, *args)
h[i] = 0
for i in range(n):
for j in range(i+1):
H[i, j] = ((gxh[i, j] - gx[i, j]) + (gxh[j, i] - gx[j, i])) / (2 * eps)
H[j, i] = H[i, j]
return H
def fo_fc_cd(f, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1.0/3.0)
n = len(np.asarray(x))
g, h = np.zeros(n), np.zeros(n)
for i in range(n):
h[i] = eps
g[i] = (f(x+h, *args) - f(x - h, *args)) / (2 * eps)
h[i] = 0
return g
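# Quick check of the central difference on f(x) = x0**2 + 3*x1, whose exact
# gradient at (1, 2) is (2, 3):
#   f = lambda x: x[0]**2 + 3*x[1]
#   fo_fc_cd(f, np.array([1.0, 2.0]))   # ~ array([2., 3.])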
def so_fc_cd(f, x, eps=None, args=()):
p = len(np.asarray(x))
if eps is None:
eps = (np.finfo(float).eps)**(1./3.)
H = np.zeros((p, p))
ei = np.zeros(p)
ej = np.zeros(p)
for i in range(p):
for j in range(i+1):
ei[i], ej[j] = eps, eps
if i==j:
dn = -f(x+2*ei, *args)+16*f(x+ei, *args)\
-30*f(x, *args)+16*f(x-ei, *args)-f(x-2*ei, *args)
nm = 12*eps**2
H[i, j] = dn/nm
else:
dn = f(x+ei+ej, *args)-f(x+ei-ej, *args)-f(x-ei+ej, *args)+f(x-ei-ej, *args)
nm = 4*eps*eps
H[i, j] = dn/nm
H[j, i] = dn/nm
ei[i], ej[j] = 0.0, 0.0
return H
def so_gc_cd(g, x, eps=None, args=()):
if eps is None:
eps = (np.finfo(float).eps)**(1./3.)
n = len(np.asarray(x))
H, h = np.zeros((n, n)), np.zeros(n)
gxp, gxn = np.zeros((n, n)), np.zeros((n, n))
for i in range(n):
h[i] = eps
gxp[i] = g(x+h, *args)
gxn[i] = g(x-h, *args)
h[i] = 0
for i in range(n):
for j in range(i+1):
H[i, j] = ((gxp[i, j] - gxn[i, j] + gxp[j, i] - gxn[j, i])) / (4 * eps)
H[j, i] = H[i, j]
return H
def fd_coefficients(points, order):
A = np.zeros((len(points), len(points)))
A[0] = 1
for i in range(len(points)):
A[i] = np.asarray(points)**(i)
b = np.zeros(len(points))
b[order] = sp.special.factorial(order)
c = np.linalg.inv(A).dot(b)
return c
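# Example: the classic second-order central stencil for a first derivative
# falls out of the Vandermonde solve above:
#   fd_coefficients([-1, 0, 1], order=1)   # ~ array([-0.5, 0., 0.5])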
def finite_diff(f, x, epsilon=None, order=1, points=None):
if points is None:
points = np.arange(-4, 5)
if epsilon is None:
epsilon = (np.finfo(float).eps)**(1./3.)
coefs = fd_coefficients(points, order)
df = 0.0
for c, p in list(zip(coefs, points)):
df+=c*f(x+epsilon*p)
df = df / (epsilon**order)
return df
def grad_approx(f, x, eps=1e-4, tol=None, d=1e-4, nr=6, v=2):
tol = np.finfo(float).eps**(1/3) if tol is None else tol
h = np.abs(d * x) + eps * (np.abs(x) < tol)
n = len(x)
u = np.zeros_like(h)
A = np.zeros((nr, n))
for i in range(nr):
for j in range(n):
u[j] = h[j]
A[i, j] = (f(x + u) - f(x - u)) / (2.0 * h[j])
u[j] = 0.0
h /= v
for i in range(nr-1):
t = 4**(i+1)
A = (A[1:(nr-i)]*t - A[:(nr-i-1)]) / (t-1.0)
return A
def jac_approx(f, x, eps=1e-4, tol=None, d=1e-4, nr=6, v=2):
tol = np.finfo(float).eps**(1/3) if tol is None else tol
h = np.abs(d * x) + eps * (np.abs(x) < tol)
n = len(x)
p = len(f(x))
u = np.zeros_like(h)
A = np.zeros((nr, n, p))
for i in range(nr):
for j in range(n):
u[j] = h[j]
A[i, j] = (f(x + u) - f(x - u)) / (2.0 * h[j])
u[j] = 0.0
h /= v
for i in range(nr-1):
t = 4**(i+1)
A = (A[1:(nr-i)]*t - A[:(nr-i-1)]) / (t-1.0)
return A
def _hess_approx(f, x, a_eps=1e-4, r_eps=1e-4, xtol=None, nr=6, s=2):
    xtol = np.finfo(float).eps**(1/3) if xtol is None else xtol
d = np.abs(r_eps * x) + a_eps * (np.abs(x) < xtol)
y = f(x)
u = np.zeros_like(d)
v = np.zeros_like(d)
nx, ny = len(x), len(y)
D = np.zeros((ny, int(nx * (nx + 3) // 2)))
Da = np.zeros((ny, nr))
    Hd = np.zeros((ny, nx))
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
import numpy as np
from . import gl
from .globject import GLObject
from .wrappers import _check_conversion
from ..util import logger
GL_SAMPLER_3D = 35679
def glTexImage3D(target, level,
internalformat, format, type, pixels):
# Import from PyOpenGL
try:
import OpenGL.GL as _gl
except ImportError:
raise ImportError('PyOpenGL is required for 3D texture support')
border = 0
if isinstance(pixels, (tuple, list)):
depth, height, width = pixels
pixels = None
else:
depth, height, width = pixels.shape[:3]
_gl.glTexImage3D(target, level, internalformat,
width, height, depth, border, format, type, pixels)
def glTexSubImage3D(target, level,
xoffset, yoffset, zoffset, format, type, pixels):
# Import from PyOpenGL
try:
import OpenGL.GL as _gl
except ImportError:
raise ImportError('PyOpenGL is required for 3D texture support')
depth, height, width = pixels.shape[:3]
_gl.glTexSubImage3D(target, level, xoffset, yoffset, zoffset,
width, height, depth, format, type, pixels)
def _check_texture_format(value):
valid_dict = {'luminance': gl.GL_LUMINANCE,
'alpha': gl.GL_ALPHA,
'luminance_alpha': gl.GL_LUMINANCE_ALPHA,
'rgb': gl.GL_RGB,
'rgba': gl.GL_RGBA}
return _check_conversion(value, valid_dict)
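# e.g. _check_texture_format('rgb') -> gl.GL_RGB; unrecognised keys are
# rejected by _check_conversion.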
# ----------------------------------------------------------- Texture class ---
class Texture(GLObject):
"""
A Texture is used to represent a topological set of scalar values.
Parameters
----------
target : GLEnum
gl.GL_TEXTURE2D
gl.GL_TEXTURE_CUBE_MAP
data : ndarray
Texture data (optional)
shape : tuple of integers
Texture shape (optional)
dtype : dtype
Texture data type (optional)
base : Texture
Base texture of this texture
offset : tuple of integers
Offset of this texture relative to base texture
store : bool
Specify whether this object stores a reference to the data,
allowing the data to be updated regardless of striding. Note
that modifying the data after passing it here might result in
undesired behavior, unless a copy is given. Default True.
resizeable : bool
Indicates whether texture can be resized
"""
_formats = {
1: gl.GL_LUMINANCE, # or ALPHA,
2: gl.GL_LUMINANCE_ALPHA,
3: gl.GL_RGB,
4: gl.GL_RGBA
}
_types = {
np.dtype(np.int8): gl.GL_BYTE,
np.dtype(np.uint8): gl.GL_UNSIGNED_BYTE,
np.dtype(np.int16): gl.GL_SHORT,
np.dtype(np.uint16): gl.GL_UNSIGNED_SHORT,
np.dtype(np.int32): gl.GL_INT,
np.dtype(np.uint32): gl.GL_UNSIGNED_INT,
# np.dtype(np.float16) : gl.GL_HALF_FLOAT,
np.dtype(np.float32): gl.GL_FLOAT,
# np.dtype(np.float64) : gl.GL_DOUBLE
}
def __init__(self, data=None, shape=None, dtype=None, base=None,
target=None, offset=None, store=True, resizeable=True):
GLObject.__init__(self)
self._data = None
self._base = base
self._store = store
self._copy = False # flag to indicate that a copy is made
self._target = target
self._offset = offset
self._pending_data = []
self._resizeable = resizeable
self._valid = True
self._views = []
# Extra stages that are handled in _activate()
self._need_resize = False
self._need_parameterization = True
self._interpolation = gl.GL_NEAREST
self._wrapping = gl.GL_CLAMP_TO_EDGE
# Do we have data to build texture upon ?
if data is not None:
self._need_resize = True
# Handle dtype
if dtype is not None:
data = np.array(data, dtype=dtype, copy=False)
else:
data = np.array(data, copy=False)
self._dtype = data.dtype
# Handle shape
data = self._normalize_shape(data)
if shape is not None:
                raise ValueError('Texture needs data or shape, not both.')
self._shape = data.shape
# Handle storage
if self._store:
if not data.flags["C_CONTIGUOUS"]:
logger.warning("Copying discontiguous data as CPU storage")
self._copy = True
data = data.copy()
self._data = data
# Set data
self.set_data(data, copy=False)
elif dtype is not None:
if shape is not None:
self._need_resize = True
shape = shape or ()
self._shape = self._normalize_shape(shape)
self._dtype = dtype
if self._store:
                self._data = np.zeros(self._shape, dtype=self._dtype)
#!/usr/bin/env python3
# command line program
import argparse
# deepcopy
import copy
# file
import os.path
# numpy
import numpy as np
import scipy as sp
import scipy.integrate
import scipy.linalg
import scipy.interpolate
# internal modules
import libpost
# tmp plot
import matplotlib.pyplot as plt
# nu: 0.000185
# epsilon: 0.103
# kolmogorov length scale: 0.0028
# kolmogorov time scale: 0.0424
# batchelor scale: (nu D^2 / epsilon)^(1/4)
# batchelor (scale < kolmogorov length) eq (D < nu)
# D < 1.85e-4
sc = 1e0
D = 1.85e-4 / sc
s0 = 1e0 * 0.0028 # initial thickness
dA0 = (1e0 * 0.0028)**2 # initial surface
c_nb = 100
c_array = np.linspace(0.0, 1.0, num=c_nb)
C_nb = c_nb
C_array = np.linspace(0.0, 1.0, num=C_nb)
rho_nb = c_nb
log_rho_array = np.linspace(-np.log(10), np.log(10000), num=rho_nb)
#process_times = [0.0, 0.002, 0.004, 0.008, 0.016, 0.032, 0.064, 0.128]
process_times = [0.002, 0.004, 0.016, 0.032, 0.064, 0.128, 0.256, 0.512, 1.024, 1.536, 2.048, 4.096, 8.192] # step 0.002
def parse():
parser = argparse.ArgumentParser(description='Computes statistics of the lagrangian gradients matrix (computed along particle trajectories)')
return parser.parse_args()
def dL_dt(foperator, L, t):
return np.matmul(foperator(t), L.reshape((3,3), order='C')).flatten(order='C')
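# Sketch: integrating the deformation gradient for a constant velocity
# gradient A gives L(t) = expm(A t); e.g. with A = diag(0.1, 0, -0.1):
#   A = np.diag([0.1, 0.0, -0.1])
#   L = sp.integrate.odeint(lambda Lf, t: dL_dt(lambda _: A, Lf, t),
#                           np.eye(3).flatten(), [0.0, 1.0])[-1].reshape(3, 3)
#   # L ~ diag(exp(0.1), 1.0, exp(-0.1))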
def F(a):
return np.log((4.0/a + 1) + np.sqrt((4.0/a + 1)**2 - 1.0)) - 1.6/np.sqrt(a + 1.4)
def P(rho, mu_pp, mu_p, mu):
# init result
result = np.zeros((rho.size, mu.size))
# broadcast
rho = np.broadcast_to(rho.reshape((-1, 1)), result.shape)
mu_pp = np.broadcast_to(mu_pp, result.shape)
mu_p = np.broadcast_to(mu_p, result.shape)
mu = np.broadcast_to(mu, result.shape)
#mask
mask_eq_2o37 = (np.power(rho, 2) > mu_p) & (np.power(rho, 2) < mu)
mask_eq_2o38 = (np.power(rho, 2) > mu_pp) & (np.power(rho, 2) < mu_p)
# compute
result[mask_eq_2o37] = rho[mask_eq_2o37] / (np.pi * np.sqrt(mu[mask_eq_2o37] - np.power(rho[mask_eq_2o37], 2)) * np.sqrt(mu_p[mask_eq_2o37] - mu_pp[mask_eq_2o37])) * F(2 * (np.power(rho[mask_eq_2o37], 2) - mu_p[mask_eq_2o37]) * (mu[mask_eq_2o37] - mu_pp[mask_eq_2o37]) / ((mu[mask_eq_2o37] - np.power(rho[mask_eq_2o37], 2)) * (mu_p[mask_eq_2o37] - mu_pp[mask_eq_2o37])))
result[mask_eq_2o38] = rho[mask_eq_2o38] / (np.pi * np.sqrt(mu[mask_eq_2o38] - mu_p[mask_eq_2o38]) * np.sqrt(np.power(rho[mask_eq_2o38], 2) - mu_pp[mask_eq_2o38])) * F(2 * (mu_p[mask_eq_2o38] - np.power(rho[mask_eq_2o38], 2)) * (mu[mask_eq_2o38] - mu_pp[mask_eq_2o38]) / ((np.power(rho[mask_eq_2o38], 2) - mu_pp[mask_eq_2o38]) * (mu[mask_eq_2o38] - mu_p[mask_eq_2o38])))
# return
return result
def Q(C, eta_pp, eta_p, eta):
# init result
result = np.zeros((C.size, eta.size))
# broadcast
C = np.broadcast_to(C.reshape((-1, 1)), result.shape)
eta_pp = np.broadcast_to(eta_pp, result.shape)
eta_p = np.broadcast_to(eta_p, result.shape)
eta = np.broadcast_to(eta, result.shape)
#mask
mask_eq_2o43 = (np.power(C, -2) > eta_pp) & (np.power(C, -2) < eta_p)
mask_eq_2o44 = (np.power(C, -2) > eta_p) & (np.power(C, -2) < eta)
# compute
result[mask_eq_2o43] = dA0 * s0 / (np.pi * np.power(C[mask_eq_2o43], 4) * np.sqrt(eta[mask_eq_2o43] - eta_p[mask_eq_2o43]) * np.sqrt(np.power(C[mask_eq_2o43], -2) - eta_pp[mask_eq_2o43])) * F(2 * (eta_p[mask_eq_2o43] - np.power(C[mask_eq_2o43], -2)) * (eta[mask_eq_2o43] - eta_pp[mask_eq_2o43]) / ((np.power(C[mask_eq_2o43], -2) - eta_pp[mask_eq_2o43]) * (eta[mask_eq_2o43] - eta_p[mask_eq_2o43])))
    result[mask_eq_2o44] = dA0 * s0 / (np.pi * np.power(C[mask_eq_2o44], 4) * np.sqrt(eta[mask_eq_2o44] - np.power(C[mask_eq_2o44], -2)) * np.sqrt(eta_p[mask_eq_2o44] - eta_pp[mask_eq_2o44])) * F(2 * (np.power(C[mask_eq_2o44], -2) - eta_p[mask_eq_2o44]) * (eta[mask_eq_2o44] - eta_pp[mask_eq_2o44]) / ((eta[mask_eq_2o44] - np.power(C[mask_eq_2o44], -2)) * (eta_p[mask_eq_2o44] - eta_pp[mask_eq_2o44])))
    # return (mirrors P above; the source truncates mid-expression here)
    return result
import types
import numpy as np
import sklearn
import torch
from sklearn.linear_model import RANSACRegressor
from utils.iou3d_nms import iou3d_nms_utils
from utils import kitti_util
def cart2hom(pts_3d):
n = pts_3d.shape[0]
pts_3d_hom = np.hstack((pts_3d, np.ones((n, 1), dtype=np.float32)))
return pts_3d_hom
def transform_points(pts_3d_ref, Tr):
pts_3d_ref = cart2hom(pts_3d_ref) # nx4
return np.dot(pts_3d_ref, np.transpose(Tr)).reshape(-1, 4)[:, 0:3]
def load_velo_scan(velo_filename):
scan = np.fromfile(velo_filename, dtype=np.float32)
scan = scan.reshape((-1, 4))
return scan
def load_plane(plane_filename):
with open(plane_filename, 'r') as f:
lines = f.readlines()
lines = [float(i) for i in lines[3].split()]
plane = np.asarray(lines)
# Ensure normal is always facing up, this is in the rectified camera coordinate
if plane[1] > 0:
plane = -plane
norm = np.linalg.norm(plane[0:3])
plane = plane / norm
return plane
def estimate_plane(origin_ptc, max_hs=-1.5, it=1, ptc_range=((-20, 70), (-20, 20))):
mask = (origin_ptc[:, 2] < max_hs) & \
(origin_ptc[:, 0] > ptc_range[0][0]) & \
(origin_ptc[:, 0] < ptc_range[0][1]) & \
(origin_ptc[:, 1] > ptc_range[1][0]) & \
(origin_ptc[:, 1] < ptc_range[1][1])
for _ in range(it):
ptc = origin_ptc[mask]
reg = RANSACRegressor().fit(ptc[:, [0, 1]], ptc[:, 2])
w = np.zeros(3)
w[0] = reg.estimator_.coef_[0]
w[1] = reg.estimator_.coef_[1]
w[2] = -1.0
h = reg.estimator_.intercept_
norm = np.linalg.norm(w)
w /= norm
h = h / norm
result = np.array((w[0], w[1], w[2], h))
result *= -1
mask = np.logical_not(above_plane(
origin_ptc[:, :3], result, offset=0.2))
return result
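# Hedged usage sketch for estimate_plane (synthetic data, hypothetical
# helper): points on a flat ground plane at height -1.7 should yield a plane
# close to (0, 0, 1, 1.7), i.e. z + 1.7 = 0, after the sign/normalization
# steps above.
def _demo_estimate_plane(n=2000, seed=0):
    rng = np.random.RandomState(seed)
    ptc = np.stack([rng.uniform(0, 50, n),                 # x within ptc_range
                    rng.uniform(-10, 10, n),               # y within ptc_range
                    -1.7 + rng.normal(scale=0.02, size=n)], axis=1)
    plane = estimate_plane(ptc)
    assert np.allclose(plane, [0.0, 0.0, 1.0, 1.7], atol=0.05)
    return plane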
def above_plane(ptc, plane, offset=0.05, only_range=((-30, 30), (-30, 30))):
mask = distance_to_plane(ptc, plane, directional=True) < offset
if only_range is not None:
range_mask = (ptc[:, 0] < only_range[0][1]) * (ptc[:, 0] > only_range[0][0]) * \
(ptc[:, 1] < only_range[1][1]) * (ptc[:, 1] > only_range[1][0])
mask *= range_mask
return np.logical_not(mask)
def distance_to_plane(ptc, plane, directional=False):
d = ptc @ plane[:3] + plane[3]
if not directional:
d = np.abs(d)
d /= np.sqrt((plane[:3]**2).sum())
return d
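# Note on the two helpers above: distance_to_plane returns a signed distance
# when directional=True (positive on the normal side), and it divides by
# ||plane[:3]|| itself, so the plane does not need to be pre-normalized.
# above_plane then treats points within `offset` of the plane (inside
# `only_range`) as ground and returns the complement mask.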
import numpy as np
from scipy.spatial import ConvexHull
def minimum_bounding_rectangle(points):
"""
    Find the smallest-area bounding rectangle for a set of points by testing
    each convex-hull edge orientation (rotating-calipers style).
    https://stackoverflow.com/questions/13542855/algorithm-to-find-the-minimum-area-rectangle-for-given-points-in-order-to-comput
    :param points: an nx2 matrix of coordinates
    :return: a 4x2 matrix of rectangle corners, the rotation angle (rad), and the area
    """
pi2 = np.pi/2.
# get the convex hull for the points
hull_points = points[ConvexHull(points).vertices]
# calculate edge angles
    edges = hull_points[1:] - hull_points[:-1]
    angles = np.arctan2(edges[:, 1], edges[:, 0])
    angles = np.abs(np.mod(angles, pi2))
    angles = np.unique(angles)
# find rotation matrices
rotations = np.vstack([
np.cos(angles),
np.cos(angles-pi2),
np.cos(angles+pi2),
np.cos(angles)]).T
rotations = rotations.reshape((-1, 2, 2))
# apply rotations to the hull
rot_points = np.dot(rotations, hull_points.T)
# find the bounding points
min_x = np.nanmin(rot_points[:, 0], axis=1)
max_x = np.nanmax(rot_points[:, 0], axis=1)
min_y = np.nanmin(rot_points[:, 1], axis=1)
max_y = np.nanmax(rot_points[:, 1], axis=1)
# find the box with the best area
areas = (max_x - min_x) * (max_y - min_y)
best_idx = np.argmin(areas)
# return the best box
x1 = max_x[best_idx]
x2 = min_x[best_idx]
y1 = max_y[best_idx]
y2 = min_y[best_idx]
r = rotations[best_idx]
rval = np.zeros((4, 2))
rval[0] = np.dot([x1, y2], r)
rval[1] = np.dot([x2, y2], r)
rval[2] = np.dot([x2, y1], r)
rval[3] = np.dot([x1, y1], r)
return rval, angles[best_idx], areas[best_idx]
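# Hedged sanity sketch (hypothetical helper; assumes scipy is available):
# an axis-aligned unit square, plus an interior point the hull discards,
# should come back with area 1 and angle 0.
def _demo_min_rect():
    pts = np.array([[0., 0.], [1., 0.], [1., 1.], [0., 1.], [0.5, 0.5]])
    corners, angle, area = minimum_bounding_rectangle(pts)
    assert abs(area - 1.0) < 1e-9 and abs(angle) < 1e-9
    return corners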
def PCA_rectangle(cluster_ptc):
components = sklearn.decomposition.PCA(
n_components=2).fit(cluster_ptc).components_
on_component_ptc = cluster_ptc @ components.T
min_x, max_x = on_component_ptc[:, 0].min(), on_component_ptc[:, 0].max()
min_y, max_y = on_component_ptc[:, 1].min(), on_component_ptc[:, 1].max()
area = (max_x - min_x) * (max_y - min_y)
rval = np.array([
[max_x, min_y],
[min_x, min_y],
[min_x, max_y],
[max_x, max_y],
])
rval = rval @ components
angle = np.arctan2(components[0, 1], components[0, 0])
return rval, angle, area
def closeness_rectangle(cluster_ptc, delta=0.1, d0=1e-2):
max_beta = -float('inf')
choose_angle = None
for angle in np.arange(0, 90+delta, delta):
angle = angle / 180. * np.pi
components = np.array([
[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]
])
projection = cluster_ptc @ components.T
min_x, max_x = projection[:,0].min(), projection[:,0].max()
min_y, max_y = projection[:,1].min(), projection[:,1].max()
Dx = np.vstack((projection[:, 0] - min_x, max_x - projection[:, 0])).min(axis=0)
Dy = np.vstack((projection[:, 1] - min_y, max_y - projection[:, 1])).min(axis=0)
beta = np.vstack((Dx, Dy)).min(axis=0)
beta = np.maximum(beta, d0)
beta = 1 / beta
beta = beta.sum()
if beta > max_beta:
max_beta = beta
choose_angle = angle
angle = choose_angle
components = np.array([
[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]
])
projection = cluster_ptc @ components.T
min_x, max_x = projection[:, 0].min(), projection[:, 0].max()
min_y, max_y = projection[:, 1].min(), projection[:, 1].max()
if (max_x - min_x) < (max_y - min_y):
angle = choose_angle + np.pi / 2
components = np.array([
[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]
])
projection = cluster_ptc @ components.T
min_x, max_x = projection[:, 0].min(), projection[:, 0].max()
min_y, max_y = projection[:, 1].min(), projection[:, 1].max()
area = (max_x - min_x) * (max_y - min_y)
rval = np.array([
[max_x, min_y],
[min_x, min_y],
[min_x, max_y],
[max_x, max_y],
])
rval = rval @ components
return rval, angle, area
def variance_rectangle(cluster_ptc, delta=0.1):
max_var = -float('inf')
choose_angle = None
for angle in np.arange(0, 90+delta, delta):
angle = angle / 180. * np.pi
components = np.array([
[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]
])
projection = cluster_ptc @ components.T
min_x, max_x = projection[:, 0].min(), projection[:, 0].max()
min_y, max_y = projection[:, 1].min(), projection[:, 1].max()
Dx = np.vstack((projection[:, 0] - min_x,
max_x - projection[:, 0])).min(axis=0)
Dy = np.vstack((projection[:, 1] - min_y,
max_y - projection[:, 1])).min(axis=0)
Ex = Dx[Dx < Dy]
Ey = Dy[Dy < Dx]
var = 0
if (Dx < Dy).sum() > 0:
var += -np.var(Ex)
if (Dy < Dx).sum() > 0:
var += -np.var(Ey)
# print(angle, var)
if var > max_var:
max_var = var
choose_angle = angle
# print(choose_angle, max_var)
angle = choose_angle
components = np.array([
[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]
])
projection = cluster_ptc @ components.T
min_x, max_x = projection[:, 0].min(), projection[:, 0].max()
min_y, max_y = projection[:, 1].min(), projection[:, 1].max()
if (max_x - min_x) < (max_y - min_y):
angle = choose_angle + np.pi / 2
components = np.array([
[np.cos(angle), np.sin(angle)],
[-np.sin(angle), np.cos(angle)]
])
projection = cluster_ptc @ components.T
min_x, max_x = projection[:, 0].min(), projection[:, 0].max()
min_y, max_y = projection[:, 1].min(), projection[:, 1].max()
area = (max_x - min_x) * (max_y - min_y)
rval = np.array([
[max_x, min_y],
[min_x, min_y],
[min_x, max_y],
[max_x, max_y],
])
rval = rval @ components
return rval, angle, area
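# Hedged sketch for the search-based fitters above (hypothetical helper,
# synthetic data): points sampled on the perimeter of a 2 x 1 rectangle
# rotated by 0.3 rad should be fit with an area close to 2.
def _demo_rect_fit(seed=1):
    rng = np.random.RandomState(seed)
    long_e = np.stack([rng.uniform(-1, 1, 300), rng.choice([-0.5, 0.5], 300)], 1)
    short_e = np.stack([rng.choice([-1.0, 1.0], 100), rng.uniform(-0.5, 0.5, 100)], 1)
    theta = 0.3
    R = np.array([[np.cos(theta), -np.sin(theta)],
                  [np.sin(theta), np.cos(theta)]])
    pts = np.concatenate([long_e, short_e], 0) @ R.T
    _, _, area = closeness_rectangle(pts)
    assert abs(area - 2.0) < 0.2
    return area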
def get_lowest_point_rect(ptc, xz_center, l, w, ry):
ptc_xz = ptc[:, [0, 2]] - xz_center
rot = np.array([
[np.cos(ry), -np.sin(ry)],
[np.sin(ry), np.cos(ry)]
])
ptc_xz = ptc_xz @ rot.T
mask = (ptc_xz[:, 0] > -l/2) & \
(ptc_xz[:, 0] < l/2) & \
(ptc_xz[:, 1] > -w/2) & \
(ptc_xz[:, 1] < w/2)
ys = ptc[mask, 1]
return ys.max()
def get_obj(ptc, full_ptc, fit_method='min_zx_area_fit'):
if fit_method == 'min_zx_area_fit':
corners, ry, area = minimum_bounding_rectangle(ptc[:, [0, 2]])
elif fit_method == 'PCA':
corners, ry, area = PCA_rectangle(ptc[:, [0, 2]])
elif fit_method == 'variance_to_edge':
corners, ry, area = variance_rectangle(ptc[:, [0, 2]])
elif fit_method == 'closeness_to_edge':
corners, ry, area = closeness_rectangle(ptc[:, [0, 2]])
else:
raise NotImplementedError(fit_method)
ry *= -1
l = np.linalg.norm(corners[0] - corners[1])
w = np.linalg.norm(corners[0] - corners[-1])
c = (corners[0] + corners[2]) / 2
# bottom = ptc[:, 1].max()
bottom = get_lowest_point_rect(full_ptc, c, l, w, ry)
h = bottom - ptc[:, 1].min()
obj = types.SimpleNamespace()
obj.t = np.array([c[0], bottom, c[1]])
obj.l = l
obj.w = w
obj.h = h
obj.ry = ry
obj.volume = area * h
return obj
def objs_nms(objs, use_score_rank=False, nms_threshold=0.1):
# generate box array
boxes = np.array(
[[obj.t[0], obj.t[2], 0, obj.l, obj.w, obj.h, -obj.ry] for obj in objs])
boxes = torch.from_numpy(boxes).float().cuda()
overlaps_bev = iou3d_nms_utils.boxes_iou_bev(
boxes.contiguous(), boxes.contiguous())
overlaps_bev = overlaps_bev.cpu().numpy()
mask = np.ones(overlaps_bev.shape[0], dtype=bool)
if use_score_rank:
scores = [obj.score for obj in objs]
order = np.argsort(scores)[::-1]
else:
bbox_area = np.diag(overlaps_bev)
order = bbox_area.argsort()[::-1]
for idx in order:
if not mask[idx]:
continue
mask[overlaps_bev[idx] > nms_threshold] = False
mask[idx] = True
objs_nmsed = [objs[i] for i in range(len(objs)) if mask[i]]
return objs_nmsed
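# Note: objs_nms above is a greedy BEV NMS. Candidates are visited in
# descending score order (use_score_rank=True) or by the diagonal of the
# BEV overlap matrix (each box's self-overlap); every survivor then
# suppresses all boxes whose BEV overlap with it exceeds nms_threshold.
# It depends on the CUDA iou3d_nms_utils extension, so no runnable sketch
# is given here.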
def objs2label(objs, calib, obj_type="Dynamic", with_score=False):
label_strings = []
for obj in objs:
        alpha = -np.arctan2(obj.t[0], obj.t[2])
from __future__ import print_function
import itertools
import math
import os
import random
import shutil
import tempfile
import unittest
import uuid
import numpy as np
import pytest
import tensorflow as tf
import coremltools
import coremltools.models.datatypes as datatypes
from coremltools.models import _MLMODEL_FULL_PRECISION, _MLMODEL_HALF_PRECISION
from coremltools.models import neural_network as neural_network
from coremltools.models.neural_network import flexible_shape_utils
from coremltools.models.utils import macos_version, is_macos
np.random.seed(10)
MIN_MACOS_VERSION_REQUIRED = (10, 13)
LAYERS_10_15_MACOS_VERSION = (10, 15)
def _get_unary_model_spec(x, mode, alpha=1.0):
input_dim = x.shape
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_unary(name='unary', input_name='data',
output_name='output', mode=mode, alpha=alpha)
return builder.spec
class CorrectnessTest(unittest.TestCase):
def runTest(self):
pass
def _compare_shapes(self, np_preds, coreml_preds):
return np.squeeze(np_preds).shape == np.squeeze(coreml_preds).shape
def _compare_nd_shapes(self, np_preds, coreml_preds, shape=()):
if shape:
return coreml_preds.shape == shape
else:
# check if shape has 0 valued dimension
if np.prod(np_preds.shape) == 0 and np.prod(coreml_preds.shape) == 0:
return True
return coreml_preds.shape == np_preds.shape
def _compare_predictions(self, np_preds, coreml_preds, delta=.01):
np_preds = np_preds.flatten()
coreml_preds = coreml_preds.flatten()
for i in range(len(np_preds)):
max_den = max(1.0, np_preds[i], coreml_preds[i])
if np.abs(
np_preds[i] / max_den - coreml_preds[i] / max_den) > delta:
return False
return True
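    # Note: the max(1.0, a, b) denominator above yields a relative comparison
    # for large magnitudes and degrades gracefully to an absolute
    # |a - b| <= delta check when both values are at most 1.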
@staticmethod
def _compare_moments(model, inputs, expected, use_cpu_only=True, num_moments=10):
"""
This utility function is used for validate random distributions layers.
It validates the first 10 moments of prediction and expected values.
"""
def get_moment(data, k):
return np.mean(np.power(data - np.mean(data), k))
        # MLModel accepts either a .mlmodel path or a spec proto directly,
        # so construct it once (re-wrapping an MLModel would raise).
        if not isinstance(model, coremltools.models.MLModel):
            model = coremltools.models.MLModel(model, useCPUOnly=use_cpu_only)
prediction = model.predict(inputs, useCPUOnly=use_cpu_only)
for output_name in expected:
np_preds = expected[output_name]
coreml_preds = prediction[output_name]
np_moments = [get_moment(np_preds.flatten(), k) for k in range(num_moments)]
coreml_moments = [get_moment(coreml_preds.flatten(), k) for k in range(num_moments)]
np.testing.assert_almost_equal(np_moments, coreml_moments, decimal=2)
# override expected values to allow element-wise compares
for output_name in expected:
expected[output_name] = prediction[output_name]
def _test_model(self,
model,
input,
expected,
model_precision=_MLMODEL_FULL_PRECISION,
useCPUOnly=False,
output_name_shape_dict={},
validate_shapes_only=False):
model_dir = None
# if we're given a path to a model
if isinstance(model, str):
model = coremltools.models.MLModel(model)
# If we're passed in a specification, save out the model
# and then load it back up
elif isinstance(model, coremltools.proto.Model_pb2.Model):
model_dir = tempfile.mkdtemp()
model_name = str(uuid.uuid4()) + '.mlmodel'
model_path = os.path.join(model_dir, model_name)
coremltools.utils.save_spec(model, model_path)
model = coremltools.models.MLModel(model, useCPUOnly=useCPUOnly)
# If we want to test the half precision case
if model_precision == _MLMODEL_HALF_PRECISION:
model = coremltools.utils.convert_neural_network_weights_to_fp16(
model)
try:
prediction = model.predict(input, useCPUOnly=useCPUOnly)
for output_name in expected:
if self.__class__.__name__ == "SimpleTest":
assert (self._compare_shapes(expected[output_name],
prediction[output_name]))
else:
if output_name in output_name_shape_dict:
output_shape = output_name_shape_dict[output_name]
else:
output_shape = []
if len(output_shape) == 0 and len(expected[output_name].shape) == 0:
output_shape = (1,)
assert (self._compare_nd_shapes(expected[output_name],
prediction[output_name],
output_shape))
if not validate_shapes_only:
assert (self._compare_predictions(expected[output_name],
prediction[output_name]))
finally:
# Remove the temporary directory if we created one
if model_dir and os.path.exists(model_dir):
shutil.rmtree(model_dir)
@unittest.skipIf(not is_macos() or macos_version() < MIN_MACOS_VERSION_REQUIRED,
'macOS 10.13+ is required. Skipping tests.')
class SimpleTest(CorrectnessTest):
def test_tiny_upsample_linear_mode(self):
input_dim = (1, 1, 3) # (C,H,W)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_upsample(name='upsample',
scaling_factor_h=2, scaling_factor_w=3,
input_name='data', output_name='output',
mode='BILINEAR')
input = {
'data': np.reshape(np.array([1.0, 2.0, 3.0]), (1, 1, 3))
}
expected = {
'output': np.array(
[[1, 1.333, 1.666, 2, 2.333, 2.666, 3, 3, 3],
[1, 1.333, 1.6666, 2, 2.33333, 2.6666, 3, 3, 3]
])
}
self._test_model(builder.spec, input, expected)
        self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_LRN(self):
input_dim = (1, 3, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_lrn(name='lrn', input_name='data', output_name='output',
alpha=2, beta=3, local_size=1, k=8)
input = {
'data': np.ones((1, 3, 3))
}
expected = {
'output': 1e-3 * np.ones((1, 3, 3))
}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_MVN(self):
input_dim = (2, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_mvn(name='mvn', input_name='data', output_name='output',
across_channels=False, normalize_variance=False)
input = {
'data': np.reshape(np.arange(8, dtype=np.float32), (2, 2, 2))
}
expected = {
'output': np.reshape(np.arange(8) - np.array(
[1.5, 1.5, 1.5, 1.5, 5.5, 5.5, 5.5, 5.5]), (2, 2, 2))
}
self._test_model(builder.spec, input, expected)
def test_L2_normalize(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_l2_normalize(name='mvn', input_name='data',
output_name='output')
input = {
'data': np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
}
expected = {
'output': np.reshape(np.arange(4, dtype=np.float32),
(1, 2, 2)) / np.sqrt(14)
}
self._test_model(builder.spec, input, expected)
def test_unary_sqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.sqrt(x)}
spec = _get_unary_model_spec(x, 'sqrt')
self._test_model(spec, input, expected)
def test_unary_rsqrt(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / np.sqrt(x)}
spec = _get_unary_model_spec(x, 'rsqrt')
self._test_model(spec, input, expected)
def test_unary_inverse(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 1 / x}
spec = _get_unary_model_spec(x, 'inverse')
self._test_model(spec, input, expected)
def test_unary_power(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x ** 3}
spec = _get_unary_model_spec(x, 'power', 3)
self._test_model(spec, input, expected)
def test_unary_exp(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.exp(x)}
spec = _get_unary_model_spec(x, 'exp')
self._test_model(spec, input, expected)
def test_unary_log(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.log(x)}
spec = _get_unary_model_spec(x, 'log')
self._test_model(spec, input, expected)
def test_unary_abs(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.abs(x)}
spec = _get_unary_model_spec(x, 'abs')
self._test_model(spec, input, expected)
def test_unary_threshold(self):
x = np.reshape(np.arange(1, 5, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': np.maximum(x, 2)}
spec = _get_unary_model_spec(x, 'threshold', 2)
self._test_model(spec, input, expected)
def test_split(self):
input_dim = (9, 2, 2)
x = np.random.rand(*input_dim)
input_features = [('data', datatypes.Array(*input_dim))]
output_names = []
output_features = []
for i in range(3):
out = 'out_' + str(i)
output_names.append(out)
output_features.append((out, None))
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_split(name='split', input_name='data',
output_names=output_names)
input = {'data': x}
expected = {
'out_0': x[0: 3, :, :],
'out_1': x[3: 6, :, :],
'out_2': x[6: 9, :, :]
}
self._test_model(builder.spec, input, expected)
for output_ in output_names:
self.assertEqual(len(input_dim), builder._get_rank(output_))
def test_scale_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_scale(name='scale', W=5, b=45, has_bias=True,
input_name='data', output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': 5 * x + 45}
self._test_model(builder.spec, input, expected)
def test_scale_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_scale(name='scale', W=W, b=None, has_bias=False,
input_name='data', output_name='output',
shape_scale=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': W * x}
self._test_model(builder.spec, input, expected)
def test_bias_constant(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_bias(name='bias', b=45, input_name='data',
output_name='output')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + 45}
self._test_model(builder.spec, input, expected)
def test_bias_matrix(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected)
def test_load_constant(self, model_precision=_MLMODEL_FULL_PRECISION):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_load_constant(name='load_constant', output_name='bias',
constant_value=b, shape=[1, 2, 2])
builder.add_elementwise(name='add', input_names=['data', 'bias'],
output_name='output', mode='ADD')
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, model_precision)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_load_constant_half_precision(self):
self.test_load_constant(model_precision=_MLMODEL_HALF_PRECISION)
def test_min(self):
input_dim = (1, 2, 2)
input_features = [('data_0', datatypes.Array(*input_dim)),
('data_1', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_elementwise(name='min', input_names=['data_0', 'data_1'],
output_name='output', mode='MIN')
x1 = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
x2 = np.reshape(np.arange(2, 6, dtype=np.float32), (1, 2, 2))
input = {'data_0': x1, 'data_1': x2}
expected = {'output': np.minimum(x1, x2)}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_conv_same_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='conv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='same', groups=1,
W=W, b=None, has_bias=False,
input_name='data', output_name='output',
same_padding_asymmetry_mode='TOP_LEFT_HEAVY')
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 8, 8)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
self.assertEqual(len(input_dim), builder._get_rank('output'))
def test_deconv_valid_padding(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
W = np.random.rand(3, 3, 10, 20)
builder.add_convolution(name='deconv', kernel_channels=10,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=1,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_deconv_non_unit_groups(self):
input_dim = (16, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
W = np.random.rand(3, 3, 16, 5)
builder.add_convolution(name='deconv', kernel_channels=16,
output_channels=20,
height=3, width=3, stride_height=2,
stride_width=2,
border_mode='valid', groups=4,
W=W, b=None, has_bias=False,
is_deconv=True,
input_name='data', output_name='output',
padding_top=2, padding_bottom=3,
padding_left=2, padding_right=3)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.random.rand(20, 26, 26)}
self._test_model(
builder.spec, input, expected, validate_shapes_only=True)
def test_linear_activation(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected)
def test_padding_constant(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features)
builder.add_padding(name='pad',
left=1, right=0, top=2, bottom=0,
value=-1,
input_name='data',
output_name='output')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(
np.array([[-1, -1, -1, -1], [-1, -1, -1, -1], [-1, 1, 2, 3],
[-1, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_padding_replication(self):
input_dim = (1, 2, 3)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_padding(name='pad',
left=1, top=2,
input_name='data',
output_name='output', padding_type='replication')
x = np.reshape(np.array([[1, 2, 3], [4, 5, 6]]), (1, 2, 3)).astype(
np.float32)
input = {'data': x}
y = np.reshape(np.array([[1, 1, 2, 3], [1, 1, 2, 3], [1, 1, 2, 3],
[4, 4, 5, 6]]), (1, 4, 4)).astype(np.float32)
expected = {'output': y}
self._test_model(builder.spec, input, expected)
def test_reshape_target_shape_3(self):
input_dim = (1, 2, 5) # (C,H,W)
target_dim = (10, 1, 1)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=target_dim,
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (10, 1, 1))}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(target_dim), builder._get_rank('output'))
def test_reshape_target_shape_4(self):
input_dim = (1, 2, 5) # (C,H,W)
target_dim = (1, 10, 1, 1)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_reshape(name='reshape', input_name='data',
output_name='output', target_shape=target_dim,
mode=0)
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': np.reshape(x, (1, 10, 1, 1))}
self._test_model(builder.spec, input, expected)
self.assertEqual(len(target_dim), builder._get_rank('output'))
def test_bias_matrix_cpu(self):
input_dim = (1, 2, 2)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
b = np.reshape(np.arange(5, 9), (1, 2, 2))
builder.add_bias(name='bias', b=b, input_name='data',
output_name='output',
shape_bias=[1, 2, 2])
x = np.reshape(np.arange(4, dtype=np.float32), (1, 2, 2))
input = {'data': x}
expected = {'output': x + b}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_linear_activation_cpu(self):
input_dim = (10, 15, 15)
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features,
output_features)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
@unittest.skipIf(not is_macos() or macos_version() < LAYERS_10_15_MACOS_VERSION,
'macOS 10.15+ required. Skipping tests.')
class NewLayersSimpleTest(CorrectnessTest):
def test_shape_flexibility_range(self):
input_features = [('data', datatypes.Array(*(3,4)))]
builder = neural_network.NeuralNetworkBuilder(input_features,
[('output', None)], disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
flexible_shape_utils.set_multiarray_ndshape_range(spec, feature_name='data',
lower_bounds=[1,1], upper_bounds=[-1,5])
shapes = [(3,4), (1,5), (60,5), (22,4), (5,3)]
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_shape_flexibility_enumeration(self, rank=4):
default_shape = tuple(np.random.randint(1, 15, size=rank))
input_features = [('data', datatypes.Array(*default_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features=input_features,
output_features=[('output', None)],
disable_rank5_shape_mapping=True)
builder.add_sin(name='sin', input_name='data', output_name='output')
spec = builder.spec
shapes = [tuple(np.random.randint(1, 15, size=rank)),
tuple(np.random.randint(1, 15, size=rank))]
flexible_shape_utils.add_multiarray_ndshape_enumeration(
spec, feature_name='data', enumerated_shapes=shapes)
shapes.append(default_shape)
for s in shapes:
x = np.random.rand(*s)
expected = {'output': np.sin(x)}
self._test_model(spec, {'data': x}, expected, useCPUOnly=True)
def test_shape_flexibility_enumeration_rank3(self):
self.test_shape_flexibility_enumeration(rank=3)
def test_shape_flexibility_enumeration_rank2(self):
self.test_shape_flexibility_enumeration(rank=2)
def test_transpose_cpu(self):
for rank in range(1, 6):
axes = np.random.permutation(rank)
axes = [axis - rank if np.random.choice([True, False]) else axis for axis in axes]
input_shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_transpose(name='TransposeND',
axes=axes,
input_name='data',
output_name='output')
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.transpose(x, axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_dynamic_weight_conv(self):
input_dim = (1, 3, 16, 16)
# weight layout: (output_channels, kernel_channels, height, width)
weight_dim = (4, 3, 3, 3)
output_dim = (1, 4, 14, 14)
        output_channels, kernel_channels, height, width = weight_dim
input_features = [
('input', datatypes.Array(*input_dim)),
('weight', datatypes.Array(*weight_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features,
output_features,
disable_rank5_shape_mapping=True)
builder.add_convolution(
name='two_input_conv_layer',
kernel_channels=kernel_channels,
output_channels=output_channels,
height=height,
width=width,
stride_height=1,
stride_width=1,
border_mode='valid',
groups=1,
W=None,
b=None,
has_bias=False,
input_name=['input', 'weight'],
output_name='output')
        # Assigning everything to ones exercises the execution path and
        # catches engine failures, but is not a complete check on numerics:
        # each output element is 3 channels * 3 * 3 kernel taps = 27.
input_val = np.ones(input_dim)
weight_val = np.ones(weight_dim)
expected = np.ones(output_dim) * 27
feed_dict = {'input': input_val, 'weight': weight_val}
expected = {'output': expected}
self._test_model(builder.spec, feed_dict, expected, useCPUOnly=True)
self._test_model(builder.spec, feed_dict, expected, useCPUOnly=False)
@pytest.mark.xfail
def test_dynamic_weight_deconv(self):
# Expect to fail in Core ML 3
input_dim = (1, 1, 16, 16)
# weight layout: (output_channels, kernel_channels, height, width)
weight_dim = (1, 1, 3, 3)
output_dim = (1, 1, 18, 18)
output_channels, kernel_channels, height, width = weight_dim
input_features = [
('data', datatypes.Array(*input_dim)),
('weight', datatypes.Array(*weight_dim))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features,
output_features,
disable_rank5_shape_mapping=True)
builder.add_convolution(
name='deconv',
kernel_channels=kernel_channels,
output_channels=output_channels,
height=height,
width=width,
stride_height=1,
stride_width=1,
border_mode='valid',
groups=1,
W=None,
b=None,
has_bias=False,
is_deconv=True,
input_name=['data', 'weight'],
output_name='output')
input_val = np.ones(input_dim)
weight_val = np.ones(weight_dim)
expected = np.ones(output_dim) * 27
feed_dict = {'data': input_val, 'weight': weight_val}
expected = {'output': expected}
self._test_model(builder.spec, feed_dict, expected)
def test_batched_mat_mul_cpu(self, cpu_only=True):
a_shapes = [(10,), (4, 10), (10,), (10,), (2, 3), (1, 3, 4),
(1, 3, 1, 2, 3), (2, 3, 1, 3, 4)]
b_shapes = [(10,), (10,), (10, 3), (2, 10, 3), (3, 4), (3, 2, 4, 5),
(1, 4, 3, 2), (2, 1, 2, 4, 5)]
out_shapes = [(1, 1), (4, 1), (1, 3), (2, 1, 3), (2, 4), (3, 2, 3, 5),
(1, 3, 4, 2, 2), (2, 3, 2, 3, 5)]
for a_shape, b_shape, outShape in zip(a_shapes, b_shapes, out_shapes):
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['A', 'B'],
output_name='output',
transpose_a=False,
transpose_b=False)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input_ = {'A': a, 'B': b}
expected = {'output': np.array(np.matmul(a, b))}
shape_dict = {'output': outShape}
self._test_model(builder.spec, input_, expected, useCPUOnly=cpu_only,
output_name_shape_dict=shape_dict)
self.assertEqual(len(outShape), builder._get_rank('output'))
def test_batched_mat_mul_gpu(self):
self.test_batched_mat_mul_cpu(cpu_only=False)
def test_batched_mat_mul_with_transposes_cpu(self, cpu_only=True):
for transpose_a, transpose_b in itertools.product([True, False],
[True, False]):
a_shape = (3, 4)
b_shape = (4, 5)
a_shape = a_shape[::-1] if transpose_a else a_shape
b_shape = b_shape[::-1] if transpose_b else b_shape
input_shapes = [a_shape, b_shape]
input_features = [
('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_batched_mat_mul(
name='BatchedMatMul', input_names=['A', 'B'],
output_name='output', transpose_a=transpose_a,
transpose_b=transpose_b
)
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
inputs = {'A': a, 'B': b}
a = a.T if transpose_a else a
b = b.T if transpose_b else b
expected = {'output': np.matmul(a, b)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_batched_mat_mul_with_transposes_gpu(self):
self.test_batched_mat_mul_with_transposes_cpu(cpu_only=False)
def test_batched_mat_mul_single_input_cpu(self,
model_precision=_MLMODEL_FULL_PRECISION,
cpu_only=True):
X1 = 11
X2 = 23
W = np.random.rand(X1, X2)
bias = np.random.rand(X2)
input_shapes = [(X1,), (5, X1), (2, 3, X1), (4, 1, X1), (12, 5, 8, X1),
(2, 3, 1, 5, X1)]
for input_shape in input_shapes:
x = np.random.rand(*input_shape)
np_out = np.matmul(x, W) + bias
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_batched_mat_mul(name='batched_mat_mul',
input_names=['data'],
output_name='output',
weight_matrix_rows=X1,
weight_matrix_columns=X2,
W=W, bias=bias)
inputs = {'data': x}
self._test_model(
builder.spec, inputs, expected,
model_precision=model_precision, useCPUOnly=cpu_only)
def test_batched_mat_mul_single_input_half_precision_cpu(self):
self.test_batched_mat_mul_single_input_cpu(
model_precision=_MLMODEL_HALF_PRECISION,
cpu_only=True)
def test_batched_mat_mul_single_input_gpu(self):
self.test_batched_mat_mul_single_input_cpu(model_precision=_MLMODEL_FULL_PRECISION, cpu_only=False)
def test_embedding_nd_cpu(
self, model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=True):
vocab_size = 10
embedding_size = 19
W = np.random.rand(embedding_size, vocab_size)
input_shapes = [(5, 1), (2, 3, 1), (4, 1, 1), (12, 5, 8, 1),
(2, 3, 1, 5, 1)]
for input_shape in input_shapes:
x = np.random.randint(vocab_size, size=input_shape)
np_out = np.take(np.transpose(W), np.squeeze(x, axis=-1), axis=0)
expected = {'output': np_out}
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_embedding_nd(name='embedding_nd',
input_name='data',
output_name='output',
vocab_size=vocab_size,
embedding_size=embedding_size,
W=W)
input = {'data': x.astype(np.float32)}
self._test_model(
builder.spec, input, expected,
model_precision=model_precision, useCPUOnly=use_cpu_only)
def test_embedding_nd_half_precision_cpu(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=True)
def test_embedding_nd_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_FULL_PRECISION, use_cpu_only=False)
def test_embedding_nd_half_precision_GPU(self):
self.test_embedding_nd_cpu(
model_precision=_MLMODEL_HALF_PRECISION, use_cpu_only=False)
def test_softmax_nan_bug_cpu(self):
input_shape = [2,2]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
for axis in [0,1]:
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.array([[0.5, 0.5],[1e8, 1e8]])
input = {'data': x}
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
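    # The expected value above uses the standard max-subtraction trick:
    # softmax(x) = exp(x - max(x)) / sum(exp(x - max(x))) is algebraically
    # identical to the naive form but cannot overflow for entries like 1e8,
    # which is exactly the NaN regression this test guards against.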
def test_softmax_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_softmax_nd(name='softmax_nd', input_name='data',
output_name='output', axis=axis)
x = np.random.rand(*input_shape)
input = {'data': x}
y = np.exp(x - np.max(x, axis=axis, keepdims=True))
y = y / np.sum(y, axis=axis, keepdims=True)
expected = {'output': y}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_softmax_nd_gpu(self):
self.test_softmax_nd_cpu(cpu_only=False)
def test_concat_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_inputs = np.random.choice(range(2, 5))
output_shape = np.random.randint(low=2, high=5, size=rank)
output_shape[axis] = 0
input_shapes = []
input_features = []
input_names = []
for _ in range(n_inputs):
input_shapes.append(np.copy(output_shape))
input_shapes[-1][axis] = np.random.choice(range(2, 8))
output_shape[axis] += input_shapes[-1][axis]
for i, input_dim in enumerate(input_shapes):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append((input_name, datatypes.Array(*input_dim)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_concat_nd(name='concat_nd', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for input_dim in input_shapes:
input_tensors.append(np.random.rand(*input_dim))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.concatenate(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_concat_nd_gpu(self):
self.test_concat_nd_cpu(cpu_only=False)
def test_fill_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
target_shape = np.random.randint(low=2, high=6, size=rank)
value = float(np.random.rand())
input_features = [('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_like(name='fill_like', input_name='tensor',
output_name='output', value=value)
tensor = np.random.rand(*target_shape)
input = {'tensor': tensor}
expected = {'output': np.zeros(target_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_fill_like_gpu(self):
self.test_fill_like_cpu(cpu_only=False)
def test_fill_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
value = float(np.random.rand())
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_static(name='fill_static', output_name='tmp',
output_shape=list(shape), value=value)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.random.rand(*shape)
input = {'data': data}
expected = {'output': data + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(len(shape), builder._get_rank('output'))
def test_fill_static_gpu(self):
self.test_fill_static_cpu(cpu_only=False)
def test_fill_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
value = float(np.random.rand())
input_features = [('shape', datatypes.Array(len(input_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic', input_name='shape',
output_name='output', value=value)
input = {'shape': np.array(input_shape, dtype='float')}
expected = {'output': np.zeros(input_shape) + value}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(builder._get_rank('output'), -1)
def test_fill_dynamic_gpu(self):
self.test_fill_dynamic_cpu(cpu_only=False)
def test_broadcast_to_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_like(name='broadcast_to_like',
input_names=['data', 'tensor'],
output_name='output')
data = np.random.rand(*input_shape)
tensor = np.random.rand(*target_shape)
inputs = {'data': data, 'tensor': tensor}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_broadcast_to_like_gpu(self):
self.test_broadcast_to_like_cpu(cpu_only=False)
def test_broadcast_to_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_static(name='broadcast_to_static',
input_name='data',
output_name='output',
output_shape=list(target_shape))
data = np.random.rand(*input_shape)
input = {'data': data}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(target_rank, builder._get_rank('output'))
def test_broadcast_to_static_gpu(self):
self.test_broadcast_to_static_cpu(cpu_only=False)
def test_broadcast_to_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('data', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',
input_names=['data', 'shape'],
output_name='output')
data = np.random.rand(*input_shape)
inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}
expected = {'output': np.broadcast_to(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(builder._get_rank('output'), -1)
def test_broadcast_to_dynamic_gpu(self):
self.test_broadcast_to_dynamic_cpu(cpu_only=False)
    # Test that the output rank is reported as unknown (-1) when one of the
    # input ranks is unknown, for the max-rank case.
def test_unknown_rank(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=8, size=rank)
mask = [np.random.choice([True, False, False]) for _ in range(rank)]
input_shape = np.where(mask, 1, input_shape)
target_rank = np.random.randint(low=rank, high=6)
target_shape = [np.random.randint(low=2, high=8) if (-i > rank or input_shape[i] == 1)
else input_shape[i] for i in range(-1, -target_rank - 1, -1)][::-1]
input_features = [('x', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_broadcast_to_dynamic(name='broadcast_to_dynamic',
input_names=['x', 'shape'],
output_name='y')
condition = np.random.randint(0, 2, input_shape).astype(np.float32)
builder.add_load_constant_nd(name='load_constant_condition',
output_name='condition',
constant_value=condition,
shape=input_shape)
builder.add_where_broadcastable(name='where',
input_names=['condition', 'x', 'y'],
output_name='output')
self.assertEqual(builder._get_rank('output'), -1)
def test_trigonometry_cpu(self, cpu_only=True):
ops = ['sin', 'cos', 'tan',
'asin', 'acos', 'atan',
'sinh', 'cosh', 'tanh',
'asinh', 'acosh', 'atanh']
for op in ops:
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
x = np.random.rand(*shape)
if op == 'sin':
builder.add_sin(name=op, input_name='data', output_name='output')
expected = {'output': np.sin(x)}
elif op == 'cos':
builder.add_cos(name=op, input_name='data', output_name='output')
expected = {'output': np.cos(x)}
elif op == 'tan':
builder.add_tan(name=op, input_name='data', output_name='output')
expected = {'output': np.tan(x)}
elif op == 'asin':
builder.add_asin(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsin(x)}
elif op == 'acos':
builder.add_acos(name=op, input_name='data', output_name='output')
expected = {'output': np.arccos(x)}
elif op == 'atan':
builder.add_atan(name=op, input_name='data', output_name='output')
expected = {'output': np.arctan(x)}
elif op == 'sinh':
builder.add_sinh(name=op, input_name='data', output_name='output')
expected = {'output': np.sinh(x)}
elif op == 'cosh':
builder.add_cosh(name=op, input_name='data', output_name='output')
expected = {'output': np.cosh(x)}
elif op == 'tanh':
builder.add_tanh(name=op, input_name='data', output_name='output')
expected = {'output': np.tanh(x)}
elif op == 'asinh':
builder.add_asinh(name=op, input_name='data', output_name='output')
expected = {'output': np.arcsinh(x)}
elif op == 'acosh':
x = np.random.choice([10, np.e, 1], tuple(shape)).astype(np.float32)
builder.add_acosh(name=op, input_name='data', output_name='output')
expected = {'output': np.arccosh(x)}
elif op == 'atanh':
builder.add_atanh(name=op, input_name='data', output_name='output')
expected = {'output': np.arctanh(x)}
self._test_model(builder.spec, {'data': x}, expected, useCPUOnly=cpu_only)
def test_trigonometry_gpu(self):
self.test_trigonometry_cpu(cpu_only=False)
def test_exp2_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_exp2(name='exp2', input_name='data', output_name='output')
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.exp2(x)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_exp2_gpu(self):
self.test_exp2_cpu(cpu_only=False)
def test_elementwise_binary_cpu(self, cpu_only=True):
input_names = ['A', 'B']
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal', 'logical_and', 'logical_or', 'logical_xor',
'add', 'subtract', 'multiply', 'divide', 'power',
'maximum', 'minimum', 'floor_divide', 'mod']
for test_case in test_cases:
for _ in range(10):
rank_a = np.random.randint(low=1, high=6)
rank_b = np.random.randint(low=1, high=6)
rank_out = max(rank_a, rank_b)
shape_a = np.random.randint(low=2, high=8, size=rank_a)
shape_b = np.random.randint(low=2, high=8, size=rank_b)
for i in range(-1, -rank_out - 1, -1):
dims = []
if -i <= rank_a: dims.append(shape_a[i])
if -i <= rank_b: dims.append(shape_b[i])
dim = np.random.choice(dims)
if -i <= rank_a: shape_a[i] = np.random.choice([1, dim])
if -i <= rank_b: shape_b[i] = np.random.choice([1, dim])
input_shapes = [shape_a, shape_b]
input_features = [('A', datatypes.Array(*input_shapes[0])),
('B', datatypes.Array(*input_shapes[1]))]
builder = neural_network.NeuralNetworkBuilder(input_features, [
('output', None)], disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output')
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output')
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True)
elif test_case == 'logical_and':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='AND')
elif test_case == 'logical_or':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='OR')
elif test_case == 'logical_xor':
builder.add_logical(test_case, input_names=input_names,
output_name='output', mode='XOR')
elif test_case == 'add':
builder.add_add_broadcastable(test_case, input_names=input_names,
output_name='output')
elif test_case == 'subtract':
builder.add_subtract_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'multiply':
builder.add_multiply_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'divide':
builder.add_divide_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'power':
builder.add_pow_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'maximum':
builder.add_max_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'minimum':
builder.add_min_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'floor_divide':
builder.add_floor_div_broadcastable(test_case,
input_names=input_names,
output_name='output')
elif test_case == 'mod':
builder.add_mod_broadcastable(test_case,
input_names=input_names,
output_name='output')
a = np.random.rand(*input_shapes[0])
b = np.random.rand(*input_shapes[1])
input = {'A': a, 'B': b}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_elementwise_binary_gpu(self):
self.test_elementwise_binary_cpu(cpu_only=False)
def test_elementwise_boolean_unary_cpu(self, cpu_only=True):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
test_cases = ['greater', 'less', 'equal', 'not_equal', 'greater_equal',
'less_equal']
for test_case in test_cases:
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
b = np.random.rand()
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
func = getattr(np, test_case)
if test_case == 'greater':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'less':
builder.add_less_than(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'equal':
builder.add_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'not_equal':
builder.add_not_equal(test_case, input_names=input_names,
output_name='output', alpha=b)
elif test_case == 'greater_equal':
builder.add_greater_than(test_case, input_names=input_names,
output_name='output',
use_greater_than_equal=True,
alpha=b)
elif test_case == 'less_equal':
builder.add_less_than(test_case, input_names=input_names,
output_name='output',
use_less_than_equal=True, alpha=b)
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': func(a, b, dtype=np.float32)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_elementwise_boolean_unary_gpu(self):
self.test_elementwise_boolean_unary_cpu(cpu_only=False)
def test_logical_not_cpu(self, cpu_only=True):
input_names = ['input']
shapes = [(1, 2, 3, 1), (3, 1, 2, 1, 2), (1, 2, 1, 3), (2, 3),
(2, 1, 1), (2, 3, 4), (2, 4), (1,), (1,)]
for shape in shapes:
input_features = [('input', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_logical('logical_not', input_names=input_names,
output_name='output', mode='NOT')
a = np.random.rand(*shape)
input = {'input': a}
expected = {'output': np.logical_not(a)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_logical_not_gpu(self):
self.test_logical_not_cpu(cpu_only=False)
def test_stack_cpu(self, cpu_only=True):
for input_rank in range(1, 5):
for axis in range(-input_rank - 1, input_rank + 1):
n_inputs = np.random.choice(range(2, 5))
input_shape = np.random.randint(low=2, high=5, size=input_rank)
input_features = []
input_names = []
for i in range(n_inputs):
input_name = 'input_%s' % str(i)
input_names.append(input_name)
input_features.append(
(input_name, datatypes.Array(*input_shape)))
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_stack(name='stack', input_names=input_names,
output_name='output', axis=axis)
input_tensors = []
for _ in range(n_inputs):
input_tensors.append(np.random.rand(*input_shape))
input = dict(zip(input_names, input_tensors))
expected = {'output': np.stack(input_tensors, axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(input_rank + 1, builder._get_rank('output'))
def test_stack_gpu(self):
self.test_stack_cpu(cpu_only=False)
def test_ceil_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_ceil(name='ceil', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.ceil(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_ceil_gpu(self):
self.test_ceil_cpu(cpu_only=False)
def test_floor_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_floor(name='floor', input_name='data', output_name='output')
x = np.random.rand(*shape)
inputs = {'data': x}
expected = {'output': np.floor(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_floor_gpu(self):
self.test_floor_cpu(cpu_only=False)
def test_round_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_round(name='round', input_name='data', output_name='output')
x = np.float32(np.random.rand(*shape) * np.random.randint(low=-100, high=101))
inputs = {'data': x}
expected = {'output': np.around(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_round_gpu(self):
self.test_round_cpu(cpu_only=False)
def test_sign_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_sign(name='sign', input_name='data', output_name='output')
x = np.random.choice([-np.random.rand(1), 0.0, np.random.rand(1)],
tuple(shape)).astype(np.float32)
inputs = {'data': x}
expected = {'output': np.sign(x)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_sign_gpu(self):
self.test_sign_cpu(cpu_only=False)
def test_clip_cpu(self, cpu_only=True):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', datatypes.Array(*shape))]
x = np.random.rand(*shape)
min_value = np.percentile(x, 25)
max_value = np.percentile(x, 75)
input = {'data': x}
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_clip(name='clip', input_name='data', output_name='output',
min_value=min_value, max_value=max_value)
expected = {'output': np.clip(x, min_value, max_value)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_clip_gpu(self):
self.test_clip_cpu(cpu_only=False)
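# Splitting with num_splits exercises both the even case (np.split) and the
# "almost equal" case (np.array_split). As a worked example of the latter,
# splitting a length-7 axis into 3 pieces yields sizes [3, 2, 2]: the
# remainder (7 % 3 == 1) is handed one element at a time to the leading
# outputs, which is exactly how output_shapes is constructed below.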
def test_split_nd_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes = []
output_features = []
output_names = []
almost_equal = random.choice([True, False])
remainder = np.random.choice(
range(1, n_outputs)) if almost_equal else 0
value = np.random.choice(range(2, 5))
for k in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][
axis] = value + 1 if k < remainder else value
input_shape[axis] += output_shapes[-1][axis]
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
num_splits=n_outputs)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(
output_names, np.array_split(x, n_outputs, axis=axis)
if almost_equal else np.split(x, n_outputs, axis=axis)
)
) # Explicitly trying to compare against both versions of numpy split
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
for output_ in output_names:
self.assertEqual(rank, builder._get_rank(output_))
def test_split_nd_gpu(self):
self.test_split_nd_cpu(cpu_only=False)
def test_split_nd_with_split_sizes_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
n_outputs = np.random.choice(range(2, 4))
input_shape = np.random.randint(low=2, high=5, size=rank)
input_shape[axis] = 0
output_shapes, output_features, output_names = [], [], []
sections, split_sizes = [], []
for _ in range(n_outputs):
output_shapes.append(np.copy(input_shape))
output_shapes[-1][axis] = np.random.choice(range(2, 5))
input_shape[axis] += output_shapes[-1][axis]
sections.append(input_shape[axis])
split_sizes.append(output_shapes[-1][axis])
sections.pop()
for i in range(n_outputs):
output_name = 'output_%s' % str(i)
output_names.append(output_name)
output_features.append(
(output_name, None))
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_split_nd(name='split_nd', input_name='data',
output_names=output_names, axis=axis,
split_sizes=split_sizes)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = dict(
zip(output_names, np.split(x, sections, axis=axis)))
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
for output_ in output_names:
self.assertEqual(rank, builder._get_rank(output_))
def test_split_nd_with_split_sizes_gpu(self):
self.test_split_nd_with_split_sizes_cpu(cpu_only=False)
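# begin_masks / end_masks follow the strided-slice convention: a True mask
# ignores the corresponding id and uses the full extent, i.e. it behaves like
# omitting that bound in a Python slice (slice(None, end) / slice(begin, None)).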
def test_slice_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(200):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
objs.append(obj)
strides.append(stride)
begin_masks.append(begin_mask)
end_masks.append(end_mask)
begin_ids.append(begin_id)
end_ids.append(end_id)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_slice_static('slice_static', 'data', 'output',
begin_ids=begin_ids, end_ids=end_ids, strides=strides,
begin_masks=begin_masks, end_masks=end_masks)
x = np.random.rand(*input_shape)
inputs = {'data': x}
expected = {'output': x[tuple(objs)]}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_slice_static_gpu(self):
self.test_slice_static_cpu(cpu_only=False)
def test_slice_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.array([5 for _ in range(rank)])
objs, strides, begin_masks, end_ids, end_masks, begin_ids = [], [], [], [], [], []
for dim in range(rank):
stride = random.choice([-3, -1, 1, 2])
begin_mask = random.choice([True, False])
end_mask = random.choice([True, False])
length = 0
while length <= 0:
begin_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
end_id = np.random.randint(low=-input_shape[dim],
high=input_shape[dim])
obj = slice(None if begin_mask else begin_id,
None if end_mask else end_id, stride)
length = np.arange(input_shape[dim])[(obj,)].shape[0]
objs.append(obj)
strides.append(stride)
begin_masks.append(begin_mask)
end_masks.append(end_mask)
begin_ids.append(begin_id)
end_ids.append(end_id)
# test different number of inputs, from 2 inputs up to 6 inputs
# when num_inputs == 2, begin_ids are inputs, rest are read from parameters
# when num_inputs == 6, all read from inputs, none are read from parameters
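# e.g. when num_inputs == 4, begin_ids, end_ids and strides arrive as int32
# input tensors while both mask vectors remain layer parameters.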
for num_inputs in [2, 3, 4, 5, 6]:
x = np.random.rand(*input_shape)
input_features = [('data', datatypes.Array(*input_shape))]
input_names = ['data']
inputs = dict()
inputs['data'] = x
if num_inputs == 2:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids)))]
input_names = ['data', 'begin_ids']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
elif num_inputs == 3:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids)))]
input_names = ['data', 'begin_ids', 'end_ids']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
elif num_inputs == 4:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides)))]
input_names = ['data', 'begin_ids', 'end_ids', 'strides']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
elif num_inputs == 5:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides))),
('begin_masks', datatypes.Array(len(begin_masks)))]
input_names = ['data', 'begin_ids', 'end_ids', 'strides', 'begin_masks']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
inputs['begin_masks'] = np.array(begin_masks, dtype=np.int32)
elif num_inputs == 6:
input_features = [('data', datatypes.Array(*input_shape)),
('begin_ids', datatypes.Array(len(begin_ids))),
('end_ids', datatypes.Array(len(end_ids))),
('strides', datatypes.Array(len(strides))),
('begin_masks', datatypes.Array(len(begin_masks))),
('end_masks', datatypes.Array(len(end_masks)))]
input_names = ['data', 'begin_ids', 'end_ids',
'strides', 'begin_masks', 'end_masks']
inputs['begin_ids'] = np.array(begin_ids, dtype=np.int32)
inputs['end_ids'] = np.array(end_ids, dtype=np.int32)
inputs['strides'] = np.array(strides, dtype=np.int32)
inputs['begin_masks'] = np.array(begin_masks, dtype=np.int32)
inputs['end_masks'] = np.array(end_masks, dtype=np.int32)
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
if num_inputs == 2:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
end_ids=end_ids, strides=strides,
begin_masks=begin_masks, end_masks=end_masks)
elif num_inputs == 3:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
strides=strides, begin_masks=begin_masks,
end_masks=end_masks)
elif num_inputs == 4:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
begin_masks=begin_masks, end_masks=end_masks)
elif num_inputs == 5:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output',
end_masks=end_masks)
elif num_inputs == 6:
builder.add_slice_dynamic('slice_dynamic', input_names, 'output')
expected = {'output': x[tuple(objs)]}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_slice_dynamic_gpu(self):
self.test_slice_dynamic_cpu(cpu_only=False)
def test_tile_cpu(self, cpu_only=True):
for rank in range(1, 6):
input_shape = np.random.randint(low=2, high=5, size=rank)
for rep_rank in range(1, rank + 1):
reps = list(np.random.randint(low=1, high=9, size=rep_rank))
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_tile('Tile', 'data', 'output', reps)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.tile(x, reps)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_tile_gpu(self):
self.test_tile_cpu(cpu_only=False)
def test_sliding_windows_cpu(self, cpu_only=True):
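# NumPy reference for the sliding-windows layer, built with stride tricks:
# the window axis gets an effective stride of step * original stride. As a
# worked example, a 1-D input of length 6 with window_size=3 and step=2
# yields n = (6 - 3) // 2 + 1 = 2 windows, i.e. an output of shape (2, 3).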
def numpy_sliding_windows(a, np_axis, np_size, np_step):
n = (a.shape[np_axis] - np_size) // np_step + 1
shape = list(a.shape)
shape[np_axis] = n
if np_axis < 0:
np_axis += len(shape)
shape.insert(np_axis + 1, np_size)
strides = list(a.strides)
effstride = strides[np_axis] * np_step
strides.insert(np_axis, effstride)
return np.lib.stride_tricks.as_strided(a, shape, strides)
for rank in range(1, 5):
for axis in range(-rank, rank):
input_shape = np.random.randint(low=2, high=5, size=rank)
output_shape = list(input_shape)
window_size = np.random.randint(low=1, high=input_shape[axis])
length = 0
while length <= 0:
step = np.random.randint(low=1, high=input_shape[axis])
length = (input_shape[axis] - window_size) // step + 1
output_shape[axis] = length
pos_axis = axis if axis >= 0 else axis + rank
output_shape.insert(pos_axis + 1, window_size)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_sliding_windows('sliding_windows',
input_name='data',
output_name='output',
axis=axis,
window_size=window_size,
step=step)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': numpy_sliding_windows(x, axis, window_size, step)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(rank+1, builder._get_rank('output'))
def test_sliding_windows_gpu(self):
self.test_sliding_windows_cpu(cpu_only=False)
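# range_static produces a constant tensor, so the graph also multiplies it by
# a ones-valued network input; presumably this keeps the model from being
# input-free, which the CoreML spec does not allow.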
def test_range_static_cpu(self, cpu_only=True):
params = [(-10.4, 23, 12.2), (0, 1000, 1), (50.5, 90.5, 1.5), (5, 8, 2),
(5, 8, 98), (5, 8, 1.5), (10, 5, -0.6), (24, -65, -2)]
for param in params:
start, end, step = param
input_features = [('multiplicative_input', datatypes.Array(1))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_range_static('range_static', 'output_range',
end=end, start=start, step=step)
builder.add_multiply_broadcastable(
name='multiply_broadcastable',
input_names=['multiplicative_input', 'output_range'],
output_name='output')
# save the model
model_dir = tempfile.mkdtemp()
model_path = os.path.join(model_dir, 'test_layer.mlmodel')
coremltools.utils.save_spec(builder.spec, model_path)
inputs = dict()
inputs['multiplicative_input'] = np.ones((1,), dtype=np.float64)
expected = {'output': np.arange(start, end, step)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(1, builder._get_rank('output'))
def test_range_static_gpu(self):
self.test_range_static_cpu(cpu_only=False)
def test_range_dynamic_cpu(self, cpu_only=True):
params = [(-10.4, 23, 12.2), (0, 1000, 1), (50.5, 90.5, 1.5), (5, 8, 2),
(5, 8, 98), (5, 8, 1.5), (10, 5, -0.6), (24, -65, -2)]
# input size == 1: end is input, start and step are read from parameters
# input size == 2: end, start are inputs, step is read from parameters
# input size == 3: start, end, step are all inputs, none of the parameters are used.
for num_inputs in [1, 2, 3]:
for param in params:
inputs = dict()
start, end, step = param
if num_inputs == 1:
input_features = [('end', datatypes.Array(1))]
elif num_inputs == 2:
input_features = [('end', datatypes.Array(1)),
('start', datatypes.Array(1))]
elif num_inputs == 3:
input_features = [('end', datatypes.Array(1)),
('start', datatypes.Array(1)),
('step', datatypes.Array(1))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
if num_inputs == 1:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end'],
start=start, step=step)
elif num_inputs == 2:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
inputs['start'] = start * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end', 'start'],
step=step)
elif num_inputs == 3:
inputs['end'] = end * np.ones((1,), dtype=np.float64)
inputs['start'] = start * np.ones((1,), dtype=np.float64)
inputs['step'] = step * np.ones((1,), dtype=np.float64)
builder.add_range_dynamic('range_dynamic',
output_name='output',
input_names=['end', 'start', 'step'])
expected = {'output': np.arange(start, end, step)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(1, builder._get_rank('output'))
def test_range_dynamic_gpu(self):
self.test_range_dynamic_cpu(cpu_only=False)
def test_linear_activation_different_ranks_cpu(self, cpu_only=True):
for input_dim in [(10, 15), (10, 15, 2, 3),
(10, 2, 4, 15, 1, 4), (6,)]:
input_features = [('data', datatypes.Array(*input_dim))]
output_features = [('output', datatypes.Array(*input_dim))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_activation(name='activation',
non_linearity='LINEAR',
input_name='data',
output_name='output', params=[34.0, 67.0])
x = np.random.rand(*input_dim)
input = {'data': x}
expected = {'output': 34.0 * x + 67.0}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_linear_activation_different_ranks_gpu(self):
self.test_linear_activation_different_ranks_cpu(cpu_only=False)
def test_topk_cpu(self, cpu_only=True):
test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)]
K = [3, 5]
axes = [[0], [0, 1], [1, 2], [0, 3, 1], [1, 3, 4]]
for ii, input_shape in enumerate(test_input_shapes):
for k in K:
for n_inputs in [1, 2]:
for bottom_k_flag in [False, True]:
for axis in axes[ii]:
for negative_axis in [False, True]:
if negative_axis:
axis = axis - len(input_shape)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('values', None), ('indices', None)]
input_names = ['data']
output_names = ['values', 'indices']
if n_inputs == 2:
input_names.append('k_in')
input_features.append(('k_in', datatypes.Array(1)))
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
if n_inputs == 2:
builder.add_topk('topk', input_names, output_names,
axis=axis, use_bottom_k=bottom_k_flag)
else:
builder.add_topk('topk', input_names, output_names,
k=k, axis=axis, use_bottom_k=bottom_k_flag)
data = np.random.randint(low=0, high=int(np.prod(input_shape)), size=input_shape)
data = data.astype(np.float32)
input = {'data': data}
if n_inputs == 2:
input['k_in'] = k * np.ones([1], dtype=np.float32)
# numpy reference values
if bottom_k_flag:
ref_indices = np.argsort(data, axis=axis)
else:
ref_indices = np.argsort(-data, axis=axis)
slc = [slice(None)] * len(input_shape)
slc[axis] = slice(0, k)
ref_indices = ref_indices[tuple(slc)]
ref_values = np.take_along_axis(data, ref_indices, axis=axis)
expected = {'values': ref_values, 'indices': ref_indices}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_topk_gpu(self):
self.test_topk_cpu(cpu_only=False)
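# The constant-pad reference comes from TF1's tf.pad: pads[i][0] values are
# prepended and pads[i][1] values appended along dimension i, all filled with
# the given constant value.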
def test_const_pad_cpu(self, cpu_only=True):
def get_reference(data, pads, value):
with tf.Graph().as_default(), tf.Session() as sess:
x = tf.placeholder(tf.float32, shape=data.shape)
p = tf.placeholder(tf.int32, shape=pads.shape)
y = tf.pad(x, p, mode='CONSTANT', constant_values=value)
return sess.run(y, feed_dict={x: data, p: pads})
value = 34.0
shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 2, 4, 8)]
ctr = 0
for shape in shapes:
rank = len(shape)
for force_zeros_in_end in [0, 2, 6]:
for max_pad_value in range(1, 6):
for n_inputs in [1, 2]:
pads = np.random.randint(low=0, high=max_pad_value, size=(rank, 2))
if force_zeros_in_end > 2 * rank:
continue
# pads = np.reshape(np.array([1,1,1,0,0,1]), (rank, 2))
if force_zeros_in_end != 0:
pads[-force_zeros_in_end:] = 0
data = np.random.rand(*shape)
reference = get_reference(data, pads, value)
ctr += 1
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
input_names = ['data']
if n_inputs == 2:
input_names.append('pads')
input_features.append(('pads', datatypes.Array(2*rank,)))
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
if n_inputs == 2:
builder.add_constant_pad('pad', input_names, 'output', value=value)
else:
builder.add_constant_pad('pad', input_names, 'output', value=value, pad_amounts=pads.flatten())
input = {'data': data}
if n_inputs == 2:
input['pads'] = pads.flatten().astype(np.float64)
expected = {'output': reference}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_const_pad_gpu(self):
self.test_const_pad_cpu(cpu_only=False)
def test_const_pad_mode2_cpu(self, cpu_only=True):
def get_reference(data, output_shape, value, left_pad=False):
with tf.Graph().as_default(), tf.Session() as sess:
x = tf.placeholder(tf.float32, shape=data.shape)
p = tf.placeholder(tf.int32, shape=(len(output_shape), 2))
y = tf.pad(x, p, mode='CONSTANT', constant_values=value)
pads = np.zeros((len(output_shape), 2))
if left_pad:
pads[:, 0] = np.array(output_shape) - np.array(data.shape)
else:
pads[:, 1] = np.array(output_shape) - np.array(data.shape)
return sess.run(y, feed_dict={x: data, p: pads})
value = 34.0
shapes = [(3,), (4, 5), (2, 4, 5), (12, 6, 3, 5, 7), (1, 24, 2, 4, 8)]
out_shapes = [(5,), (4, 8), (2, 4, 10), (20, 6, 7, 10, 7), (5, 24, 10, 4, 10)]
ctr = 0
for ii, shape in enumerate(shapes):
rank = len(shape)
for left_pad in [True, False]:
for n_inputs in [1, 2]:
data = np.random.rand(*shape)
reference = get_reference(data, out_shapes[ii], value, left_pad)
pads = np.zeros((rank, 2))
tmp = np.zeros((rank))
for i in range(rank):
if out_shapes[ii][i] == shape[i]:
tmp[i] = 0
else:
tmp[i] = out_shapes[ii][i]
if left_pad:
pads[:, 0] = tmp
else:
pads[:, 1] = tmp
ctr += 1
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
input_names = ['data']
if n_inputs == 2:
input_names.append('pads')
input_features.append(('pads', datatypes.Array(2*rank,)))
builder = neural_network.NeuralNetworkBuilder(input_features, output_features, disable_rank5_shape_mapping=True)
if n_inputs == 2:
builder.add_constant_pad('pad', input_names, 'output', value=value, pad_to_given_output_size_mode=True)
else:
builder.add_constant_pad('pad', input_names, 'output', value=value, pad_amounts=pads.flatten(), pad_to_given_output_size_mode=True)
input = {'data': data}
if n_inputs == 2:
input['pads'] = pads.flatten().astype(np.float64)
expected = {'output': reference}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_const_pad_mode2_gpu(self):
self.test_const_pad_mode2_cpu(cpu_only=False)
def test_nms_cpu(self, cpu_only=True):
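# Pairwise IoU between all boxes, vectorized over N: for boxes i and j,
# intersection = max(0, min(top_i, top_j) - max(bottom_i, bottom_j))
#              * max(0, min(right_i, right_j) - max(left_i, left_j))
# and IoU = intersection / (area_i + area_j - intersection).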
def _compute_iou_matrix(boxes):
# input is (N,4), in order [center_w, center_h, width, height]
assert len(boxes.shape) == 2
assert boxes.shape[1] == 4
boxes = boxes.astype(np.float64)
center_w, center_h, width, height = np.split(boxes, 4, axis=1) # outs are all (N,1)
top = center_h + 0.5 * height
bottom = center_h - 0.5 * height
left = center_w - 0.5 * width
right = center_w + 0.5 * width
area = width * height
hB = np.minimum(top, np.transpose(top))
wB = np.minimum(right, np.transpose(right))
hA = np.maximum(bottom, np.transpose(bottom))
wA = np.maximum(left, np.transpose(left))
intersection_area = np.maximum(0, hB - hA) * np.maximum(0, wB - wA)
union_area = area + np.transpose(area) - intersection_area
iou = intersection_area / union_area
return iou
def _nms_TF(boxes, scores, iou_threshold, score_threshold, per_class_suppression, M):
# boxes is (B,N,4), in order [center_w, center_h, width, height]
# scores is (B,N,C)
# output shapes: (B,M,4), (B,M,C), (B,M), (B,)
'''
Reference implementation of CoreML's NMS layer, built on
tf.image.non_max_suppression plus NumPy bookkeeping.
'''
B, N, C = scores.shape
iou_threshold = iou_threshold.astype(np.float32)
score_threshold = score_threshold.astype(np.float32)
# convert box ids to TF style
center_w, center_h, width, height = np.split(boxes, 4, axis=-1) # outs are all (B,N,1)
y1 = center_h - 0.5 * height
y2 = center_h + 0.5 * height
x1 = center_w - 0.5 * width
x2 = center_w + 0.5 * width
boxes_tf = np.concatenate((y1, x1, y2, x2), axis=-1) # (B,N,4)
out1 = np.zeros((B, M, 4))
out2 = np.zeros((B, M, C))
out3 = -1 * np.ones((B, M))
out4 = np.zeros((B,))
for b in range(B):
box_coord_matrix = boxes_tf[b, :, :] # (N,4)
score_vector = np.max(scores[b, :, :], axis=-1) # (N,)
if not per_class_suppression:
# this is the simple case as TF directly supports it
with tf.Graph().as_default(), tf.Session() as sess:
box_coord_matrix_pl = tf.placeholder(tf.float32, shape=box_coord_matrix.shape)
score_vector_pl = tf.placeholder(tf.float32, shape=score_vector.shape)
ids_g = tf.image.non_max_suppression(box_coord_matrix_pl,
score_vector_pl,
max_output_size=M, iou_threshold=iou_threshold,
score_threshold=score_threshold)
ids = sess.run(ids_g, feed_dict={box_coord_matrix_pl: box_coord_matrix, score_vector_pl: score_vector})
else:
# this is slightly complicated as TF does not directly support it
class_ids = np.argmax(scores[b, :, :], axis=-1) # (N,)
sorted_score_ids = np.argsort(-score_vector)
box_coord_matrix2 = np.take(box_coord_matrix, sorted_score_ids, axis=0)
score_vector2 = np.take(score_vector, sorted_score_ids)
class_ids = np.take(class_ids, sorted_score_ids)
classes_seen = dict()
ids_intermediate = np.array([], dtype=np.int64)
for n in range(N):
if class_ids[n] in classes_seen:
continue
c = class_ids[n]
classes_seen[c] = True
current_class_ids = np.where(class_ids == c)[0]
if len(current_class_ids) > 0:
feed_in1 = np.take(box_coord_matrix2, current_class_ids, axis=0)
feed_in2 = np.take(score_vector2, current_class_ids)
with tf.Graph().as_default(), tf.Session() as sess:
box_coord_matrix_pl = tf.placeholder(tf.float32, shape=feed_in1.shape)
score_vector_pl = tf.placeholder(tf.float32, shape=feed_in2.shape)
cur_ids_g = tf.image.non_max_suppression(box_coord_matrix_pl,
score_vector_pl,
max_output_size=M, iou_threshold=iou_threshold,
score_threshold=score_threshold)
cur_ids = sess.run(cur_ids_g, feed_dict={box_coord_matrix_pl: feed_in1,
score_vector_pl: feed_in2})
from_sort_ids = np.take(current_class_ids, cur_ids)
ids_intermediate = np.append(ids_intermediate, from_sort_ids)
ids_intermediate.sort()
ids = np.take(sorted_score_ids, ids_intermediate)
xx = len(ids)
if xx == 0:
ids = np.array([np.argmax(score_vector)])
xx = 1
if xx > M:
ids = ids[:M]
xx = len(ids)
out1[b, :xx, :] = np.take(boxes[b, :, :], ids, axis=0)
out2[b, :xx, :] = np.take(scores[b, :, :], ids, axis=0)
out3[b, :xx] = ids
out4[b] = xx
return out1, out2, out3, out4
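# Thresholds are drawn from percentiles of the actual score / IoU
# distributions so the sweep covers the keep-everything, drop-everything and
# mixed regimes for each (B, N, C, M) combination.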
iou_threshold_percentile = [0, 30, 80, 100]
score_threshold_percentile_arr = [0, 40, 100]
N_M_pairs_to_test = [[100, 48], [100, 112]] # N : boxes in, M: max boxes out
number_of_test = 0
for N_M in N_M_pairs_to_test:
for B in [1, 5]:
for C in [1, 7]:
N, M = N_M
boxes = np.random.rand(B, N, 4)
scores = np.random.rand(B, N, C)
iou_matrix = _compute_iou_matrix(boxes[0, :, :]) # (N,N)
iou_matrix = iou_matrix[~np.eye(iou_matrix.shape[0], dtype=bool)].reshape(iou_matrix.shape[0], -1)
for per_class_suppression in [False, True]:
for iou_thresh in iou_threshold_percentile:
for score_thresh in score_threshold_percentile_arr:
for is_dynamic in [False, True]:
if score_thresh == 0:
score_threshold = np.min(scores) - 1
elif score_thresh == 100:
score_threshold = np.max(scores) + 1
else:
score_threshold = np.percentile(scores, score_thresh) + .01
if iou_thresh == 0:
iou_threshold = np.maximum(np.min(iou_matrix) - .01, 0.0)
else:
iou_threshold = np.percentile(iou_matrix, iou_thresh) + .01
number_of_test += 1
tf_boxes, tf_scores, tf_ids, tf_num_boxes = _nms_TF(boxes, scores, iou_threshold,
score_threshold,
per_class_suppression,
M)
expected = dict()
expected['selected_boxes'] = tf_boxes
expected['selected_scores'] = tf_scores
expected['selected_box_ids'] = tf_ids
expected['number_of_boxes'] = tf_num_boxes
# define CoreML model
input_features = [('boxes', datatypes.Array(B,N,4)), ('scores', datatypes.Array(B,N,C))]
output_features = [('selected_boxes', None), ('selected_scores', None),
('selected_box_ids', None), ('number_of_boxes', None)]
input_names = ['boxes', 'scores']
if is_dynamic:
input_names.extend(['iou_threshold', 'score_threshold', 'max_boxes'])
input_features.append(('iou_threshold', datatypes.Array(1, )))
input_features.append(('score_threshold', datatypes.Array(1, )))
input_features.append(('max_boxes', datatypes.Array(1, )))
builder = neural_network.NeuralNetworkBuilder(input_features, output_features,
disable_rank5_shape_mapping=True)
input_dict = dict()
input_dict['boxes'] = boxes
input_dict['scores'] = scores
if is_dynamic:
builder.add_nms('nms', input_names,
['selected_boxes', 'selected_scores', 'selected_box_ids','number_of_boxes'],
per_class_suppression=per_class_suppression)
input_dict['iou_threshold'] = iou_threshold * np.ones([1], dtype=np.float64)
input_dict['score_threshold'] = score_threshold * np.ones([1], dtype=np.float64)
input_dict['max_boxes'] = M * np.ones([1], dtype=np.float64)
else:
builder.add_nms('nms', input_names,
['selected_boxes', 'selected_scores', 'selected_box_ids','number_of_boxes'],
iou_threshold=iou_threshold, score_threshold=score_threshold,
max_boxes=M, per_class_suppression=per_class_suppression)
self._test_model(builder.spec, input_dict, expected, useCPUOnly=cpu_only)
def test_nms_gpu(self):
self.test_nms_cpu(cpu_only=False)
def test_rank_preserving_reshape(self):
input_shapes = [(20, 10), (20, 10, 5), (10, 3, 5)]
target_shapes = [(5, -1), (0, 2, 25), (25, 0, -1)]
output_shapes = [(5, 40), (20, 2, 25), (25, 3, 2)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_rank_preserving_reshape(
name='rank_preserving_reshape', input_name='data',
output_name='output', output_shape=target_shapes[i])
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))
def test_expand_dims(self):
input_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (10,)]
axes = [(0, 1), (0, 2), (2, 0), (-2, -1), (1, 0, -2)]
output_shapes = [(1, 1, 10, 5), (1, 10, 1, 5), (1, 10, 1, 5), (10, 5, 1, 1), (1, 1, 1, 10)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_expand_dims(
name='expand_dims', input_name='data', output_name='output',
axes=axes[i]
)
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))
def test_squeeze(self):
input_shapes = [(1, 1, 10, 5), (1, 10, 1, 5), (10, 5, 1, 1),
(10, 5, 1, 1), (1,), (10, 5, 1, 1), (3, 1, 7)]
axes = [(0, 1), (0, 2), (-2, -1), (-1, -2), (0,), (3, -2), (1,)]
output_shapes = [(10, 5), (10, 5), (10, 5), (10, 5), (1,), (10, 5), (3, 7)]
for i in range(len(input_shapes)):
input_features = [('data', datatypes.Array(*input_shapes[i]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_squeeze(name='squeeze_layer', input_name='data',
output_name='output', axes=list(axes[i]))
x = np.random.rand(*input_shapes[i])
input = {'data': x}
expected = {'output': np.reshape(x, output_shapes[i])}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(output_shapes[i]), builder._get_rank('output'))
def test_squeeze_all(self):
input_shapes = [
(1, 1, 10, 5), (1, 10, 1, 5), (10, 5, 1, 1), (10, 5, 1, 1), (1,),
(10, 5, 1, 1), (3, 1, 7), (3,), (5, 6)
]
for input_shape in input_shapes:
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_squeeze(name='squeeze_layer', input_name='data',
output_name='output', squeeze_all=True)
x = np.random.rand(*input_shape)
input = {'data': x}
reference = np.squeeze(x)
if not reference.shape:
reference = np.reshape(reference, (1,))
expected = {'output': reference}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(-1, builder._get_rank('output'))
def test_argmax_argmin(self):
test_input_shapes = [(9,), (8, 6), (9, 8, 10), (5, 9, 7, 9), (12, 8, 6, 6, 7)]
# (1+2+3+4+5) * 2^3 = 120 test cases
for input_shape in test_input_shapes:
for negative_axis in [False, True]:
for mode in ['argmax', 'argmin']:
for keep_dims in [True, False]:
for axis in np.arange(len(input_shape)):
if negative_axis:
axis_val = axis - len(input_shape)
else:
axis_val = axis
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
x = np.random.rand(*input_shape)
if mode == 'argmax':
builder.add_argmax('argmax', 'data', 'output', axis=axis_val, keepdims=keep_dims)
np_out = np.argmax(x, axis=axis_val)
else:
builder.add_argmin('argmin', 'data', 'output', axis=axis_val, keepdims=keep_dims)
np_out = np.argmin(x, axis=axis_val)
if keep_dims:
np_out = np.expand_dims(np_out, axis=axis_val)
elif len(input_shape) == 1:
np_out = np.expand_dims(np_out, axis=axis_val)
input = {'data': x}
expected = {'output': np_out}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
self.assertEqual(len(np_out.shape), builder._get_rank('output'))
def test_get_shape(self):
dims = [1, 2, 3, 4, 5]
for rank in range(1, len(dims) + 1):
input_shape = dims[:rank]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_get_shape(name='get_shape_layer', input_name='data',
output_name='output')
feed = {'data': np.random.rand(*input_shape)}
expected = {'output': np.array(input_shape)}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
self.assertEqual(1, builder._get_rank('output'))
def test_load_constant_nd(self):
dims = [2, 3, 4, 5, 6]
for rank in range(1, len(dims) + 1):
input_shape = dims[:rank]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_load_constant_nd('load_const_nd_layer', 'tmp',
constant_value=np.ones(input_shape),
shape=input_shape)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output',
mode='ADD')
feed = {'data': np.random.rand(*input_shape)}
expected = {'output': feed['data'] + 1}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
self.assertEqual(rank, builder._get_rank('output'))
@unittest.skip('fix')
def test_simple_array_alloc_scatter(self):
alloc_shape = [2, 3, 4]
value_shape = [1, 3, 4]
input_features = [('alloc_shape', datatypes.Array(len(alloc_shape))),
('value', datatypes.Array(*value_shape)),
('index', datatypes.Array(1))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_fill_dynamic(name='fill_dynamic_layer', input_name='alloc_shape',
output_name='array', value=0.0)
# CoreML input order: container (array), indices, slices (value)
builder.add_scatter(name='scatter_layer',
input_names=['array', 'index', 'value'],
output_name='output')
value = np.random.rand(*value_shape).astype('float')
feed = {'alloc_shape': np.array(alloc_shape, dtype='float'),
'value': value,
'index': np.array([1], dtype='float')}
ref = np.zeros(alloc_shape)
ref[1, :, :] = value
expected = {'output': ref}
self._test_model(builder.spec, feed, expected, useCPUOnly=True)
def test_erf_activation_cpu(self, cpu_only=True):
input_features = [('data', datatypes.Array(10, 45))]
output_features = [('output', datatypes.Array(10, 45))]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_erf(name='erf', input_name='data',
output_name='output')
x = np.random.rand(10, 45)
input = {'data': x}
expected = {
'output': np.asarray([math.erf(i) for i in
x.flatten().tolist()]).reshape(10, 45)
}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_erf_activation_gpu(self):
self.test_erf_activation_cpu(cpu_only=False)
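# All three modes are checked against the exact definition
# GELU(x) = 0.5 * x * (1 + erf(x / sqrt(2))); the tanh and sigmoid variants
# are close approximations, so they are expected to agree within the
# model-comparison tolerance.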
def test_gelu_activation(self):
for mode in ['EXACT', 'TANH_APPROXIMATION', 'SIGMOID_APPROXIMATION']:
for rank in range(1, 6):
shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_gelu(name='gelu', input_name='data',
output_name='output', mode=mode)
x = np.random.rand(*shape)
input = {'data': x}
exact = np.asarray([0.5 * i * (1.0 + math.erf(i / math.sqrt(2)))
for i in x.flatten().tolist()]).reshape(*shape)
expected = {'output': exact}
self._test_model(builder.spec, input, expected, useCPUOnly=True)
def test_lower_triangular_cpu(self, cpu_only=True):
for rank in range(2, 6):
for k in range(-3, 4):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_lower_triangular('tril', 'data', 'output', k=k)
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.tril(x, k=k)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_lower_triangular_gpu(self):
self.test_lower_triangular_cpu(cpu_only=False)
def test_upper_triangular_cpu(self, cpu_only=True):
for rank in range(2, 6):
for k in range(-3, 4):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_upper_triangular('triu', 'data', 'output', k=k)
x = np.random.rand(*shape)
input = {'data': x}
expected = {'output': np.triu(x, k=k)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_upper_triangular_gpu(self):
self.test_upper_triangular_cpu(cpu_only=False)
def test_where_broadcastable_cpu(self, cpu_only=True):
for _ in range(150):
rank_cond = np.random.randint(low=1, high=6)
rank_true = np.random.randint(low=1, high=6)
rank_false = np.random.randint(low=1, high=6)
rank_out = max(rank_cond, rank_true, rank_false)
shape_cond = np.random.randint(low=2, high=8, size=rank_cond)
shape_true = np.random.randint(low=2, high=8, size=rank_true)
shape_false = np.random.randint(low=2, high=8, size=rank_false)
for i in range(-1, -rank_out - 1, -1):
dims = []
if -i <= rank_cond: dims.append(shape_cond[i])
if -i <= rank_true: dims.append(shape_true[i])
if -i <= rank_false: dims.append(shape_false[i])
dim = np.random.choice(dims)
if -i <= rank_cond: shape_cond[i] = np.random.choice([1, dim])
if -i <= rank_true: shape_true[i] = np.random.choice([1, dim])
if -i <= rank_false: shape_false[i] = np.random.choice([1, dim])
input_features = [
('cond', datatypes.Array(*shape_cond)),
('true', datatypes.Array(*shape_true)),
('false', datatypes.Array(*shape_false))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_where_broadcastable('if_broadcastable', input_names=['cond', 'true', 'false'],
output_name='output')
cond = np.random.choice([1.0, 0.0], size=shape_cond)
true = np.random.rand(*shape_true)
false = np.random.rand(*shape_false)
input = {'cond': cond, 'true': true, 'false': false}
expected = {'output': np.where(cond, true, false)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(len(expected['output'].shape), builder._get_rank('output'))
def test_where_broadcastable_gpu(self):
self.test_where_broadcastable_cpu(cpu_only=False)
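# The random-layer tests below size their tensors to hold at least on the
# order of 10^3 elements (low/high are derived from 1000**(1/rank) and
# 2000**(1/rank)) so that comparing sample moments is statistically meaningful.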
def test_random_normal_like_cpu(self, cpu_only=True):
mean, stddev, seed = 0., 1., 42
for rank in range(5, -1, -1):
if rank > 0:
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
else:  # rank == 0: one extra large-tensor case to check higher moments
shape = np.array([10, 10, 10, 10, 10000])
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_like(name='random_normal_like',
input_name='tensor',
output_name='output',
mean=mean, stddev=stddev, seed=seed)
inputs = {'tensor': np.random.rand(*shape)}
expected = {'output': np.random.normal(mean, stddev, shape)}
if rank > 0:
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
else:  # rank == 0: compare six moments instead of two
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=6)
def test_random_normal_like_gpu(self):
self.test_random_normal_like_cpu(cpu_only=False)
def test_random_normal_static_cpu(self, cpu_only=True):
mean, stddev, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_static(name='random_normal_static',
output_name='tmp',
output_shape=list(shape),
mean=mean, stddev=stddev, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.normal(mean, stddev, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_random_normal_static_gpu(self):
self.test_random_normal_static_cpu(cpu_only=False)
def test_random_normal_dynamic_cpu(self, cpu_only=True):
mean, stddev, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_normal_dynamic(name='random_normal_dynamic',
input_names=['shape'],
output_name='output',
mean=mean, stddev=stddev, seed=seed)
inputs = {'shape': np.array(shape, np.float64)}
expected = {'output': np.random.normal(mean, stddev, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected, num_moments=2)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(-1, builder._get_rank('output'))
def test_random_normal_dynamic_gpu(self):
self.test_random_normal_dynamic_cpu(cpu_only=False)
def test_random_uniform_like_cpu(self, cpu_only=True):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_like(name='random_uniform_like',
input_name='tensor',
output_name='output',
minval=minval, maxval=maxval, seed=seed)
tensor = np.random.rand(*shape)
inputs = {'tensor': tensor}
expected = {'output': np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_random_uniform_like_gpu(self):
self.test_random_uniform_like_cpu(cpu_only=False)
def test_random_uniform_static_cpu(self, cpu_only=True):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_static(name='random_uniform_static',
output_name='tmp',
output_shape=list(shape),
minval=minval, maxval=maxval, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(rank, builder._get_rank('output'))
def test_random_uniform_static_gpu(self):
self.test_random_uniform_static_cpu(cpu_only=False)
def test_random_uniform_dynamic_cpu(self, cpu_only=True):
minval, maxval, seed = 0., 1., 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_uniform_dynamic(name='random_uniform_dynamic',
input_names=['shape'],
output_name='output',
minval=minval, maxval=maxval, seed=seed)
inputs = {'shape': np.array(shape, np.float64)}
expected = {'output': np.random.uniform(minval, maxval, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(-1, builder._get_rank('output'))
def test_random_uniform_dynamic_gpu(self):
self.test_random_uniform_dynamic_cpu(cpu_only=False)
def test_random_bernoulli_like_cpu(self, cpu_only=True):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('tensor', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_like(name='random_bernoulli_like',
input_name='tensor',
output_name='output',
prob=prob, seed=seed)
tensor = np.random.rand(*shape)
inputs = {'tensor': tensor}
expected = {'output': np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_random_bernoulli_like_gpu(self):
self.test_random_bernoulli_like_cpu(cpu_only=False)
def test_random_bernoulli_static_cpu(self, cpu_only=True):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_static(name='random_bernoulli_static', output_name='tmp',
output_shape=list(shape), prob=prob, seed=seed)
builder.add_elementwise('add_layer', ['data', 'tmp'], 'output', mode='ADD')
data = np.zeros(shape)
inputs = {'data': data}
expected = {'output': data + np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_random_bernoulli_static_gpu(self):
self.test_random_bernoulli_static_cpu(cpu_only=False)
def test_random_bernoulli_dynamic_cpu(self, cpu_only=True):
prob, seed = 0.5, 42
for rank in range(1, 6):
low_factor = np.random.randint(low=2, high=4)
low = int(np.power(1000, 1. / rank)) * low_factor
high = int(np.power(2000, 1. / rank)) * np.random.randint(low=low_factor, high=4)
shape = np.random.randint(low=low, high=high, size=rank)
input_features = [('shape', datatypes.Array(len(shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_random_bernoulli_dynamic(name='random_bernoulli_dynamic',
input_names=['shape'],
output_name='output',
prob=prob, seed=seed)
inputs = {'shape': np.array(shape, np.float64)}
expected = {'output': np.random.binomial(1, prob, shape)}
CorrectnessTest._compare_moments(builder.spec, inputs, expected)
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
def test_random_bernoulli_dynamic_gpu(self):
self.test_random_bernoulli_dynamic_cpu(cpu_only=False)
def test_categorical_distribution_cpu_shapes(self):
for rank in range(1, 6):
shape = np.random.randint(low=2, high=8, size=rank)
num_samples = np.random.randint(low=10, high=1000)
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name='data',
output_name='output',
num_samples=num_samples)
x = np.random.randint(low=0, high=20, size=shape).astype(np.float32)
inputs = {'data': x}
shape[-1] = num_samples
expected = {'output': np.random.rand(*shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=True, validate_shapes_only=True)
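# The two logits/probs tests validate the sampled distribution rather than
# exact values: samples are bincounted into empirical class frequencies and
# compared (atol=1e-2) both against the softmax probabilities and against a
# NumPy multinomial draw with the same probabilities.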
def test_categorical_distribution_cpu_logits(self):
def softmax(data):
e_data = np.exp(data - np.max(data))
return e_data / e_data.sum()
num_samples, num_class = 50000, 10
input_name, output_name = 'data', 'output'
shapes = [(2, num_class), (2, 1, num_class), (1, 2, num_class),
(2, 1, 1, num_class), (1, 2, 1, num_class), (1, 1, 2, num_class),
(2, 1, 1, 1, num_class), (1, 2, 1, 1, num_class),
(1, 1, 2, 1, num_class), (1, 1, 1, 2, num_class)]
for shape in shapes:
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name=input_name,
output_name=output_name,
num_samples=num_samples,
is_logits=True,
seed=42)
x = np.random.rand(*shape)
inputs = {input_name: x}
model = builder.spec
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=True)
prediction = model.predict(inputs, useCPUOnly=True)
# validate each distribution separately
logits = x.reshape(2, num_class)
probs = [softmax(logits[0]), softmax(logits[1])]
ref0 = np.random.multinomial(num_samples, probs[0])
ref1 = np.random.multinomial(num_samples, probs[1])
pre0 = prediction[output_name].reshape(2, num_samples)[0]
pre1 = prediction[output_name].reshape(2, num_samples)[1]
expected = {output_name: np.stack((pre0, pre1))}
# convert to bincount and validate probabilities
pre0 = np.bincount(np.array(pre0).astype(np.int64), minlength=num_class)
pre1 = np.bincount(np.array(pre1).astype(np.int64), minlength=num_class)
assert np.allclose(np.true_divide(pre0, num_samples), probs[0], atol=1e-2)
assert np.allclose(np.true_divide(pre0, num_samples),
np.true_divide(ref0, num_samples), atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples), probs[1], atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples),
np.true_divide(ref1, num_samples), atol=1e-2)
self._test_model(model, inputs, expected, useCPUOnly=True,
output_name_shape_dict={'output': prediction['output'].shape})
def test_categorical_distribution_cpu_probs(self):
def softmax(data):
e_data = np.exp(data - np.max(data))
return e_data / e_data.sum()
num_samples, num_class = 50000, 10
input_name, output_name = 'data', 'output'
shapes = [(2, num_class), (2, 1, num_class), (1, 2, num_class),
(2, 1, 1, num_class), (1, 2, 1, num_class), (1, 1, 2, num_class),
(2, 1, 1, 1, num_class), (1, 2, 1, 1, num_class),
(1, 1, 2, 1, num_class), (1, 1, 1, 2, num_class)]
for shape in shapes:
input_features = [('data', datatypes.Array(*shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)], disable_rank5_shape_mapping=True)
builder.add_categorical_distribution(name='categorical_distribution',
input_name=input_name,
output_name=output_name,
num_samples=num_samples,
is_logits=False,
seed=42)
x = np.random.rand(*shape)
probs = x.reshape(2, num_class)
probs[0], probs[1] = softmax(probs[0]), softmax(probs[1])
inputs = {input_name: np.reshape(probs, shape)}
model = builder.spec
if isinstance(model, str):
model = coremltools.models.MLModel(model)
model = coremltools.models.MLModel(model, useCPUOnly=True)
prediction = model.predict(inputs, useCPUOnly=True)
# validate each distribution separately
probs = probs.reshape(2, num_class)
ref0 = np.random.multinomial(num_samples, probs[0])
ref1 = np.random.multinomial(num_samples, probs[1])
pre0 = prediction[output_name].reshape(2, num_samples)[0]
pre1 = prediction[output_name].reshape(2, num_samples)[1]
expected = {output_name: np.stack((pre0, pre1))}
# convert to bincount and validate probabilities
pre0 = np.bincount(np.array(pre0).astype(np.int64), minlength=num_class)
pre1 = np.bincount(np.array(pre1).astype(np.int64), minlength=num_class)
assert np.allclose(np.true_divide(pre0, num_samples), probs[0], atol=1e-2)
assert np.allclose(np.true_divide(pre0, num_samples),
np.true_divide(ref0, num_samples), atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples), probs[1], atol=1e-2)
assert np.allclose(np.true_divide(pre1, num_samples),
np.true_divide(ref1, num_samples), atol=1e-2)
self._test_model(model, inputs, expected, useCPUOnly=True,
output_name_shape_dict={'output': prediction['output'].shape})
def test_reverse_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
reverse_dim = [np.random.choice([True, False]) for _ in range(rank)]
axes = [i for i in range(rank) if reverse_dim[i]]
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_reverse('reverse', 'data', 'output', reverse_dim)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.flip(x, axis=axes)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reverse_gpu(self):
self.test_reverse_cpu(cpu_only=False)
def test_matrix_band_part_cpu(self, cpu_only=True):
for rank in range(2, 6):
for _ in range(20):
num_lower = np.random.randint(low=-7, high=8)
num_upper = np.random.randint(low=-7, high=8)
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_matrix_band_part('matrix_band_part', 'data', 'output',
num_lower=num_lower, num_upper=num_upper)
x = np.random.rand(*shape)
input = {'data': x}
rows, cols = shape[-2:]
band = np.ones((rows, cols))
for m in range(rows):
for n in range(cols):
band[m, n] = (num_lower < 0 or (m - n) <= num_lower) and (num_upper < 0 or (n - m) <= num_upper)
expected = {'output': np.multiply(band, x)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_matrix_band_part_gpu(self):
self.test_matrix_band_part_cpu(cpu_only=False)
def test_flatten_to_2d_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank + 1):
shape = np.random.randint(low=2, high=6, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features, disable_rank5_shape_mapping=True)
builder.add_flatten_to_2d('flatten_to_2d', 'data', 'output', axis=axis)
x = np.random.rand(*shape)
np_axis = axis + rank if axis < 0 else axis
pl, pr = 1, 1
for i in range(0, np_axis):
pl *= shape[i]
for i in range(np_axis, len(shape)):
pr *= shape[i]
new_shape = [pl, pr]
ref = x.reshape(new_shape)
input = {'data': x}
expected = {'output': ref}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(2, builder._get_rank('output'))
def test_flatten_to_2d_gpu(self):
self.test_flatten_to_2d_cpu(cpu_only=False)
def test_reshape_like_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = n // np.prod(target_shape)
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape)),
('tensor', datatypes.Array(*target_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_like(name='reshape_like',
input_names=['data', 'tensor'],
output_name='output')
data = np.random.rand(*input_shape)
tensor = np.random.rand(*target_shape)
inputs = {'data': data, 'tensor': tensor}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(target_rank, builder._get_rank('output'))
def test_reshape_like_gpu(self):
self.test_reshape_like_cpu(cpu_only=False)
def test_reshape_static_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = -1
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_static(name='reshape_static',
input_name='data',
output_name='output',
output_shape=target_shape)
data = np.random.rand(*input_shape)
inputs = {'data': data}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(len(target_shape), builder._get_rank('output'))
def test_reshape_static_gpu(self):
self.test_reshape_static_cpu(cpu_only=False)
def test_reshape_dynamic_cpu(self, cpu_only=True):
for rank in range(1, 6):
for _ in range(20):
input_shape = np.random.randint(low=2, high=8, size=rank)
n = int(np.prod(input_shape))
divisors = [d for d in range(1, n) if n % d == 0]
target_rank = np.random.randint(low=2, high=6)
target_shape = [1]
for i in range(target_rank - 1):
dim_size = np.random.choice(divisors)
while n % (np.prod(target_shape) * dim_size) != 0:
dim_size = np.random.choice(divisors)
target_shape.append(dim_size)
target_shape[0] = -1
np.random.shuffle(target_shape)
input_features = [('data', datatypes.Array(*input_shape)),
('shape', datatypes.Array(len(target_shape)))]
builder = neural_network.NeuralNetworkBuilder(
input_features, [('output', None)],
disable_rank5_shape_mapping=True)
builder.add_reshape_dynamic(name='reshape_dynamic',
input_names=['data', 'shape'],
output_name='output')
data = np.random.rand(*input_shape)
inputs = {'data': data, 'shape': np.array(target_shape, dtype='float')}
expected = {'output': np.reshape(data, target_shape)}
self._test_model(builder.spec, inputs, expected, useCPUOnly=cpu_only)
self.assertEqual(-1, builder._get_rank('output'))
def test_reshape_dynamic_gpu(self):
self.test_reshape_dynamic_cpu(cpu_only=False)
def test_reduce_sum_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_sum('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.add.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
expected_rank = len(expected['output'].shape)
if expected_rank == 0:
expected_rank = 1
self.assertEqual(expected_rank, builder._get_rank('output'))
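# The rank clamp above (0 -> 1) accounts for the fact that a full
# reduction with keepdims=False is mathematically a scalar, while the
# builder's rank bookkeeping reports scalar outputs as rank-1 tensors.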
def test_reduce_sum_gpu(self):
self.test_reduce_sum_cpu(cpu_only=False)
def test_reduce_prod_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_prod('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.multiply.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
expected_rank = len(expected['output'].shape)
if expected_rank == 0:
expected_rank = 1
self.assertEqual(expected_rank, builder._get_rank('output'))
def test_reduce_prod_gpu(self):
self.test_reduce_prod_cpu(cpu_only=False)
def test_reduce_mean_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_mean('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.mean(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_mean_gpu(self):
self.test_reduce_mean_cpu(cpu_only=False)
def test_reduce_max_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_max('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.maximum.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_max_gpu(self):
self.test_reduce_max_cpu(cpu_only=False)
def test_reduce_min_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_min('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.minimum.reduce(x, axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_min_gpu(self):
self.test_reduce_min_cpu(cpu_only=False)
def test_reduce_l2_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_l2('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.sqrt(np.sum(np.square(x), axis=axes, keepdims=keep_dims))}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_l2_gpu(self):
self.test_reduce_l2_cpu(cpu_only=False)
def test_reduce_l1_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_l1('reduce', 'data', 'output', axes, keepdims=keep_dims, reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.sum(np.abs(x), axis=axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_l1_gpu(self):
self.test_reduce_l1_cpu(cpu_only=False)
def test_reduce_sumsquare_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_sumsquare('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.sum(np.square(x), axis=axes, keepdims=keep_dims)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_sumsquare_gpu(self):
self.test_reduce_sumsquare_cpu(cpu_only=False)
def test_reduce_logsum_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_logsum('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.log(np.sum(x, axis=axes, keepdims=keep_dims))}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reduce_logsum_gpu(self):
self.test_reduce_logsum_cpu(cpu_only=False)
def test_reduce_logsumexp_cpu(self, cpu_only=True):
for rank in range(1, 6):
axes_list = [axes for length in range(1, rank + 1) for axes in itertools.combinations(range(rank), length)]
axes_list.append(None)
for axes in axes_list:
if axes:
axes = tuple([axis if np.random.choice([True, False]) else axis - rank for axis in axes])
reduce_all = False
else:
reduce_all = True
for keep_dims in [True, False]:
input_shape = np.random.randint(low=2, high=5, size=rank)
input_features = [('data', datatypes.Array(*input_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reduce_logsumexp('reduce', 'data', 'output', axes, keepdims=keep_dims,
reduce_all=reduce_all)
x = np.random.rand(*input_shape)
input = {'data': x}
expected = {'output': np.log(np.sum(np.exp(x), axis=axes, keepdims=keep_dims))}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
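# Caveat (not part of the original test): the reference value
# np.log(np.sum(np.exp(x))) is only safe here because x is drawn from
# [0, 1); for large-magnitude inputs a shifted form such as
#   m = x.max(); m + np.log(np.sum(np.exp(x - m)))
# (or scipy.special.logsumexp) avoids overflow.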
def test_reduce_logsumexp_gpu(self):
self.test_reduce_logsumexp_cpu(cpu_only=False)
def test_reverse_sequence_cpu(self, cpu_only=True):
for rank in range(2, 6):
for i in range(20):
input_shape = np.random.randint(low=2, high=6, size=rank)
seq_axis = np.random.randint(low=-rank, high=rank)
batch_axis = np.random.randint(low=-rank, high=rank)
pos_batch_axis = batch_axis if batch_axis >= 0 else rank + batch_axis
pos_seq_axis = seq_axis if seq_axis >= 0 else rank + seq_axis
while pos_batch_axis >= pos_seq_axis:
seq_axis = np.random.randint(low=-rank, high=rank)
batch_axis = np.random.randint(low=-rank, high=rank)
pos_batch_axis = batch_axis if batch_axis >= 0 else rank + batch_axis
pos_seq_axis = seq_axis if seq_axis >= 0 else rank + seq_axis
input_features = [('data', datatypes.Array(*input_shape)),
('lengths', datatypes.Array(input_shape[batch_axis]))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_reverse_sequence('reverse_sequence', ['data', 'lengths'],
'output', batch_axis=batch_axis,
seq_axis=seq_axis)
data = np.random.rand(*input_shape)
lengths = np.random.randint(low=0, high=input_shape[seq_axis], size=input_shape[batch_axis])
input = {'data': data, 'lengths': lengths.astype(np.float32)}
with tf.Graph().as_default(), tf.Session() as sess:
tf_op = tf.reverse_sequence(input=data, seq_lengths=lengths,
seq_axis=pos_seq_axis, batch_axis=pos_batch_axis)
expected = {'output': sess.run(tf_op)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_reverse_sequence_gpu(self):
self.test_reverse_sequence_cpu(cpu_only=False)
def test_where_nonzero_cpu(self, cpu_only=True):
for rank in range(1, 6):
for i in range(10):
shape = np.random.randint(low=2, high=8, size=rank)
input_features = [('data', datatypes.Array(*shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_where_nonzero('multi_indices', 'data', 'output')
x = np.random.randint(low=0, high=3, size=shape)
input = {'data': x.astype(np.float)}
expected = {'output': np.transpose(np.nonzero(x)).astype(np.float)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_where_nonzero_gpu(self):
self.test_where_nonzero_cpu(cpu_only=False)
def test_gather_cpu(self, cpu_only=True):
for rankParams, rankIndices in [(i, j) for i in range(1, 6) for j in range(1, 6)]:
for axis in range(-rankParams, rankParams):
shapeParams = np.random.randint(low=2, high=5, size=rankParams)
shapeIndices = np.random.randint(low=2, high=5,
size=rankIndices)
input_shapes = [shapeParams, shapeIndices]
posAxis = axis if axis >= 0 else axis + rankParams
output_shape = list(shapeParams[:posAxis]) + list(
shapeIndices) + list(shapeParams[posAxis + 1:])
if len(output_shape) > 5:
continue
input_names = ['params', 'indices']
input_features = [
('params', datatypes.Array(*input_shapes[0])),
('indices', datatypes.Array(*input_shapes[1]))
]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True)
builder.add_gather(name='gather', input_names=input_names,
output_name='output', axis=axis)
a = np.random.rand(*input_shapes[0])
b = np.random.randint(-shapeParams[axis], shapeParams[axis],
size=shapeIndices)
input = {'params': a, 'indices': b.astype(np.float)}
expected = {'output': np.take(a, b, axis=axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(len(expected['output'].shape), builder._get_rank('output'))
def test_gather_gpu(self):
self.test_gather_cpu(cpu_only=False)
def test_gather_along_axis_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
for _ in range(5):
params_shape = np.random.randint(low=2, high=8, size=rank)
indices_shape = np.copy(params_shape)
indices_shape[axis] = np.random.randint(low=1, high=8)
input_features = [('params', datatypes.Array(*params_shape)),
('indices', datatypes.Array(*indices_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_gather_along_axis('gather_along_axis', ['params', 'indices'], 'output', axis=axis)
a = np.random.rand(*params_shape)
b = np.random.randint(-params_shape[axis], params_shape[axis], size=indices_shape)
input = {'params': a, 'indices': b.astype(np.float)}
expected = {'output': np.take_along_axis(a, b, axis=axis)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(len(expected['output'].shape), builder._get_rank('output'))
def test_gather_along_axis_gpu(self):
self.test_gather_along_axis_cpu(cpu_only=False)
def test_gather_nd_cpu(self, cpu_only=True):
for params_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(1, 6)]:
params_shape = np.random.randint(low=2, high=8, size=params_rank)
indices_shape = np.random.randint(low=2, high=8, size=indices_rank)
indices_shape[-1] = np.random.randint(low=1, high=params_rank + 1)
for _ in range(5):
input_features = [('params', datatypes.Array(*params_shape)),
('indices', datatypes.Array(*indices_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
output_shape = list(indices_shape[:-1]) + list(params_shape[indices_shape[-1]:])
if len(output_shape) > 5:
continue
builder.add_gather_nd('gather_nd', ['params', 'indices'], 'output')
a = np.random.rand(*params_shape)
indices_list = []
for i in range(indices_shape[-1]):
indices_list.append(np.random.randint(0, params_shape[i], size=indices_shape[:-1]))
indices = np.stack(indices_list, axis=-1)
input = {'params': a, 'indices': indices.astype(np.float)}
with tf.Graph().as_default(), tf.Session() as sess:
tf_op = tf.gather_nd(a, indices)
expected = {'output': sess.run(tf_op)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
self.assertEqual(-1, builder._get_rank('output'))
def test_gather_nd_gpu(self):
self.test_gather_nd_cpu(cpu_only=False)
def test_scatter_cpu(self, cpu_only=True):
for ref_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(1, 6)]:
for accumulate_mode in ["UPDATE", "ADD", "SUB", "MUL", "DIV", "MAX", "MIN"]:
for _ in range(5):
ref_shape = np.random.randint(low=2, high=8, size=ref_rank)
indices_shape = np.random.randint(low=2, high=8, size=indices_rank)
updates_shape = list(indices_shape) + list(ref_shape[1:])
input_features = [('ref', datatypes.Array(*ref_shape)),
('indices', datatypes.Array(*indices_shape)),
('updates', datatypes.Array(*updates_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
if len(updates_shape) > 5:
continue
builder.add_scatter('scatter', ['ref', 'indices', 'updates'], 'output', axis=0,
mode=accumulate_mode)
ref = np.random.rand(*ref_shape)
updates = np.random.rand(*updates_shape)
indices = np.random.randint(0, ref_shape[0], size=indices_shape)
input = {'ref': ref, 'indices': indices.astype(np.float), 'updates': updates}
with tf.Graph().as_default(), tf.Session() as sess:
tf_output = tf.Variable(ref)
sess.run(tf.global_variables_initializer())
if accumulate_mode == "UPDATE":
sess.run(tf.scatter_update(tf_output, indices, updates))
if accumulate_mode == "ADD":
sess.run(tf.scatter_add(tf_output, indices, updates))
if accumulate_mode == "SUB":
sess.run(tf.scatter_sub(tf_output, indices, updates))
if accumulate_mode == "MUL":
sess.run(tf.scatter_mul(tf_output, indices, updates))
if accumulate_mode == "DIV":
sess.run(tf.scatter_div(tf_output, indices, updates))
if accumulate_mode == "MAX":
sess.run(tf.scatter_max(tf_output, indices, updates))
if accumulate_mode == "MIN":
sess.run(tf.scatter_min(tf_output, indices, updates))
expected = {'output': sess.run(tf_output)}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_scatter_gpu(self):
self.test_scatter_cpu(cpu_only=False)
def test_gather_scatter_multiple_axis_cpu(self, cpu_only=True):
for params_rank, indices_rank in [(i, j) for i in range(1, 6) for j in range(1, 6)]:
for axis in range(-params_rank, params_rank):
for _ in range(5):
params_shape = np.random.randint(low=2, high=8, size=params_rank)
indices_shape = np.random.randint(low=2, high=8, size=indices_rank)
pos_axis = axis if axis >= 0 else axis + params_rank
output_shape = list(params_shape[:pos_axis]) + list(indices_shape) + list(
params_shape[pos_axis + 1:])
if len(output_shape) > 5:
continue
input_features = [('params', datatypes.Array(*params_shape)),
('indices', datatypes.Array(*indices_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_gather('gather', ['params', 'indices'], 'updates', axis=axis)
builder.add_scatter('scatter', ['params', 'indices', 'updates'], 'output', axis=axis, mode='UPDATE')
a = np.random.rand(*params_shape)
b = np.random.randint(-params_shape[axis], params_shape[axis], size=indices_shape)
input = {'params': a, 'indices': b.astype(np.float)}
expected = {'output': a}
self._test_model(builder.spec, input, expected, useCPUOnly=cpu_only)
def test_gather_scatter_multiple_axis_gpu(self):
self.test_gather_scatter_multiple_axis_cpu(cpu_only=False)
def test_scatter_along_axis_cpu(self, cpu_only=True):
for rank in range(1, 6):
for axis in range(-rank, rank):
for id in range(5):
ref_shape = np.random.randint(low=2, high=8, size=rank)
indices_shape = np.copy(ref_shape)
indices_shape[axis] = np.random.randint(low=1, high=8)
updates_shape = indices_shape
input_features = [('ref', datatypes.Array(*ref_shape)),
('indices', datatypes.Array(*indices_shape)),
('updates', datatypes.Array(*updates_shape))]
output_features = [('output', None)]
builder = neural_network.NeuralNetworkBuilder(
input_features, output_features,
disable_rank5_shape_mapping=True
)
builder.add_scatter_along_axis('scatter_along_axis', ['ref', 'indices', 'updates'], 'output',
axis=axis, mode="UPDATE")
ref = np.random.rand(*ref_shape)
updates = np.random.rand(*updates_shape)
from itertools import count
import numpy as np
from sklearn import datasets
from src.io import npy_events_tools
from src.io import psee_loader
import tqdm
import os
from numpy.lib import recfunctions as rfn
import h5py
import pickle
import torch
import time
import math
import argparse
def taf_cuda(x, y, t, p, shape, volume_bins, past_volume):
tick = time.time()
H, W = shape
t_star = t.float()[:,None,None]
adder = torch.stack([torch.arange(2),torch.arange(2)],dim = 1).to(x.device)[None,:,:] #1, 2, 2
adder = (1 - torch.abs(adder-t_star)) * torch.stack([p,1 - p],dim=1)[:,None,:] #n, 2, 2
adder = torch.where(adder>=0,adder,torch.zeros_like(adder)).view(adder.shape[0], 4) #n, 4
img = torch.zeros((H * W, 4)).float().to(x.device)
img.index_add_(0, x + W * y, adder)
img = img.view(H * W, 2, 2, 1) #img: hw, 2, 2, 1
torch.cuda.synchronize()
generate_volume_time = time.time() - tick
#print("generate_volume_time",time.time() - tick)
tick = time.time()
forward = (img[:,-1]==0)[:,None] #forward: hw, 1, 2, 1
if not (past_volume is None):
img_old_ecd = past_volume #img_ecd: hw, 2, 2, 2
img_old_ecd[:,-1,:,0] = torch.where(img_old_ecd[:,-1,:,1] == 0,img_old_ecd[:,-1,:,0] + img[:,0,:,0],img_old_ecd[:,-1,:,0])
img_ecd = torch.cat([img_old_ecd,torch.cat([img[:,1:],torch.zeros_like(img[:,1:])],dim=3)],dim=1)
for i in range(1,img_ecd.shape[1])[::-1]:
img_ecd[:,i-1,:,1] = img_ecd[:,i-1,:,1] - 1
img_ecd[:,i:i+1] = torch.where(forward, img_ecd[:,i-1:i],img_ecd[:,i:i+1])
img_ecd[:,:1] = torch.where(forward, torch.cat([torch.zeros_like(forward).float(),torch.zeros_like(forward).float() -1e6],dim=3), img_ecd[:,:1])
else:
ecd = torch.where(forward, torch.zeros_like(forward).float() -1e6, torch.zeros_like(forward).float()) #ecd: hw, 1, 2, 1
img_ecd = torch.cat([img, torch.cat([ecd,ecd],dim=1)],dim=3) #img_ecd: hw, 2, 2, 2
if img_ecd.shape[1] > volume_bins:
img_ecd = img_ecd[:,1:]
torch.cuda.synchronize()
generate_encode_time = time.time() - tick
#print("generate encode",time.time() - tick)
img_ecd_viewed = img_ecd.view((H, W, img_ecd.shape[1] * 2, 2)).permute(2, 0, 1, 3)
return img_ecd_viewed, img_ecd, generate_volume_time, generate_encode_time
def generate_taf_cuda(events, shape, past_volume = None, volume_bins=5):
x, y, t, p, z = events.unbind(-1)
x, y, p = x.long(), y.long(), p.long()
histogram_ecd, past_volume, generate_volume_time, generate_encode_time = taf_cuda(x, y, t, p, shape, volume_bins, past_volume)
return histogram_ecd, past_volume, generate_volume_time, generate_encode_time
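# What the two functions above compute, roughly: `taf_cuda` splats each
# event bilinearly into a (2 temporal bins) x (2 polarities) per-pixel
# histogram via index_add_, then maintains a rolling stack of
# `volume_bins` slices. The second channel of `img_ecd` holds a negative
# age used as an exponential-decay timestamp (-1e6 marks "never fired");
# `forward` flags pixels with no new events so older slices are shifted
# forward instead of being overwritten.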
def denseToSparse(dense_tensor):
"""
Converts a dense tensor to a sparse representation.
:param dense_tensor: BatchSize x SpatialDimension_1 x SpatialDimension_2 x ... x FeatureDimension
:return locations: NumDimensions x NumberOfActive stacked index arrays (batch index first, feature index last), as returned by np.stack
:return features: 1-D array of the NumberOfActive non-zero values
"""
non_zero_indices = np.nonzero(dense_tensor)
features = dense_tensor[non_zero_indices[0],non_zero_indices[1],non_zero_indices[2],non_zero_indices[3]]
return np.stack(non_zero_indices), features
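# Example of the convention used by denseToSparse (shapes illustrative):
# a dense tensor of shape (B, H, W, C) with k non-zero entries yields
# `locations` of shape (4, k) -- one row per axis, batch index first --
# and `features` of length k.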
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='visualize one or several event files along with their boxes')
parser.add_argument('-rank', type=int)
args = parser.parse_args()
rank = args.rank
raw_dir = "/datassd4t/lbd/Large_Automotive_Detection_Dataset"
target_dir1 = "/datassd4t/lbd/Large_Automotive_Detection_Dataset_sampling"
target_dir2 = "/datassd4t/lbd/Large_taf"
min_event_count = 50000000
# min_event_count = 800000
shape = [720,1280]
target_shape = [512, 640]
events_window_abin = 10000
event_volume_bins = 5
events_window = events_window_abin * event_volume_bins
rh = target_shape[0] / shape[0]
rw = target_shape[1] / shape[1]
#raw_dir = "/data/lbd/ATIS_Automotive_Detection_Dataset/detection_dataset_duration_60s_ratio_1.0"
#target_dir = "/data/lbd/ATIS_taf"
#raw_dir = "/data/Large_Automotive_Detection_Dataset_sampling"
#target_dir = "/data/Large_taf"
total_volume_time = []
total_taf_time = []
for mode in ["train"]:
file_dir = os.path.join(raw_dir, mode)
if not os.path.exists(file_dir):
os.makedirs(file_dir)
root = file_dir
target_root1 = os.path.join(target_dir1, mode)
target_root2 = os.path.join(target_dir2, mode)
#h5 = h5py.File(raw_dir + '/ATIS_taf_'+mode+'.h5', 'w')
try:
files = os.listdir(file_dir)
except Exception:
continue
# Remove duplicates (.npy and .dat)
#files = files[int(len(files)/3):]
files = [time_seq_name[:-7] for time_seq_name in files
if time_seq_name[-3:] == 'dat']
files = files[int(len(files)/4 * rank):int(len(files)/4 * (rank+1))]
pbar = tqdm.tqdm(total=len(files), unit='File', unit_scale=True)
for i_file, file_name in enumerate(files):
event_file = os.path.join(root, file_name + '_td.dat')
bbox_file = os.path.join(root, file_name + '_bbox.npy')
new_bbox_file = os.path.join(target_root1, file_name + '_bbox.npy')
# if os.path.exists(volume_save_path):
# continue
#h5 = h5py.File(volume_save_path, "w")
#f_bbox = open(new_bbox_file, "rb")
f_bbox = open(bbox_file, "rb")
start, v_type, ev_size, size, dtype = npy_events_tools.parse_header(f_bbox)
dat_bbox = np.fromfile(f_bbox, dtype=v_type, count=-1)
f_bbox.close()
unique_ts, unique_indices = np.unique(dat_bbox['t'], return_index=True)
sampled_bboxes = []
time_upperbound = -1e16
for bbox_count,unique_time in enumerate(unique_ts):
if unique_time <= 500000:
continue
if (unique_time - time_upperbound < 1000000):
continue
end_time = int(unique_time)
sampled_bboxes.append(dat_bbox[dat_bbox['t']==unique_time])
time_upperbound = end_time
sampled_bboxes = np.concatenate(sampled_bboxes)
mmp = np.lib.format.open_memmap(new_bbox_file, "w+", dtype, sampled_bboxes.shape)
mmp[:] = sampled_bboxes[:]
mmp.flush()
#f_event = psee_loader.PSEELoader(new_event_file)
f_bbox = open(new_bbox_file, "rb")
start, v_type, ev_size, size, dtype = npy_events_tools.parse_header(f_bbox)
dat_bbox = np.fromfile(f_bbox, dtype=v_type, count=-1)
f_bbox.close()
unique_ts, unique_indices = np.unique(dat_bbox['t'], return_index=True)
import abc
from typing import Union
from operator import itemgetter
import numpy as np
from python_polar_coding.polar_codes import pcc, utils
from . import encoder
class BasePolarCodec(metaclass=abc.ABCMeta):
"""Basic codec for Polar code.
Includes code construction.
Defines the basic workflow for encoding and decoding.
Supports creation of a polar code from custom mask.
"""
encoder_class = encoder.Encoder
decoder_class = None
BHATTACHARYYA = 'bhattacharyya'
GAUSSIAN = 'gaussian'
MONTE_CARLO = 'monte carlo'
PCC_METHODS = {
BHATTACHARYYA: pcc.bhattacharyya_bounds,
}
def __init__(self, N: int, K: int,
design_snr: float = 0.0,
is_systematic: bool = True,
mask: Union[str, None] = None,
pcc_method: str = BHATTACHARYYA):
assert K < N, (f'Cannot create Polar code with N = {N}, K = {K}.'
f'\nN must be bigger than K.')
self.N = N
self.K = K
self.n = int( | np.log2(N) | numpy.log2 |
import torch.nn as nn
import torch # pytorch 0.4.0! fft
import numpy as np
import cv2
def complex_mul(x, z):
out_real = x[..., 0] * z[..., 0] - x[..., 1] * z[..., 1]
out_imag = x[..., 0] * z[..., 1] + x[..., 1] * z[..., 0]
return torch.stack((out_real, out_imag), -1)
def complex_mulconj(x, z):
out_real = x[..., 0] * z[..., 0] + x[..., 1] * z[..., 1]
out_imag = x[..., 1] * z[..., 0] - x[..., 0] * z[..., 1]
return torch.stack((out_real, out_imag), -1)
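# complex_mul / complex_mulconj implement (a+bi)(c+di) and
# (a+bi)*conj(c+di) on tensors whose trailing dimension of size 2 holds
# (real, imag) -- the layout produced by torch.rfft in the PyTorch
# 0.4-1.7 era, before torch.fft returned native complex tensors.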
class DCFNetFeature(nn.Module):
def __init__(self):
super(DCFNetFeature, self).__init__()
self.feature = nn.Sequential(
nn.Conv2d(3, 32, 3, padding=1),
nn.ReLU(inplace=True),
nn.Conv2d(32, 32, 3, padding=1),
nn.LocalResponseNorm(size=5, alpha=0.0001, beta=0.75, k=1),
)
def forward(self, x):
return self.feature(x)
class DCFNet(nn.Module):
def __init__(self, config=None):
super(DCFNet, self).__init__()
self.feature = DCFNetFeature()
self.model_alphaf = []
self.model_xf = []
self.config = config
def forward(self, x):
x = self.feature(x) * self.config.cos_window
xf = torch.rfft(x, signal_ndim=2)
kxzf = torch.sum(complex_mulconj(xf, self.model_zf), dim=1, keepdim=True)
response = torch.irfft(complex_mul(kxzf, self.model_alphaf), signal_ndim=2)
# r_max = torch.max(response)
# cv2.imshow('response', response[0, 0].data.cpu().numpy())
# cv2.waitKey(0)
return response
def update(self, z, lr=1.):
z = self.feature(z) * self.config.cos_window
zf = torch.rfft(z, signal_ndim=2)
kzzf = torch.sum(torch.sum(zf ** 2, dim=4, keepdim=True), dim=1, keepdim=True)
alphaf = self.config.yf / (kzzf + self.config.lambda0)
if lr > 0.99:
self.model_alphaf = alphaf
self.model_zf = zf
else:
self.model_alphaf = (1 - lr) * self.model_alphaf.data + lr * alphaf.data
self.model_zf = (1 - lr) * self.model_zf.data + lr * zf.data
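# `update` implements the closed-form correlation-filter solve in the
# Fourier domain: kzzf sums zf**2 over real/imag parts (dim=4) and
# channels (dim=1), giving |zf|^2, and alphaf = yf / (kzzf + lambda0)
# is the regularized filter; lr < 1 blends the new template into a
# running-average model between frames.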
def load_param(self, path='param.pth'):
checkpoint = torch.load(path)
if 'state_dict' in checkpoint.keys(): # from training result
state_dict = checkpoint['state_dict']
if 'module' in list(state_dict.keys())[0]:  # train with nn.DataParallel (dict keys are not indexable in Python 3)
from collections import OrderedDict
new_state_dict = OrderedDict()
for k, v in state_dict.items():
name = k[7:] # remove `module.`
new_state_dict[name] = v
self.load_state_dict(new_state_dict)
else:
self.load_state_dict(state_dict)
else:
self.feature.load_state_dict(checkpoint)
if __name__ == '__main__':
# network test
net = DCFNetFeature()
net.eval()
for idx, m in enumerate(net.modules()):
print(idx, '->', m)
for name, param in net.named_parameters():
if 'bias' in name or 'weight' in name:
print(param.size())
from scipy import io
import numpy as np
p = io.loadmat('net_param.mat')
x = p['res'][0][0][:,:,::-1].copy()
x_out = p['res'][0][-1]
from collections import OrderedDict
pth_state_dict = OrderedDict()
match_dict = dict()
match_dict['feature.0.weight'] = 'conv1_w'
match_dict['feature.0.bias'] = 'conv1_b'
match_dict['feature.2.weight'] = 'conv2_w'
match_dict['feature.2.bias'] = 'conv2_b'
for var_name in net.state_dict().keys():
# print var_name
key_in_model = match_dict[var_name]
param_in_model = var_name.rsplit('.', 1)[1]
if 'weight' in var_name:
pth_state_dict[var_name] = torch.Tensor(np.transpose(p[key_in_model],(3,2,0,1)))
elif 'bias' in var_name:
pth_state_dict[var_name] = torch.Tensor(np.squeeze(p[key_in_model]))
if var_name == 'feature.0.weight':
weight = pth_state_dict[var_name].data.numpy()
weight = weight[:, ::-1, :, :].copy() # cv2 bgr input
pth_state_dict[var_name] = torch.Tensor(weight)
torch.save(pth_state_dict, 'param.pth')
net.load_state_dict(torch.load('param.pth'))
x_t = torch.Tensor(np.expand_dims(np.transpose(x,(2,0,1)), axis=0))
x_pred = net(x_t).data.numpy()
pred_error = np.sum(np.abs(np.transpose(x_pred, (0, 2, 3, 1)) - x_out))
from kapteyn import maputils
import numpy
from service import *
fignum = 21
fig = plt.figure(figsize=figsize)
frame = fig.add_axes(plotbox)
theta_a = -45
t1 = -20.0; t2 = -70.0
eta = abs(t1-t2)/2.0
title = r"""Conic equal area projection (COE) with:
$\theta_a=-45^\circ$, $\theta_1=-20^\circ$ and $\theta_2=-70^\circ$. (Cal. fig.25)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---COE',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--COE',
'CRVAL2' : theta_a, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'PV2_1' : theta_a, 'PV2_2' : eta
}
X = cylrange()
Y = numpy.arange(-90, 91, 30.0)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 6 11:54:28 2021
@author: ccamargo
"""
import sys
sys.path.append("/Users/ccamargo/Documents/py_scripts/")
import utils_unc as unc
import utils_SL as sl
import utils_SLE_v2 as sle
import pandas as pd
import xarray as xr
import numpy as np
import cmocean as cm
from cmcrameri import cm as cmf
import matplotlib.pyplot as plt
# %% # open masks:
# ANT
path='/Volumes/LaCie_NIOZ/data/barystatic/masks/rignot_basins/ANT_Basins_IMBIE2_v1.6/'
ds=xr.open_dataset(path+'final_mask.nc')
ds
lon=np.array(ds.lon)
lon=sl.from_180_to_360(lon)
ds = ds.assign_coords(lon=lon)
ds = ds.sortby('lon')
lon=np.array(ds.lon)
lat=np.array(ds.lat)
# plot_ant(ds.lon,ds.lat,ds.mask,cmin=0,cmax=19,title='Rignot Drainage Basins',cmap='tab20')
mask=np.array(ds.mask)
codes=np.unique(mask[np.isfinite(mask)])
maskant_basins=np.array(ds.mask)
#% %
maskant_regions = np.array(maskant_basins)
wais=(1,2,9,14,18)
for i in wais:
maskant_regions[np.where(maskant_basins==i)]=1
eais=(3,4,5,6,7,8,10,11,12,13)
for i in eais:
maskant_regions[np.where(maskant_basins==i)]=2
ap=(15,16,17)
for i in ap:
maskant_regions[np.where(maskant_basins==i)]=3
# plot_ant(ds.lon,ds.lat,maskant_regions,cmin=0,cmax=3,
# title='Rignot Drainage Basins per Region',cmap='tab10')
maskant = np.array(mask)
maskant[np.isfinite(mask)]=1
# plot_ant(ds.lon,ds.lat,maskant,cmin=0,cmax=1,title='AIS mask')
ngrid = np.array(maskant)
ngrid=np.hstack(ngrid)
j=0
for i in range(len(ngrid)):
if np.isfinite(ngrid[i]):
ngrid[i]=j
j=j+1
ngrid_ant=ngrid.reshape(maskant.shape)
#%% GRE mask
ds=xr.open_dataset('/Volumes/LaCie_NIOZ/data/barystatic/masks/barystatic_mask2.nc')
maskgre_basins=np.array(ds.gre)
lat=np.array(ds.lat);lon=np.array(ds.lon)
dimlat=len(lat);dimlon=len(lon)
# unc.plot_gre(lon,lat,maskgre_basins,cmin=0,cmax=8,
# cmap='tab10',title='Rinot/Mouginot GIS mask')
# We want only the dynamic regions NW (0), CW(2) and SE (6)
maskgre_dyn=np.copy(maskgre_basins)
for i in [1,3,4,5]:
# print(i)
maskgre_dyn[np.where(maskgre_basins==i)]=10
maskgre_dyn[maskgre_dyn!=10]=0
maskgre_dyn[maskgre_dyn==10]=1
maskgre_dyn[np.isnan(maskgre_basins)]=np.nan
# unc.plot_gre(lon,lat,maskgre_dyn,cmin=0,cmax=1,cmap='tab10',title='Dynamic GIS mask')
maskgre=np.array(maskgre_basins)
maskgre[np.isfinite(maskgre)]=1
# unc.plot_gre(lon,lat,maskgre,title='GIS mask')
ngrid = np.array(maskgre)
ngrid=np.hstack(ngrid)
j=0
for i in range(len(ngrid)):
if np.isfinite(ngrid[i]):
ngrid[i]=j
j=j+1
ngrid_gre=ngrid.reshape(maskgre.shape)
#%% GLaciers mask
# ds=xr.open_dataset('/Volumes/LaCie_NIOZ/data/barystatic/masks/barystatic_mask2.nc')
maskgla_rgi=np.array(ds.gla)
lat = np.array(ds.lat)
# -*- coding: utf-8 -*-
"""
Time Domain Fit
===============
Equations to fit the frequency shift transient in the time domain
are developed here.
"""
from __future__ import division, print_function
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import h5py
import sklearn
from scipy import linalg, signal, optimize
import lockin # Switch to kpfm module at some point
import phasekick2 as pk2 # Switch to kpfm module at some point
import phasekick as pk
import tqdm
import sigutils
import munch
def avg_xt(tr, xr):
tr_cent = (tr - tr.mean()) * 1e6 # Use µs units here
A = np.c_[tr_cent ** 2, tr_cent, np.ones_like(tr_cent)]
val, err, __, ___ = linalg.lstsq(A, xr)
return val[-1], np.sqrt(err / (xr.size - 3)) / np.sqrt(xr.size)
def record_fit(f, xdata, ydata, p0=None, bounds=(-np.inf, np.inf), name=''):
fit_data = munch.Munch()
fit_data.p0 = p0
fit_data.popt, fit_data.pcov = optimize.curve_fit(f, xdata, ydata, p0=p0, bounds=bounds)
fit_data.x = f(xdata, *fit_data.popt)
fit_data.resid = ydata - fit_data.x
fit_data.name = name
return fit_data
def _remove_harmonics(b, popt, x_har, N_harmonics=5):
b.t_zeroed = b.t_ms - b.t_ms[0]
b.A = harN(b.t_zeroed, popt[2], N_harmonics, np.exp(-b.t_zeroed * popt[3]))
b.x_no_overtones = b.x - np.dot(b.A, x_har)
b.resid = b.x_no_overtones - damped_sine(b.t_zeroed, *popt)
def remove_harmonics(b, bdamp):
_remove_harmonics(b, bdamp.fit.popt, bdamp.x_har, bdamp.N_harmonics)
# def avg_trig_xt(tr, xr):
# tr_cent = (tr - tr.mean()) * 1e6 # Use µs units here
# A = np.c_[np.cos(0.062*2*np.pi*tr_cent),
# np.sin(0.062*2*np.pi*tr_cent),
# np.ones_like(tr_cent)]
# val, err, __, ___ = linalg.lstsq(A, xr)
# return (val[0] + val[-1]), np.sqrt(err / (xr.size - 3)) / np.sqrt(xr.size)
def damped_sine(t, X, Y, f, k, x0):
"""Sine wave with variable phase (X, Y), frequency f,
damping rate k, and DC offset x0."""
return np.exp(-k * t) * (X * np.cos(2*np.pi*f*t) + Y * np.sin(2*np.pi*f*t)) + x0
def _damped_sine_exp_A(t, X, Y, f, k, x0, df, k_tau, k1):
return np.where(t > 0,
np.exp(-k1 * t),
np.exp(-k * t))
def _damped_sine_exp_phi(t, X, Y, f, k, x0, df, k_tau, k1):
return np.where(t > 0,
2*np.pi*(f - df * np.expm1(-k_tau*t)) * t,
2*np.pi*f*t
)
def damped_sine_exp(t, X, Y, f, k, x0, df, k_tau, k1):
phi = _damped_sine_exp_phi(t, X, Y, f, k, x0, df, k_tau, k1)
A = _damped_sine_exp_A(t, X, Y, f, k, x0, df, k_tau, k1)
return A * (X * np.cos(phi) + Y*np.sin(phi)) + x0
def harN(t, f, N, A=1):
"""Generate harmonics of a given frequency for least squares fitting."""
cols = []
for i in range(2, N+1):
cols.extend([A*np.cos(t*2*np.pi*f*i), A*np.sin(t*2*np.pi*f*i)])
return np.c_[cols].T
def harNphi(phi, N, A=1):
"""Generate harmonics of a given frequency for least squares fitting."""
cols = []
for i in range(2, N+1):
cols.extend([A*np.cos(phi*i), A*np.sin(phi*i)])
return np.c_[cols].T
def fit_damped_sine_eliminate_harmonics(t, x, p0, N_harmonics=5):
b = munch.Munch()
# Copy input parameters
b.t = t
b.t_zeroed = b.t - b.t[0]
b.x = x
b.N_harmonics = N_harmonics
b.fit0 = record_fit(damped_sine, b.t_zeroed, x, p0=p0, name='initial fit')
b.A = harN(b.t_zeroed, b.fit0.popt[2], N_harmonics, A=np.exp(-t * b.fit0.popt[3]))
b.Afit = linalg.lstsq(b.A, b.fit0.resid)
b.x_har = b.Afit[0]
b.x_no_overtones = b.x - np.dot(b.A, b.x_har)
b.fit = record_fit(damped_sine, b.t_zeroed, b.x_no_overtones, p0=p0, name='final fit')
return b
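# Fit strategy above: an initial damped-sine fit (fit0) estimates f and
# k, harmonics 2..N at that frequency (sharing the same exponential
# envelope) are regressed out of the residual by linear least squares,
# and the damped sine is then re-fit on the harmonic-free signal.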
def signal_average_list(gr_list, ti, tf):
"""Utility function to signal average a group from an HDF5 file."""
b = munch.Munch()
xs = []
ts = []
for ds in tqdm.tqdm(gr_list):
t = pk.gr2t(ds)
t1 = ds.attrs['Abrupt BNC565 CantClk.t1 [s]']
t0 = -t1 - ds["half periods [s]"][0]
m = (t > ti) & (t < tf)
ts.append(t[m])
x = ds['cantilever-nm'][:]
xs.append(x[m])
ts = np.array(ts)
b.t = np.mean(ts, axis=0)
x_array = np.array(xs)
b.x = np.mean(x_array, axis=0)
m2 = b.t < 0.0
b.x = b.x - b.x[m2].mean()
b.t_ms = b.t * 1e3
b.t_us = b.t * 1e6
b.t5 = b.t[500:]
b.x5 = b.x[500:]
b.t5ms = b.t5*1e3
b.t5us = b.t5*1e6
try:
b.li = lockin.LockIn(b.t, b.x, 1e6)
b.li.lock2()
b.li.phase(tf=0)
b.li.name='data'
except TypeError:
print("TypeError")
return b
def signal_average_gr(gr, ti, tf):
"""Utility function to signal average a group from an HDF5 file."""
b = munch.Munch()
xs = []
ts = []
for ds in tqdm.tqdm(gr.values()):
t = pk.gr2t(ds)
t1 = ds.attrs['Abrupt BNC565 CantClk.t1 [s]']
t0 = -t1 - ds["half periods [s]"][0]
m = (t > ti) & (t < tf)
ts.append(t[m])
x = ds['cantilever-nm'][:]
xs.append(x[m])
ts = np.array(ts)
b.t = np.mean(ts, axis=0)
x_array = np.array(xs)
b.x = np.mean(x_array, axis=0)
m2 = b.t < 0.0
b.x = b.x - b.x[m2].mean()
b.t_ms = b.t * 1e3
b.t_us = b.t * 1e6
b.t5 = b.t[500:]
b.x5 = b.x[500:]
b.t5ms = b.t5*1e3
b.t5us = b.t5*1e6
try:
b.li = lockin.LockIn(b.t, b.x, 1e6)
b.li.lock2()
b.li.phase(tf=0)
b.li.name='data'
except TypeError:
print("TypeError")
return b
def signal_average_parab_list(gr_list, ti, tf, invert=True, align_voltage=False):
"""Utility function to signal average a group from an HDF5 file."""
b = munch.Munch()
xs = []
ts = []
if invert:
scale = -1
else:
scale = 1
for ds in tqdm.tqdm(gr_list):
# Move the zero in time to the initial voltage pulse
x = ds['cantilever-nm'][:]
dt = ds['dt [s]'].value
if align_voltage:
t1 = ds.attrs['Abrupt BNC565 CantClk.t1 [s]']
t0 = -t1 - ds["half periods [s]"][0]
t = np.arange(x.size)*dt + t0
else:
t = pk.gr2t(ds)
m = (t > ti) & (t < tf)
ts.append(t[m])
x = ds['cantilever-nm'][:]
xs.append(scale * x[m])
ts = np.array(ts)
b.t = np.mean(ts, axis=0)
xs = np.array(xs)
# Do proper signal averaging, fitting data to a parabola at each point
x_err = np.zeros(xs.shape[1])
x_bf = np.zeros(xs.shape[1])
for i, (tr, xr) in tqdm.tqdm(enumerate(zip(ts.T, xs.T))):
x, err = avg_xt(tr, xr)
x_err[i] = err
x_bf[i] = x
b.x = x_bf
b.x_err = x_err
m2 = b.t < 0.0
b.x0neg = b.x[m2].mean()
b.x = b.x - b.x0neg
b.t_ms = b.t * 1e3
b.t_us = b.t * 1e6
b.t5 = b.t[500:]
b.x5 = b.x[500:]
b.t5ms = b.t5*1e3
b.t5us = b.t5*1e6
try:
b.li = lockin.LockIn(b.t, b.x, 1e6)
b.li.lock2()
b.li.phase(tf=0)
b.li.name='data'
except TypeError:
print("TypeError")
return b
def signal_average_gr_parab(gr, ti, tf, invert=True, align_voltage=False):
"""Utility function to signal average a group from an HDF5 file."""
return signal_average_parab_list(gr.values(), ti, tf, invert=invert, align_voltage=align_voltage)
def pk_phase(f_i, df, f_f, tau, t0, tp):
return lambda t: 2 * np.pi * np.where(t <= t0, f_i * (t-t0),
np.where(t < tp, f_i*(t-t0)+ pk.phase_step(t-t0, tau, df),
f_i*(tp-t0)+ pk.phase_step(tp-t0, tau, df) + f_f*(t-tp)))
def pk_freq(f_i, df, f_f, tau, t0, tp):
return lambda t: np.where(t <= t0, f_i,
np.where(t < tp, f_i - df *np.expm1(-(t-t0)/tau), f_f)
)
def osc(phi, amp, X0, Y0):
return (X0 * np.cos(phi) + Y0 * np.sin(phi)) * amp
def osc_phase(t, phase, A, X0, Y0):
return osc(phase(t), A(t), X0, Y0)
def osc_freq(t, freq, A, X0, Y0):
dt = np.r_[0, np.diff(t)]
phi = np.cumsum(freq(t) * dt)
return osc(phi, A(t), X0, Y0)
def getA(ki, km, kf, ti, tf):
def A(t):
t_ = t - ti
Af = np.exp(-km*(tf-ti))
return np.where(t <= ti, np.exp(-ki * t_), np.where(t < tf,
np.exp(-km * t_), Af * np.exp(-(t-tf)*kf)))
return A
def get_xDCt(phaset, ft, A, tau, xDC0, dx_light, t0, tp):
delta = (ft(tp) / ft(t0))**2
r = 1 - delta
omega_tau = (2*np.pi*ft(t0)*tau)
omega_tau2 = omega_tau**2
omega_bar = (phaset(tp) - phaset(t0)) / (tp - t0)
xeq = lambda t: np.where(t <= t0, xDC0, np.where(t < tp, xDC0-dx_light*np.expm1(-(t-t0)/tau),0))
xresp = lambda t: np.where(t <= t0, xDC0, np.where(t < tp, r*(
xDC0 + dx_light -
dx_light * omega_tau2 / (1+omega_tau2) * np.exp(-(t-t0)/tau)
) +
delta*xDC0*np.cos(omega_bar*(t-t0)) -
dx_light * r /(1+omega_tau2) * (
np.cos(omega_bar * (t-t0)) + omega_tau*np.sin(omega_bar * (t-t0))
),np.nan))
xDC = lambda t: (xresp(t) - xeq(t)) * A(t)/A(t0) + xeq(t)
return xDC
def fit(f, xdata, ydata, p0=None, bounds=(-np.inf, np.inf), tfit=None, name=''):
fit_data = munch.Munch()
fit_data.popt, fit_data.pcov = optimize.curve_fit(f, xdata, ydata, p0=p0, bounds=bounds)
fit_data.x = f(xdata, *fit_data.popt)
fit_data.resid = ydata - fit_data.x
if tfit is None:
tfit = xdata
fs = 1.0/np.mean(np.diff(tfit))
li = lockin.LockIn(tfit, fit_data.x, fs)
li.lock2()
li.phase(tf=0)
fit_data.li = li
fit_data.name = name
fit_data.li.name = name
return fit_data
def workup_signal_averaged_force_data(b, ti_phase=-0.02, tf_phase=-0.001, T = 0.005):
fs = 1.0/np.mean(np.gradient(b.t))
li = lockin.LockIn(b.t5, b.x5, fs=fs)
li.f0_est = 65900
li.lock2(fp=500, fc=2000)
li.phase(ti=ti_phase, tf=tf_phase,adjust_f0=False)
m = pk.masklh(li.t, ti_phase, tf_phase)
mb = np.polyfit(li.t[m]-li.t[m][0], np.unwrap(li.phi[m]), 1)
li.lock2(fp=500, fc=2000, f0=li.f0 + mb[0] / (2*np.pi))
li.phase(ti=ti_phase/2, tf=tf_phase, adjust_f0=False)
t = li('t')
X = li('X')
Y = li('Y')
Npts = li.fir.size//8
mask = pk.masklh(t, -T, T)
mR = pk.masklh(t, 0, T)
mL = pk.masklh(t, -T, 0)
t_L = t[mL][:-Npts]
X_L = X[mL][:-Npts]
Y_L = Y[mL][:-Npts]
t_R = t[mR][Npts:]
X_R = X[mR][Npts:]
Y_R = Y[mR][Npts:]
X_mb_L = np.polyfit(t_L, X_L, 1)
Y_mb_L = np.polyfit(t_L, Y_L, 1)
X_mb_R = np.polyfit(t_R, X_R, 1)
Y_mb_R = np.polyfit(t_R, Y_R, 1)
dX = np.polyval(X_mb_R, 0) - np.polyval(X_mb_L, 0)
dY = np.polyval(Y_mb_R, 0) - np.polyval(Y_mb_L, 0)
dA = (dX**2 + dY**2)**0.5
return dA
def make_col(f, t, div):
t_div = t.reshape(-1, div)
col = np.zeros((t.size, t_div.shape[0]))
for i, tt in enumerate(t_div):
col[i*div:(i*div+div), i] = f(tt)
return col
def make_L(col, initial=1000.0):
N = col.shape[1]
L = np.zeros((N, N))
for j in range(N):
    for k in range(N):
if k == 0:
L[j,k] = initial
elif j >= k:
L[j,k] = 1.0
return L
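# make_col builds a block-diagonal design matrix: each column holds f(t)
# on one consecutive chunk of `div` samples, so a least-squares fit
# recovers a piecewise-constant amplitude for that basis function.
# make_L appears to be a lower-triangular cumulative-sum operator (with
# the first column scaled by `initial`), converting per-chunk increments
# into running totals.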
class WorkupForceEFM(object):
def __init__(self, fm, fc, Nm, Nc):
self.fm = fm
self.fc = fc
self.Nm = Nm
self.Nc = Nc
def __repr__(self):
return "WorkupForceEFM(fm={},fc={},Nm={},Nc={})".format(self.fm,
self.fc, self.Nm, self.Nc)
def __call__(self, t, x):
fm = self.fm
fc = self.fc
Nm = self.Nm
Nc = self.Nc
Npts_per_column = [Nm, Nm, Nm, Nm, Nc, Nc, Nc]
cols = [make_col(lambda t: np.cos(2*np.pi*fm*t), t, Nm),
make_col(lambda t: np.sin(2*np.pi*fm*t), t, Nm),
make_col(lambda t: np.cos(4*np.pi*fm*t), t, Nm),
make_col(lambda t: np.sin(4*np.pi*fm*t), t, Nm),
make_col(lambda t: np.cos(2*np.pi*fc*t), t, Nc),
make_col(lambda t: np.sin(2*np.pi*fc*t), t, Nc),
# ----------------------------------------------------------------------------
# Title: Scientific Visualisation - Python & Matplotlib
# Author: <NAME>
# License: BSD
# ----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure(figsize=(8, 4), dpi=100)
frames = 500
n = 8
X, Y, L = [], [], []
for row in range(n // 2):
T2 = np.linspace(0, row * 2 * np.pi, frames) if row > 0 else np.zeros(frames)
for col in range(n):
T1 = np.linspace(0, col * 2 * np.pi, frames)
from more_itertools import split_at, distinct_combinations
import numpy as np
np.core.arrayprint._line_width = 160
from collections import Counter
def convert(row):
return [1 if c == "#" else 0 for c in row]
def to_sig(row):
return row.dot(2**np.arange(row.size)[::-1])
signatures = {}
signatures_f = {}
all_sigs = Counter()
class Tile:
def __init__(self, tile_id, bitmap, ori):
self.tile_id = tile_id
self.bitmap = bitmap
self.ori = ori
def choices(self):
yield self.bitmap
yield np.rot90(self.bitmap)
yield np.rot90(self.bitmap, 2)
yield np.rot90(self.bitmap, 3)
yield np.fliplr(self.bitmap)
yield np.rot90(np.fliplr(self.bitmap))
yield np.rot90(np.fliplr(self.bitmap), 2)
yield np.rot90(np.fliplr(self.bitmap), 3)
def __repr__(self): return '% 4s(%d)' % (self.tile_id, self.ori)
from collections import Counter
all_sigs = Counter()
tiles = {}
for grp in split_at(map(str.rstrip, open("d20.txt")), lambda e: e == ""):
tile_id = int(grp[0][-5:-1])
bitmap = np.array([convert(row) for row in grp[1:]])
tiles[tile_id] = Tile(tile_id, bitmap, 0)
corners = [
(3539, 0), # top left
(2693, 2), # top right
(1549, 0), # bottom right
(3709, 0), # bottom left
]
UP, RIGHT = 0, 1
def compatible(a1, a2, dir):
if dir == RIGHT:
return np.all(a1[:, -1] == a2[:, 0])
elif dir == UP:
return np.all(a1[-1, :] == a2[0, :])
def find_compatible(left_tile, dir=RIGHT):
for tile in tiles.values():
if tile.tile_id == left_tile.tile_id: continue
for j, choice in enumerate(tile.choices()):
if compatible(left_tile.bitmap, choice, dir=dir):
# print(f'{left_tile.tile_id} {left_tile.bitmap[:, -1]} compatible with {tile.tile_id} {choice[:, 0]}')
yield choice, tile.tile_id, j
# return None, -1
solution = np.empty((12, 12), dtype=np.object)
solution[0, 0] = tiles[3539]
solution[-1, 0] = tiles[3709]
# solution[0, -1] = tiles[2693].rotate(2)
# solution[-1, -1] = tiles[1549]
disallowed = {3539, 3709}
i = 1
for i in range(1, 12):
for tile in tiles.values():
if tile.tile_id in disallowed: continue
compats = list(find_compatible(solution[0, i-1]))
if compats:
found_compatible, tile_id, j = compats[0]
solution[0, i] = Tile(tile_id, found_compatible, j)
disallowed.add(tile_id)
break
for j in range(1, 12):
for i in range(0, 12):
for tile in tiles.values():
if tile.tile_id in disallowed: continue
compats = list(find_compatible(solution[j-1, i], dir=UP))
if compats:
found_compatible, tile_id, k = compats[0]
solution[j, i] = Tile(tile_id, found_compatible, k)
disallowed.add(tile_id)
break
print(np.array2string(solution, max_line_width=np.inf))
solution_matrix = np.stack([e.bitmap for e in solution.ravel()])  # (144, 10, 10); np.stack requires a sequence, not a generator
unframed = solution_matrix[:, 1:-1, 1:-1].reshape((12, 12, 8, 8))
print(unframed.shape) # (12, 12, 8, 8)
image = np.zeros((96, 96), dtype=np.int)
import os
import numpy as np
from ctrm.environment import Instance, ObstacleBox, ObstacleSphere
from ctrm.planner import PrioritizedPlanning
from ctrm.roadmap import (
get_timed_roadamaps_SPARS_2d_common,
get_timed_roadmaps_grid_common,
get_timed_roadmaps_random_common,
)
from ctrm.viz import plot_trm_onestep, simple_plot_2d
def test_plot_2d():
ins = Instance(
2,
[np.array([0, 0]), np.array([1, 0])],
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import torch.optim as optim
from modules.models import MeanField, DynamicModel
from modules.training_tools import variational_update
from modules.networks import InferenceNet
from modules.dynamics import VolterraTransition
from modules.distributions import NormalDistribution
from modules.emissions import SingleCoordinateEmission
# Simulation parameters
num_iterations = 3000
batch_size = 200
# Model
T = 100
dt = 0.5
sigma = np.sqrt(dt)*0.5
initial_sigma = 3.
initial_mean = 0.
d_x = 2 #Number of latent variables
d_eps = 10
dist = NormalDistribution()
lk_sigma = 3.
#transition_model = lambda x,m: x #
transition_model = VolterraTransition(dt=dt)
# Likelihood
observation_gain = 1.
emission_model = SingleCoordinateEmission(k=0, gain=observation_gain)
emission_dist = NormalDistribution(scale=lk_sigma)
### Prior model ###
prior_model = DynamicModel(sigma=sigma, initial_sigma=initial_sigma, distribution=dist, d_x=d_x,
transition=transition_model,
emission=emission_model,
emission_distribution=emission_dist,
observation_gain=observation_gain, T=T, initial_mean=initial_mean)
### Cascading flow ###
print("Train cascading flows")
inference_net = InferenceNet(in_size=T, out_size=T*d_x, n_hidden=T, out_shape=(T,d_x), eps=0.1)
variational_model = MeanField(T,d_x, inference_net=inference_net)
loss_list = []
params_list = [inference_net.parameters()]
params = []
for p in params_list:
params += p
optimizer = optim.Adam(params, lr=0.001)
# generate ground truth
#X_true, Y, mu = prior_model.sample_observations(batch_size)
#data = Y.view((batch_size,1,T))
# generate ground truth
#X_true, Y, mu = prior_model.sample_observations(1)
#data = Y[0, :].view((1, T))
for itr in tqdm(range(num_iterations)):
# generate ground truth
X_true, Y, mu = prior_model.sample_observations(batch_size)
data = Y #[0, :].view((1, T))
# Variational update
loss = variational_update(prior_model, variational_model, data, optimizer, batch_size, amortized=True)
# Loss
loss_list.append(float(loss.detach().numpy()))
## Plot results ##
plt.plot(loss_list)
plt.show()
# generate ground truth
M = 100
for _ in range(4):
#X_true, Y, mu = prior_model.sample_observations(1)
#data = Y.view((1,1,T)).repeat((M,1,1))
#data = Y.view((1, 1, T)).repeat((M, 1, 1))
# generate ground truth
X_true, Y, mu = prior_model.sample_observations(1)
data = Y[0, :].view((1,T)).repeat((M, 1))
X, _, _, _, _ = variational_model.sample_timeseries(M, data)
x = X.detach().numpy()[:,0,:]
x_tr = X_true.detach().numpy()[0,0,:]
#y = data.detach().numpy()[:, 0, :]
#t_range = np.tile(np.linspace(0.,T*dt, T), (M,1))
plt.plot(np.transpose(x), alpha=0.5)
    plt.plot(np.transpose(x_tr))
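    # Plausible continuation (assumption, not from the original file): show
    # each posterior-samples-vs-truth overlay before the next repetition.
    plt.show()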
import numpy as np
from math import ceil
def deriveSizeFromScale(img_shape, scale):
output_shape = []
for k in range(2):
output_shape.append(int(ceil(scale[k] * img_shape[k])))
return output_shape
def deriveScaleFromSize(img_shape_in, img_shape_out):
scale = []
for k in range(2):
scale.append(1.0 * img_shape_out[k] / img_shape_in[k])
return scale
def cubic(x):
x = np.array(x).astype(np.float64)
absx = np.absolute(x)
absx2 = np.multiply(absx, absx)
absx3 = np.multiply(absx2, absx)
f = np.multiply(1.5*absx3 - 2.5*absx2 + 1, absx <= 1) + np.multiply(-0.5*absx3 + 2.5*absx2 - 4*absx + 2, (1 < absx) & (absx <= 2))
return f
def contributions(in_length, out_length, scale, kernel, k_width):
if scale < 1:
h = lambda x: scale * kernel(scale * x)
kernel_width = 1.0 * k_width / scale
else:
h = kernel
kernel_width = k_width
x = np.arange(1, out_length+1).astype(np.float64)
u = x / scale + 0.5 * (1 - 1 / scale)
left = np.floor(u - kernel_width / 2)
P = int(ceil(kernel_width)) + 2
ind = np.expand_dims(left, axis=1) + np.arange(P) - 1 # -1 because indexing from 0
indices = ind.astype(np.int32)
weights = h(np.expand_dims(u, axis=1) - indices - 1) # -1 because indexing from 0
weights = np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1))
aux = np.concatenate((np.arange(in_length), np.arange(in_length - 1, -1, step=-1))).astype(np.int32)
indices = aux[np.mod(indices, aux.size)]
ind2store = np.nonzero(np.any(weights, axis=0))
weights = weights[:, ind2store]
indices = indices[:, ind2store]
return weights, indices
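# Example (sketch, not in the original source): for a 2x downscale of a
# length-8 signal with the cubic kernel above,
#     weights, indices = contributions(8, 4, 0.5, cubic, 4.0)
# returns one row of (weight, index) pairs per output sample; each output
# sample is the weighted sum of the indexed input samples.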
def imresizemex(inimg, weights, indices, dim):
in_shape = inimg.shape
w_shape = weights.shape
out_shape = list(in_shape)
out_shape[dim] = w_shape[0]
outimg = np.zeros(out_shape)
if dim == 0:
for i_img in range(in_shape[1]):
for i_w in range(w_shape[0]):
w = weights[i_w, :]
ind = indices[i_w, :]
im_slice = inimg[ind, i_img].astype(np.float64)
                outimg[i_w, i_img] = np.sum(np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0)
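    # Assumed continuation (sketch): mirrors the widely circulated Python port
    # of MATLAB's imresize, with the symmetric branch along axis 1 and uint8
    # rounding on the way out; not verified against the original file.
    elif dim == 1:
        for i_img in range(in_shape[0]):
            for i_w in range(w_shape[0]):
                w = weights[i_w, :]
                ind = indices[i_w, :]
                im_slice = inimg[i_img, ind].astype(np.float64)
                outimg[i_img, i_w] = np.sum(np.multiply(np.squeeze(im_slice, axis=0), w.T), axis=0)
    if inimg.dtype == np.uint8:
        outimg = np.clip(outimg, 0, 255)
        return np.around(outimg).astype(np.uint8)
    return outimg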
from numba import njit, boolean, int64, float64
from numba.experimental import jitclass
import numpy as np
from .utils import isin
@jitclass([
('value', float64[:]),
('sign', float64[:]),
('size', int64)
])
class signed:
def __init__(self, value, sign=None):
""" If sign is None, init from value in 'linear' space, possibly negative.
Else, init from value in log-space.
"""
if sign is None:
self.value = np.log(np.absolute(value))
            self.sign = np.sign(value)
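        # Assumed continuation (sketch): when a sign is given, `value` is
        # taken to be in log-space already.
        else:
            self.value = value
            self.sign = sign
        self.size = self.value.size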
import numpy as np
import pytest
from dnnv.nn.graph import OperationGraph
from dnnv.nn import operations
from dnnv.properties.expressions import Network
from dnnv.verifiers.common.reductions.iopolytope import *
from dnnv.verifiers.common.reductions.iopolytope import Variable
def setup_function():
Variable._count = 0
def test_init_merge():
input_op_0 = operations.Input((1, 5), np.dtype(np.float32))
add_op_0 = operations.Add(input_op_0, 1)
op_graph_0 = OperationGraph([add_op_0])
N0 = Network("N0").concretize(op_graph_0)
input_op_1 = operations.Input((1, 5), np.dtype(np.float32))
sub_op_1 = operations.Sub(input_op_1, 1)
op_graph_1 = OperationGraph([sub_op_1])
N1 = Network("N1").concretize(op_graph_1)
input_constraint = HalfspacePolytope()
output_constraint = HalfspacePolytope()
prop = IOPolytopeProperty([N0, N1], input_constraint, output_constraint)
assert len(prop.op_graph.output_operations) == 2
assert isinstance(prop.op_graph.output_operations[0], operations.Add)
assert isinstance(prop.op_graph.output_operations[1], operations.Sub)
assert len(prop.op_graph.input_details) == 1
def test_str():
input_op = operations.Input((1, 5), np.dtype(np.float32))
add_op = operations.Add(input_op, 1)
op_graph = OperationGraph([add_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 5))
input_constraint = HalfspacePolytope(vi)
input_constraint.update_constraint([vi], np.array([(0, 1)]), np.array([1.0]), 5.0)
vo = Variable((1, 5))
output_constraint = HalfspacePolytope(vo)
output_constraint.update_constraint([vo], np.array([(0, 0)]), np.array([2.0]), 12.0)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
assert str(prop) == (
"Property:\n"
" Networks:\n"
" [Network('N')]\n"
" Input Constraint:\n"
" 1.0 * x_0[(0, 1)] <= 5.0\n"
" Output Constraint:\n"
" 2.0 * x_1[(0, 0)] <= 12.0"
)
def test_validate_counter_example_true():
input_op = operations.Input((1, 2), np.dtype(np.float32))
matmul_op = operations.MatMul(input_op, np.array([[1.0], [1.0]], dtype=np.float32))
add_op = operations.Add(matmul_op, 1)
op_graph = OperationGraph([add_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 2))
input_constraint = HalfspacePolytope(vi)
variables = [vi, vi]
indices = np.array([(0, 0), (0, 1)])
coeffs = np.array([1.0, 1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, -1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([1.0, -1.0])
b = np.array(4)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, 1.0])
b = np.array(11)
input_constraint.update_constraint(variables, indices, coeffs, b)
vo = Variable((1, 1))
output_constraint = HalfspacePolytope(vo)
variables = [vo]
indices = np.array([(0, 0)])
coeffs = np.array([1.0])
b = np.array(2)
output_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
x = np.array([[0.0, 0.0]]).astype(np.float32)
assert prop.validate_counter_example(x)[0]
x = np.array([[0.5, 0.5]]).astype(np.float32)
assert prop.validate_counter_example(x)[0]
x = np.array([[-1.0, 0.0]]).astype(np.float32)
assert prop.validate_counter_example(x)[0]
def test_validate_counter_example_false():
input_op = operations.Input((1, 2), np.dtype(np.float32))
matmul_op = operations.MatMul(input_op, np.array([[1.0], [1.0]], dtype=np.float32))
add_op = operations.Add(matmul_op, 1)
op_graph = OperationGraph([add_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 2))
input_constraint = HalfspacePolytope(vi)
variables = [vi, vi]
indices = np.array([(0, 0), (0, 1)])
coeffs = np.array([1.0, 1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, -1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([1.0, -1.0])
b = np.array(4)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, 1.0])
b = np.array(11)
input_constraint.update_constraint(variables, indices, coeffs, b)
vo = Variable((1, 1))
output_constraint = HalfspacePolytope(vo)
variables = [vo]
indices = np.array([(0, 0)])
coeffs = np.array([1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0])
b = np.array(1)
output_constraint.update_constraint(variables, indices, coeffs, b)
prop = IOPolytopeProperty([N], input_constraint, output_constraint)
x = np.array([[0.0, 110.0]]).astype(np.float32)
assert not prop.validate_counter_example(x)[0]
x = np.array([[1.0, 0.5]]).astype(np.float32)
assert not prop.validate_counter_example(x)[0]
def test_suffixed_op_graph():
input_op = operations.Input((1, 2), np.dtype(np.float32))
matmul_op = operations.MatMul(input_op, np.array([[1.0], [1.0]], dtype=np.float32))
add_op = operations.Add(matmul_op, 1)
relu_op = operations.Relu(add_op)
op_graph = OperationGraph([relu_op])
N = Network("N").concretize(op_graph)
vi = Variable((1, 2))
input_constraint = HalfspacePolytope(vi)
variables = [vi, vi]
indices = np.array([(0, 0), (0, 1)])
coeffs = np.array([1.0, 1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([-1.0, -1.0])
b = np.array(2)
input_constraint.update_constraint(variables, indices, coeffs, b)
coeffs = np.array([1.0, -1.0])
b = np.array(4)
input_constraint.update_constraint(variables, indices, coeffs, b)
    coeffs = np.array([-1.0, 1.0])
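    # Plausible continuation (sketch), mirroring the identical constraint
    # setup in the tests above; not taken from the original file:
    b = np.array(11)
    input_constraint.update_constraint(variables, indices, coeffs, b)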
import numpy as np
from numpy.lib.npyio import save
import vtk
from vtk.util.numpy_support import vtk_to_numpy
from vtk.util.numpy_support import numpy_to_vtk
import matplotlib.tri as mtri
import os
import sys
from scipy.interpolate import griddata
from scipy.ndimage.filters import gaussian_filter
from scipy.ndimage.filters import rank_filter
import matplotlib.pyplot as plt
from shapely.geometry import LineString
import math
import matplotlib.ticker as ticker
import matplotlib.colors as colors
plt.style.use('seaborn-bright')
#plt.style.use('dark_background')
positions_columns = {'time': 0, 'rank': 1, "posx": 2, "posy": 3, "radius": 4, "S0": 5, "S1": 6, "velx": 7, "vely": 8, "angle": 9, "total_int": 10,\
"neighbours": 11, "growth_rate": 12}
outdir = '/scratch/ws/1/haja565a-workspace2/master_thesis/output'
savedir = "/scratch/ws//1/haja565a-workspace2/DeformationField/"
expName = sys.argv[1]
totalCells = int(sys.argv[2])
time = float(sys.argv[3])
ranks = list(range(0,totalCells))
print("saving" + expName)
saveVTK = False
dt = 0.005
if not os.path.exists(savedir+expName):
os.makedirs(savedir+expName)
interpolation_steps = 1000
domain_size = 100
x = np.linspace(0, domain_size,interpolation_steps)
xx, yy = np.meshgrid(x,x)
cx, cy = xx-50, yy-50
angpos = np.arctan2(cy,cx) #angular position of the pixel
positions_columns = {'time': 0, 'rank': 1, "posx": 2, "posy": 3, "radius": 4, "S0": 5, "S1": 6, "velx": 7, "vely": 8, "angle": 9, "total_int": 10,\
"neighbours": 11, "confine_int": 12, "growth_rate": 13}
#ageplot = False
#if(ageplot):
age = np.load("/scratch/ws//1/haja565a-workspace2/quant/" + expName + "/age.npy")
"""
Tests for fast version of ARMA innovations algorithm
"""
import numpy as np
import pytest
from numpy.testing import assert_equal, assert_allclose
from statsmodels.tsa.arima_process import arma_acovf
from statsmodels.tsa.innovations import _arma_innovations, arma_innovations
from statsmodels.tsa.statespace.sarimax import SARIMAX
def test_brockwell_davis_ex533():
    # See Brockwell and Davis (2009) - Time Series Theory and Methods
    # Example 5.3.3: ARMA(1, 1) process, p. 177
nobs = 10
ar_params = np.array([0.2])
ma_params = np.array([0.4])
sigma2 = 8.92
p = len(ar_params)
q = len(ma_params)
m = max(p, q)
ar = np.r_[1, -ar_params]
ma = np.r_[1, ma_params]
# First, get the autocovariance of the process
arma_process_acovf = arma_acovf(ar, ma, nobs=nobs, sigma2=sigma2)
unconditional_variance = (
sigma2 * (1 + 2 * ar_params[0] * ma_params[0] + ma_params[0]**2) /
(1 - ar_params[0]**2))
assert_allclose(arma_process_acovf[0], unconditional_variance)
# Next, get the autocovariance of the transformed process
# Note: as required by {{prefix}}arma_transformed_acovf, we first divide
# through by sigma^2
arma_process_acovf /= sigma2
unconditional_variance /= sigma2
out = np.array(_arma_innovations.darma_transformed_acovf_fast(
ar, ma, arma_process_acovf))
acovf = np.array(out[0])
acovf2 = np.array(out[1])
# `acovf` is an m^2 x m^2 matrix, where m = max(p, q)
# but it is only valid for the autocovariances of the first m observations
# (this means in particular that the block `acovf[m:, m:]` should *not* be
# used)
# `acovf2` then contains the (time invariant) autocovariance terms for
# the observations m + 1, ..., nobs - since the autocovariance is the same
# for these terms, to save space we do not construct the autocovariance
# matrix as we did for the first m terms. Thus `acovf2[0]` is the variance,
# `acovf2[1]` is the first autocovariance, etc.
# Test the autocovariance function for observations m + 1, ..., nobs
# (it is time invariant here)
assert_equal(acovf2.shape, (nobs - m,))
assert_allclose(acovf2[0], 1 + ma_params[0]**2)
assert_allclose(acovf2[1], ma_params[0])
assert_allclose(acovf2[2:], 0)
# Test the autocovariance function for observations 1, ..., m
# (it is time varying here)
assert_equal(acovf.shape, (m * 2, m * 2))
# (we need to check `acovf[:m * 2, :m]`, i.e. `acovf[:2, :1])`
ix = np.diag_indices_from(acovf)
ix_lower = (ix[0][:-1] + 1, ix[1][:-1])
# acovf[ix] is the diagonal, and we want to check the first m
# elements of the diagonal
assert_allclose(acovf[ix][:m], unconditional_variance)
# acovf[ix_lower] is the first lower off-diagonal
assert_allclose(acovf[ix_lower][:m], ma_params[0])
# Now, check that we compute the moving average coefficients and the
# associated variances correctly
out = _arma_innovations.darma_innovations_algo_fast(
nobs, ar_params, ma_params, acovf, acovf2)
theta = np.array(out[0])
v = np.array(out[1])
# Test v (see eq. 5.3.13)
desired_v = np.zeros(nobs)
desired_v[0] = unconditional_variance
for i in range(1, nobs):
desired_v[i] = 1 + (1 - 1 / desired_v[i - 1]) * ma_params[0]**2
assert_allclose(v, desired_v)
# Test theta (see eq. 5.3.13)
# Note that they will have shape (nobs, m + 1) here, not (nobs, nobs - 1)
# as in the original (non-fast) version
assert_equal(theta.shape, (nobs, m + 1))
desired_theta = np.zeros(nobs)
for i in range(1, nobs):
desired_theta[i] = ma_params[0] / desired_v[i - 1]
assert_allclose(theta[:, 0], desired_theta)
assert_allclose(theta[:, 1:], 0)
# Test against Table 5.3.1
endog = np.array([
-1.1, 0.514, 0.116, -0.845, 0.872, -0.467, -0.977, -1.699, -1.228,
-1.093])
u = _arma_innovations.darma_innovations_filter(endog, ar_params, ma_params,
theta)
# Note: Table 5.3.1 has \hat X_n+1 = -0.5340 for n = 1, but this seems to
# be a typo, since equation 5.3.12 gives the form of the prediction
# equation as \hat X_n+1 = \phi X_n + \theta_n1 (X_n - \hat X_n)
# Then for n = 1 we have:
# \hat X_n+1 = 0.2 (-1.1) + (0.2909) (-1.1 - 0) = -0.5399
# And for n = 2 if we use what we have computed, then we get:
# \hat X_n+1 = 0.2 (0.514) + (0.3833) (0.514 - (-0.54)) = 0.5068
# as desired, whereas if we used the book's number for n=1 we would get:
# \hat X_n+1 = 0.2 (0.514) + (0.3833) (0.514 - (-0.534)) = 0.5045
# which is not what Table 5.3.1 shows.
desired_hat = np.array([
0, -0.540, 0.5068, -0.1321, -0.4539, 0.7046, -0.5620, -0.3614,
-0.8748, -0.3869])
desired_u = endog - desired_hat
assert_allclose(u, desired_u, atol=1e-4)
def test_brockwell_davis_ex534():
# See Brockwell and Davis (2009) - Time Series Theory and Methods
    # Example 5.3.4: ARMA(2, 3) process, p. 178
nobs = 10
ar_params = np.array([1, -0.24])
ma_params = np.array([0.4, 0.2, 0.1])
sigma2 = 1
p = len(ar_params)
q = len(ma_params)
m = max(p, q)
ar = np.r_[1, -ar_params]
ma = np.r_[1, ma_params]
# First, get the autocovariance of the process
arma_process_acovf = arma_acovf(ar, ma, nobs=nobs, sigma2=sigma2)
assert_allclose(arma_process_acovf[:3],
[7.17133, 6.44139, 5.06027], atol=1e-5)
# Next, get the autocovariance of the transformed process
out = np.array(_arma_innovations.darma_transformed_acovf_fast(
ar, ma, arma_process_acovf))
acovf = np.array(out[0])
acovf2 = np.array(out[1])
# See test_brockwell_davis_ex533 for details on acovf vs acovf2
# Test acovf
assert_equal(acovf.shape, (m * 2, m * 2))
ix = np.diag_indices_from(acovf)
ix_lower1 = (ix[0][:-1] + 1, ix[1][:-1])
ix_lower2 = (ix[0][:-2] + 2, ix[1][:-2])
ix_lower3 = (ix[0][:-3] + 3, ix[1][:-3])
ix_lower4 = (ix[0][:-4] + 4, ix[1][:-4])
assert_allclose(acovf[ix][:m], 7.17133, atol=1e-5)
desired = [6.44139, 6.44139, 0.816]
assert_allclose(acovf[ix_lower1][:m], desired, atol=1e-5)
assert_allclose(acovf[ix_lower2][0], 5.06027, atol=1e-5)
assert_allclose(acovf[ix_lower2][1:m], 0.34, atol=1e-5)
assert_allclose(acovf[ix_lower3][:m], 0.1, atol=1e-5)
assert_allclose(acovf[ix_lower4][:m], 0, atol=1e-5)
# Test acovf2
assert_equal(acovf2.shape, (nobs - m,))
assert_allclose(acovf2[:4], [1.21, 0.5, 0.24, 0.1])
assert_allclose(acovf2[4:], 0)
# Test innovations algorithm output
out = _arma_innovations.darma_innovations_algo_fast(
nobs, ar_params, ma_params, acovf, acovf2)
theta = np.array(out[0])
v = np.array(out[1])
# Test v (see Table 5.3.2)
desired_v = [7.1713, 1.3856, 1.0057, 1.0019, 1.0016, 1.0005, 1.0000,
1.0000, 1.0000, 1.0000]
assert_allclose(v, desired_v, atol=1e-4)
# Test theta (see Table 5.3.2)
assert_equal(theta.shape, (nobs, m + 1))
desired_theta = np.array([
[0, 0.8982, 1.3685, 0.4008, 0.3998, 0.3992, 0.4000, 0.4000, 0.4000,
0.4000],
[0, 0, 0.7056, 0.1806, 0.2020, 0.1995, 0.1997, 0.2000, 0.2000, 0.2000],
[0, 0, 0, 0.0139, 0.0722, 0.0994, 0.0998, 0.0998, 0.0999, 0.1]]).T
assert_allclose(theta[:, :m], desired_theta, atol=1e-4)
assert_allclose(theta[:, m:], 0)
# Test innovations filter output
endog = np.array([1.704, 0.527, 1.041, 0.942, 0.555, -1.002, -0.585, 0.010,
-0.638, 0.525])
u = _arma_innovations.darma_innovations_filter(endog, ar_params, ma_params,
theta)
desired_hat = np.array([
0, 1.5305, -0.1710, 1.2428, 0.7443, 0.3138, -1.7293, -0.1688,
0.3193, -0.8731])
desired_u = endog - desired_hat
assert_allclose(u, desired_u, atol=1e-4)
@pytest.mark.parametrize("ar_params,ma_params,sigma2", [
(np.array([]), np.array([]), 1),
(np.array([0.]), np.array([0.]), 1),
(np.array([0.9]), np.array([]), 1),
(np.array([]), np.array([0.9]), 1),
(np.array([0.2, -0.4, 0.1, 0.1]), np.array([0.5, 0.1]), 1.123),
(np.array([0.5, 0.1]), np.array([0.2, -0.4, 0.1, 0.1]), 1.123),
])
def test_innovations_algo_filter_kalman_filter(ar_params, ma_params, sigma2):
# Test the innovations algorithm and filter against the Kalman filter
# for exact likelihood evaluation of an ARMA process
ar = np.r_[1, -ar_params]
ma = np.r_[1, ma_params]
endog = np.random.normal(size=10)
nobs = len(endog)
# Innovations algorithm approach
arma_process_acovf = arma_acovf(ar, ma, nobs=nobs, sigma2=sigma2)
acovf, acovf2 = np.array(_arma_innovations.darma_transformed_acovf_fast(
ar, ma, arma_process_acovf / sigma2))
theta, r = _arma_innovations.darma_innovations_algo_fast(
nobs, ar_params, ma_params, acovf, acovf2)
u = _arma_innovations.darma_innovations_filter(endog, ar_params, ma_params,
theta)
v = np.array(r) * sigma2
u = np.array(u)
llf_obs = -0.5 * u**2 / v - 0.5 * np.log(2 * np.pi * v)
    # Kalman filter approach
mod = SARIMAX(endog, order=(len(ar_params), 0, len(ma_params)))
res = mod.filter(np.r_[ar_params, ma_params, sigma2])
# Test that the two approaches are identical
assert_allclose(u, res.forecasts_error[0])
# assert_allclose(theta[1:, 0], res.filter_results.kalman_gain[0, 0, :-1])
assert_allclose(llf_obs, res.llf_obs)
# Get llf_obs directly
llf_obs2 = _arma_innovations.darma_loglikeobs_fast(
endog, ar_params, ma_params, sigma2)
assert_allclose(llf_obs2, res.llf_obs)
@pytest.mark.parametrize("ar_params,ma_params,sigma2", [
(np.array([]), np.array([]), 1),
(np.array([0.]), np.array([0.]), 1),
(np.array([0.9]), np.array([]), 1),
(np.array([]), np.array([0.9]), 1),
(np.array([0.2, -0.4, 0.1, 0.1]), np.array([0.5, 0.1]), 1.123),
(np.array([0.5, 0.1]), np.array([0.2, -0.4, 0.1, 0.1]), 1.123),
])
def test_innovations_algo_direct_filter_kalman_filter(ar_params, ma_params,
sigma2):
# Test the innovations algorithm and filter against the Kalman filter
# for exact likelihood evaluation of an ARMA process, using the direct
# function.
endog = np.random.normal(size=10)
# Innovations algorithm approach
u, r = arma_innovations.arma_innovations(endog, ar_params, ma_params,
sigma2)
v = np.array(r) * sigma2
u = np.array(u)
llf_obs = -0.5 * u**2 / v - 0.5 * np.log(2 * np.pi * v)
    # Kalman filter approach
mod = SARIMAX(endog, order=(len(ar_params), 0, len(ma_params)))
res = mod.filter(np.r_[ar_params, ma_params, sigma2])
# Test that the two approaches are identical
assert_allclose(u, res.forecasts_error[0])
# assert_allclose(theta[1:, 0], res.filter_results.kalman_gain[0, 0, :-1])
assert_allclose(llf_obs, res.llf_obs)
# Get llf_obs directly
llf_obs2 = _arma_innovations.darma_loglikeobs_fast(
endog, ar_params, ma_params, sigma2)
assert_allclose(llf_obs2, res.llf_obs)
@pytest.mark.parametrize("ar_params,diff,ma_params,sigma2", [
(np.array([]), 1, np.array([]), 1),
(np.array([0.]), 1, np.array([0.]), 1),
(np.array([0.9]), 1, np.array([]), 1),
    (np.array([]), 1, np.array([0.9]), 1),
# -*- coding: utf-8 -*-
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import torch
import torch.optim as optim
from torchvision import transforms, models
def model():
# get the "features" portion of VGG19 (we will not need the "classifier" portion)
vgg = models.vgg19(pretrained=True).features
# freeze all VGG parameters since we're only optimizing the target image
for param in vgg.parameters():
param.requires_grad_(False)
return vgg
def load_image(img_path, max_size=400, shape=None):
''' Load in and transform an image, making sure the image
is <= 400 pixels in the x-y dims.'''
image = Image.open(img_path).convert('RGB')
# large images will slow down processing
if max(image.size) > max_size:
size = max_size
else:
size = max(image.size)
if shape is not None:
size = shape
in_transform = transforms.Compose([
transforms.Resize(size),
transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406),
(0.229, 0.224, 0.225))])
# discard the transparent, alpha channel (that's the :3) and add the batch dimension
image = in_transform(image)[:3,:,:].unsqueeze(0)
return image
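# Example usage (sketch; the file paths are placeholder assumptions):
#     content = load_image('images/content.jpg')
#     style = load_image('images/style.jpg', shape=content.shape[-2:])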
# helper function for un-normalizing an image
# and converting it from a Tensor image to a NumPy image for display
def im_convert(tensor):
""" Display a tensor as an image. """
image = tensor.to("cpu").clone().detach()
image = image.numpy().squeeze()
image = image.transpose(1,2,0)
    image = image * np.array((0.229, 0.224, 0.225)) + np.array((0.485, 0.456, 0.406))
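    # Assumed continuation (standard un-normalization epilogue; a sketch, not
    # taken from the original file):
    image = image.clip(0, 1)
    return image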
"""
Copyright (c) 2018, <NAME>, <NAME>, <NAME>
https://github.com/spagliarini
Mnemosyne team, Inria, Bordeaux, France
https://team.inria.fr/mnemosyne/fr/
Distributed under the BSD-2-Clause License
PLOT: comparison between different normalizations
(Fig. 4)
"""
import os
import numpy as np
import matplotlib.pyplot as plt
csfont = {'fontname':'Times New Roman'}
os.chdir('C://Users//Mnemosyne//Documents//Python Scripts//InverseModelBirdsong//results//IMsimple_model//AllNormalizations//NormalizedAN') #or NormalizedMN
#load distance files
err_wn=np.load('Distance_simple.npy')
err_2avrg=np.load('Distance_2avrg.npy')
err_2norm=np.load('Distance_2norm.npy')
err_3avrg=np.load('Distance_3avrg.npy')
err_3norm=np.load('Distance_3norm.npy')
err_4avrg=np.load('Distance_4avrg.npy')
err_4norm=np.load('Distance_4norm.npy')
err_5avrg=np.load('Distance_5avrg.npy')
err_5norm=np.load('Distance_5norm.npy')
#definition of the end_time
end_time=err_wn.shape[1]
#smooth average distance over all the simulations
mean_err_wn=np.zeros((end_time,1))
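# One plausible smoothing (sketch, assumption): average across simulations,
# then take a trailing moving average over time.
window = 100
avg_err_wn = np.mean(err_wn, axis=0)
for t in range(end_time):
    mean_err_wn[t] = np.mean(avg_err_wn[max(0, t - window):t + 1])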
def kmeans(data, k):
import numpy as np
if isinstance(k,int):
if isinstance(data,np.ndarray):
if data.shape[1]==2:
if data.shape[0]>=k:
centers=np.random.rand(k,2)
while True:
distance=sq_euc(data,centers)
index=np.argmin(distance,1)+1
                        results=np.zeros((k,2))
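                        # Plausible continuation (sketch, assuming sq_euc
                        # returns squared Euclidean distances): update each
                        # center to the mean of its assigned points and stop
                        # once the centers no longer move.
                        for c in range(k):
                            members = data[index == c + 1]
                            if len(members) > 0:
                                results[c] = members.mean(axis=0)
                        if np.allclose(results, centers):
                            break
                        centers = results.copy()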
#!/usr/bin/python3.6
# -*- coding: utf-8 -*-
"""
Created on Sun Oct 03 21:05:00 2021
@author: iv
"""
import sys
import os
import pandas as pd
import numpy as np
from textblob import TextBlob
import re
from textblob.sentiments import NaiveBayesAnalyzer
from googletrans import Translator
import unicodedata
### SYSTEM DATA ###
if '__file__' in locals():
if locals()['__file__'] == '<input>':
wd = os.path.split(os.path.realpath(__file__))[0]
wd += '/'
sys.path.append(wd)
os.chdir(wd)
del locals()['__file__']
else:
wd = os.path.dirname(__file__)
wd += '/'
sys.path.append(wd)
os.chdir(wd)
else:
wd = os.path.abspath("./Documents/Repositorio_Iv/CryptoRRSS")
wd += '/'
sys.path.append(wd)
def get_name(x):
result = x['screen_name']
return result
def sent_analisys(x):
blob_object = TextBlob(x, analyzer=NaiveBayesAnalyzer())
analysis = blob_object.sentiment
analysis = '$'.join([str(x) for x in analysis])
return analysis
def filtertext(x, excel_file):
df_palabras = pd.read_excel(wd + excel_file)
df_palabras = df_palabras.fillna(0)
lista_words = list(df_palabras['PALABRAS'].values) + \
list(df_palabras['hastag'].values) + \
list(df_palabras['arroba'].values)
    # lista_words = list(filter((0).__ne__, lista_words))  # this would also work
lista_words = [x for x in lista_words if x != 0]
result = []
for word in lista_words:
tag = bool(re.search(word, x.lower()))
result.append(tag)
return max(result)
def translate_en(x, lang='en'):
translator = Translator()
result = translator.translate(x, dest=lang).text
return result
def cleantext(x):
result = unicodedata.normalize('NFD', x).encode("utf8").decode("ascii", "ignore")
result = re.sub('[%+\\\+\(+\)+&+\n+\r+./]', ' ', result)
result = re.sub(' +', ' ', result)
result = result.strip()
return result
# userid_list = ('CriptoNoticias', 'bit2me', 'MundoCrypto_ES', 'Tesla',
# 'cryptocom', 'elonmusk', 'nayibbukele', 'Cointelegraph', 'crypto', 'CoinMarketCap',
# 'ForbesCrypto', 'CryptoBoomNews', 'BTCTN', 'solana', 'CoinbasePro', 'coingecko', 'CoinDesk',
# 'blockchain', 'healthy_pockets', 'wallstwolverine'
# )
userid_list = ('CriptoNoticias', 'coingecko', 'CoinDesk', 'blockchain', 'MundoCrypto_ES', 'bit2me', 'healthy_pockets',
'wallstwolverine', 'elonmusk', 'cryptocom', 'CryptoBoomNews', 'Cointelegraph', 'crypto', 'CoinMarketCap'
)
def json_sentiment(api, userid_list=userid_list, count_twits=3):
twits_df = pd.DataFrame()
for userid in userid_list:
tweets = api.user_timeline(screen_name=userid,
# 200 is the maximum allowed count
count=count_twits,
include_rts=False,
# Necessary to keep full_text
# otherwise only the first 140 words are extracted
tweet_mode='extended'
)
tweets_1 = [x._json for x in tweets]
twits_df_1 = pd.DataFrame(tweets_1)
twits_df = pd.concat([twits_df, twits_df_1])
twits_df['full_text'] = np.vectorize(cleantext)(twits_df['full_text'])
    twits_df['has_keys'] = np.vectorize(filtertext)
'''
validate survey simulations using CMX data.
updates
-------
* 5/19/2020: created script and test to compare which wavelength range I should
use for the exposure time correction factor
'''
import os
import h5py
import fitsio
import numpy as np
import astropy.units as u
# -- feasibgs --
from feasibgs import util as UT
from feasibgs import catalogs as Cat
from feasibgs import forwardmodel as FM
# -- desihub --
import desispec.io
# -- plotting --
import matplotlib as mpl
import matplotlib.pyplot as plt
if 'NERSC_HOST' not in os.environ:
mpl.rcParams['text.usetex'] = True
mpl.rcParams['font.family'] = 'serif'
mpl.rcParams['axes.linewidth'] = 1.5
mpl.rcParams['axes.xmargin'] = 1
mpl.rcParams['xtick.labelsize'] = 'x-large'
mpl.rcParams['xtick.major.size'] = 5
mpl.rcParams['xtick.major.width'] = 1.5
mpl.rcParams['ytick.labelsize'] = 'x-large'
mpl.rcParams['ytick.major.size'] = 5
mpl.rcParams['ytick.major.width'] = 1.5
mpl.rcParams['legend.frameon'] = False
dir = '/global/cscratch1/sd/chahah/feasibgs/cmx/survey_sims/'
def validate_spectral_pipeline():
''' validate the spectral pipeline by
1. constructing spectra from fiber acceptance fraction scaled smoothed CMX
spectra with CMX sky surface brightness
2. compare noise levels to CMX observations
'''
from scipy.signal import medfilt
import desisim.simexp
import specsim.instrument
from desitarget.cmx import cmx_targetmask
np.random.seed(0)
tileid = 70502
date = 20200225
expid = 52113
ispec = 0
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
# get sky surface brightness by correcting for the throughput on the CMX
# sky data
f_sky = lambda band: os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'sky-%s%i-%s.fits' % (band, ispec, str(expid).zfill(8)))
sky_b = desispec.io.read_sky(f_sky('b'))
sky_r = desispec.io.read_sky(f_sky('r'))
sky_z = desispec.io.read_sky(f_sky('z'))
wave, sky_electrons = bs_coadd(
[sky_b.wave, sky_r.wave, sky_z.wave],
[sky_b.flux, sky_r.flux, sky_z.flux])
# exposure time
_frame = desispec.io.read_frame(f_sky('b').replace('sky-', 'frame-'))
exptime = _frame.meta['EXPTIME']
print('exp.time = %.fs' % exptime)
# get which are good fibers from coadd file
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
is_good = (coadd['FIBERSTATUS'] == 0)
is_sky = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SKY')) != 0
good_sky = is_good & is_sky
# get throughput for the cameras
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
desi_fiber_area = 1.862089 # fiber area
# calculate sky brightness
sky_bright = np.median(sky_electrons[good_sky,:], axis=0) / throughput / instrument.photons_per_bin / exptime * 1e17
# get fiber acceptance fraction and airmass
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print('fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
airmass = np.median(gfa['AIRMASS'][isexp])
print('airmass = %.2f' % airmass)
# select BGS spectra
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
coadd_ivar = fitsio.read(f_coadd, ext=4)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
igals = np.random.choice(np.arange(len(gal_cut))[gal_cut], size=5,
replace=False)
igals = np.arange(len(coadd['FIBER']))[coadd['FIBER'] == 143]
for igal in igals:
# source flux is the smoothed CMX spetra
source_flux = np.clip(np.interp(wave, coadd_wave,
medfilt(coadd_flux[igal,:], 101)), 0, None)
# simulate the exposures using the spectral simulation pipeline
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave,
np.atleast_2d(source_flux * fibloss), # scale by fiber acceptance fraction
exptime=exptime,
airmass=airmass,
Isky=[wave, sky_bright],
dwave_out=0.8,
filename=None)
# barebone specsim pipeline for comparison
from specsim.simulator import Simulator
desi = Simulator(config, num_fibers=1)
desi.observation.exposure_time = exptime * u.s
desi.atmosphere._surface_brightness_dict[desi.atmosphere.condition] = \
np.interp(desi.atmosphere._wavelength, wave, sky_bright) * \
desi.atmosphere.surface_brightness.unit
desi.atmosphere._extinct_emission = False
desi.atmosphere._moon = None
desi.atmosphere.airmass = airmass # high airmass
desi.simulate(source_fluxes=np.atleast_2d(source_flux) * 1e-17 * desi.simulated['source_flux'].unit,
fiber_acceptance_fraction=np.tile(fibloss,
np.atleast_2d(source_flux).shape))
random_state = np.random.RandomState(0)
desi.generate_random_noise(random_state, use_poisson=True)
scale=1e17
waves, fluxes, ivars, ivars_electron = [], [], [], []
for table in desi.camera_output:
_wave = table['wavelength'].astype(float)
_flux = (table['observed_flux']+table['random_noise_electrons']*table['flux_calibration']).T.astype(float)
_flux = _flux * scale
_ivar = table['flux_inverse_variance'].T.astype(float)
_ivar = _ivar / scale**2
waves.append(_wave)
fluxes.append(_flux[0])
ivars.append(_ivar[0])
fig = plt.figure(figsize=(15,10))
sub = fig.add_subplot(211)
sub.plot(coadd_wave, coadd_flux[igal,:] * fibloss, c='C0', lw=1,
label='(coadd flux) x (fib.loss)')
for i_b, band in enumerate(['b', 'r', 'z']):
lbl = None
if band == 'b': lbl = 'spectral sim.'
sub.plot(bgs.wave[band], bgs.flux[band][0], c='C1', lw=1,
label=lbl)
sub.plot(waves[i_b], fluxes[i_b] *fibloss, c='C2', lw=1, ls=':')
sub.plot(wave, source_flux * fibloss, c='k', lw=1, ls='--',
label='source flux')
sub.legend(loc='upper right', frameon=True, fontsize=20)
sub.set_xlim(3600, 9800)
sub.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
sub.set_ylim(-1., 5.)
sub = fig.add_subplot(212)
sub.plot(coadd_wave, coadd_ivar[igal,:] * fibloss**-2, c='C0', lw=1,
label=r'(coadd ivar) / (fib.loss$)^2$')
for i_b, band in enumerate(['b', 'r', 'z']):
sub.plot(bgs.wave[band], bgs.ivar[band][0], c='C1', lw=1)
sub.plot(waves[i_b], ivars[i_b] * fibloss**-2, c='C2', lw=1, ls=':')
sub.legend(loc='upper right', frameon=True, fontsize=20)
sub.set_xlabel('wavelength [$A$]', fontsize=20)
sub.set_xlim(3600, 9800)
sub.set_ylabel('ivar', fontsize=25)
sub.set_ylim(0., None)
fig.savefig(os.path.join(dir, 'valid.spectral_pipeline.exp%i.%i.png' % (expid, igal)),
bbox_inches='tight')
return None
def validate_spectral_pipeline_GAMA_source():
    ''' compare the fiber-flux-scaled source spectra from the spectral
    simulation pipeline to the fiber-loss-corrected cframe CMX data for overlapping GAMA G12
galaxies.
'''
import glob
from scipy.signal import medfilt
from scipy.interpolate import interp1d
from desitarget.cmx import cmx_targetmask
from pydl.pydlutils.spheregroup import spherematch
np.random.seed(0)
tileid = 70502 #[66014, 70502] #66014 is with low transparency
date = 20200225
expids = [52112]#, 52113, 52114, 52115, 52116] # terrible FWHM
#tileid = 66014 # low transparency
#date = 20200314
#expids = [55432]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
    # read in GAMA + Legacy catalog
cata = Cat.GamaLegacy()
g12 = cata.Read('g12', dr_gama=3, dr_legacy=7)
g12_ra = g12['legacy-photo']['ra']
g12_dec = g12['legacy-photo']['dec']
Ng12 = len(g12_ra)
# match GAMA galaxies to templates
bgs3 = FM.BGStree()
template_match = bgs3._GamaLegacy(g12)
hasmatch = (template_match != -999)
# ra/dec cut for GAMA so we only keep ones near the tile
cut_gama = ((g12_ra > 174.0) & (g12_ra < 186.0) & (g12_dec > -3.0) & (g12_dec < 2.0) & hasmatch)
g12_ra = g12_ra[cut_gama]
g12_dec = g12_dec[cut_gama]
g12_z = g12['gama-spec']['z'][cut_gama]
g12_rfib = UT.flux2mag(g12['legacy-photo']['fiberflux_r'])[cut_gama]
g12_rmag_gama = g12['gama-photo']['r_model'][cut_gama] # r-band magnitude from GAMA (SDSS) photometry
print('%i galaxies in GAMA G12 + Legacy' % len(g12_ra))
# match coadd objects to G12+legacy catalog based on RA and Dec
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction for exposure from GFA
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fwhm = gfa['FWHM_ASEC'][isexp]
print(' (FWHM) = %f' % np.median(fwhm[~np.isnan(fwhm)]))
transp = gfa['TRANSPARENCY'][isexp]
transp = np.median(transp[~np.isnan(transp)])
print(' (TRANSP) = %f' % transp)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
# spectrographs available for the exposure
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
match_gama, coadd_fluxes = [], []
for ispec in ispecs:
# select BGS galaxies from the coadds
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
# select ones that are in GAMA by matching RA and Dec
match = spherematch(g12_ra, g12_dec,
coadd['TARGET_RA'][gal_cut], coadd['TARGET_DEC'][gal_cut],
0.000277778)
m_gama = match[0]
m_coadd = match[1]
match_gama.append(m_gama)
coadd_fluxes.append(coadd_flux[gal_cut,:][m_coadd])
match_gama = np.concatenate(match_gama)
coadd_fluxes = np.concatenate(coadd_fluxes, axis=0)
print(' %i matches to G12' % len(match_gama))
# generate spectra for the following overlapping galaxies
gama_samp = np.arange(Ng12)[cut_gama][match_gama]
s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
emline_flux = s_bgs.EmissionLineFlux(g12, index=gama_samp, dr_gama=3, silent=True) # emission lines from GAMA
s_flux, s_wave, magnorm_flag = s_bgs.Spectra(
g12_rfib[match_gama],
g12_z[match_gama],
np.repeat(100.0, len(match_gama)),
seed=1,
templateid=template_match[gama_samp],
emflux=emline_flux,
mag_em=g12_rmag_gama[match_gama]
)
igals = np.random.choice(np.arange(len(match_gama))[magnorm_flag], size=5, replace=False)
fig = plt.figure(figsize=(15,20))
for i, igal in enumerate(igals):
sub = fig.add_subplot(5,1,i+1)
#sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101), c='k',
# ls=':', lw=0.5, label='smoothed (coadd flux)')
sub.plot(coadd_wave, coadd_fluxes[igal,:] * transp * 0.775 ,
c='C0', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * transp * 0.775 , c='C0',
label='(coadd flux) x (TRANSP) x (0.775)')
sub.plot(coadd_wave, coadd_fluxes[igal,:] * fibloss,
c='C1', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * fibloss, c='C1',
label='(coadd flux) x (TRANSP) x (FIBER FRACFLUX)')
sub.plot(s_wave, s_flux[igal,:] * transp, c='k', ls='--',
label='(sim source flux) x (TRANSP)')
sub.set_xlim(3600, 9800)
if i < 4: sub.set_xticklabels([])
if i == 1: sub.set_ylabel('inciddent flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
if expid == 55432:
sub.set_ylim(-0.5, 3.)
else:
sub.set_ylim(-0.5, 10.)
#sub.set_ylim(1e-1, None)
#sub.set_yscale('log')
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_source_flux.exp%i.png' % expid), bbox_inches='tight')
plt.close()
return None
def validate_spectral_pipeline_source():
    ''' compare the color-matched and fiber-flux-scaled source spectra from the
    spectral simulation to the fiber-loss-corrected cframe CMX data. This is
because the GAMA comparison was a bust.
'''
import glob
from scipy.signal import medfilt
from scipy.interpolate import interp1d
from scipy.spatial import cKDTree as KDTree
from desitarget.cmx import cmx_targetmask
from pydl.pydlutils.spheregroup import spherematch
np.random.seed(0)
tileid = 66003
date = 20200315
expids = [55654, 55655, 55656]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
# read VI redshifts, which will be used for constructing the source spectra
fvi = os.path.join('/global/cfs/cdirs/desi/sv/vi/TruthTables/',
'truth_table_BGS_v1.2.csv')
vi_id, ztrue, qa_flag = np.genfromtxt(fvi, delimiter=',', skip_header=1, unpack=True,
usecols=[0, 2, 3])
good_z = (qa_flag >= 2.5)
vi_id = vi_id[good_z].astype(int)
ztrue = ztrue[good_z]
mbgs = FM.BGStree()
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction for exposure from GFA
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fwhm = gfa['FWHM_ASEC'][isexp]
print(' (FWHM) = %f' % np.median(fwhm[~np.isnan(fwhm)]))
transp = gfa['TRANSPARENCY'][isexp]
transp = np.median(transp[~np.isnan(transp)])
print(' (TRANSP) = %f' % transp)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
# spectrographs available for the exposure
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
coadd_fluxes, s_fluxes = [], []
for ispec in ispecs:
# read coadd file
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
targetid = coadd['TARGETID'][gal_cut]
rmag = UT.flux2mag(coadd['FLUX_R'], method='log')[gal_cut]
gmag = UT.flux2mag(coadd['FLUX_G'], method='log')[gal_cut]
rfib = UT.flux2mag(coadd['FIBERFLUX_R'], method='log')[gal_cut]
_, m_vi, m_coadd = np.intersect1d(vi_id, targetid, return_indices=True)
print(' %i matches to VI' % len(m_vi))
# match to templates
temp_rmag = mbgs.meta['SDSS_UGRIZ'].data[:,2]
temp_gmag = mbgs.meta['SDSS_UGRIZ'].data[:,1]
temp_meta = np.vstack([
mbgs.meta['Z'].data,
temp_rmag,
temp_gmag - temp_rmag]).T
tree = KDTree(temp_meta)
# match CMX galaxies to templates
_, match_temp = tree.query(np.vstack([
ztrue[m_vi], rmag[m_coadd], (gmag - rmag)[m_coadd]]).T)
# in some cases there won't be a match from KDTree.query
# we flag these with -999
has_match = ~(match_temp >= len(mbgs.meta['TEMPLATEID']))
s_bgs = FM.BGSsourceSpectra(wavemin=1500.0, wavemax=15000)
s_flux, s_wave, magnorm_flag = s_bgs.Spectra(
rfib[m_coadd][has_match],
ztrue[m_vi][has_match],
np.repeat(100.0, np.sum(has_match)),
seed=1,
templateid=match_temp[has_match],
emflux=None,
mag_em=None)
coadd_fluxes.append(coadd_flux[gal_cut][m_coadd][has_match])
s_fluxes.append(s_flux)
coadd_fluxes = np.concatenate(coadd_fluxes, axis=0)
s_fluxes = np.concatenate(s_fluxes, axis=0)
igals = np.random.choice(np.arange(s_fluxes.shape[0]), size=5, replace=False)
fig = plt.figure(figsize=(15,20))
for i, igal in enumerate(igals):
sub = fig.add_subplot(5,1,i+1)
sub.plot(coadd_wave, coadd_fluxes[igal,:] * transp * 0.775, c='C0', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * transp * 0.775 , c='C0',
label='(coadd flux) x (TRANSP) x (0.775)')
sub.plot(coadd_wave, coadd_fluxes[igal,:] * fibloss, c='C1', lw=0.1)
sub.plot(coadd_wave, medfilt(coadd_fluxes[igal,:], 101) * fibloss, c='C1',
label='(coadd flux) x (TRANSP) x (FIBER FRACFLUX)')
sub.plot(s_wave, s_fluxes[igal,:] * transp, c='k', ls='--',
label='(sim source flux) x (TRANSP)')
sub.set_xlim(3600, 9800)
if i < 4: sub.set_xticklabels([])
if i == 1: sub.set_ylabel('inciddent flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
sub.set_ylim(-0.5, 6)
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_source.exp%i.png' % expid),
bbox_inches='tight')
plt.close()
return None
def validate_cmx_zsuccess_specsim_discrepancy(dchi2=40.):
    ''' This ended up being a useless test because the smoothed CMX spectra
    that I was using as the source spectra have no features to fit the redshifts!
    Currently we know that the spectral simulation pipeline does not fully
reproduce the noise level of CMX spectra even when we use the smoothed out
fiber loss corrected CMX spectra as input. This script is to check whether
this discrepancy significantly impacts the redshift success rates.
So we'll be comparing
- z-success rate of observe CMX exposure with VI truth table
- z-success rate of simulated CMX exposure (smoothed fib.loss corrected
source spectra + CMX sky)
VI is currently available for tile 66033 and night 20200315.
'''
import glob
from scipy.signal import medfilt
import desisim.simexp
import specsim.instrument
from desitarget.cmx import cmx_targetmask
np.random.seed(0)
tileid = 66003
date = 20200315
expids = [55654, 55655, 55656]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
fvi = os.path.join('/global/cfs/cdirs/desi/sv/vi/TruthTables/',
'truth_table_BGS_v1.2.csv')
vi_id, ztrue, qa_flag = np.genfromtxt(fvi, delimiter=',', skip_header=1, unpack=True,
usecols=[0, 2, 3])
good_z = (qa_flag >= 2.5)
vi_id = vi_id[good_z].astype(int)
ztrue = ztrue[good_z]
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction and airmass
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fibloss = gfa['TRANSPARENCY'][isexp] * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
airmass = np.median(gfa['AIRMASS'][isexp])
print(' airmass = %.2f' % airmass)
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
# exposure time
_frame = desispec.io.read_frame(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-b%i-%s.fits' % (ispecs[0], str(expid).zfill(8))))
exptime = _frame.meta['EXPTIME']
print(' exp.time = %.fs' % exptime)
for ispec in ispecs:
print(' petal %i' % ispec)
fexp = os.path.join(dir, 'sim_cmx_spectra.exp%i.petal%i.texp%.fs.fits'
% (expid, ispec, exptime))
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
igals = np.arange(len(gal_cut))[gal_cut]
print(' %i BGS galaxies' % np.sum(gal_cut))
if os.path.isfile(fexp): continue
# get sky surface brightness for petal
f_sky = lambda band: os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'sky-%s%i-%s.fits' % (band, ispec, str(expid).zfill(8)))
sky_b = desispec.io.read_sky(f_sky('b'))
sky_r = desispec.io.read_sky(f_sky('r'))
sky_z = desispec.io.read_sky(f_sky('z'))
wave, sky_electrons = bs_coadd(
[sky_b.wave, sky_r.wave, sky_z.wave],
[sky_b.flux, sky_r.flux, sky_z.flux])
# get which are good fibers from coadd file
is_good = (coadd['FIBERSTATUS'] == 0)
is_sky = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SKY')) != 0
good_sky = is_good & is_sky
# get throughput for the cameras
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
desi_fiber_area = 1.862089 # fiber area
# calculate sky brightness
sky_bright = np.median(sky_electrons[good_sky,:], axis=0) / throughput / instrument.photons_per_bin / exptime * 1e17
# source flux is the smoothed CMX spetra
source_flux = np.zeros((len(igals), len(wave)))
for i in range(len(igals)):
source_flux[i,:] = np.clip(np.interp(wave, coadd_wave,
medfilt(coadd_flux[igals[i],:], 101)), 0, None)
# simulate the exposures using the spectral simulation pipeline
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave,
source_flux * fibloss, # scale by fiber acceptance fraction
exptime=exptime,
airmass=airmass,
Isky=[wave, sky_bright],
dwave_out=0.8,
filename=fexp)
frr = run_redrock(fexp, overwrite=False)
for ispec in ispecs:
print(' petal %i' % ispec)
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_wave = fitsio.read(f_coadd, ext=2)
coadd_flux = fitsio.read(f_coadd, ext=3)
coadd_ivar = fitsio.read(f_coadd, ext=4)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
fexp = os.path.join(dir, 'sim_cmx_spectra.exp%i.petal%i.texp%.fs.fits'
% (expid, ispec, exptime))
sim = desispec.io.read_spectra(fexp)
# randomly check 3 galaxies
igals = np.random.choice(np.arange(np.sum(gal_cut)), size=3, replace=False)
fig = plt.figure(figsize=(15,15))
for i, igal in enumerate(igals):
sub = fig.add_subplot(3,1,i+1)
sub.plot(coadd_wave, coadd_flux[gal_cut,:][igal,:], c='C0', label='coadd')
for band in ['b', 'r', 'z']:
sub.plot(sim.wave[band], sim.flux[band][igal,:] / fibloss, c='C1',
label='sim / fib.loss')
sub.set_xlim(3600, 9800)
if i < 2: sub.set_xticklabels([])
if i == 1: sub.set_ylabel('flux [$10^{-17} erg/s/cm^2/A$]', fontsize=25)
sub.set_ylim(-1., None)
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_zsuccess_flux.exp%i.petal%i.png' %
(expid, ispec)), bbox_inches='tight')
plt.close()
fig = plt.figure(figsize=(15,15))
for i, igal in enumerate(igals):
sub = fig.add_subplot(3,1,i+1)
sub.plot(coadd_wave, coadd_ivar[gal_cut,:][igal,:], c='C0', label='coadd')
for band in ['b', 'r', 'z']:
sub.plot(sim.wave[band], sim.ivar[band][igal,:] *
fibloss**2, c='C1', label='sim x (fib.loss$)^2$')
sub.set_xlim(3600, 9800)
if i < 2: sub.set_xticklabels([])
if i == 1: sub.set_ylabel('ivar', fontsize=25)
sub.set_ylim(0., None)
sub.legend(loc='upper right', handletextpad=0.1, fontsize=20)
sub.set_xlabel('wavelength', fontsize=25)
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_zsuccess_ivar.exp%i.petal%i.png' %
(expid, ispec)), bbox_inches='tight')
plt.close()
# read in single exposure coadd and redrock output
for i, ispec in enumerate(ispecs):
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
targetid = coadd['TARGETID'][gal_cut]
# read coadd redrock fits
rr_coadd = fitsio.read(f_coadd.replace('coadd-', 'zbest-'))
rr_coadd_z = rr_coadd['Z'][gal_cut]
rr_coadd_zwarn = rr_coadd['ZWARN'][gal_cut]
rr_coadd_dchi2 = rr_coadd['DELTACHI2'][gal_cut]
fexp = os.path.join(dir, 'sim_cmx_spectra.exp%i.petal%i.texp%.fs.fits'
% (expid, ispec, exptime))
frr_sim = run_redrock(fexp, overwrite=False)
rr_sim = fitsio.read(frr_sim)
rr_sim_z = rr_sim['Z']
rr_sim_zwarn = rr_sim['ZWARN']
rr_sim_dchi2 = rr_sim['DELTACHI2']
# match VI to exposure based on target ids
_, m_vi, m_sim = np.intersect1d(vi_id, targetid, return_indices=True)
print('%i matches to VI' % len(m_vi))
print(' ', ztrue[m_vi][:5])
print(' ', rr_coadd_z[m_sim][:5])
print(' ', rr_sim_z[m_sim][:5])
if i == 0:
rmags = []
ztrues = []
rr_coadd_zs = []
rr_coadd_zwarns = []
rr_coadd_dchi2s = []
rr_sim_zs = []
rr_sim_zwarns = []
rr_sim_dchi2s = []
rmags.append(UT.flux2mag(coadd['FLUX_R'][gal_cut][m_sim], method='log'))
ztrues.append(ztrue[m_vi])
rr_coadd_zs.append(rr_coadd_z[m_sim])
rr_coadd_zwarns.append(rr_coadd_zwarn[m_sim])
rr_coadd_dchi2s.append(rr_coadd_dchi2[m_sim])
rr_sim_zs.append(rr_sim_z[m_sim])
rr_sim_zwarns.append(rr_sim_zwarn[m_sim])
rr_sim_dchi2s.append(rr_sim_dchi2[m_sim])
rmags = np.concatenate(rmags)
ztrues = np.concatenate(ztrues)
rr_coadd_zs = np.concatenate(rr_coadd_zs)
rr_coadd_zwarns = np.concatenate(rr_coadd_zwarns)
rr_coadd_dchi2s = np.concatenate(rr_coadd_dchi2s)
rr_sim_zs = np.concatenate(rr_sim_zs)
rr_sim_zwarns = np.concatenate(rr_sim_zwarns)
rr_sim_dchi2s = np.concatenate(rr_sim_dchi2s)
zs_coadd = UT.zsuccess(rr_coadd_zs, ztrues, rr_coadd_zwarns,
deltachi2=rr_coadd_dchi2s, min_deltachi2=dchi2)
zs_sim = UT.zsuccess(rr_sim_zs, ztrues, rr_sim_zwarns,
deltachi2=rr_sim_dchi2s, min_deltachi2=dchi2)
print('coadd z-success %.2f' % (np.sum(zs_coadd)/float(len(zs_coadd))))
print('sim z-success %.2f' % (np.sum(zs_sim)/float(len(zs_sim))))
# compare the two redshift success rates
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1.0, 1.0], c='k', ls='--')
wmean, rate, err_rate = UT.zsuccess_rate(rmags, zs_coadd, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C0', label='coadd')
wmean, rate, err_rate = UT.zsuccess_rate(rmags, zs_sim, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C1', label='specsim')
sub.text(21., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', ncol=3, handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ fiber magnitude', fontsize=20)
sub.set_xlim(16, 20.5)
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir,
'valid.spectral_pipeline_zsuccess.exp%i.png' % expid),
bbox_inches='tight')
plt.close()
return None
def validate_cmx_zsuccess(dchi2=40.):
    ''' currently we know that the spectral simulation pipeline does not fully
reproduce the noise level of CMX spectra even when we use the smoothed out
fiber loss corrected CMX spectra as input. This script is to check whether
this discrepancy significantly impacts the redshift success rates.
So we'll be comparing
- z-success rate of observe CMX exposure with VI truth table
- z-success rate of spectral simulations run with CMX sky and transparency
VI is currently available for tile 66033 and night 20200315.
'''
import glob
from scipy.signal import medfilt
import desisim.simexp
import specsim.instrument
from desitarget.cmx import cmx_targetmask
np.random.seed(0)
tileid = 66003
date = 20200315
expids = [55654, 55655, 55656]
dir_gfa = '/global/cfs/cdirs/desi/users/ameisner/GFA/conditions'
dir_redux = "/global/cfs/cdirs/desi/spectro/redux/daily"
dir_coadd = '/global/cfs/cdirs/desi/users/chahah/bgs_exp_coadd/'
# read VI table
fvi = os.path.join('/global/cfs/cdirs/desi/sv/vi/TruthTables/',
'truth_table_BGS_v1.2.csv')
vi_id, ztrue, qa_flag = np.genfromtxt(fvi, delimiter=',', skip_header=1, unpack=True,
usecols=[0, 2, 3])
good_z = (qa_flag >= 2.5)
vi_id = vi_id[good_z].astype(int)
ztrue = ztrue[good_z]
# read GAMA-Legacy source fluxes
wave_s, flux_s, meta_s = source_spectra()
for expid in expids:
print('--- %i ---' % expid)
# get fiber acceptance fraction and airmass
gfa = fitsio.read(os.path.join(dir_gfa,
'offline_all_guide_ccds_thru_20200315.fits'))
isexp = (gfa['EXPID'] == expid)
fwhm = gfa['FWHM_ASEC'][isexp]
print(' (FWHM) = %f' % np.median(fwhm[~np.isnan(fwhm)]))
transp = gfa['TRANSPARENCY'][isexp]
transp = np.median(transp[~np.isnan(transp)])
print(' (TRANSP) = %f' % transp)
fibloss = transp * gfa['FIBER_FRACFLUX'][isexp]
fibloss = np.median(fibloss[~np.isnan(fibloss)])
print(' fiber loss = (TRANSP) x (FFRAC) = %f' % fibloss)
airmass = np.median(gfa['AIRMASS'][isexp])
print(' airmass = %.2f' % airmass)
# get petals
ispecs = np.sort([int(os.path.basename(fframe).split('-')[1].replace('z', ''))
for fframe in glob.glob(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-z*.fits'))])
# exposure time
_frame = desispec.io.read_frame(os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'frame-b%i-%s.fits' % (ispecs[0], str(expid).zfill(8))))
exptime = _frame.meta['EXPTIME']
print(' exp.time = %.fs' % exptime)
# simulated exposure
fexp = os.path.join(dir, 'spectralsim_source.cmx_sky.exp%i.fits' % expid)
if not os.path.isfile(fexp):
# get sky brightness for exposure
sky_brights = []
for ispec in ispecs:
print(' petal %i' % ispec)
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
# get sky surface brightness for petal
f_sky = lambda band: os.path.join(dir_redux,
'exposures', str(date), str(expid).zfill(8),
'sky-%s%i-%s.fits' % (band, ispec, str(expid).zfill(8)))
sky_b = desispec.io.read_sky(f_sky('b'))
sky_r = desispec.io.read_sky(f_sky('r'))
sky_z = desispec.io.read_sky(f_sky('z'))
wave, sky_electrons = bs_coadd(
[sky_b.wave, sky_r.wave, sky_z.wave],
[sky_b.flux, sky_r.flux, sky_z.flux])
# get which are good fibers from coadd file
is_good = (coadd['FIBERSTATUS'] == 0)
is_sky = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SKY')) != 0
good_sky = is_good & is_sky
# get throughput for the cameras
config = desisim.simexp._specsim_config_for_wave(wave, dwave_out=0.8, specsim_config_file='desi')
instrument = specsim.instrument.initialize(config, True)
throughput = np.amax([instrument.cameras[0].throughput, instrument.cameras[1].throughput, instrument.cameras[2].throughput], axis=0)
desi_fiber_area = 1.862089 # fiber area
# calculate sky brightness
sky_bright = np.median(sky_electrons[good_sky,:], axis=0) / throughput / instrument.photons_per_bin / exptime * 1e17
sky_brights.append(sky_bright)
sky_brights = np.array(sky_brights)
# median sky brightness of the petals
sky_bright = np.median(sky_brights, axis=0)
# simulate the exposures using the spectral simulation pipeline
fdesi = FM.fakeDESIspec()
bgs = fdesi.simExposure(
wave_s,
flux_s * transp, # scale by transparency
exptime=exptime,
airmass=airmass,
Isky=[wave, sky_bright],
dwave_out=0.8,
filename=fexp)
# run redrock
frr_sim = run_redrock(fexp, overwrite=False)
rr_sim = fitsio.read(frr_sim)
rr_sim_z = rr_sim['Z']
rr_sim_zwarn = rr_sim['ZWARN']
rr_sim_dchi2 = rr_sim['DELTACHI2']
# compile single exposure coadd and redrock output
for i, ispec in enumerate(ispecs):
# get target id
f_coadd = os.path.join(dir_coadd, 'coadd-%i-%i-%i-%s.fits' % (tileid, date, ispec, str(expid).zfill(8)))
coadd = fitsio.read(f_coadd)
coadd_flux = fitsio.read(f_coadd, ext=3)
is_BGS = (coadd['CMX_TARGET'] & cmx_targetmask.cmx_mask.mask('SV0_BGS')) != 0
gal_cut = is_BGS & (np.sum(coadd_flux, axis=1) != 0)
targetid = coadd['TARGETID'][gal_cut]
# read coadd redrock fits
rr_coadd = fitsio.read(f_coadd.replace('coadd-', 'zbest-'))
rr_coadd_z = rr_coadd['Z'][gal_cut]
rr_coadd_zwarn = rr_coadd['ZWARN'][gal_cut]
rr_coadd_dchi2 = rr_coadd['DELTACHI2'][gal_cut]
# match VI to exposure based on target ids
_, m_vi, m_coadd = np.intersect1d(vi_id, targetid, return_indices=True)
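# np.intersect1d with return_indices=True also returns the index arrays of
# the matched entries (m_vi into vi_id, m_coadd into targetid)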
if i == 0:
rmags = []
ztrues = []
rr_coadd_zs = []
rr_coadd_zwarns = []
rr_coadd_dchi2s = []
rmags.append(UT.flux2mag(coadd['FLUX_R'][gal_cut][m_coadd], method='log'))
ztrues.append(ztrue[m_vi])
rr_coadd_zs.append(rr_coadd_z[m_coadd])
rr_coadd_zwarns.append(rr_coadd_zwarn[m_coadd])
rr_coadd_dchi2s.append(rr_coadd_dchi2[m_coadd])
print('%i matches to VI' % len(m_vi))
rmags = np.concatenate(rmags)
ztrues = np.concatenate(ztrues)
rr_coadd_zs = np.concatenate(rr_coadd_zs)
rr_coadd_zwarns = np.concatenate(rr_coadd_zwarns)
rr_coadd_dchi2s = np.concatenate(rr_coadd_dchi2s)
zs_coadd = UT.zsuccess(rr_coadd_zs, ztrues, rr_coadd_zwarns,
deltachi2=rr_coadd_dchi2s, min_deltachi2=dchi2)
zs_sim = UT.zsuccess(rr_sim_z, meta_s['zred'], rr_sim_zwarn,
deltachi2=rr_sim_dchi2, min_deltachi2=dchi2)
print('coadd z-success %.2f' % (np.sum(zs_coadd)/float(len(zs_coadd))))
print('sim z-success %.2f' % (np.sum(zs_sim)/float(len(zs_sim))))
# compare the two redshift success rates
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1.0, 1.0], c='k', ls='--')
wmean, rate, err_rate = UT.zsuccess_rate(rmags, zs_coadd, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C0', label='coadd')
wmean, rate, err_rate = UT.zsuccess_rate(meta_s['r_mag'], zs_sim, range=[15,22],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, fmt='.C1', label='spectral sim')
sub.text(19.5, 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', ncol=3, handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ fiber magnitude', fontsize=20)
sub.set_xlim(16, 20.5)
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir,
'valid.spectralsim_source.cmx_sky.zsuccess.exp%i.png' % expid),
bbox_inches='tight')
plt.close()
return None
def tnom(dchi2=40.):
''' Calculate z-success rate for nominal dark time exposure with different
tnom exposure times. For each tnom, use the z-success rate to determine
r_lim, the r magnitude that gets 95% completeness.
'''
np.random.seed(0)
# nominal exposure times
if dchi2 == 40:
texps = [100 + 20 * i for i in range(11)][::2]
elif dchi2 == 100:
texps = [200 + 10 * i for i in range(11)][::2]
# true redshift and r-magnitude
_, _, meta = source_spectra()
ztrue = meta['zred'] # true redshifts
r_mag = meta['r_mag']
r_fib = meta['r_mag_apflux']
# generate spectra for nominal dark sky exposures and run redrock
frr_noms = []
for texp in texps:
spec_nom = nomdark_spectra(texp)
# run redrock on nominal dark sky exposure spectra
frr_nom = run_redrock(
os.path.join(dir, 'exp_spectra.nominal_dark.%.fs.fits' % texp),
overwrite=False)
frr_noms.append(frr_nom)
rmags = np.linspace(17, 20, 31)
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([16, 21], [1., 1.], c='k', ls=':')
# for each tnom, calculate rlim from the z-sucess rates
for i, texp, frr_nom in zip(range(len(texps)), texps, frr_noms):
# read redrock output and calculate z-success
rr_nom = fitsio.read(frr_nom)
zs_nom = UT.zsuccess(rr_nom['Z'], ztrue, rr_nom['ZWARN'],
deltachi2=rr_nom['DELTACHI2'], min_deltachi2=dchi2)
# ignore redshift failures for bright r < 18.2 galaxies, since these are
# likely an issue with the emission line
zs_nom[r_mag < 18.2] = True
# determine rlim
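# rlim criterion: for each trial magnitude cut, compute the cumulative
# z-success fraction of all galaxies brighter than the cut; rlim is the
# brightest cut fainter than r=18 at which that fraction first drops
# below 95%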
zs_rmag = []
for _r in rmags:
brighter = (r_mag < _r)
zs_rmag.append(np.sum(zs_nom[brighter]) / np.sum(brighter))
crit = (np.array(zs_rmag) < 0.95) & (rmags > 18)
if np.sum(crit) > 0:
rlim = np.min(rmags[crit])
else:
rlim = np.max(rmags)
print('--- tnom = %.fs ---' % texp)
print(' total z-success = %.2f' % (np.sum(zs_nom)/float(len(zs_nom))))
print(' 95percent complete rlim = %.1f' % rlim)
wmean, rate, err_rate = UT.zsuccess_rate(r_mag, zs_nom, range=[15,22],
nbins=28, bin_min=10)
sub.plot(wmean, rate, label=r'%.fs; $r_{\rm lim}= %.1f$' % (texp, rlim))
sub.text(19., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ magnitude', fontsize=20)
sub.set_xlim([16., 20.5])
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir, 'zsuccess.tnom.dchi2_%i.png' % dchi2),
bbox_inches='tight')
plt.close()
fig = plt.figure(figsize=(6,6))
sub = fig.add_subplot(111)
sub.plot([18, 25], [1., 1.], c='k', ls=':')
# nominal exposure z-success rate as a function of fiber magnitude
for i, texp, frr_nom in zip(range(len(texps)), texps, frr_noms):
# read redrock output and calculate z-success
rr_nom = fitsio.read(frr_nom)
zs_nom = UT.zsuccess(rr_nom['Z'], ztrue, rr_nom['ZWARN'],
deltachi2=rr_nom['DELTACHI2'], min_deltachi2=dchi2)
# ignore redshift failures for bright r < 18.2 galaxies, since these are
# likely an issue with the emission line
zs_nom[r_mag < 18.2] = True
wmean, rate, err_rate = UT.zsuccess_rate(r_fib, zs_nom, range=[18,23],
nbins=28, bin_min=10)
sub.errorbar(wmean, rate, err_rate, label=r'%.fs' % texp)
sub.text(21., 1.05, r'$\Delta \chi^2 = %.f$' % dchi2, fontsize=20)
sub.legend(loc='lower left', ncol=3, handletextpad=0.1, fontsize=15)
sub.set_xlabel(r'Legacy $r$ fiber magnitude', fontsize=20)
sub.set_xlim([18., 22.5])
sub.set_ylabel(r'redrock $z$ success rate', fontsize=20)
sub.set_ylim([0.6, 1.1])
sub.set_yticks([0.6, 0.7, 0.8, 0.9, 1.])
fig.savefig(os.path.join(dir, 'zsuccess.tnom.r_fib.dchi2_%i.png' % dchi2),
bbox_inches='tight')
return None
def texp_factor_wavelength(emlines=True):
''' Q: Should the exposure time correction factor be determined by sky
surface brightness ratio at 5000A or 6500A?
sky surface brightness ratio = (sky surface brightness) / (nominal dark sky)
We will answer this by constructing a set of nominal dark sky exposure
spectra with 150s exposure time, getting the redshift success rate for
these spectra. Then we'll compare the redshift success rate for
1. exposure spectra constructed with CMX sky brightness and
texp = 150s x (sky ratio at 5000A)
2. exposure spectra constructed with CMX sky brightness and
texp = 150s x (sky ratio at 6500A)
We use CMX sky brightness during bright exposures.
Whichever redshift success rate is closer to the nominal dark exposure
z-success rate will determine the exposure factor.
updates
-------
* <NAME> was surprised that 6500A agreed better. He finds that
5000A agrees better. He suggested I run this test without emission lines
* 06/11/2020: Read noise term in the SNR calculation cannot be ignored when
our nominal exposure time is low. New fsky values calculated for CMX
exposures including read noise.
'''
np.random.seed(0)
import desisim.simexp
from desimodel.io import load_throughput
wavemin = load_throughput('b').wavemin - 10.0
wavemax = load_throughput('z').wavemax + 10.0
wave = np.arange(round(wavemin, 1), wavemax, 0.8) * u.Angstrom
config = desisim.simexp._specsim_config_for_wave(wave.to('Angstrom').value, dwave_out=0.8, specsim_config_file='desi')
nominal_surface_brightness_dict = config.load_table(
config.atmosphere.sky, 'surface_brightness', as_dict=True)
Isky_nom = [wave, nominal_surface_brightness_dict['dark']]
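# Isky_nom: nominal dark-sky surface brightness template from the specsim
# 'desi' configuration, evaluated on the combined b+r+z wavelength grid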
# generate spectra for nominal dark sky exposure as reference
spec_nom = nomdark_spectra(150, emlines=emlines)
# run redrock on nominal dark sky exposure spectra
frr_nom = run_redrock(os.path.join(dir,
'exp_spectra.nominal_dark%s.150s.fits' % ['.noemission', ''][emlines]),
overwrite=False)
# read in CMX sky data
skies = cmx_skies()
# select CMX exposures when the sky was brighter than dark time. In
# principle we should focus on bright exposures (i.e. 2.5x nominal).
# we also remove exposures from 20200314 which has strange sky fluxes.
#bright = (((skies['sky_ratio_5000'] > 1.) | (skies['sky_ratio_7000'] > 1.))
# & (skies['date'] != 20200314))
#print('%i exposures with sky ratios > 1 and not taken during March 14' % len(expids))
bright = (((skies['fsky_5000'] > 1.5) | (skies['fsky_7000'] > 1.5))
& (skies['date'] != 20200314))
expids = np.unique(skies['expid'][bright])[:5]
print('%i exposures with fsky > 1.5 and not taken during March 14' % len(expids))
#np.random.choice(np.unique(skies['expid'][bright]), size=5, replace=False)
# generate exposure spectra for select CMX sky surface brightnesses with
# exposure times scaled by (1) sky ratio at 5000A (2) sky ratio at 7000A
for expid in expids:
print('--- expid = %i ---' % expid)
is_exp = (skies['expid'] == expid)
# get median sky surface brightnesses for exposure
Isky = bs_coadd(
[skies['wave_b'], skies['wave_r'], skies['wave_z']],
[
np.median(skies['sky_sb_b'][is_exp], axis=0),
np.median(skies['sky_sb_r'][is_exp], axis=0),
np.median(skies['sky_sb_z'][is_exp], axis=0)]
)
fig = plt.figure(figsize=(15,10))
sub = fig.add_subplot(211)
sub.plot(Isky_nom[0], Isky_nom[1], c='k', lw=0.5)
sub.plot(Isky[0], Isky[1], c='C0', lw=0.5)
sub.set_xlabel('wavelength', fontsize=20)
sub.set_xlim(3.6e3, 9.8e3)
sub.set_ylabel('flux', fontsize=20)
sub.set_ylim(0., 10.)
sub = fig.add_subplot(212)
for band in ['b', 'r', 'z']:
sub.plot(spec_nom.wave[band], spec_nom.flux[band][0,:], c='k', lw=1)
# get median sky ratios for the exposure
for i, _w in enumerate([5000, 7000]):
_fexp = np.median(skies['fsky_%i' % _w ][is_exp])
print(' fexp at %iA = %.2f' % (_w, _fexp))
print(' sky ratio = %.2f' % (np.median(skies['sky_ratio_%i' % _w][is_exp])))
import pytest
import os
import glob
import json
from numpy import arange, allclose
from bolt import array as barray
from thunder.images.readers import fromlist, fromarray, frompng, fromtif, frombinary, fromexample
pytestmark = pytest.mark.usefixtures("eng")
resources = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'resources')
def test_from_list(eng):
a = arange(8).reshape((2, 4))
data = fromlist([a], engine=eng)
assert allclose(data.shape, (1,) + a.shape)
assert allclose(data.value_shape, a.shape)
assert allclose(data.toarray(), a)
def test_from_array(eng):
a = arange(8).reshape((1, 2, 4))
data = fromarray(a, engine=eng)
assert allclose(data.shape, a.shape)
assert allclose(data.value_shape, a.shape[1:])
assert allclose(data.toarray(), a)
def test_from_array_bolt(eng):
a = arange(8).reshape((1, 2, 4))
if eng is not None:
b = barray(a, context=eng)
else:
b = barray(a)
data = fromarray(b)
assert allclose(data.shape, a.shape)
assert allclose(data.value_shape, a.shape[1:])
assert allclose(data.toarray(), a)
def test_from_array_single(eng):
a = arange(8).reshape((2, 4))
data = fromarray(a, engine=eng)
assert allclose(data.shape, (1,) + a.shape)
import PIL.Image
from PIL import ImageTk
from tkbuilder.widgets import basic_widgets
from tkbuilder.utils.color_utils.hex_color_palettes import SeabornHexPalettes
import tkbuilder.utils.color_utils.color_utils as color_utils
import platform
import numpy
import time
import tkinter
import tkinter.colorchooser as colorchooser
from numpy import ndarray
from typing import Union
import numpy as np
from tkbuilder.image_readers.image_reader import ImageReader
if platform.system() == "Linux":
import pyscreenshot as ImageGrab
else:
from PIL import ImageGrab
# @property
# def canvas_image_object(self): # type: () -> AbstractCanvasImage
# return self._canvas_image_object
#
# @canvas_image_object.setter
# def canvas_image_object(self, value):
# if value is None:
# self._canvas_image_object = None
# return
#
# if not isinstance(value, AbstractCanvasImage):
# raise TypeError('Requires instance of AbstractCanvasImage, got {}'.format(type(value)))
# self._canvas_image_object = value
# class SpecificAppVariables(AppVariables):
# @property
# def canvas_image_object(self): # type: () -> object
# return self._canvas_image_object
#
# @canvas_image_object.setter
# def canvas_image_object(self, value):
# if value is None:
# self._canvas_image_object = None
# return
#
# if not isinstance(value, object):
# raise TypeError('Requires instance of AbstractCanvasImage, got {}'.format(type(value)))
# self._canvas_image_object = value
class ToolConstants:
ZOOM_IN_TOOL = "zoom in"
ZOOM_OUT_TOOL = "zoom out"
DRAW_RECT_BY_DRAGGING = "draw rect by dragging"
DRAW_RECT_BY_CLICKING = "draw rect by clicking"
DRAW_LINE_BY_DRAGGING = "draw line by dragging"
DRAW_LINE_BY_CLICKING = "draw line by clicking"
DRAW_ARROW_BY_DRAGGING = "draw arrow by dragging"
DRAW_ARROW_BY_CLICKING = "draw arrow by clicking"
DRAW_POLYGON_BY_CLICKING = "draw polygon by clicking"
DRAW_POINT_BY_CLICKING = "draw point by clicking"
SELECT_TOOL = "select tool"
SELECT_CLOSEST_SHAPE_TOOL = "select closest shape"
PAN_TOOL = "pan tool"
TRANSLATE_SHAPE_TOOL = "translate shape tool"
EDIT_SHAPE_COORDS_TOOL = "edit shape coords tool"
EDIT_SHAPE_TOOL = "edit shape tool"
class ShapePropertyConstants:
SHAPE_TYPE = "shape type"
CANVAS_COORDS = "canvas coords"
IMAGE_COORDS = "image coords"
POINT_SIZE = "point size"
COLOR = "color"
class ShapeTypeConstants:
POINT = "point"
LINE = "line"
RECT = "rect"
ARROW = "arrow"
POLYGON = "polygon"
class AppVariables:
def __init__(self):
self.canvas_height = 200 # default width
self.canvas_width = 300 # default height
self.rect_border_width = 2
self.line_width = 2
self.point_size = 3
self.poly_border_width = 2
self.poly_fill = None
self.foreground_color = "red"
self.image_id = None # type: int
self.current_shape_id = None
self.current_shape_canvas_anchor_point_xy = None
self.shape_ids = [] # type: [int]
self.shape_properties = {}
self.canvas_image_object = None # type: CanvasImage
self.zoom_rect_id = None # type: int
self.zoom_rect_color = "cyan"
self.zoom_rect_border_width = 2
self.animate_zoom = True
self.animate_pan = False
self.n_zoom_animations = 5
self.animation_time_in_seconds = 0.3
self.select_rect_id = None
self.select_rect_color = "red"
self.select_rect_border_width = 2
self.active_tool = None
self.current_tool = None
self.pan_anchor_point_xy = (0, 0)
self.vertex_selector_pixel_threshold = 10.0 # type: float
self.the_canvas_is_currently_zooming = False # type: bool
self.mouse_wheel_zoom_percent_per_event = 1.5
self.actively_drawing_shape = False
self.shape_drag_xy_limits = {} # type: dict
self.highlight_color_palette = SeabornHexPalettes.blues
self.highlight_n_colors_cycle = 10
self.tmp_points = None # type: [int]
self.tmp_closest_coord_index = 0 # type: int
self.zoom_on_wheel = True
self._tk_im = None # type: ImageTk.PhotoImage
self.rescale_image_to_fit_canvas = True
self.scale_dynamic_range = False
SHAPE_PROPERTIES = ShapePropertyConstants()
SHAPE_TYPES = ShapeTypeConstants()
TOOLS = ToolConstants()
class ImageCanvas(basic_widgets.Canvas):
def __init__(self,
master,
):
basic_widgets.Canvas.__init__(self, master)
self.variables = AppVariables()
self.variables.zoom_rect_id = self.create_new_rect((0, 0, 1, 1), outline=self.variables.zoom_rect_color, width=self.variables.zoom_rect_border_width)
self.variables.select_rect_id = self.create_new_rect((0, 0, 1, 1), outline=self.variables.select_rect_color, width=self.variables.select_rect_border_width)
# hide the shapes we initialize
self.hide_shape(self.variables.select_rect_id)
self.hide_shape(self.variables.zoom_rect_id)
self.on_left_mouse_click(self.callback_handle_left_mouse_click)
self.on_left_mouse_motion(self.callback_handle_left_mouse_motion)
self.on_left_mouse_release(self.callback_handle_left_mouse_release)
self.on_right_mouse_click(self.callback_handle_right_mouse_click)
self.on_mouse_motion(self.callback_handle_mouse_motion)
self.on_mouse_wheel(self.callback_mouse_zoom)
self.variables.active_tool = None
self.variables.current_shape_id = None
def set_image_reader(self,
image_reader, # type: ImageReader
):
self.variables.canvas_image_object = CanvasImage(image_reader, self.variables.canvas_width, self.variables.canvas_height)
if self.variables.rescale_image_to_fit_canvas:
self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)
else:
self.set_image_from_numpy_array(self.variables.canvas_image_object.canvas_decimated_image)
def get_canvas_line_length(self, line_id):
line_coords = self.coords(line_id)
x1 = line_coords[0]
y1 = line_coords[1]
x2 = line_coords[2]
y2 = line_coords[3]
length = numpy.sqrt(numpy.square(x2-x1) + numpy.square(y2-y1))
return length
def get_image_line_length(self, line_id):
canvas_line_length = self.get_canvas_line_length(line_id)
return canvas_line_length * self.variables.canvas_image_object.decimation_factor
def get_shape_type(self,
shape_id, # type: int
):
return self._get_shape_property(shape_id, SHAPE_PROPERTIES.SHAPE_TYPE)
def hide_shape(self, shape_id):
if shape_id:
self.itemconfigure(shape_id, state="hidden")
def show_shape(self, shape_id):
if shape_id:
self.itemconfigure(shape_id, state="normal")
def callback_mouse_zoom(self, event):
if self.variables.zoom_on_wheel:
delta = event.delta
single_delta = 120
# handle case where platform is linux: X11 reports scrolling via
# Button-4/5 events with event.delta == 0, so synthesize a
# Windows-style +/-120 delta
if platform.system() == "Linux":
delta = single_delta
if event.num == 5:
delta = delta*-1
zoom_in_box_half_width = int(self.variables.canvas_width / self.variables.mouse_wheel_zoom_percent_per_event / 2)
zoom_out_box_half_width = int(self.variables.canvas_width * self.variables.mouse_wheel_zoom_percent_per_event / 2)
zoom_in_box_half_height = int(self.variables.canvas_height / self.variables.mouse_wheel_zoom_percent_per_event / 2)
zoom_out_box_half_height = int(self.variables.canvas_height * self.variables.mouse_wheel_zoom_percent_per_event / 2)
x = event.x
y = event.y
after_zoom_x_offset = (self.variables.canvas_width/2 - x)/self.variables.mouse_wheel_zoom_percent_per_event
after_zoom_y_offset = (self.variables.canvas_height/2 - y)/self.variables.mouse_wheel_zoom_percent_per_event
x_offset_point = x + after_zoom_x_offset
y_offset_point = y + after_zoom_y_offset
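# offset the zoom box center so the pixel under the cursor stays
# (approximately) fixed on screen after the zoom is applied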
zoom_in_box = [x_offset_point - zoom_in_box_half_width,
y_offset_point - zoom_in_box_half_height,
x_offset_point + zoom_in_box_half_width,
y_offset_point + zoom_in_box_half_height]
zoom_out_box = [x_offset_point - zoom_out_box_half_width,
y_offset_point - zoom_out_box_half_height,
x_offset_point + zoom_out_box_half_width,
y_offset_point + zoom_out_box_half_height]
if self.variables.the_canvas_is_currently_zooming:
pass
else:
if delta > 0:
self.zoom_to_selection(zoom_in_box, self.variables.animate_zoom)
else:
self.zoom_to_selection(zoom_out_box, self.variables.animate_zoom)
else:
pass
def animate_with_numpy_frame_sequence(self,
numpy_frame_sequence, # type: [numpy.ndarray]
frames_per_second=15, # type: float
):
sleep_time = 1/frames_per_second
for animation_frame in numpy_frame_sequence:
tic = time.time()
self.set_image_from_numpy_array(animation_frame)
self.update()
toc = time.time()
frame_generation_time = toc-tic
if frame_generation_time < sleep_time:
new_sleep_time = sleep_time - frame_generation_time
time.sleep(new_sleep_time)
else:
pass
def animate_with_pil_frame_sequence(self,
pil_frame_sequence, # type: [PIL.Image]
frames_per_second=15, # type: float
):
sleep_time = 1/frames_per_second
for animation_frame in pil_frame_sequence:
tic = time.time()
self._set_image_from_pil_image(animation_frame)
self.update()
toc = time.time()
frame_generation_time = toc-tic
if frame_generation_time < sleep_time:
new_sleep_time = sleep_time - frame_generation_time
time.sleep(new_sleep_time)
else:
pass
def callback_handle_left_mouse_click(self, event):
if self.variables.active_tool == TOOLS.PAN_TOOL:
self.variables.pan_anchor_point_xy = event.x, event.y
self.variables.tmp_anchor_point = event.x, event.y
elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:
self.variables.translate_anchor_point_xy = event.x, event.y
self.variables.tmp_anchor_point = event.x, event.y
elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:
closest_coord_index = self.find_closest_shape_coord(self.variables.current_shape_id, event.x, event.y)
self.variables.tmp_closest_coord_index = closest_coord_index
elif self.variables.active_tool == TOOLS.SELECT_CLOSEST_SHAPE_TOOL:
closest_shape_id = self.find_closest_shape(event.x, event.y)
self.variables.current_shape_id = closest_shape_id
self.highlight_existing_shape(self.variables.current_shape_id)
else:
start_x = self.canvasx(event.x)
start_y = self.canvasy(event.y)
self.variables.current_shape_canvas_anchor_point_xy = (start_x, start_y)
if self.variables.current_shape_id not in self.variables.shape_ids:
coords = (start_x, start_y, start_x + 1, start_y + 1)
if self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:
self.create_new_line(coords)
elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:
self.create_new_line(coords)
self.variables.actively_drawing_shape = True
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:
self.create_new_arrow(coords)
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:
self.create_new_arrow(coords)
self.variables.actively_drawing_shape = True
elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:
self.create_new_rect(coords)
elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:
self.create_new_rect(coords)
self.variables.actively_drawing_shape = True
elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:
self.create_new_point((start_x, start_y))
elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:
self.create_new_polygon(coords)
self.variables.actively_drawing_shape = True
else:
print("no tool selected")
else:
if self.variables.current_shape_id in self.variables.shape_ids:
if self.get_shape_type(self.variables.current_shape_id) == SHAPE_TYPES.POINT:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id,
(start_x, start_y))
elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:
self.event_click_line(event)
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:
self.event_click_line(event)
elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:
self.event_click_polygon(event)
elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:
if self.variables.actively_drawing_shape:
self.variables.actively_drawing_shape = False
else:
self.variables.actively_drawing_shape = True
def callback_handle_left_mouse_release(self, event):
if self.variables.active_tool == TOOLS.PAN_TOOL:
self._pan(event)
if self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:
rect_coords = self.coords(self.variables.zoom_rect_id)
self.zoom_to_selection(rect_coords, self.variables.animate_zoom)
self.hide_shape(self.variables.zoom_rect_id)
if self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:
rect_coords = self.coords(self.variables.zoom_rect_id)
x1 = -rect_coords[0]
x2 = self.variables.canvas_width + rect_coords[2]
y1 = -rect_coords[1]
y2 = self.variables.canvas_height + rect_coords[3]
zoom_rect = (x1, y1, x2, y2)
self.zoom_to_selection(zoom_rect, self.variables.animate_zoom)
self.hide_shape(self.variables.zoom_rect_id)
def callback_handle_mouse_motion(self, event):
if self.variables.actively_drawing_shape:
if self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:
self.event_drag_multipoint_line(event)
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:
self.event_drag_multipoint_line(event)
elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:
self.event_drag_multipoint_polygon(event)
elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_CLICKING:
self.event_drag_line(event)
elif self.variables.current_tool == TOOLS.EDIT_SHAPE_TOOL:
if self.get_shape_type(self.variables.current_shape_id) == SHAPE_TYPES.RECT:
select_x1, select_y1, select_x2, select_y2 = self.get_shape_canvas_coords(
self.variables.current_shape_id)
select_xul = min(select_x1, select_x2)
select_xlr = max(select_x1, select_x2)
select_yul = min(select_y1, select_y2)
select_ylr = max(select_y1, select_y2)
distance_to_ul = numpy.sqrt(numpy.square(event.x - select_xul) + numpy.square(event.y - select_yul))
distance_to_ur = numpy.sqrt(numpy.square(event.x - select_xlr) + numpy.square(event.y - select_yul))
distance_to_lr = numpy.sqrt(numpy.square(event.x - select_xlr) + numpy.square(event.y - select_ylr))
distance_to_ll = numpy.sqrt(numpy.square(event.x - select_xul) + numpy.square(event.y - select_ylr))
if distance_to_ul < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="top_left_corner")
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
elif distance_to_ur < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="top_right_corner")
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
elif distance_to_lr < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="bottom_right_corner")
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
elif distance_to_ll < self.variables.vertex_selector_pixel_threshold:
self.config(cursor="bottom_left_corner")
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
elif select_xul < event.x < select_xlr and select_yul < event.y < select_ylr:
self.config(cursor="fleur")
self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL
else:
self.config(cursor="arrow")
self.variables.active_tool = None
def callback_handle_left_mouse_motion(self, event):
if self.variables.active_tool == TOOLS.PAN_TOOL:
x_dist = event.x - self.variables.tmp_anchor_point[0]
y_dist = event.y - self.variables.tmp_anchor_point[1]
self.move(self.variables.image_id, x_dist, y_dist)
self.variables.tmp_anchor_point = event.x, event.y
elif self.variables.active_tool == TOOLS.TRANSLATE_SHAPE_TOOL:
x_dist = event.x - self.variables.tmp_anchor_point[0]
y_dist = event.y - self.variables.tmp_anchor_point[1]
new_x1 = min(self.get_shape_canvas_coords(self.variables.current_shape_id)[0] + x_dist,
self.get_shape_canvas_coords(self.variables.current_shape_id)[2] + x_dist)
new_x2 = max(self.get_shape_canvas_coords(self.variables.current_shape_id)[0] + x_dist,
self.get_shape_canvas_coords(self.variables.current_shape_id)[2] + x_dist)
new_y1 = min(self.get_shape_canvas_coords(self.variables.current_shape_id)[1] + y_dist,
self.get_shape_canvas_coords(self.variables.current_shape_id)[3] + y_dist)
new_y2 = max(self.get_shape_canvas_coords(self.variables.current_shape_id)[1] + y_dist,
self.get_shape_canvas_coords(self.variables.current_shape_id)[3] + y_dist)
width = new_x2 - new_x1
height = new_y2 - new_y1
if str(self.variables.current_shape_id) in self.variables.shape_drag_xy_limits.keys():
drag_x_lim_1, drag_y_lim_1, drag_x_lim_2, drag_y_lim_2 = self.variables.shape_drag_xy_limits[str(self.variables.current_shape_id)]
if self.get_shape_type(self.variables.current_shape_id) == SHAPE_TYPES.RECT:
if new_x1 < drag_x_lim_1:
new_x1 = drag_x_lim_1
new_x2 = new_x1 + width
if new_x2 > drag_x_lim_2:
new_x2 = drag_x_lim_2
new_x1 = new_x2 - width
if new_y1 < drag_y_lim_1:
new_y1 = drag_y_lim_1
new_y2 = new_y1 + height
if new_y2 > drag_y_lim_2:
new_y2 = drag_y_lim_2
new_y1 = new_y2 - height
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (new_x1, new_y1, new_x2, new_y2), update_pixel_coords=True)
self.variables.tmp_anchor_point = event.x, event.y
elif self.variables.active_tool == TOOLS.EDIT_SHAPE_COORDS_TOOL:
previous_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)
coord_x_index = self.variables.tmp_closest_coord_index*2
coord_y_index = coord_x_index + 1
new_coords = list(previous_coords)
new_coords[coord_x_index] = event.x
new_coords[coord_y_index] = event.y
if str(self.variables.current_shape_id) in self.variables.shape_drag_xy_limits.keys():
drag_x_lim_1, drag_y_lim_1, drag_x_lim_2, drag_y_lim_2 = self.variables.shape_drag_xy_limits[str(self.variables.current_shape_id)]
if new_coords[coord_x_index] < drag_x_lim_1:
new_coords[coord_x_index] = drag_x_lim_1
if new_coords[coord_x_index] > drag_x_lim_2:
new_coords[coord_x_index] = drag_x_lim_2
if new_coords[coord_y_index] < drag_y_lim_1:
new_coords[coord_y_index] = drag_y_lim_1
if new_coords[coord_y_index] > drag_y_lim_2:
new_coords[coord_y_index] = drag_y_lim_2
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, tuple(new_coords))
elif self.variables.active_tool == TOOLS.ZOOM_IN_TOOL:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.ZOOM_OUT_TOOL:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.SELECT_TOOL:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.DRAW_RECT_BY_DRAGGING:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.DRAW_LINE_BY_DRAGGING:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_DRAGGING:
self.event_drag_line(event)
elif self.variables.active_tool == TOOLS.DRAW_POINT_BY_CLICKING:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (event.x, event.y))
def highlight_existing_shape(self, shape_id):
original_color = self._get_shape_property(shape_id, SHAPE_PROPERTIES.COLOR)
colors = color_utils.get_full_hex_palette(self.variables.highlight_color_palette, self.variables.highlight_n_colors_cycle)
for color in colors:
self.change_shape_color(shape_id, color)
time.sleep(0.001)
self.update()
colors.reverse()
for color in colors:
self.change_shape_color(shape_id, color)
time.sleep(0.001)
self.update()
self.change_shape_color(shape_id, original_color)
def callback_handle_right_mouse_click(self, event):
if self.variables.active_tool == TOOLS.DRAW_LINE_BY_CLICKING:
self.variables.actively_drawing_shape = False
elif self.variables.active_tool == TOOLS.DRAW_ARROW_BY_CLICKING:
self.variables.actively_drawing_shape = False
elif self.variables.active_tool == TOOLS.DRAW_POLYGON_BY_CLICKING:
self.variables.actively_drawing_shape = False
def set_image_from_numpy_array(self,
numpy_data, # type: numpy.ndarray
):
"""
This is the default way to set and display image data. All other methods to update images should
ultimately call this.
"""
if self.variables.scale_dynamic_range:
dynamic_range = numpy_data.max() - numpy_data.min()
numpy_data = numpy_data - numpy_data.min()
numpy_data = numpy_data / dynamic_range
numpy_data = numpy_data * 255
numpy_data = numpy.asanyarray(numpy_data, dtype=numpy.uint8)
pil_image = PIL.Image.fromarray(numpy_data)
self._set_image_from_pil_image(pil_image)
def set_canvas_size(self,
width_npix, # type: int
height_npix, # type: int
):
self.variables.canvas_width = width_npix
self.variables.canvas_height = height_npix
self.config(width=width_npix, height=height_npix)
def modify_existing_shape_using_canvas_coords(self,
shape_id, # type: int
new_coords, # type: tuple
update_pixel_coords=True, # type: bool
):
if self.get_shape_type(shape_id) == SHAPE_TYPES.POINT:
point_size = self._get_shape_property(shape_id, SHAPE_PROPERTIES.POINT_SIZE)
x1, y1 = (new_coords[0] - point_size), (new_coords[1] - point_size)
x2, y2 = (new_coords[0] + point_size), (new_coords[1] + point_size)
canvas_drawing_coords = (x1, y1, x2, y2)
else:
canvas_drawing_coords = tuple(new_coords)
self.coords(shape_id, canvas_drawing_coords)
self.set_shape_canvas_coords(shape_id, new_coords)
if update_pixel_coords:
self.set_shape_pixel_coords_from_canvas_coords(shape_id)
def modify_existing_shape_using_image_coords(self,
shape_id, # type: int
image_coords, # type: tuple
):
self.set_shape_pixel_coords(shape_id, image_coords)
canvas_coords = self.image_coords_to_canvas_coords(shape_id)
self.modify_existing_shape_using_canvas_coords(shape_id, canvas_coords, update_pixel_coords=False)
def event_drag_multipoint_line(self, event):
if self.variables.current_shape_id:
self.show_shape(self.variables.current_shape_id)
event_x_pos = self.canvasx(event.x)
event_y_pos = self.canvasy(event.y)
coords = self.coords(self.variables.current_shape_id)
new_coords = list(coords[0:-2]) + [event_x_pos, event_y_pos]
if self.get_shape_type(self.variables.current_shape_id) == SHAPE_TYPES.ARROW or self.get_shape_type(self.variables.current_shape_id) == SHAPE_TYPES.LINE:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
else:
pass
def event_drag_multipoint_polygon(self, event):
if self.variables.current_shape_id:
self.show_shape(self.variables.current_shape_id)
event_x_pos = self.canvasx(event.x)
event_y_pos = self.canvasy(event.y)
coords = self.coords(self.variables.current_shape_id)
new_coords = list(coords[0:-2]) + [event_x_pos, event_y_pos]
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
else:
pass
def event_drag_line(self, event):
if self.variables.current_shape_id:
self.show_shape(self.variables.current_shape_id)
event_x_pos = self.canvasx(event.x)
event_y_pos = self.canvasy(event.y)
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (self.variables.current_shape_canvas_anchor_point_xy[0], self.variables.current_shape_canvas_anchor_point_xy[1], event_x_pos, event_y_pos))
def event_drag_rect(self, event):
if self.variables.current_shape_id:
self.show_shape(self.variables.current_shape_id)
event_x_pos = self.canvasx(event.x)
event_y_pos = self.canvasy(event.y)
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (self.variables.current_shape_canvas_anchor_point_xy[0], self.variables.current_shape_canvas_anchor_point_xy[1], event_x_pos, event_y_pos))
def event_click_line(self, event):
if self.variables.actively_drawing_shape:
old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)
new_coords = tuple(list(old_coords) + [event.x, event.y])
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
else:
new_coords = (event.x, event.y, event.x+1, event.y+1)
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
self.variables.actively_drawing_shape = True
def delete_shape(self, shape_id):
self.variables.shape_ids.remove(shape_id)
self.delete(shape_id)
if shape_id == self.variables.current_shape_id:
self.variables.current_shape_id = None
def event_click_polygon(self, event):
if self.variables.actively_drawing_shape:
old_coords = self.get_shape_canvas_coords(self.variables.current_shape_id)
new_coords = tuple(list(old_coords) + [event.x, event.y])
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
# re-initialize shape if we're not actively drawing
else:
new_coords = (event.x, event.y, event.x+1, event.y+1)
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, new_coords)
self.variables.actively_drawing_shape = True
def create_new_rect(self,
coords, # type: (int, int, int, int)
**options
):
if options == {}:
shape_id = self.create_rectangle(coords[0], coords[1], coords[2], coords[3],
outline=self.variables.foreground_color,
width=self.variables.rect_border_width)
else:
shape_id = self.create_rectangle(coords[0], coords[1], coords[2], coords[3], options)
self.variables.shape_ids.append(shape_id)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.SHAPE_TYPE, SHAPE_TYPES.RECT)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.COLOR, self.variables.foreground_color)
self.set_shape_canvas_coords(shape_id, coords)
self.set_shape_pixel_coords_from_canvas_coords(shape_id)
self.variables.current_shape_id = shape_id
return shape_id
def create_new_polygon(self,
coords, # type: (int, int, int, int)
**options
):
if options == {}:
shape_id = self.create_polygon(coords[0], coords[1], coords[2], coords[3],
outline=self.variables.foreground_color,
width=self.variables.poly_border_width,
fill='')
else:
shape_id = self.create_polygon(coords[0], coords[1], coords[2], coords[3], options)
self.variables.shape_ids.append(shape_id)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.SHAPE_TYPE, SHAPE_TYPES.POLYGON)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.COLOR, self.variables.foreground_color)
self.set_shape_canvas_coords(shape_id, coords)
self.set_shape_pixel_coords_from_canvas_coords(shape_id)
self.variables.current_shape_id = shape_id
return shape_id
def create_new_arrow(self,
coords,
**options
):
if options == {}:
shape_id = self.create_line(coords[0], coords[1], coords[2], coords[3],
fill=self.variables.foreground_color,
width=self.variables.line_width,
arrow=tkinter.LAST)
else:
shape_id = self.create_line(coords[0], coords[1], coords[2], coords[3], options, arrow=tkinter.LAST)
self.variables.shape_ids.append(shape_id)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.SHAPE_TYPE, SHAPE_TYPES.ARROW)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.COLOR, self.variables.foreground_color)
self.set_shape_canvas_coords(shape_id, coords)
self.set_shape_pixel_coords_from_canvas_coords(shape_id)
self.variables.current_shape_id = shape_id
return shape_id
def create_new_line(self, coords, **options):
if options == {}:
shape_id = self.create_line(coords,
fill=self.variables.foreground_color,
width=self.variables.line_width)
else:
shape_id = self.create_line(coords[0], coords[1], coords[2], coords[3], options)
self.variables.shape_ids.append(shape_id)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.SHAPE_TYPE, SHAPE_TYPES.LINE)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.COLOR, self.variables.foreground_color)
self.set_shape_canvas_coords(shape_id, coords)
self.set_shape_pixel_coords_from_canvas_coords(shape_id)
self.variables.current_shape_id = shape_id
return shape_id
def create_new_point(self,
coords,
**options):
x1, y1 = (coords[0] - self.variables.point_size), (coords[1] - self.variables.point_size)
x2, y2 = (coords[0] + self.variables.point_size), (coords[1] + self.variables.point_size)
if options == {}:
shape_id = self.create_oval(x1, y1, x2, y2, fill=self.variables.foreground_color)
else:
shape_id = self.create_oval(x1, y1, x2, y2, options)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.POINT_SIZE, self.variables.point_size)
self.variables.shape_ids.append(shape_id)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.SHAPE_TYPE, SHAPE_TYPES.POINT)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.COLOR, self.variables.foreground_color)
self.set_shape_canvas_coords(shape_id, coords)
self.set_shape_pixel_coords_from_canvas_coords(shape_id)
self.variables.current_shape_id = shape_id
return shape_id
def change_shape_color(self,
shape_id, # type: int
color, # type: str
):
shape_type = self.get_shape_type(shape_id)
if shape_type == SHAPE_TYPES.RECT:
self.itemconfig(shape_id, outline=color)
elif shape_type == SHAPE_TYPES.POLYGON:
self.itemconfig(shape_id, outline=color)
else:
self.itemconfig(shape_id, fill=color)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.COLOR, color)
def set_shape_canvas_coords(self,
shape_id,
coords):
self._set_shape_property(shape_id, SHAPE_PROPERTIES.CANVAS_COORDS, coords)
def set_shape_pixel_coords_from_canvas_coords(self, shape_id):
if self.variables.canvas_image_object is None:
self._set_shape_property(shape_id, SHAPE_PROPERTIES.IMAGE_COORDS, None)
else:
image_coords = self.canvas_shape_coords_to_image_coords(shape_id)
self._set_shape_property(shape_id, SHAPE_PROPERTIES.IMAGE_COORDS, image_coords)
def set_shape_pixel_coords(self,
shape_id, # type: int
image_coords, # type: list
):
self._set_shape_property(shape_id, SHAPE_PROPERTIES.IMAGE_COORDS, image_coords)
def canvas_shape_coords_to_image_coords(self, shape_id):
canvas_coords = self.get_shape_canvas_coords(shape_id)
return self.variables.canvas_image_object.canvas_coords_to_full_image_yx(canvas_coords)
def get_shape_canvas_coords(self, shape_id):
return self._get_shape_property(shape_id, SHAPE_PROPERTIES.CANVAS_COORDS)
def get_shape_image_coords(self, shape_id):
return self._get_shape_property(shape_id, SHAPE_PROPERTIES.IMAGE_COORDS)
def image_coords_to_canvas_coords(self, shape_id):
image_coords = self.get_shape_image_coords(shape_id)
return self.variables.canvas_image_object.full_image_yx_to_canvas_coords(image_coords)
def get_image_data_in_canvas_rect_by_id(self, rect_id, decimation=None):
image_coords = self.get_shape_image_coords(rect_id)
if image_coords[0] > image_coords[2]:
tmp = image_coords[0]
image_coords[0] = image_coords[2]
image_coords[2] = tmp
if image_coords[1] > image_coords[3]:
tmp = image_coords[1]
image_coords[1] = image_coords[3]
image_coords[3] = tmp
if decimation is None:
decimation = self.variables.canvas_image_object.get_decimation_factor_from_full_image_rect(image_coords)
image_data_in_rect = self.variables.canvas_image_object.get_decimated_image_data_in_full_image_rect(image_coords, decimation)
return image_data_in_rect
def zoom_to_selection(self, canvas_rect, animate=False):
self.variables.the_canvas_is_currently_zooming = True
# fill up empty canvas space due to inconsistent ratios between the canvas rect and the canvas dimensions
image_coords = self.variables.canvas_image_object.canvas_coords_to_full_image_yx(canvas_rect)
zoomed_image_height = image_coords[2] - image_coords[0]
zoomed_image_width = image_coords[3] - image_coords[1]
canvas_height_width_ratio = self.variables.canvas_height / self.variables.canvas_width
zoomed_image_height_width_ratio = zoomed_image_height / zoomed_image_width
new_image_width = zoomed_image_height / canvas_height_width_ratio
new_image_height = zoomed_image_width * canvas_height_width_ratio
if zoomed_image_height_width_ratio > canvas_height_width_ratio:
image_zoom_point_center = (image_coords[3] + image_coords[1]) / 2
image_coords[1] = image_zoom_point_center - new_image_width/2
image_coords[3] = image_zoom_point_center + new_image_width/2
else:
image_zoom_point_center = (image_coords[2] + image_coords[0]) / 2
image_coords[0] = image_zoom_point_center - new_image_height / 2
image_coords[2] = image_zoom_point_center + new_image_height / 2
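# only one of new_image_width / new_image_height is actually applied:
# whichever dimension must be padded so the zoom region matches the
# canvas aspect ratio without distorting the image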
# keep the rect within the image bounds
image_y_ul = max(image_coords[0], 0)
image_x_ul = max(image_coords[1], 0)
image_y_br = min(image_coords[2], self.variables.canvas_image_object.image_reader.full_image_ny)
image_x_br = min(image_coords[3], self.variables.canvas_image_object.image_reader.full_image_nx)
# re-adjust if we ran off one of the edges
if image_x_ul == 0:
image_coords[3] = new_image_width
if image_x_br == self.variables.canvas_image_object.image_reader.full_image_nx:
image_coords[1] = self.variables.canvas_image_object.image_reader.full_image_nx - new_image_width
if image_y_ul == 0:
image_coords[2] = new_image_height
if image_y_br == self.variables.canvas_image_object.image_reader.full_image_ny:
image_coords[0] = self.variables.canvas_image_object.image_reader.full_image_ny - new_image_height
# keep the rect within the image bounds
image_y_ul = max(image_coords[0], 0)
image_x_ul = max(image_coords[1], 0)
image_y_br = min(image_coords[2], self.variables.canvas_image_object.image_reader.full_image_ny)
image_x_br = min(image_coords[3], self.variables.canvas_image_object.image_reader.full_image_nx)
new_canvas_rect = self.variables.canvas_image_object.full_image_yx_to_canvas_coords((image_y_ul, image_x_ul, image_y_br, image_x_br))
new_canvas_rect = (int(new_canvas_rect[0]), int(new_canvas_rect[1]), int(new_canvas_rect[2]), int(new_canvas_rect[3]))
background_image = self.variables.canvas_image_object.display_image
self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(new_canvas_rect)
if self.variables.rescale_image_to_fit_canvas:
new_image = PIL.Image.fromarray(self.variables.canvas_image_object.display_image)
else:
new_image = PIL.Image.fromarray(self.variables.canvas_image_object.canvas_decimated_image)
if animate is True:
# create frame sequence
n_animations = self.variables.n_zoom_animations
background_image = background_image / 2
background_image = np.asarray(background_image, dtype=np.uint8)
canvas_x1, canvas_y1, canvas_x2, canvas_y2 = new_canvas_rect
display_x_ul = min(canvas_x1, canvas_x2)
display_x_br = max(canvas_x1, canvas_x2)
display_y_ul = min(canvas_y1, canvas_y2)
display_y_br = max(canvas_y1, canvas_y2)
x_diff = new_image.width - (display_x_br - display_x_ul)
y_diff = new_image.height - (display_y_br - display_y_ul)
pil_background_image = PIL.Image.fromarray(background_image)
frame_sequence = []
for i in range(n_animations):
new_x_ul = int(display_x_ul * (1 - i/(n_animations-1)))
new_y_ul = int(display_y_ul * (1 - i/(n_animations-1)))
new_size_x = int((display_x_br - display_x_ul) + x_diff * (i/(n_animations-1)))
new_size_y = int((display_y_br - display_y_ul) + y_diff * (i/(n_animations-1)))
resized_zoom_image = new_image.resize((new_size_x, new_size_y))
animation_image = pil_background_image.copy()
animation_image.paste(resized_zoom_image, (new_x_ul, new_y_ul))
frame_sequence.append(animation_image)
fps = n_animations / self.variables.animation_time_in_seconds
self.animate_with_pil_frame_sequence(frame_sequence, frames_per_second=fps)
if self.variables.rescale_image_to_fit_canvas:
self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)
else:
self.set_image_from_numpy_array(self.variables.canvas_image_object.canvas_decimated_image)
self.update()
self.redraw_all_shapes()
self.variables.the_canvas_is_currently_zooming = False
def update_current_image(self):
rect = (0, 0, self.variables.canvas_width, self.variables.canvas_height)
self.variables.canvas_image_object.update_canvas_display_image_from_canvas_rect(rect)
self.set_image_from_numpy_array(self.variables.canvas_image_object.display_image)
self.update()
def redraw_all_shapes(self):
for shape_id in self.variables.shape_ids:
pixel_coords = self._get_shape_property(shape_id, SHAPE_PROPERTIES.IMAGE_COORDS)
if pixel_coords:
new_canvas_coords = self.image_coords_to_canvas_coords(shape_id)
self.modify_existing_shape_using_canvas_coords(shape_id, new_canvas_coords, update_pixel_coords=False)
def set_current_tool_to_select_closest_shape(self):
self.variables.active_tool = TOOLS.SELECT_CLOSEST_SHAPE_TOOL
self.variables.current_tool = TOOLS.SELECT_CLOSEST_SHAPE_TOOL
def set_current_tool_to_zoom_out(self):
self.variables.current_shape_id = self.variables.zoom_rect_id
self.variables.active_tool = TOOLS.ZOOM_OUT_TOOL
self.variables.current_tool = TOOLS.ZOOM_OUT_TOOL
def set_current_tool_to_zoom_in(self):
self.variables.current_shape_id = self.variables.zoom_rect_id
self.variables.active_tool = TOOLS.ZOOM_IN_TOOL
self.variables.current_tool = TOOLS.ZOOM_IN_TOOL
def set_current_tool_to_draw_rect(self, rect_id=None):
self.variables.current_shape_id = rect_id
self.show_shape(rect_id)
self.variables.active_tool = TOOLS.DRAW_RECT_BY_DRAGGING
self.variables.current_tool = TOOLS.DRAW_RECT_BY_DRAGGING
def set_current_tool_to_draw_rect_by_clicking(self, rect_id=None):
self.variables.current_shape_id = rect_id
self.show_shape(rect_id)
self.variables.active_tool = TOOLS.DRAW_RECT_BY_CLICKING
self.variables.current_tool = TOOLS.DRAW_RECT_BY_CLICKING
def set_current_tool_to_selection_tool(self):
self.variables.current_shape_id = self.variables.select_rect_id
self.variables.active_tool = TOOLS.SELECT_TOOL
self.variables.current_tool = TOOLS.SELECT_TOOL
def set_current_tool_to_draw_line_by_dragging(self, line_id=None):
self.variables.current_shape_id = line_id
self.show_shape(line_id)
self.variables.active_tool = TOOLS.DRAW_LINE_BY_DRAGGING
self.variables.current_tool = TOOLS.DRAW_LINE_BY_DRAGGING
def set_current_tool_to_draw_line_by_clicking(self, line_id=None):
self.variables.current_shape_id = line_id
self.show_shape(line_id)
self.variables.active_tool = TOOLS.DRAW_LINE_BY_CLICKING
self.variables.current_tool = TOOLS.DRAW_LINE_BY_CLICKING
def set_current_tool_to_draw_arrow_by_dragging(self, arrow_id=None):
self.variables.current_shape_id = arrow_id
self.show_shape(arrow_id)
self.variables.active_tool = TOOLS.DRAW_ARROW_BY_DRAGGING
self.variables.current_tool = TOOLS.DRAW_ARROW_BY_DRAGGING
def set_current_tool_to_draw_arrow_by_clicking(self, line_id=None):
self.variables.current_shape_id = line_id
self.show_shape(line_id)
self.variables.active_tool = TOOLS.DRAW_ARROW_BY_CLICKING
self.variables.current_tool = TOOLS.DRAW_ARROW_BY_CLICKING
def set_current_tool_to_draw_polygon_by_clicking(self, polygon_id=None):
self.variables.current_shape_id = polygon_id
self.show_shape(polygon_id)
self.variables.active_tool = TOOLS.DRAW_POLYGON_BY_CLICKING
self.variables.current_tool = TOOLS.DRAW_POLYGON_BY_CLICKING
def set_current_tool_to_draw_point(self, point_id=None):
self.variables.current_shape_id = point_id
self.show_shape(point_id)
self.variables.active_tool = TOOLS.DRAW_POINT_BY_CLICKING
self.variables.current_tool = TOOLS.DRAW_POINT_BY_CLICKING
def set_current_tool_to_translate_shape(self):
self.variables.active_tool = TOOLS.TRANSLATE_SHAPE_TOOL
self.variables.current_tool = TOOLS.TRANSLATE_SHAPE_TOOL
def set_current_tool_to_none(self):
self.variables.active_tool = None
self.variables.current_tool = None
def set_current_tool_to_edit_shape(self):
self.variables.active_tool = TOOLS.EDIT_SHAPE_TOOL
self.variables.current_tool = TOOLS.EDIT_SHAPE_TOOL
def set_current_tool_to_edit_shape_coords(self):
self.variables.active_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
self.variables.current_tool = TOOLS.EDIT_SHAPE_COORDS_TOOL
def set_current_tool_to_pan(self):
self.variables.active_tool = TOOLS.PAN_TOOL
self.variables.current_tool = TOOLS.PAN_TOOL
def _set_image_from_pil_image(self, pil_image):
nx_pix, ny_pix = pil_image.size
self.config(scrollregion=(0, 0, nx_pix, ny_pix))
self.variables._tk_im = ImageTk.PhotoImage(pil_image)
self.variables.image_id = self.create_image(0, 0, anchor="nw", image=self.variables._tk_im)
self.tag_lower(self.variables.image_id)
def _get_shape_property(self,
shape_id, # type: int
shape_property, # type: str
):
properties = self.variables.shape_properties[str(shape_id)]
return properties[shape_property]
def _set_shape_property(self,
shape_id, # type: int
shape_property, # type: str
val,
):
if not str(shape_id) in self.variables.shape_properties.keys():
self.variables.shape_properties[str(shape_id)] = {}
self.variables.shape_properties[str(shape_id)][shape_property] = val
def _update_shape_properties(self,
shape_id, # type: int
properties, # type: dict
):
for key in properties.keys():
val = properties[key]
self._set_shape_property(shape_id, key, val)
def _pan(self, event):
new_canvas_x_ul = self.variables.pan_anchor_point_xy[0] - event.x
new_canvas_y_ul = self.variables.pan_anchor_point_xy[1] - event.y
new_canvas_x_br = new_canvas_x_ul + self.variables.canvas_width
new_canvas_y_br = new_canvas_y_ul + self.variables.canvas_height
canvas_coords = (new_canvas_x_ul, new_canvas_y_ul, new_canvas_x_br, new_canvas_y_br)
image_coords = self.variables.canvas_image_object.canvas_coords_to_full_image_yx(canvas_coords)
image_y_ul = image_coords[0]
image_x_ul = image_coords[1]
image_y_br = image_coords[2]
image_x_br = image_coords[3]
if image_y_ul < 0:
new_canvas_y_ul = 0
new_canvas_y_br = self.variables.canvas_height
if image_x_ul < 0:
new_canvas_x_ul = 0
new_canvas_x_br = self.variables.canvas_width
if image_y_br > self.variables.canvas_image_object.image_reader.full_image_ny:
image_y_br = self.variables.canvas_image_object.image_reader.full_image_ny
new_canvas_x_br, new_canvas_y_br = self.variables.canvas_image_object.full_image_yx_to_canvas_coords(
(image_y_br, image_x_br))
new_canvas_x_ul, new_canvas_y_ul = int(new_canvas_x_br - self.variables.canvas_width), int(
new_canvas_y_br - self.variables.canvas_height)
if image_x_br > self.variables.canvas_image_object.image_reader.full_image_nx:
image_x_br = self.variables.canvas_image_object.image_reader.full_image_nx
new_canvas_x_br, new_canvas_y_br = self.variables.canvas_image_object.full_image_yx_to_canvas_coords(
(image_y_br, image_x_br))
new_canvas_x_ul, new_canvas_y_ul = int(new_canvas_x_br - self.variables.canvas_width), int(
new_canvas_y_br - self.variables.canvas_height)
canvas_rect = (new_canvas_x_ul, new_canvas_y_ul, new_canvas_x_br, new_canvas_y_br)
self.zoom_to_selection(canvas_rect, self.variables.animate_pan)
self.hide_shape(self.variables.zoom_rect_id)
def config_do_not_scale_image_to_fit(self):
self.sbarv=tkinter.Scrollbar(self, orient=tkinter.VERTICAL)
self.sbarh=tkinter.Scrollbar(self, orient=tkinter.HORIZONTAL)
self.sbarv.config(command=self.yview)
self.sbarh.config(command=self.xview)
self.config(yscrollcommand=self.sbarv.set)
self.config(xscrollcommand=self.sbarh.set)
self.sbarv.grid(row=0, column=1, sticky=tkinter.N+tkinter.S)
self.sbarh.grid(row=1, column=0, sticky=tkinter.E+tkinter.W)
def save_full_canvas_as_png(self,
output_fname, # type: str
):
# put a sleep in here in case there is a dialog covering the screen before this method is called.
time.sleep(0.2)
im = self.save_currently_displayed_canvas_to_numpy_array()
im.save(output_fname)
def save_currently_displayed_canvas_to_numpy_array(self):
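# note: despite the name, this returns a cropped PIL.Image from a screen
# grab rather than a numpy array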
x_ul = self.winfo_rootx() + 1
y_ul = self.winfo_rooty() + 1
x_lr = x_ul + self.variables.canvas_width
y_lr = y_ul + self.variables.canvas_height
im = ImageGrab.grab()
im = im.crop((x_ul, y_ul, x_lr, y_lr))
return im
def activate_color_selector(self, event):
color = colorchooser.askcolor()[1]
self.variables.foreground_color = color
self.change_shape_color(self.variables.current_shape_id, color)
def find_closest_shape_coord(self,
shape_id, # type: int
canvas_x, # type: int
canvas_y, # type: int
): # type: (...) -> int
shape_type = self.get_shape_type(self.variables.current_shape_id)
coords = self.get_shape_canvas_coords(shape_id)
if shape_type == SHAPE_TYPES.RECT:
select_x1, select_y1, select_x2, select_y2 = coords
select_xul = min(select_x1, select_x2)
select_xlr = max(select_x1, select_x2)
select_yul = min(select_y1, select_y2)
select_ylr = max(select_y1, select_y2)
ul = (select_xul, select_yul)
ur = (select_xlr, select_yul)
lr = (select_xlr, select_ylr)
ll = (select_xul, select_ylr)
rect_coords = [(select_x1, select_y1), (select_x2, select_y2)]
all_coords = [ul, ur, lr, ll]
squared_distances = []
for corner_coord in all_coords:
coord_x, coord_y = corner_coord
d = (coord_x - canvas_x)**2 + (coord_y - canvas_y)**2
squared_distances.append(d)
closest_coord_index = numpy.where(squared_distances == numpy.min(squared_distances))[0][0]
closest_coord = all_coords[closest_coord_index]
if closest_coord not in rect_coords:
if closest_coord == ul:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ul[0], ul[1], lr[0], lr[1]))
if closest_coord == ur:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ur[0], ur[1], ll[0], ll[1]))
if closest_coord == lr:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ul[0], ul[1], lr[0], lr[1]))
if closest_coord == ll:
self.modify_existing_shape_using_canvas_coords(self.variables.current_shape_id, (ll[0], ll[1], ur[0], ur[1]))
coords = self.get_shape_canvas_coords(shape_id)
squared_distances = []
coord_indices = numpy.arange(0, len(coords), step=2)
for i in coord_indices:
coord_x, coord_y = coords[i], coords[i+1]
d = (coord_x - canvas_x)**2 + (coord_y - canvas_y)**2
squared_distances.append(d)
closest_coord_index = numpy.where(squared_distances == numpy.min(squared_distances))[0][0]
return closest_coord_index
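# --- hedged usage sketch (not part of the original module) ---
# minimal example of embedding ImageCanvas in a Tk app; ArrayImageReader is
# a hypothetical ImageReader subclass, assumed here for illustration only
#
# import tkinter
# import numpy
# root = tkinter.Tk()
# canvas = ImageCanvas(root)
# canvas.set_canvas_size(600, 400)
# canvas.pack()
# canvas.set_image_reader(ArrayImageReader(numpy.zeros((512, 512))))  # hypothetical reader
# canvas.set_current_tool_to_pan()
# root.mainloop()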
#!/usr/bin/env python3
import os
import glob
import re
import sys
import math
TIMEOUT = 100
# use cases and their directory names
tests = [
"CP3-4.8.5", "CP1-4.8.5", "CP3-4.8.9", "CP1-4.8.9",
"noSeqCon-CP3-4.8.5", "noSeqCon-CP1-4.8.5", "noSeqCon-CP3-4.8.9", "noSeqCon-CP1-4.8.9",
"nolambda-CP3-4.8.5", "nolambda-CP1-4.8.5", "nolambda-CP3-4.8.9", "nolambda-CP1-4.8.9"
]
loc_orig_5 = os.path.join('Logs_DLL_8.20', 'Logs_orig_4.8.5', '*.trace')
loc_orig_9 = os.path.join('Logs_DLL_8.20', 'Logs_orig_4.8.9', '*.trace')
loc_noseqcon_5 = os.path.join('Logs_DLL_8.20', 'Logs_noseqcon_4.8.5', '*.trace')
loc_noseqcon_9 = os.path.join('Logs_DLL_8.20', 'Logs_noseqcon_4.8.9', '*.trace')
loc_nolambda_5 = os.path.join('Logs_DLL_8.20', 'Logs_nolambda_4.8.5', '*.trace')
loc_nolambda_9 = os.path.join('Logs_DLL_8.20', 'Logs_nolambda_4.8.9', '*.trace')
file_orig_5 = glob.glob(loc_orig_5)
file_orig_9 = glob.glob(loc_orig_9)
file_noseqcon_5 = glob.glob(loc_noseqcon_5)
file_noseqcon_9 = glob.glob(loc_noseqcon_9)
file_nolambda_5 = glob.glob(loc_nolambda_5)
file_nolambda_9 = glob.glob(loc_nolambda_9)
allinfo_Expand = {}
allinfo_Remove = {}
allinfo_InsertAfter = {}
allinfo_InsertBefore = {}
def get_time(files, index):
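    """Collect per-method verification times from Dafny/Boogie trace files.

    Each trace reports lines of the form
    "[<time> s, <n> proof obligations] <status>"; verified runs record the
    measured time and timed-out runs record TIMEOUT. ``index`` selects the
    CP3 entry in ``tests``; ``index + 1`` is the matching CP1 entry.
    """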
for f in files:
outfile = open(f, 'r')
data = outfile.readlines()
outfile.close()
for i in range(0, len(data)):
if 'Verifying Impl$$_module.__default.Expand ...' in data[i]:
                time = re.findall(r"\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1])
if len(time) > 0:
if time[0][2] == "verified":
if 'CP3' in f:
allinfo_Expand[tests[index]] = allinfo_Expand.get(tests[index], [])
allinfo_Expand[tests[index]] += [float(time[0][0])]
else:
allinfo_Expand[tests[index+1]] = allinfo_Expand.get(tests[index+1], [])
allinfo_Expand[tests[index+1]] += [float(time[0][0])]
else:
if time[0][2] == "timed":
if 'CP3' in f:
allinfo_Expand[tests[index]] = allinfo_Expand.get(tests[index], [])
allinfo_Expand[tests[index]] += [float(TIMEOUT)]
else:
allinfo_Expand[tests[index+1]] = allinfo_Expand.get(tests[index+1], [])
allinfo_Expand[tests[index+1]] += [float(TIMEOUT)]
else:
allinfo_Expand[tests[index]] = allinfo_Expand.get(tests[index], [])
allinfo_Expand[tests[index+1]] = allinfo_Expand.get(tests[index+1], [])
if 'Verifying Impl$$_module.__default.Remove ...' in data[i]:
                time = re.findall(r"\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1])
if len(time) > 0:
if time[0][2] == "verified":
if 'CP3' in f:
allinfo_Remove[tests[index]] = allinfo_Remove.get(tests[index], [])
allinfo_Remove[tests[index]] += [float(time[0][0])]
else:
allinfo_Remove[tests[index+1]] = allinfo_Remove.get(tests[index+1], [])
allinfo_Remove[tests[index+1]] += [float(time[0][0])]
else:
if time[0][2] == "timed":
if 'CP3' in f:
allinfo_Remove[tests[index]] = allinfo_Remove.get(tests[index], [])
allinfo_Remove[tests[index]] += [float(TIMEOUT)]
else:
allinfo_Remove[tests[index+1]] = allinfo_Remove.get(tests[index+1], [])
allinfo_Remove[tests[index+1]] += [float(TIMEOUT)]
else:
allinfo_Remove[tests[index]] = allinfo_Remove.get(tests[index], [])
allinfo_Remove[tests[index+1]] = allinfo_Remove.get(tests[index+1], [])
if 'Verifying Impl$$_module.__default.InsertAfter ...' in data[i]:
                time = re.findall(r"\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1])
if len(time) > 0:
if time[0][2] == "verified":
if 'CP3' in f:
allinfo_InsertAfter[tests[index]] = allinfo_InsertAfter.get(tests[index], [])
allinfo_InsertAfter[tests[index]] += [float(time[0][0])]
else:
allinfo_InsertAfter[tests[index+1]] = allinfo_InsertAfter.get(tests[index+1], [])
allinfo_InsertAfter[tests[index+1]] += [float(time[0][0])]
else:
if time[0][2] == "timed":
if 'CP3' in f:
allinfo_InsertAfter[tests[index]] = allinfo_InsertAfter.get(tests[index], [])
allinfo_InsertAfter[tests[index]] += [float(TIMEOUT)]
else:
allinfo_InsertAfter[tests[index+1]] = allinfo_InsertAfter.get(tests[index+1], [])
allinfo_InsertAfter[tests[index+1]] += [float(TIMEOUT)]
else:
allinfo_InsertAfter[tests[index]] = allinfo_InsertAfter.get(tests[index], [])
allinfo_InsertAfter[tests[index+1]] = allinfo_InsertAfter.get(tests[index+1], [])
if 'Verifying Impl$$_module.__default.InsertBefore ...' in data[i]:
                time = re.findall(r"\[([0-9.]*) s, ([0-9.]*) proof obligations\] ([a-z]+)", data[i + 1])
if len(time) > 0:
if time[0][2] == "verified":
if 'CP3' in f:
allinfo_InsertBefore[tests[index]] = allinfo_InsertBefore.get(tests[index], [])
allinfo_InsertBefore[tests[index]] += [float(time[0][0])]
else:
allinfo_InsertBefore[tests[index+1]] = allinfo_InsertBefore.get(tests[index+1], [])
allinfo_InsertBefore[tests[index+1]] += [float(time[0][0])]
else:
if time[0][2] == "timed":
if 'CP3' in f:
allinfo_InsertBefore[tests[index]] = allinfo_InsertBefore.get(tests[index], [])
allinfo_InsertBefore[tests[index]] += [float(TIMEOUT)]
else:
allinfo_InsertBefore[tests[index+1]] = allinfo_InsertBefore.get(tests[index+1], [])
allinfo_InsertBefore[tests[index+1]] += [float(TIMEOUT)]
else:
allinfo_InsertBefore[tests[index]] = allinfo_InsertBefore.get(tests[index], [])
allinfo_InsertBefore[tests[index+1]] = allinfo_InsertBefore.get(tests[index+1], [])
get_time(file_orig_5, 0)
get_time(file_orig_9, 2)
get_time(file_noseqcon_5, 4)
get_time(file_noseqcon_9, 6)
get_time(file_nolambda_5, 8)
get_time(file_nolambda_9, 10)
# print(allinfo_Expand)
# print(allinfo_Remove)
# print(allinfo_InsertAfter)
# print(allinfo_InsertBefore)
# print a CSV
def show_csv(allinfo, info):
for test in tests:
if test in allinfo:
times = allinfo[test]
            print(test + ", " + info, end="")
            for i in times:
                print(", " + str(i), end="")
            print()
# show_csv(allinfo_Expand, "Expand")
# show_csv(allinfo_Remove, "Remove")
# show_csv(allinfo_InsertAfter, "InsertAfter")
# show_csv(allinfo_InsertBefore, "InsertBefore")
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
matplotlib.rcParams.update({'font.size': 20})
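# Illustrative sketch only (not used by the plotting code below): the fully
# unrolled mean/std blocks that follow could equivalently be computed in a
# loop over the test labels.
def _summarize(allinfo):
    # hypothetical helper mapping each test label to (mean, std) of its times
    return {t: (np.mean(allinfo[t]), np.std(allinfo[t])) for t in tests if t in allinfo}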
Expand_cp3_5 = np.array(allinfo_Expand[tests[0]])
Expand_cp1_5 = np.array(allinfo_Expand[tests[1]])
Expand_cp3_9 = np.array(allinfo_Expand[tests[2]])
Expand_cp1_9 = np.array(allinfo_Expand[tests[3]])
Expand_noseqcon_cp3_5 = np.array(allinfo_Expand[tests[4]])
Expand_noseqcon_cp1_5 = np.array(allinfo_Expand[tests[5]])
Expand_noseqcon_cp3_9 = np.array(allinfo_Expand[tests[6]])
Expand_noseqcon_cp1_9 = np.array(allinfo_Expand[tests[7]])
Expand_nolambda_cp3_5 = np.array(allinfo_Expand[tests[8]])
Expand_nolambda_cp1_5 = np.array(allinfo_Expand[tests[9]])
Expand_nolambda_cp3_9 = np.array(allinfo_Expand[tests[10]])
Expand_nolambda_cp1_9 = np.array(allinfo_Expand[tests[11]])
Expand_cp3_5_mean = np.mean(Expand_cp3_5)
Expand_cp3_5_std = np.std(Expand_cp3_5)
Expand_cp1_5_mean = np.mean(Expand_cp1_5)
Expand_cp1_5_std = np.std(Expand_cp1_5)
Expand_cp3_9_mean = np.mean(Expand_cp3_9)
Expand_cp3_9_std = np.std(Expand_cp3_9)
Expand_cp1_9_mean = np.mean(Expand_cp1_9)
Expand_cp1_9_std = np.std(Expand_cp1_9)
Expand_noseqcon_cp3_5_mean = np.mean(Expand_noseqcon_cp3_5)
Expand_noseqcon_cp3_5_std = np.std(Expand_noseqcon_cp3_5)
Expand_noseqcon_cp1_5_mean = np.mean(Expand_noseqcon_cp1_5)
Expand_noseqcon_cp1_5_std = np.std(Expand_noseqcon_cp1_5)
Expand_noseqcon_cp3_9_mean = np.mean(Expand_noseqcon_cp3_9)
Expand_noseqcon_cp3_9_std = np.std(Expand_noseqcon_cp3_9)
Expand_noseqcon_cp1_9_mean = np.mean(Expand_noseqcon_cp1_9)
Expand_noseqcon_cp1_9_std = np.std(Expand_noseqcon_cp1_9)
Expand_nolambda_cp3_5_mean = np.mean(Expand_nolambda_cp3_5)
Expand_nolambda_cp3_5_std = np.std(Expand_nolambda_cp3_5)
Expand_nolambda_cp1_5_mean = np.mean(Expand_nolambda_cp1_5)
Expand_nolambda_cp1_5_std = np.std(Expand_nolambda_cp1_5)
Expand_nolambda_cp3_9_mean = np.mean(Expand_nolambda_cp3_9)
Expand_nolambda_cp3_9_std = np.std(Expand_nolambda_cp3_9)
Expand_nolambda_cp1_9_mean = np.mean(Expand_nolambda_cp1_9)
Expand_nolambda_cp1_9_std = np.std(Expand_nolambda_cp1_9)
Remove_cp3_5 = np.array(allinfo_Remove[tests[0]])
Remove_cp1_5 = np.array(allinfo_Remove[tests[1]])
Remove_cp3_9 = np.array(allinfo_Remove[tests[2]])
Remove_cp1_9 = np.array(allinfo_Remove[tests[3]])
Remove_noseqcon_cp3_5 = np.array(allinfo_Remove[tests[4]])
Remove_noseqcon_cp1_5 = np.array(allinfo_Remove[tests[5]])
Remove_noseqcon_cp3_9 = np.array(allinfo_Remove[tests[6]])
Remove_noseqcon_cp1_9 = np.array(allinfo_Remove[tests[7]])
Remove_nolambda_cp3_5 = np.array(allinfo_Remove[tests[8]])
Remove_nolambda_cp1_5 = np.array(allinfo_Remove[tests[9]])
Remove_nolambda_cp3_9 = np.array(allinfo_Remove[tests[10]])
Remove_nolambda_cp1_9 = np.array(allinfo_Remove[tests[11]])
Remove_cp3_5_mean = np.mean(Remove_cp3_5)
Remove_cp3_5_std = np.std(Remove_cp3_5)
Remove_cp1_5_mean = np.mean(Remove_cp1_5)
Remove_cp1_5_std = np.std(Remove_cp1_5)
Remove_cp3_9_mean = np.mean(Remove_cp3_9)
Remove_cp3_9_std = np.std(Remove_cp3_9)
Remove_cp1_9_mean = np.mean(Remove_cp1_9)
Remove_cp1_9_std = np.std(Remove_cp1_9)
Remove_noseqcon_cp3_5_mean = np.mean(Remove_noseqcon_cp3_5)
Remove_noseqcon_cp3_5_std = np.std(Remove_noseqcon_cp3_5)
Remove_noseqcon_cp1_5_mean = np.mean(Remove_noseqcon_cp1_5)
Remove_noseqcon_cp1_5_std = np.std(Remove_noseqcon_cp1_5)
Remove_noseqcon_cp3_9_mean = np.mean(Remove_noseqcon_cp3_9)
Remove_noseqcon_cp3_9_std = np.std(Remove_noseqcon_cp3_9)
Remove_noseqcon_cp1_9_mean = np.mean(Remove_noseqcon_cp1_9)
Remove_noseqcon_cp1_9_std = np.std(Remove_noseqcon_cp1_9)
Remove_nolambda_cp3_5_mean = np.mean(Remove_nolambda_cp3_5)
Remove_nolambda_cp3_5_std = np.std(Remove_nolambda_cp3_5)
Remove_nolambda_cp1_5_mean = np.mean(Remove_nolambda_cp1_5)
Remove_nolambda_cp1_5_std = np.std(Remove_nolambda_cp1_5)
Remove_nolambda_cp3_9_mean = np.mean(Remove_nolambda_cp3_9)
Remove_nolambda_cp3_9_std = np.std(Remove_nolambda_cp3_9)
Remove_nolambda_cp1_9_mean = np.mean(Remove_nolambda_cp1_9)
Remove_nolambda_cp1_9_std = np.std(Remove_nolambda_cp1_9)
InsertAfter_cp3_5 = np.array(allinfo_InsertAfter[tests[0]])
InsertAfter_cp1_5 = np.array(allinfo_InsertAfter[tests[1]])
InsertAfter_cp3_9 = np.array(allinfo_InsertAfter[tests[2]])
InsertAfter_cp1_9 = np.array(allinfo_InsertAfter[tests[3]])
InsertAfter_noseqcon_cp3_5 = np.array(allinfo_InsertAfter[tests[4]])
InsertAfter_noseqcon_cp1_5 = np.array(allinfo_InsertAfter[tests[5]])
InsertAfter_noseqcon_cp3_9 = np.array(allinfo_InsertAfter[tests[6]])
InsertAfter_noseqcon_cp1_9 = np.array(allinfo_InsertAfter[tests[7]])
InsertAfter_nolambda_cp3_5 = np.array(allinfo_InsertAfter[tests[8]])
InsertAfter_nolambda_cp1_5 = np.array(allinfo_InsertAfter[tests[9]])
InsertAfter_nolambda_cp3_9 = np.array(allinfo_InsertAfter[tests[10]])
InsertAfter_nolambda_cp1_9 = np.array(allinfo_InsertAfter[tests[11]])
InsertAfter_cp3_5_mean = np.mean(InsertAfter_cp3_5)
InsertAfter_cp3_5_std = np.std(InsertAfter_cp3_5)
InsertAfter_cp1_5_mean = np.mean(InsertAfter_cp1_5)
InsertAfter_cp1_5_std = np.std(InsertAfter_cp1_5)
InsertAfter_cp3_9_mean = np.mean(InsertAfter_cp3_9)
InsertAfter_cp3_9_std = np.std(InsertAfter_cp3_9)
InsertAfter_cp1_9_mean = np.mean(InsertAfter_cp1_9)
InsertAfter_cp1_9_std = np.std(InsertAfter_cp1_9)
InsertAfter_noseqcon_cp3_5_mean = np.mean(InsertAfter_noseqcon_cp3_5)
InsertAfter_noseqcon_cp3_5_std = np.std(InsertAfter_noseqcon_cp3_5)
InsertAfter_noseqcon_cp1_5_mean = np.mean(InsertAfter_noseqcon_cp1_5)
InsertAfter_noseqcon_cp1_5_std = np.std(InsertAfter_noseqcon_cp1_5)
InsertAfter_noseqcon_cp3_9_mean = np.mean(InsertAfter_noseqcon_cp3_9)
InsertAfter_noseqcon_cp3_9_std = np.std(InsertAfter_noseqcon_cp3_9)
InsertAfter_noseqcon_cp1_9_mean = np.mean(InsertAfter_noseqcon_cp1_9)
InsertAfter_noseqcon_cp1_9_std = np.std(InsertAfter_noseqcon_cp1_9)
InsertAfter_nolambda_cp3_5_mean = np.mean(InsertAfter_nolambda_cp3_5)
InsertAfter_nolambda_cp3_5_std = np.std(InsertAfter_nolambda_cp3_5)
InsertAfter_nolambda_cp1_5_mean = np.mean(InsertAfter_nolambda_cp1_5)
InsertAfter_nolambda_cp1_5_std = np.std(InsertAfter_nolambda_cp1_5)
InsertAfter_nolambda_cp3_9_mean = np.mean(InsertAfter_nolambda_cp3_9)
InsertAfter_nolambda_cp3_9_std = np.std(InsertAfter_nolambda_cp3_9)
InsertAfter_nolambda_cp1_9_mean = np.mean(InsertAfter_nolambda_cp1_9)
InsertAfter_nolambda_cp1_9_std = np.std(InsertAfter_nolambda_cp1_9)
InsertBefore_cp3_5 = np.array(allinfo_InsertBefore[tests[0]])
InsertBefore_cp1_5 = np.array(allinfo_InsertBefore[tests[1]])
InsertBefore_cp3_9 = np.array(allinfo_InsertBefore[tests[2]])
InsertBefore_cp1_9 = np.array(allinfo_InsertBefore[tests[3]])
InsertBefore_noseqcon_cp3_5 = np.array(allinfo_InsertBefore[tests[4]])
InsertBefore_noseqcon_cp1_5 = np.array(allinfo_InsertBefore[tests[5]])
InsertBefore_noseqcon_cp3_9 = np.array(allinfo_InsertBefore[tests[6]])
InsertBefore_noseqcon_cp1_9 = np.array(allinfo_InsertBefore[tests[7]])
InsertBefore_nolambda_cp3_5 = np.array(allinfo_InsertBefore[tests[8]])
InsertBefore_nolambda_cp1_5 = np.array(allinfo_InsertBefore[tests[9]])
InsertBefore_nolambda_cp3_9 = np.array(allinfo_InsertBefore[tests[10]])
InsertBefore_nolambda_cp1_9 = np.array(allinfo_InsertBefore[tests[11]])
InsertBefore_cp3_5_mean = np.mean(InsertBefore_cp3_5)
InsertBefore_cp3_5_std = np.std(InsertBefore_cp3_5)
InsertBefore_cp1_5_mean = np.mean(InsertBefore_cp1_5)
InsertBefore_cp1_5_std = np.std(InsertBefore_cp1_5)
InsertBefore_cp3_9_mean = np.mean(InsertBefore_cp3_9)
InsertBefore_cp3_9_std = np.std(InsertBefore_cp3_9)
InsertBefore_cp1_9_mean = np.mean(InsertBefore_cp1_9)
InsertBefore_cp1_9_std = np.std(InsertBefore_cp1_9)
InsertBefore_noseqcon_cp3_5_mean = np.mean(InsertBefore_noseqcon_cp3_5)
InsertBefore_noseqcon_cp3_5_std = np.std(InsertBefore_noseqcon_cp3_5)
InsertBefore_noseqcon_cp1_5_mean = np.mean(InsertBefore_noseqcon_cp1_5)
InsertBefore_noseqcon_cp1_5_std = np.std(InsertBefore_noseqcon_cp1_5)
InsertBefore_noseqcon_cp3_9_mean = np.mean(InsertBefore_noseqcon_cp3_9)
InsertBefore_noseqcon_cp3_9_std = np.std(InsertBefore_noseqcon_cp3_9)
InsertBefore_noseqcon_cp1_9_mean = np.mean(InsertBefore_noseqcon_cp1_9)
import unittest
from ancb import NumpyCircularBuffer
from ancb import ( # type: ignore
star_can_broadcast, can_broadcast
)
from numpy import array_equal, allclose, shares_memory
from numpy import array, zeros, arange, ndarray, ones, empty
from numpy.random import rand, randint
from numpy import fill_diagonal, roll
from itertools import zip_longest
from operator import (
matmul, add, sub, mul, truediv, mod, floordiv, pow,
rshift, lshift, and_, or_, xor, neg, pos, abs, inv, invert,
iadd, iand, ifloordiv, ilshift, imod, imul,
ior, ipow, irshift, isub, itruediv, ixor
)
class TestBroadcastability(unittest.TestCase):
def test_broadcastablity(self):
x = zeros((1, 2, 3, 4, 5))
y = zeros((1, 1, 1, 4, 5))
z = zeros((1, 1, 1, 3, 5))
w = zeros(1)
self.assertTrue(can_broadcast(x.shape, y.shape))
self.assertFalse(can_broadcast(x.shape, z.shape))
self.assertFalse(can_broadcast(y.shape, z.shape))
self.assertTrue(can_broadcast(x.shape, x.shape))
self.assertTrue(can_broadcast(y.shape, y.shape))
self.assertTrue(can_broadcast(z.shape, z.shape))
self.assertTrue(can_broadcast(w.shape, w.shape))
self.assertTrue(can_broadcast(x.shape, w.shape))
self.assertTrue(can_broadcast(y.shape, w.shape))
self.assertTrue(can_broadcast(z.shape, w.shape))
def test_star_broadcastablity(self):
x = zeros((1, 2, 3, 4, 5))
y = zeros((1, 1, 1, 4, 5))
z = zeros((1, 1, 1, 3, 5))
w = zeros(1)
starexpr = zip_longest(x.shape, y.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, z.shape, fillvalue=1)
self.assertFalse(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, z.shape, fillvalue=1)
self.assertFalse(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, x.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, y.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(z.shape, z.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(w.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(x.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(y.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
starexpr = zip_longest(z.shape, w.shape, fillvalue=1)
self.assertTrue(star_can_broadcast(starexpr))
class OperatorTestFactory(type):
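    """Metaclass that generates one ``test_<opname>`` method per operator.

    Each generated test exercises the operator on a buffer both before and
    after it wraps around (fragments), comparing against an equivalent
    plain ndarray.
    """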
def __new__(cls, name, bases, dct):
obj = super().__new__(cls, name, bases, dct)
bin_operators = [
matmul, add, sub, mul, truediv, mod, floordiv, pow
]
un_operators = [neg, pos, abs, invert, inv]
bitbin_operators = [rshift, lshift, and_, or_, xor]
i_operators = [
iadd, ifloordiv, imul, ipow, isub, itruediv
]
bit_ioperators = [
ilshift, irshift, ior, iand, ixor, imod
]
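        # each *_testcase builder below returns a closure that becomes one
        # generated test method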
def unop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
test = -arange(3, dtype=int)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(-1)
buffer.append(-2)
res = op(buffer)
self.assertIsInstance(res, ndarray)
self.assertTrue(array_equal(res, op(test))) # unfrag
buffer.append(-3)
test -= 1
res = op(buffer)
self.assertIsInstance(res, ndarray)
self.assertTrue(array_equal(res, op(test))) # frag
return f
def bitbinop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
test = arange(1, 4, dtype=int)
x = randint(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
buffer.append(2)
buffer.append(3)
res1 = op(buffer, x)
res2 = op(x, buffer)
self.assertIsInstance(res1, ndarray)
self.assertIsInstance(res2, ndarray)
self.assertTrue(array_equal(res1, op(test, x)))
self.assertTrue(array_equal(res2, op(x, test)))
buffer.append(4)
test += 1
res1 = op(buffer, x)
res2 = op(x, buffer)
self.assertIsInstance(res1, ndarray)
self.assertIsInstance(res2, ndarray)
self.assertTrue(array_equal(res1, op(test, x)))
self.assertTrue(array_equal(res2, op(x, test)))
return f
def binop_testcase(op):
def f(self):
data = zeros(3, dtype=float)
test = arange(1, 4, dtype=float)
x = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
buffer.append(2)
buffer.append(3)
res1 = op(buffer, x)
self.assertIsInstance(res1, ndarray)
self.assertTrue(allclose(res1, op(test, x)))
res2 = op(x, buffer)
self.assertIsInstance(res2, ndarray)
self.assertTrue(allclose(res2, op(x, test)))
buffer.append(4)
test += 1
res1 = op(buffer, x)
self.assertIsInstance(res1, ndarray)
self.assertTrue(allclose(res1, op(test, x)))
res2 = op(x, buffer)
self.assertIsInstance(res2, ndarray)
self.assertTrue(allclose(res2, op(x, test)))
return f
def iop_testcase(op):
def f(self):
data = zeros(3, dtype=float)
data2 = zeros(3, dtype=float)
test1 = arange(1, 4, dtype=float)
test2 = arange(2, 5, dtype=float)
x = rand(3)
buffer1 = NumpyCircularBuffer(data)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(1)
buffer1.append(2)
buffer1.append(3)
buffer2.append(1)
buffer2.append(2)
buffer2.append(3)
op(buffer1, x)
op(test1, x)
self.assertIsInstance(buffer1, NumpyCircularBuffer)
self.assertTrue(array_equal(buffer1 + 0, test1))
buffer2.append(4)
op(buffer2, x)
op(test2, x)
self.assertIsInstance(buffer2, NumpyCircularBuffer)
self.assertTrue(array_equal(buffer2 + 0, test2))
return f
def bitiop_testcase(op):
def f(self):
data = zeros(3, dtype=int)
data2 = zeros(3, dtype=int)
test1 = arange(1, 4, dtype=int)
test2 = arange(2, 5, dtype=int)
x = randint(low=1, high=100, size=3)
buffer1 = NumpyCircularBuffer(data)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(1)
buffer1.append(2)
buffer1.append(3)
buffer2.append(1)
buffer2.append(2)
buffer2.append(3)
op(buffer1, x)
op(test1, x)
self.assertIsInstance(buffer1, NumpyCircularBuffer)
self.assertTrue(allclose(buffer1 + 0, test1))
buffer2.append(4)
op(buffer2, x)
op(test2, x)
self.assertIsInstance(buffer2, NumpyCircularBuffer)
self.assertTrue(allclose(buffer2 + 0, test2))
return f
for op in bin_operators:
setattr(obj, 'test_{}'.format(op.__name__), binop_testcase(op))
for op in bitbin_operators:
setattr(obj, 'test_{}'.format(op.__name__), bitbinop_testcase(op))
for op in un_operators:
setattr(obj, 'test_{}'.format(op.__name__), unop_testcase(op))
for op in i_operators:
setattr(obj, 'test_{}'.format(op.__name__), iop_testcase(op))
for op in bit_ioperators:
setattr(obj, 'test_{}'.format(op.__name__), bitiop_testcase(op))
return(obj)
class TestNumpyCircularBuffer(
unittest.TestCase, metaclass=OperatorTestFactory
):
"""
NumpyCircularBuffer tests
"""
def test_init(self):
data = zeros(3)
buffer = NumpyCircularBuffer(data)
self.assertTrue(array_equal(data, buffer))
def test_fragmentation(self):
data = zeros(3)
buffer = NumpyCircularBuffer(data)
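        # "fragmented" here means the logical contents wrap past the end of
        # the backing array and are no longer contiguous in memory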
self.assertFalse(buffer.fragmented)
buffer.append(0)
self.assertFalse(buffer.fragmented)
buffer.append(1)
self.assertFalse(buffer.fragmented)
buffer.append(2)
self.assertFalse(buffer.fragmented)
buffer.append(3)
self.assertTrue(buffer.fragmented)
buffer.append(4)
self.assertTrue(buffer.fragmented)
buffer.append(5)
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
buffer.pop()
self.assertFalse(buffer.fragmented)
def test_matmul_1d1d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
self.assertTrue(allclose(buffer @ C[:1], arange(1) @ C[:1]))
buffer.append(1)
self.assertTrue(allclose(buffer @ C[:2], arange(2) @ C[:2]))
buffer.append(2)
self.assertTrue(allclose(buffer @ C, arange(3) @ C))
buffer.append(3)
self.assertTrue(allclose(buffer @ C, (arange(1, 4)) @ C))
buffer.append(4)
self.assertTrue(allclose(buffer @ C, (arange(2, 5)) @ C))
buffer.append(5)
self.assertTrue(allclose(buffer @ C, (arange(3, 6)) @ C))
buffer.append(6)
self.assertTrue(allclose(buffer @ C, (arange(4, 7)) @ C))
buffer.pop()
self.assertTrue(allclose(buffer @ C[1:], (arange(5, 7)) @ C[1:]))
buffer.pop()
self.assertTrue(allclose(buffer @ C[2:], (arange(6, 7)) @ C[2:]))
def test_matmul_1d2d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 2"""
data = zeros(3)
A = zeros((3, 3))
B = rand(9).reshape(3, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, arange(3) @ A))
self.assertTrue(allclose(res_b, arange(3) @ B))
buffer.append(3)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(allclose(res_a, arange(1, 4) @ A))
self.assertTrue(allclose(res_b, arange(1, 4) @ B))
def test_matmul_2d2d(self):
"""Tests buffer @ X where buffer.ndim == 2"""
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(arange(9, 12))
test += 3
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
def test_matmul_ndnd(self):
"""Tests buffer @ X where X.ndim > 2 and buffer.ndim > 2"""
data = zeros((3, 3, 3))
A = zeros((3, 3, 3))
B = rand(27).reshape(3, 3, 3)
C = rand(12).reshape(3, 4)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = buffer @ A
res_b = buffer @ B
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(filler + 27)
test += 9
res_a = buffer @ A
res_b = buffer @ B
res_c = buffer @ C
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
self.assertTrue(allclose(res_c, test @ C))
def test_rmatmul_1d1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
res_c = C[:1] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:1] @ arange(1)))
buffer.append(1)
res_c = C[:2] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:2] @ arange(2)))
buffer.append(2)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3)))
buffer.append(3)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(1, 4)))
buffer.append(4)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
buffer.append(6)
res_c = C @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(4, 7)))
buffer.pop()
res_c = C[1:] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[1:] @ arange(5, 7)))
buffer.pop()
res_c = C[2:] @ buffer
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[2:] @ arange(6, 7)))
def test_rmatmul_nd1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data = zeros(3)
A = zeros(9).reshape(3, 3)
B = arange(9).reshape(3, 3)
C = arange(3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = A @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertTrue(array_equal(A @ buffer, A @ array([0, 1, 2])))
buffer.append(3)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ array([1, 2, 3])))
self.assertTrue(allclose(res_b, B @ array([1, 2, 3])))
self.assertTrue(allclose(res_c, C @ array([1, 2, 3])))
buffer.append(4)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(2, 5)))
self.assertTrue(allclose(res_b, B @ arange(2, 5)))
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(3, 6)))
self.assertTrue(allclose(res_b, B @ arange(3, 6)))
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
def test_rmatmul_1dnd(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data1 = zeros((3, 3))
data2 = zeros((3, 3, 3))
A = rand(3)
test1 = arange(9).reshape(3, 3)
test2 = arange(27).reshape(3, 3, 3)
buffer1 = NumpyCircularBuffer(data1)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(arange(3))
buffer1.append(arange(3, 6))
buffer1.append(arange(6, 9))
buffer2.append(arange(9).reshape(3, 3))
buffer2.append(arange(9, 18).reshape(3, 3))
buffer2.append(arange(18, 27).reshape(3, 3))
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(9, 12))
buffer2.append(arange(27, 36).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(12, 15))
buffer2.append(arange(36, 45).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(15, 18))
buffer2.append(arange(45, 54).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = A @ buffer1
res_buf2 = A @ buffer2
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
def test_rmatmul_2d2d(self):
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
C = rand(12).reshape(4, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
buffer.append([9, 10, 11])
test += 3
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
def test_rmatmul_ndnd(self):
data = zeros((3, 3, 3))
A = zeros(27).reshape(3, 3, 3)
B = arange(27).reshape(3, 3, 3)
C = arange(3*8*3).reshape(3, 8, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
buffer.append(filler + 27)
test += 9
res_a = A @ buffer
res_b = B @ buffer
res_c = C @ buffer
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
def test_matmul2_1d1d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
self.assertTrue(allclose(
buffer.matmul(C[:1], empty(1)), arange(1) @ C[:1]
)
)
buffer.append(1)
self.assertTrue(allclose(
buffer.matmul(C[:2], empty(2)), arange(2) @ C[:2]
)
)
buffer.append(2)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(3) @ C
)
)
buffer.append(3)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(1, 4) @ C
)
)
buffer.append(4)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(2, 5) @ C
)
)
buffer.append(5)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(3, 6) @ C
)
)
buffer.append(6)
self.assertTrue(allclose(
buffer.matmul(C, empty(3)), arange(4, 7) @ C
)
)
buffer.pop()
self.assertTrue(allclose(
buffer.matmul(C[1:], empty(2)), arange(5, 7) @ C[1:]
)
)
buffer.pop()
self.assertTrue(allclose(
buffer.matmul(C[2:], empty(1)), arange(6, 7) @ C[2:]
)
)
def test_matmul2_1d2d(self):
"""Tests buffer @ X where buffer.ndim == 1 and X.ndim == 2"""
data = zeros(3)
A = zeros((3, 3))
B = rand(9).reshape(3, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
res_a = buffer.matmul(A, empty(3))
res_b = buffer.matmul(B, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, arange(3) @ A))
self.assertTrue(allclose(res_b, arange(3) @ B))
buffer.append(3)
res_a = buffer.matmul(A, empty(3))
res_b = buffer.matmul(B, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(allclose(res_a, arange(1, 4) @ A))
self.assertTrue(allclose(res_b, arange(1, 4) @ B))
def test_matmul2_2d2d(self):
"""Tests buffer @ X where buffer.ndim == 2"""
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = buffer.matmul(A, empty((3, 3)))
res_b = buffer.matmul(B, empty((3, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(arange(9, 12))
test += 3
res_a = buffer.matmul(A, empty((3, 3)))
res_b = buffer.matmul(B, empty((3, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
def test_matmul2_ndnd(self):
"""Tests buffer @ X where X.ndim > 2 and buffer.ndim > 2"""
data = zeros((3, 3, 3))
A = zeros((3, 3, 3))
B = rand(27).reshape(3, 3, 3)
C = rand(12).reshape(3, 4)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = buffer.matmul(A, empty((3, 3, 3)))
res_b = buffer.matmul(B, empty((3, 3, 3)))
res_c = buffer.matmul(C, empty((3, 3, 4)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
buffer.append(filler + 27)
test += 9
res_a = buffer.matmul(A, empty((3, 3, 3)))
res_b = buffer.matmul(B, empty((3, 3, 3)))
res_c = buffer.matmul(C, empty((3, 3, 4)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, test @ A))
self.assertTrue(allclose(res_b, test @ B))
self.assertTrue(allclose(res_c, test @ C))
def test_rmatmul2_1d1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim == 1"""
data = zeros(3)
C = rand(3)
buffer = NumpyCircularBuffer(data)
buffer.append(0)
res_c = buffer.rmatmul(C[:1], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:1] @ arange(1)))
buffer.append(1)
res_c = buffer.rmatmul(C[:2], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[:2] @ arange(2)))
buffer.append(2)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3)))
buffer.append(3)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(1, 4)))
buffer.append(4)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
buffer.append(6)
res_c = buffer.rmatmul(C, empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C @ arange(4, 7)))
buffer.pop()
res_c = buffer.rmatmul(C[1:], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[1:] @ arange(5, 7)))
buffer.pop()
res_c = buffer.rmatmul(C[2:], empty(1))
self.assertIsInstance(res_c, ndarray)
self.assertTrue(allclose(res_c, C[2:] @ arange(6, 7)))
def test_rmatmul2_nd1d(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data = zeros(3)
A = zeros(9).reshape(3, 3)
B = arange(9).reshape(3, 3)
C = arange(3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
buffer.append(0)
buffer.append(1)
buffer.append(2)
        res_a = buffer.rmatmul(A, empty(3))
        self.assertIsInstance(res_a, ndarray)
        self.assertTrue(array_equal(res_a, A @ array([0, 1, 2])))
buffer.append(3)
res_a = buffer.rmatmul(A, empty(3))
res_b = buffer.rmatmul(B, empty(3))
res_c = buffer.rmatmul(C, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ array([1, 2, 3])))
self.assertTrue(allclose(res_b, B @ array([1, 2, 3])))
self.assertTrue(allclose(res_c, C @ array([1, 2, 3])))
buffer.append(4)
res_a = buffer.rmatmul(A, empty(3))
res_b = buffer.rmatmul(B, empty(3))
res_c = buffer.rmatmul(C, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(2, 5)))
self.assertTrue(allclose(res_b, B @ arange(2, 5)))
self.assertTrue(allclose(res_c, C @ arange(2, 5)))
buffer.append(5)
res_a = buffer.rmatmul(A, empty(3))
res_b = buffer.rmatmul(B, empty(3))
res_c = buffer.rmatmul(C, empty(3))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ arange(3, 6)))
self.assertTrue(allclose(res_b, B @ arange(3, 6)))
self.assertTrue(allclose(res_c, C @ arange(3, 6)))
def test_rmatmul2_1dnd(self):
"""Tests X @ buffer where X.ndim == 1 and buffer.ndim > 1"""
data1 = zeros((3, 3))
data2 = zeros((3, 3, 3))
A = rand(3)
test1 = arange(9).reshape(3, 3)
test2 = arange(27).reshape(3, 3, 3)
buffer1 = NumpyCircularBuffer(data1)
buffer2 = NumpyCircularBuffer(data2)
buffer1.append(arange(3))
buffer1.append(arange(3, 6))
buffer1.append(arange(6, 9))
buffer2.append(arange(9).reshape(3, 3))
buffer2.append(arange(9, 18).reshape(3, 3))
buffer2.append(arange(18, 27).reshape(3, 3))
res_buf1 = buffer1.rmatmul(A, empty(3))
res_buf2 = buffer2.rmatmul(A, empty((3, 3)))
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(9, 12))
buffer2.append(arange(27, 36).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = buffer1.rmatmul(A, empty(3))
res_buf2 = buffer2.rmatmul(A, empty((3, 3)))
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(12, 15))
buffer2.append(arange(36, 45).reshape(3, 3))
test1 += 3
test2 += 9
res_buf1 = buffer1.rmatmul(A, empty(3))
res_buf2 = buffer2.rmatmul(A, empty((3, 3)))
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
buffer1.append(arange(15, 18))
buffer2.append(arange(45, 54).reshape((3, 3)))
test1 += 3
test2 += 9
res_buf1 = buffer1.rmatmul(A, empty(3))
res_buf2 = buffer2.rmatmul(A, empty((3, 3)))
self.assertIsInstance(res_buf1, ndarray)
self.assertIsInstance(res_buf2, ndarray)
self.assertTrue(allclose(res_buf1, A @ test1))
self.assertTrue(allclose(res_buf2, A @ test2))
def test_rmatmul2_2d2d(self):
data = zeros((3, 3))
A = zeros(9).reshape(3, 3)
B = rand(9).reshape(3, 3)
C = rand(12).reshape(4, 3)
fill_diagonal(A, arange(1, 4))
buffer = NumpyCircularBuffer(data)
buffer.append(arange(3))
buffer.append(arange(3, 6))
buffer.append(arange(6, 9))
test = arange(9).reshape(3, 3)
self.assertTrue(array_equal(buffer, test))
res_a = buffer.rmatmul(A, empty((3, 3)))
res_b = buffer.rmatmul(B, empty((3, 3)))
res_c = buffer.rmatmul(C, empty((4, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
buffer.append([9, 10, 11])
test += 3
res_a = buffer.rmatmul(A, empty((3, 3)))
res_b = buffer.rmatmul(B, empty((3, 3)))
res_c = buffer.rmatmul(C, empty((4, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
def test_rmatmul2_ndnd(self):
data = zeros((3, 3, 3))
A = zeros(27).reshape(3, 3, 3)
B = arange(27).reshape(3, 3, 3)
C = arange(3*8*3).reshape(3, 8, 3)
fill_diagonal(A, [1, 2, 3])
buffer = NumpyCircularBuffer(data)
filler = arange(9).reshape(3, 3)
buffer.append(filler)
buffer.append(filler + 9)
buffer.append(filler + 18)
test = arange(27).reshape(3, 3, 3)
res_a = buffer.rmatmul(A, empty((3, 3, 3)))
res_b = buffer.rmatmul(B, empty((3, 3, 3)))
res_c = buffer.rmatmul(C, empty((3, 8, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
buffer.append(filler + 27)
test += 9
res_a = buffer.rmatmul(A, empty((3, 3, 3)))
res_b = buffer.rmatmul(B, empty((3, 3, 3)))
res_c = buffer.rmatmul(C, empty((3, 8, 3)))
self.assertIsInstance(res_a, ndarray)
self.assertIsInstance(res_b, ndarray)
self.assertIsInstance(res_c, ndarray)
self.assertTrue(array_equal(res_a, A @ test))
self.assertTrue(allclose(res_b, B @ test))
self.assertTrue(allclose(res_c, C @ test))
def test_forward_1d(self):
data = zeros(3)
test = zeros(3)
buffer = NumpyCircularBuffer(data)
buffer.append(1)
test[0] = 1
self.assertTrue(array_equal(buffer, test))
buffer.append(2)
test[1] = 2
self.assertTrue(array_equal(buffer, test))
buffer.append(3)
test[2] = 3
self.assertTrue(array_equal(buffer, test))
self.assertTrue(buffer.full)
self.assertEqual(buffer.pop(), data[0])
self.assertEqual(buffer.pop(), data[1])
self.assertEqual(buffer.pop(), data[2])
self.assertTrue(buffer.empty)
def test_forward_nd(self):
data = zeros((3, 3, 3))
buffer = NumpyCircularBuffer(data)
test = zeros((3, 3, 3))
buffer.append(1)
test[0] = 1
self.assertTrue(array_equal(buffer, test))
buffer.append(2)
test[1] = 2
self.assertTrue(array_equal(buffer, test))
buffer.append(3)
test[2] = 3
self.assertTrue(array_equal(buffer, test))
self.assertTrue(buffer.full)
self.assertTrue(array_equal(buffer.pop(), data[0]))
self.assertTrue(array_equal(buffer.pop(), data[1]))
self.assertTrue(array_equal(buffer.pop(), data[2]))
self.assertTrue(buffer.empty)
def test_peek(self):
data = zeros((3, 3, 3))
buffer = NumpyCircularBuffer(data)
self.assertRaises(ValueError, buffer.peek)
buffer.append(1)
self.assertTrue(array_equal(buffer.peek(), ones((3, 3))))
buffer.append(2)
self.assertTrue(array_equal(buffer.peek(), ones((3, 3))))
buffer.append(3)
self.assertTrue(array_equal(buffer.peek(), ones((3, 3))))
buffer.append(4)
self.assertTrue(array_equal(buffer.peek(), ones((3, 3)) * 2))
buffer.append(5)
self.assertTrue(array_equal(buffer.peek(), ones((3, 3)) * 3))
buffer.append(6)
self.assertTrue(array_equal(buffer.peek(), ones((3, 3)) * 4))
def test_all(self):
data = zeros((3, 3, 3))
buffer = NumpyCircularBuffer(data)
buffer.append(1)
self.assertTrue(buffer.all())
buffer.append(1)
self.assertTrue(buffer.all())
buffer.append(0)
self.assertFalse(buffer.all())
buffer.append(1)
self.assertFalse(buffer.all())
buffer.append(1)
self.assertFalse(buffer.all())
buffer.append(2)
self.assertTrue(buffer.all())
def test_any(self):
data = zeros((3, 3, 3))
buffer = NumpyCircularBuffer(data)
buffer.append([0, 0, 1])
self.assertTrue(buffer.any())
buffer.append(0)
self.assertTrue(buffer.any())
buffer.append(0)
self.assertTrue(buffer.any())
buffer.append(0)
self.assertFalse(buffer.any())
buffer.append(0)
self.assertFalse(buffer.any())
buffer.append(2)
self.assertTrue(buffer.any())
def test_byteswap(self):
data = zeros(3)
test = zeros(3)
buffer = NumpyCircularBuffer(data)
r = randint(100)
buffer.append(r)
test[0] = r
r = randint(100)
buffer.append(r)
test[1] = r
r = randint(100)
buffer.append(r)
test[2] = r
res = buffer.byteswap()
self.assertTrue(array_equal(res, test.byteswap()))
self.assertIsInstance(res, ndarray)
self.assertIsNot(buffer, res)
r = randint(100)
buffer.append(r)
test[0] = r
test = roll(test, -1)
res = buffer.byteswap()
self.assertTrue(array_equal(res, test.byteswap()))
self.assertIsInstance(res, ndarray)
self.assertIsNot(buffer, res)
r = randint(100)
buffer.append(r)
test[0] = r
test = roll(test, -1)
res = buffer.byteswap()
self.assertTrue(array_equal(res, test.byteswap()))
self.assertIsInstance(res, ndarray)
self.assertIsNot(buffer, res)
r = randint(100)
buffer.append(r)
test[0] = r
test = roll(test, -1)
res = buffer.byteswap()
self.assertTrue(array_equal(res, test.byteswap()))
self.assertIsInstance(res, ndarray)
self.assertIsNot(buffer, res)
r = randint(100)
buffer.append(r)
res = buffer.byteswap()
inplace_res = buffer.byteswap(inplace=True)
self.assertTrue(array_equal(res, roll(inplace_res.view(ndarray), -1)))
self.assertTrue(shares_memory(inplace_res, buffer))
self.assertIsInstance(res, ndarray)
self.assertIsInstance(inplace_res, ndarray)
def test_clip(self):
data = zeros(3)
test = zeros(3)
buffer = NumpyCircularBuffer(data)
r = randint(100)
buffer.append(r)
test[0] = r
r = randint(100)
buffer.append(r)
test[1] = r
r = randint(100)
buffer.append(r)
test[2] = r
res = buffer.clip(1, 10)
self.assertTrue(array_equal(test, buffer))
self.assertTrue(array_equal(res, test.clip(1, 10)))
self.assertIsInstance(res, ndarray)
r = randint(100)
buffer.append(r)
test[0] = r
test = roll(test, -1)
res = buffer.clip(1, 10)
self.assertTrue(array_equal(res, test.clip(1, 10)))
self.assertIsInstance(res, ndarray)
r = randint(100)
buffer.append(r)
test[0] = r
test = roll(test, -1)
res = buffer.clip(1, 10)
self.assertTrue(array_equal(res, test.clip(1, 10)))
self.assertIsInstance(res, ndarray)
r = randint(100)
buffer.append(r)
test[0] = r
test = roll(test, -1)
res = buffer.clip(1, 10)
self.assertTrue(array_equal(res, test.clip(1, 10)))
self.assertIsInstance(res, ndarray)
self.assertIsNot(buffer, res)
r = randint(100)
buffer.append(r)
res = buffer.clip(1, 10)
inplace_res = buffer.clip(1, 10, out=buffer)
self.assertTrue(array_equal(res, roll(inplace_res.view(ndarray), -1)))
self.assertTrue(shares_memory(inplace_res.data, buffer.data))
self.assertIsInstance(res, ndarray)
self.assertIsInstance(inplace_res, ndarray)
def test_conj(self):
data = zeros(3, dtype=complex)
test = zeros(3, dtype=complex)
buffer = NumpyCircularBuffer(data)
r = rand()
buffer.append(r + r*1j)
test[0] = r + r*1j
r = rand()
buffer.append(r + r*1j)
test[1] = r + r*1j
r = rand()
buffer.append(r + r*1j)
test[2] = r + r*1j
res = buffer.conj()
self.assertTrue(array_equal(res, test.conj()))
self.assertIsInstance(res, ndarray)
r = rand()
buffer.append(r + r*1j)
test[0] = r + r*1j
        test = roll(test, -1)
#!/usr/bin/env python
# -*-coding:utf-8-*-
# Original code from: https://github.com/ugursogukpinar/csv2arff/blob/master/csv2arff/csv2arff.py
import argparse
import numpy as np
import sys
class Csv2Arff():
'''
Reads a CSV file and determines attributes' types and converts
to an ARFF file.
'''
def __init__(self, args):
print("Converting CSV to ARFF...")
self.args = args
self.attribute_types = {}
if self.args['input'] == self.args['output']:
sys.exit("input file can't be the same as the output file!")
self.read_csv()
self.determine_attribute_types()
self.write_arff()
print("Finished conversion. ARFF file written at: %s" % self.args['output'])
def read_csv(self):
if self.verbose():
print("Reading CSV file '%s'" % (self.args['input']))
data = np.genfromtxt(self.args['input'], delimiter=self.args.get('delimiter',','),
dtype='str')
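        # everything is read as strings; numeric vs. nominal attribute types
        # are inferred afterwards in determine_attribute_types()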
self.columns = data[0]
if self.is_without_labels():
self.data = np.array(data[:])
else:
            self.data = np.array(data[1:])
import numpy as np
from os.path import join as os_join
import matplotlib.pyplot as plt
import source.postprocess as post
from source.utilities import utilities as utils
from source.utilities import global_definitions as GD
from plot_settings import plot_settings
COLORS = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red']
#params = plot_settings.get_params(w=7.3, h=5.2)
dest_folder = 'plots_new\\CAARC'
def get_CAARC_properties(src_file_name = 'CAARC_advanced_eigenmodes.txt', evaluate_at = None, interpolation_degree = 3):
'''
    Returns a dictionary with information about building A:
    storey (number), storey_level, mass, frequencies, and eigenmodes.
    The eigenmodes are appended to a list per mode; for each dof a list
    is created in a dictionary.
'''
src = os_join(*['inputs','eigenvectors', src_file_name])
caarc = {}
caarc['storey'] = np.flip(np.loadtxt(src, usecols = (0,))) # [-]
caarc['storey_level'] = np.flip(np.loadtxt(src, usecols = (1,))) # [m]
caarc['dimensons'] = {'x':240,'y':24, 'z':72}
caarc['mass'] = 1231000.0
caarc['frequencies'] = [0.231, 0.429, 0.536]
caarc['eigenmodes'] = {'x':[],'y':[],'z':[],'a':[]}
caarc['eigenmodes_fitted'] = {'x':[],'y':[],'z':[],'a':[]}
for i in range (3):
caarc['eigenmodes']['x'].append(np.zeros(60))
caarc['eigenmodes']['y'].append(np.flip(np.loadtxt(src, usecols = (3+3*i,))))
caarc['eigenmodes']['z'].append(np.flip(np.loadtxt(src, usecols = (2+3*i,))))
caarc['eigenmodes']['a'].append(np.flip(np.loadtxt(src, usecols = (4+3*i,))))
if not evaluate_at:
x = caarc['storey_level']
else:
x = evaluate_at
for dof_label in ['y', 'z', 'a']:
for mode_id in range(3):
y = caarc['eigenmodes'][dof_label][mode_id]
current_polynomial = np.poly1d(np.polyfit(caarc['storey_level'] ,y , interpolation_degree))
values = []
for x_i in x:# evaluate the fitted eigenmode at certain intervals
values.append(current_polynomial(x_i))
caarc['eigenmodes_fitted'][dof_label].append(np.asarray(values))
return caarc
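# Minimal usage sketch (assumes the eigenvector file exists under
# inputs/eigenvectors):
#   caarc = get_CAARC_properties()
#   first_mode_y = caarc['eigenmodes_fitted']['y'][0]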
def get_CAARC_eigenform_polyfit (CAARC_eigenmodes, evaluate_at = None, degree = 5):
'''
    Returns the values of a fitted CAARC eigenmode.
    evaluate_at must be a list of x coordinates at which the fitted curve should be evaluated.
    If it is not provided, the fitted curve is evaluated at each storey level of CAARC.
'''
eigenmodes_fitted = {}
#CAARC_eigenmodes = self.structure_model.CAARC_eigenmodes
# returns the fitted polynomial and the discrete array of displacements
if not evaluate_at:
x = CAARC_eigenmodes['storey_level']
else:
x = evaluate_at
eigenmodes_fitted['storey_level'] = np.copy(x)
eigenmodes_fitted['eigenmodes'] = []
for mode_id in range(1,4):
eigenmodes_fitted['eigenmodes'].append({})
for dof_label in ['y', 'z', 'a']:
y = CAARC_eigenmodes['eigenmodes'][mode_id][dof_label]
current_polynomial = np.poly1d(np.polyfit(CAARC_eigenmodes['storey_level'],y,degree))
values = []
for x_i in x:# evaluate the fitted eigenmode at certain intervals
values.append(current_polynomial(x_i))
eigenmodes_fitted['eigenmodes'][mode_id][dof_label] = np.asarray(values)
return eigenmodes_fitted
def get_m_eff(eigenmodes_dict, mode_id, main_direction_only, print_to_console):
'''
    Returns the generalized mass and the participation factor of a mode.
    Prints the effective mass, which should be around 60% of the total mass (first modes).
'''
mass = eigenmodes_dict['mass'] # constant over height
phi_y = eigenmodes_dict['eigenmodes']['y'][mode_id]
phi_z = eigenmodes_dict['eigenmodes']['z'][mode_id]
if main_direction_only:
if mode_id == 1:
participation_factor = (mass * sum(phi_y))**2 # mass not in the sum since it is constant
elif mode_id == 2:
participation_factor = (mass * sum(phi_z))**2
else:
participation_factor = (mass * sum(np.add(phi_y, phi_z)))**2
if main_direction_only:
if mode_id == 1:
            generalized_mass = mass * sum(np.square(phi_y))
import numpy as np
from extra_data.components import AGIPD1M
import dask.array as da
import xarray as xr
from dask.distributed import Client, progress
import warnings
import h5py as h5
import bottleneck as bn
import pdb
class Calibrator:
"""Calibrate AGIPD dataset"""
adu_per_photon = 66
mask = np.ones((16, 512, 128), "bool")
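    # default mask covers the full AGIPD-1M geometry: 16 modules of 512x128 pixels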
def __init__(
self,
run,
cell_ids,
train_ids,
flatfield_run_number=None,
dark_run_number=None,
mask=None,
is_dark=False,
is_flatfield=False,
apply_internal_mask=False,
dropletize=False,
stripes=None,
baseline=False,
asic_commonmode=False,
subshape=(64, 64),
cell_commonmode=False,
cellCM_window=2,
):
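        """Set up the correction pipeline.

        Corrections listed in ``self.corrections`` are applied lazily on the
        Dask array, while those in ``self.worker_corrections`` run on each
        worker chunk. Dark and flatfield runs, if given, are loaded from
        their HDF5 files via the property setters below.
        """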
#: DataCollection: e.g., returned from extra_data.RunDirectory
self.run = run
#: bool: True if data is a dark run
self.is_dark = is_dark
#: bool: True if data is a flatfield run
self.is_flatfield = is_flatfield
#: tuple: asic (or subasic) shape for asic commonmode. Default (64, 64)
self.subshape = subshape
self.cell_ids = cell_ids
self.train_ids = train_ids
# corrections applied on Dask lazy array
self.corrections = {
"dark_subtraction": False,
"baseline": baseline, # baseline has to be applied
# before any masking
"masking": False, # Switch of masking with DataArray.where and mask on workers.
"internal_masking": apply_internal_mask,
# 'dropletize': dropletize,
}
# corrections applied on each worker
self.worker_corrections = {
"asic_commonmode": asic_commonmode,
"cell_commonmode": cell_commonmode,
"dropletize": dropletize,
}
        # keep the default all-True class mask when none is given
        if mask is not None:
            self.mask = mask
        self.xmask = self.mask
self.is_proc = None
#: (np.ndarray): average dark over trains
self.avr_dark = None
#: (np.ndarray): mask calculated from darks
self.dark_mask = None
#: str: file with dark data.
self.darkfile = None
# setting the dark run also sets the previous dark attributes
self.dark_run_number = dark_run_number
#: (np.ndarray): flatfield data
self.flatfield = None
#: (np.ndarray): mask calculated from flatfields
self.flatfield_mask = None
#: str: file with flatfield data.
self.flatfieldfile = None
# setting the flatfield run also sets the previous attributes
self.flatfield_run_number = flatfield_run_number
self.stripes = stripes
# Darks will not be calibrated (overwrite configfile)
if self.is_dark or self.is_flatfield:
for correction in ["corrections", "worker_corrections"]:
correction_dict = getattr(self, correction)
for key in correction_dict:
correction_dict[key] = False
#: DataArray: the run AGIPD data
self.data = None
def __getstate__(self):
"""needed for apply dask.apply_along_axis
We return only those attributes needed by the worker functions
"""
attrs = ["adu_per_photon", "worker_corrections", "subshape", "mask"]
return {attr: getattr(self, attr) for attr in attrs}
@property
def xmask(self):
"""DataArray: pixel mask as xarray.DataArray"""
return self.__xmask
@xmask.setter
def xmask(self, mask):
if isinstance(mask, xr.DataArray):
mask = mask.values
elif isinstance(mask, np.ndarray):
pass
else:
raise ValueError(f"{type(mask)} cannot be used as mask.")
self.__xmask = xr.DataArray(
mask,
dims=("module", "dim_0", "dim_1"),
coords={
"module": np.arange(16),
"dim_0": np.arange(512),
"dim_1": np.arange(128),
},
)
@property
def dark_run_number(self):
"""The run number and the file index to load the average dark"""
return self.__dark_run_number
@dark_run_number.setter
def dark_run_number(self, number):
if number is None:
pass
elif len(number) == 2:
self.darkfile = f"r{number[0]:04}-dark_{number[1]:03}.h5"
with h5.File(self.darkfile, "r") as f:
avr_dark = f["dark/intensity"][:]
pulse_ids = f["identifiers/pulse_ids"][:].flatten()
self.dark_mask = self.xmask.copy(data=f["dark/mask"][:])
xdark = xr.DataArray(
avr_dark,
dims=("pulseId", "module", "dim_0", "dim_1"),
coords={
"pulseId": pulse_ids,
"module": np.arange(16),
"dim_0": np.arange(512),
"dim_1": np.arange(128),
},
)
xdark = xdark.transpose("module", "dim_0", "dim_1", "pulseId")
self.avr_dark = xdark
self.is_proc = False
self.corrections["dark_subtraction"] = True
# internal mask available only in processed data
self.corrections["internal_masking"] = False
else:
raise ValueError(
"Dark input parameter could not be processed:\n" f"{number}"
)
self.__dark_run_number = number
@property
def flatfield_run_number(self):
"""The run number and the file index to load the flatfield"""
return self.__flatfield_run_number
@flatfield_run_number.setter
def flatfield_run_number(self, number):
if number is None:
pass
elif len(number) == 2:
self.flatfieldfile = f"r{number[0]:04}-flatfield_{number[1]:03}.h5"
with h5.File(self.flatfieldfile, "r") as f:
flatfield = f["flatfield/intensity"][:]
pulse_ids = f["identifiers/pulse_ids"][:].flatten()
self.flatfield_mask = self.xmask.copy(data=f["flatfield/mask"][:])
xflatfield = xr.DataArray(
flatfield,
dims=("pulseId", "module", "dim_0", "dim_1"),
coords={
"pulseId": pulse_ids,
"module": np.arange(16),
"dim_0": | np.arange(512) | numpy.arange |
from typing import (
Dict,
Optional,
)
import numpy as np
from pandas.compat._optional import import_optional_dependency
from pandas.core.util.numba_ import (
NUMBA_FUNC_CACHE,
get_jit_arguments,
)
def generate_online_numba_ewma_func(engine_kwargs: Optional[Dict[str, bool]]):
"""
Generate a numba jitted groupby ewma function specified by values
from engine_kwargs.
Parameters
----------
engine_kwargs : dict
dictionary of arguments to be passed into numba.jit
Returns
-------
Numba function
"""
nopython, nogil, parallel = get_jit_arguments(engine_kwargs)
cache_key = (lambda x: x, "online_ewma")
if cache_key in NUMBA_FUNC_CACHE:
return NUMBA_FUNC_CACHE[cache_key]
numba = import_optional_dependency("numba")
@numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
def online_ewma(
values: np.ndarray,
deltas: np.ndarray,
minimum_periods: int,
old_wt_factor: float,
new_wt: float,
old_wt: np.ndarray,
adjust: bool,
ignore_na: bool,
):
"""
Compute online exponentially weighted mean per column over 2D values.
Takes the first observation as is, then computes the subsequent
exponentially weighted mean accounting minimum periods.
"""
result = np.empty(values.shape)
weighted_avg = values[0]
nobs = (~np.isnan(weighted_avg)).astype(np.int64)
result[0] = np.where(nobs >= minimum_periods, weighted_avg, np.nan)
for i in range(1, len(values)):
cur = values[i]
is_observations = ~np.isnan(cur)
nobs += is_observations.astype(np.int64)
for j in numba.prange(len(cur)):
if not np.isnan(weighted_avg[j]):
if is_observations[j] or not ignore_na:
# note that len(deltas) = len(vals) - 1 and deltas[i] is to be
# used in conjunction with vals[i+1]
                        old_wt[j] *= old_wt_factor ** deltas[i - 1]
if is_observations[j]:
# avoid numerical errors on constant series
if weighted_avg[j] != cur[j]:
weighted_avg[j] = (
(old_wt[j] * weighted_avg[j]) + (new_wt * cur[j])
) / (old_wt[j] + new_wt)
if adjust:
old_wt[j] += new_wt
else:
old_wt[j] = 1.0
elif is_observations[j]:
weighted_avg[j] = cur[j]
result[i] = np.where(nobs >= minimum_periods, weighted_avg, np.nan)
return result, old_wt
return online_ewma
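# Illustrative usage sketch (hypothetical values, not part of pandas' public
# API; requires numba):
#   ewma = generate_online_numba_ewma_func(
#       {"nopython": True, "nogil": False, "parallel": False})
#   vals = np.array([[1.0], [2.0], [np.nan], [4.0]])
#   result, old_wt = ewma(vals, np.ones(len(vals) - 1), 1, 0.5, 0.5,
#                         np.ones(1), True, False)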
class EWMMeanState:
def __init__(self, com, adjust, ignore_na, axis, shape):
alpha = 1.0 / (1.0 + com)
self.axis = axis
self.shape = shape
self.adjust = adjust
self.ignore_na = ignore_na
self.new_wt = 1.0 if adjust else alpha
self.old_wt_factor = 1.0 - alpha
        self.old_wt = np.ones(self.shape[self.axis - 1])
# coding=utf-8
import os
import logging
from typing import Dict, List, Tuple, Union
import numpy as np
from scipy.stats import entropy
from scipy.sparse import csr_matrix
from sklearn.preprocessing import normalize
from sklearn.metrics.pairwise import cosine_similarity
try:
import networkx as nx
from networkx.algorithms.bipartite.matrix import from_biadjacency_matrix
except ImportError:
nx = None
import torch
from transformers import BertModel, BertTokenizer, XLMModel, XLMTokenizer, RobertaModel, RobertaTokenizer, XLMRobertaModel, XLMRobertaTokenizer, AutoConfig, AutoModel, AutoTokenizer
from simalign.utils import get_logger
LOG = get_logger(__name__)
class EmbeddingLoader(object):
def __init__(self, model: str="bert-base-multilingual-cased", device=torch.device('cpu'), layer: int=8):
TR_Models = {
'bert-base-uncased': (BertModel, BertTokenizer),
'bert-base-multilingual-cased': (BertModel, BertTokenizer),
'bert-base-multilingual-uncased': (BertModel, BertTokenizer),
'bert-large-uncased': (BertModel, BertTokenizer),
'bert-large-cased': (BertModel, BertTokenizer),
'xlm-mlm-100-1280': (XLMModel, XLMTokenizer),
'xlm-roberta-base': (XLMRobertaModel, XLMRobertaTokenizer),
'xlm-roberta-large': (XLMRobertaModel, XLMRobertaTokenizer),
'roberta-base': (RobertaModel, RobertaTokenizer),
'roberta-large': (RobertaModel, RobertaTokenizer), # <======================================
}
self.model = model
self.device = device
self.layer = layer
self.emb_model = None
self.tokenizer = None
if model in TR_Models:
model_class, tokenizer_class = TR_Models[model]
self.emb_model = model_class.from_pretrained(model, output_hidden_states=True)
self.emb_model.eval()
self.emb_model.to(self.device)
self.tokenizer = tokenizer_class.from_pretrained(model)
LOG.info("Initialized the EmbeddingLoader with model: {}".format(self.model))
else:
if os.path.isdir(model):
# try to load model with auto-classes
config = AutoConfig.from_pretrained(model, output_hidden_states=True)
self.emb_model = AutoModel.from_pretrained(model, config=config)
self.emb_model.eval()
self.emb_model.to(self.device)
self.tokenizer = AutoTokenizer.from_pretrained(model)
LOG.info("Initialized the EmbeddingLoader from path: {}".format(self.model))
else:
raise ValueError("The model '{}' is not recognised!".format(model))
def get_embed_list(self, sent_batch: List[List[str]]) -> torch.Tensor:
if self.emb_model is not None:
with torch.no_grad():
if not isinstance(sent_batch[0], str):
inputs = self.tokenizer(sent_batch, is_split_into_words=True, padding=True, truncation=True, return_tensors="pt")
else:
inputs = self.tokenizer(sent_batch, is_split_into_words=False, padding=True, truncation=True, return_tensors="pt")
if self.layer == "cat":
outputs = self.emb_model(**inputs.to(self.device))[2] # all the hidden layers, list of 13 tensors of torch.Size([2, 32, 768])
                    # e.g. no. of BPE tokens = 32, no. of layers = 13, emb_dim = 768
token_embeddings = torch.stack(outputs, dim=0) # [13, 2, 32, 768]
token_embeddings = token_embeddings.permute(1,0,2,3) # [2, 13, 32, 768]
                    token_embeddings_a = token_embeddings[0, ...].permute(1,0,2) # [32, 13, 768]
                    token_embeddings_b = token_embeddings[1, ...].permute(1,0,2) # [32, 13, 768]
token_vecs_cat_a = []
for token in token_embeddings_a:
cat_vec = torch.cat((token[-1], token[-2], token[-3], token[-4]), dim=0)
token_vecs_cat_a.append(cat_vec)
token_vecs_cat_b = []
for token in token_embeddings_b:
cat_vec = torch.cat((token[-1], token[-2], token[-3], token[-4]), dim=0)
token_vecs_cat_b.append(cat_vec)
outputs_a = torch.stack(token_vecs_cat_a)[np.newaxis, :, :] # [1, 32, 3072])
outputs_b = torch.stack(token_vecs_cat_b)[np.newaxis, :, :] # [1, 32, 3072])
outputs = torch.cat((outputs_a, outputs_b), dim = 0) # [2, 32, 3072]) # torch.Size([2, no_of_bpes, emb_dim X 4])
elif self.layer == "sum":
outputs = self.emb_model(**inputs.to(self.device))[2] # all the hidden layers, list of 13 tensors of torch.Size([2, 32, 768])
                    # e.g. no. of BPE tokens = 32, no. of layers = 13, emb_dim = 768
token_embeddings = torch.stack(outputs, dim=0) # [13, 2, 32, 768]
token_embeddings = token_embeddings.permute(1,0,2,3) # [2, 13, 32, 768]
                    token_embeddings_a = token_embeddings[0, ...].permute(1,0,2) # [32, 13, 768]
                    token_embeddings_b = token_embeddings[1, ...].permute(1,0,2) # [32, 13, 768]
token_vecs_sum_a = []
for token in token_embeddings_a:
sum_vec = torch.sum(token[-4:], dim=0)
token_vecs_sum_a.append(sum_vec)
token_vecs_sum_b = []
for token in token_embeddings_b:
sum_vec = torch.sum(token[-4:], dim=0)
token_vecs_sum_b.append(sum_vec)
outputs_a = torch.stack(token_vecs_sum_a)[np.newaxis, :, :] # [1, 32, 768])
outputs_b = torch.stack(token_vecs_sum_b)[np.newaxis, :, :] # [1, 32, 768])
outputs = torch.cat((outputs_a, outputs_b), dim = 0) # [2, 32, 768]) # torch.Size([2, no_of_bpes, emb_dim])
else:
outputs = self.emb_model(**inputs.to(self.device))[2][self.layer] # [2, 32, 768]) # torch.Size([2, no_of_bpes, emb_dim])
return outputs[:, 1:-1, :]
else:
return None
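# Note: outputs[:, 1:-1, :] drops the special [CLS]/[SEP] positions so the
# returned embeddings line up with the raw subword tokens.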
class SentenceAligner(object):
def __init__(self, model: str = "bert", token_type: str = "bpe", distortion: float = 0.0, matching_methods: str = "mai", device: str = "cpu", layer: int = 8):
model_names = {
"bert_bu": 'bert-base-uncased', # b = base, u = uncased
"bert_bmc": "bert-base-multilingual-cased", # m = multilingual, c = cased
"bert_bmu": "bert-base-multilingual-uncased",
"bert_lu": "bert-large-uncased", # l = large
"bert_lc": "bert-large-cased",
"xlmr_base": "xlm-roberta-base",
"xlmr_large": "xlm-roberta-large",
"roberta_base": "roberta-base",
"roberta_large": "roberta-large", # <======================================
}
all_matching_methods = {"a": "inter", "m": "mwmf", "i": "itermax", "f": "fwd", "r": "rev"}
self.model = model
if model in model_names:
self.model = model_names[model]
self.token_type = token_type
self.distortion = distortion
self.matching_methods = [all_matching_methods[m] for m in matching_methods]
self.device = torch.device(device)
self.embed_loader = EmbeddingLoader(model=self.model, device=self.device, layer=layer)
@staticmethod
def get_max_weight_match(sim: np.ndarray) -> np.ndarray:
if nx is None:
raise ValueError("networkx must be installed to use match algorithm.")
def permute(edge):
if edge[0] < sim.shape[0]:
return edge[0], edge[1] - sim.shape[0]
else:
return edge[1], edge[0] - sim.shape[0]
G = from_biadjacency_matrix(csr_matrix(sim))
matching = nx.max_weight_matching(G, maxcardinality=True)
matching = [permute(x) for x in matching]
matching = sorted(matching, key=lambda x: x[0])
res_matrix = np.zeros_like(sim)
for edge in matching:
res_matrix[edge[0], edge[1]] = 1
return res_matrix
@staticmethod
def get_similarity(X: np.ndarray, Y: np.ndarray) -> np.ndarray:
return (cosine_similarity(X, Y) + 1.0) / 2.0
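    # get_similarity rescales cosine similarity from [-1, 1] into [0, 1]:
    # cos = 1 -> 1.0, cos = 0 -> 0.5, cos = -1 -> 0.0.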
@staticmethod
def average_embeds_over_words(bpe_vectors: np.ndarray, word_tokens_pair: List[List[str]]) -> List[np.array]:
w2b_map = []
cnt = 0
w2b_map.append([])
for wlist in word_tokens_pair[0]:
w2b_map[0].append([])
for x in wlist:
w2b_map[0][-1].append(cnt)
cnt += 1
cnt = 0
w2b_map.append([])
for wlist in word_tokens_pair[1]:
w2b_map[1].append([])
for x in wlist:
w2b_map[1][-1].append(cnt)
cnt += 1
new_vectors = []
for l_id in range(2):
w_vector = []
for word_set in w2b_map[l_id]:
w_vector.append(bpe_vectors[l_id][word_set].mean(0))
new_vectors.append(np.array(w_vector))
return new_vectors
@staticmethod
def get_alignment_matrix(sim_matrix: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
m, n = sim_matrix.shape
forward = np.eye(n)[sim_matrix.argmax(axis=1)] # m x n
backward = np.eye(m)[sim_matrix.argmax(axis=0)] # n x m
return forward, backward.transpose()
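    # np.eye(n)[sim.argmax(axis=1)] one-hot encodes each source token's best
    # target, e.g. sim = [[0.9, 0.1], [0.2, 0.8]] gives forward = [[1, 0], [0, 1]].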
@staticmethod
def apply_distortion(sim_matrix: np.ndarray, ratio: float = 0.5) -> np.ndarray:
shape = sim_matrix.shape
if (shape[0] < 2 or shape[1] < 2) or ratio == 0.0:
return sim_matrix
pos_x = np.array([[y / float(shape[1] - 1) for y in range(shape[1])] for x in range(shape[0])])
pos_y = np.array([[x / float(shape[0] - 1) for x in range(shape[0])] for y in range(shape[1])])
distortion_mask = 1.0 - ((pos_x - np.transpose(pos_y)) ** 2) * ratio
return np.multiply(sim_matrix, distortion_mask)
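    # The mask penalizes pairs whose relative sentence positions differ:
    # entries with matching relative position keep weight 1.0, while the most
    # distant corners are scaled down to 1.0 - ratio.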
@staticmethod
def iter_max(sim_matrix: np.ndarray, max_count: int=2) -> np.ndarray:
alpha_ratio = 0.9
m, n = sim_matrix.shape
forward = np.eye(n)[sim_matrix.argmax(axis=1)] # m x n
backward = np.eye(m)[sim_matrix.argmax(axis=0)] # n x m
inter = forward * backward.transpose()
if min(m, n) <= 2:
return inter
new_inter = np.zeros((m, n))
count = 1
while count < max_count:
mask_x = 1.0 - np.tile(inter.sum(1)[:, np.newaxis], (1, n)).clip(0.0, 1.0)
mask_y = 1.0 - np.tile(inter.sum(0)[np.newaxis, :], (m, 1)).clip(0.0, 1.0)
mask = ((alpha_ratio * mask_x) + (alpha_ratio * mask_y)).clip(0.0, 1.0)
mask_zeros = 1.0 - ((1.0 - mask_x) * (1.0 - mask_y))
if mask_x.sum() < 1.0 or mask_y.sum() < 1.0:
mask *= 0.0
mask_zeros *= 0.0
new_sim = sim_matrix * mask
fwd = np.eye(n)[new_sim.argmax(axis=1)] * mask_zeros
            bac = np.eye(m)[new_sim.argmax(axis=0)].transpose() * mask_zeros
            new_inter = fwd * bac
            # Completed from context (the original file is truncated here):
            # accumulate new matches until convergence or max_count.
            if np.array_equal(inter + new_inter, inter):
                break
            inter = inter + new_inter
            count += 1
        return inter
"""
Run random agent to test the 3D environment
"""
import numpy as np
import gym
import gym_pcgrl
from pdb import set_trace as TT
# from utils import make_vec_envs
from gym_pcgrl.envs.helper_3D import calc_num_regions, debug_path, get_string_map,\
get_tile_locations, calc_longest_path, run_dijkstra
import matplotlib.pyplot as plt
################################################################################
# test the helper functions
tile_types = ["AIR", "DIRT"]
######## Test the path finding func and region counting func in stair-climbing logic #########
# test_map_1:
# size: 7 * 7 * 5
# longest path length: 28 + 2 + 29 = 59
test_map_1 = [
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1]
],
[
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 1, 1, 0]
],
[
[0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 1, 0, 1, 0],
[0, 1, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 1, 1, 0]
]
]
# test_map_2:
# size: 7 * 7 * 5
# longest path length: 28 + 2 + 27 = 57
test_map_2 = [
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1]
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 1, 1, 0, 0, 0]
]
]
# test_map_3:
# size: 7 * 7 * 5
# longest path length: 28 + 2 + 27 = 57
# info: identical to test_map_2, except that some unnecessary tiles are removed (to test region number)
test_map_3 = [
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 1],
[0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 0],
[1, 1, 0, 0, 0, 0, 0]
],
[
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 1, 1, 1]
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0] # diff: [0, 0, 0, 1, 0, 0, 0] in test_map_2
],
[
[1, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 1, 1, 1, 1, 0],
[0, 0, 0, 0, 0, 0, 1],
[1, 1, 1, 1, 1, 0, 0],
[0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 0, 0, 0] # diff: [0, 0, 1, 1, 0, 0, 0] in test_map_2
]
]
# test_map_4:
# size: 3 * 6 * 6
# longest path length: 2 + 1 + 1 + 1 = 5
# info: small map for testing climbing stairs
test_map_4 = [
[
[1, 0, 1],
[1, 0, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
],
[
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
],
[
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1],
[1, 1, 1]
],
[
[1, 1, 1],
[1, 1, 1],
[1, 0, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1]
],
[
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 0, 1],
[1, 0, 1],
[1, 1, 1]
],
[
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1],
[1, 1, 1]
]
]
########### For testing the 3D plotting ###########
# test_map_5:
# size: 8 * 2 * 4
test_map_5 = [
[
[1, 1, 1, 1, 1, 1, 1, 1],
[1, 0, 0, 0, 0, 0, 0, 0],
],
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
],
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
],
[
[1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]
]
]
############ Test the path finding func in the jumping logic #############
# Note: In Minecraft jumping, the extra head room of the starting position and extra head room of the position 1 before
# foothold needs to be guaranteed
#
# |__
# O
# 大_ __
# | |
# | |
# test_map_6:
# size: 5 * 1 * 7
# This is the max jump distance in Minecraft (press double w + space to jump)
# path length: 2
# region number: 1
# jump: 1
# jump distance: 3
test_map_6 = [
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
]
]
# test_map_7:
# size: 5 * 1 * 7
# This is the max jump distance in Minecraft (press double w + space to jump)
# path length: 2
# region number: 1
# jump: 1
# jump distance: 3
# info: valid jump, the head room of the foothold position is trivial
test_map_7 = [
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 1] # the head room of the foothold position is trivial
]
]
# test_map_8:
# size: 5 * 1 * 7
# This is the max jump distance in Minecraft (press double w + space to jump)
# path length: 1
# region number: 1
# jump: 0
# jump distance: 3
# info: head blocked in starting position in either direction
test_map_8 = [
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
],
[
[1, 0, 0, 0, 1] # head blocked in starting position in either direction
]
]
# test_map_9:
# size: 5 * 1 * 7
# This is the max jump distance in Minecraft (press double w + space to jump)
# path length: 1
# region number: 1
# jump: 0
# jump distance: 3
# info: head blocked in the position before foothold position
test_map_9 = [
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[1, 0, 0, 0, 1]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 0, 0]
],
[
[0, 0, 0, 1, 1] # head blocked in the position before foothold position
]
]
# test_map_10:
# size: 4 * 1 * 7
# jump distance: 2
# path length: 2
# region number: 1
# jump: 1
test_map_10 = [
[
[1, 0, 0, 1]
],
[
[1, 0, 0, 1]
],
[
[1, 0, 0, 1]
],
[
[1, 0, 0, 1]
],
[
[0, 0, 0, 0]
],
[
[0, 0, 0, 0]
],
[
[0, 0, 0, 0]
]
]
# test_map_11:
# size: 3 * 1 * 7
# jump distance: 1
# path length: 2
# region number: 1
# jump: 1
test_map_11 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
]
]
# test_map_12:
# size: 3 * 1 * 7
# jump distance: 1
# path length: 2
# region number: 1
# jump: 1
# height difference: 1
# info: the height difference of starting point and foothold position is 1
test_map_12 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 0] # the height difference of starting point and foothold position is 1
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
]
]
# test_map_13:
# size: 3 * 1 * 7
# jump distance: 1
# path length: 2
# region number: 1
# jump: 1
# height difference: 2
# info: the height difference of starting point and foothold position is 2
test_map_13 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 0]
],
[
[1, 0, 0] # the height difference of starting point and foothold position is 2
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
]
]
# test_map_14:
# size: 3 * 1 * 7
# jump distance: 1
# path length: 1
# region number: 1
# jump: 0
# height difference: 0
# info: head blocked in starting position in either direction
test_map_14 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[1, 0, 1]
]
]
# test_map_15:
# size: 3 * 1 * 7
# jump distance: 1
# path length: 1
# region number: 1
# jump: 0
# height difference: 0
# info: head blocked in foothold position
test_map_15 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 1, 1]
]
]
# test_map_16:
# size: 3 * 1 * 7
# jump distance: 1
# path length: 2
# region number: 1
# jump: 1
# height difference: 0
# info: valid jump
test_map_16 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[0, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 0, 1]
]
]
# test_map_17:
# size: 3 * 1 * 7
# jump distance: 1
# path length: 2
# region number: 1
# jump: 1
# height difference: -1
# info: valid jump
test_map_17 = [
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 1]
],
[
[1, 0, 0]
],
[
[0, 0, 0]
],
[
[0, 0, 1]
],
[
[0, 0, 1]
]
]
# TODO: test map for falling distance > 1 and <= 3
"""
get the state of the test maps
"""
def get_test_state(test_map, tile_types):
test_map = np.array(test_map)
test_string_map = get_string_map(test_map, tile_types)
map_locations = get_tile_locations(test_string_map, tile_types)
# get the state of the test map
path_length, path_coords = calc_longest_path(test_string_map, map_locations, ["AIR"], get_path=True)
num_regions = calc_num_regions(test_string_map, map_locations, ["AIR"])
debug_path_coords = debug_path(path_coords, test_string_map, ["AIR"])
print("longest path length:", path_length)
print("number of regions:", num_regions)
print(f"The path is: {debug_path_coords}")
return path_length, path_coords, num_regions
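# Illustrative usage sketch for the handcrafted maps above (expected values
# come from each map's own header comment):
#   get_test_state(test_map_1, tile_types)  # expect longest path length 59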
"""
plot the test maps using matplotlib 3D voxel / volumetric plotting
"""
def plot_3d_map(test_map):
test_map = np.array(test_map)
# change the map axis for plotting
test_map = np.moveaxis(test_map, (0, 2), (2, 1))
    # create the boolean map of the maze
    boolean_map = np.array(test_map) == 1
# create the color map of the maze
    color_map = np.empty(test_map.shape, dtype=object)
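    # Assumed completion (the original file is truncated here): color the
    # solid voxels and render them with matplotlib's volumetric plot.
    color_map[boolean_map] = "green"
    ax = plt.figure().add_subplot(projection="3d")
    ax.voxels(boolean_map, facecolors=color_map, edgecolor="k")
    plt.show()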
"""
Created on Mon Nov 05 03:52:36 2018
@author: Paul
"""
### Boiler-Plate ###
import matplotlib.pylab as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import scipy as sp
from numpy import random
import time
import csv
from Class1_Eq import *
from Func import *
""" Change this value when changed in restart .i files """
global t_final
t_final = 10000 # seconds
global ss_fail_penalty
ss_fail_penalty = 700
global cost_multiplier_for_nucl_safety_grade
cost_multiplier_for_nucl_safety_grade = 5.0
###########################################################################
""""""""" Tri-System Option Class """"""""" ###########################
###########################################################################
class Option:
"""
Inputs:
x1 = Zion core loop x-optimization parameters
x2 = PERCS loop x-optimization parameters
x3 = PCS superstructure x-optimization parameters
y = PCS superstructure y-optimization parameters
Parameters:
*Individual optimization parameters (explained in __init__() function)
Core Loop:
cards = Array of RELAP5 card numbers with core loop value changes
i_vals = Array of column numbers for core loop value changes
vals = Array of new values for core loop value changes
T_fuel_cent_max = Maximum fuel centerline temperature (constraint)
T_clad_surf_max = Maximum cladding surface temperature (constraint)
MDNBR = Minimum departure from nucleate boiling ratio (constraint)
T_f_over_max = [Boolean] Did fuel temperature go over the max?
T_clad_surf_max = [Boolean] Did cladding temperature go over the max?
MDNBR_below_1 = [Boolean] Did MDNBR go below 1.0?
peanlized = [Boolean] Did the core loop receive a penalty?
failed = [Boolean] Did the RELAP5 core loop model fail early?
csvFileLocation = [String] Core's PyPost results file location
*Parameters for T, P, m_dot, H, & x_e core data from PyPost
k_eff = Effective multiplication factor per neutron cycle in core
rho_0 = Initial reactivity of the core
Bc = Cycle burn-up of the fuel [EFPD = effective full-power days]
nBc = Discharge burn-up of the fuel
cost_RCPs = Capital cost of RCPs
op_cost_RCPs = Operating cost of RCPs (40 yrs)
cost_total_fuel = Cost of UO2 fuel (40 yrs)
PERCS Loop:
list_card = Array of RELAP5 card numbers with PERCS value changes
list_i_change = Array of column numbers for PERCS value changes
list_change = Array of new values for PERCS value changes
len_diff_717 = Parameter used to calculate length of Pipe 717
n_tubes = Number of tubes w/in PERCS tank
m_MgCO3 = Mass of Magnesium Carbonate w/in PERCS tank
T_over_620 = [Boolean] Did the core outlet T go above 620K?
T_over_635 = [Boolean] Did the core outlet T go above 635K?
csvFileLocation2 = [String] PERCS's PyPost results file location
*Parameters for T & alpha PERCS data from PyPost
PERCS_failed = [Boolean] Did the PERCS RELAP5 model fail early?
PERCS_penalty = [Boolean] Did the PERCS receive a penalty?
            cost_penalty = Multiplicative cost penalty if 'PERCS_failed' = TRUE
ss_fail = [Boolean] Redundant of Core's 'failed'
p716, p717 = Pipes 716 & 717 (for cost purposes)
support = Support structure for PERCS tank (for cost purposes)
hx = Fake heat exchanger (for cost purposes)
tank = PERCS tank (for cost purposes)
chemical = MgCO3 in tank (for cost purposes)
PCS Loop:
            pinch_point = [Boolean] Did the PCS run into a pinch-point violation?
s = Array of Stream instances for all 37 PCS superstructure streams
phx = PHX instance representing the Steam Generator
t1a, t1b, t1c, t2a, t2b = Turbines representing the diff. stages
t1, t2 = Actual turbines (for cost purposes)
t3, t4, t5 = Turbine instances for LPTs
ms1, ms2 = Moisture separator instances
rh1, rh2 = Reheater heat exchanger instances
cond = Condenser instance
fwh1, fwh2, fwh3, fwh4 = Feedwater heater instances
p1, p2, p3, p4, p5, p6 = Pump instances
Objective Functions:
W_rcp = Core Obj. 1 - Total work of RCPs
cost_1 = Core Obj. 2 - Total core loop costs
obj_1_1 = Normalized W_rcp
obj_1_2 = Normalized cost_1
fmm_1 = Maximin fitness value for core loop
cost_2 = PERCS Obj. 1 - Total PERCS equipment cost
dT_int = PERCS Obj. 2 - Integral of deviation of core outlet T
alpha = PERCS Obj. 3 - Consumption of MgCO3
obj_2_1 = Normalized cost_2
obj_2_2 = Normalized dT_int
obj_2_3 = Normalized alpha
fmm_2 = Maximin fitness value for PERCS loop
color = [String] PCS superstructure color/configuration
eff = PCS Obj. 1 - Thermodynamic Efficiency
cost_3 = PCS Obj. 2 - Total PCS equipment cost
obj_3_1 = Normalized eff
obj_3_2 = Normalized cost_3
fmm_3 = Maximin fitness value for PCS loop
obj_fmm_1 = Normalized fmm_1
obj_fmm_2 = Normalized fmm_2
obj_fmm_3 = Normalized fmm_3
fmm_o = Overall Maximin fitness value
Functions:
init_ZION_calcs() - Fills arrays to make core loop RELAP5 value changes
init_PERCS_calcs() - Fills arrays to make PERCS RELAP5 value changes
final_ZION_calcs() - Grabs PyPost data, Performs final core loop calcs
final_PERCS_calcs() - Grabs PyPost data, Performs final PERCS calcs
Alpha_calcs() - Grabs alpha PyPost data, Calcs overall Alpha
PCS_SS_calcs() - Calls solve_PCS(), Performs final PCS calcs
solve_PCS() - Fills out PCS superstructure & converges the cycle
"""
def __init__(self,x1_in,x2_in,x3_in,y_in):
self.opt_ID = 0
self.last_sec_penalty = False
# Define the x- and y-optimization parameter arrays
self.x1 = x1_in # ZION x-opt parameters
self.x2 = x2_in # PERCS x-opt parameters
self.x3 = x3_in # PCS x-opt parameters
self.y = y_in # PCS y-opt parameters
# Further define the ZION Core loop opt. parameters
self.R_f = self.x1[0] # ft (radius of fuel per pin)
self.H_fuel = self.x1[1] # ft (height of fuel pins)
self.Dh_00 = self.x1[2] # ft (hydraulic D of pipes _00)
self.Dh_12 = self.x1[3] # ft (hydraulic D of pipes _12)
self.Dh_14 = self.x1[4] # ft (hydraulic D of pipes _14)
# Further define the PERCS loop opt. parameters
self.R_tank = self.x2[0] # ft (radius of PERCS HX tank)
self.pitch = self.x2[1] # ft (pitch b/t tubes in PERCS)
self.D_h = self.x2[2] # ft (hydraulic D of tubes)
self.th = self.x2[3] # ft (thickness of tubes)
self.Len = self.x2[4] # ft (length of tubes / height of tank)
self.elev = self.x2[5] # ft (height diff. b/t core outlet & PERCS inlet)
# Further define the PCS superstructure x-opt. parameters
self.To_PHX = self.x3[0] # degC
self.Po_t1a = self.x3[1] # bar
self.mf_t1a = self.x3[2]
self.Po_t1b = self.x3[3] # bar
self.mf_t1b = self.x3[4]
self.Po_t1c = self.x3[5] # bar
self.Po_t2a = self.x3[6] # bar
self.mf_t2a = self.x3[7]
self.Po_t2b = self.x3[8] # bar
# Further define the PCS superstructure y-opt. parameters
self.y_ipt = self.y[0] # IPT
self.y_rh1 = self.y[1] # RH 1
self.y_rh2 = self.y[2] # RH 2
self.y_s14 = self.y[3] # s[14]
self.y_s4 = self.y[4] # s[4]
self.y_s5 = self.y[5] # s[5]
################################
""" Init stuff for ZION Core """
################################
# Initialize card, i_change, and change lists for ZION
self.cards = np.empty(119,dtype='<U32')
self.i_vals = np.zeros(119,dtype=int)
self.vals = np.zeros(119)
# Initiate the Booleans that tracks thermal design limit violations
self.T_fuel_cent_max = 2100 # degC
self.T_clad_surf_max = 348 # degC
self.MDNBR = 0
self.T_f_over_max = False
self.T_c_over_max = False
self.MDNBR_below_1 = False
self.penalized = False
self.failed = False
# Parameter data grabbed from .csv files using PyPost
self.csvFileLocation = 'None'
self.T_106 = 0.0 # degC
self.T_110 = 0.0 # degC
self.P_106 = 0.0 # bar
self.P_110 = 0.0 # bar
self.P_335 = np.zeros(6) # MPa
self.P_p_out = 0.0 # bar
self.m_dot_100 = 0.0 # kg/s
self.m_dot_335 = 0.0 # kg/s
self.m_dot_400 = 0.0 # kg/s
self.m_dot_600 = 0.0 # kg/s
self.m_dot_200 = 0.0 # kg/s
self.H_106 = 0.0 # kJ/kg
self.H_110 = 0.0 # kJ/kg
self.H_335_1 = 0.0 # kJ/kg
self.H_112_5 = 0.0 # kJ/kg
self.H_114 = 0.0 # kJ/kg
self.H_412_5 = 0.0 # kJ/kg
self.H_414 = 0.0 # kJ/kg
self.H_612_5 = 0.0 # kJ/kg
self.H_614 = 0.0 # kJ/kg
self.H_212_5 = 0.0 # kJ/kg
self.H_214 = 0.0 # kJ/kg
self.T_1336_1 = np.zeros(6) # K
self.T_1336_17 = np.zeros(6) # K
self.x_e_335 = np.zeros(6)
# Other parameters that should be reported in Excel
self.k_eff = 0.0
self.rho_0 = 0.0
self.Bc = 0.0 # EFPD
self.nBc = 0.0 # yr
# Three cost parameters that make up 'cost_1'
self.cost_RCPs = 0.0 # $
self.op_cost_RCPs = 0.0 # $
self.cost_total_fuel = 0.0 # $
############################
""" Init stuff for PERCS """
############################
# Initialize card, i_change, and change lists for PERCS
self.list_card = np.empty(39,dtype='<U32')
self.list_i_change = np.zeros(39,dtype=int)
self.list_change = np.empty(39)
# Needed to calc the elev of Pipe 717, calc'd in Init_ZION_Calcs()
self.len_diff_717 = 0.0 # ft
# Initialize some stuff
self.n_tubes = 0
self.m_MgCO3 = 0 # kg
# Initiate the Boolean that says whether T goes over 620 K and/or 635 K
self.T_over_620 = False
self.T_over_635 = False
# Initiate the arrays for t and T and the matrix for a (alpha)
self.csvFileLocation2 = 'None'
self.t = np.zeros(0)
self.T_335_6 = np.zeros(0)
self.dT_335_6 = np.zeros(0)
self.a_array = np.zeros(100)
self.a = np.zeros((10,10))
# Initiate the Boolean that says if there was a penalty for failing before t_final
self.PERCS_failed = False
self.PERCS_penalty = 1.0
self.cost_penalty = 1.0
self.ss_fail = False # Redundant
# Initialize PERCS system equipment
self.p716 = Pipe(self.elev)
self.p717 = Pipe(0.0)
self.support = Support(self.R_tank,self.Len,0.0)
self.hx = HX()
self.tank = Tank(self.R_tank,self.Len)
self.chemical = Chemical(0)
##########################
""" Init stuff for PCS """
##########################
self.pinch_point = False
# Initialize all Streams with zeros
self.s = np.array([0])
for i in range(1,37):
self.s = np.append(self.s,Stream(0.0,0.0,0.0,0.0))
# Create the PCS equipment w/ original opt. parameters
self.phx = PHX(self.To_PHX)
self.t1a = Turbine(0.0,0.0,0.0,0.0,self.Po_t1a)
self.t1b = Turbine(0.0,0.0,0.0,0.0,self.Po_t1b)
self.t1c = Turbine(0.0,0.0,0.0,0.0,self.Po_t1c)
self.t1 = Turbine(0.0,0.0,0.0,0.0,self.Po_t1c)
self.ms1 = MS(self.Po_t1c,0.0,0.0,0.0)
self.rh1 = Reheater(1,self.Po_t1a,0.0,0.0,0.0,self.Po_t1c,0.0,0.0,False)
self.t2a = Turbine(0.0,0.0,0.0,0.0,self.Po_t2a)
self.t2b = Turbine(0.0,0.0,0.0,0.0,self.Po_t2b)
self.t2 = Turbine(0.0,0.0,0.0,0.0,self.Po_t2b)
self.ms2 = MS(self.Po_t2b,0.0,0.0,0.0)
self.rh2 = Reheater(2,0.0,0.0,0.0,0.0,self.Po_t2b,0.0,0.0,False)
self.t3 = Turbine(0.0,0.0,0.0,0.0,0.086)
self.t4 = Turbine(0.0,0.0,0.0,0.0,0.086)
self.t5 = Turbine(0.0,0.0,0.0,0.0,0.086)
self.cond = Condenser(0.086,0.0,0.0,0.0)
self.fwh1 = FWH(0.0,0.0,0.0,0.0,0.0,0.0,0.0)
self.fwh2 = FWH(0.0,0.0,0.0,0.0,0.0,0.0,0.0)
self.fwh3 = FWH(0.0,0.0,0.0,0.0,0.0,0.0,0.0)
self.fwh4 = FWH(0.0,0.0,0.0,0.0,0.0,0.0,0.0)
self.p1 = Pump(0.0,0.0,0.0,self.phx.Pin)
self.p2 = Pump(0.0,0.0,0.0,self.Po_t1a)
self.p3 = Pump(0.0,0.0,0.0,0.0)
self.p4 = Pump(0.0,0.0,0.0,0.0)
self.p5 = Pump(0.0,0.0,0.0,0.0)
self.p6 = Pump(0.0,0.0,0.0,0.0)
##########################################################
""" Initiate all objective function and maximin values """
##########################################################
# For ZION Core
self.W_rcp = 0.0 # 1
self.cost_1 = 0.0 # 2
self.obj_1_1 = 0.0 # W_rcp
self.obj_1_2 = 0.0 # cost_1
self.fmm_1 = 0
# For PERCS
self.cost_2 = 0.0 # 1
self.dT_int = 0.0 # 2
self.alpha = 0.0 # 3
self.obj_2_1 = 0.0 # cost_2
self.obj_2_2 = 0.0 # dT_int
self.obj_2_3 = 0.0 # consumption(alpha)
self.fmm_2 = 0
# For Rankine PCS
self.color = 'black'
self.eff = 0.0
self.inv_eff = 0.0 # 1
self.cost_3 = 0.0 # 2
self.obj_3_1 = 0.0 # inv_eff
self.obj_3_2 = 0.0 # cost_3
self.fmm_3 = 0
# Overall fmm-value
self.obj_fmm_1 = 0.0 # normalized fmm_1
self.obj_fmm_2 = 0.0 # normalized fmm_2
self.obj_fmm_3 = 0.0 # normalized fmm_3
self.fmm_o = 0
#######################################################
""" Perform the initial calculations for the Option """
#######################################################
self.init_ZION_calcs()
self.init_PERCS_calcs()
"""
The initial calcs take place in the init_ZION_calcs(), init_PERCS_calcs()
function below.
The RELAP5 and PyPost files are run from the Population.calc_Options() function.
The obj. function and constraints calcs are run from the
Population.final_Option_calcs() function.
"""
def init_ZION_calcs(self):
##############################################
""" Calcs corresponding to a change in R_f """
##############################################
#-----------------------------
""" Core Area Calculations """
#-----------------------------
## Constants and Ratios
ratio_f2m = 0.48374681 # Fuel to Moderator Ratio
th_g = 0.002 # ft
th_c = 0.0005 # ft
self.n_pins = 41958.0554 # ~42,000 (value derived from RELAP5 model)
ratio_p2D = 1.35532 # Fuel Pin Pitch to Diameter Ratio
## Calculations
self.R_g = np.round(self.R_f + th_g, 4) # Gap radius [ft]
self.R_c = np.round(self.R_f + th_g + th_c, 4) # Cladding radius [ft]
pitch = ratio_p2D * (2.0 * self.R_c) # ft
self.p = np.round(pitch, 4) # Fuel pin pitch [ft]
A_f = np.pi * self.R_f**2.0 # Fuel A_c [ft^2]
A_g = np.pi * (self.R_g**2.0 - self.R_f**2.0) # Gap A_c [ft^2]
A_c = np.pi * (self.R_c**2.0 - self.R_g**2.0) # Cladding A_c [ft^2]
A_p = A_f + A_g + A_c # Fuel pin A_c [ft^2]
self.A_fuel = self.n_pins * A_f # Total fuel pin A_c [ft^2]
self.A_gap = self.n_pins * A_g # Total gap A_c [ft^2]
self.A_clad = self.n_pins * A_c # Total cladding A_c [ft^2]
A_pins = self.n_pins * A_p # Total fuel pin A_c [ft^2]
self.A_H2O = self.A_fuel / ratio_f2m # Core coolant A_c [ft^2]
self.A_total = A_pins + self.A_H2O # Total core A_c [ft^2]
self.A_335 = np.round(self.A_H2O,5) # Rounded core A_c [ft^2]
A_jun_diff_335 = 2.207 # Total A_c of the baffle [ft^2]
# Junction A_c at end of core flow segment
self.A_jun_335 = np.round(self.A_H2O - A_jun_diff_335, 5) # ft^2
# Hydraulic diameter of core flow segment 335 [ft]
D_hyd = 4.0 * (pitch**2.0 - np.pi*self.R_c**2.0) / (2.0*np.pi*self.R_c)
# Rounded hydraulic diameter of core flow segment 335
self.Dh_335 = np.round(D_hyd,5) # ft
# A_c of branch 336 (core above baffle) [ft^2]
A_336 = np.round(0.272*(self.A_H2O-self.A_jun_335)+self.A_jun_335, 5)
## Fill the lists
self.cards[114:117] = ['13360101','13360102','13360103']
self.cards[78:80] = ['3350101','3350201']
self.cards[86:88] = ['3350801','3360101']
self.i_vals[114:117] = [3,3,3]
self.i_vals[78:80] = [2,2]
self.i_vals[86:88] = [3,4]
self.vals[114:117] = [self.R_f,self.R_g,self.R_c]
self.vals[78:80] = [self.A_335,self.A_jun_335]
self.vals[86:88] = [self.Dh_335,A_336]
#------------------------------------
""" Outer Area/R_eff Calculations """
#------------------------------------
## Constants and Ratios
R_in_barrel = 6.1667 # Inner radius of the barrel [ft]
th_baffle = 0.0937 # Thickness of the barrel [ft]
ratio_baffle_2_core = 1.2577045 # Ratio b/t core and effective baffle
## Calculations
self.R_core = np.sqrt(self.A_total/np.pi) # Radius of the entire core [ft]
# Effective inner radius of the baffle
Reff_in_baffle = self.R_core * ratio_baffle_2_core # ft
# Rounded effective inner radius of the baffle
left_bc_1335 = np.round(Reff_in_baffle, 4) # ft
# Effective outer radius of the the baffle
Reff_out_baffle = Reff_in_baffle + th_baffle # ft
# Rounded effective outer radius of the baffle
right_bc_1335 = np.round(Reff_out_baffle, 4) # ft
# A_c taken up by the baffle
A_baffle = np.pi * (Reff_out_baffle**2.0 - Reff_in_baffle**2.0) # ft^2
# Total A_c of core contents (calc'd from inside out)
A_total_plus_baffle = self.A_total + A_baffle # ft^2
# Total A_c of core (calc'd from outside in)
A_total_in_barrel = np.pi * R_in_barrel**2.0 # ft^2
self.A_320_bypass = 0.0
if (A_total_in_barrel - A_total_plus_baffle) > 18.6736:
self.A_320_bypass = 18.6736 # ft^2
else:
self.A_320_bypass = A_total_in_barrel - A_total_plus_baffle # ft^2
Dh_320 = 0.9591 # Hydraulic diameter of core bypass [ft]
## Fill the lists
self.cards[106:108],self.cards[70],self.cards[77] = ['13350000','13350101'],'3200101','3200801'
self.i_vals[106:108],self.i_vals[70],self.i_vals[77] = [6,3],2,3
self.vals[106:108],self.vals[70],self.vals[77] = [left_bc_1335,right_bc_1335],self.A_320_bypass,Dh_320
#################################################
""" Calcs corresponding to a change in H_fuel """
#################################################
#---------------------------
""" RPV len's and elev's """
#---------------------------
## Ratios and Percentages
# Height ratio b/t core flow segment (335) and actual fuel w/in pins
ratio_H335_2_Hfuel = 1.1145844358
# Length fractions per node along core flow segment (335)
L_frac_335 = np.array((0.187389,0.1632396,0.1632396,0.1632396,0.1632396,0.1596523))
# Length fractions per node along fuel in pins
L_frac_pin = np.array((0.1819444,0.1819444,0.1819444,0.1819444,0.1819444,0.090278))
## Calculations
# Height of core flow segment (335)
self.H_335 = self.H_fuel * ratio_H335_2_Hfuel # ft
# Lengths per node along core flow segment (335)
len_335 = np.round(self.H_335 * L_frac_335, 5) # ft
# Lengths of 'len_335' for upward-oriented RELAP5 flow segments
Lu = [len_335[0],len_335[3],len_335[5]] # ft
# Lengths of 'len_335' for downward-oriented RELAP5 flow segments
Ld = [len_335[5],len_335[3],len_335[0]] # ft
# Lengths of 'len_335' for downward-flowing RELAP5 flow segments
nLd = [-len_335[5],-len_335[3],-len_335[0]] # ft
len_pin = np.round(self.H_fuel * L_frac_pin, 5) # Rounded length of pin [ft]
C_pin = 2.0*np.pi * self.R_c # Circumference of fuel pin [ft]
# Total pin surface area on node 5
SA_1336_5R = np.round(self.n_pins * C_pin * len_pin[4], 4) # ft^2
# Total pin surface area on node 6
SA_1336_6R = np.round(self.n_pins * C_pin * len_pin[5], 4) # ft^2
## Fill the lists
self.cards[80:86] = ['3350301','3350302','3350303','3350701','3350702','3350703']
self.i_vals[80:86] = [2,2,2,2,2,2]
self.vals[80:86] = Lu+Lu
self.cards[71:77] = ['3200301','3200302','3200303','3200701','3200702','3200703']
self.i_vals[71:77] = [2,2,2,2,2,2]
self.vals[71:77] = Ld+nLd
self.cards[64:70] = ['3150301','3150302','3150303','3150701','3150702','3150703']
self.i_vals[64:70] = [2,2,2,2,2,2]
self.vals[64:70] = Ld+nLd
self.cards[88:94] = ['13150501','13150502','13150503','13150601','13150602','13150603']
self.i_vals[88:94] = [6,6,6,6,6,6]
self.vals[88:94] = Ld+Ld
self.cards[94:100] = ['13160501','13160502','13160503','13160601','13160602','13160603']
self.i_vals[94:100] = [6,6,6,6,6,6]
self.vals[94:100] = Ld+Ld
self.cards[100:106] = ['13200501','13200502','13200503','13200601','13200602','13200603']
self.i_vals[100:106] = [6,6,6,6,6,6]
self.vals[100:106] = Ld+Ld
self.cards[108:114] = ['13350501','13350502','13350503','13350601','13350602','13350603']
self.i_vals[108:114] = [6,6,6,6,6,6]
self.vals[108:114] = Lu+Lu
self.cards[117:119] = ['13360601','13360602']
self.i_vals[117:119] = [6,6]
self.vals[117:119] = [SA_1336_5R,SA_1336_6R]
#------------------------------
""" PERCS p717 len and elev """
#------------------------------
## Calculations
# Deviation from original height of the fuel (for PERCS pipe 717 calc)
self.len_diff_717 = ratio_H335_2_Hfuel * (self.H_fuel - 11.99971) # ft
##################################################
""" Calcs corresponding to changes in pipe D's """
##################################################
## Calculations
A_00 = np.round(np.pi/4.0*self.Dh_00**2.0, 3) # A_c of pipes _00 [ft^2]
A_12 = np.round(np.pi/4.0*self.Dh_12**2.0, 3) # A_c of pipes _12 [ft^2]
A_14 = np.round(np.pi/4.0*self.Dh_14**2.0, 3) # A_c of pipes _14 [ft^2]
## Fill the lists
self.cards[0:6] = ['1000101','1000801','1020101','1020101','1040101','1040801']
self.i_vals[0:6] = [2,3,2,9,2,3]
self.vals[0:6] = [A_00,self.Dh_00,A_00,self.Dh_00,A_00,self.Dh_00]
self.cards[6:10] = ['1120101','1120801','1130101','1130108']
self.i_vals[6:10] = [2,3,2,3]
self.vals[6:10] = [A_12,self.Dh_12,A_12,A_12]
self.cards[10:19] = ['1130109','1140101','1140801','1160101','1160101','1161101','1162101','1180101','1180801']
self.i_vals[10:19] = [3,2,3,2,9,4,4,2,3]
self.vals[10:19] = [A_14,A_14,self.Dh_14,A_14,self.Dh_14,A_14,A_14,A_14,self.Dh_14]
self.cards[19:25] = ['4000101','4000801','4120101','4120801','4130101','4130108']
self.i_vals[19:25] = [2,3,2,3,2,3]
self.vals[19:25] = [A_00,self.Dh_00,A_12,self.Dh_12,A_12,A_12]
self.cards[25:34] = ['4130109','4140101','4140801','4160101','4160101','4161101','4162101','4180101','4180801']
self.i_vals[25:34] = [3,2,3,2,9,4,4,2,3]
self.vals[25:34] = [A_14,A_14,self.Dh_14,A_14,self.Dh_14,A_14,A_14,A_14,self.Dh_14]
self.cards[34:40] = ['6000101','6000801','6120101','6120801','6130101','6130108']
self.i_vals[34:40] = [2,3,2,3,2,3]
self.vals[34:40] = [A_00,self.Dh_00,A_12,self.Dh_12,A_12,A_12]
self.cards[40:49] = ['6130109','6140101','6140801','6160101','6160101','6161101','6162101','6180101','6180801']
self.i_vals[40:49] = [3,2,3,2,9,4,4,2,3]
self.vals[40:49] = [A_14,A_14,self.Dh_14,A_14,self.Dh_14,A_14,A_14,A_14,self.Dh_14]
self.cards[49:55] = ['2000101','2000801','2120101','2120801','2130101','2130108']
self.i_vals[49:55] = [2,3,2,3,2,3]
self.vals[49:55] = [A_00,self.Dh_00,A_12,self.Dh_12,A_12,A_12]
self.cards[55:64] = ['2130109','2140101','2140801','2160101','2160101','2161101','2162101','2180101','2180801']
self.i_vals[55:64] = [3,2,3,2,9,4,4,2,3]
self.vals[55:64] = [A_14,A_14,self.Dh_14,A_14,self.Dh_14,A_14,A_14,A_14,self.Dh_14]
def init_PERCS_calcs(self):
# Calc the number of tubes in PERCS
Ac_tank = np.pi * self.R_tank**2.0 # A_c of entire tank ft^2
Ac_hex = np.sqrt(3)/2 * self.pitch**2.0 # A_c of hexagon around tube [ft^2]
self.n_tubes = np.round(Ac_tank / Ac_hex) # Number of PERCS tubes
self.hx.n = self.n_tubes
# Calc the heat transfer Surface Area in PERCS
OD_tube = self.D_h + 2.0*self.th # Outer D of tube [ft]
SA_tube = np.pi*OD_tube*self.Len # Surface area of tube [ft^2]
SA_tot = SA_tube * self.n_tubes # Total surface area of tubes ft^2]
self.hx.A = SA_tot / 10.7639 # m^2
# Perform calcs for HX and Tank
self.hx.calc_HX()
self.tank.calc_Tank()
# Calc the total cross-sectional Area of all tubes
Ac_tube = np.pi*(self.D_h/2.0)**2 # ft^2
Ac_tubes = np.round(Ac_tube*self.n_tubes,5) # ft^2
# Calc the length of a single node along the tubes
len_node = np.round((self.Len / 10.0),5) # ft
# Calc the thickness of a single MgCO3 section (there being 10 across)
R_hex = np.sqrt(Ac_hex/np.pi) # ft
OR_tube = OD_tube / 2.0 # ft
th_MgCO3 = '%.5g'%((R_hex - OR_tube)/10.0) # ft
# Calc the heat transfer length between all tubes and MgCO3 per node
HT_len_per_node = np.round((len_node*self.n_tubes),5) # ft
# Calc the len and elev of Pipe 717
        self.elev_717 = np.round(-(15.62469 + self.elev - self.Len + self.len_diff_717),5) # ft (was 15.62463)
# Now replace elev_717 values and solve Pipe 717 and Support
self.p717.len = -self.elev_717 # ft
self.p717.calc_Pipe()
self.support.elev = np.round(-self.elev_717,5) # ft
self.support.calc_Support()
""" Calc the masses of MgCO3 in each of the 10 sections per axial node """
# Define rho_MgCO3, then Calc Ac_tank, Vol_MgCO3, m_MgCO3
rho_MgCO3 = 5.29903 # kg/ft^3
Ac_tank = np.pi*self.R_tank**2 # ft^2
Vol_MgCO3 = (Ac_tank - Ac_tubes)*self.Len #ft^3
self.m_MgCO3 = rho_MgCO3 * Vol_MgCO3 # kg
# Since we calculated the total chemical mass, calc the Chemical costs
self.chemical.mass = self.m_MgCO3
self.chemical.calc_Chemical()
# Create array of radii of the all radial nodes
radii = np.empty(11)
for f in range(11):
radii[f] = 0.5*self.D_h + self.th + f*float(th_MgCO3) # ft
# Create array of annular area for all radial nodes
areas = np.empty(10)
for h in range(10):
areas[h] = np.pi*radii[h+1]**2.0 - np.pi*radii[h]**2.0 # ft^2
tot_area = sum(areas) # ft^2
# Create array of area ratios for all radial nodes
self.ratio = np.empty(10)
for k in range(10):
self.ratio[k] = areas[k]/tot_area
# Create array of MgCO3 masses per axial node for all radial nodes
self.masses = np.empty(10)
for n in range(10):
self.masses[n] = np.round(self.ratio[n]*self.m_MgCO3/10,5) # kg
""" The Filling of the card, i_change, and change Lists """
# Start filling the card, i_change, and change lists
self.list_card[0:4] = ['7160101','7160101','7170101','7170101']
self.list_i_change[0:4] = [3,7,3,7]
self.list_change[0:4] = [self.elev,self.elev,self.p717.len,self.elev_717]
# Fill lists w/ changes to PERCS tube Ac, node length, & D_h
self.list_card[4:7] = ['7020101','7020301','7020801']
self.list_i_change[4:7] = [2,2,3]
self.list_change[4:7] = [Ac_tubes,len_node,self.D_h]
# Fill lists w/ the remaining tube node lengths
card_ = 7070301
for k in range(9):
self.list_card[7+k] = repr(card_)
self.list_i_change[7+k] = 2
self.list_change[7+k] = len_node
card_ = card_ + 10000
# Fill lists w/ changes to tube & MgCO3 thicknesses
self.list_card[16:18] = ['14000101','14000102']
self.list_i_change[16:18] = [2,2]
self.list_change[16:18] = [self.th,th_MgCO3]
# Fill lists w/ changes to "heat transfer length b/t all tubes and MgCO3 per node"
self.list_card[18:20] = ['14000501','14000601']
self.list_i_change[18:20] = [6,6]
self.list_change[18:20] = [HT_len_per_node,HT_len_per_node]
# Fill lists w/ changes to 9 other MgCO3 thicknesses
card_ = 14001101
for k in range(9):
self.list_card[20+k] = repr(card_)
self.list_i_change[20+k] = 2
self.list_change[20+k] = th_MgCO3
card_ = card_ + 1000
# Fill lists w/ changes to MgCO3 masses for all 10 sections
self.list_card[29] = '20507070'
self.list_i_change[29] = 4
self.list_change[29] = self.masses[0]
card_ = 20514020
for k in range(1,10):
self.list_card[29+k] = repr(card_)
self.list_i_change[29+k] = 4
self.list_change[29+k] = self.masses[k]
card_ = card_ + 10
def final_ZION_calcs(self):
###################################
""" Grab all the .csv file data """
###################################
#--------------------------
""" tempf.csv file data """
#--------------------------
# Read steam generator T-data from .csv file into 'tempf_data' array
csv_name = self.csvFileLocation + "\\tempf_data.csv"
tempf_data = np.zeros(2)
with open(csv_name) as csvfile:
numreader = csv.reader(csvfile)
i = 0
for row in numreader:
tempf_data[i] = float(row[0]) # K
i = i + 1
# Assign tempf_data to the correct variables
self.T_106 = tempf_data[0] - 273.15 # degC
self.T_110 = tempf_data[1] - 273.15 # degC
#----------------------
""" p.csv file data """
#----------------------
# Read P-data from .csv file into 'P_106', 'P_110', and 'P_335[]'
csv_name2 = self.csvFileLocation + "\\p_data.csv"
p_data = np.zeros(9)
with open(csv_name2) as csvfile2:
numreader2 = csv.reader(csvfile2)
i = 0
for row in numreader2:
p_data[i] = float(row[0]) # Pa
i = i + 1
# Assign p_data to the correct variables
self.P_106 = p_data[0] / 10**5.0 # bar
self.P_110 = p_data[1] / 10**5.0 # bar
for i in range(6):
self.P_335[i] = p_data[i+2] / 10**6.0 # MPa
self.P_p_out = p_data[8] / 10**5.0 # bar
#---------------------------
""" mflowj.csv file data """
#---------------------------
# Read m_dot-data from .csv file into 1 combined array
csv_name3 = self.csvFileLocation + "\\mflowj_data.csv"
mflowj_data = np.zeros(29)
with open(csv_name3) as csvfile3:
numreader3 = csv.reader(csvfile3)
i = 0
for row in numreader3:
mflowj_data[i] = float(row[0]) # kg/s
i = i + 1
# Assign averaged mflowj_data to the correct variables
m_dot_100_data = mflowj_data[0:7] # kg/s
self.m_dot_100 = np.average(m_dot_100_data) # kg/s
m_dot_400_data = mflowj_data[7:14] # kg/s
self.m_dot_400 = np.average(m_dot_400_data) # kg/s
m_dot_600_data = mflowj_data[14:21] # kg/s
self.m_dot_600 = np.average(m_dot_600_data) # kg/s
m_dot_200_data = mflowj_data[21:28] # kg/s
self.m_dot_200 = np.average(m_dot_200_data) # kg/s
self.m_dot_335 = mflowj_data[28] # kg/s
#--------------------------
""" hvmix.csv file data """
#--------------------------
# Read enthalpy data from .csv file into 1 combined array
csv_name4 = self.csvFileLocation + "\\hvmix_data.csv"
hvmix_data = np.zeros(11)
with open(csv_name4) as csvfile4:
numreader4 = csv.reader(csvfile4)
i = 0
for row in numreader4:
hvmix_data[i] = float(row[0]) # J/kg
i = i + 1
# Assign hvmix_data to the correct variables
self.H_106 = hvmix_data[0] / 10**3.0 # kJ/kg
self.H_110 = hvmix_data[1] / 10**3.0 # kJ/kg
self.H_335_1 = hvmix_data[2] / 10**3.0 # kJ/kg
self.H_112_5 = hvmix_data[3] / 10**3.0 # kJ/kg
self.H_114 = hvmix_data[4] / 10**3.0 # kJ/kg
self.H_412_5 = hvmix_data[5] / 10**3.0 # kJ/kg
self.H_414 = hvmix_data[6] / 10**3.0 # kJ/kg
self.H_612_5 = hvmix_data[7] / 10**3.0 # kJ/kg
self.H_614 = hvmix_data[8] / 10**3.0 # kJ/kg
self.H_212_5 = hvmix_data[9] / 10**3.0 # kJ/kg
self.H_214 = hvmix_data[10] / 10**3.0 # kJ/kg
#---------------------------
""" httemp.csv file data """
#---------------------------
# Read fuel/cladding T-data from .csv file into 2 arrays
csv_name5 = self.csvFileLocation + "\\httemp_data.csv"
httemp_data = np.zeros(12)
with open(csv_name5) as csvfile5:
numreader5 = csv.reader(csvfile5)
i = 0
for row in numreader5:
httemp_data[i] = float(row[0]) # K
i = i + 1
# Assign httemp_data to the correct variables
for j in range(6):
self.T_1336_1[j] = httemp_data[j] - 273.15 # degC
for j in range(6):
self.T_1336_17[j] = httemp_data[j+6] - 273.15 # degC
#--------------------------
""" quale.csv file data """
#--------------------------
# Read core quality data from .csv file into array
csv_name6 = self.csvFileLocation + "\\quale_data.csv"
        quale_data = np.zeros(6)
        # Completed by symmetry with the readers above (the original file is
        # truncated at this point).
        with open(csv_name6) as csvfile6:
            numreader6 = csv.reader(csvfile6)
            i = 0
            for row in numreader6:
                quale_data[i] = float(row[0])
                i = i + 1
        for j in range(6):
            self.x_e_335[j] = quale_data[j]
# This files contains your custom actions which can be used to run
# custom Python code.
#
# See this guide on how to implement these action:
# https://rasa.com/docs/rasa/core/actions/#custom-actions/
# This is a simple example for a custom action which utters "Hello World!"
import json
from typing import Any, Text, Dict, List
from bert_serving.client import BertClient
from rasa_sdk import Action, Tracker
from rasa_sdk.executor import CollectingDispatcher
import numpy as np
import os
import boto3
import glob
bertHost = 'bert'
BUCKET = 'BUCKET'
FAQ = 'FAQ'
VOLUME = 'VOLUME'
class ActionGetFAQAnswer(Action):
def __init__(self):
super(ActionGetFAQAnswer, self).__init__()
self.bc = BertClient(bertHost)
self.faq, self.encoder, self.encoder_len = encode_faq(self.bc)
print(self.encoder.shape)
def find_question(self, query_question):
query_vector = self.bc.encode([query_question])[0]
score = np.sum((self.encoder * query_vector), axis=1) / (
self.encoder_len * (np.sum(query_vector * query_vector) ** 0.5))
top_id = np.argsort(score)[::-1][0]
return top_id, score[top_id]
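    # score is cosine similarity computed against precomputed row norms:
    # (E @ q) / (||E_i|| * ||q||); top_id is the best-matching FAQ entry.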
def name(self) -> Text:
return "action_get_answer"
def run(
self,
dispatcher: CollectingDispatcher,
tracker: Tracker,
domain: Dict[Text, Any]
) -> List[Dict[Text, Any]]:
query = tracker.latest_message['text']
most_similar_id, score = self.find_question(query)
if float(score) > 0.93:
response = self.faq[most_similar_id]['a']
dispatcher.utter_message(response)
else:
response = 'Sorry, this question is beyond my ability...'
dispatcher.utter_message(response)
return []
def get_faq(volume):
bucket = None
faq = None
if BUCKET in os.environ:
bucket = os.environ[BUCKET]
if FAQ in os.environ:
faq = os.environ[FAQ]
if bucket and faq:
s3client = boto3.client('s3')
try:
print(f'Fetching s3://{bucket}/{faq}')
r = s3client.get_object(Bucket=bucket, Key=faq)
data = json.load(r['Body'])
faq = f'{volume}/{os.path.basename(faq)}'
print(f'Saving {faq}')
with open(faq, 'wt', encoding='utf-8') as f:
json.dump(data, f, ensure_ascii=False, indent=4)
return data
except Exception as ex:
print(f'Cannot get FAQ from s3://{bucket}/{faq}: {ex}')
if not os.path.exists(faq):
faq = f'{volume}/{os.path.basename(faq)}'
print(f'Loading FAQ from {faq}')
if os.path.exists(faq):
with open(faq, 'rt', encoding='utf-8') as f:
return json.load(f)
return None
def encode_faq(bc):
volume = '.'
if VOLUME in os.environ:
volume = os.environ[VOLUME]
faq = get_faq(volume)
questions = [each['q'] for each in faq]
with open(f'{volume}/faq.md', 'wt', encoding="utf-8") as f:
f.write('## intent:faq\n')
for q in questions:
f.write(f'- {q}\n')
print(f'FAQ size {len(questions)}')
print('Calculating encoder')
encoder = bc.encode(questions)
np.save(f'{volume}/questions', encoder)
    encoder_len = np.sqrt(np.sum(encoder * encoder, axis=1))
    return faq, encoder, encoder_len
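# Illustrative usage sketch (assumes a bert-serving-server is reachable at
# `bertHost`):
#   bc = BertClient(bertHost)
#   faq, encoder, encoder_len = encode_faq(bc)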
#%%
import numpy as np
import pandas as pd
import tensorflow as tf
import scipy.io
import random
import xgboost as xgb
import seaborn as sns
from tensorflow import keras
from tensorflow.keras import layers
from sklearn.model_selection import train_test_split
from sklearn import metrics, decomposition
import plotly.graph_objects as go
import matplotlib.pyplot as plt
from tensorflow.python.keras.backend import std
from tensorflow.python.keras.layers import embeddings
from tensorflow.python.keras.layers.core import RepeatVector
from tensorflow.python.keras.losses import mean_squared_error
from tensorflow.python.util.nest import _IF_SHALLOW_IS_SEQ_INPUT_MUST_BE_SEQ
from load_data import *
#%%
SAMPLE_LEN = 5
channels = 8
latent_dim = 3
data, label = split_session(SAMPLE_LEN)
x_train, x_test, y_train, y_test = train_test_split(session_data_1, session_data_1, train_size = 0.85)
x_val, x_test, y_val, y_test = train_test_split(x_test, y_test, train_size = 0.5)
#%%
new_batch = []
for i, batch in enumerate(session_data_1):
print(batch.shape[1]//SAMPLE_LEN)
new_batch.extend(np.array_split(batch, batch.shape[1]//SAMPLE_LEN, axis = 1)[1:batch.shape[1]//SAMPLE_LEN-2])
#%%
np.array(new_batch)
#%%
def create_model():
    input = keras.Input(shape = (SAMPLE_LEN, channels)) # fixed-length window of channel readings
x = layers.LSTM(64, return_sequences=True)(input)
x = layers.LSTM(64, return_sequences=True)(x)
#x = layers.RepeatVector(sample_len)(x)
#x = layers.LSTM(64, return_sequences=True)(x)
x = layers.TimeDistributed(layers.Dense(16))(x)
x = layers.Dense(latent_dim, name = 'embedding')(x)
x = layers.Dense(16)(x)
output = layers.Dense(channels, activation='linear')(x)
model = keras.Model(inputs=[input],outputs=[output])
return model
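# The network is a sequence autoencoder: each (SAMPLE_LEN, channels) window
# is squeezed through the latent_dim-wide 'embedding' layer and reconstructed,
# so reconstruction error can serve as an anomaly score.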
model = create_model()
keras.utils.plot_model(model, "keras_LSTM_autoencoder.png", show_shapes=True)
#%%
model.summary()
model.compile(optimizer=keras.optimizers.Adam(1e-3), loss='mse') #tf.keras.metrics.mean_squared_error
history = model.fit(x_train, y_train,
validation_data = (x_val, y_val),
epochs=100, batch_size=32)
plt.plot(history.history['loss'])
plt.yscale("log")
plt.show()
def get_embedding(sequences, model):
encoder_model = keras.Model(inputs = model.input,
outputs = model.get_layer('embedding').output)
try:
emb = encoder_model.predict(sequences)[:,-1,:]
except:
sequences = list(sequences)
emb = encoder_model.predict(sequences)[:,-1,:]
return emb
def plot_3D_embedding(sequences, model):
emb_vec = get_embedding(sequences, model)
try:
emb_vec = emb_vec.reshape(3,len(list(emb_vec)))
except:
'Embedding dim != 3'
fig = go.Figure(data=[go.Scatter3d(x=emb_vec[0], y=emb_vec[1], z=emb_vec[2],
mode='markers')])
fig.show()
def plot_curve_comparison(sequences, model, n_sample = 1, title = ''):
for i in range(n_sample):
        pred = model.predict(sequences[i][np.newaxis])
        plt.plot(sequences[i], label = 'Truth')
        plt.plot(pred.reshape(SAMPLE_LEN), label = 'Pred')
plt.title(title)
plt.legend()
plt.show()
def plot_reconstruction_error(sequences, model, n_sample = 1, title = ''):
res = model.predict(sequences)
try:
s_len = len(sequences)
except:
s_len = sequences.shape[0]
for i in np.random.randint(0, s_len - 1, size = n_sample):
plt.plot(res[i] - sequences[i])
plt.title(title)
plt.show()
#%%
plot_reconstruction_error(x_train, model, 2, title = 'Train: Rec - Truth')
plot_reconstruction_error(x_test, model, 2, title = 'Test: Rec - Truth')
plot_reconstruction_error(x_val, model, 2, title = 'Val: Rec - Truth')
if channels == 1:
plot_curve_comparison(x_train, model, title = 'Train')
plot_curve_comparison(x_test, model, title = 'Test')
plot_curve_comparison(x_val, model, title = 'Validation')
if latent_dim == 3:
plot_3D_embedding(x_train, model)
plot_3D_embedding(x_test, model)
plot_3D_embedding(x_val, model)
#%%
if channels == 1:
def stress_seq(rand = 'normal'):
ampl = np.random.uniform(0.1, 1.)
freq = np.random.uniform(0.01, 0.1)
high_noise = np.random.uniform(0.1, 0.5)
        low_noise = np.random.uniform(0.1, 0.5)
from warnings import warn
import numpy as np
from scipy import sparse
def _asarray(a):
"""convenience - turn np.matrix to np.array including dim reduction"""
return np.array(a).squeeze()
def _sum_false(msk, axis):
if axis is None:
top = msk.shape[0] * msk.shape[1]
else:
top = msk.shape[axis]
return _asarray(top - msk.sum(axis=axis))
def make_nan_mask(x):
nans = np.isnan(x.data)
msk = sparse.csr_matrix((nans, x.indices, x.indptr), shape=x.shape)
return msk
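# Illustrative usage sketch:
#   x = sparse.csr_matrix(np.array([[1.0, np.nan], [0.0, 2.0]]))
#   make_nan_mask(x).toarray()  # True exactly where stored values are NaN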
def mean(x, axis=None, mask=None, **kwargs):
if mask is None:
m = np.mean(x, axis=axis, **kwargs)
        if np.isnan(m).any():
            # Assumed completion (the original file is truncated here).
            warn("mean result contains NaN; consider passing mask=make_nan_mask(x)")
        return _asarray(m)
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for fedjax.google.experimental.serialization."""
from absl.testing import absltest
from fedjax.experimental import serialization
import numpy as np
import numpy.testing as npt
class SerializationTest(absltest.TestCase):
def test_dict(self):
original = {
'int32':
np.arange(4, dtype=np.int32).reshape([2, 2]),
'float64':
-np.arange(4, dtype=np.float64).reshape([1, 4]),
'bytes':
np.array([b'a', b'bc', b'def'], dtype=np.object).reshape([3, 1]),
}
output = serialization.msgpack_deserialize(
serialization.msgpack_serialize(original))
self.assertCountEqual(output, original)
self.assertEqual(output['int32'].dtype, np.int32)
npt.assert_array_equal(output['int32'], original['int32'])
self.assertEqual(output['float64'].dtype, np.float64)
npt.assert_array_equal(output['float64'], original['float64'])
self.assertEqual(output['bytes'].dtype, np.object)
npt.assert_array_equal(output['bytes'], original['bytes'])
def test_nested_list(self):
original = [
np.arange(4, dtype=np.int32).reshape([2, 2]),
[
-np.arange(4, dtype=np.float64).reshape([1, 4]),
[
np.array([b'a', b'bc', b'def'],
dtype=np.object).reshape([3, 1]), []
]
]
]
output = serialization.msgpack_deserialize(
serialization.msgpack_serialize(original))
int32_array, rest = output
self.assertEqual(int32_array.dtype, np.int32)
npt.assert_array_equal(int32_array, original[0])
float64_array, rest = rest
self.assertEqual(float64_array.dtype, np.float64)
npt.assert_array_equal(float64_array, original[1][0])
bytes_array, rest = rest
self.assertEqual(bytes_array.dtype, np.object)
    npt.assert_array_equal(bytes_array, original[1][1][0])
import os
import numpy as np
import numpy.random as rnd
import matplotlib.pyplot as plt
import logging
from pandas import DataFrame
from common.gen_samples import *
from common.data_plotter import *
from aad.aad_globals import *
from aad.aad_support import *
from aad.forest_description import *
from aad.anomaly_dataset_support import *
# from percept.percept import *
"""
pythonw -m aad.plot_anomalies_rectangle
"""
def get_x_tau(x, w, tau):
v = x.dot(w)
ranked = np.argsort(-v)
tau_id = ranked[int(tau * len(v))]
return tau_id, x[tau_id]
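# Worked example of get_x_tau (toy points, not data from this script):
# >>> x = np.array([[0., 0.], [1., 1.], [2., 2.], [3., 3.]])
# >>> w = np.ones(2) / np.sqrt(2.)
# >>> get_x_tau(x, w, 0.25)  # boundary point of the top-25% along w
# (2, array([2., 2.]))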
def plot_anomalies_ifor(outdir, plot=False, plot_legends=False):
u_theta = np.pi * 4. / 4 + np.pi * 5 / 180
x, y = get_sphere_samples([(50, 0, np.pi * 4. / 4, np.pi * 4. / 4 + np.pi * 2 / 4),
(15, 1, u_theta - np.pi * 5 / 180, u_theta + np.pi * 5 / 180),
(15, 1, np.pi * 6. / 4 - np.pi * 1.5 / 180, np.pi * 6. / 4)])
n, d = x.shape
id_nomls = np.where(y == 0)[0]
id_anoms = np.where(y == 1)[0]
n_anoms = len(id_anoms)
x_nomls, y_nomls = x[id_nomls, :], y[id_nomls]
x_anoms, y_anoms = x[id_anoms, :], y[id_anoms]
if plot:
axis_fontsize = 16
line_colors = ["blue", "red", "red"]
line_types = ["--", "--", "-"]
line_widths = [2, 2, 2]
lines = list()
line_labels = list()
tau = n_anoms * 1. / n # multiplying by a factor to move the plane lower
w = normalize(np.ones(2))
r = np.array([np.min(x[:, 0]), np.max(x[:, 0])])
tau_id, x_tau = get_x_tau(x, w, tau)
q_tau = w.dot(x_tau)
# plot the true weight vector
u = interpolate_2D_line_by_point_and_vec(np.array([-1., 1.]), [0., 0.],
[np.cos(u_theta + np.pi * 1 / 4), np.sin(u_theta + np.pi * 1 / 4)])
lines.append(u)
line_labels.append(r"True weights ${\bf u}$")
zd = interpolate_2D_line_by_point_and_vec(np.array([-1., 1.0]), [0., 0.], w)
lines.append(zd)
line_labels.append(r"Uniform weights ${\bf w}_{unif}$")
zw = interpolate_2D_line_by_slope_and_intercept(np.array([-1., 1.]), -w[0] / w[1], q_tau / w[1])
lines.append(zw)
line_labels.append(r"hyperplane $\perp$ ${\bf w}_{unif}$")
pdffile = os.path.join(outdir, "anomalies_in_ifor.pdf")
dp = DataPlotter(pdfpath=pdffile, rows=1, cols=1)
pl = dp.get_next_plot()
pl.set_aspect('equal')
# plt.xlabel('x', fontsize=axis_fontsize)
# plt.ylabel('y', fontsize=axis_fontsize)
plt.xticks([])
plt.yticks([])
plt.xlim([-1.05, 1.05])
plt.ylim([-1.05, 1.05])
pl.scatter(x_nomls[:, 0], x_nomls[:, 1], s=45, c="blue", marker="+", label="Nominal")
pl.scatter(x_anoms[:, 0], x_anoms[:, 1], s=45, c="red", marker="+", label="Anomaly")
for i, line in enumerate(lines):
color = "blue" if line_colors is None else line_colors[i]
pl.plot(line[:, 0], line[:, 1], line_types[i], color=color, linewidth=line_widths[i],
label=line_labels[i] if plot_legends else None)
plt.axhline(0, linestyle="--", color="lightgrey")
plt.axvline(0, linestyle="--", color="lightgrey")
if plot_legends:
pl.legend(loc='lower right', prop={'size': 12})
dp.close()
return x, y
def plot_anomalies_rect(outdir, plot=False, plot_legends=False):
x_nomls = rnd.uniform(0., 1., 500)
x_nomls = np.reshape(x_nomls, newshape=(250, -1))
anom_mu = (0.83, 0.95)
u_theta = np.arctan(0.9 / 0.8)
anom_score_dist = MVNParams(
mu=np.array([anom_mu[0], anom_mu[1]]),
mcorr=np.array([
[1, -0.5],
[0, 1.0]]),
dvar=np.array([0.002, 0.0005])
)
n_anoms = 30
x_anoms = generate_dependent_normal_samples(n_anoms,
anom_score_dist.mu,
anom_score_dist.mcorr,
anom_score_dist.dvar)
x = np.vstack([x_nomls, x_anoms])
y = np.array(np.zeros(x_nomls.shape[0], dtype=int))
y = np.append(y, np.ones(x_anoms.shape[0], dtype=int))
if plot:
n, d = x.shape
# tau is computed assuming that the anomalies occupy tau-proportion
# of the circumference
tau = n_anoms * 1.3 / n # multiplying by a factor to move the plane lower
w = normalize(np.ones(2))
        r = np.array([np.min(x[:, 0]), np.max(x[:, 0])])
import os
import copy
import math
import time
import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
from sklearn import svm
from model_coin.prediction_strength import optimalK
from tqdm import tqdm
class COIN(object):
def __init__(self, data, inds_otlr, nbrs_ratio,
AUG=1.0, MIN_CLUSTER_SIZE=5, MAX_NUM_CLUSTER=4, VAL_TIMES=10, C_SVM=1.,
RESOLUTION=0.05, THRE_PS=0.85, DEFK=0):
"""
data: Data matrix, each row represents one instance
inds_otlr: A vector with each entry telling whether this instance is outlier (1) or not (0)
nbrs_ratio: The ratio of normal instances as the context for each outlier
AUG: An additional feature attached to the input as data augmentation
MIN_CLUSTER_SIZE: Minimum number of nodes in each cluster
MAX_NUM_CLUSTER: Maximum number of clusters considered in prediction strength computation
VAL_TIMES: Number of iterations for computing prediction strength
C_SVM: A hyperparameter in SVM (optimum value would be better to be estimated through validation)
DEFK: Predefined number of clusters in each context. Value 0 means using Prediction Strength to estimate it.
"""
self.data = data
self.dim = data.shape[1]
self.inds_otlr = inds_otlr
self.ano_idx = np.where(inds_otlr == 1)[0]
self.AUG = float(AUG)
self.num_inst = data.shape[0]
self.num_feat = data.shape[1]
self.num_nbrs = int(nbrs_ratio * self.num_inst)
self.MIN_CLUSTER_SIZE = MIN_CLUSTER_SIZE
self.MAX_NUM_CLUSTER = MAX_NUM_CLUSTER
self.VAL_TIMES = VAL_TIMES
self.C_SVM = C_SVM
self.RESOLUTION = RESOLUTION
self.THRE_PS = THRE_PS
self.DEFK = DEFK
# normal instances
self.data_normal = self.data[np.where(self.inds_otlr == 0)[0]]
# nearest nbrs object based on normal instances
self.nbrs = NearestNeighbors(n_neighbors=self.num_nbrs, n_jobs=-1)
self.nbrs.fit(self.data_normal)
def interpret_outliers(self, ids_target, sgnf_vec, int_flag=0):
"""
ids_target: Indices of target outliers
sgnf_vec: A vector indicating the importance of each attribute, as prior knowledge
int_flag: Discrete attribute or not
:return: A list of sorted (outlier_ID, outlierness) tuples, a list of clfs, attr importance 2D-array
"""
# Attach 0 to the augmented feature
        if isinstance(sgnf_vec, (int, float)):
sgnf_vec = np.hstack((np.ones(self.num_feat), 0))
else:
sgnf_vec = np.hstack((sgnf_vec, [0]))
# Interpret each target outlier
oid_devt_dict = dict() # id-score tuples
score_attr_mat = []
for ii in tqdm(range(len(ids_target))):
i = ids_target[ii]
# Do clustering on the context, build one classifier for each cluster
nums_c, clfs, cluster_attr_scale = self.cluster_context(i, int_flag)
# Calculate outlierness score
devt_i = self.CalculateOutlierness(i, clfs, nums_c, sgnf_vec)
oid_devt_dict[i] = devt_i
# Find outlying attributes
score_attr = np.zeros(self.num_feat)
for num_c, clf in zip(nums_c, clfs):
score_attr += num_c * np.abs(clf.coef_[0]) # weighted by the normal cluster size
score_attr /= float(np.sum(nums_c))
score_attr /= np.sum(score_attr) # relative importance
score_attr_mat.append(copy.copy(score_attr))
# print(score_attr)
return np.array(score_attr_mat), oid_devt_dict
def cluster_context(self, id_outlier, int_flag):
# find the context of the outlier
dist_btwn, otlr_nbrs = self.nbrs.kneighbors([self.data[id_outlier]])
dist_btwn, otlr_nbrs = dist_btwn[0], self.data_normal[otlr_nbrs[0], :]
# print(self.data[id_outlier])
# print(otlr_nbrs)
# choose the number of clusters in the context
if self.DEFK == 0:
k_best = optimalK(otlr_nbrs, self.VAL_TIMES, self.MAX_NUM_CLUSTER, self.THRE_PS)
else:
k_best = self.DEFK
        k_best = min(k_best+1, self.MAX_NUM_CLUSTER)  # empirically, it is better to have a larger K
# print('Best k:', k_best)
# clutering the context
kmeans = KMeans(n_clusters=k_best, random_state=0).fit(otlr_nbrs)
label_nbrs = kmeans.labels_
clfs = []
nbrs_mean = []
nums_c = []
cluster_attr_scale = []
# build a linear classifier for each cluster of nbrs
for c in range(k_best):
# indices for instances in cluster c
inds_c = np.where(label_nbrs == c)[0]
# the cluster cannot be too small
if np.size(inds_c) < self.MIN_CLUSTER_SIZE:
continue
nums_c.append(len(inds_c))
# instances for cluster c
otlr_nbrs_c = otlr_nbrs[inds_c, :]
dist_btwn_c = dist_btwn[inds_c]
# distance property of cluster c
cluster_attr_scale.append(np.hstack((np.max(otlr_nbrs_c, axis=0) - np.min(otlr_nbrs_c, axis=0), 0))) # scale for each attr
# synthetic sampling to build two classes
insts_c0 = self.SyntheticSampling(otlr_nbrs_c, self.data[id_outlier], int_flag)
insts_c1 = otlr_nbrs_c
clf = self.SVCInterpreter(insts_c0, insts_c1)
clfs.append(clf)
nbrs_mean.append(np.average(insts_c1, axis=0))
return nums_c, clfs, cluster_attr_scale
def SyntheticSampling(self, insts, otlr, int_flag):
'''
Expand the outlier into a class.
insts: normal instances
otlr: the outlier instance
expand_ratio: expand ratio
int_flag: whether to round to int
:return: two classes of data points
'''
num_c0_new = insts.shape[0] - 1
coeff_c0_new = np.random.rand(num_c0_new, insts.shape[0]) # transformation matrix for synthetic sampling
nbrs_local = NearestNeighbors(n_neighbors=1).fit(insts)
min_dist_to_nbr = nbrs_local.kneighbors([otlr])[0][0, 0]/insts.shape[1]
for r in range(coeff_c0_new.shape[0]):
coeff_c0_new[r, :] /= sum(coeff_c0_new[r, :])
insts_c0_new = np.dot(coeff_c0_new, insts - np.dot(np.ones((insts.shape[0], 1)), [otlr]))
for r in range(insts_c0_new.shape[0]): # shrink to prevent overlap
insts_c0_new[r, :] *= (0.2 * np.random.rand(1)[0] * min_dist_to_nbr)
insts_c0_new += np.dot(np.ones((num_c0_new, 1)), [otlr]) # origin + shift
if int_flag:
insts_c0_new = np.round(insts_c0_new)
insts_c0 = np.vstack((otlr, insts_c0_new))
return insts_c0
def SVCInterpreter(self, insts_c0, insts_c1):
# classification between normal instances and outliers, where outliers have negative output
clf = svm.LinearSVC(penalty='l1', C=self.C_SVM, dual=False, intercept_scaling=self.AUG)
X_c = np.vstack((insts_c0, insts_c1))
y_c = np.hstack((np.zeros(insts_c0.shape[0]), np.ones(insts_c1.shape[0])))
clf.fit(X_c, y_c)
#print(insts_c1)
#print(insts_c0)
return clf
def CalculateOutlierness(self, id_outlier, clfs, nums_c, sgnf_vec):
otlr = self.data[id_outlier]
devt_overall = 0.
for c in range(len(nums_c)):
# distance to the boundary
otlr_aug = np.hstack((otlr, self.AUG))
w = np.hstack((clfs[c].coef_[0], clfs[c].intercept_[0]/self.AUG))
w_a = np.hstack((clfs[c].coef_[0], 0))
dist = -min(0, np.inner(otlr_aug, w))/np.linalg.norm(w_a)
# rescale deviation according to attributes' importance
devt = np.linalg.norm(np.multiply(dist * w_a / np.linalg.norm(w_a), sgnf_vec))
if np.isnan(devt):
devt = 0.
# weighted by the opponent cluster size
devt_overall += devt * nums_c[c]
devt_overall /= sum(nums_c)
return devt_overall
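    # In symbols, each cluster c contributes
    #     d_c    = max(0, -<x_aug, w>) / ||w_a||           (distance to boundary)
    #     devt_c = || (d_c * w_a / ||w_a||) * sgnf_vec ||   (attr-rescaled norm)
    # and the final score is the cluster-size-weighted average of devt_c.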
def fit(self, sgnf_prior):
importance_attr, outlierness = self.interpret_outliers(self.ano_idx, sgnf_prior)
return importance_attr
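    # Usage sketch (synthetic data; names and values are illustrative):
    #     X = np.random.rand(200, 5)
    #     y = np.zeros(200, dtype=int)
    #     y[:10] = 1                           # flag 10 points as outliers
    #     coin = COIN(X, y, nbrs_ratio=0.2)
    #     attr_importance = coin.fit(sgnf_prior=1)   # shape (10, 5)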
def weight2subspace(self, weight, r=0.7, num=-1):
threshold = r * np.sum(weight)
tmp_s = 0
exp_subspace = []
        sorted_idx1 = np.argsort(weight)
# Import required libraries:
import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import curve_fit
from sklearn.metrics import r2_score
# Generating dataset:
# Y = A*sin(B(X + C)) + D
# A = Amplitude
# Period = 2*pi/B
# Period = Length of One Cycle
# C = Phase Shift (In Radian)
# D = Vertical Shift
X = np.linspace(0,1,100) #(Start,End,Points)
# Here…
# A = 1
# B= 2*pi
# B = 2*pi/Period
# Period = 1
# C = 0
# D = 0
Y = 1*np.sin(2*np.pi*X)
# Adding some Noise :
Noise = 0.4*np.random.normal(size=100)
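# The source breaks off here; a minimal sketch of the fit these comments set
# up (sine_func and the p0 guess are assumptions, not the original code):
def sine_func(x, a, b, c, d):
    return a * np.sin(b * (x + c)) + d

# popt, pcov = curve_fit(sine_func, X, Y + Noise, p0=[1., 2. * np.pi, 0., 0.])
# print("R^2:", r2_score(Y + Noise, sine_func(X, *popt)))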
import cv2
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
def cal_hist_point(gt, score, thr_l=0.4, thr_r=0.5):
p = 0
n = 0
gt_len = len(gt)
score_len = len(score)
comp_len = min(gt_len, score_len)
for i in range(0, comp_len):
if score[i] >= thr_l and score[i] < thr_r:
if gt[i] == 1.0:
p += 1
else:
n += 1
print("range[{}, {}] p={}, n={}".format(thr_l, thr_r, p, n))
return thr_l, thr_r, p, n
def cal_far_frr_point(gt, score, thr=0.5):
# far = false acceptance rate = false positive / inter match num
# frr = false rejection rate = false negative / intra match num
tp = 0
tn = 0
fp = 0
fn = 0
inter_num = 0
intra_num = 0
gt_len = len(gt)
score_len = len(score)
comp_len = min(gt_len, score_len)
for i in range(0, comp_len):
if float(gt[i]) >= 1.0:
intra_num += 1
if float(gt[i]) <= 0.0:
inter_num += 1
if float(score[i]) >= thr and float(gt[i]) >= 1.0:
tp += 1
elif float(score[i]) < thr and float(gt[i]) <= 0.0:
tn += 1
elif float(score[i]) >= thr and float(gt[i]) <= 0.0:
fp += 1
elif float(score[i]) < thr and float(gt[i]) >= 1.0:
fn += 1
frr = float(fn) / intra_num
far = float(fp) / inter_num
# print("thr={}, tp={}, fp={}, tn={}, fn={}".format(thr, tp, fp, tn, fn))
# print("intra_num={}, inter_num={}".format(intra_num, inter_num))
print("FRR={}, FAR={}".format(frr, far))
return frr, far
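# Worked example of cal_far_frr_point (toy labels/scores, not from the score
# files loaded below):
# >>> cal_far_frr_point([1, 1, 0, 0], [0.9, 0.2, 0.6, 0.1], thr=0.5)
# FRR=0.5, FAR=0.5
# (0.5, 0.5)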
def cal_roc_point(gt, score, thr=0.5):
tp = 0
tn = 0
fp = 0
fn = 0
# tpr = recall = tp / (tp + fn)
# fpr = fp / (fp + tn)
# fp
gt_len = len(gt)
score_len = len(score)
comp_len = min(gt_len, score_len)
for i in range(0, comp_len):
if float(score[i]) >= thr and float(gt[i]) >= 1.0:
tp += 1
elif float(score[i]) < thr and float(gt[i]) <= 0.0:
tn += 1
elif float(score[i]) >= thr and float(gt[i]) <= 0.0:
fp += 1
elif float(score[i]) < thr and float(gt[i]) >= 1.0:
fn += 1
# print("thr={}, tp={}, fp={}, tn={}, fn={}".format(thr, tp, fp, tn, fn))
recall = float(tp) / (tp + fn)
print("recall={}, fp={}".format(recall, fp))
return recall, fp
# gt_file = "face_list_lfw_gt.txt"
# score_file = "vggface2_lfwdata/facenet_vggface2_inception_resnet_v2_192k_lfw.txt"
# gt_file = "face_list_gt.txt"
# score_file = "vggface2_160data/facenet_vggface2_inception_resnet_v2_192k_160data.txt"
if __name__ == '__main__':
gt_file = sys.argv[1]
score_file = sys.argv[2]
if not os.path.exists(gt_file):
print("{} does not exist, quit".format(gt_file))
exit()
if not os.path.exists(score_file):
print("{} does not exist, quit".format(score_file))
exit()
list_gt_fd = open(gt_file, "r")
gt_lines = list_gt_fd.readlines()
list_score_fd = open(score_file, "r")
score_lines = list_score_fd.readlines()
gt_list = []
score_list = []
for idx, l in enumerate(gt_lines):
gt, lm, rm = l.strip("\n").split(" ")
score = score_lines[idx].strip("\n")
# print(gt, score)
# if int(gt) == 1 and float(score) < 0.2:
# print("____{}_{}".format(score, idx))
#
# if int(gt) == 0 and float(score) > 0.5:
# print("####{}_{}".format(score, idx))
gt_list.append(float(gt))
score = score.split("[[")[1].split("]]")[0]
score_list.append(float(score))
thrs = np.arange(0, 1, 0.01)[::-1]
print(thrs)
# calculate roc
recall_list = []
fp_list = []
for thr in thrs:
recall, fp = cal_roc_point(gt_list, score_list, thr)
recall_list.append(recall)
fp_list.append(fp)
# calculate auc
fp_max = max(fp_list)
fpr_list = []
for idx in range(0, len(fp_list)):
fpr = float(fp_list[idx])/fp_max
fpr_list.append(fpr)
print("fpr={}".format(fpr))
auc = 0
for idx in range(0, len(recall_list) - 1):
auc += (recall_list[idx] + recall_list[idx + 1]) * (fpr_list[idx + 1] - fpr_list[idx]) / 2
print("auc={}".format(auc))
print("AUC={}".format(auc))
# calculate far & frr
frr_list = []
far_list = []
for thr in thrs:
frr, far = cal_far_frr_point(gt_list, score_list, thr)
frr_list.append(frr)
far_list.append(far)
    thrs = np.arange(0, 1.0, 0.1)
import numpy as np
import numpy.ma as ma
import numpy.testing as npt
import pandas as pd
import pymc3_ext as pm
import scipy.sparse as sps
import theano
import theano.tensor as tt
import theano.sparse as sparse
class TestHelperFunc:
def test_pandas_to_array(self):
"""
Ensure that pandas_to_array returns the dense array, masked array,
graph variable, TensorVariable, or sparse matrix as appropriate.
"""
# Create the various inputs to the function
sparse_input = sps.csr_matrix(np.eye(3))
dense_input = np.arange(9).reshape((3, 3))
input_name = 'input_variable'
theano_graph_input = tt.as_tensor(dense_input, name=input_name)
pandas_input = pd.DataFrame(dense_input)
# All the even numbers are replaced with NaN
missing_pandas_input = pd.DataFrame(np.array([[np.nan, 1, np.nan],
[3, np.nan, 5],
[np.nan, 7, np.nan]]))
masked_array_input = ma.array(dense_input,
mask=(np.mod(dense_input, 2) == 0))
# Create a generator object. Apparently the generator object needs to
# yield numpy arrays.
square_generator = (np.array([i**2], dtype=int) for i in range(100))
# Alias the function to be tested
func = pm.model.pandas_to_array
#####
# Perform the various tests
#####
# Check function behavior with dense arrays and pandas dataframes
# without missing values
for input_value in [dense_input, pandas_input]:
func_output = func(input_value)
assert isinstance(func_output, np.ndarray)
assert func_output.shape == input_value.shape
            npt.assert_allclose(func_output, dense_input)
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, Calour development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from unittest import main
from os.path import join
from numpy.testing import assert_array_equal, assert_almost_equal
import numpy as np
import pandas as pd
import pandas.util.testing as pdt
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.model_selection import KFold
import calour as ca
from calour._testing import Tests
from calour.training import (
plot_cm, plot_roc, plot_prc, plot_scatter,
SortedStratifiedKFold, RepeatedSortedStratifiedKFold,
_interpolate_precision_recall)
class TTests(Tests):
def setUp(self):
super().setUp()
self.test2_sparse = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, normalize=None)
self.test2_dense = ca.read(self.test2_biom, self.test2_samp, self.test2_feat, sparse=False, normalize=None)
def test_add_sample_metadata_as_features(self):
new = self.test2_sparse.add_sample_metadata_as_features(['categorical'])
dat = new.data.toarray()
assert_array_equal(dat[:, 0:3],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]] * 3)
self.assertListEqual(new.feature_metadata.index[:3].tolist(),
['categorical=A', 'categorical=B', 'categorical=C'])
def test_add_sample_metadata_as_features_dense(self):
new = self.test2_dense.add_sample_metadata_as_features(['categorical'])
assert_array_equal(new.data[:, 0:3],
[[1, 0, 0], [0, 1, 0], [0, 0, 1]] * 3)
self.assertListEqual(new.feature_metadata.index[:3].tolist(),
['categorical=A', 'categorical=B', 'categorical=C'])
def test_split_train_test(self):
train_X, test_X, train_y, test_y = self.test2_dense.split_train_test(
test_size=3, field='group', stratify='categorical', random_state=7)
self.assertListEqual(test_y.tolist(), [1, 2, 1])
self.assertListEqual(test_y.index.tolist(), ['S3', 'S8', 'S1'])
self.assertListEqual(train_y.tolist(), [2, 1, 1, 1, 1, 1])
self.assertListEqual(train_y.index.tolist(), ['S9', 'S6', 'S5', 'S2', 'S4', 'S7'])
def test_regress(self):
diabetes = datasets.load_diabetes()
X = diabetes.data[:9]
y = diabetes.target[:9]
smd = pd.DataFrame({'diabetes': y})
exp = ca.Experiment(X, smd, sparse=False)
run = exp.regress('diabetes', KNeighborsRegressor(), KFold(3, random_state=0))
res = next(run)
obs = pd.read_table(join(self.test_data_dir, 'diabetes_pred.txt'), index_col=0)
# make sure the column order are the same for comparison
pdt.assert_frame_equal(res.sort_index(axis=1), obs.sort_index(axis=1))
def test_plot_scatter(self):
res = pd.read_table(join(self.test_data_dir, 'diabetes_pred.txt'), index_col=0)
title = 'foo'
ax = plot_scatter(res, title=title)
self.assertEqual(title, ax.get_title())
cor = 'r=-0.62 p-value=0.078'
self.assertEqual(cor, ax.texts[0].get_text())
dots = []
for collection in ax.collections:
dots.append(collection.get_offsets())
assert_array_equal(np.concatenate(dots, axis=0),
res[['Y_TRUE', 'Y_PRED']].values)
def test_classify(self):
iris = datasets.load_iris()
n = len(iris.target)
np.random.seed(0)
i = np.random.randint(0, n, 36)
X = iris.data[i]
y = iris.target[i]
d = dict(enumerate(iris.target_names))
smd = pd.DataFrame({'plant': y}).replace(d)
exp = ca.Experiment(X, smd, sparse=False)
run = exp.classify('plant', KNeighborsClassifier(),
predict='predict_proba',
cv=KFold(3, random_state=0))
res = next(run)
obs = pd.read_table(join(self.test_data_dir, 'iris_pred.txt'), index_col=0)
pdt.assert_frame_equal(res, obs)
# plot_roc(res)
# from matplotlib import pyplot as plt
# plt.show()
def test_plot_roc_multi(self):
result = pd.read_table(join(self.test_data_dir, 'iris_pred.txt'))
ax = plot_roc(result)
legend = ax.get_legend()
exp = {'Luck',
'setosa (0.99 $\pm$ 0.00)',
'virginica (0.96 $\\pm$ 0.05)',
'versicolor (0.95 $\\pm$ 0.07)'}
obs = {i.get_text() for i in legend.get_texts()}
self.assertSetEqual(exp, obs)
# from matplotlib import pyplot as plt
# plt.show()
def test_plot_roc_binary(self):
result = pd.read_table(join(self.test_data_dir, 'iris_pred.txt'))
result['Y_TRUE'] = ['virginica' if i == 'virginica' else 'not virginica'
for i in result['Y_TRUE']]
result['not virginica'] = 1 - result['virginica']
ax = plot_roc(result, classes=['virginica'])
# from matplotlib import pyplot as plt
# plt.show()
legend = ax.get_legend()
exp = {'Luck',
'virginica (0.96 $\pm$ 0.05)'}
obs = {i.get_text() for i in legend.get_texts()}
self.assertSetEqual(exp, obs)
def test_interpolate_precision_recall(self):
n = 9
recall = np.linspace(0.0, 1.0, num=n)
rand = np.random.RandomState(9)
precision = rand.rand(n) * (1 - recall)
x = np.linspace(0, 1, num=20)
obs = _interpolate_precision_recall(x, recall, precision)
exp = np.array([0.43914, 0.43914, 0.43914, 0.37183, 0.37183, 0.104627,
0.104627, 0.104627, 0.104627, 0.104627, 0.104627, 0.104627,
0.104627, 0.104627, 0.104627, 0.031013, 0.031013, 0.,
0., 0.])
assert_almost_equal(obs, exp, decimal=5)
# # use the plot to visually check the func works as expected
# from matplotlib import pyplot as plt
# fig, axes = plt.subplots(nrows=2, ncols=1)
# axes[0].hold(True)
# axes[0].plot(recall, precision, '--b')
# decreasing_max_precision = np.maximum.accumulate(precision[::-1])[::-1]
# axes[0].step(recall, decreasing_max_precision, '-r')
# axes[1].step(x, obs, '-g')
# plt.show()
def test_plot_prc(self):
# generating test data set:
# http://scikit-learn.org/stable/auto_examples/model_selection/plot_precision_recall.html#plot-the-precision-recall-curve
# from sklearn import svm, datasets
# from sklearn.model_selection import train_test_split
# iris = datasets.load_iris()
# X = iris.data
# y = iris.target
# # Add noisy features
# random_state = np.random.RandomState(0)
# n_samples, n_features = X.shape
# X = np.c_[X, random_state.randn(n_samples, 200 * n_features)]
# # Limit to the two first classes, and split into training and test
# X_train, X_test, y_train, y_test = train_test_split(X[y < 2], y[y < 2],
# test_size=.5,
# random_state=random_state)
# # Create a simple classifier
# classifier = svm.LinearSVC(random_state=random_state)
# classifier.fit(X_train, y_train)
# y_score = classifier.decision_function(X_test)
# result = pd.DataFrame({'Y_TRUE': ['setosa' if i == 1 else 'non-setosa' for i in y_test],
# 'setosa': y_score, 'CV': 0})
f = join(self.test_data_dir, 'plot_prc.txt')
result = pd.read_table(f, index_col=0)
ax = plot_prc(result, classes=['setosa'])
legend = ax.get_legend()
exp = {'iso-f1 curves',
'setosa (0.88 $\\pm$ 0.00)'}
obs = {i.get_text() for i in legend.get_texts()}
self.assertSetEqual(exp, obs)
def test_plot_cm(self):
result = pd.read_table(join(self.test_data_dir, 'iris_pred.txt'), index_col=0)
ax = plot_cm(result, classes=['setosa', 'virginica', 'versicolor'])
# from matplotlib import pyplot as plt
# plt.show()
# make sure you don't confuse x, y label
self.assertEqual(ax.get_xlabel(), 'Prediction')
self.assertEqual(ax.get_ylabel(), 'Observation')
obs = [((0, 0), '13'), ((1, 0), '0'), ((2, 0), '0'),
((0, 1), '0'), ((1, 1), '9'), ((2, 1), '1'),
((0, 2), '0'), ((1, 2), '3'), ((2, 2), '10')]
for exp, obs in zip(ax.texts, obs):
self.assertEqual(exp.get_text(), obs[1])
self.assertEqual(exp.get_position(), obs[0])
class RTests(Tests):
def setUp(self):
self.y = np.array([9.1, 7.1, 8.1, 5.1, 3.1, 1.1, 2.1, 6.1, 4.1])
self.X = self.y[:, np.newaxis]
def test_sorted_stratified_k3(self):
k = SortedStratifiedKFold(3, shuffle=True, random_state=9)
splits = [[9.1, 8.1, 5.1, 3.1, 2.1, 4.1], [7.1, 1.1, 6.1],
[7.1, 8.1, 1.1, 2.1, 6.1, 4.1], [9.1, 5.1, 3.1],
[9.1, 7.1, 5.1, 3.1, 1.1, 6.1], [8.1, 2.1, 4.1]]
for i, (train, test) in enumerate(k.split(self.X, self.y)):
exp_train = splits[i * 2]
exp_test = splits[i * 2 + 1]
assert_array_equal(self.y[train], exp_train)
            assert_array_equal(self.y[test], exp_test)
################################################################################
#
# test_estimator.py - testing the basic estimator class
#
# author: <NAME> <<EMAIL>>
#
################################################################################
from nose.tools import assert_raises, assert_true, assert_equal
from pytram.estimator import Estimator, ExpressionError
import numpy as np
def test_expression_error_None():
"""test Estimator throws ExpressionError with None"""
assert_raises(ExpressionError, Estimator, None)
def test_expression_error_int():
"""test Estimator throws ExpressionError with number"""
assert_raises(ExpressionError, Estimator, 5)
def test_expression_error_list():
"""test Estimator throws ExpressionError with list"""
assert_raises(ExpressionError, Estimator, [1, 2])
def test_expression_error_dim():
"""test Estimator throws ExpressionError with wrong dimension"""
assert_raises(ExpressionError, Estimator, np.ones(shape=(2, 2), dtype=np.intc))
def test_expression_error_markov():
"""test Estimator throws ExpressionError with wrong Markov state count"""
    assert_raises(ExpressionError, Estimator, np.ones(shape=(2, 2, 3), dtype=np.intc))
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import numpy as np
import operator
import tvm
from tvm.contrib import graph_runtime
from tvm.relay.testing.config import ctx_list
from tvm import relay
import mxnet as mx
from mxnet import gluon
from mxnet.gluon.model_zoo import vision
import model_zoo
def verify_mxnet_frontend_impl(mx_symbol,
data_shape=(1, 3, 224, 224),
out_shape=(1, 1000),
gluon_impl=False,
name=None,
dtype='float32'):
"""Use name different from test to avoid pytest picking it up"""
if gluon_impl:
def get_gluon_output(name, x):
net = vision.get_model(name)
net.collect_params().initialize(mx.init.Xavier())
net_sym = gluon.nn.SymbolBlock(outputs=net(mx.sym.var('data')),
inputs=mx.sym.var('data'),
params=net.collect_params())
out = net_sym(mx.nd.array(x.astype(dtype))).asnumpy()
return out, net_sym
else:
def get_mxnet_output(symbol, x, dtype='float32'):
from collections import namedtuple
Batch = namedtuple('Batch', ['data'])
mod = mx.mod.Module(symbol, label_names=None)
mod.bind(data_shapes=[('data', x.shape)], for_training=False)
mod.init_params()
mod.forward(Batch([mx.nd.array(x.astype(dtype))]))
out = mod.get_outputs()[0].asnumpy()
args, auxs = mod.get_params()
return out, args, auxs
def get_tvm_output(symbol, x, args, auxs, target, ctx, dtype='float32'):
shape_dict = {"data": x.shape}
if gluon_impl:
mod, params = relay.frontend.from_mxnet(symbol, shape_dict)
else:
mod, params = relay.frontend.from_mxnet(symbol,
shape_dict,
arg_params=args,
aux_params=auxs)
with relay.build_config(opt_level=3):
graph, lib, params = relay.build(mod, target, params=params)
m = graph_runtime.create(graph, lib, ctx)
# set inputs
m.set_input("data", tvm.nd.array(x.astype(dtype)))
m.set_input(**params)
m.run()
# get outputs
out = m.get_output(0, tvm.nd.empty(out_shape, dtype))
return out.asnumpy()
# random input
x = np.random.uniform(size=data_shape)
if gluon_impl:
gluon_out, gluon_sym = get_gluon_output(name, x)
for target, ctx in ctx_list():
tvm_out = get_tvm_output(gluon_sym, x, None, None, target, ctx, dtype)
tvm.testing.assert_allclose(gluon_out, tvm_out, rtol=1e-5, atol=1e-5)
else:
mx_out, args, auxs = get_mxnet_output(mx_symbol, x, dtype)
assert "data" not in args
for target, ctx in ctx_list():
tvm_out = get_tvm_output(mx_symbol, x, args, auxs, target, ctx, dtype)
tvm.testing.assert_allclose(mx_out, tvm_out, rtol=1e-5, atol=1e-5)
def test_forward_mlp():
mlp = model_zoo.mx_mlp()
verify_mxnet_frontend_impl(mlp,
data_shape=(1, 1, 28, 28),
out_shape=(1, 10))
def test_forward_vgg():
for n in [11]:
mx_sym = model_zoo.mx_vgg(n)
verify_mxnet_frontend_impl(mx_sym)
def test_forward_resnet():
for n in [18]:
mx_sym = model_zoo.mx_resnet(18)
verify_mxnet_frontend_impl(mx_sym)
def test_forward_elu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.LeakyReLU(data, act_type='elu')
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_rrelu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.LeakyReLU(data, act_type='rrelu', lower_bound=0.3, upper_bound=0.7)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_prelu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.LeakyReLU(data, act_type='prelu')
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_softrelu():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.Activation(data, act_type='softrelu')
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_fc_flatten():
# test flatten=True option in mxnet 0.11.1
data = mx.sym.var('data')
try:
mx_sym = mx.sym.FullyConnected(data, num_hidden=100, flatten=True)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 100))
mx_sym = mx.sym.FullyConnected(mx.sym.Flatten(data), num_hidden=100, flatten=False)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 100))
    except Exception:
        pass
def test_forward_clip():
data = mx.sym.var('data')
data = mx.sym.concat(data, -data, dim=1) # negative part explicitly
mx_sym = mx.sym.clip(data, a_min=0, a_max=1)
verify_mxnet_frontend_impl(mx_sym, (1, 3, 100, 100), (1, 6, 100, 100))
def test_forward_split():
data = mx.sym.var('data')
mx_sym = mx.sym.split(data, axis=1, num_outputs=4, squeeze_axis=False)
verify_mxnet_frontend_impl(mx_sym, (1, 4, 2, 1), (1, 1, 2, 1))
def test_forward_split_squeeze():
data = mx.sym.var('data')
mx_sym = mx.sym.split(data, axis=1, num_outputs=4, squeeze_axis=True)
verify_mxnet_frontend_impl(mx_sym, (1, 4, 2, 1), (1, 2, 1))
def test_forward_expand_dims():
data = mx.sym.var('data')
mx_sym = mx.sym.expand_dims(data, axis=1)
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 1, 3, 4))
def test_forward_pooling():
data = mx.sym.var('data')
mx_sym = mx.sym.Pooling(data, kernel=(3, 3), pad=(1, 1), pool_type='avg')
verify_mxnet_frontend_impl(mx_sym, (1, 20, 8, 8), (1, 20, 8, 8))
mx_sym = mx.sym.Pooling(data, kernel=(3, 3), pad=(1, 1), pool_type='max')
verify_mxnet_frontend_impl(mx_sym, (1, 20, 8, 8), (1, 20, 8, 8))
def test_forward_adaptive_pooling():
data = mx.sym.var('data')
mx_sym = mx.sym.contrib.AdaptiveAvgPooling2D(data, output_size=(1,))
verify_mxnet_frontend_impl(mx_sym, (1, 20, 8, 8), (1, 20, 1, 1))
mx_sym = mx.sym.contrib.AdaptiveAvgPooling2D(data, output_size=(3, 3))
verify_mxnet_frontend_impl(mx_sym, (1, 20, 8, 8), (1, 20, 3, 3))
def test_forward_lrn():
data = mx.sym.var('data')
mx_sym = mx.sym.LRN(data, alpha=2, beta=2, knorm=1, nsize=5)
verify_mxnet_frontend_impl(mx_sym, (1, 10, 24, 24), (1, 10, 24, 24))
def test_forward_ones():
data = mx.sym.var('data')
ones = mx.sym.ones(shape=(2, 3, 4), dtype='float32')
mx_sym = mx.sym.elemwise_add(data, ones)
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_zeros():
data = mx.sym.var('data')
zeros = mx.sym.zeros(shape=(2, 3, 4), dtype='float32')
mx_sym = mx.sym.elemwise_add(data, zeros)
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_ones_like():
data = mx.sym.var('data')
mx_sym = mx.sym.ones_like(data, dtype='float32')
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_zeros_like():
data = mx.sym.var('data')
mx_sym = mx.sym.zeros_like(data, dtype='float32')
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4), (2, 3, 4))
def test_forward_argmax():
data = mx.sym.var('data')
mx_sym = mx.sym.argmax(data, axis=1)
verify_mxnet_frontend_impl(mx_sym, (5, 3), (5,))
def test_forward_argmin():
data = mx.sym.var('data')
mx_sym = mx.sym.argmin(data, axis=0)
verify_mxnet_frontend_impl(mx_sym, (5, 4), (4,))
def test_forward_slice():
data = mx.sym.var('data')
mx_sym = mx.sym.slice(data, begin=(0, 1), end=(2, 4))
verify_mxnet_frontend_impl(mx_sym, (3, 4), (2, 3))
mx_sym = mx.sym.slice(data, begin=(-1, 1), end=(-3, 4), step=(-1, 2))
verify_mxnet_frontend_impl(mx_sym, (3, 4), (2, 2))
def test_forward_where():
cond = mx.sym.var('cond')
x = mx.sym.var('x')
y = mx.sym.var('y')
dshape = (2, 2)
dtype = 'float32'
mx_sym = mx.sym.where(cond, x, y)
np_cond = np.array([[0, 1], [-1, 0]]).astype(dtype)
np_x = np.random.uniform(size=dshape).astype(dtype)
np_y = np.random.uniform(size=dshape).astype(dtype)
mx_cond = mx.nd.array(np_cond)
mx_x = mx.nd.array(np_x)
mx_y = mx.nd.array(np_y)
shapes = {'cond': dshape, 'x': dshape, 'y': dshape}
mod = mx.mod.Module(mx_sym, label_names=None, data_names=['cond', 'x', 'y'])
mod.bind(data_shapes=shapes.items(), for_training=False)
mod.init_params()
args, auxs = mod.get_params()
mx_out = mx.nd.where(mx_cond, mx_x, mx_y).asnumpy()
mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, args, auxs)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(np_cond, np_x, np_y)
tvm.testing.assert_allclose(op_res.asnumpy(), mx_out)
def test_forward_arange():
def _mx_symbol(F, start, stop, step):
if start is None and step is None:
sym = F.arange(stop)
elif start is None:
sym = F.arange(stop, step=step)
elif step is None:
sym = F.arange(start, stop)
else:
sym = F.arange(start, stop, step)
return sym
def verify(start, stop, step):
ref_res = _mx_symbol(mx.nd, start, stop, step).asnumpy()
mx_sym = _mx_symbol(mx.sym, start, stop, step)
mod, _ = relay.frontend.from_mxnet(mx_sym, {})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()()
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res)
verify(0, 20, None)
verify(0, 20, 2)
verify(1, 20, None)
verify(1, 20, 2)
verify(1, 20, 1.5)
verify(1, 20.5, None)
verify(1, 20, 3)
verify(20, 1, -1)
verify(20, 1, -1.5)
def _mx_symbol(F, op_name, inputs):
op = getattr(F, op_name)
return op(*inputs)
def test_forward_broadcast_ops():
for op in ["broadcast_add", "broadcast_sub", "broadcast_mul",
"broadcast_div", "broadcast_mod", "broadcast_maximum",
"broadcast_minimum", "broadcast_equal", "broadcast_not_equal",
"broadcast_greater", "broadcast_greater_equal",
"broadcast_lesser", "broadcast_lesser_equal"]:
a_shape = (3, 4, 5)
b_shape = (4, 5)
if op == "broadcast_mod":
dtype = 'int32'
a_np = np.random.randint(1, 100, size=a_shape).astype(dtype)
b_np = np.random.randint(1, 100, size=b_shape).astype(dtype)
else:
dtype = 'float32'
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_np = np.random.uniform(size=b_shape).astype(dtype)
mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), mx.sym.var('b')])
ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
shapes = {'a': a_shape, 'b': b_shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(a_np, b_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
def test_forward_elemwise_ops():
for op in ["elemwise_add", "elemwise_sub", "elemwise_mul",
"elemwise_div", "maximum", "minimum"]:
shape = (3, 4, 5)
dtype = 'float32'
a_np = np.random.uniform(size=shape).astype(dtype)
b_np = np.random.uniform(size=shape).astype(dtype)
mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), mx.sym.var('b')])
ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), mx.nd.array(b_np)])
shapes = {'a': shape, 'b': shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(a_np, b_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
def test_forward_scalar_ops():
for op in [operator.add, operator.sub, operator.mul, operator.truediv,
operator.pow, operator.lt, operator.le, operator.eq,
operator.ne, operator.gt, operator.ge]:
dtype='float32'
a_shape = (3, 4, 5)
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_scalar = 2.3
mx_sym = op(mx.sym.var('a'), b_scalar)
ref_res = op(mx.nd.array(a_np), b_scalar)
shapes = {'a': a_shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(a_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
for op in ["maximum", "minimum"]:
dtype='float32'
a_shape = (3, 4, 5)
a_np = np.random.uniform(size=a_shape).astype(dtype)
b_scalar = 2.3
mx_sym = _mx_symbol(mx.sym, op, [mx.sym.var('a'), b_scalar])
ref_res = _mx_symbol(mx.nd, op, [mx.nd.array(a_np), b_scalar])
shapes = {'a': a_shape}
mod, _ = relay.frontend.from_mxnet(mx_sym, shapes, dtype)
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(a_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
def test_forward_slice_axis():
def verify(shape, axis, begin, end):
data_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.slice_axis(mx.nd.array(data_np), axis, begin, end)
mx_sym = mx.sym.slice_axis(mx.sym.var("data"), axis, begin, end)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"data": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(data_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((3, 4), 0, 1, 2)
verify((3, 4), 0, 1, None)
verify((3, 4), 1, 0, 2)
verify((3, 4), 1, -3, -1)
verify((3, 4), -1, -3, -1)
def test_forward_slice_like():
def verify(x_shape, y_shape, axes):
x_np = np.random.uniform(size=x_shape).astype("float32")
y_np = np.random.uniform(size=y_shape).astype("float32")
if axes is None:
ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np))
mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"))
else:
ref_res = mx.nd.slice_like(mx.nd.array(x_np), mx.nd.array(y_np), axes=axes)
mx_sym = mx.sym.slice_like(mx.sym.var("x"), mx.sym.var("y"), axes=axes)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": x_shape, "y": y_shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np, y_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((3, 4), (2, 3), None)
verify((3, 4), (2, 3), (0, 1))
verify((3, 4), (2, 3), (0))
verify((3, 4), (2, 3), (-1))
def test_forward_l2_normalize():
data = mx.sym.var('data')
mx_sym = mx.sym.L2Normalization(data, mode="channel")
verify_mxnet_frontend_impl(mx_sym, (2, 3, 4, 5), (2, 3, 4, 5))
def test_forward_shape_array():
def verify(shape):
x_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.shape_array(mx.nd.array(x_np))
mx_sym = mx.sym.shape_array(mx.sym.var("x"))
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1,))
verify((3, 4, 5))
verify((3, 4, 5, 6))
def test_forward_squeeze():
def verify(shape, axis):
x_np = np.random.uniform(size=shape).astype("float32")
if axis is None:
ref_res = mx.nd.squeeze(mx.nd.array(x_np))
mx_sym = mx.sym.squeeze(mx.sym.var("x"))
else:
ref_res = mx.nd.squeeze(mx.nd.array(x_np), axis=axis)
mx_sym = mx.sym.squeeze(mx.sym.var("x"), axis=axis)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1, 3, 1), None)
verify((1, 3, 1), 0)
verify((1, 3, 1), 2)
verify((1, 3, 1), (0, 2))
def test_forward_broadcast_axis():
def verify(shape, axis, size):
x_np = np.random.uniform(size=shape).astype("float32")
ref_res = mx.nd.broadcast_axis(mx.nd.array(x_np), axis=axis, size=size)
mx_sym = mx.sym.broadcast_axis(mx.sym.var("x"), axis=axis, size=size)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1, 2, 1), 2, 3)
verify((1, 2, 1), (0, 2), (2, 3))
def test_forward_full():
def verify(val, shape, dtype):
ctx = mx.cpu()
ref_res = mx.nd.full(shape, val, dtype=dtype)
mx_sym = mx.sym.full(shape, val, dtype=dtype)
mod, _ = relay.frontend.from_mxnet(mx_sym, {})
for target, ctx in ctx_list():
# Skip testing graph runtime because this op will be optimized out
# by constant folding.
for kind in ["debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()()
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify(2, (3, 4), "float32")
verify(2, (3, 4), "int32")
verify(3.5, (1, 3, 4), "float32")
def test_forward_embedding():
def verify(data_shape, weight_shape):
in_dim, out_dim = weight_shape
x_np = np.random.randint(0, weight_shape[0], size=data_shape).astype("float32")
w_np = np.random.uniform(size=weight_shape).astype("float32")
ref_res = mx.nd.Embedding(mx.nd.array(x_np), mx.nd.array(w_np),
input_dim=in_dim, output_dim=out_dim)
mx_sym = mx.sym.Embedding(mx.sym.var("x"), mx.sym.var("w"),
input_dim=in_dim, output_dim=out_dim)
mod, _ = relay.frontend.from_mxnet(
mx_sym, {"x": data_shape, "w": weight_shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x=x_np, w=w_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((2, 2), (4, 5))
verify((2, 3, 4), (4, 5))
def test_forward_smooth_l1():
data = mx.sym.var('data')
mx_sym = mx.sym.smooth_l1(data)
verify_mxnet_frontend_impl(mx_sym, (3, 4), (3, 4))
mx_sym = mx.sym.smooth_l1(data, scalar=1.0)
verify_mxnet_frontend_impl(mx_sym, (3, 4), (3, 4))
def test_forward_take():
def verify(shape, indices_src, axis, mode="clip"):
x_np = np.random.uniform(size=shape).astype("float32")
indices_np = np.array(indices_src, dtype="float32")
ref_res = mx.nd.take(mx.nd.array(x_np), mx.nd.array(indices_np), axis, mode)
mx_sym = mx.sym.take(mx.sym.var("x"), mx.sym.var("y"), axis, mode)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": shape, "y": indices_np.shape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_np, indices_np)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((2,2), [[[1,0],[0,1]]], 0)
verify((2,2), [[[1,0],[0,1]]], 1)
verify((4,3,5,6), [[2,1,0,0]], -2)
verify((3,4), [-1, 5], 0)
verify((3,4), [-1, 5], 0, mode="wrap")
verify((3,4), [-1, 5], 1)
verify((3,4), [-1, 5], 1, mode="wrap")
def test_forward_gather_nd():
def verify(xshape, yshape, y_data):
x_data = np.random.uniform(size=xshape).astype("float32")
ref_res = mx.nd.gather_nd(mx.nd.array(x_data), mx.nd.array(y_data))
mx_sym = mx.sym.gather_nd(mx.sym.var("x_data"), mx.sym.var("y_data"))
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x_data": xshape, "y_data": yshape}, {"x_data": "float32", "y_data": "int32"})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(x_data, y_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((2, 2), (2, 3), [[1, 1, 0], [0, 1, 0]])
verify((2, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify((3, 2, 2), (2, 2), [[0, 1], [1, 0]])
verify((3, 2), (2, 2, 3), [[[0, 1, 2], [2, 0, 1]], [[0, 0, 0], [1, 1, 1]]])
def test_forward_bilinear_resize():
# add tests including scale_height and scale_width when mxnet is updated to version 1.5
data = mx.sym.var('data')
mx_sym = mx.sym.contrib.BilinearResize2D(data, height=5, width=10)
verify_mxnet_frontend_impl(mx_sym, (1, 2, 3, 4), (1, 2, 5, 10))
def test_forward_rnn_layer():
def verify(mode, seq_len, input_size, hidden_size, num_layers,
batch=1, init_states=True, bidirectional=False):
if mode == "rnn":
layer = gluon.rnn.RNN(hidden_size, num_layers, bidirectional=bidirectional)
elif mode == "gru":
layer = gluon.rnn.GRU(hidden_size, num_layers, bidirectional=bidirectional)
else: # mode == "lstm"
layer = gluon.rnn.LSTM(hidden_size, num_layers, bidirectional=bidirectional)
num_states = 2 if mode == "lstm" else 1
layer.initialize()
layer.hybridize()
dtype = "float32"
directions = 2 if bidirectional else 1
data_np = np.random.uniform(size=(seq_len, batch, input_size)).astype(dtype)
data_mx = mx.nd.array(data_np)
if init_states:
shape_dict = {'data0': data_np.shape}
inputs = {'data0': data_np}
state_shape = (num_layers*directions, batch, hidden_size)
states_np = []
states_mx = []
for i in range(num_states):
s = np.random.uniform(size=state_shape).astype(dtype)
states_np.append(s)
states_mx.append(mx.nd.array(s))
shape_dict['data%s' % (i+1)] = s.shape
inputs['data%s' % (i+1)] = s
mx_out, mx_states = layer(data_mx, states_mx)
mx_res = [mx_out] + mx_states
else:
shape_dict = {'data': data_np.shape}
inputs = {'data': data_np}
mx_res = layer(data_mx)
mx_sym = layer._cached_graph[1]
mx_params = {}
for name, param in layer.collect_params().items():
mx_params[name] = param._reduce()
mod, params = relay.frontend.from_mxnet(
mx_sym, shape=shape_dict, arg_params=mx_params)
for target, ctx in ctx_list():
# only test graph runtime because debug runtime is too slow
for kind in ["graph"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
op_res = intrp.evaluate()(**inputs, **params)
if init_states:
assert len(op_res) == len(mx_res)
for i, val in enumerate(op_res):
tvm.testing.assert_allclose(
val.asnumpy(), mx_res[i].asnumpy(), rtol=1e-3)
else:
tvm.testing.assert_allclose(
op_res.asnumpy(), mx_res.asnumpy(), rtol=1e-3)
for mode in ["rnn", "gru", "lstm"]:
verify(mode, 1, 64, 64, 1)
verify(mode, 10, 64, 64, 2)
verify(mode, 10, 64, 32, 2)
verify(mode, 10, 64, 32, 2, batch=2)
verify(mode, 10, 32, 64, 1, bidirectional=True)
# The following two codeblocks need to be fixed for mxnet 1.5
# verify(mode, 10, 64, 64, 3, init_states=False)
# verify(mode, 10, 64, 64, 3, batch=2, bidirectional=True, init_states=False)
def test_forward_Crop():
def verify(xshape, yshape, offset=None):
x_data = np.random.uniform(size=xshape).astype("float32")
y_data = np.random.uniform(size=yshape).astype("float32")
if offset is None:
mx_sym = mx.sym.Crop(mx.sym.var("x"), mx.sym.var("y"))
ref_res = mx.nd.Crop(mx.nd.array(x_data), mx.nd.array(y_data))
else:
mx_sym = mx.sym.Crop(mx.sym.var("x"), mx.sym.var("y"), offset=offset)
ref_res = mx.nd.Crop(mx.nd.array(x_data), mx.nd.array(y_data), offset=offset)
mod, _ = relay.frontend.from_mxnet(mx_sym, {"x": xshape, "y": yshape})
for target, ctx in ctx_list():
for kind in ["graph", "debug"]:
intrp = relay.create_executor(kind, mod=mod, ctx=ctx, target=target)
if offset is None or offset == (0, 0):
op_res = intrp.evaluate()(x_data, y_data)
else:
op_res = intrp.evaluate()(x_data)
tvm.testing.assert_allclose(op_res.asnumpy(), ref_res.asnumpy())
verify((1, 3, 40, 40), (1, 3, 20, 20))
verify((1, 3, 40, 40), (1, 3, 20, 20), (0, 0))
verify((1, 3, 40, 40), (1, 3, 20, 20), (10, 10))
verify((5, 32, 40, 40), (5, 32, 25, 25))
verify((5, 32, 40, 40), (5, 32, 25, 25), (5, 5))
def test_forward_argsort():
def verify(shape, axis, is_ascend, dtype="float32"):
        x_np = np.random.uniform(size=shape).astype(dtype)
import time
import matplotlib.pyplot as pyplot
import networkx as networkx
import numpy as numpy
import scipy as scipy
import scipy.integrate
import seaborn
########################################################
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
# @ @#
# @ [LEGACY] BASIC SEIRS MODELS @#
# @ @#
# @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@#
########################################################
class SEIRSModel:
"""
A class to simulate the Deterministic SEIRS Model
===================================================
Params: beta Rate of transmission (exposure)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
beta_D Rate of transmission (exposure) for individuals with detected infections
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
            psi_I   Probability of positive test results for infectious individuals
q Probability of quarantined individuals interacting with others
initE Init number of exposed individuals
initI Init number of infectious individuals
            initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(
self,
initN,
beta,
sigma,
gamma,
xi=0,
mu_I=0,
mu_0=0,
nu=0,
p=0,
beta_D=None,
sigma_D=None,
gamma_D=None,
mu_D=None,
theta_E=0,
theta_I=0,
psi_E=0,
psi_I=0,
q=0,
initE=0,
initI=10,
initD_E=0,
initD_I=0,
initR=0,
initF=0,
):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = beta
self.sigma = sigma
self.gamma = gamma
self.xi = xi
self.mu_I = mu_I
self.mu_0 = mu_0
self.nu = nu
self.p = p
# Testing-related parameters:
self.beta_D = beta_D if beta_D is not None else self.beta
self.sigma_D = sigma_D if sigma_D is not None else self.sigma
self.gamma_D = gamma_D if gamma_D is not None else self.gamma
self.mu_D = mu_D if mu_D is not None else self.mu_I
        # Fall back to 0 rather than to a pre-existing self.* attribute,
        # which would not exist yet at this point in __init__:
        self.theta_E = theta_E if theta_E is not None else 0
        self.theta_I = theta_I if theta_I is not None else 0
        self.psi_E = psi_E if psi_E is not None else 0
        self.psi_I = psi_I if psi_I is not None else 0
        self.q = q if q is not None else 0
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tseries = numpy.array([0])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Counts of individuals with each state:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.N = numpy.array([int(initN)])
self.numE = numpy.array([int(initE)])
self.numI = numpy.array([int(initI)])
self.numD_E = numpy.array([int(initD_E)])
self.numD_I = numpy.array([int(initD_I)])
self.numR = numpy.array([int(initR)])
self.numF = numpy.array([int(initF)])
self.numS = numpy.array(
[
self.N[-1]
- self.numE[-1]
- self.numI[-1]
- self.numD_E[-1]
- self.numD_I[-1]
- self.numR[-1]
- self.numF[-1]
]
)
assert (
self.numS[0] >= 0
), "The specified initial population size N must be greater than or equal to the initial compartment counts."
@staticmethod
def system_dfes(
t,
variables,
beta,
sigma,
gamma,
xi,
mu_I,
mu_0,
nu,
beta_D,
sigma_D,
gamma_D,
mu_D,
theta_E,
theta_I,
psi_E,
psi_I,
q,
):
(
S,
E,
I,
D_E,
D_I,
R,
F,
) = variables # variables is a list with compartment counts as elements
N = S + E + I + D_E + D_I + R
dS = (
-(beta * S * I) / N
- q * (beta_D * S * D_I) / N
+ xi * R
+ nu * N
- mu_0 * S
)
dE = (
(beta * S * I) / N
+ q * (beta_D * S * D_I) / N
- sigma * E
- theta_E * psi_E * E
- mu_0 * E
)
dI = sigma * E - gamma * I - mu_I * I - theta_I * psi_I * I - mu_0 * I
dDE = theta_E * psi_E * E - sigma_D * D_E - mu_0 * D_E
dDI = (
theta_I * psi_I * I
+ sigma_D * D_E
- gamma_D * D_I
- mu_D * D_I
- mu_0 * D_I
)
dR = gamma * I + gamma_D * D_I - xi * R - mu_0 * R
dF = mu_I * I + mu_D * D_I
return [dS, dE, dI, dDE, dDI, dR, dF]
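    # For reference: with testing switched off (theta_E = theta_I = 0) and no
    # vital dynamics (mu_0 = nu = 0), the basic reproduction number of this
    # system reduces to R0 = beta / (gamma + mu_I).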
def run_epoch(self, runtime, dt=0.1):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create a list of times at which the ODE solver should output system values.
# Append this list of times as the model's time series
t_eval = numpy.arange(start=self.t, stop=self.t + runtime, step=dt)
# Define the range of time values for the integration:
t_span = (self.t, self.t + runtime)
# Define the initial conditions as the system's current state:
# (which will be the t=0 condition if this is the first run of this model,
# else where the last sim left off)
init_cond = [
self.numS[-1],
self.numE[-1],
self.numI[-1],
self.numD_E[-1],
self.numD_I[-1],
self.numR[-1],
self.numF[-1],
]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Solve the system of differential eqns:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
solution = scipy.integrate.solve_ivp(
lambda t, X: SEIRSModel.system_dfes(
t,
X,
self.beta,
self.sigma,
self.gamma,
self.xi,
self.mu_I,
self.mu_0,
self.nu,
self.beta_D,
self.sigma_D,
self.gamma_D,
self.mu_D,
self.theta_E,
self.theta_I,
self.psi_E,
self.psi_I,
self.q,
),
t_span=t_span,
y0=init_cond,
t_eval=t_eval,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Store the solution output as the model's time series and data series:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.tseries = numpy.append(self.tseries, solution["t"])
self.numS = numpy.append(self.numS, solution["y"][0])
self.numE = numpy.append(self.numE, solution["y"][1])
self.numI = numpy.append(self.numI, solution["y"][2])
self.numD_E = numpy.append(self.numD_E, solution["y"][3])
self.numD_I = numpy.append(self.numD_I, solution["y"][4])
self.numR = numpy.append(self.numR, solution["y"][5])
self.numF = numpy.append(self.numF, solution["y"][6])
self.t = self.tseries[-1]
def run(self, T, dt=0.1, checkpoints=None, verbose=False):
if T > 0:
self.tmax += T
else:
return False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-process checkpoint values:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if checkpoints:
numCheckpoints = len(checkpoints["t"])
paramNames = [
"beta",
"sigma",
"gamma",
"xi",
"mu_I",
"mu_0",
"nu",
"beta_D",
"sigma_D",
"gamma_D",
"mu_D",
"theta_E",
"theta_I",
"psi_E",
"psi_I",
"q",
]
for param in paramNames:
# For params that don't have given checkpoint values (or bad value given),
# set their checkpoint values to the value they have now for all checkpoints.
if (
param not in list(checkpoints.keys())
or not isinstance(checkpoints[param], (list, numpy.ndarray))
or len(checkpoints[param]) != numCheckpoints
):
checkpoints[param] = [getattr(self, param)] * numCheckpoints
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
# Run the simulation loop:
#%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
if not checkpoints:
self.run_epoch(runtime=self.tmax, dt=dt)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if verbose:
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
else: # checkpoints provided
for checkpointIdx, checkpointTime in enumerate(checkpoints["t"]):
# Run the sim until the next checkpoint time:
self.run_epoch(runtime=checkpointTime - self.t, dt=dt)
# Having reached the checkpoint, update applicable parameters:
print("[Checkpoint: Updating parameters]")
for param in paramNames:
setattr(self, param, checkpoints[param][checkpointIdx])
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
print("t = %.2f" % self.t)
if verbose:
print("\t S = " + str(self.numS[-1]))
print("\t E = " + str(self.numE[-1]))
print("\t I = " + str(self.numI[-1]))
print("\t D_E = " + str(self.numD_E[-1]))
print("\t D_I = " + str(self.numD_I[-1]))
print("\t R = " + str(self.numR[-1]))
print("\t F = " + str(self.numF[-1]))
if self.t < self.tmax:
self.run_epoch(runtime=self.tmax - self.t, dt=dt)
return True
def total_num_infections(self, t_idx=None):
if t_idx is None:
return self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:]
else:
return (
self.numE[t_idx]
+ self.numI[t_idx]
+ self.numD_E[t_idx]
+ self.numD_I[t_idx]
)
def plot(
self,
ax=None,
plot_S="line",
plot_E="line",
plot_I="line",
plot_R="line",
plot_F="line",
plot_D_E="line",
plot_D_I="line",
combine_D=True,
color_S="tab:green",
color_E="orange",
color_I="crimson",
color_R="tab:blue",
color_F="black",
color_D_E="mediumorchid",
color_D_I="mediumorchid",
color_reference="#E0E0E0",
dashed_reference_results=None,
dashed_reference_label="reference",
shaded_reference_results=None,
shaded_reference_label="reference",
vlines=[],
vline_colors=[],
vline_styles=[],
vline_labels=[],
ylim=None,
xlim=None,
legend=True,
title=None,
side_title=None,
plot_percentages=True,
):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Create an Axes object if None provided:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if not ax:
fig, ax = pyplot.subplots()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Prepare data series to be plotted:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Fseries = self.numF / self.N if plot_percentages else self.numF
Eseries = self.numE / self.N if plot_percentages else self.numE
Dseries = (
(self.numD_E + self.numD_I) / self.N
if plot_percentages
else (self.numD_E + self.numD_I)
)
D_Eseries = self.numD_E / self.N if plot_percentages else self.numD_E
D_Iseries = self.numD_I / self.N if plot_percentages else self.numD_I
Iseries = self.numI / self.N if plot_percentages else self.numI
Rseries = self.numR / self.N if plot_percentages else self.numR
Sseries = self.numS / self.N if plot_percentages else self.numS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the reference data:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if dashed_reference_results:
dashedReference_tseries = dashed_reference_results.tseries[
:: int(self.N / 100)
]
dashedReference_IDEstack = (
dashed_reference_results.numI
+ dashed_reference_results.numD_I
+ dashed_reference_results.numD_E
+ dashed_reference_results.numE
)[:: int(self.N / 100)] / (self.N if plot_percentages else 1)
ax.plot(
dashedReference_tseries,
dashedReference_IDEstack,
color="#E0E0E0",
linestyle="--",
label="$I+D+E$ (" + dashed_reference_label + ")",
zorder=0,
)
if shaded_reference_results:
shadedReference_tseries = shaded_reference_results.tseries
shadedReference_IDEstack = (
shaded_reference_results.numI
+ shaded_reference_results.numD_I
+ shaded_reference_results.numD_E
+ shaded_reference_results.numE
) / (self.N if plot_percentages else 1)
ax.fill_between(
shaded_reference_results.tseries,
shadedReference_IDEstack,
0,
color="#EFEFEF",
label="$I+D+E$ (" + shaded_reference_label + ")",
zorder=0,
)
ax.plot(
shaded_reference_results.tseries,
shadedReference_IDEstack,
color="#E0E0E0",
zorder=1,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the stacked variables:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
topstack = numpy.zeros_like(self.tseries)
if any(Fseries) and plot_F == "stacked":
ax.fill_between(
numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, topstack + Fseries),
topstack,
color=color_F,
alpha=0.5,
label="$F$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, topstack + Fseries),
color=color_F,
zorder=3,
)
topstack = topstack + Fseries
if any(Eseries) and plot_E == "stacked":
ax.fill_between(
numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, topstack + Eseries),
topstack,
color=color_E,
alpha=0.5,
label="$E$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, topstack + Eseries),
color=color_E,
zorder=3,
)
topstack = topstack + Eseries
if combine_D and plot_D_E == "stacked" and plot_D_I == "stacked":
ax.fill_between(
numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, topstack + Dseries),
topstack,
color=color_D_E,
alpha=0.5,
label="$D_{all}$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, topstack + Dseries),
color=color_D_E,
zorder=3,
)
topstack = topstack + Dseries
else:
if any(D_Eseries) and plot_D_E == "stacked":
ax.fill_between(
numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, topstack + D_Eseries),
topstack,
color=color_D_E,
alpha=0.5,
label="$D_E$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, topstack + D_Eseries),
color=color_D_E,
zorder=3,
)
topstack = topstack + D_Eseries
if any(D_Iseries) and plot_D_I == "stacked":
ax.fill_between(
numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, topstack + D_Iseries),
topstack,
color=color_D_I,
alpha=0.5,
label="$D_I$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, topstack + D_Iseries),
color=color_D_I,
zorder=3,
)
topstack = topstack + D_Iseries
if any(Iseries) and plot_I == "stacked":
ax.fill_between(
numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, topstack + Iseries),
topstack,
color=color_I,
alpha=0.5,
label="$I$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, topstack + Iseries),
color=color_I,
zorder=3,
)
topstack = topstack + Iseries
if any(Rseries) and plot_R == "stacked":
ax.fill_between(
numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, topstack + Rseries),
topstack,
color=color_R,
alpha=0.5,
label="$R$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, topstack + Rseries),
color=color_R,
zorder=3,
)
topstack = topstack + Rseries
if any(Sseries) and plot_S == "stacked":
ax.fill_between(
numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, topstack + Sseries),
topstack,
color=color_S,
alpha=0.5,
label="$S$",
zorder=2,
)
ax.plot(
numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, topstack + Sseries),
color=color_S,
zorder=3,
)
topstack = topstack + Sseries
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the shaded variables:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if any(Fseries) and plot_F == "shaded":
ax.fill_between(
numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, Fseries),
0,
color=color_F,
alpha=0.5,
label="$F$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, Fseries),
color=color_F,
zorder=5,
)
if any(Eseries) and plot_E == "shaded":
ax.fill_between(
numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, Eseries),
0,
color=color_E,
alpha=0.5,
label="$E$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, Eseries),
color=color_E,
zorder=5,
)
if combine_D and (
            any(Dseries) and plot_D_E == "shaded" and plot_D_I == "shaded"
):
ax.fill_between(
numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, Dseries),
0,
color=color_D_E,
alpha=0.5,
label="$D_{all}$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, Dseries),
color=color_D_E,
zorder=5,
)
else:
if any(D_Eseries) and plot_D_E == "shaded":
ax.fill_between(
numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, D_Eseries),
0,
color=color_D_E,
alpha=0.5,
label="$D_E$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, D_Eseries),
color=color_D_E,
zorder=5,
)
if any(D_Iseries) and plot_D_I == "shaded":
ax.fill_between(
numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, D_Iseries),
0,
color=color_D_I,
alpha=0.5,
label="$D_I$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, D_Iseries),
color=color_D_I,
zorder=5,
)
if any(Iseries) and plot_I == "shaded":
ax.fill_between(
numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, Iseries),
0,
color=color_I,
alpha=0.5,
label="$I$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, Iseries),
color=color_I,
zorder=5,
)
if any(Sseries) and plot_S == "shaded":
ax.fill_between(
numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, Sseries),
0,
color=color_S,
alpha=0.5,
label="$S$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, Sseries),
color=color_S,
zorder=5,
)
if any(Rseries) and plot_R == "shaded":
ax.fill_between(
numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, Rseries),
0,
color=color_R,
alpha=0.5,
label="$R$",
zorder=4,
)
ax.plot(
numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, Rseries),
color=color_R,
zorder=5,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the line variables:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if any(Fseries) and plot_F == "line":
ax.plot(
numpy.ma.masked_where(Fseries <= 0, self.tseries),
numpy.ma.masked_where(Fseries <= 0, Fseries),
color=color_F,
label="$F$",
zorder=6,
)
if any(Eseries) and plot_E == "line":
ax.plot(
numpy.ma.masked_where(Eseries <= 0, self.tseries),
numpy.ma.masked_where(Eseries <= 0, Eseries),
color=color_E,
label="$E$",
zorder=6,
)
        if combine_D and (any(Dseries) and plot_D_E == "line" and plot_D_I == "line"):
ax.plot(
numpy.ma.masked_where(Dseries <= 0, self.tseries),
numpy.ma.masked_where(Dseries <= 0, Dseries),
color=color_D_E,
label="$D_{all}$",
zorder=6,
)
else:
if any(D_Eseries) and plot_D_E == "line":
ax.plot(
numpy.ma.masked_where(D_Eseries <= 0, self.tseries),
numpy.ma.masked_where(D_Eseries <= 0, D_Eseries),
color=color_D_E,
label="$D_E$",
zorder=6,
)
if any(D_Iseries) and plot_D_I == "line":
ax.plot(
numpy.ma.masked_where(D_Iseries <= 0, self.tseries),
numpy.ma.masked_where(D_Iseries <= 0, D_Iseries),
color=color_D_I,
label="$D_I$",
zorder=6,
)
if any(Iseries) and plot_I == "line":
ax.plot(
numpy.ma.masked_where(Iseries <= 0, self.tseries),
numpy.ma.masked_where(Iseries <= 0, Iseries),
color=color_I,
label="$I$",
zorder=6,
)
if any(Sseries) and plot_S == "line":
ax.plot(
numpy.ma.masked_where(Sseries <= 0, self.tseries),
numpy.ma.masked_where(Sseries <= 0, Sseries),
color=color_S,
label="$S$",
zorder=6,
)
if any(Rseries) and plot_R == "line":
ax.plot(
numpy.ma.masked_where(Rseries <= 0, self.tseries),
numpy.ma.masked_where(Rseries <= 0, Rseries),
color=color_R,
label="$R$",
zorder=6,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the vertical line annotations:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
if len(vlines) > 0 and len(vline_colors) == 0:
vline_colors = ["gray"] * len(vlines)
if len(vlines) > 0 and len(vline_labels) == 0:
vline_labels = [None] * len(vlines)
if len(vlines) > 0 and len(vline_styles) == 0:
vline_styles = [":"] * len(vlines)
for vline_x, vline_color, vline_style, vline_label in zip(
vlines, vline_colors, vline_styles, vline_labels
):
if vline_x is not None:
ax.axvline(
x=vline_x,
color=vline_color,
linestyle=vline_style,
alpha=1,
label=vline_label,
)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Draw the plot labels:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
ax.set_xlabel("days")
ax.set_ylabel(
"percent of population" if plot_percentages else "number of individuals"
)
ax.set_xlim(0, (max(self.tseries) if not xlim else xlim))
ax.set_ylim(0, ylim)
if plot_percentages:
ax.set_yticklabels(["{:,.0%}".format(y) for y in ax.get_yticks()])
if legend:
legend_handles, legend_labels = ax.get_legend_handles_labels()
ax.legend(
legend_handles[::-1],
legend_labels[::-1],
loc="upper right",
facecolor="white",
edgecolor="none",
framealpha=0.9,
prop={"size": 8},
)
if title:
ax.set_title(title, size=12)
if side_title:
ax.annotate(
side_title,
(0, 0.5),
xytext=(-45, 0),
ha="right",
va="center",
size=12,
rotation=90,
xycoords="axes fraction",
textcoords="offset points",
)
return ax
def figure_basic(
self,
plot_S="line",
plot_E="line",
plot_I="line",
plot_R="line",
plot_F="line",
plot_D_E="line",
plot_D_I="line",
combine_D=True,
color_S="tab:green",
color_E="orange",
color_I="crimson",
color_R="tab:blue",
color_F="black",
color_D_E="mediumorchid",
color_D_I="mediumorchid",
color_reference="#E0E0E0",
dashed_reference_results=None,
dashed_reference_label="reference",
shaded_reference_results=None,
shaded_reference_label="reference",
vlines=[],
vline_colors=[],
vline_styles=[],
vline_labels=[],
ylim=None,
xlim=None,
legend=True,
title=None,
side_title=None,
plot_percentages=True,
figsize=(12, 8),
use_seaborn=True,
show=True,
):
fig, ax = pyplot.subplots(figsize=figsize)
if use_seaborn:
seaborn.set_style("ticks")
seaborn.despine()
self.plot(
ax=ax,
plot_S=plot_S,
plot_E=plot_E,
plot_I=plot_I,
plot_R=plot_R,
plot_F=plot_F,
plot_D_E=plot_D_E,
plot_D_I=plot_D_I,
combine_D=combine_D,
color_S=color_S,
color_E=color_E,
color_I=color_I,
color_R=color_R,
color_F=color_F,
color_D_E=color_D_E,
color_D_I=color_D_I,
color_reference=color_reference,
dashed_reference_results=dashed_reference_results,
dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results,
shaded_reference_label=shaded_reference_label,
vlines=vlines,
vline_colors=vline_colors,
vline_styles=vline_styles,
vline_labels=vline_labels,
ylim=ylim,
xlim=xlim,
legend=legend,
title=title,
side_title=side_title,
plot_percentages=plot_percentages,
)
if show:
pyplot.show()
return fig, ax
def figure_infections(
self,
plot_S=False,
plot_E="stacked",
plot_I="stacked",
plot_R=False,
plot_F=False,
plot_D_E="stacked",
plot_D_I="stacked",
combine_D=True,
color_S="tab:green",
color_E="orange",
color_I="crimson",
color_R="tab:blue",
color_F="black",
color_D_E="mediumorchid",
color_D_I="mediumorchid",
color_reference="#E0E0E0",
dashed_reference_results=None,
dashed_reference_label="reference",
shaded_reference_results=None,
shaded_reference_label="reference",
vlines=[],
vline_colors=[],
vline_styles=[],
vline_labels=[],
ylim=None,
xlim=None,
legend=True,
title=None,
side_title=None,
plot_percentages=True,
figsize=(12, 8),
use_seaborn=True,
show=True,
):
fig, ax = pyplot.subplots(figsize=figsize)
if use_seaborn:
seaborn.set_style("ticks")
seaborn.despine()
self.plot(
ax=ax,
plot_S=plot_S,
plot_E=plot_E,
plot_I=plot_I,
plot_R=plot_R,
plot_F=plot_F,
plot_D_E=plot_D_E,
plot_D_I=plot_D_I,
combine_D=combine_D,
color_S=color_S,
color_E=color_E,
color_I=color_I,
color_R=color_R,
color_F=color_F,
color_D_E=color_D_E,
color_D_I=color_D_I,
color_reference=color_reference,
dashed_reference_results=dashed_reference_results,
dashed_reference_label=dashed_reference_label,
shaded_reference_results=shaded_reference_results,
shaded_reference_label=shaded_reference_label,
vlines=vlines,
vline_colors=vline_colors,
vline_styles=vline_styles,
vline_labels=vline_labels,
ylim=ylim,
xlim=xlim,
legend=legend,
title=title,
side_title=side_title,
plot_percentages=plot_percentages,
)
if show:
pyplot.show()
return fig, ax
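# Usage sketch for the deterministic model above (parameter values are
# illustrative, not taken from any particular source):
def _demo_seirs_ode():
    model = SEIRSModel(initN=10000, beta=0.25, sigma=1 / 5.2, gamma=1 / 12.4,
                       initI=10)
    model.run(T=300)
    return model.total_num_infections()[-1]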
class SEIRSNetworkModel:
"""
A class to simulate the SEIRS Stochastic Network Model
===================================================
Params: G Network adjacency matrix (numpy array) or Networkx graph object.
beta Rate of transmission (exposure) (global)
beta_local Rate(s) of transmission (exposure) for adjacent individuals (optional)
sigma Rate of infection (upon exposure)
gamma Rate of recovery (upon infection)
xi Rate of re-susceptibility (upon recovery)
mu_I Rate of infection-related death
mu_0 Rate of baseline death
nu Rate of baseline birth
p Probability of interaction outside adjacent nodes
Q Quarantine adjacency matrix (numpy array) or Networkx graph object.
beta_D Rate of transmission (exposure) for individuals with detected infections (global)
    beta_D_local Rate(s) of transmission (exposure) for adjacent individuals with detected infections (optional)
sigma_D Rate of infection (upon exposure) for individuals with detected infections
gamma_D Rate of recovery (upon infection) for individuals with detected infections
mu_D Rate of infection-related death for individuals with detected infections
theta_E Rate of baseline testing for exposed individuals
theta_I Rate of baseline testing for infectious individuals
phi_E Rate of contact tracing testing for exposed individuals
phi_I Rate of contact tracing testing for infectious individuals
psi_E Probability of positive test results for exposed individuals
    psi_I Probability of positive test results for infectious individuals
q Probability of quarantined individuals interaction outside adjacent nodes
initE Init number of exposed individuals
initI Init number of infectious individuals
    initD_E Init number of detected exposed individuals
initD_I Init number of detected infectious individuals
initR Init number of recovered individuals
initF Init number of infection-related fatalities
(all remaining nodes initialized susceptible)
"""
def __init__(
self,
G,
beta,
sigma,
gamma,
xi=0,
mu_I=0,
mu_0=0,
nu=0,
beta_local=None,
p=0,
Q=None,
beta_D=None,
sigma_D=None,
gamma_D=None,
mu_D=None,
beta_D_local=None,
theta_E=0,
theta_I=0,
phi_E=0,
phi_I=0,
psi_E=1,
psi_I=1,
q=0,
initE=0,
initI=10,
initD_E=0,
initD_I=0,
initR=0,
initF=0,
node_groups=None,
store_Xseries=False,
):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Adjacency matrix:
self.update_G(G)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Setup Quarantine Adjacency matrix:
if Q is None:
Q = G # If no Q graph is provided, use G in its place
self.update_Q(Q)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model Parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.parameters = {
"beta": beta,
"sigma": sigma,
"gamma": gamma,
"xi": xi,
"mu_I": mu_I,
"mu_0": mu_0,
"nu": nu,
"beta_D": beta_D,
"sigma_D": sigma_D,
"gamma_D": gamma_D,
"mu_D": mu_D,
"beta_local": beta_local,
"beta_D_local": beta_D_local,
"p": p,
"q": q,
"theta_E": theta_E,
"theta_I": theta_I,
"phi_E": phi_E,
"phi_I": phi_I,
"psi_E": psi_E,
"psi_I": psi_I,
}
self.update_parameters()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Each node can undergo up to 4 transitions (sans vitality/re-susceptibility returns to S state),
# so there are ~numNodes*4 events/timesteps expected; initialize numNodes*5 timestep slots to start
# (will be expanded during run if needed)
self.tseries = numpy.zeros(5 * self.numNodes)
self.numE = numpy.zeros(5 * self.numNodes)
self.numI = numpy.zeros(5 * self.numNodes)
self.numD_E = numpy.zeros(5 * self.numNodes)
self.numD_I = numpy.zeros(5 * self.numNodes)
self.numR = numpy.zeros(5 * self.numNodes)
self.numF = numpy.zeros(5 * self.numNodes)
self.numS = numpy.zeros(5 * self.numNodes)
self.N = numpy.zeros(5 * self.numNodes)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize Timekeeping:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.t = 0
self.tmax = 0 # will be set when run() is called
self.tidx = 0
self.tseries[0] = 0
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Initialize Counts of individuals with each state:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.numE[0] = int(initE)
self.numI[0] = int(initI)
self.numD_E[0] = int(initD_E)
self.numD_I[0] = int(initD_I)
self.numR[0] = int(initR)
self.numF[0] = int(initF)
self.numS[0] = (
self.numNodes
- self.numE[0]
- self.numI[0]
- self.numD_E[0]
- self.numD_I[0]
- self.numR[0]
- self.numF[0]
)
self.N[0] = (
self.numS[0]
+ self.numE[0]
+ self.numI[0]
+ self.numD_E[0]
+ self.numD_I[0]
+ self.numR[0]
)
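        # Note: fatalities (F) are excluded from N, so N tracks the living
        # population used in the frequency-dependent transmission terms.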
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Node states:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.S = 1
self.E = 2
self.I = 3
self.D_E = 4
self.D_I = 5
self.R = 6
self.F = 7
self.X = numpy.array(
[self.S] * int(self.numS[0])
+ [self.E] * int(self.numE[0])
+ [self.I] * int(self.numI[0])
+ [self.D_E] * int(self.numD_E[0])
+ [self.D_I] * int(self.numD_I[0])
+ [self.R] * int(self.numR[0])
+ [self.F] * int(self.numF[0])
).reshape((self.numNodes, 1))
numpy.random.shuffle(self.X)
self.store_Xseries = store_Xseries
if store_Xseries:
self.Xseries = numpy.zeros(
shape=(5 * self.numNodes, self.numNodes), dtype="uint8"
)
self.Xseries[0, :] = self.X.T
self.transitions = {
"StoE": {"currentState": self.S, "newState": self.E},
"EtoI": {"currentState": self.E, "newState": self.I},
"ItoR": {"currentState": self.I, "newState": self.R},
"ItoF": {"currentState": self.I, "newState": self.F},
"RtoS": {"currentState": self.R, "newState": self.S},
"EtoDE": {"currentState": self.E, "newState": self.D_E},
"ItoDI": {"currentState": self.I, "newState": self.D_I},
"DEtoDI": {"currentState": self.D_E, "newState": self.D_I},
"DItoR": {"currentState": self.D_I, "newState": self.R},
"DItoF": {"currentState": self.D_I, "newState": self.F},
"_toS": {"currentState": True, "newState": self.S},
}
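        # Each entry defines a Gillespie event: a node whose state matches
        # "currentState" jumps to "newState" when that event fires ("_toS"
        # models births, which introduce susceptible individuals).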
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Initialize node subgroup data series:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.nodeGroupData = None
if node_groups:
self.nodeGroupData = {}
for groupName, nodeList in node_groups.items():
self.nodeGroupData[groupName] = {
"nodes": numpy.array(nodeList),
"mask": numpy.isin(range(self.numNodes), nodeList).reshape(
(self.numNodes, 1)
),
}
self.nodeGroupData[groupName]["numS"] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]["numE"] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]["numI"] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]["numD_E"] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]["numD_I"] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]["numR"] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]["numF"] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]["N"] = numpy.zeros(5 * self.numNodes)
self.nodeGroupData[groupName]["numS"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.S
)
self.nodeGroupData[groupName]["numE"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.E
)
self.nodeGroupData[groupName]["numI"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.I
)
self.nodeGroupData[groupName]["numD_E"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.D_E
)
self.nodeGroupData[groupName]["numD_I"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.D_I
)
self.nodeGroupData[groupName]["numR"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.R
)
self.nodeGroupData[groupName]["numF"][0] = numpy.count_nonzero(
self.nodeGroupData[groupName]["mask"] * self.X == self.F
)
self.nodeGroupData[groupName]["N"][0] = (
self.nodeGroupData[groupName]["numS"][0]
+ self.nodeGroupData[groupName]["numE"][0]
+ self.nodeGroupData[groupName]["numI"][0]
+ self.nodeGroupData[groupName]["numD_E"][0]
+ self.nodeGroupData[groupName]["numD_I"][0]
+ self.nodeGroupData[groupName]["numR"][0]
)
def update_parameters(self):
updatestart = time.time()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Model parameters:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.beta = (
numpy.array(self.parameters["beta"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["beta"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["beta"], shape=(self.numNodes, 1)
)
)
self.sigma = (
numpy.array(self.parameters["sigma"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["sigma"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["sigma"], shape=(self.numNodes, 1)
)
)
self.gamma = (
numpy.array(self.parameters["gamma"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["gamma"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["gamma"], shape=(self.numNodes, 1)
)
)
self.xi = (
numpy.array(self.parameters["xi"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["xi"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["xi"], shape=(self.numNodes, 1))
)
self.mu_I = (
numpy.array(self.parameters["mu_I"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["mu_I"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["mu_I"], shape=(self.numNodes, 1)
)
)
self.mu_0 = (
numpy.array(self.parameters["mu_0"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["mu_0"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["mu_0"], shape=(self.numNodes, 1)
)
)
self.nu = (
numpy.array(self.parameters["nu"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["nu"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["nu"], shape=(self.numNodes, 1))
)
self.p = (
numpy.array(self.parameters["p"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["p"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["p"], shape=(self.numNodes, 1))
)
# Testing-related parameters:
self.beta_D = (
(
numpy.array(self.parameters["beta_D"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["beta_D"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["beta_D"], shape=(self.numNodes, 1)
)
)
if self.parameters["beta_D"] is not None
else self.beta
)
self.sigma_D = (
(
numpy.array(self.parameters["sigma_D"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["sigma_D"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["sigma_D"], shape=(self.numNodes, 1)
)
)
if self.parameters["sigma_D"] is not None
else self.sigma
)
self.gamma_D = (
(
numpy.array(self.parameters["gamma_D"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["gamma_D"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["gamma_D"], shape=(self.numNodes, 1)
)
)
if self.parameters["gamma_D"] is not None
else self.gamma
)
self.mu_D = (
(
numpy.array(self.parameters["mu_D"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["mu_D"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["mu_D"], shape=(self.numNodes, 1)
)
)
if self.parameters["mu_D"] is not None
else self.mu_I
)
self.theta_E = (
numpy.array(self.parameters["theta_E"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["theta_E"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["theta_E"], shape=(self.numNodes, 1)
)
)
self.theta_I = (
numpy.array(self.parameters["theta_I"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["theta_I"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["theta_I"], shape=(self.numNodes, 1)
)
)
self.phi_E = (
numpy.array(self.parameters["phi_E"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["phi_E"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["phi_E"], shape=(self.numNodes, 1)
)
)
self.phi_I = (
numpy.array(self.parameters["phi_I"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["phi_I"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["phi_I"], shape=(self.numNodes, 1)
)
)
self.psi_E = (
numpy.array(self.parameters["psi_E"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["psi_E"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["psi_E"], shape=(self.numNodes, 1)
)
)
self.psi_I = (
numpy.array(self.parameters["psi_I"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["psi_I"], (list, numpy.ndarray))
else numpy.full(
fill_value=self.parameters["psi_I"], shape=(self.numNodes, 1)
)
)
self.q = (
numpy.array(self.parameters["q"]).reshape((self.numNodes, 1))
if isinstance(self.parameters["q"], (list, numpy.ndarray))
else numpy.full(fill_value=self.parameters["q"], shape=(self.numNodes, 1))
)
# Local transmission parameters:
if self.parameters["beta_local"] is not None:
if isinstance(self.parameters["beta_local"], (list, numpy.ndarray)):
if isinstance(self.parameters["beta_local"], list):
self.beta_local = numpy.array(self.parameters["beta_local"])
else: # is numpy.ndarray
self.beta_local = self.parameters["beta_local"]
                if self.beta_local.ndim == 1:
                    self.beta_local = self.beta_local.reshape((self.numNodes, 1))
                elif self.beta_local.ndim == 2:
                    self.beta_local = self.beta_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_local = numpy.full_like(
self.beta, fill_value=self.parameters["beta_local"]
)
else:
self.beta_local = self.beta
# ----------------------------------------
if self.parameters["beta_D_local"] is not None:
if isinstance(self.parameters["beta_D_local"], (list, numpy.ndarray)):
if isinstance(self.parameters["beta_D_local"], list):
self.beta_D_local = numpy.array(self.parameters["beta_D_local"])
else: # is numpy.ndarray
self.beta_D_local = self.parameters["beta_D_local"]
                if self.beta_D_local.ndim == 1:
                    self.beta_D_local = self.beta_D_local.reshape((self.numNodes, 1))
                elif self.beta_D_local.ndim == 2:
                    self.beta_D_local = self.beta_D_local.reshape((self.numNodes, self.numNodes))
else:
self.beta_D_local = numpy.full_like(
self.beta_D, fill_value=self.parameters["beta_D_local"]
)
else:
self.beta_D_local = self.beta_D
# Pre-multiply beta values by the adjacency matrix ("transmission weight connections")
if self.beta_local.ndim == 1:
self.A_beta = scipy.sparse.csr_matrix.multiply(
self.A, numpy.tile(self.beta_local, (1, self.numNodes))
).tocsr()
elif self.beta_local.ndim == 2:
self.A_beta = scipy.sparse.csr_matrix.multiply(
self.A, self.beta_local
).tocsr()
# Pre-multiply beta_D values by the quarantine adjacency matrix ("transmission weight connections")
if self.beta_D_local.ndim == 1:
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(
self.A_Q, numpy.tile(self.beta_D_local, (1, self.numNodes))
).tocsr()
elif self.beta_D_local.ndim == 2:
self.A_Q_beta_D = scipy.sparse.csr_matrix.multiply(
self.A_Q, self.beta_D_local
).tocsr()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Update scenario flags:
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
self.update_scenario_flags()
def node_degrees(self, Amat):
return Amat.sum(axis=0).reshape(self.numNodes, 1) # sums of adj matrix cols
def update_G(self, new_G):
self.G = new_G
# Adjacency matrix:
if type(new_G) == numpy.ndarray:
self.A = scipy.sparse.csr_matrix(new_G)
elif type(new_G) == networkx.classes.graph.Graph:
self.A = networkx.adj_matrix(
new_G
) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes = int(self.A.shape[1])
self.degree = numpy.asarray(self.node_degrees(self.A)).astype(float)
return
def update_Q(self, new_Q):
self.Q = new_Q
# Quarantine Adjacency matrix:
if type(new_Q) == numpy.ndarray:
self.A_Q = scipy.sparse.csr_matrix(new_Q)
elif type(new_Q) == networkx.classes.graph.Graph:
self.A_Q = networkx.adj_matrix(
new_Q
) # adj_matrix gives scipy.sparse csr_matrix
else:
raise BaseException("Input an adjacency matrix or networkx object only.")
self.numNodes_Q = int(self.A_Q.shape[1])
self.degree_Q = numpy.asarray(self.node_degrees(self.A_Q)).astype(float)
assert (
self.numNodes == self.numNodes_Q
), "The normal and quarantine adjacency graphs must be of the same size."
return
def update_scenario_flags(self):
self.testing_scenario = (
numpy.any(self.psi_I) and (numpy.any(self.theta_I) or numpy.any(self.phi_I))
) or (
numpy.any(self.psi_E) and (numpy.any(self.theta_E) or numpy.any(self.phi_E))
)
self.tracing_scenario = (numpy.any(self.psi_E) and numpy.any(self.phi_E)) or (
numpy.any(self.psi_I) and numpy.any(self.phi_I)
)
self.vitality_scenario = numpy.any(self.mu_0) and numpy.any(self.nu)
self.resusceptibility_scenario = numpy.any(self.xi)
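        # These flags let calc_propensities() skip the sparse matrix products
        # for mechanisms whose parameters are all zero.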
def total_num_infections(self, t_idx=None):
if t_idx is None:
return self.numE[:] + self.numI[:] + self.numD_E[:] + self.numD_I[:]
else:
return (
self.numE[t_idx]
+ self.numI[t_idx]
+ self.numD_E[t_idx]
+ self.numD_I[t_idx]
)
def calc_propensities(self):
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Pre-calculate matrix multiplication terms that may be used in multiple propensity calculations,
# and check to see if their computation is necessary before doing the multiplication
transmissionTerms_I = numpy.zeros(shape=(self.numNodes, 1))
if numpy.any(self.numI[self.tidx]) and numpy.any(self.beta != 0):
transmissionTerms_I = numpy.asarray(
scipy.sparse.csr_matrix.dot(self.A_beta, self.X == self.I)
)
transmissionTerms_DI = numpy.zeros(shape=(self.numNodes, 1))
if (
self.testing_scenario
and numpy.any(self.numD_I[self.tidx])
and numpy.any(self.beta_D)
):
transmissionTerms_DI = numpy.asarray(
scipy.sparse.csr_matrix.dot(self.A_Q_beta_D, self.X == self.D_I)
)
numContacts_D = numpy.zeros(shape=(self.numNodes, 1))
if self.tracing_scenario and (
numpy.any(self.numD_E[self.tidx]) or numpy.any(self.numD_I[self.tidx])
):
numContacts_D = numpy.asarray(
scipy.sparse.csr_matrix.dot(
self.A, ((self.X == self.D_E) | (self.X == self.D_I))
)
)
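        # numContacts_D counts each node's detected (D_E or D_I) neighbors;
        # it scales the contact-tracing testing rates phi_E / phi_I below.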
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities_StoE = (
self.p
* (
(
self.beta * self.numI[self.tidx]
+ self.q * self.beta_D * self.numD_I[self.tidx]
)
/ self.N[self.tidx]
)
+ (1 - self.p)
* numpy.divide(
(transmissionTerms_I + transmissionTerms_DI),
self.degree,
out=numpy.zeros_like(self.degree),
where=self.degree != 0,
)
) * (self.X == self.S)
propensities_EtoI = self.sigma * (self.X == self.E)
propensities_ItoR = self.gamma * (self.X == self.I)
propensities_ItoF = self.mu_I * (self.X == self.I)
# propensities_EtoDE = ( self.theta_E + numpy.divide((self.phi_E*numContacts_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0) )*self.psi_E*(self.X==self.E)
propensities_EtoDE = (
(self.theta_E + self.phi_E * numContacts_D)
* self.psi_E
* (self.X == self.E)
)
# propensities_ItoDI = ( self.theta_I + numpy.divide((self.phi_I*numContacts_D), self.degree, out=numpy.zeros_like(self.degree), where=self.degree!=0) )*self.psi_I*(self.X==self.I)
propensities_ItoDI = (
(self.theta_I + self.phi_I * numContacts_D)
* self.psi_I
* (self.X == self.I)
)
propensities_DEtoDI = self.sigma_D * (self.X == self.D_E)
propensities_DItoR = self.gamma_D * (self.X == self.D_I)
propensities_DItoF = self.mu_D * (self.X == self.D_I)
propensities_RtoS = self.xi * (self.X == self.R)
propensities__toS = self.nu * (self.X != self.F)
propensities = numpy.hstack(
[
propensities_StoE,
propensities_EtoI,
propensities_ItoR,
propensities_ItoF,
propensities_EtoDE,
propensities_ItoDI,
propensities_DEtoDI,
propensities_DItoR,
propensities_DItoF,
propensities_RtoS,
propensities__toS,
]
)
columns = [
"StoE",
"EtoI",
"ItoR",
"ItoF",
"EtoDE",
"ItoDI",
"DEtoDI",
"DItoR",
"DItoF",
"RtoS",
"_toS",
]
return propensities, columns
def increase_data_series_length(self):
self.tseries = numpy.pad(
self.tseries, [(0, 5 * self.numNodes)], mode="constant", constant_values=0
)
self.numS = numpy.pad(
self.numS, [(0, 5 * self.numNodes)], mode="constant", constant_values=0
)
self.numE = numpy.pad(
self.numE, [(0, 5 * self.numNodes)], mode="constant", constant_values=0
)
self.numI = numpy.pad(
self.numI, [(0, 5 * self.numNodes)], mode="constant", constant_values=0
)
self.numD_E = numpy.pad(
self.numD_E, [(0, 5 * self.numNodes)], mode="constant", constant_values=0
)
self.numD_I = numpy.pad(
self.numD_I, [(0, 5 * self.numNodes)], mode="constant", constant_values=0
)
self.numR = numpy.pad(
self.numR, [(0, 5 * self.numNodes)], mode="constant", constant_values=0
)
self.numF = numpy.pad(
self.numF, [(0, 5 * self.numNodes)], mode="constant", constant_values=0
)
self.N = numpy.pad(
self.N, [(0, 5 * self.numNodes)], mode="constant", constant_values=0
)
if self.store_Xseries:
self.Xseries = numpy.pad(
self.Xseries,
[(0, 5 * self.numNodes), (0, 0)],
mode="constant",
constant_values=0,
)
if self.nodeGroupData:
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]["numS"] = numpy.pad(
self.nodeGroupData[groupName]["numS"],
[(0, 5 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numE"] = numpy.pad(
self.nodeGroupData[groupName]["numE"],
[(0, 5 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numI"] = numpy.pad(
self.nodeGroupData[groupName]["numI"],
[(0, 5 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numD_E"] = numpy.pad(
self.nodeGroupData[groupName]["numD_E"],
[(0, 5 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numD_I"] = numpy.pad(
self.nodeGroupData[groupName]["numD_I"],
[(0, 5 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numR"] = numpy.pad(
self.nodeGroupData[groupName]["numR"],
[(0, 5 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["numF"] = numpy.pad(
self.nodeGroupData[groupName]["numF"],
[(0, 5 * self.numNodes)],
mode="constant",
constant_values=0,
)
self.nodeGroupData[groupName]["N"] = numpy.pad(
self.nodeGroupData[groupName]["N"],
[(0, 5 * self.numNodes)],
mode="constant",
constant_values=0,
)
return None
def finalize_data_series(self):
self.tseries = numpy.array(self.tseries, dtype=float)[: self.tidx + 1]
self.numS = numpy.array(self.numS, dtype=float)[: self.tidx + 1]
self.numE = numpy.array(self.numE, dtype=float)[: self.tidx + 1]
self.numI = numpy.array(self.numI, dtype=float)[: self.tidx + 1]
self.numD_E = numpy.array(self.numD_E, dtype=float)[: self.tidx + 1]
self.numD_I = numpy.array(self.numD_I, dtype=float)[: self.tidx + 1]
self.numR = numpy.array(self.numR, dtype=float)[: self.tidx + 1]
self.numF = numpy.array(self.numF, dtype=float)[: self.tidx + 1]
self.N = numpy.array(self.N, dtype=float)[: self.tidx + 1]
if self.store_Xseries:
self.Xseries = self.Xseries[: self.tidx + 1, :]
if self.nodeGroupData:
for groupName in self.nodeGroupData:
self.nodeGroupData[groupName]["numS"] = numpy.array(
self.nodeGroupData[groupName]["numS"], dtype=float
)[: self.tidx + 1]
self.nodeGroupData[groupName]["numE"] = numpy.array(
self.nodeGroupData[groupName]["numE"], dtype=float
)[: self.tidx + 1]
self.nodeGroupData[groupName]["numI"] = numpy.array(
self.nodeGroupData[groupName]["numI"], dtype=float
)[: self.tidx + 1]
self.nodeGroupData[groupName]["numD_E"] = numpy.array(
self.nodeGroupData[groupName]["numD_E"], dtype=float
)[: self.tidx + 1]
self.nodeGroupData[groupName]["numD_I"] = numpy.array(
self.nodeGroupData[groupName]["numD_I"], dtype=float
)[: self.tidx + 1]
self.nodeGroupData[groupName]["numR"] = numpy.array(
self.nodeGroupData[groupName]["numR"], dtype=float
)[: self.tidx + 1]
self.nodeGroupData[groupName]["numF"] = numpy.array(
self.nodeGroupData[groupName]["numF"], dtype=float
)[: self.tidx + 1]
self.nodeGroupData[groupName]["N"] = numpy.array(
self.nodeGroupData[groupName]["N"], dtype=float
)[: self.tidx + 1]
return None
def run_iteration(self):
if self.tidx >= len(self.tseries) - 1:
            # Room has run out in the time series storage arrays; extend them by another 5*numNodes slots:
self.increase_data_series_length()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 1. Generate 2 random numbers uniformly distributed in (0,1)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
r1 = numpy.random.rand()
r2 = numpy.random.rand()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 2. Calculate propensities
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities, transitionTypes = self.calc_propensities()
# Terminate when probability of all events is 0:
if propensities.sum() <= 0.0:
self.finalize_data_series()
return False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 3. Calculate alpha
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
propensities_flat = propensities.ravel(order="F")
cumsum = propensities_flat.cumsum()
alpha = propensities_flat.sum()
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 4. Compute the time until the next event takes place
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
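        # The waiting time between events is exponentially distributed with
        # rate alpha (the total propensity); inverse-CDF sampling gives
        # tau = -ln(r1)/alpha = (1/alpha)*ln(1/r1).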
tau = (1 / alpha) * numpy.log(float(1 / r1))
self.t += tau
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 5. Compute which event takes place
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
transitionIdx = numpy.searchsorted(cumsum, r2 * alpha)
transitionNode = transitionIdx % self.numNodes
transitionType = transitionTypes[int(transitionIdx / self.numNodes)]
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# 6. Update node states and data series
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
assert (
self.X[transitionNode] == self.transitions[transitionType]["currentState"]
and self.X[transitionNode] != self.F
), (
"Assertion error: Node "
+ str(transitionNode)
+ " has unexpected current state "
+ str(self.X[transitionNode])
+ " given the intended transition of "
+ str(transitionType)
+ "."
)
self.X[transitionNode] = self.transitions[transitionType]["newState"]
self.tidx += 1
self.tseries[self.tidx] = self.t
self.numS[self.tidx] = numpy.clip(
numpy.count_nonzero(self.X == self.S), a_min=0, a_max=self.numNodes
)
self.numE[self.tidx] = numpy.clip(
numpy.count_nonzero(self.X == self.E), a_min=0, a_max=self.numNodes
)
self.numI[self.tidx] = numpy.clip(
numpy.count_nonzero(self.X == self.I), a_min=0, a_max=self.numNodes
)
self.numD_E[self.tidx] = numpy.clip(
numpy.count_nonzero(self.X == self.D_E), a_min=0, a_max=self.numNodes
)
self.numD_I[self.tidx] = numpy.clip(
numpy.count_nonzero(self.X == self.D_I), a_min=0, a_max=self.numNodes
)
self.numR[self.tidx] = numpy.clip(
numpy.count_nonzero(self.X == self.R), a_min=0, a_max=self.numNodes
)
self.numF[self.tidx] = numpy.clip(
            numpy.count_nonzero(self.X == self.F), a_min=0, a_max=self.numNodes
        )
import numpy as np
import os
import glob
import healpy as hp
from rubin_sim.photUtils import Sed, Bandpass
from .twilightFunc import twilightFunc
from scipy.interpolate import InterpolatedUnivariateSpline, interp1d
from rubin_sim.data import get_data_dir
# Make backwards compatible with healpy
if hasattr(hp, 'get_interp_weights'):
get_neighbours = hp.get_interp_weights
elif hasattr(hp, 'get_neighbours'):
get_neighbours = hp.get_neighbours
else:
print("Could not find appropriate healpy function for get_interp_weight or get_neighbours")
__all__ = ['id2intid', 'intid2id', 'loadSpecFiles', 'BaseSingleInterp', 'ScatteredStar', 'LowerAtm',
'UpperAtm', 'MergedSpec', 'Airglow', 'TwilightInterp', 'MoonInterp',
'ZodiacalInterp']
def id2intid(ids):
"""
take an array of ids, and convert them to an integer id.
Handy if you want to put things into a sparse array.
"""
uids = np.unique(ids)
order = np.argsort(ids)
oids = ids[order]
uintids = np.arange(np.size(uids), dtype=int)
left = np.searchsorted(oids, uids)
right = np.searchsorted(oids, uids, side='right')
intids = np.empty(ids.size, dtype=int)
for i in range(np.size(left)):
intids[left[i]:right[i]] = uintids[i]
result = intids*0
result[order] = intids
return result, uids, uintids
def intid2id(intids, uintids, uids, dtype=int):
"""
convert an int back to an id
"""
ids = np.zeros(np.size(intids))
order = np.argsort(intids)
ointids = intids[order]
left = np.searchsorted(ointids, uintids, side='left')
right = np.searchsorted(ointids, uintids, side='right')
for i, (le, ri) in enumerate(zip(left, right)):
ids[le:ri] = uids[i]
result = np.zeros(np.size(intids), dtype=dtype)
result[order] = ids
return result
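# Round-trip example for the id helpers (hypothetical ids):
#   >>> ids = np.array([42, 7, 42, 13])
#   >>> intids, uids, uintids = id2intid(ids)
#   >>> intids
#   array([2, 0, 2, 1])
#   >>> intid2id(intids, uintids, uids)
#   array([42,  7, 42, 13])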
def loadSpecFiles(filenames, mags=False):
"""
Load up the ESO spectra.
The ESO npz files contain the following arrays:
filterWave: The central wavelengths of the pre-computed magnitudes
wave: wavelengths for the spectra
spec: array of spectra and magnitudes along with the relevant variable inputs. For example,
airglow has dtype = [('airmass', '<f8'), ('solarFlux', '<f8'), ('spectra', '<f8', (17001,)),
    ('mags', '<f8', (6,))]
For each unique airmass and solarFlux value, there is a 17001 elements spectra and 6 magnitudes.
"""
    temp = np.load(filenames[0])
    wave = temp['wave'].copy()
    filterWave = temp['filterWave'].copy()
    if mags:
        # don't copy the spectra to save memory space
        dt = np.dtype([(key, temp['spec'].dtype[i]) for
                       i, key in enumerate(temp['spec'].dtype.names) if key != 'spectra'])
        spec = np.zeros(temp['spec'].size, dtype=dt)
        for key in temp['spec'].dtype.names:
            if key != 'spectra':
                spec[key] = temp['spec'][key].copy()
    else:
        spec = temp['spec'].copy()
    # Any additional files (the loop is a no-op for a single filename):
    for filename in filenames[1:]:
        temp = np.load(filename)
        if mags:
            # don't copy the spectra to save memory space
            dt = np.dtype([(key, temp['spec'].dtype[i]) for
                           i, key in enumerate(temp['spec'].dtype.names) if key != 'spectra'])
            tempspec = np.zeros(temp['spec'].size, dtype=dt)
            for key in temp['spec'].dtype.names:
                if key != 'spectra':
                    tempspec[key] = temp['spec'][key].copy()
        else:
            tempspec = temp['spec']
        spec = np.append(spec, tempspec)
    return spec, wave, filterWave
class BaseSingleInterp(object):
"""
Base class for sky components that only need to be interpolated on airmass
"""
def __init__(self, compName=None, sortedOrder=['airmass', 'nightTimes'], mags=False):
"""
mags: Rather than the full spectrum, return the LSST ugrizy magnitudes.
"""
self.mags = mags
dataDir = os.path.join(get_data_dir(), 'skybrightness', 'ESO_Spectra/'+compName)
filenames = sorted(glob.glob(dataDir+'/*.npz'))
self.spec, self.wave, self.filterWave = loadSpecFiles(filenames, mags=self.mags)
# Take the log of the spectra in case we want to interp in log space.
if not mags:
self.logSpec = np.zeros(self.spec['spectra'].shape, dtype=float)
good = np.where(self.spec['spectra'] != 0)
self.logSpec[good] = np.log10(self.spec['spectra'][good])
self.specSize = self.spec['spectra'][0].size
else:
self.specSize = 0
        # What order the dimensions are sorted by (from how the .npz was packaged)
self.sortedOrder = sortedOrder
self.dimDict = {}
self.dimSizes = {}
for dt in self.sortedOrder:
self.dimDict[dt] = np.unique(self.spec[dt])
self.dimSizes[dt] = np.size(np.unique(self.spec[dt]))
# Set up and save the dict to order the filters once.
self.filterNameDict = {'u': 0, 'g': 1, 'r': 2, 'i': 3, 'z': 4, 'y': 5}
def __call__(self, intepPoints, filterNames=['u', 'g', 'r', 'i', 'z', 'y']):
if self.mags:
return self.interpMag(intepPoints, filterNames=filterNames)
else:
return self.interpSpec(intepPoints)
def indxAndWeights(self, points, grid):
"""
for given 1-D points, find the grid points on either side and return the weights
assume grid is sorted
"""
        order = np.argsort(points)
import simulation.quadrotor3 as quad
import simulation.config as cfg
import numpy as np
import random
from math import pi, sin, cos, exp
import gym
from gym import error, spaces, utils
from gym.utils import seeding
import simulation.animation_gl as ani_gl
class PerchMTEnv(gym.Env):
"""
Environment wrapper for training low-level flying skills. In this environment, the aircraft
is meant to perch on any of the four walls at a 90 degree angle
"""
def __init__(self):
metadata = {'render.modes': ['human']}
self.r_max = 2.5
self.goal_thresh = 0.2
self.t = 0
self.T = 10
self.action_space = np.zeros((4,))
self.observation_space = np.zeros((34,))
        # Dictionary to hold the wall data
        self.wall_data = {"wall_plane": None, "PR": None, "ELA": None, "wall_pos": None}
        wall_goal = self.get_wall_goal()
        self.wall = wall_goal[0]
self.goal_xyz = wall_goal[1]
zeta_goal = self.get_goal_zeta()
self.goal_zeta_sin = np.sin(zeta_goal)
self.goal_zeta_cos = np.cos(zeta_goal)
self.goal_uvw = np.array([[0.],
[0.],
[0.]])
self.goal_pqr = np.array([[0.],
[0.],
[0.]])
# simulation parameters
self.params = cfg.params
self.iris = quad.Quadrotor(self.params)
self.sim_dt = self.params["dt"]
self.ctrl_dt = 0.05
self.steps = range(int(self.ctrl_dt/self.sim_dt))
self.action_bound = [0, self.iris.max_rpm]
self.H = int(self.T/self.ctrl_dt)
self.hov_rpm = self.iris.hov_rpm
self.trim = [self.hov_rpm, self.hov_rpm,self.hov_rpm, self.hov_rpm]
self.trim_np = np.array(self.trim)
self.bandwidth = 35.
xyz, zeta, uvw, pqr = self.iris.get_state()
self.vec_xyz = xyz-self.goal_xyz
self.vec_zeta_sin = np.sin(zeta)-self.goal_zeta_sin
self.vec_zeta_cos = np.cos(zeta)-self.goal_zeta_cos
self.vec_uvw = uvw-self.goal_uvw
self.vec_pqr = pqr-self.goal_pqr
self.vel_r = pqr[2][0]
exp_wall_approach_angle = self.wall_data["ELA"]
if self.wall_data["PR"] == "R":
self.wall_approach_angle = abs(exp_wall_approach_angle - zeta[0][0])+ abs(0 - zeta[1][0])
self.approach_line = ((self.goal_xyz[0][0]-xyz[0][0])**2+(self.goal_xyz[2][0]-xyz[2][0])**2)**0.5
else:
self.wall_approach_angle = abs(exp_wall_approach_angle - zeta[1][0]) + abs(0 - zeta[0][0])
self.approach_line = ((self.goal_xyz[1][0]-xyz[1][0])**2+(self.goal_xyz[2][0]-xyz[2][0])**2)**0.5
self.dist_norm = np.linalg.norm(self.vec_xyz)
self.att_norm_sin = np.linalg.norm(self.vec_zeta_sin)
self.att_norm_cos = np.linalg.norm(self.vec_zeta_cos)
self.vel_norm = np.linalg.norm(self.vec_uvw)
self.ang_norm = np.linalg.norm(self.vec_pqr)
self.init_rendering = False
def get_goal(self):
return self.goal_xyz
def get_wall_goal(self):
"""
        Sets a wall on one of the four sides of the world.
        The goal is anywhere on this wall; which wall (and thus the goal)
        is randomly generated on each reset.
        - Landing is more consistent if the landing pad is not randomly
          located on each of the walls; the only randomness is the wall.
        - PR = pitch or roll (whichever must be controlled for this wall)
        - ELA = expected landing angle, i.e. -pi/2 or +pi/2
        - wall_pos = the location of the wall
        - wall_plane = which axis (x or y) contains the wall
"""
size = 6
if random.randint(0,1):
x = -3
if random.randint(0,1):
y = -3
self.wall_data["ELA"] = np.pi/2
self.wall_data["wall_pos"] = y
else:
y = 3
self.wall_data["ELA"] = -np.pi/2
self.wall_data["wall_pos"] = y
            g_x = 0  # random.randint(-5,5)/10. disabled for more consistent landings
g_z = 2.5
goal = np.array([[g_x],[y],[g_z]])
self.wall_data["wall_plane"] = 'y'
self.wall_data["PR"] = 'R'
A1 = np.array([x,y,-3])
A2 = np.array([x+size,y,-3])
A3 = np.array([x+size,y,3])
A4 = np.array([x,y,3])
else:
y = -3
if random.randint(0,1):
x = -3
self.wall_data["ELA"] = -np.pi/2
self.wall_data["wall_pos"] = x
else:
x = 3
self.wall_data["ELA"] = np.pi/2
self.wall_data["wall_pos"] = x
            g_y = 0  # random.randint(-5,5)/10. disabled for more consistent landings
g_z = 2.5
goal = np.array([[x],[g_y],[g_z]])
self.wall_data["wall_plane"] = 'x'
self.wall_data["PR"] = 'P'
A1 = np.array([x,y,-3])
A2 = np.array([x,y+size,-3])
            A3 = np.array([x,y+size,3])
            A4 = np.array([x,y,3])
import os
import random
import numpy as np
import cv2
def normalize(img, mean, std, max_pixel_value=255.0):
mean = np.array(mean, dtype=np.float32)
mean *= max_pixel_value
std = np.array(std, dtype=np.float32)
std *= max_pixel_value
denominator = np.reciprocal(std, dtype=np.float32)
img = img.astype(np.float32)
img -= mean
img *= denominator
return img
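# Example (illustrative): standardize an 8-bit BGR crop with ImageNet stats:
#   out = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
#   # `out` is float32 with roughly zero mean and unit variance per channel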
###################### get image path ######################
def get_image_paths(dataroot):
paths = []
if dataroot is not None:
paths_img = os.listdir(dataroot)
        for fname in sorted(paths_img):
            path = os.path.join(dataroot, fname)
            paths.append(path)
return paths
###################### read images ######################
def read_img(path):
img = cv2.imread(path, cv2.IMREAD_UNCHANGED)
img = img.astype(np.float32) / 255.
if img.ndim == 2:
img = np.expand_dims(img, axis=2)
# img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
# some images have 4 channels
if img.shape[2] > 3:
img = img[:, :, :3]
img = normalize(img, mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225], max_pixel_value=1)
return img
###################### read labels ######################
def read_nodule_label(label_path):
img_label = cv2.imread(label_path, cv2.IMREAD_UNCHANGED)
img_label = img_label / 255
return img_label
def read_cell_label(label_path):
img_label = cv2.imread(label_path, cv2.IMREAD_UNCHANGED)
img_label = np.where(img_label == 128, 1, img_label)
img_label = np.where(img_label == 255, 2, img_label)
return img_label
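# read_cell_label maps grayscale mask values {0, 128, 255} onto class ids
# {0, 1, 2} (background / first class / second class).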
#----------------------------------------------------------------------------------------------------#
# Data augmentation ----Z
#----------------------------------------------------------------------------------------------------#
img_w = 512
img_h = 512
# def gamma_transform(img, gamma):
# gamma_table = [np.power(x / 255.0, gamma) * 255.0 for x in range(256)]
# gamma_table = np.round(np.array(gamma_table)).astype(np.uint8)
# return cv2.LUT(img, gamma_table)
#
#
# def random_gamma_transform(img, gamma_vari):
# log_gamma_vari = np.log(gamma_vari)
# alpha = np.random.uniform(-log_gamma_vari, log_gamma_vari)
# gamma = np.exp(alpha)
# return gamma_transform(img, gamma)
def rotate(xb, yb, angle):
M_rotate = cv2.getRotationMatrix2D((img_w / 2, img_h / 2), angle, 1)
xb = cv2.warpAffine(xb, M_rotate, (img_w, img_h))
yb = cv2.warpAffine(yb, M_rotate, (img_w, img_h))
return xb, yb
def img_blur(img):
img = cv2.blur(img, (3, 3))
return img
def add_noise(img):
    for i in range(200):  # add salt noise at 200 random pixels
temp_x = np.random.randint(0, img.shape[0])
temp_y = np.random.randint(0, img.shape[1])
img[temp_x][temp_y] = 255
return img
def channel_change(img):
channels = [0, 1, 2]
random.shuffle(channels)
img = img[:, :, channels]
return img
def data_augment(img_list, rot=True, flip=True, blur=True, noise=True, channel_ch=True):
xb, yb = img_list
if rot:
if np.random.random() < 0.25:
xb, yb = rotate(xb, yb, 90)
if np.random.random() < 0.25:
xb, yb = rotate(xb, yb, 180)
if np.random.random() < 0.25:
xb, yb = rotate(xb, yb, 270)
if flip:
        if np.random.random() < 0.25:
            xb = cv2.flip(xb, 1)  # flipcode = 1: flip about the y-axis
            yb = cv2.flip(yb, 1)
        if np.random.random() < 0.25:
            xb = cv2.flip(xb, 0)  # flipcode = 0: flip about the x-axis
            yb = cv2.flip(yb, 0)
        if np.random.random() < 0.25:
            xb = cv2.flip(xb, -1)  # flipcode = -1: flip about both axes
            yb = cv2.flip(yb, -1)
if blur:
        if np.random.random() < 0.25:
            xb = img_blur(xb)
    # the source snippet is truncated here; the branches below are inferred
    # from the helper functions defined above
    if noise:
        if np.random.random() < 0.25:
            xb = add_noise(xb)
    if channel_ch:
        if np.random.random() < 0.25:
            xb = channel_change(xb)
    return xb, yb
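
# Hedged usage sketch (synthetic pair): shapes assumed (img_h, img_w, 3)
# for the image and (img_h, img_w) for the integer mask.
_x = np.random.randint(0, 256, (img_h, img_w, 3)).astype(np.uint8)
_y = np.random.randint(0, 3, (img_h, img_w)).astype(np.uint8)
_x_aug, _y_aug = data_augment([_x, _y])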
from CoolProp.CoolProp import PropsSI
import CoolProp.CoolProp as CP
import numpy as np
import matplotlib.pyplot as plt
#from scipy.interpolate import interp1d
def curva_limitePH(mix, color, label, pc):
n=20
if pc==0:
pc = mix.keyed_output(CP.iP_critical)
#pc = 30*10**5#mix.keyed_output(CP.iP_critical)
#tc = mix.keyed_output(CP.iT_critical)
    pt = 101325*2
    H_l = np.zeros(n)
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 11 11:31:35 2018
@author: mvanoudh
"""
import numpy as np
from sklearn.dummy import DummyRegressor, DummyClassifier
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeRegressor, DecisionTreeClassifier
from sklearn.neighbors import KNeighborsRegressor, KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor, RandomForestClassifier
from sklearn.ensemble import GradientBoostingRegressor, GradientBoostingClassifier
from sklearn.ensemble import ExtraTreesRegressor, ExtraTreesClassifier
# Format is
# model_constructor, is_regressor, is_tree, max_try, { parameters }
model_list = [
(DummyRegressor(), True, False, 10, {
'mo__strategy': ['mean', 'median']
}),
(DummyClassifier(), False, False, 10, {
'mo__strategy':['stratified', 'most_frequent', 'prior', 'uniform']
}),
(Ridge(), True, False, 100, {
'mo__alpha': np.power(10., range(2, -5, -1)),
'mo__normalize': [True, False]
}),
(LogisticRegression(), False, False, 1, {
'mo__penalty': ['l1', 'l2'],
'mo__C': [1.0, 0.1, 10]
}),
(KNeighborsRegressor(), True, False, 10, {
'mo__n_neighbors': range(1, 10),
'mo__algorithm' : ('auto', 'ball_tree', 'kd_tree', 'brute')
}),
(KNeighborsClassifier(), False, False, 10, {
'mo__n_neighbors': range(1, 10),
'mo__algorithm' : ('auto', 'ball_tree', 'kd_tree', 'brute'),
}),
(DecisionTreeRegressor(), True, True, 1, {
'mo__max_features': ('auto', 'sqrt', 'log2'),
'mo__criterion': ('mae', 'mse',),
'mo__max_depth': range(1, 5),
'mo__min_impurity_decrease': np.arange(0., 10e-7, step=1e-7),
'mo__min_samples_split': range(2, 5),
'mo__min_samples_leaf': range(1, 10),
'mo__random_state': (0, )
}),
(DecisionTreeClassifier(), False, True, 10, {
'mo__max_features': np.arange(.05, 1, step=.05),
'mo__criterion': ('entropy',),
'mo__max_depth': range(3, 20),
'mo__min_impurity_decrease': np.arange(1e-7, 10e-7, step=1e-7),
'mo__min_samples_split': range(2, 5),
'mo__random_state': (0, )
}),
(RandomForestRegressor(), True, True, 10, {
'mo__n_estimators': range(10, 100),
'mo__max_features': np.arange(.05, .9, step=.1),
'mo__criterion': ('mae', 'mse'),
'mo__max_depth': [0.2, 0.5, 0.8, None],
'mo__min_samples_split': range(2, 10),
'mo__random_state': (0, )
}),
(RandomForestClassifier(), False, True, 10, {
'mo__n_estimators': range(10, 100),
        'mo__max_features': np.arange(.05, .9, step=.1),
        # the source truncates the remaining entries; the list is closed here
        # so the snippet parses
    }),
]
import numpy as np
import keras
from learning import max_error, boundary_cond, transform, back_transform
from scipy import interpolate
preprocessing = "shift_and_rescale"
def shorten_gf(tau, gf, new_n_tau):
skip_factor = (len(tau) - 1) // (new_n_tau - 1)
return tau[::skip_factor], gf[::skip_factor]
def load_nn_model(filename):
return keras.models.load_model(filename, custom_objects={'max_error': max_error,
'boundary_cond': boundary_cond})
def nn_gf(weak_gf, strong_gf, model):
n_tau = len(weak_gf)
X = np.zeros((1, 2 * n_tau))
X[0, :n_tau] = weak_gf
X[0, n_tau:] = strong_gf
X = transform(X, preprocessing)
Y = model.predict(X)
Y = back_transform(Y, preprocessing)
return Y[0]
def interpolate_gf(tau, gf, new_n_tau):
tck = interpolate.splrep(tau, gf, s=0.000001)
new_tau = np.linspace(0, tau[-1], num=new_n_tau, endpoint=True)
new_gf = interpolate.splev(new_tau, tck, der=0)
return new_tau, new_gf
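
# Hedged usage sketch (synthetic Green's function): densify an
# imaginary-time grid from 51 to 201 points with the spline helper above.
_tau = np.linspace(0.0, 10.0, 51)
_gf = -0.5 * np.exp(-_tau / 10.0)
_tau_fine, _gf_fine = interpolate_gf(_tau, _gf, 201)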
def save_gf(filename, tau, G):
    # single_spin, only_gf and skip_factor are module-level names expected
    # from `from params import *` (see the __main__ block below)
    if single_spin:
data = np.asarray([tau, G[0]])
else:
data = np.asarray([tau, G[0], G[1]])
if not only_gf:
np.savetxt(filename, data.T)
else:
with open(filename, 'ab') as f:
np.savetxt(f, data[1][::skip_factor, np.newaxis].T, delimiter=",", fmt="%1.4f")
if __name__ == "__main__":
from params import *
    tau_strong, strong_gf = np.loadtxt(G_strong_filename, unpack=True)
import pytest
import unittest.mock as mock
import open_cp.sepp_base as sepp_base
import open_cp.data
import open_cp.predictors
import numpy as np
import datetime
class OurModel(sepp_base.ModelBase):
def background(self, points):
assert len(points.shape) == 2
assert points.shape[0] == 3
return points[0] * np.exp(-(points[1]**2 + points[2]**2))
def trigger(self, pt, dpts):
assert pt.shape == (3,)
assert len(dpts.shape) == 2
assert dpts.shape[0] == 3
w = np.sum(np.abs(pt))
return dpts[0] * np.exp(-(dpts[1]**2 + dpts[2]**2) / w)
def slow_p_matrix(model, points):
assert points.shape[0] == 3
d = points.shape[1]
p = np.zeros((d,d))
for i in range(d):
pt = points[:,i]
p[i,i] = model.background(pt[:,None])
for j in range(i):
dp = pt - points[:,j]
if dp[0] <= 0:
p[j,i] = 0
else:
p[j,i] = model.trigger(pt, dp[:,None])
for i in range(d):
p[:,i] /= np.sum(p[:,i])
return p
def test_p_matrix():
model = OurModel()
for _ in range(10):
        points = np.random.random((3,20))
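        # Assumed continuation: compare the reference implementation above
        # with the vectorised sepp_base.p_matrix under test (API name
        # assumed from the test's name).
        p_slow = slow_p_matrix(model, points)
        p_fast = sepp_base.p_matrix(model, points)
        assert np.allclose(p_slow, p_fast)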
"""Tests for coco_tools. """
import unittest
import numpy as np
from neural_compressor.experimental.metric.coco_tools import *
class TestCOCO(unittest.TestCase):
@classmethod
def setUpClass(cls):
groundtruth_annotations_list = [
{
'id': 1,
'image_id': 1,
'category_id': 1,
'bbox': [387.99,97.43,84.99,81.29],
'area': 2991.9213,
'iscrowd': 0,
'segmentation':[
[387.99,176.5,398.34,164.68,405.733,156.55,412.38,141.77,
419.77,136.6,424.94,125.51,432.33,116.64,434.55,102.6,
436.77,97.43,441.944,102.6,453.76,101.12,459.68,109.99,
457.46,115.9,463.37,124.03,470.76,128.47,472.98,137.34,
465.559,143.25,447.11,137.34,444.9,142.51,442.68,156.55,
444.9,163.2,446.37,176.5,444.9,178.72]
]
}
]
image_list = [{'id': 1}]
category_list = [{'id': 0, 'name': 'person'},
{'id': 1, 'name': 'cat'},
{'id': 2, 'name': 'dog'}]
cls.groundtruth_dict = {
'annotations': groundtruth_annotations_list,
'images': image_list,
'categories': category_list
}
cls.detections_list = [
{
'image_id': 1,
'category_id': 1,
'bbox': [387.99,97.43,84.99,81.29],
'score': .8,
'segmentation':[
[387.99,176.5,398.34,164.68,405.733,156.55,412.38,141.77,
419.77,136.6,424.94,125.51,432.33,116.64,434.55,102.6,
436.77,97.43,441.944,102.6,453.76,101.12,459.68,109.99,
457.46,115.9,463.37,124.03,470.76,128.47,472.98,137.34,
465.559,143.25,447.11,137.34,444.9,142.51,442.68,156.55,
444.9,163.2,446.37,176.5,444.9,178.72]
]
},
]
def testCOCOWrapper(self):
with self.assertRaises(ValueError):
wrap = COCOWrapper(None, 'test')
wrap = COCOWrapper(TestCOCO.groundtruth_dict)
with self.assertRaises(ValueError):
wrap.LoadAnnotations(TestCOCO.groundtruth_dict)
wrong_detection = {
'image_id': 'test',
'category_id': 1,
'bbox': [100., 100., 100., 100.],
'score': .8
}
with self.assertRaises(ValueError):
wrap.LoadAnnotations(wrong_detection)
wrong_detection = [
{
'image_id': 'test',
'category_id': 1,
'bbox': [100., 100., 100., 100.],
'score': .8
}
]
with self.assertRaises(ValueError):
wrap.LoadAnnotations(wrong_detection)
groundtruth = COCOWrapper(TestCOCO.groundtruth_dict)
detections = groundtruth.LoadAnnotations(TestCOCO.detections_list)
evaluator = COCOEvalWrapper(groundtruth, detections)
self.assertEqual(evaluator.GetCategory(1)['name'], 'cat')
self.assertTrue(not evaluator.GetAgnosticMode())
self.assertEqual(evaluator.GetCategoryIdList(), [0, 1, 2])
evaluator = COCOEvalWrapper(groundtruth, detections, agnostic_mode=True)
self.assertTrue(evaluator.GetAgnosticMode())
summary_metrics, _ = evaluator.ComputeMetrics()
self.assertAlmostEqual(1.0, summary_metrics['Precision/mAP'])
with self.assertRaises(ValueError):
summary_metrics, _ = evaluator.ComputeMetrics(True, True)
iou_thrs = '0.5:0.05:0.95'
map_points = 101
evaluator = COCOEvalWrapper(groundtruth, detections, iou_thrs=iou_thrs, map_points=map_points)
evaluator.evaluate()
evaluator.accumulate()
self.assertEqual(evaluator.eval['counts'], [10, 101, 3, 4, 3])
iou_thrs = 0.5
map_points = 11
evaluator = COCOEvalWrapper(groundtruth, detections, iou_thrs=iou_thrs, map_points=map_points)
evaluator.evaluate()
evaluator.accumulate()
self.assertEqual(evaluator.eval['counts'], [1, 11, 3, 4, 3])
iou_thrs = 0.5
map_points = 0
evaluator = COCOEvalWrapper(groundtruth, detections, iou_thrs=iou_thrs, map_points=map_points)
evaluator.evaluate()
evaluator.accumulate()
self.assertEqual(evaluator.eval['counts'], [1, 1, 3, 4, 3])
def testExportSingleImageDetectionBoxesToCoco(self):
with self.assertRaises(ValueError):
ExportSingleImageDetectionBoxesToCoco(None, None, None,
np.array([0]), np.array([[0,0]]))
with self.assertRaises(ValueError):
ExportSingleImageDetectionBoxesToCoco(None, None, np.array([0,0]),
np.array([0]), np.array([0]))
with self.assertRaises(ValueError):
ExportSingleImageDetectionBoxesToCoco(None, None, np.array([[0,0]]),
np.array([0]), np.array([0]))
def testExportSingleImageGroundtruthToCoco(self):
with self.assertRaises(ValueError):
ExportSingleImageGroundtruthToCoco(None, None, None,
np.array([0,0]), np.array([0]))
with self.assertRaises(ValueError):
ExportSingleImageGroundtruthToCoco(None, None, None,
np.array([[0,0]]), np.array([0]))
with self.assertRaises(ValueError):
ExportSingleImageGroundtruthToCoco(None, None, None,
np.array([[1,1,5,5]]), np.array([1]), np.array([[[1]]]), np.array([[1,0]]))
ExportSingleImageGroundtruthToCoco(1, 2, [0,1,2], np.array([[1,1,5,5]]),
np.array([1]), np.array([[[1]]], dtype=np.uint8), np.array([1,0]))
def testExportSingleImageDetectionMasksToCoco(self):
with self.assertRaises(ValueError):
ExportSingleImageDetectionMasksToCoco(None, None, None,
np.array([0]), np.array([[0,0]]))
with self.assertRaises(ValueError):
ExportSingleImageDetectionMasksToCoco(None, None, np.array([0,0]),
np.array([0]), np.array([0]))
mask=[
[387.99,176.5,398.34,164.68,405.733,156.55,412.38,141.77,
419.77,136.6,424.94,125.51,432.33,116.64,434.55,102.6,
436.77,97.43,441.944,102.6,453.76,101.12,459.68,109.99,
457.46,115.9,463.37,124.03,470.76,128.47,472.98,137.34,
465.559,143.25,447.11,137.34,444.9,142.51,442.68,156.55,
444.9,163.2,446.37,176.5,444.9,178.72]
]
result = ExportSingleImageDetectionMasksToCoco(
            1, [0,1,2], mask, np.array([0.8]), np.array([1]))
#!/usr/bin/env python3
# ~ https://dataportals.pangaea.de/bsrn/?q=LR0100
import pandas as pd
from pandas._libs.tslibs.timestamps import Timestamp
import datetime
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
from pysolar.solar import *
import numpy as np
from scipy.special import lambertw
from libreria_clear_sky_models_panda import *
# get the number of the last line of header comment
def get_skip_pangaea( _filename ):
skip_line = 0
    # read-only access suffices; the context manager closes the file
    with open(_filename, 'r') as f:
        for line in f:
            skip_line += 1
            if "*/" in line:
                break
    # import the file as a pandas DataFrame
ds = pd.read_csv(_filename,
sep='\t',
lineterminator = "\n",
skiprows= skip_line
)
return ds
def clear_minus_to_0(_col):
if _col <0:
_col = 0
return _col
# get cenital angle
def theta_z_calculation( _col , _longitude, _latitude):
col = Timestamp(_col, freq='MS').to_pydatetime()
date = col.replace(tzinfo=datetime.timezone.utc)
# ~ get_altitude(self.lat, self.lon, d)
return ( 90-get_altitude(_latitude, _longitude, date) )
def check_if_value_is_in_range(_col, _max_std):
std_col = _col
max_std = _max_std
if std_col<= max_std:
return(1)
return(0)
''' Extraterrestial functions implementations '''
def get_Ext_irradiance_pandas(_time ):
    # Calculated as in: "Worldwide performance assessment of 75 global clear-sky irradiance models using Principal Component Analysis"
day_time = Timestamp(_time, freq='MS').to_pydatetime()
N = day_time.timetuple().tm_yday
I_0 = 1361.2 # extaterrestrial solar irradiance [w/m2]
c = np.pi/180
c_360 = 360*(N-1)/365*c
fac1 = 1.00011 + 0.034221* np.cos(c_360)
fac2 = 0.00128 * np.sin(c_360) + 0.000719*np.cos(2*c_360)
fac3 = 0.000077 * np.sin(2*c_360)
I_ext = I_0 * ( fac1 + fac2 + fac3)
return(I_ext)
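
# Hedged sanity check (hypothetical date): near perihelion (Jan 1) the model
# above gives an extraterrestrial irradiance of roughly 1409 W/m2.
_I_jan1 = get_Ext_irradiance_pandas(pd.Timestamp("2020-01-01"))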
# get a and b parameter of SANCHEZ SEGuRA - PENA CRUZ K_n model
def compute_SSPCnormalDistribution_parameters(_theta_1, _DNI_1 , _theta_2 , _DNI_2, _Iext):
''' y = a e**(b x**2)'''
kt1 = 1 - _DNI_1 / _Iext
kt2 = 1 - _DNI_2 / _Iext
theta_z1 = _theta_1 * np.pi/180
theta_z2 = _theta_2 * np.pi/180
x1 = kt1 * np.sin(theta_z1)
y1 = kt1 * np.cos(theta_z1)
x2 = kt2 * np.sin(theta_z2)
y2 = kt2 * np.cos(theta_z2)
b = np.real( 1/(x1**2-x2**2)*np.log(y1/y2) )
a = y1 * (y1/y2)**(-x1**2/(x1**2-x2**2))
    # validation to obtain a real curve using the a and b parameters;
    # pass the angle in degrees, since compute_SSPC_using_a_b converts to
    # radians internally (theta_z1 here is already in radians)
    sscp = compute_SSPC_using_a_b(_theta_1, a, b, 1000)
# ~ print("sscp is: " , type(sscp) )
# ~ print("sscp is nan: " , np.isnan(sscp) )
# ~ print("sscp value: " , sscp)
return ( a , b , sscp)
def compute_SSPC_using_a_b(_theta_z, a, b, _I_ext):
theta_z1 = _theta_z * np.pi/180
if _theta_z < 90:
#productLogfunction
w = np.real(lambertw(-2*a**2*b*(np.tan(theta_z1))**2 ))
x = 1 * (w)**0.5 /(2*-b)**0.5
y = x /np.tan(theta_z1)
else:
x = 0
y = 1
kt = 1- (x**2 + y**2)**0.5
if kt < 0:
kt = 0
SSPC = kt*_I_ext
return (SSPC)
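
# Hedged worked example (synthetic inputs): fit the (a, b) envelope from two
# zenith-angle/DNI samples, then evaluate the model at 45 degrees.
_a, _b, _check = compute_SSPCnormalDistribution_parameters(30.0, 900.0, 60.0, 700.0, 1361.0)
_dni_45 = compute_SSPC_using_a_b(45.0, _a, _b, 1361.0)  # between the two input DNIs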
def main():
# data
save_document_name = "DB_.csv"
''' ------------------------ '''
filename_tl = "Alert_TL.csv" #Linke Turbidity coefficient data from soda
path_base = "ALE_radiation_"
name_station = "Alert"
latitude = 82.490000
longitude = -62.420000
elevation = 127.0
''' ------------------------ '''
''' Reno's conditions'''
    max_std = 0.2  # max std permitted (Inman)
max_M = 75
max_mean = 75
''' import linke turbidity data by months '''
Tl_ds = get_tl_by_interpolation_soda( filename_tl )
print('Tl_ds' , Tl_ds)
    ds = np.asarray([])
import helpers
import numpy
import pytest
import toughio
write_read = lambda x, **kwargs: helpers.write_read(
"INFILE", x, toughio.write_input, toughio.read_input, **kwargs
)
write_read_tough = lambda x: write_read(
x,
writer_kws={"file_format": "tough"},
reader_kws={"file_format": "tough"},
)
write_read_json = lambda x: write_read(
x,
writer_kws={"file_format": "json"},
reader_kws={"file_format": "json"},
)
@pytest.mark.parametrize(
"write_read, single",
[
(write_read_tough, True),
(write_read_tough, False),
(write_read_json, True),
(write_read_json, False),
],
)
def test_title(write_read, single):
parameters_ref = {
"title": (
helpers.random_string(80)
if single
else [helpers.random_string(80) for _ in range(numpy.random.randint(5) + 2)]
),
}
parameters = write_read(parameters_ref)
assert parameters_ref["title"] == parameters["title"]
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_rocks(write_read):
keys = [
"density",
"porosity",
"permeability",
"conductivity",
"specific_heat",
"compressibility",
"expansivity",
"conductivity_dry",
"tortuosity",
"klinkenberg_parameter",
"distribution_coefficient_3",
"distribution_coefficient_4",
]
parameters_ref = {
"rocks": {
helpers.random_string(5): {key: numpy.random.rand() for key in keys[:5]},
helpers.random_string(5): {
key: numpy.random.rand()
if key != "permeability"
else numpy.random.rand(3)
for key in keys[:5]
},
helpers.random_string(5): {key: numpy.random.rand() for key in keys},
helpers.random_string(5): {key: numpy.random.rand() for key in keys},
helpers.random_string(5): {key: numpy.random.rand() for key in keys},
helpers.random_string(5): {key: numpy.random.rand() for key in keys},
}
}
names = list(parameters_ref["rocks"].keys())
parameters_ref["rocks"][names[-1]].update(
{
"relative_permeability": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
},
}
)
parameters_ref["rocks"][names[-2]].update(
{
"capillarity": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
},
}
)
parameters_ref["rocks"][names[-3]].update(
{
"relative_permeability": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
},
"capillarity": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
},
}
)
parameters = write_read(parameters_ref)
assert sorted(parameters_ref["rocks"].keys()) == sorted(parameters["rocks"].keys())
for k, v in parameters_ref["rocks"].items():
for kk, vv in v.items():
if not isinstance(vv, dict):
assert numpy.allclose(vv, parameters["rocks"][k][kk], atol=1.0e-5)
else:
helpers.allclose_dict(vv, parameters["rocks"][k][kk], atol=1.0e-4)
@pytest.mark.parametrize(
"write_read, rpcap",
[
(write_read_tough, "rp"),
(write_read_tough, "cap"),
(write_read_tough, "both"),
(write_read_json, "rp"),
(write_read_json, "cap"),
(write_read_json, "both"),
],
)
def test_rpcap(write_read, rpcap):
parameters_ref = {"default": {}}
if rpcap in {"rp", "both"}:
parameters_ref["default"]["relative_permeability"] = {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
}
if rpcap in {"cap", "both"}:
parameters_ref["default"]["capillarity"] = {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand(numpy.random.randint(7) + 1),
}
parameters = write_read(parameters_ref)
for k, v in parameters_ref["default"].items():
helpers.allclose_dict(v, parameters["default"][k], atol=1.0e-4)
@pytest.mark.parametrize("write_read", [write_read_tough, write_read_json])
def test_flac(write_read):
parameters_ref = {
"flac": {
"creep": bool(numpy.random.randint(2)),
"porosity_model": numpy.random.randint(10),
"version": numpy.random.randint(10),
},
"rocks": {
helpers.random_string(5): {
"permeability_model": {
"id": numpy.random.randint(10),
"parameters": numpy.random.rand( | numpy.random.randint(7) | numpy.random.randint |